\input{"/home/zack/Notes/Latex/preamble.tex"} \addbibresource{Algebra.bib} \let\Begin\begin \let\End\end \newcommand\wrapenv[1]{#1} \makeatletter \def\ScaleWidthIfNeeded{% \ifdim\Gin@nat@width>\linewidth \linewidth \else \Gin@nat@width \fi } \def\ScaleHeightIfNeeded{% \ifdim\Gin@nat@height>0.9\textheight 0.9\textheight \else \Gin@nat@height \fi } \makeatother \setkeys{Gin}{width=\ScaleWidthIfNeeded,height=\ScaleHeightIfNeeded,keepaspectratio}% \title{ \rule{\linewidth}{1pt} \\ \textbf{ Algebra } \\ {\normalsize University of Georgia, Fall 2019} \\ \rule{\linewidth}{2pt} } \titlehead{ \begin{center} \includegraphics[width=\linewidth,height=0.5\textheight,keepaspectratio]{figures/cover.png} \end{center} \begin{minipage}{.35\linewidth} \begin{flushleft} \vspace{2em} {\fontsize{6pt}{2pt} \textit{Notes: These are notes live-tex'd from a graduate course in Algebra taught by Dan Nakano at the University of Georgia in Fall 2019. As such, any errors or inaccuracies are almost certainly my own. } } \\ \end{flushleft} \end{minipage} \hfill \begin{minipage}{.65\linewidth} \end{minipage} } \begin{document} \date{} \maketitle \begin{flushleft} \textbf{D. Zack Garza} \\ \textit{University of Georgia} \\ \textit{dzackgarza@gmail.com} \\ {\tiny \textit{Last updated:} 2020-10-22 } \end{flushleft} \newpage \tableofcontents \hypertarget{summary}{% \section{Summary}\label{summary}} \begin{itemize} \tightlist \item Groups and rings, including Sylow theorems, \item Classifying small groups, \item Finitely generated abelian groups, \item Jordan-Holder theorem, \item Solvable groups, \item Simplicity of the alternating group, \item Euclidean domains, \item Principal ideal domains, \item Unique factorization domains, \item Noetherian rings, \item Hilbert basis theorem, \item Zorn's lemma, and \item Existence of maximal ideals and vector space bases.
\end{itemize} Previous course web pages: \begin{itemize} \tightlist \item \href{https://asilata.github.io/8000fall17/}{Fall 2017, Asilata Bapat} \end{itemize} \hypertarget{thursday-august-15th}{% \section{Thursday August 15th}\label{thursday-august-15th}} \begin{quote} We'll be using Hungerford's Algebra text. \end{quote} \hypertarget{definitions}{% \subsection{Definitions}\label{definitions}} The following definitions will be useful to know by heart: \begin{itemize} \tightlist \item The order of a group \item Cartesian product \item Relations \item Equivalence relation \item Partition \item Binary operation \item Group \item Isomorphism \item Abelian group \item Cyclic group \item Subgroup \item Greatest common divisor \item Least common multiple \item Permutation \item Transposition \item Orbit \item Cycle \item The symmetric group \(S_{n}\) \item The alternating group \(A_{n}\) \item Even and odd permutations \item Cosets \item Index \item The direct product of groups \item Homomorphism \item Image of a function \item Inverse image of a function \item Kernel \item Normal subgroup \item Factor group \item Simple group \end{itemize} Here is a rough outline of the course: \begin{itemize} \tightlist \item Group Theory \begin{itemize} \tightlist \item Groups acting on sets \item Sylow theorems and applications \item Classification \item Free and free abelian groups \item Solvable and simple groups \item Normal series \end{itemize} \item Galois Theory \begin{itemize} \tightlist \item Field extensions \item Splitting fields \item Separability \item Finite fields \item Cyclotomic extensions \item Galois groups \item Solvability by radicals \end{itemize} \item Module theory \begin{itemize} \tightlist \item Free modules \item Homomorphisms \item Projective and injective modules \item Finitely generated modules over a PID \end{itemize} \item Linear Algebra \begin{itemize} \tightlist \item Matrices and linear transformations \item Rank and determinants \item Canonical forms 
\item Characteristic polynomials \item Eigenvalues and eigenvectors \end{itemize} \end{itemize} \hypertarget{preliminaries}{% \subsection{Preliminaries}\label{preliminaries}} \textbf{Definition}: A \textbf{group} is an ordered pair \((G, \wait: G\cross G \to G)\) where \(G\) is a set and \(\wait\) is a binary operation, which satisfies the following axioms: \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \textbf{Associativity}: \((g_1 g_2)g_3 = g_1(g_2 g_3)\), \item \textbf{Identity}: \(\exists e\in G \suchthat ge = eg = g\), \item \textbf{Inverses}: \(g\in G \implies \exists h\in G \suchthat gh = hg = e\). \end{enumerate} \emph{Examples of groups:} \begin{itemize} \item \((\ZZ, +)\) \item \((\QQ, +)\) \item \((\QQ\units, \times)\) \item \((\RR\units, \times)\) \item \((\GL(n, \RR), \times) = \theset{A \in \mathrm{Mat}_n \suchthat \det(A) \neq 0}\) \item \((S_n, \circ)\) \end{itemize} \textbf{Definition:} A subset \(S \subseteq G\) is a \textbf{subgroup} of \(G\) iff \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \textbf{Closure}: \(s_1, s_2 \in S \implies s_1 s_2 \in S\) \item \textbf{Identity}: \(e\in S\) \item \textbf{Inverses}: \(s\in S \implies s\inv \in S\) \end{enumerate} We denote such a subgroup \(S \leq G\). \emph{Examples of subgroups:} \begin{itemize} \item \((\ZZ, +) \leq (\QQ, +)\) \item \(\SL(n, \RR) \leq \GL(n, \RR)\), where \(\SL(n, \RR) = \theset{A\in \GL(n, \RR) \suchthat \det(A) = 1}\) \end{itemize} \hypertarget{cyclic-groups}{% \subsection{Cyclic Groups}\label{cyclic-groups}} \textbf{Definition}: A group \(G\) is \textbf{cyclic} iff \(G\) is generated by a single element. \emph{Exercise}: Show \begin{align*} \generators{g} = \theset{g^n \suchthat n\in\ZZ} = \intersect \theset{H \mid H \leq G \text{ and } g\in H} .\end{align*} \textbf{Theorem:} Let \(G\) be a cyclic group, so \(G = \generators{g}\). \begin{itemize} \item If \(\abs{G} = \infty\), then \(G \cong \ZZ\).
\item If \(\abs{G} = n < \infty\), then \(G \cong \ZZ_n\). \end{itemize} \textbf{Definition}: Let \(H \leq G\), and define a \textbf{left coset of \(H\)} by \(aH = \theset{ah \suchthat h \in H}\). A similar definition can be made for \textbf{right cosets}. \textbf{The ``Fundamental Theorem of Cosets''}: \begin{align*} aH = bH \iff b\inv a \in H \text{ and } Ha = Hb \iff ab\inv \in H .\end{align*} \textbf{Some facts:} \begin{itemize} \item Cosets partition \(G\), i.e. \begin{align*} b\not\in aH \implies aH \intersect bH = \emptyset .\end{align*} \item \(\abs{H} = \abs{aH} = \abs{Ha}\) for all \(a\in G\). \end{itemize} \textbf{Theorem (Lagrange)}: If \(G\) is a finite group and \(H \leq G\), then \(\abs{H} \divides \abs{G}\). \textbf{Definition} A subgroup \(N \leq G\) is \textbf{normal} iff \(gN = Ng\) for all \(g\in G\), or equivalently \(gNg\inv \subseteq N\). (I denote this \(N \normal G\).) When \(N \normal G\), the set of left/right cosets of \(N\) themselves have a group structure. So we define \begin{align*} G/N = \theset{gN \suchthat g\in G} \text{ where } (g_1 N)\cdot (g_2 N) \definedas (g_1 g_2) N .\end{align*} Given \(H, K \leq G\), define \begin{align*} HK = \theset{hk \mid h\in H, ~k\in K} .\end{align*} We have a general formula, \begin{align*} \abs{HK} = \frac{\abs H \abs K}{\abs{H \intersect K}}. \end{align*} \hypertarget{homomorphisms}{% \subsection{Homomorphisms}\label{homomorphisms}} \textbf{Definition}: Let \(G,G'\) be groups, then \(\varphi: G \to G'\) is a \textbf{homomorphism} if \(\varphi(ab) = \varphi(a) \varphi(b)\).
\emph{Examples of homomorphisms}: \begin{itemize} \item \(\exp: (\RR, +) \to (\RR^{> 0}, \wait)\) since \begin{align*} \exp(a+b) \definedas e^{a+b} = e^a e^b \definedas \exp(a) \exp(b) .\end{align*} \item \(\det: (\GL(n, \RR), \times) \to (\RR\units, \times)\) since \begin{align*}\det(AB) = \det(A) \det(B).\end{align*} \item Let \(N \normal G\) and define \end{itemize} \begin{align*} \varphi: G &\to G/N \\ g &\mapsto gN .\end{align*} \begin{itemize} \tightlist \item Let \(\varphi: \ZZ \to \ZZ_n\) where \(\varphi(g) = [g] = g \mod n\) where \(\ZZ_n \cong \ZZ/n\ZZ\) \end{itemize} \textbf{Definition}: Let \(\varphi: G \to G'\). Then \(\varphi\) is a \textbf{monomorphism} iff it is injective, an \textbf{epimorphism} iff it is surjective, and an \textbf{isomorphism} iff it is bijective. \hypertarget{direct-products}{% \subsection{Direct Products}\label{direct-products}} Let \(G_1, G_2\) be groups, then define \begin{align*} G_1 \cross G_2 = \theset{(g_1, g_2) \suchthat g_1 \in G_1, g_2 \in G_2} \text{ where } (g_1, g_2)(h_1, h_2) = (g_1 h_1, g_2 h_2). \end{align*} We have the formula \(\abs{G_1 \cross G_2} = \abs{G_1} \abs{G_2}\). \hypertarget{finitely-generated-abelian-groups}{% \subsection{Finitely Generated Abelian Groups}\label{finitely-generated-abelian-groups}} \textbf{Definition}: We say a group is \textbf{abelian} if \(G\) is commutative, i.e.~\(g_1, g_2 \in G \implies g_1 g_2 = g_2 g_1\). \textbf{Definition}: A group is \textbf{finitely generated} if there exist \(\theset{g_1, g_2, \cdots g_n} \subseteq G\) such that \(G = \generators{g_1, g_2, \cdots g_n}\). This generalizes the notion of a cyclic group, where we can simply intersect all of the subgroups that contain the \(g_i\) to define it. We know what cyclic groups look like -- they are all isomorphic to \(\ZZ\) or \(\ZZ_n\). So now we'd like a structure theorem for abelian finitely generated groups. \textbf{Theorem}: Let \(G\) be a finitely generated abelian group.
Then \begin{align*}G \cong \ZZ^r \times \displaystyle\prod_{i=1}^s \ZZ_{p_i^{\alpha _i}}\end{align*} for some finite \(r,s \in \NN\) where the \(p_i\) are (not necessarily distinct) primes. \emph{Example}: Let \(G\) be a finite abelian group of order 4. Then \(G \cong \ZZ_4\) or \(\ZZ_2^2\), which are not isomorphic because every element in \(\ZZ_2^2\) has order 2 where \(\ZZ_4\) contains an element of order 4. \hypertarget{fundamental-homomorphism-theorem}{% \subsection{Fundamental Homomorphism Theorem}\label{fundamental-homomorphism-theorem}} Let \(\varphi: G \to G'\) be a group homomorphism and define \begin{align*} \ker \varphi \definedas \theset{g\in G \suchthat \varphi(g) = e'} .\end{align*} \hypertarget{the-first-homomorphism-theorem}{% \subsubsection{The First Homomorphism Theorem}\label{the-first-homomorphism-theorem}} \textbf{Theorem}: There exists a map \(\varphi': G/\ker \varphi \to G'\) such that the following diagram commutes: \begin{center} \begin{tikzcd} G \arrow[dd, "\eta"'] \arrow[rr, "\varphi", dotted] & & G' \\ & & \\ G/\ker \varphi \arrow[rruu, "\varphi'"] & & \end{tikzcd} \end{center} That is, \(\varphi = \varphi' \circ \eta\), and \(\varphi'\) is an isomorphism onto its image, so \(G/\ker \varphi = \im \varphi\). This map is given by \begin{align*} \varphi'(g(\ker \varphi)) = \varphi(g) .\end{align*} \emph{Exercise}: Check that \(\varphi\) is well-defined. \hypertarget{the-second-theorem}{% \subsubsection{The Second Theorem}\label{the-second-theorem}} \textbf{Theorem}: Let \(K, N \leq G\) where \(N \normal G\). Then \begin{align*} \frac K {N \intersect K} \cong \frac {NK} N \end{align*} \emph{Proof:} Define a map \begin{align*} K &\mapsvia{\varphi} NK/N \\ k &\mapsto kN .\end{align*} You can show that \(\varphi\) is onto, then look at \(\ker \varphi\); note that \begin{align*} kN = \varphi(k) = N \iff k \in N ,\end{align*} and so \(\ker \varphi = N \intersect K\). 
\(\qed\) \hypertarget{tuesday-august-20th}{% \section{Tuesday August 20th}\label{tuesday-august-20th}} \hypertarget{the-fundamental-homomorphism-theorems}{% \subsection{The Fundamental Homomorphism Theorems}\label{the-fundamental-homomorphism-theorems}} \textbf{Theorem 1:} Let \(\varphi: G \to G'\) be a homomorphism. Then there is a canonical homomorphism \(\eta: G \to G/\ker \varphi\) such that the usual diagram commutes. Moreover, this map induces an isomorphism \(G /\ker \varphi \cong \im \varphi\). \textbf{Theorem 2:} Let \(K, N \leq G\) and suppose \(N \normal G\). Then there is an isomorphism \begin{align*} \frac K {K \intersect N} \cong \frac {NK} {N} \end{align*} \emph{Proof Sketch:} Show that \(K \intersect N \normal K\), and \(NK\) is a subgroup exactly because \(N\) is normal. \textbf{Theorem 3:} Let \(H, K \normal G\) such that \(K \leq H\). Then \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(H/K\) is normal in \(G/K\). \item The quotient \((G/K) / (H/K) \cong G/H\). \end{enumerate} \emph{Proof:} We'll use the first theorem. Define a map \begin{align*} \phi: G/K &\to G/H \\ gK &\mapsto gH .\end{align*} \emph{Exercise}: Show that \(\phi\) is surjective, and that \(\ker \phi \cong H/K\). \(\qed\) \hypertarget{permutation-groups}{% \subsection{Permutation Groups}\label{permutation-groups}} Let \(A\) be a set, then a \emph{permutation} on \(A\) is a bijective map \(A \selfmap\). This can be made into a group with a binary operation given by composition of functions. Denote \(S_{A}\) the set of permutations on \(A\). \textbf{Theorem:} \(S_{A}\) is in fact a group. \emph{Proof:} Exercise. Follows from checking associativity, inverses, identity, etc. \(\qed\) In the special case that \(A = \theset{1, 2, \cdots n}\), then \(S_{n} \definedas S_{A}\).
Recall two line notation \begin{align*} \left(\begin{matrix} 1 & 2 & \cdots & n\\ \sigma(1) & \sigma(2) & \cdots & \sigma(n) \end{matrix}\right) \end{align*} Moreover, \(\abs{S_{n}} = n!\) by a combinatorial counting argument. \emph{Example:} \(S_{3}\) is the symmetries of a triangle. \emph{Example:} The symmetries of a square are \emph{not} given by \(S_{4}\), it is instead \(D_{4}\). \hypertarget{orbits-and-the-symmetric-group}{% \subsection{Orbits and the Symmetric Group}\label{orbits-and-the-symmetric-group}} Permutations \(S_{A}\) \emph{act} on \(A\), and if \(\sigma \in S_{A}\), then \(\generators{\sigma}\) also acts on \(A\). Define \(a \sim b\) iff there is some \(n\) such that \(\sigma^{n}(a) = b\). This is an equivalence relation, and thus induces a partition of \(A\). See notes for diagram. The equivalence classes under this relation are called the \emph{orbits} under \(\sigma\). \emph{Example:} \begin{align*} \left(\begin{matrix} 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 \\ 8 & 2 & 6 & 3 & 7 & 4 & 5 & 1 \end{matrix}\right) = (1 8)(2)(3 6 4)(5 7). \end{align*} \textbf{Definition:} A permutation \(\sigma \in S_{n}\) is a \emph{cycle} iff it contains at most one orbit with more than one element. The \emph{length} of a cycle is the number of elements in the largest orbit. Recall cycle notation: \(\sigma = (\sigma(1) \sigma(2) \cdots \sigma(n))\). \begin{quote} Note that this is read right-to-left by convention! \end{quote} \textbf{Theorem:} Every permutation \(\sigma \in S_{n}\) can be written as a product of disjoint cycles. \textbf{Definition:} A \emph{transposition} is a cycle of length 2. \textbf{Proposition:} Every permutation is a product of transpositions. \emph{Proof:} \begin{align*} (a_{1} a_{2} \cdots a_{n}) = (a_{1} a_{n}) (a_{1} a_{n-1}) \cdots (a_{1} a_{2}) .\end{align*} \(\qed\) This is not a unique decomposition, however, as e.g.~\(\id = (1 2)^{2} = (3 4)^{2}\). 
\textbf{Theorem:} Any \(\sigma \in S_{n}\) can be written as \textbf{either} \begin{itemize} \item An even number of transpositions, or \item An odd number of transpositions. \end{itemize} \emph{Proof:} Define \begin{align*} A_{n} = \theset{\sigma \in S_{n} \suchthat \sigma\text{ is even}} .\end{align*} We claim that \(A_{n} \normal S_{n}\). \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item Closure: If \(\tau_{1}, \tau_{2}\) are both even, then \(\tau_{1}\tau_{2}\) also has an even number of transpositions. \item The identity has an even number of transpositions, since zero is even. \item Inverses: If \(\sigma = \prod_{i=1}^{s} \tau_{i}\) where \(s\) is even, then \(\sigma\inv = \prod_{i=1}^{s} \tau_{s-i}\). But each \(\tau\) is order 2, so \(\tau\inv = \tau\), so there are still an even number of transpositions. \end{enumerate} So \(A_{n}\) is a subgroup. It is normal because it is index 2, or the kernel of a homomorphism, or by a direct computation. \hypertarget{groups-acting-on-sets}{% \subsection{Groups Acting on Sets}\label{groups-acting-on-sets}} Think of this as a generalization of a \(G\dash\)module. \textbf{Definition:} A group \(G\) is said to \emph{act} on a set \(X\) if there exists a map \(G\cross X \to X\) such that \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(e\actson x = x\) \item \((g_{1} g_{2})\actson x = g_{1} \actson (g_{2} \actson x)\). \end{enumerate} \emph{Examples:} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(G = S_{A} \actson A\) \item \(H \leq G\), then \(G \actson X = G/H\) where \(g \actson xH = (gx)H\). \item \(G \actson G\) by conjugation, i.e.~\(g\actson x = gxg\inv\). \end{enumerate} \textbf{Definition:} Let \(x\in X\), then define the \textbf{stabilizer subgroup} \begin{align*} G_{x} = \theset{g\in G \suchthat g\actson x = x} \leq G \end{align*} We can also look at the dual notion, \begin{align*} X_{g} = \theset{x\in X \suchthat g\actson x = x}. 
\end{align*} We then define the \emph{orbit} of an element \(x\) as \begin{align*} Gx = \theset{g\actson x \suchthat g\in G} \end{align*} and we have a similar result where \(x\sim y \iff x\in Gy\), and the orbits partition \(X\). \textbf{Theorem:} Let \(G\) act on \(X\). We want to know the number of elements in an orbit, and it turns out that \begin{align*} \abs{Gx} = [G: G_{x}] \end{align*} \emph{Proof:} Construct a map \(Gx \mapsvia{\psi} G/G_{x}\) where \(\psi(g\actson x) = g G_{x}\). \emph{Exercise:} Show that this is well-defined, so if 2 elements are equal then they go to the same coset. \emph{Exercise}: Show that this is surjective. Injectivity: \(\psi(g_{1} x) = \psi(g_{2} x)\), so \(g_{1} G_{x} = g_{2} G_{x}\) and \((g_{2}\inv g_{1}) G_{x} = G_{x}\) so \begin{align*} g_{2}\inv g_{1} \in G_{x} \iff g_{2}\inv g_{1} \actson x = x \iff g_{1}x = g_{2} x .\end{align*} \(\qed\) Next time: Burnside's theorem, proving the Sylow theorems. \hypertarget{thursday-august-22nd}{% \section{Thursday August 22nd}\label{thursday-august-22nd}} \hypertarget{group-actions}{% \subsection{Group Actions}\label{group-actions}} Let \(G\) be a group and \(X\) be a set; we say \(G\) \emph{acts} on \(X\) (or that \(X\) is a \(G\dash\)set) when there is a map \(G\cross X \to X\) such that \(ex = x\) and \begin{align*} (gh) \actson x = g \actson (h \actson x) .\end{align*} We then define the \textbf{stabilizer} of \(x\) as \begin{align*} \mathrm{Stab}_G(x) = G_x \definedas \theset{g\in G \suchthat g\actson x = x} \leq G, \end{align*} and the \textbf{orbit} \begin{align*} G.x = \mathcal O_x \definedas \theset{g\actson x \suchthat g\in G} \subseteq X. \end{align*} When \(G\) is finite, we have \begin{align*} \abs{G.x} = \frac{\abs G}{\abs{G_x}}.
\end{align*} We can also consider the \textbf{fixed points} of \(X\), \begin{align*} X_g = \theset{x\in X \mid g\actson x = x} \subseteq X \end{align*} \hypertarget{burnsides-theorem}{% \subsection{Burnside's Theorem}\label{burnsides-theorem}} \textbf{Theorem (Burnside):} Let \(X\) be a \(G\dash\)set and \(v \definedas \abs{X/G}\) be the number of orbits. Then \begin{align*} v \abs{G} = \sum_{g\in G} \abs{X_g}. \end{align*} \emph{Proof:} Define \begin{align*} N = \theset{(g,x) \mid g\actson x = x} \subseteq G \cross X ,\end{align*} we then have \begin{align*} \abs N &= \sum_{g\in G} \abs{X_g} \\ &= \sum_{x\in X} \abs{G_x} \\ &= \sum_{x\in X} \frac{\abs G}{\abs {G.x}} \quad\text{by Orbit-Stabilizer} \\ &= \abs{G} \left( \sum_{x\in X} \frac{1}{\abs{G.x}} \right) \\ &= \abs{G} \sum_{G.x ~\in~ X/G} \left( \sum_{y ~\in~ G.x} \frac{1}{\abs{G.x}} \right) \\ &= \abs{G} \sum_{G.x ~\in~ X/G} \left( \abs{G.x} \frac{1}{\abs{G.x}} \right) \\ &= \abs{G} \sum_{G.x ~\in~ X/G} 1 \\ &= \abs{G} v .\end{align*} The last two equalities follow from the following fact: since the orbits partition \(X\), say into \(X = \displaystyle\disjoint_{i=1}^v \sigma_i\), so let \(\sigma = \theset{\sigma_i \mid 1 \leq i \leq v}\). By abuse of notation, replace each orbit in \(\sigma\) with a representative element \(x_i\in \sigma_i \subset X\). We then have \begin{align*} \sum_{x \in \sigma} \frac{1}{\abs{G.x}} = \frac{1}{\abs{G.x}} \abs{\sigma} = 1. \end{align*} \(\qed\) \emph{Application:} Consider seating 10 people around a circular table. How many distinct seating arrangements are there? Let \(X\) be the set of configurations, \(G = \ZZ_{10}\), and let \(G\actson X\) by cyclically permuting configurations. Then \(v\), the number of orbits under this action, yields the number of distinct seating arrangements. By Burnside, we have \begin{align*} v = \frac{1}{\abs{G}} \sum_{g\in G} \abs{X_g} = \frac{1}{10} (10!) = 9!
\end{align*} since \(X_g = \theset{x\in X \mid g\actson x = x} = \emptyset\) unless \(g = e\), and \(X_e = X\). \hypertarget{sylow-theory}{% \subsection{Sylow Theory}\label{sylow-theory}} Recall Lagrange's theorem: If \(H \leq G\) and \(G\) is finite, then \(\abs{H}\) divides \(\abs{G}\). Consider the converse: if \(n\) divides \(\abs G\), does there exist a subgroup of size \(n\)? The answer is \textbf{no} in general, and a counterexample is \(A_4\) which has \(4!/2 = 12\) elements but no subgroup of order 6. \hypertarget{class-functions}{% \subsubsection{Class Functions}\label{class-functions}} Let \(X\) be a \(G\dash\)set, and choose orbit representatives \(x_1 \cdots x_v\). Then \begin{align*} \abs{X} = \sum_{i=1}^v \abs{G .x_i}. \end{align*} We can then separately count all orbits with exactly one element, which is exactly \begin{align*} X_G = \theset{x\in G \mid g\actson x = x ~ \forall g\in G} \end{align*}. We then have \begin{align*} \abs X = \abs{X_G} + \sum_{i=j}^v \abs{G. x_i} \end{align*} for some \(j\) where \(\abs{G.x_i} > 1\) for all \(i \geq j\). \textbf{Theorem:} Let \(G\) be a group of order \(p^n\) for \(p\) a prime. Then \begin{align*} \abs X = \abs{X_G} \mod p .\end{align*} \emph{Proof:} We know that \begin{align*} \abs{G.x_i} = [G : G_{x_i}] \text{ for } j \leq i \leq v \text{ and } \abs{Gx_i} > 1 \implies G.x_i \neq G ,\end{align*} and thus \(p\) divides \([G: G x_i]\). The result follows. \(\qed\) \emph{Application:} If \(\abs G = p^n\), then the center \(Z(G)\) is nontrivial. Let \(X=G\) act on itself by conjugation, so \(g\actson x = gxg\inv\). Then \begin{align*} X_G = \theset{x\in G \suchthat gxg\inv = x} = \theset{x\in G \suchthat gx = xg} = Z(G) \end{align*} But then, by the previous theorem, we have \begin{align*} \abs{Z(G)} \equiv \abs{X}\equiv \abs{G} \mod p ,\end{align*} but since \(Z(G) \leq G\) we have \(\abs{Z(G)} \cong 0 \mod p\). So in particular, \(Z(G) \neq \theset{e}\). 
\textbf{Definition:} A group \(G\) is a \textbf{\(p\dash\)group} iff every element in \(G\) has order \(p^k\) for some \(k\). A subgroup is a \(p\dash\)group exactly when it is a \(p\dash\)group in its own right. \hypertarget{cauchys-theorem}{% \subsubsection{Cauchy's Theorem}\label{cauchys-theorem}} \textbf{Theorem (Cauchy):} Let \(G\) be a finite group, where \(p\) is prime and divides \(\abs{G}\). Then \(G\) has an element (and thus a subgroup) of order \(p\). \emph{Proof:} Consider \begin{align*} X = \theset{(g_1, g_2, \cdots , g_p) \in G^{\oplus p} \mid g_1g_2\cdots g_p = e} .\end{align*} Given any \(p-1\) elements, say \(g_1 \cdots g_{p-1}\), the remaining element is completely determined by \(g_p = (g_1 \cdots g_{p-1})\inv\). So \(\abs X = \abs{G}^{p-1}\).and since \(p \divides \abs{G}\), we have \(p \divides \abs X\). Now let \(\sigma \in S_p\) the symmetric group act on \(X\) by index permutation, i.e. \begin{align*} \sigma \actson (g_1, g_2 \cdots g_p) = (g_{\sigma(1)}, g_{\sigma(2)}, \cdots, g_{\sigma(p)}) .\end{align*} \emph{Exercise}: Check that this gives a well-defined group action. Let \(\sigma = (1~2~\cdots~p) \in S_p\), and note \(\generators{\sigma} \leq S_p\) also acts on \(X\) where \(\abs{\generators{\sigma}} = p\). Therefore we have \begin{align*} \abs{X} = \abs{X_{\generators{\sigma}}} \mod p. \end{align*} Since \(p\divides \abs{X}\), it follows that \(\abs{X_{\generators{\sigma}}} = 0 \mod p\), and thus \(p \divides \abs{X_{\generators{\sigma}}}\). If \(\generators{\sigma}\) fixes \((g_1, g_2, \cdots g_p)\), then \(g_1 = g_2 = \cdots g_p\). Note that \((e, e, \cdots) \in X_{\generators{\sigma}}\), as is \((a, a, \cdots a)\) since \(p \divides \abs{X_{\generators{\sigma}}}\). So there is some \(a\in G\) such that \(a^p = 1\). Moreover, \(\generators{a} \leq G\) is a subgroup of size \(p\). \(\qed\) \hypertarget{normalizers}{% \subsubsection{Normalizers}\label{normalizers}} Let \(G\) be a group and \(X = S\) be the set of subgroups of \(G\). 
Let \(G\) act on \(X\) by \(g\actson H = gHg\inv\). What is the stabilizer? \begin{align*} G_x = G_H = \theset{g\in G \mid gHg\inv = H} ,\end{align*} making \(G_H\) the largest subgroup such that \(H \normal G_H\). So we \textbf{define} \(N_G(H) \definedas G_H\). \emph{Lemma:} Let \(H\) be a \(p\dash\)subgroup of \(G\) of order \(p^n\). Then \begin{align*} [N_G(H) : H] = [G : H] \mod p .\end{align*} \emph{Proof:} Let \(S = G/H\) be the set of left \(H\dash\)cosets in \(G\). Now let \(H\) act on \(S\) by \begin{align*} h\actson xH \definedas (hx)H .\end{align*} By a previous theorem, \(\abs{G/H} = \abs{S} = \abs{S_H} \mod p\), where \(\abs{G/H} = [G: H]\). What is \(S_H\)? This is given by \begin{align*} S_H = \theset{xH \in S \suchthat xhx\inv \in H ~\forall h\in H} .\end{align*} Therefore \(xH \in S_H \iff x\in N_G(H)\), and so \(\abs{S_H} = [N_G(H) : H]\). \(\qed\) \textbf{Corollary:} Let \(H \leq G\) be a subgroup of order \(p^n\). If \(p \divides [G: H]\) then \(N_G(H) \neq H\). \emph{Proof:} Exercise. \(\qed\) \textbf{Theorem:} Let \(G\) be a finite group, then \(G\) is a \(p\dash\)group \(\iff \abs{G} = p^n\) for some \(n\geq 1\). \emph{Proof:} Suppose \(\abs{G} = p^n\) and \(a \in G\). Then \(\abs{\generators{a}} = p^\alpha\) for some \(\alpha\). Conversely, suppose \(G\) is a \(p\dash\)group. Factor \(\abs{G}\) into primes and suppose \(\exists q\) such that \(q \divides \abs{G}\) but \(q \neq p\). By Cauchy, we can then get a subgroup \(\generators{c}\) such that \(\abs{\generators{c}} = q\), but then \(\abs{G} \neq p^n\). \(\qed\) \hypertarget{tuesday-august-27th}{% \section{Tuesday August 27th}\label{tuesday-august-27th}} Let \(H\) be a finite group and \(p\) a prime. TFAE: \begin{itemize} \tightlist \item \(\abs{H} = p^n\) for some \(n\) \item Every element of \(H\) has order \(p^\alpha\) for some \(\alpha\). \end{itemize} If either of these are true, we say \(H\) is a \emph{\(p\dash\)group}.
Let \(H\) be a \(p\dash\)group, last time we proved that if \(p \divides [G : H]\) then \(N_G(H) \neq H\). \hypertarget{sylow-theorems}{% \subsection{Sylow Theorems}\label{sylow-theorems}} Let \(G\) be a finite group and suppose \(\abs{G} = p^n m\) where \((m, p) = 1\). Then \hypertarget{sylow-1}{% \subsubsection{Sylow 1}\label{sylow-1}} \begin{quote} Idea: take a prime factorization of \(\abs{G}\), then there are subgroups of order \(p^i\) for \emph{every} prime power appearing, up to the maximal power. \end{quote} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item \(G\) contains a subgroup of order \(p^i\) for every \(1 \leq i \leq n\). \item Every subgroup \(H\) of order \(p^i\) where \(i < n\) is a normal subgroup in a subgroup of order \(p^{i+1}\). \end{enumerate} \emph{Proof:} By induction on \(i\). For \(i=1\), we know this by Cauchy's theorem. If we show (2), that shows (1) as a consequence. So suppose this holds for \(i < n\). Let \(H \leq G\) where \(\abs{H} = p^i\), we now want a subgroup of order \(p^{i+1}\). Since \(p\divides [G: H]\), by the previous theorem, \(H < N_G(H)\) is a proper subgroup (?). Now consider the canonical projection \(\pi: N_G(H) \to N_G(H) /H\). Since \begin{align*} p \divides [N_G(H) : H] = \abs{N_G(H)/ H} ,\end{align*} by Cauchy there is a subgroup of order \(p\) in this quotient. Call it \(K\). Then \(\pi\inv(K) \leq N_G(H)\). \emph{Exercise}: Show that \(\abs{\pi\inv(K)} = p^{i+1}\). It now follows that \(H \normal \pi\inv(K)\). \(\qed\) \textbf{Definition}: For \(G\) a finite group and \(\abs{G} = p^n m\) where \(p\) does not divide \(m\). Then a subgroup of order \(p^n\) is called a \textbf{Sylow \(p\dash\)subgroup}. \begin{quote} Note: by Sylow 1, these exist. \end{quote} \hypertarget{sylow-2}{% \subsubsection{Sylow 2}\label{sylow-2}} If \(P_1, P_2\) are Sylow \(p\dash\)subgroups of \(G\), then \(P_1\) and \(P_2\) are conjugate.
\emph{Proof:} Let \(\mathcal L\) be the left cosets of \(P_1\), i.e.~\(\mathcal L = G/P_1\). Let \(P_2\) act on \(\mathcal L\) by \begin{align*} p_2 \actson (gP_1) \definedas (p_2g)P_1 .\end{align*} By a previous theorem about orbits and fixed points, we have \begin{align*} \abs{\mathcal L_{P_2}} = \abs{\mathcal L} \mod p. \end{align*} Since \(p\) does not divide \(\abs{\mathcal L}\), we have \(p\) does not divide \(\abs{\mathcal L_{P_2}}\). So \(\mathcal L_{P_2}\) is nonempty. So there exists a coset \(xP_1\) such that \(xP_1 \in \mathcal L_{P_2}\), and thus \begin{align*} yxP_1 = xP_1 \text{ for all } y\in P_2 .\end{align*} Then \(x\inv y x P_1 = P_1\) for all \(y\in P_2\), and so \(x\inv P_2 x = P_1\). So \(P_1\) and \(P_2\) are conjugate. \(\qed\) \hypertarget{sylow-3}{% \subsubsection{Sylow 3}\label{sylow-3}} Let \(G\) be a finite group, and \(p\divides \abs G\). Let \(r_p\) be the number of Sylow \(p\dash\)subgroups of \(G\). Then \begin{itemize} \item \(r_p \cong 1 \mod p\). \item \(r_p \divides \abs G\). \item \(r_p = [G : N_G(P)]\) \end{itemize} \emph{Proof:} Let \(X = \mathcal S\) be the set of Sylow \(p\dash\)subgroups, and let \(P \in X\) be a fixed Sylow \(p\dash\)subgroup. Let \(P \actson \mathcal S\) by conjugation, so for \(\overline P \in \mathcal S\) let \(x \actson \overline P = x \overline P x\inv\). By a previous theorem, we have \begin{align*} \abs{\mathcal S} = \abs{\mathcal{S}_P} \mod p \end{align*} What are the fixed points \(\mathcal{S}_P\)? \begin{align*} \mathcal{S}_P = \theset{T \in \mathcal S \suchthat xTx\inv = T \quad \forall x\in P}. \end{align*} Let \(T \in \mathcal{S}_P\), so \(xTx\inv = T\) for all \(x\in P\). Then \(P \leq N_G(T)\), so both \(P\) and \(T\) are Sylow \(p\dash\) subgroups in \(N_G(T)\) as well as \(G\). So there exists a \(g\in N_G(T)\) such that \(T = gPg\inv\). But the point is that in the normalizer, there is only \textbf{one} Sylow \(p\dash\) subgroup.
But then \(T\) is the unique largest normal subgroup of \(N_G(T)\), which forces \(T = P\). Then \(\mathcal{S}_P = \theset{P}\), and using the formula, we have \(r_p \cong 1 \mod p\). Now modify this slightly by letting \(G\) act on \(\mathcal S\) (instead of just \(P\)) by conjugation. Since all Sylows are conjugate, by Sylow (1) there is only one orbit, so \(\mathcal S = GP\) for \(P \in \mathcal S\). But then \begin{align*} r_p = \abs{\mathcal S} = \abs{GP} = [G: G_p] \divides \abs{G}. \end{align*} Note that this gives a precise formula for \(r_p\), although the theorem is just an upper bound of sorts, and \(G_p = N_G(P)\). \hypertarget{applications-of-sylow-theorems}{% \subsection{Applications of Sylow Theorems}\label{applications-of-sylow-theorems}} Of interest historically: classifying finite \emph{simple} groups, where a group \(G\) is \emph{simple} If \(N \normal G\) and \(N \neq \theset{e}\), then \(N=G\). \emph{Example:} Let \(G = \ZZ_p\), any subgroup would need to have order dividing \(p\), so \(G\) must be simple. \emph{Example:} \(G = A_n\) for \(n\geq 5\) (see Galois theory) One major application is proving that groups of a certain order are \emph{not} simple. \emph{Applications:} \textbf{Proposition:} Let \(\abs G = p^n q\) with \(p > q\). Then \(G\) is not simple. \emph{Proof:} \begin{quote} Strategy: Find a proper normal nontrivial subgroup using Sylow theory. Can either show \(r_p = 1\), or produce normal subgroups by intersecting distinct Sylow p-subgroups. \end{quote} Consider \(r_p\), then \(r_p = p^\alpha q^\beta\) for some \(\alpha, \beta\). But since \(r_p \cong 1 \mod p\), \(p\) does not divide \(r_p\), we must have \(r_p = 1, q\). But since \(q < p\) and \(q\neq 1 \mod p\), this forces \(r_p = 1\). So let \(P\) be a sylow \(p\dash\)subgroup, then \(P < G\). Then \(gPg\inv\) is also a sylow, but there's only 1 of them, so \(P\) is normal. \(\qed\) \textbf{Proposition}: Let \(\abs{G} = 45\), then \(G\) is not simple. \emph{Proof}: Exercise. 
\(\qed\) \textbf{Proposition}: Let \(\abs{G} = p^n\), then \(G\) is not simple if \(n > 1\). \emph{Proof:} By Sylow (1), there is a normal subgroup of order \(p^{n-1}\) in \(G\). \(\qed\) \textbf{Proposition:} Let \(\abs{G} = 48\), then \(G\) is not simple. \emph{Proof:} Note \(48 = 2^4 3\), so consider \(r_2\), the number of Sylow 2-subgroups. Then \(r_2 \cong 1 \mod 2\) and \(r_2 \divides 48\). So \(r_2 = 1, 3\). If \(r_2 = 1\), we're done, otherwise suppose \(r_2 = 3\). Let \(H \neq K\) be Sylow 2-subgroups, so \(\abs H = \abs K = 2^4 = 16\). Now consider \(H \intersect K\), which is a subgroup of \(G\). How big is it? Since \(H\neq K, \abs{H \intersect K} < 16\). The order has to divide 16, so we in fact have \(\abs{H \intersect K} \leq 8\). Suppose \(\abs{H \intersect K} \leq 4\), towards a contradiction. Then \begin{align*} \abs{HK} = \frac{\abs H \abs K}{\abs{H \intersect K}} \geq \frac{(16)(16)}{4} = 64 > \abs{G} = 48. \end{align*} So we can only have \(\abs{H \intersect K} = 8\). Since this is an index 2 subgroup in both \(H\) and \(K\), it is in fact normal. But then \begin{align*} H, K \subseteq N_G(H \intersect K) \definedas X .\end{align*} But then \(\abs X\) must be a multiple of 16 \emph{and} divide 48, so it's either 16 or 48. But \(\abs X > 16\), because \(H \subseteq X\) and \(K \subseteq X\) with \(H \neq K\). So then \begin{align*} N_G(H \intersect K) = G \text{ and so } H \intersect K \normal G .\end{align*} \(\qed\) \hypertarget{thursday-august-29th}{% \section{Thursday August 29th}\label{thursday-august-29th}} \hypertarget{classification-of-groups-of-certain-orders}{% \subsection{Classification of Groups of Certain Orders}\label{classification-of-groups-of-certain-orders}} We have a classification of some finite abelian groups. 
\begin{longtable}[]{@{}lll@{}} \toprule Order of G & Number of Groups & List of Distinct Groups\tabularnewline \midrule \endhead 1 & 1 & \(\theset{e}\)\tabularnewline 2 & 1 & \(\ZZ_2\)\tabularnewline 3 & 1 & \(\ZZ_3\)\tabularnewline 4 & 2 & \(\ZZ_4, \ZZ_2^2\)\tabularnewline 5 & 1 & \(\ZZ_5\)\tabularnewline 6 & 2 & \(\ZZ_6, S_3\) (*)\tabularnewline 7 & 1 & \(\ZZ_7\)\tabularnewline 8 & 5 & \(\ZZ_8,\ZZ_4 \cross \ZZ_2, \ZZ_2^3, D_4, Q\)\tabularnewline 9 & 2 & \(\ZZ_9, \ZZ_3^2\)\tabularnewline 10 & 2 & \(\ZZ_{10}, D_5\)\tabularnewline 11 & 1 & \(\ZZ_{11}\)\tabularnewline \bottomrule \end{longtable} \emph{Exercise}: show that groups of order \(p^2\) are abelian. We still need to justify \(S_3, D_4, Q, D_5\). Recall that for any group \(A\), we can consider the free group on the elements of \(A\) given by \(F[A]\). \begin{quote} Note that we can also restrict \(A\) to just its generators. \end{quote} There is then a homomorphism \(F[A] \to A\), where the kernel is the relations. \emph{Example:} \begin{align*} \ZZ \cross \ZZ = \generators{x, y \mid xyx\inv y\inv = e} \text{ where } x = (1, 0),~ y = (0, 1) .\end{align*} \hypertarget{groups-of-order-6}{% \subsection{Groups of Order 6}\label{groups-of-order-6}} Let \(G\) be nonabelian of order \(6\). \begin{quote} Idea: look at subgroups of index 2. \end{quote} Let \(P\) be a Sylow 3-subgroup of \(G\), then \(r_3 = 1\) so \(P\normal G\). Moreover, \(P\) is cyclic since it is order 3, so \(P = \generators{a}\). But since \(\abs{G/P} = 2\), it is also cyclic, so \(G/P = \generators{bP}\). Note that \(b\not\in P\), but \(b^2 \in P\) since \((bP)^2 = P\), so \(b^2 \in \theset{e, a, a^2}\). If \(b^2=a, a^2\) then \(b\) has order 6, but this would make \(G = \generators{b}\) cyclic and thus abelian. So \(b^2=e\). Since \(P \normal G\), we have \(bPb\inv = P\), and in particular \(bab\inv\) has order 3. So either \(bab\inv = a\), or \(bab\inv = a^2\). If \(bab\inv = a\), then \(G\) is abelian, so \(bab\inv = a^2\). 
So \begin{align*} G = \generators{a, b \mid a^3 = e, b^2 = e, bab\inv = a^2} .\end{align*} We've shown that \emph{if} there is such a nonabelian group, then it must satisfy these relations -- we still need to produce some group that actually realizes this. Consider the symmetries of the triangle: \includegraphics{figures/2019-09-03-09:52.png}\\ You can check that \(a,b\) satisfy the appropriate relations. \hypertarget{groups-of-order-10}{% \subsection{Groups of Order 10}\label{groups-of-order-10}} For order 10, a similar argument yields \begin{align*} G = \generators{a, b \mid a^5 = 1, b^2 = 1, ba=a^4b} ,\end{align*} and this is realized by symmetries of the pentagon where \(a = (1~2~3~4~5), b=(1~4)(2~3)\). \hypertarget{groups-of-order-8}{% \subsection{Groups of Order 8}\label{groups-of-order-8}} Assume \(G\) is nonabelian of order 8. \(G\) has no elements of order 8, so the only possibilities for orders of elements are 1, 2, or 4. Assume all elements have order 1 or 2. Let \(a,b\in G\), consider \begin{align*} (ab)^2 = abab \implies ab=b\inv a\inv = ba ,\end{align*} and thus \(G\) is abelian. So there must be an element of order 4. So suppose \(a\in G\) has order 4, which is an index 2 subgroup, and so \(\generators{a} \normal G\). But \(\abs{G/\generators a} = 2\) is cyclic, so \(G/\generators a = \generators{bH}\). \begin{quote} Note that \(b^2 \in H = \generators a\). \end{quote} If \(b^2=a, a^3\) then \(b\) will have order 8, making \(G\) cyclic. So \(b^2 = 1, a^2\). These are both valid possibilities. Since \(H \normal G\), we have \(b\generators a b\inv = \generators a\), and since \(a\) has order 4, so does \(bab\inv\). So \(bab\inv = a, a^3\), but \(a\) is not an option because this would make \(G\) abelian. So we have two options: \begin{align*} G_1 &= \generators{a, b \mid a^4 = 1, b^2=1, bab\inv = a^3} \\ G_2 &= \generators{a, b \mid a^4 = 1, b^2 = a^2, bab\inv = a^3} .\end{align*} \emph{Exercise}: prove \(G_1 \not\cong G_2\). 
Now to realize these groups: \begin{itemize} \item \(G_1\) is the group of symmetries of the square, where \(a = (1~2~3~4), b=(1~3)\). \item \(G_2 \cong Q\), the quaternions, where \(Q = \theset{\pm 1,\pm i, \pm j, \pm k}\), and there are relations (add picture here). \end{itemize} \hypertarget{some-nice-facts}{% \subsection{Some Nice Facts}\label{some-nice-facts}} \begin{itemize} \tightlist \item If \(\phi: G\to G'\) is a homomorphism, then \begin{itemize} \tightlist \item \(N \normal G \implies \phi(N) \normal \phi(G)\), although \(\phi(N)\) is not necessarily normal in \(G'\). \item \(N' \normal G' \implies \phi\inv(N') \normal G\) \end{itemize} \end{itemize} \textbf{Definition}: A \emph{maximal normal subgroup} is a normal subgroup \(M \normal G\) that is properly contained in \(G\), and if \(M \leq N \normal G\) (where \(N\) is proper) then \(M = N\). \textbf{Theorem:} \(M\) is a maximal normal subgroup of \(G\) iff \(G/M\) is simple. \hypertarget{simple-groups}{% \subsection{Simple Groups}\label{simple-groups}} \textbf{Definition}: A group \(G\) is simple iff \(N \normal G \implies N = \theset e, G\). Note that every subgroup of an abelian group is normal, so if an abelian group has \emph{any} proper nontrivial subgroups, then it is not simple; thus the groups \(G = \ZZ_p\) for \(p\) prime are the only simple abelian groups. Another example of a simple group is \(A_n\) for \(n\geq 5\). \textbf{Theorem (Feit-Thompson, 1964):} Every finite nonabelian simple group has even order. \begin{quote} Note that this is a consequence of the ``odd order theorem''. 
\end{quote} \hypertarget{series-of-groups}{% \subsection{Series of Groups}\label{series-of-groups}} A composition series is a descending series of pairwise normal subgroups such that each successive quotient is simple: \begin{align*} G_0 \normal G_1 \normal G_2 \normal \cdots \normal \theset e \\ G_i/G_{i+1}~\text{ simple} .\end{align*} \emph{Example:} \begin{align*} \ZZ_9 \normal \ZZ_3 \normal \theset e \\ \ZZ_9 / \ZZ_3 = \ZZ_3,\\ \ZZ_3 / \theset e = \ZZ_3 .\end{align*} \emph{Example:} \begin{align*} \ZZ_6 \normal \ZZ_3 \normal \theset e \\ \ZZ_6 / \ZZ_3 = \ZZ_2 \\ \ZZ_2 / \theset e = \ZZ_2 .\end{align*} but also \begin{align*} \ZZ_6 \normal \ZZ_2 \normal \theset e \\ \ZZ_6 / \ZZ_2 = \ZZ_3 \\ \ZZ_3 / \theset e = \ZZ_3 .\end{align*} \textbf{Theorem (Jordan-Holder):} Any two composition series are ``isomorphic'' in the sense that the same quotients appear in both series, up to a permutation. \textbf{Definition:} A group is \emph{solvable} iff it has a composition series where all factors are abelian. \emph{Exercise}: Show that any abelian group is solvable. \emph{Example:} \(S_n\) is \emph{not} solvable for \(n\geq 5\), since \begin{align*} S_n &\normal A_n \normal \theset e \\ S_n / A_n &= \ZZ_2~\text{simple} \\ A_n / \theset{e} &= A_n~\text{simple and nonabelian} \iff n\geq 5 .\end{align*} \emph{Example:} \begin{align*} S_4 &\normal A_4 \normal H \normal \theset e \quad \text{where } \abs H = 4 \\ S_4 / A_4 &= \ZZ_2 \\ A_4 / H &= \ZZ_3 \\ H / \theset e &= H \cong \ZZ_2 \cross \ZZ_2 .\end{align*} (Here \(H\) must be refined by a \(\ZZ_2\) to obtain a full composition series, since \(\ZZ_2 \cross \ZZ_2\) is not simple.) \hypertarget{august-30th}{% \section{August 30th}\label{august-30th}} Recall the Sylow theorems: \begin{itemize} \item \(p\) groups exist for \emph{every} \(p^i\) dividing \(\abs{G}\), and \(H(p) \normal H(p^2) \normal \cdots \normal H(p^n)\). \item All Sylow \(p\dash\)subgroups are conjugate. 
\item Numerical constraints \begin{itemize} \item \(r_p \cong 1 \mod p\), \item \(r_p \divides \abs{G}\) and \(r_p \divides m\), \end{itemize} \end{itemize} \hypertarget{internal-direct-products}{% \subsection{Internal Direct Products}\label{internal-direct-products}} Suppose \(H, K \leq G\), and consider the smallest subgroup containing both \(H\) and \(K\). Denote this \(H \vee K\). If either \(H\) or \(K\) is normal in \(G\), then we have \(H\vee K = HK\). There is a ``recipe'' for proving you have a direct product of groups: \textbf{Theorem (Recognizing Direct Products)}: Let \(G\) be a group, \(H \normal G\) and \(K\normal G\), and \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(H\vee K = HK = G\), \item \(H \intersect K = \theset{e}\). \end{enumerate} Then \(G \cong H \times K\). \emph{Proof:} We first want to show that \(hk = kh ~\forall k\in K, h\in H\). We then have \begin{align*} hkh\inv k\inv = (hkh\inv)k\inv \in K \quad\text{and}\quad hkh\inv k\inv = h(kh\inv k\inv) \in H \implies hkh\inv k\inv \in H\intersect K = \theset{e}. \end{align*} So define \begin{align*} \phi: H \cross K \to G \\ (h, k) \mapsto hk ,\end{align*} \emph{Exercise:} check that this is a homomorphism, it is surjective, and injective. \(\qed\) \emph{Applications:} \textbf{Theorem:} Every group of order \(p^2\) is abelian. \emph{Proof:} If \(G\) is cyclic, then it is abelian and \(G \cong \ZZ_{p^2}\). So suppose otherwise. By Cauchy, there is an element of order \(p\) in \(G\). So let \(H = \generators{a}\), for which we have \(\abs{H} = p\). Then \(H \normal G\) by Sylow 1, since it's normal in \(H(p^2)\), which would have to equal \(G\). Now consider \(b\not\in H\). By Lagrange, \(o(b)\) divides \(p^2\); we have \(o(b) \neq 1\) since \(b \neq e\) (as \(e\in H\)), and \(o(b) \neq p^2\) since \(G\) is not cyclic, so \(o(b) = p\). Now let \(K = \generators{b}\). Then \(\abs{K} = p\), and \(K \normal G\) by the same argument. Since \(b\not\in H\), we have \(H \intersect K = \theset e\) and \(\abs{HK} = p^2\), so \(G = HK \cong H \cross K \cong \ZZ_p \cross \ZZ_p\) is abelian. \(\qed\) \textbf{Theorem:} Let \(\abs{G} = pq\) where \(q\neq 1 \mod p\) and \(p < q\). 
Then \(G\) is cyclic (and thus abelian). \emph{Proof:} Use Sylow 1. Let \(P\) be a Sylow \(p\dash\)subgroup. We want to show that \(P \normal G\) to apply our direct product lemma, so it suffices to show \(r_p = 1\). We know \(r_p = 1 \mod p\) and \(r_p \divides \abs{G} = pq\), and so \(r_p = 1,q\). It can't be \(q\), since by hypothesis \(q \neq 1 \mod p\). Now let \(Q\) be a Sylow \(q\dash\)subgroup. Then \(r_q \cong 1 \mod q\) and \(r_q \divides pq\), so \(r_q = 1, p\). But since \(p< q\), we have \(p \neq 1 \mod q\), so we must have \(r_q = 1\). So \(Q \normal G\) as well. We now have \(P \intersect Q = \theset e\) (why?) and \begin{align*} \abs{PQ} = \frac{\abs P \abs Q}{ \abs{P \intersect Q} } = \abs P \abs Q = pq, \end{align*} and so \(G = PQ\), and \(G \cong \ZZ_p \cross \ZZ_q \cong \ZZ_{pq}\). \(\qed\) \emph{Example:} Every group of order \(15 = 5^1 3^1\) is cyclic. \hypertarget{determination-of-groups-of-a-given-order}{% \subsection{Determination of groups of a given order}\label{determination-of-groups-of-a-given-order}} \begin{longtable}[]{@{}lll@{}} \toprule Order of G & Number of Groups & List of Distinct Groups\tabularnewline \midrule \endhead 1 & 1 & \(\theset{e}\)\tabularnewline 2 & 1 & \(\ZZ_2\)\tabularnewline 3 & 1 & \(\ZZ_3\)\tabularnewline 4 & 2 & \(\ZZ_4, \ZZ_2^2\)\tabularnewline 5 & 1 & \(\ZZ_5\)\tabularnewline 6 & 2 & \(\ZZ_6, S_3\) (*)\tabularnewline 7 & 1 & \(\ZZ_7\)\tabularnewline 8 & 5 & \(\ZZ_8,\ZZ_4 \cross \ZZ_2, \ZZ_2^3, D_4,Q\)\tabularnewline 9 & 2 & \(\ZZ_9, \ZZ_3^2\)\tabularnewline 10 & 2 & \(\ZZ_{10}, D_5\)\tabularnewline 11 & 1 & \(\ZZ_{11}\)\tabularnewline \bottomrule \end{longtable} We still need to justify 6, 8, and 10. \hypertarget{free-groups}{% \subsection{Free Groups}\label{free-groups}} Define an \emph{alphabet} \(A = \theset{a_1, a_2, \cdots a_n}\), and let a \emph{syllable} be of the form \(a_i^m\) for some \(m\). A \emph{word} is any expression of the form \(\prod_{n_i} a_{n_i}^{m_i}\). 
We have two operations, \begin{itemize} \item Concatenation, i.e.~\((a_1 a_2) \star (a_3^2 a_5) = a_1 a_2 a_3^2 a_5\). \item Contraction, i.e.~\((a_1 a_2^2) \star (a_2\inv a_5) = a_1 a_2^2 a_2\inv a_5 = a_1 a_2 a_5\). \end{itemize} If we've contracted a word as much as possible, we say it is \emph{reduced}. We let \(F[A]\) be the set of reduced words and define a binary operation \begin{align*} f: F[A] \cross F[A] &\to F[A] \\ (w_1, w_2) &\mapsto w_1 w_2 \text{ (reduced) } .\end{align*} \textbf{Theorem:} \((F[A], f)\) is a group. \emph{Proof:} Exercise. \(\qed\) \textbf{Definition:} \(F[A]\) is called the \textbf{free group generated by \(A\)}. A group \(G\) is called \emph{free} on a subset \(A \subseteq G\) iff \(G \cong F[A]\). \emph{Examples:} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(A = \theset{x} \implies F[A] = \theset{x^n \suchthat n \in \ZZ} \cong \ZZ\). \item \(A = \theset{x, y} \implies F[A] = \ZZ \ast \ZZ\) (not defined yet!). \end{enumerate} Note that there are no relations, i.e.~\(xyxyxy\) is \emph{reduced}. To abelianize, we'd need to introduce the relation \(xy = yx\). \emph{Properties:} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item If \(G\) is free on \(A\) and free on \(B\) then we must have \(\abs A = \abs B\). \item Any (nontrivial) subgroup of a free group is free. \begin{quote} (See Fraleigh or Hungerford for possible Algebraic proofs!) \end{quote} \end{enumerate} \textbf{Theorem:} Let \(G\) be generated by a (possibly infinite) subset \(A = \theset{a_i\mid i\in I}\), let \(G'\) be any group, and choose elements \(\theset{a_i' \mid i \in I} \subseteq G'\). Then \begin{enumerate} \def\labelenumi{\alph{enumi}.} \item There is at most one homomorphism \(G \to G'\) with \(a_i \mapsto a_i'\). \item If \(G \cong F[A]\), there is exactly \emph{one} such homomorphism. \end{enumerate} \textbf{Corollary:} Every group \(G'\) is a homomorphic image of a free group. 
\emph{Proof:} Let \(A\) be the generators of \(G'\) and \(G = F[A]\), then define \begin{align*} \phi: F[A] &\to G' \\ a_i &\mapsto a_i .\end{align*} This is onto exactly because \(G' = \generators{a_i}\), and using the theorem above we're done. \(\qed\) \hypertarget{generators-and-relations}{% \subsection{Generators and Relations}\label{generators-and-relations}} Let \(G\) be a group and \(A \subseteq G\) be a generating subset so \(G = \generators{a \mid a\in A}\). There exists a \(\phi: F[A] \surjects G\), and by the first isomorphism theorem, we have \(F[A] / \ker \phi \cong G\). Let \(R = \ker \phi\), these provide the \emph{relations}. \emph{Examples:} Let \(G = \ZZ_3 = \generators{[1]_3}\). Let \(x = [1]_3\), then define \(\phi: F[\theset{x}] \surjects \ZZ_3\). Then since \([1] + [1] + [1] = [0] \mod 3\), we have \(\ker \phi = \generators{x^3}\). Let \(G = \ZZ \oplus \ZZ\), then \(G \cong \generators{x,y \mid [x,y] = 1}\). \begin{quote} We'll use this for groups of order 6 -- there will be only one presentation that is nonabelian, and we'll exhibit such a group. \end{quote} \hypertarget{september-9th}{% \section{September 9th}\label{september-9th}} \hypertarget{series-of-groups-1}{% \subsection{Series of Groups}\label{series-of-groups-1}} Recall that a \emph{simple} group has no proper nontrivial normal subgroups. \emph{Example:} \begin{align*} \ZZ_6 \normal \generators{[3]} \normal \generators{[0]} \\ \ZZ_6 / \generators{[3]} &= \ZZ_3 \\ \generators{[3]} / \generators{[0]} &= \ZZ_2 .\end{align*} \textbf{Definition:} A \emph{normal series} (or an \emph{invariant series}) of a group \(G\) is a finite sequence \(H_i \leq G\) such that \(H_i \normal H_{i+1}\) and \(H_n = G\), so we obtain \begin{align*} H_1 \normal H_2 \normal \cdots \normal H_n = G .\end{align*} \textbf{Definition:} A normal series \(\theset{K_j}\) is a \textbf{refinement} of \(\theset{H_i}\) if every \(H_i\) occurs among the \(K_j\), i.e.~\(\theset{H_i} \subseteq \theset{K_j}\). 
\textbf{Definition:} We say two normal series of the same group \(G\) are \emph{isomorphic} if there is a bijection \begin{align*} \theset{H_i / H_{i+1}} \iff \theset{K_j / K_{j+1}} \end{align*} under which corresponding quotients are isomorphic. \textbf{Theorem (Schreier):} Any two normal series of \(G\) have isomorphic refinements. \textbf{Definition:} A normal series of \(G\) is a \textbf{composition series} iff all of the successive quotients \(H_i / H_{i+1}\) are \textbf{simple}. Note that every finite group has a composition series: at each step choose a maximal normal subgroup of the previous term, which exists and eventually reaches \(\theset e\) by finiteness. \textbf{Theorem (Jordan-Holder):} Any two composition series of a group \(G\) are isomorphic. \emph{Proof:} Apply Schreier's refinement theorem. \(\qed\) \emph{Example:} Consider \(S_n \normal A_n \normal \theset e\). This is a composition series, with quotients \(\ZZ_2, A_n\), which are both simple for \(n \geq 5\). \textbf{Definition:} A group \(G\) is \textbf{solvable} iff it has a composition series in which all of the successive quotients are \textbf{abelian}. \emph{Examples:} \begin{itemize} \item Any abelian group is solvable. \item \(S_n\) is not solvable for \(n\geq 5\), since \(A_n\) is simple but not abelian for \(n\geq 5\). \end{itemize} \textbf{Recall Feit-Thompson:} Any nonabelian simple group is of \emph{even} order. \textbf{Consequence:} Every group of \emph{odd} order is solvable. \hypertarget{the-commutator-subgroup}{% \subsection{The Commutator Subgroup}\label{the-commutator-subgroup}} Let \(G\) be a group, and let \([G, G] \leq G\) be the subgroup of \(G\) generated by elements \(aba\inv b\inv\), i.e.~every element is a \emph{product} of commutators. So \([G, G]\) is called \emph{the commutator subgroup}. \textbf{Theorem:} Let \(G\) be a group, then \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \([G,G] \leq G\) \item \([G,G]\) is a normal subgroup \item \(G/[G, G]\) is abelian. 
\item \([G,G]\) is the smallest normal subgroup such that the quotient is abelian, \begin{quote} I.e., if \(N \normal G\) and \(G/N\) is abelian, then \([G, G] \leq N\). \end{quote} \end{enumerate} \emph{Proof of 1:} \([G, G]\) is a subgroup: \begin{itemize} \tightlist \item Closure is clear from definition as generators. \item The identity is \(e = e e\inv e e\inv\). \item So it suffices to show that \((aba\inv b\inv)\inv \in [G, G]\), but this is given by \(bab\inv a\inv\) which is of the correct form. \end{itemize} \(\qed\) \emph{Proof of 2:} \([G, G]\) is normal. Let \(x_i \in [G, G]\), then we want to show \(g \prod x_i g\inv \in [G, G]\), but this reduces to just showing \(gx g\inv \in [G, G]\) for a single \(x\in [G, G]\). Then, \begin{align*} g(aba\inv b\inv ) g\inv &= (gag\inv)(gbg\inv)(ga\inv g\inv)(gb\inv g\inv) \\ &= (gag\inv)(gbg\inv)(gag\inv)\inv (gbg\inv)\inv \\ &\in [G, G] .\end{align*} \(\qed\) \emph{Proof of 3:} \(G/[G, G]\) is abelian. Let \(H = [G, G]\). We have \(aH bH = (ab) H\) and \(bH aH = (ba)H\). But \(abH = baH\) because \((ba)\inv(ab) = a\inv b\inv a b \in [G, G]\). \(\qed\) \emph{Proof of 4:} If \(N \normal G\) and \(G/N\) is abelian, then \([G, G] \leq N\). Suppose \(G/N\) is abelian. Let \(aba\inv b\inv \in [G, G]\). Then \(abN = baN\), so \(aba\inv b\inv \in N\) and thus \([G, G] \subseteq N\). \(\qed\) \hypertarget{free-abelian-groups}{% \subsection{Free Abelian Groups}\label{free-abelian-groups}} \emph{Example:} \(\ZZ \cross \ZZ\). Take \(e_1 = (1, 0), e_2 = (0, 1)\). Then \((x,y) \in \ZZ^2\) can be written \(x(1,0) + y(0, 1)\), so \(\theset{e_i}\) behaves like a basis for a vector space. \textbf{Definition:} A group \(G\) is \emph{free abelian} if there is a subset \(X\subseteq G\) such that every \(g \in G\) can be represented as \begin{align*} g = \sum_{i=1}^r n_i x_i,\quad x_i \in X,~n_i \in \ZZ. 
\end{align*} Equivalently, \(X\) generates \(G\), so \(G = \generators{X}\), and \(\sum n_i x_i = 0 \implies n_i = 0~~\forall i\). If this is the case, we say \(X\) is a \textbf{basis} for \(G\). \emph{Examples:} \begin{itemize} \item \(\ZZ^n\) is free abelian \item \(\ZZ_n\) is not free abelian, since \(n [1] = 0\) and \(n\neq 0\). \begin{quote} In general, you can replace \(\ZZ_n\) by any finite group and replace \(n\) with the order of the group. \end{quote} \end{itemize} \textbf{Theorem:} If \(G\) is free abelian on \(X\) where \(\abs X = r\), then \(G \cong \ZZ^r\). \textbf{Theorem:} If \(X = \theset{x_i}_{i=1}^r\), then a basis for \(\ZZ^r\) is given by \begin{align*} \theset{(1, 0, 0, \cdots), (0, 1, 0, \cdots), \cdots, (0, \cdots, 0, 1)} \definedas \theset{e_1, e_2, \cdots, e_r} \end{align*} \emph{Proof:} Use the map \(\phi: G \to \ZZ^r\) where \(x_i \mapsto e_i\), and check that this is an isomorphism of groups. \textbf{Theorem:} Let \(G\) be free abelian with two bases \(X, X'\), then \(\abs X = \abs X'\). \textbf{Definition:} Let \(G\) be free abelian, then if \(X\) is a basis then \(\abs X\) is called the \emph{rank} of \(G\). \hypertarget{thursday-september-5th}{% \section{Thursday September 5th}\label{thursday-september-5th}} \hypertarget{rings}{% \subsection{Rings}\label{rings}} Recall the definition of a ring: A \emph{ring} \((R, +, \times)\) is a set with binary operations such that \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item \((R, +)\) is an abelian group, \item \((R, \times)\) is a semigroup (i.e.~\(\times\) is associative), and \item \(\times\) distributes over \(+\), i.e.~\(a(b+c) = ab+ac\) and \((a+b)c = ac+bc\). \end{enumerate} \emph{Examples:} \(R = \ZZ, \QQ, \RR, \CC\), or the ring of \(n\times n\) matrices, or \(\ZZ_n\). A ring is \emph{commutative} iff \(ab = ba\) for every \(a,b\in R\), and \emph{a ring with unity} is a ring such that \(\exists 1 \in R\) such that \(a1 = 1a = a\). \emph{Exercise}: Show that \(1\) is unique if it exists. 
In a ring with unity, an element \(a\in R\) is a \emph{unit} iff \(\exists b\in R\) such that \(ab = ba = 1\). \textbf{Definition:} A ring with unity is a \textbf{division ring} \(\iff\) every nonzero element is a unit. \textbf{Definition:} A division ring is a \emph{field} \(\iff\) it is commutative. \textbf{Definition:} Suppose that \(a,b \neq 0\) with \(ab = 0\). Then \(a,b\) are said to be \emph{zero divisors}. \textbf{Definition:} A commutative ring without zero divisors is an \emph{integral domain}. \emph{Example:} In \(\ZZ_n\), a nonzero element \(a\) is a zero divisor iff \(\gcd(a, n) \neq 1\). \emph{Fact:} In a ring with no zero divisors, we have \begin{align*} ab = ac \text{ and } a\neq 0 \implies b=c .\end{align*} \textbf{Theorem:} Every field is an integral domain. \emph{Proof:} Let \(R\) be a field. If \(ab=0\) and \(a\neq 0\), then \(a\inv\) exists and so \(b=0\). \(\qed\) \textbf{Theorem:} Any finite integral domain is a field. \emph{Proof:} \begin{quote} Idea: Similar to the pigeonhole principle. \end{quote} Let \(D = \theset{0, 1, a_1, \cdots, a_n}\) be an integral domain. Let \(a_j \neq 0, 1\) be arbitrary, and consider \(a_j D = \theset{a_j x \mid x\in D\setminus\theset{0}}\). Then \(a_j D = D\setminus\theset{0}\) as sets, since \(x \mapsto a_j x\) is injective by cancellation and \(D\) is finite. But \begin{align*} a_j D = \theset{a_j, a_j a_1, a_j a_2, \cdots, a_j a_n}. \end{align*} Since there are no zero divisors, \(0\) does not occur among these elements, so some \(a_j a_k\) must be equal to 1. \(\qed\) \hypertarget{field-extensions}{% \subsection{Field Extensions}\label{field-extensions}} If \(F \leq E\) are fields, then \(E\) is a vector space over \(F\), for which the dimension turns out to be important. \textbf{Definition}: We can consider \begin{align*} \aut(E/F) \definedas \theset{\sigma: E \selfmap \mid f\in F \implies \sigma(f) = f}, \end{align*} i.e.~the field automorphisms of \(E\) that fix \(F\). \emph{Examples of field extensions:} \(\QQ \to \RR \to \CC\). 
Let \(F(x)\) be the smallest field containing both \(F\) and \(x\). Given this, we can form a diagram \begin{center} \begin{tikzcd} & F(x, y) & \\ F(x)\ar[ur] & & F(y)\ar[ul] \\ & F\ar[ul] \ar[ur] \ar[uu] & \end{tikzcd} \end{center} Let \(F[x]\) the polynomials with coefficients in \(F\). \textbf{Theorem:} Let \(F\) be a field and \(f(x) \in F[x]\) be a non-constant polynomial. Then there exists an \(F \to E\) and some \(\alpha \in E\) such that \(f(\alpha) = 0\). \emph{Proof:} Since \(F[x]\) is a unique factorization domain, given \(f(x)\) we can find an irreducible \(p(x)\) such that \(f(x) = p(x) g(x)\) for some \(g(x)\). So consider \(E = F[x] / (p)\). Since \(p\) is irreducible, \((p)\) is a prime ideal, but in \(F[x]\) prime ideals are maximal and so \(E\) is a field. Then define \begin{align*} \psi: F &\to E \\ a &\mapsto a + (p) .\end{align*} Then \(\psi\) is a homomorphism of rings: supposing \(\psi(\alpha) = 0\), we must have \(\alpha \in (p)\). But all such elements are multiples of a polynomial of degree \(d \geq 1\), and \(\alpha\) is a scalar, so this can only happen if \(\alpha = 0\). Then consider \(\alpha = x + (p)\); the claim is that \(p(\alpha) = 0\) and thus \(f(\alpha) = 0\). We can compute \begin{align*} p(x + (p)) &= a_0 + a_1(x + (p)) + \cdots + a_n(x + (p))^n \\ &= p(x) + (p) = 0 .\end{align*} \(\qed\) \emph{Example:} \(\RR[x] / (x^2 + 1)\) over \(\RR\) is isomorphic to \(\CC\) as a field. \hypertarget{algebraic-and-transcendental-elements}{% \subsection{Algebraic and Transcendental Elements}\label{algebraic-and-transcendental-elements}} \textbf{Definition:} An element \(\alpha \in E\) with \(F \to E\) is \textbf{algebraic} over \(F\) iff there is a nonzero polynomial in \(f \in F[x]\) such that \(f(\alpha) = 0\). Otherwise, \(\alpha\) is said to be \textbf{transcendental}. \emph{Examples:} \begin{itemize} \item \(\sqrt 2 \in \RR \from \QQ\) is algebraic, since it satisfies \(x^2 - 2\). 
\item \(\sqrt{-1} \in \CC \from \QQ\) is algebraic, since it satisfies \(x^2 + 1\). \item \(\pi, e \in \RR \from \QQ\) are transcendental \begin{quote} This takes some work to show. \end{quote} \end{itemize} An \emph{algebraic number} \(\alpha \in \CC\) is an element that is algebraic over \(\QQ\). \emph{Fact:} The set of algebraic numbers forms a field. \textbf{Definition:} Let \(F \leq E\) be a field extension and \(\alpha \in E\). Define a map \begin{align*} \phi_\alpha: F[x] &\to E \\ \phi_\alpha(f) &= f(\alpha) .\end{align*} This is a homomorphism of rings and referred to as the \emph{evaluation homomorphism}. \textbf{Theorem:} \(\phi_\alpha\) is injective iff \(\alpha\) is transcendental. \begin{quote} Note: otherwise, this map will have a kernel, which will be generated by a single element that is referred to as the \textbf{minimal polynomial} of \(\alpha\). \end{quote} \hypertarget{minimal-polynomials}{% \subsection{Minimal Polynomials}\label{minimal-polynomials}} \textbf{Theorem:} Let \(F\leq E\) be a field extension and \(\alpha \in E\) algebraic over \(F\). Then \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item There exists a polynomial \(p\in F[x]\) of minimal degree such that \(p(\alpha) = 0\). \item \(p\) is irreducible. \item \(p\) is unique up to a constant. \end{enumerate} \emph{Proof:} Since \(\alpha\) is algebraic, there is some nonzero \(f \in F[x]\) with \(f(\alpha) = 0\). So write \(f\) in terms of its irreducible factors, so \(f(x) = \prod p_j(x)\) with each \(p_j\) irreducible. Then \(p_i(\alpha) = 0\) for some \(i\) because we are in a field and thus don't have zero divisors. So there exists at least one \(p_i(x)\) such that \(p_i(\alpha) = 0\), so let \(q\) be one such polynomial of minimal degree. Suppose that \(\deg q < \deg p_i\). Using the division algorithm, we can write \(p_i(x) = q(x) c(x) + r(x)\) for some \(c\), and some \(r\) where \(\deg r < \deg q\). But then \(0 = p_i(\alpha) = q(\alpha)c(\alpha) + r(\alpha)\), and since \(q(\alpha) = 0\), we get \(r(\alpha) = 0\). 
So \(r(x)\) is identically zero by the minimality of \(\deg q\), and thus \(p_i(x) = c(x) q(x)\); since \(p_i\) is irreducible, \(c(x) = c\), a constant. \(\qed\) \textbf{Definition:} Let \(\alpha \in E\) be algebraic over \(F\), then the unique monic polynomial \(p\in F[x]\) of minimal degree such that \(p(\alpha) = 0\) is the \textbf{minimal polynomial} of \(\alpha\). \emph{Example:} \(\sqrt{1 + \sqrt 2}\) has minimal polynomial \(x^4 - 2x^2 - 1\), which can be found by raising it to the 2nd and 4th power and finding a linear combination that is constant. \hypertarget{tuesday-september-10th}{% \section{Tuesday September 10th}\label{tuesday-september-10th}} \hypertarget{vector-spaces}{% \subsection{Vector Spaces}\label{vector-spaces}} \textbf{Definition:} Let \(\FF\) be a field. A \textbf{vector space} is an abelian group \(V\) with a map \(\FF \cross V \to V\) such that \begin{itemize} \item \(\alpha(\beta \vector v) = (\alpha \beta) \vector v\) \item \((\alpha + \beta)\vector v = \alpha \vector v + \beta \vector v\), \item \(\alpha(\vector v + \vector w) = \alpha \vector v + \alpha \vector w\) \item \(1\vector v = \vector v\) \end{itemize} \emph{Examples:} \(\RR^n, \CC^n , F[x] = \mathrm{span}(\theset{1, x, x^2, \cdots}), L^2(\RR)\) \textbf{Definition:} Let \(V\) be a vector space over \(\FF\); then a set \(W \subseteq V\) \emph{spans} \(V\) iff for every \(\vector v\in V\), one can write \(\vector v = \sum \alpha_i \vector w_i\) where \(\alpha_i \in \FF,~\vector w_i \in W\). \textbf{Definition:} \(V\) is \emph{finite dimensional} if there exists a finite spanning set. \textbf{Definition:} A set \(W \subseteq V\) is \emph{linearly independent} iff \begin{align*} \sum \alpha_i \vector w_i = \vector 0 \implies \alpha_i = 0 \text{ for all } i .\end{align*} \textbf{Definition:} A \emph{basis} for \(V\) is a set \(W \subseteq V\) such that \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(W\) is linearly independent, and \item \(W\) spans \(V\). 
\end{enumerate} A basis is a midpoint between a spanning set and a linearly independent set. We can add vectors to a set until it is spanning, and we can throw out vectors until the remaining set is linearly independent. This is encapsulated in the following theorems: \textbf{Theorem:} If \(W\) spans \(V\), then some subset of \(W\) is a basis for \(V\). \textbf{Theorem:} If \(W\) is a set of linearly independent vectors, then some superset of \(W\) is a basis for \(V\). \emph{Fact:} Any finite-dimensional vector space has a finite basis. \textbf{Theorem:} If \(W\) is a linearly independent set and \(B\) is a basis, then \(\abs{W} \leq \abs B\). \textbf{Corollary:} Any two bases have the same number of elements. \begin{quote} So we define the dimension of \(V\) to be the number of elements in any basis, which is a unique number. \end{quote} \hypertarget{algebraic-extensions}{% \subsection{Algebraic Extensions}\label{algebraic-extensions}} \textbf{Definition:} \(E \geq F\) is an algebraic extension iff every \(\alpha \in E\) is algebraic over \(F\). \textbf{Definition:} \(E \geq F\) is a \emph{finite extension} iff \(E\) is finite-dimensional as an \(F\dash\)vector space. \emph{Notation:} \([E: F] = \dim_F E\), the dimension of \(E\) as an \(F\dash\)vector space. \emph{Observation:} If \(E = F(\alpha)\) where \(\alpha\) is algebraic over \(F\), then \(E\) is an algebraic extension of \(F\). \emph{Observation:} If \(E\geq F\) and \([E: F] = 1\), then \(E=F\). \textbf{Theorem:} If \(E \geq F\) is a finite extension, then \(E\) is algebraic over \(F\). \emph{Proof:} Let \(\beta \in E\). Then the set \(\theset{1, \beta, \beta^2, \cdots}\) is not linearly independent. So \(\sum_{i=0}^n c_i \beta^i = 0\) for some \(n\) and some \(c_i\) not all zero. But then \(\beta\) is algebraic. \(\qed\) \begin{quote} Note that the converse is not true in general. \emph{Example}: Let \(E = \overline \QQ\) be the field of algebraic numbers. Then \(E \geq \QQ\) is algebraic, but \([E : \QQ] = \infty\). 
\end{quote} \textbf{Theorem:} Let \(K \geq E \geq F\), then \([K: F] = [K: E] [E: F]\). \emph{Proof:} Let \(\theset{\alpha_i}^m\) be a basis for \(E/F\) Let \(\theset{\beta_i}^n\) be a basis for \(K / E\). Then the RHS is \(mn\). \emph{Claim:} \(\theset{\alpha_i \beta_j}^{m, n}\) is a basis for \(K/ F\). \emph{Linear independence:} \begin{align*} \sum_{i, j} c_{ij} \alpha _i \beta_j &= 0 \\ \implies \sum_j \sum_i c_{ij} \alpha_i \beta_j &= 0 \\ \implies \sum_i c_{ij} \alpha_i &= 0 \quad \text{since $\beta$ form a basis} \\ \implies \sum c_{ij} &= 0 \quad \text{since $\alpha$ form a basis} .\end{align*} \emph{Exercise}: Show this is also a spanning set. \(\qed\) \textbf{Corollary:} Let \(E_r \geq E_{r-1} \geq \cdots \geq E_1 \geq F\), then \begin{align*} [E_r: F]= [E_r: E_{r-1}][E_{r-1}:E_{r-2}] \cdots [E_2: E_1][E_1 : F] .\end{align*} \emph{Observation:} If \(\alpha \in E \geq F\) and \(\alpha\) is algebraic over \(F\) where \(E \geq F(\alpha) \geq F\), then \(F(\alpha)\) is algebraic (since \([F(\alpha): F] < \infty\)) and \([F(\alpha): F]\) is the degree of the minimal polynomial of \(\alpha\) over \(F\). \textbf{Corollary:} Let \(E = F(\alpha) \geq F\) where \(\alpha\) is algebraic. Then \begin{align*}\beta \in F(\alpha) \implies \deg \min(\beta, F) \divides \deg \min(\alpha, F) .\end{align*} \emph{Proof:} Since \(F(\alpha) \geq F(\beta) \geq F\), we have \([F(\alpha): F] = [F(\alpha): F(\beta)][F(\beta): F]\). 
But just note that \begin{align*} [F(\alpha): F] &= \deg \min (\alpha, F) \text{ and } \\ [F(\beta): F] &= \deg \min (\beta, F) .\end{align*} \(\qed\) \textbf{Theorem:} Let \(E \geq F\) be algebraic, then \begin{align*} [E: F] < \infty \iff E = F(\alpha_1, \cdots, \alpha_n) \text{ for some } \alpha_n \in E .\end{align*} \hypertarget{algebraic-closures}{% \subsection{Algebraic Closures}\label{algebraic-closures}} \textbf{Definition:} Let \(E \geq F\), and define \begin{align*} \overline{F_E} = \theset{\alpha \in E \mid \alpha \text{ is algebraic over } F} \end{align*} to be the \textbf{algebraic closure of \(F\) in \(E\)}. \emph{Example:} \(\QQ \injects \CC\), while \(\overline \QQ = \Bbb{A}\) is the field of algebraic numbers, which is a dense subfield of \(\CC\). \textbf{Proposition:} \(\overline{F_E}\) is a always field. \emph{Proof:} Let \(\alpha, \beta \in \overline{F_E}\), so \([F(\alpha, \beta): F] < \infty\). Then \(F(\alpha, \beta) \subseteq \overline{F_E}\) is algebraic over \(F\) and \begin{align*} \alpha\pm \beta, \quad \alpha\beta,\quad \frac \alpha \beta \quad \in F(\alpha, \beta) .\end{align*} So \(\overline{F_E}\) is a subfield of \(E\) and thus a field. \textbf{Definition:} A field \(F\) is \textbf{algebraically closed} iff every non-constant polynomial in \(F[x]\) is a root in \(F\). Equivalently, every polynomial in \(F[x]\) can be factored into linear factors. If \(F\) is algebraically closed and \(E\geq F\) and \(E\) is algebraic, then \(E=F\). \hypertarget{the-fundamental-theorem-of-algebra}{% \subsubsection{The Fundamental Theorem of Algebra}\label{the-fundamental-theorem-of-algebra}} \textbf{Theorem (Fundamental Theorem of Algebra):} \(\CC\) is an algebraically closed field. \emph{Proof:} \textbf{Liouville's theorem}: A bounded entire function \(f: \CC \selfmap\) is constant. \begin{itemize} \item \emph{Bounded} means \(\exists M \suchthat z\in \CC \implies \abs{f(z)} \leq M\). \item \emph{Entire} means analytic everywhere. 
\end{itemize} Let \(f(z) \in \CC[z]\) be a polynomial without a zero which is non-constant. Then \(\frac 1 {f(z)}: \CC \selfmap\) is analytic and bounded, and thus constant, and contradiction. \(\qed\) \hypertarget{geometric-constructions}{% \subsection{Geometric Constructions:}\label{geometric-constructions}} Given the tools of a straightedge and compass, what real numbers can be constructed? Let \(\mathcal C\) be the set of such numbers. \textbf{Theorem:} \(C\) is a subfield of \(\RR\). \hypertarget{thursday-september-12th}{% \section{Thursday September 12th}\label{thursday-september-12th}} \hypertarget{geometric-constructions-1}{% \subsection{Geometric Constructions}\label{geometric-constructions-1}} \textbf{Definition:} A real number \(\alpha\) is said to be \textbf{constructible} iff \(\abs \alpha\) is constructible using a ruler and compass. Let \(\mathcal C\) be the set of constructible numbers. Note that \(\pm 1\) is constructible, and thus so is \(\ZZ\). \textbf{Theorem:} \(\mathcal{C}\) is a field. \emph{Proof:} It suffices to construct \(\alpha \pm \beta,~~ \alpha\beta,~~ \alpha / \beta\). \emph{Showing \(\pm\) and inverses:} Relatively easy. \emph{Showing closure under products:} \includegraphics{figures/2019-09-17-09:48.png}\\ \textbf{Corollary:} \(\QQ \leq \mathcal C\) is a subfield. Can we get all of \(\RR\) with \(\mathcal C\)? The operations we have are \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item Intersect 2 lines (gives nothing new) \item Intersect a line and a circle \item Intersect 2 circles \end{enumerate} Operation (3) reduces to (2) by subtracting two equations of a circle (\(x^2 + y^2 + ax + by + c\)) to get an equation of a line. Operation (2) reduces to solving quadratic equations. \textbf{Theorem:} \(\mathcal C\) contains precisely the real numbers obtained by adjoining finitely many square roots of elements in \(\QQ\). \emph{Proof:} Need to show that \(\alpha \in \mathcal C \implies \sqrt \alpha \in \mathcal C\). 
\begin{itemize} \item Bisect \(PA\) to get \(B\). \item Draw a circle centered at \(B\). \item Let \(Q\) be intersection of circle with \(y\) axis and \(O\) be the origin. \item Note triangles 1 and 2 are similar, so \begin{align*} \frac{OQ}{OA} = \frac{PO}{OQ} \implies (OQ)^2 = (PO)(OA) = 1\alpha .\end{align*} \end{itemize} \(\qed\) \emph{Corollary:} Let \(\gamma \in \mathcal{C}\) be constructible. Then there exist \(\theset{\alpha_i}_{i=1}^n\) such that \begin{align*} \gamma = \prod_{i=1}^n \alpha_i \quad\text{and}\quad [\QQ(\alpha_1, \cdots, \alpha_j): \QQ(\alpha_1, \cdots, \alpha_{j-1})] = 2 ,\end{align*} and \([\QQ(\alpha): \QQ] = 2^d\) for some \(d\). \textbf{Applications:} \textbf{Doubling the cube:} Given a cube of size 1, can we construct one of size 2? To do this, we'd need \(x^3 = 2\). But note that \(\min(\sqrt[3]{2}, \QQ) = x^3 - 2 = f(x)\) is irreducible over \(\QQ\). So \([\QQ(\sqrt[3]{2}): \QQ] = 3 \neq 2^d\) for any \(d\), so this can not be constructible. \textbf{Trisections of angles:} We want to construct regular polygons, so we'll need to construct angles. We can get some by bisecting known angles, but can we get all of them? \emph{Example:} Attempt to construct \(20^\circ\) by trisecting the known angle \(60^\circ\), which is constructible using a triangle of side lengths \(1,2,\sqrt 3\). If \(20^\circ\) were constructible, \(\cos 20^\circ\) would be as well. There is an identity \begin{align*} \cos 3\theta = 4\cos^3 \theta - 3\cos \theta .\end{align*} Letting \(\theta = 20^\circ\) so \(3\theta = 60^\circ\), we obtain \begin{align*} \frac 1 2 = 4(\cos 20^\circ)^3 - 3\cos 20^\circ, \end{align*} so if we let \(x = \cos 20^\circ\) then \(x\) satisfies the polynomial \(f(x) = 8x^3 - 6x - 1\), which is irreducible. But then \([\QQ(20^\circ):\QQ] = 3 \neq 2^d\), so \(\cos 20^\circ \not\in\mathcal C\). 
\hypertarget{finite-fields}{% \subsection{Finite Fields}\label{finite-fields}} \textbf{Definition:} The \emph{characteristic} of \(F\) is the smallest \(n\geq 0\) such that \(n1 = 0\), or \(0\) if such an \(n\) does not exist. \emph{Exercise}: For a field \(F\), show that \(\ch F = 0\) or \(p\) a prime. Note that if \(\ch F = 0\), then \(\ZZ \in F\) since \(1,~ 1+1,~ 1+1+1, \cdots\) are all in \(F\). Since inverses must also exist in \(F\), we must have \(\QQ \in F\) as well. So \(\ch F = 0 \iff F\) is infinite. If \(\ch F = p\), it follows that \(\ZZ_p \subset F\). \textbf{Theorem:} \begin{align*} \text{For } E \geq F \text{ where } [E: F] = n \text{ and } F \text{ finite }, \quad \abs F = q \implies \abs E = q^n .\end{align*} \emph{Proof:} \(E\) is a vector space over \(F\). Let \(\theset{v_i}^n\) be a basis. Then \(\alpha \in E \implies \alpha = \sum_{i=1}^n a_i v_i\) where each \(a_i \in F\). There are \(q\) choices for each \(a_i\), and \(n\) coefficients, yielding \(q^n\) distinct elements. \(\qed\) \textbf{Corollary:} Let \(E\) be a finite field where \(\ch E = p\). Then \(\abs E = p^n\) for some \(n\). \textbf{Theorem:} Let \(\ZZ_p \leq E\) with \(\abs E = p^n\). If \(\alpha \in E\), then \(\alpha\) satisfies \begin{align*} x^{p^n} - x \in \ZZ_p[x]. \end{align*} \emph{Proof:} If \(\alpha = 0\), we're done. So suppose \(\alpha \neq 0\), then \(\alpha \in E\units\), which is a group of order \(p^n - 1\). So \(\alpha^{p^n - 1} = 1\), and thus \(\alpha \alpha^{p^n - 1} = \alpha 1 \implies \alpha^{p^n} = \alpha\). \(\qed\) \textbf{Definition:} \(\alpha \in F\) is an \emph{\(n\)th root of unity} iff \(\alpha^n = 1\). It is a \emph{primitive} root of unity of \(n\) iff \(k\leq n \implies \alpha^k \neq 1\) (so \(n\) is the smallest power for which this holds). \textbf{Fact:} If \(F\) is a finite field, then \(F\units\) is a cyclic group. \textbf{Corollary:} If \(E \geq F\) with \([E: F] = n\), then \(E = F(\alpha)\) for just a single element \(\alpha\). 
\emph{Proof:} Choose \(\alpha \in E\units\) such that \(\generators \alpha = E\units\). Then \(E = F(\alpha)\). \(\qed\) Next time: Showing the existence of a field with \(p^n\) elements. For now: derivatives. Let \(f(x) \in F[x]\) by a polynomial with a multiple zero \(\alpha \in E\) for some \(E \geq F\). If it has multiplicity \(m \geq 2\), then note that \begin{align*} f(x) = (x-\alpha)^m g(x) \implies f'(x) m(x-\alpha)^{m-1}g(x) + g'(x)(x-\alpha)^m \implies f'(\alpha) = 0. \end{align*} So \begin{align*} \alpha \text{ a multiple zero of } f \implies f'(\alpha) = 0 .\end{align*} The converse is also useful. \emph{Application:} Let \(f(x) = x^{p^n} - x\), then \(f'(x) = p^n x^{p^n - 1} - 1 = -1 \neq 0\), so all of the roots are distinct. \hypertarget{tuesday-september-17th}{% \section{Tuesday September 17th}\label{tuesday-september-17th}} \hypertarget{finite-fields-and-roots-of-polynomials}{% \subsection{Finite Fields and Roots of Polynomials}\label{finite-fields-and-roots-of-polynomials}} \emph{Recall from last time:} Let \(\FF\) be a finite field. Then \(\FF\units = \FF\setminus\theset{0}\) is \emph{cyclic} (this requires some proof). Let \(f \in \FF[x]\) with \(f(\alpha) = 0\). Then \(\alpha\) is a \emph{multiple root} if \(f'(\alpha) = 0\). \textbf{Lemma:} Let \(\FF\) be a finite field with characteristic \(p > 0\). Then \begin{align*} f(x) = x^{p^n} - x \in \FF[x] \end{align*} has \(p^n\) distinct roots. \emph{Proof:} \begin{align*} f'(x) = p^n x^{p^n-1}-1 = -1 ,\end{align*} since we are in char \(p\). This is identically -1, so \(f'(x) \neq 0\) for any \(x\). So there are no multiple roots. Since there are at most \(p^n\) roots, this gives exactly \(p^n\) distinct roots. \(\qed\) \textbf{Theorem:} A field with \(p^n\) elements exists (denoted \(\mathbb{GF}(p^n)\)) for every prime \(p\) and every \(n > 0\). \emph{Proof:} Consider \(\ZZ_p \subseteq K \subseteq \overline{\ZZ}_p\) where \(K\) is the set of zeros of \(x^{p^n}-x\). Then we claim \(K\) is a field. 
Suppose \(\alpha, \beta \in K\). Then \((\alpha \pm \beta)^{p^n} = \alpha^{p^n} \pm \beta^{p^n} = \alpha \pm \beta\). We also have \begin{align*} (\alpha\beta)^{p^n} = \alpha^{p^n}\beta^{p^n} = \alpha\beta \text{ and } \alpha^{-p^n} = \alpha\inv .\end{align*} So \(K\) is a field and \(\abs K = p^n\). \(\qed\) \textbf{Corollary:} Let \(F\) be a finite field. If \(n\in\NN^+\), then there exists an \(f(x) \in F[x]\) that is irreducible of degree \(n\). \emph{Proof:} Let \(F\) be a finite field, so \(\abs F = p^r\). By the previous lemma, there exists a \(K\) such that \(\ZZ_p \subseteq K \subseteq \overline F\), where \(K\) is defined as \begin{align*} K \definedas \theset{\alpha \in \overline F \mid \alpha^{p^{rn}} - \alpha = 0} .\end{align*} We also have \begin{align*} F = \theset{\alpha \in \overline F \suchthat \alpha^{p^r} - \alpha = 0}.\end{align*} Moreover, \(p^{rn} = p^r p^{r(n-1)}\). So let \(\alpha \in F\), then \(\alpha^{p^r} - \alpha = 0\). Then \begin{align*} \alpha^{p^{rn}} = \alpha^{p^r p^{r(n-1)}} = (\alpha^{p^r})^{p^{r(n-1)}} = \alpha^{p^{r(n-1)}} ,\end{align*} and continuing to reduce in this way yields \(\alpha^{p^{rn}} = \alpha^{p^r} = \alpha\). So \(\alpha \in K\), and thus \(F \leq K\).
Thus (applying the first isomorphism theorem), we have \(F(\alpha) \subseteq E\) isomorphic to \(F[x] / (\min(\alpha, F))\).
\end{quote} \textbf{Theorem:} Let \(F \leq E\) with \(\alpha \in E\) algebraic over \(F\). If \(\deg\min(\alpha, F) = n\), then \(F(\alpha)\) has dimension \(n\) over \(F\), and \(\theset{1, \alpha, \cdots, \alpha^{n-1}}\) is a basis for \(F(\alpha)\) over \(F\). Moreover, any \(\beta \in F(\alpha)\), is \emph{also} algebraic over \(F\),and \(\deg\min(\beta, F) \divides \deg \min(\alpha, F)\). \emph{Proof of first part:} Exercise. \emph{Proof of second part:} We want to show that \(\beta\) is algebraic over \(F\). We have \begin{align*} [F(\alpha):F] = [F(\alpha): F(\beta)][F(\beta): F] ,\end{align*} so \([F(\beta) : F]\) is less than \(n\) since this is a finite extension, and the division of degrees falls out immediately. \(\qed\) \hypertarget{automorphisms-and-galois-theory}{% \subsection{Automorphisms and Galois Theory}\label{automorphisms-and-galois-theory}} Let \(F\) be a field and \(\overline F\) be its algebraic closure. Consider subfields of the algebraic closure, i.e.~\(E\) such that \(F \leq E \leq \overline F\). Then \(E \geq F\) is an algebraic extension. \textbf{Definition:} \(\alpha, \beta \in E\) are \emph{conjugates} iff \(\min(\alpha, F) = \min(\beta, F)\). \emph{Examples:} \begin{itemize} \item \(\sqrt[3]{3}, \sqrt[3]{3}\zeta, \sqrt[3]{3}\zeta^2\) are all conjugates, where \(\zeta = e^{2\pi i/3}\). \item \(\alpha = a+bi \in \CC\) has conjugate \(\bar \alpha = a-bi\), and \begin{align*} \min(\alpha, \RR) = \min(\bar \alpha, \RR) = x^2 - 2ax + (a^2 + b^2) .\end{align*} \end{itemize} \hypertarget{thursday-september-19th}{% \section{Thursday September 19th}\label{thursday-september-19th}} \hypertarget{conjugates}{% \subsection{Conjugates}\label{conjugates}} Let \(E \geq F\) be a field extension. Then \(\alpha,\beta \in E\) are \emph{conjugate} \(\iff \min(\alpha, F) = \min(\beta, F)\) in \(F[x]\). \emph{Example:} \(a + bi, a-bi\) are conjugate in \(\CC/\RR\), since they both have minimal polynomial \(x^2 - 2ax + (a^2 + b^2)\) over \(\RR\). 
\textbf{Theorem:} Let \(F\) be a field and \(\alpha, \beta \in E \geq F\) with \(\deg \min (\alpha, F) = \deg \min(\beta, F)\), i.e. \begin{align*} [F(\alpha): F] = [F(\beta): F] .\end{align*} Then \(\alpha, \beta\) are conjugates \(\iff F(\alpha) \cong F(\beta)\) under the map \begin{align*} \phi: F(\alpha) &\to F(\beta)\\ \sum_i a_i \alpha^i &\mapsto \sum_i a_i \beta^i .\end{align*} \emph{Proof:} Suppose \(\phi\) is an isomorphism. Let \begin{align*} f \definedas \min (\alpha, F) = \sum c_i x^i \text{ where } c_i \in F ,\end{align*} so \(f(\alpha) = 0\). Then \begin{align*} 0 = f(\alpha) = f(\sum c_i \alpha^i) = \sum c_i \beta^i ,\end{align*} so \(\beta\) satisfies \(f\) as well, and thus \(f = \min(\alpha, F) \divides \min(\beta, F)\). But we can repeat this argument with \(f\inv\) and \(g(x) \definedas \min(\beta, F)\), and so we get an equality. Thus \(\alpha, \beta\) are conjugates. Conversely, suppose \(\alpha, \beta\) are conjugates so that \(f = g\). Check that \(\phi\) is a homomorphism of fields, so that \begin{align*} \phi(x + y) = \phi(x) + \phi(y) \text{ and } \phi(xy) = \phi(x) \phi(y) .\end{align*} Then \(\phi\) is clearly surjective, so it remains to check injectivity. To see that \(\phi\) is injective, suppose \(f(z) = 0\). Then \(\sum a_i \beta^i = 0\). But by linear independence, this forces \(a_i = 0\) for all \(i\), which forces \(z=0\). \(\qed\) \textbf{Corollary:} Let \(\alpha \in \overline F\) be algebraic over \(F\). Then \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(\phi: F(\alpha) \injects \overline F\) for which \(\phi(f) = f\) for all \(f\in F\) maps \(\alpha\) to one of its conjugates. \item If \(\beta \in \overline F\) is a conjugate of \(\alpha\), then there exists one isomorphism \(\psi: F(\alpha) \to F(\beta)\) such that \(\psi(f) = f\) for all \(f\in F\). \end{enumerate} \textbf{Corollary:} Let \(f \in \RR[x]\) and suppose \(f(a+bi) = 0\). Then \(f(a - bi) = 0\) as well. 
\emph{Proof:} We know \(i, -i\) are conjugates since they both have minimal polynomial \(f(x) = x^2 + 1\). By (2), we have an isomorphism \(\RR[i] \mapsvia{\psi} \RR[-i]\). We have \(\psi(a+bi) = a-bi\), and \(f(a+bi) = 0\). This isomorphism commutes with \(f\), so we in fact have \begin{align*} 0 = \psi(f(a+bi)) = f(\psi(a-bi)) = f(a-bi) .\end{align*} \(\qed\) \hypertarget{fixed-fields-and-automorphisms}{% \subsection{Fixed Fields and Automorphisms}\label{fixed-fields-and-automorphisms}} \textbf{Definition:} Let \(F\) be a field and \(\psi: F \selfmap\) is an \emph{automorphism} iff \(\psi\) is an isomorphism. \textbf{Definition:} Let \(\sigma: E\selfmap\) be an automorphism. Then \(\sigma\) is said to \emph{fix} \(a\in E\) iff \(\sigma(a) = a\). For any subset \(F \subseteq E\), \(\sigma\) fixes \(F\) iff \(\sigma\) fixes every element of \(F\). \emph{Example:} Let \(E = \QQ(\sqrt 2, \sqrt 5) \supseteq \QQ = F\). A basis for \(E/F\) is given by \(\theset{1, \sqrt 2, \sqrt 5, \sqrt {10}}\). Suppose \(\psi: E\selfmap\) fixes \(\QQ\). By the previous theorem, we must have \(\psi(\sqrt 2) = \pm \sqrt 2\) and \(\psi(\sqrt 5) = \pm \sqrt 5\). What is fixed by \(\psi\)? Suppose we define \(\psi\) on generators, \(\psi(\sqrt 2) = -\sqrt 2\) and \(\psi(\sqrt 5) = \sqrt 5\). Then \begin{align*} f(c_0 + c_1 \sqrt 2 + c_2 \sqrt 5 + c_3 \sqrt{10}) = c_0 - c_1\sqrt 2 + c_2 \sqrt 5 - c_3\sqrt{10} .\end{align*} This forces \(c_1 = 0, c_3 = 0\), and so \(\psi\) fixes \(\theset{ c_0 + c_2 \sqrt 5 } = \QQ(\sqrt 5)\). \textbf{Theorem:} Let \(I\) be a set of automorphisms of \(E\) and define \begin{align*} E_I = \theset{\alpha \in E \suchthat \sigma(a) = a ~\forall \sigma \in I} \end{align*} Then \(E_I \leq E\) is a subfield. \emph{Proof:} Let \(a,b \in E_i\). We need to show \(a \pm b, ab, b\neq 0 \implies b\inv \in I\). We have \(\sigma(a\pm b) = \sigma(a) \pm \sigma(b) = a + b \in I\) since \(\sigma\) fixes everything in \(I\). 
Moreover \begin{align*} \sigma(ab) = \sigma(a)\sigma(b) = ab \in E_I \quad \text{ and } \quad \sigma(b\inv) = \sigma(b)\inv = b\inv \in E_I .\end{align*} \(\qed\) \textbf{Definition:} Given a set \(I\) of automorphisms of \(E\), \(E_I\) is called the \emph{fixed field} of \(E\) under \(I\). \textbf{Theorem:} Let \(E\) be a field and \(A = \theset{\sigma:E \selfmap \suchthat \sigma\text{ is an automorphism }}\). Then \(A\) is a group under function composition. \textbf{Theorem:} Let \(E/F\) be a field extension, and define \begin{align*} G(E/F) = \theset{\sigma:E\selfmap \suchthat f\in F \implies \sigma(f) = f} .\end{align*} Then \(G(E/F) \leq A\) is a subgroup, consisting of the automorphisms of \(E\) fixing \(F\) pointwise.
Supposing also that we have an isomorphism \(\sigma: F \to F'\), we want to extend this to an isomorphism from \(E\) to \emph{some} subfield of \(\overline{F}'\) over \(F'\). \textbf{Theorem:} Let \(E\) be an algebraic extension of \(F\) and \(\sigma: F \to F'\) be an isomorphism of fields. Let \(\overline{F}'\) be the algebraic closure of \(F'\). Then there exists a \(\tau: E \to E'\) where \(E' \leq F'\) such that \(\tau(f) = \sigma(f)\) for all \(f \in F\). \emph{Proof:} See Fraleigh. Uses Zorn's lemma. \(\qed\) \textbf{Corollary:} Let \(F\) be a field and \(\overline F, \overline F'\) be algebraic closures of \(F\). Then \(\overline F \cong \overline F'\). \emph{Proof:} Take the identity \(F \to F\) and lift it to some \(\tau: \overline F \to E = \tau(\overline F)\) inside \(\overline F '\). \begin{center} \begin{tikzcd} & \bar F'\ar[d, dash] \\ \bar F\ar[r, "\tau", red]\ar[d, dash] & E = \tau(\bar F)\ar[d, dash] \\ F\ar[r, "\id"] & F \end{tikzcd} \end{center} Then \(\tau(\overline F)\) is algebraically closed, and \(\overline F' \geq \tau(\overline F)\) is an algebraic extension. But then \(\overline F' = \tau(\overline F)\). \(\qed\) \textbf{Corollary:} Let \(E \geq F\) be an algebraic extension with \(\alpha, \beta \in E\) conjugates. Then the conjugation isomorphism that sends \(\alpha \to \beta\) can be extended to \(E\). \emph{Proof:} \begin{center} \begin{tikzcd} \bar F \ar[d, dash] & \bar F \ar[d, dash, red] \\ E \ar[r, "\tau", red]\ar[d, dash] & {\color{red}E }\ar[d, dash] \\ F(\alpha) \ar[r, "\psi"]\ar[d, dash] & F(\beta)\ar[d, dash] \\ F \ar[r, "\id"] & F \\ \end{tikzcd} \end{center} \begin{quote} Note: Any isomorphism needs to send algebraic elements to algebraic elements, and even more strictly, conjugates to conjugates. \end{quote} Counting the number of isomorphisms: Let \(E \geq F\) be a finite extension. We want to count the number of isomorphisms from \(E\) to a subfield of \(\overline F\) that leave \(F\) fixed. 
I.e., how many ways can we fill in the following diagram? \begin{center} \begin{tikzcd} \bar F \ar[d, dash] & \bar F \ar[d, dash, red] \\ E \ar[r, "\tau", red]\ar[d, dash] & {\color{red}E }\ar[d, dash] \\ F \ar[r, "\id"] & F \\ \end{tikzcd} \end{center} Let \(G(E/F) \definedas \Gal(E/F)\); this will be a finite group if \([E: F] < \infty\). \textbf{Theorem:} Let \(E \geq F\) with \([E: F] < \infty\) and \(\sigma: F \to F'\) be an isomorphism. Then the number of isomorphisms \(\tau: E \to E'\) extending \(\sigma\) is \emph{finite}. \emph{Proof:} Since \([E: F]\) is finite, we have \(F_0 \definedas F(\alpha_1, \alpha_2, \cdots, \alpha_t)\) for some \(t\in \NN\). Let \(\tau: F_0 \to E'\) be an isomorphism extending \(\sigma\). Then \(\tau(\alpha_i)\) must be a conjugate of \(\alpha_i\), of which there are only finitely many since \(\deg \min(\alpha_j, F)\) is finite. So there are at most \(\prod_i \deg\min(\alpha_i, F)\) isomorphisms. \emph{Example:} \(f(x) = x^3 - 2\), which has roots \(\sqrt[3] 2, \sqrt[3] 2 \zeta, \sqrt[3] \zeta^2\). Two other concepts to address: \begin{itemize} \item Separability (multiple roots) \item Splitting Fields (containing all roots) \end{itemize} \textbf{Definition:} Let \begin{align*} \theset{E: F} \definedas \abs{ \theset{\sigma: E \to E' \suchthat \sigma \text{ is an isomorphism extending } \id: F \to F}} ,\end{align*} and define this to be the \emph{index}. \textbf{Theorem:} Suppose \(F \leq E \leq K\), then \begin{align*} \theset{K: F} = \theset{K: E} \theset{E: F}. \end{align*} \emph{Proof:} Exercise. \(\qed\) \emph{Example:} \(\QQ(\sqrt 2, \sqrt 5)/\QQ\), which is an extension of \emph{degree} 4. It also turns out that \begin{align*} \theset{\QQ(\sqrt 2, \sqrt 5) : \QQ} = 4. \end{align*} \textbf{Questions:} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item When does \([E: F] = \theset{E: F}\)? (This is always true in characteristic zero.) \item When is \(\theset{E: F} = \abs{\Gal(E/F)}\)? 
\end{enumerate} Note that in this example, \(\sqrt 5 \mapsto \pm \sqrt 5\) and likewise for \(\sqrt 2\), so any isomorphism extending the identity must in fact be an \emph{automorphism}. We have automorphisms \begin{align*} \sigma_1: (\sqrt 2, \sqrt 5) &\mapsto (-\sqrt 2,\sqrt 5) \\ \sigma_2: (\sqrt 2, \sqrt 5) &\mapsto (\sqrt 2, -\sqrt 5) ,\end{align*} as well as \(\id\) and \(\sigma_1 \circ \sigma_2\). Thus \(\Gal(E/F) \cong \ZZ_2^2\). \hypertarget{separable-extensions}{% \subsection{Separable Extensions}\label{separable-extensions}} \textbf{Goal}: When is \(\theset{E: F} = [E: F]\)? We'll first see what happens for simple extensions. \textbf{Definition:} Let \(f \in F[x]\) and \(\alpha\) be a zero of \(f\) in \(\overline F\). The maximum \(\nu\) such that \((x-\alpha)^\nu \divides f\) is called the \emph{multiplicity} of \(f\). \textbf{Theorem:} Let \(f\) be irreducible. Then all zeros of \(f\) in \(\overline F\) have the same multiplicity. \emph{Proof:} Let \(\alpha, \beta\) satisfy \(f\), where \(f\) is irreducible. Then consider the following lift: \begin{center} \begin{tikzcd} \bar F\ar[d, dash] & \bar F\ar[d, dash] \\ F(\alpha) \ar[r, "\psi"]\ar[d, dash] & F(\beta) \ar[d, dash] \\ F \ar[r, "\id"] & F \\ \end{tikzcd} \end{center} This induces a map \begin{align*} F(\alpha)[x] &\mapsvia{\tau} F(\beta)[x] \\ \sum c_i x^i &\mapsto \sum \psi(c_i) x^i ,\end{align*} so \(x\mapsto x\) and \(\alpha \mapsto \beta\), so \(x\mapsto x\) and \(\alpha \mapsto \beta\). Then \(\tau(f(x)) = f(x)\) and \begin{align*} \tau((x-\alpha)^\nu) = (x-\beta)^\nu .\end{align*} So write \(f(x) = (x-\alpha)^\nu h(x)\), then \begin{align*} \tau(f(x)) = \tau((x-\alpha)^\nu) \tau(h(x)) .\end{align*} Since \(\tau(f(x)) = f(x)\), we then have \begin{align*} f(x) = (x-\beta)^\nu \tau(h(x)) .\end{align*} So we get \(\mathrm{mult}(\alpha) \leq \mathrm{mult}(\beta)\). But repeating the argument with \(\alpha, \beta\) switched yields the reverse inequality, so they are equal. 
\(\qed\) \emph{Observation:} If \(F(\alpha) \to E'\) extends the identity on \(F\), then \(E' = F(\beta)\) where \(\beta\) is a root of \(f \definedas \min(\alpha, F)\). Thus we have \begin{align*} \theset{F(\alpha): F} = \abs{\theset{\text{distinct roots of } f}} .\end{align*} Moreover, \begin{align*} [F(\alpha): F] = \theset{F(\alpha) : F} \nu \end{align*} where \(\nu\) is the multiplicity of a root of \(\min(\alpha, F)\). \textbf{Theorem:} Let \(E \geq F\), then \(\theset{E: F} \divides [E: F]\). \hypertarget{thursday-october-3rd}{% \section{Thursday October 3rd}\label{thursday-october-3rd}} When can we guarantee that there is a \(\tau: E\selfmap\) lifting the identity? If \(E\) is \emph{separable}, then we have \(\abs{ \Gal (E/F) } = \theset{E: F} [E: F]\). \textbf{Fact:} \(\{F(\alpha): F \}\) is equal to number of \emph{distinct} zeros of \(\mathrm{min}(\alpha, F)\). If \(F\) is algebraic, then \([F(\alpha): F]\) is the degree of the extension, and \(\theset{F(\alpha): F} \divides [F(\alpha): F]\). \textbf{Theorem:} Let \(E \geq F\) be finite, then \(\theset{E: F} \divides [E:F]\). \emph{Proof:} If \(E \geq F\) is finite, \(E = F(\alpha_1, \cdots, \alpha_n)\). So \(\mathrm{min}(\alpha_i, F)\) has \(a_j\) as a root, so let \(n_j\) be the number of distinct roots, and \(v_j\) the respective multiplicities. Then \begin{align*} [F: F(\alpha_1, \cdots, \alpha_{n-1})] = n_j v_j = v_j \theset{F: F(\alpha_1, \cdots, \alpha_{n-1})} .\end{align*} So \([E: F] = \prod_j n_j v_j\) and \(\theset{E:F} = \prod_j n_j\), and we obtain divisibility. \(\qed\) \textbf{Definitions:} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item An extension \(E \geq F\) is \textbf{separable} iff \([E:F] = \{E: F\}\) \item An element \(\alpha \in E\) is \textbf{separable} iff \(F(\alpha) \geq F\) is a separable extension. \item A polynomial \(f(x) \in F[x]\) is \textbf{separable} iff \(f(\alpha) = 0 \implies \alpha\) is separable over \(F\). 
\end{enumerate} \textbf{Lemma:} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(\alpha\) is separable over \(F\) iff \(\min(\alpha, F)\) has zeros of multiplicity one. \item Any irreducible polynomial \(f(x) \in F[x]\) is separable iff \(f(x)\) has zeros of multiplicity one. \end{enumerate} \emph{Proof of (1):} Note that \([F(\alpha): F] = \deg \min(\alpha, F)\), and \(\theset{F(\alpha): F}\) is the number of distinct zeros of \(\min(\alpha, F)\). Since all zeros have multiplicity 1, we have \([F(\alpha): F] = \theset{F(\alpha): F}\). \(\qed\) \emph{Proof of (2):} If \(f(x) \in F[x]\) is irreducible and \(\alpha\in \overline F\) a root, then \(\min(\alpha, F) \divides f(\alpha)\). But then \(f(x) = \ell \min(\alpha, F)\) for some constant \(\ell \in F\), since \(\min(\alpha, F)\) was monic and only had zeros of multiplicity one. \(\qed\) \textbf{Theorem:} If \(K \geq E \geq F\) and \([K:F] < \infty\), then \(K\) is separable over \(F\) iff \(K\) is separable over \(E\) and \(E\) is separable over \(F\). \emph{Proof:} \begin{align*} [K: F] &= [K:E] [E: F] \\ &= \{K:E\} \{E: F\} \\ &= \{K: F\} .\end{align*} \textbf{Corollary:} Let \(E \geq F\) be a finite extension. Then \begin{align*} E \text{ is separable over } F \iff \text{ Every } \alpha \in E \text{ is separable over } F .\end{align*} \emph{Proof:} \(\implies\): Suppose \(E \geq F\) is separable. Then \(E \geq F(\alpha) \geq F\) implies that \(F(\alpha)\) is separable over \(F\) and thus \(\alpha\) is separable. \(\impliedby\): Suppose every \(\alpha \in E\) is separable over \(F\). Since \(E = F(\alpha_1, \cdots, \alpha_n)\), build a tower of extensions over \(F\). For the first step, consider \(F(\alpha_1, \alpha_2) \to F(\alpha_1) \to F\). We know \(F(\alpha_1)\) is separable over \(F\). To see that \(F(\alpha_1, \alpha_2)\) is separable over \(F(\alpha_1)\), consider \(\alpha_2\). \(\alpha_2\) is separable over \(F \iff \min(\alpha_2, F)\) has roots of multiplicity one. 
Then \(\min(\alpha_2, F(\alpha_1)) \divides \min(\alpha_2, F)\), so \(\min(\alpha_2, F(\alpha))\) has roots of multiplicity one. Thus \(F(\alpha_1, \alpha_2)\) is separable over \(F(\alpha_1)\). \(\qed\) \hypertarget{perfect-fields}{% \subsection{Perfect Fields}\label{perfect-fields}} \textbf{Lemma:} \(f(x) \in F[x]\) has a multiple root \(\iff f(x), f'(x)\) have a nontrivial (multiple) common factor. \emph{Proof}: \(\implies\): Let \(K\geq F\) be an extension field of \(F\). Suppose \(f(x), g(x)\) have a common factor in \(K[x]\); then \(f,g\) also have a common factor in \(F[x]\). If \(f, g\) do not have a common factor in \(F[x]\), then \(\gcd(f, g) = 1\) in \(F[x]\), and we can find \(p(x), q(x) \in F[x]\) such that \(f(x)p(x) + g(x)q(x) = 1\). But this equation holds in \(K[x]\) as well, so \(\gcd(f, g) = 1\) in \(K[x]\). We can therefore assume that the roots of \(f\) lie in \(F\). Let \(\alpha\in F\) be a root of \(f\). Then \begin{align*} f(x) &= (x-\alpha)^m g(x) \\ f'(x) &= m(x-\alpha)^{m-1} g(x) + (x-\alpha)^m g'(x) .\end{align*} If \(\alpha\) is a multiple root, \(m > 2\), and thus \((x-\alpha) \divides f'\). \(\impliedby\): Suppose \(f\) does not have a multiple root. We can assume all of the roots are in \(F\), so we can split \(f\) into linear factors. So \begin{align*} f(x) = \prod_{i=1}^n (x-\alpha_i) \\ f'(x) = \sum_{i=1}^n \prod_{j\neq i} (x-\alpha_j) .\end{align*} But then \(f'(\alpha_k) = \prod{j\neq k} (x - \alpha_j) \neq 0\). Thus \(f, f'\) can not have a common root. \(\qed\) \begin{quote} Moral: we can thus test separability by taking derivatives. \end{quote} \textbf{Definition:} A field \(F\) is \emph{perfect} if every finite extension of \(F\) is separable. \textbf{Theorem}: Every field of characteristic zero is perfect. \emph{Proof:} Let \(F\) be a field with \(\mathrm{char}(F) = 0\), and let \(E \geq F\) be a finite extension. Let \(\alpha \in E\), we want to show that \(\alpha\) is separable. Consider \(f = \min(\alpha, F)\). 
We know that \(f\) is irreducible over \(F\), and so its only factors are \(1, f\). If \(f\) has a multiple root, then \(f, f'\) have a common factor in \(F[x]\). By irreducibility, \(f \divides f'\), but \(\deg f' < \deg f\), which implies that \(f'(x) = 0\). But this forces \(f(x) = c\) for some constant \(c\in F\), which means \(f\) has no roots -- a contradiction. So \(\alpha\) separable for all \(\alpha \in E\), so \(E\) is separable over \(F\), and \(F\) is thus perfect. \(\qed\) \textbf{Theorem:} Every finite field is perfect. \emph{Proof:} Let \(F\) be a finite field with \(\mathrm{char} F = p > 0\) and let \(E \geq F\) be finite. Then \(E = F(\alpha)\) for some \(\alpha\in E\), since \(E\) is a simple extension (look at \(E^*\)?) So \(E\) is separable over \(F\) iff \(\min(\alpha, F)\) has distinct roots. So \(E\units = E\setminus\theset{0}\), and so \(\abs{E} = p^n \implies \abs{E} = p^{n-1}\). Thus all elements of \(E\) satisfy \begin{align*} f(x) \definedas x^{p^n} - x \in \ZZ_p[x] .\end{align*} So \(\min(\alpha, F) \divides f(x)\). One way to see this is that \emph{every} element of \(E\) satisfies \(f\), since there are exactly \(p^n\) distinct roots. Another way is to note that \begin{align*} f'(x) = p^nx^{p^n - 1} - 1 = -1 \neq 0 .\end{align*} Since \(f(x)\) has no multiple roots, \(\min(\alpha, F)\) can not have multiple roots either. \(\qed\) \begin{quote} Note that \([E: F] < \infty \implies F(\alpha_1, \cdots, \alpha_n)\) for some \(\alpha_i \in E\) that are algebraic over \(F\). \end{quote} \hypertarget{primitive-elements}{% \subsection{Primitive Elements}\label{primitive-elements}} \textbf{Theorem (Primitive Element):} Let \(E\geq F\) be a finite extension and separable. Then there exists an \(\alpha \in E\) such that \(E = F(\alpha)\). \emph{Proof:} See textbook. \textbf{Corollary:} Every finite extension of a field of characteristic zero is simple. 
\hypertarget{tuesday-october-8th}{% \section{Tuesday October 8th}\label{tuesday-october-8th}} \hypertarget{splitting-fields}{% \subsection{Splitting Fields}\label{splitting-fields}} For \(\overline F \geq E \geq F\), we can use the lifting theorem to get a \(\tau: E \to E'\). What conditions guarantee that \(E = E'\)? If \(E = F(\alpha)\), then \(E' = F(\beta)\) for some \(\beta\) a conjugate of \(\alpha\). Thus we need \(E\) to contain conjugates of all of its elements. \textbf{Definition:} Let \(\theset{f_i(x) \in F[x] \suchthat i\in I}\) be any collection of polynomials. We way that \(E\) is a \textbf{splitting field} \(\iff E\) is the smallest subfield of \(\overline F\) containing all roots of the \(f_i\). \emph{Examples:} \begin{itemize} \item \(\QQ(\sqrt 2, \sqrt 3)\) is a splitting field for \(\theset{x^-2, x^2 - 5}\). \item \(\CC\) is a splitting field for \(\theset{x^2 + 1}\). \item \(\QQ(\sqrt[3] 2)\) is \emph{not} a splitting field for any collection of polynomials. \end{itemize} \textbf{Theorem:} Let \(F \leq E \leq \overline F\). Then \(E\) is a splitting field over \(F\) for some set of polynomials \(\iff\) every isomorphism of \(E\) fixing \(F\) is in fact an automorphism. \emph{Proof:} \(\implies:\) Let \(E\) be a splitting field of \(\theset{f_i(x) \suchthat f_i(x) \in F[x], i\in I}\). Then \(E = \generators{\alpha_j \mid j\in J}\) where \(\alpha_j\) are the roots of all of the \(f_i\). Suppose \(\sigma: E \to E'\) is an isomorphism fixing \(F\). Then consider \(\sigma(\alpha_j)\) for some \(j \in J\). We have \begin{align*} \min(\alpha, F) = p(x) = a_0 + a_1 x + \cdots a_{n-1}x^{n-1} + a_n x^n ,\end{align*} and so \begin{align*} p(x) = 0,~~ 0\in F \implies 0 = \sigma(p(\alpha_j)) = \sum_i a_i \sigma(\alpha_j)^i .\end{align*} Thus \(\sigma(\alpha_j)\) is a conjugate, and thus a root of some \(f_i(x)\). \(\impliedby:\) Suppose any isomorphism of \(E\) leaving \(F\) fixed is an automorphism. 
Let \(g(x)\) be an irreducible polynomial and \(\alpha \in E\) a root. \begin{center} \begin{tikzcd} \ar[d, dash] \bar F & \bar F \ar[d, dash] \\ E \ar[r, "\tau"]\ar[d, dash] & \ar[d, dash]E' {\color{blue} = E} \\ F(\alpha) \ar[r, "\id"]\ar[d, dash] & F(\beta) \ar[d, dash] \\ F \ar[r, "\id"] & F \\ \end{tikzcd} \end{center} Using the lifting theorem, where \(F(\alpha \leq E\), we get a map \(\tau: E \to E'\) lifting the identity and the conjugation homomorphism. But this says that \(E'\) must contain every conjugate of \(\alpha\). Therefore we can take the collection \begin{align*} S = \theset{g_i(x) \in F[x] \suchthat g_i \text{ irreducible and has a root in } E} .\end{align*} This defines a splitting field for \(\theset{g_j}\), and we're done. \(\qed\) \emph{Examples:} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item \(x^2 + 1 \in \RR[x]\) splits in \(\CC\), i.e.~\(x^2 + 1 = (x+i)(x-i)\). \item \(x^2 - 2 \in \QQ[x]\) splits in \(\QQ(\sqrt 2)\). \end{enumerate} \textbf{Corollary:} Let \(E\) be a splitting field over \(F\). Then every \textbf{irreducible} polynomial in \(F[x]\) with a root \(\alpha \in E\) splits in \(E[x]\). \textbf{Corollary:} The index \(\{ E: F \}\) (the number of distinct lifts of the identity). If \(E\) is a splitting field and \(\tau:E \to E'\) lifts the identity on \(F\), then \(E = E'\). Thus \(\{ E : F \}\) is the number of automorphisms, i.e.~\(\abs{\Gal(E/F)}\). \textbf{Question:} When is it the case that \begin{align*} [E: F] = \{E: F\} = \abs{\Gal(E/F)}? \end{align*} \begin{itemize} \item The first equality occurs when \(E\) is separable. \item The second equality occurs when \(E\) is a splitting field. \end{itemize} \begin{quote} Characteristic zero implies separability \end{quote} \textbf{Definition:} If \(E\) satisfies both of these conditions, it is said to be a \textbf{Galois extension}. Some cases where this holds: \begin{itemize} \item \(E \geq F\) a finite algebraic extension with \(E\) characteristic zero. 
\item \(E\) a finite field, since it is a splitting field for \(x^{p^n} - x\). \end{itemize} \emph{Example 1:} \(\QQ(\sqrt 2, \sqrt 5)\) is \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item A degree 4 extension, \item The number of automorphisms was 4, and \item The Galois group was \(\ZZ_2^2\), of size 4. \end{enumerate} \emph{Example 2}: \(E\) the splitting field of \(x^3 - 3\) over \(\QQ\). This polynomial has roots \(\sqrt[3] 3,~ \zeta_3 \sqrt[3] 3,~ \zeta_3^2 \sqrt[3] 3\) where \(\zeta_3^3 = 1\). Then \(E = \QQ(\sqrt[3] 3, \zeta_3)\), where \begin{align*} \min(\sqrt[3] 3, \QQ) &= x^3 - 3 \\ \min(\zeta_3, \QQ) &= x^2 + x + 1 ,\end{align*} so this is a degree 6 extension. Since \(\ch \QQ = 0\), we have \([E: \QQ] = \{E: \QQ\}\) for free. We know that any automorphism has to map \begin{align*} \sqrt[3] 3 &\mapsto \sqrt[3] 3,~ \sqrt[3] 3 \zeta_3,~ \sqrt[3] 3 \zeta_3^2 \\ \zeta_3 &\mapsto \zeta_3,~ \zeta_3^2 .\end{align*} You can show this is nonabelian by composing a few of these, thus the Galois group is \(S^3\). \emph{Example 3} If \([E: F] = 2\), then \(E\) is automatically a splitting field. Since it's a finite extension, it's algebraic, so let \(\alpha \in E\setminus F\). Then \(\min(\alpha, F)\) has degree 2, and thus \(E = F(\alpha)\) contains all of its roots, making \(E\) a splitting field. \hypertarget{the-galois-correspondence}{% \subsection{The Galois Correspondence}\label{the-galois-correspondence}} There are three key players here: \begin{align*} [E: F],\quad \{E: F\},\quad \Gal(E/F) .\end{align*} How are they related? \textbf{Definition:} Let \(E \geq F\) be a finite extension. \(E\) is \textbf{normal} (or Galois) over \(F\) iff \(E\) is a separable splitting field over \(F\). \emph{Examples:} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(\QQ(\sqrt 2, \sqrt 3)\) is normal over \(\QQ\). \item \(\QQ(\sqrt[3] 3)\) is not normal (not a splitting field of any irreducible polynomial in \(\QQ[x]\)). 
\item \(\QQ(\sqrt[3] 3, \zeta_3)\) is normal \end{enumerate} \textbf{Theorem:} Let \(F \leq E \leq K \leq \overline F\), where \(K\) is a finite normal extension of \(F\). Then \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(K\) is a normal extension of \(E\) as well, \item \(\Gal(K/E) \leq \Gal(K/F)\). \item For \(\sigma, \tau \in \Gal(K/F)\), \begin{align*} \sigma\mid_E = \tau\mid_E \iff \sigma, \tau \text{ are in the same left coset of }~ \frac{\Gal(K/F)}{\Gal(K/E)} .\end{align*} \end{enumerate} \emph{Proof of (1):} Since \(K\) is separable over \(F\), we have \(K\) separable over \(E\). Then \(K\) is a splitting field for polynomials in \(F[x] \subseteq E[x]\). Thus \(K\) is normal over \(E\). \(\qed\) \emph{Proof of (2):} \begin{center} \begin{tikzcd} K \ar[r, "\tau"]\ar[d, dash] & K\ar[d, dash] \\ E \ar[r, "\id"]\ar[d, dash] & E\ar[d, dash] \\ F \ar[r, "\id"] & F \\ \end{tikzcd} \end{center} So this follows by definition. \(\qed\) \emph{Proof of (3):} Let \(\sigma, \tau \in \Gal(K/F)\) be in the same left coset. Then \begin{align*} \tau\inv\sigma \in \Gal(K/E) ,\end{align*} so let \(\mu \definedas \tau\inv\sigma\). Note that \(\mu\) fixes \(E\) by definition. So \(\sigma = \tau \mu\), and thus \begin{align*} \sigma(e) = \tau(\mu(e)) = \tau(e) \text{ for all } e\in E .\end{align*} \(\qed\) \begin{quote} Note: We don't know if the intermediate field \(E\) is actually a \emph{normal} extension of \(F\). \end{quote} \begin{quote} \textbf{Standard example:} \(K \geq E \geq F\) where \begin{align*} K = \QQ(\sqrt[3] 3, \zeta_3)\quad E = \QQ(\sqrt[3] 3) \quad F = \QQ .\end{align*} Then \(K \normal E\) and \(K\normal F\), since \(\Gal(K/F) = S_3\) and \(\Gal(K/E) = \ZZ_2\). But \(E \ntrianglelefteq F\), since \(\ZZ_2 \ntrianglelefteq S_3\). 
\end{quote} \hypertarget{thursday-october-10th}{% \section{Thursday October 10th}\label{thursday-october-10th}} \hypertarget{computation-of-automorphisms}{% \subsection{Computation of Automorphisms}\label{computation-of-automorphisms}} Setup: \begin{itemize} \item \(F \leq E \leq K \leq \overline F\) \item \([K: F] < \infty\) \item \(K\) is a normal extension of \(F\) \end{itemize} \textbf{Facts:} \begin{itemize} \item \(\Gal(K/E) = \theset{\sigma \in \Gal(K/F) \suchthat \sigma(e) = e ~\forall e\in E}\). \item \(\sigma, \tau \in \Gal(K/F)\) and \(\restrictionof{\sigma}{E} = \restrictionof{\tau}{E} \iff \sigma, \tau\) are in the same left coset of \(\Gal(K/F) / \Gal(K/E)\). \end{itemize} \emph{Example}: \(K = \QQ(\sqrt 2, \sqrt 5)\). Then \(\Gal(K/\QQ) \cong \ZZ_2^2\), given by the following automorphisms: \begin{align*} \id: \sqrt 2 &\mapsto \sqrt 2, \quad& \sqrt 5 &\mapsto \sqrt 5 \\ \rho_1: \sqrt 2 &\mapsto \sqrt 2, \quad& \sqrt 5 &\mapsto -\sqrt 5 \\ \rho_2: \sqrt 2 &\mapsto -\sqrt 2, \quad& \sqrt 5 &\mapsto \sqrt 5 \\ \rho_1 \circ \rho_2: \sqrt 2 &\mapsto -\sqrt 2, \quad& \sqrt 5 &\mapsto -\sqrt 5 .\end{align*} We then get the following subgroup/subfield correspondence: \begin{center} \begin{tikzcd}[column sep=small] & & \ZZ_2^2 & & & & & & {\QQ(\sqrt 2, \sqrt 5)} \arrow[lldd] \arrow[dd] \arrow[rrdd] & & \\ & & & & & & & & & & \\ {\theset{\id, \rho_1}} \arrow[rruu] & & {\theset{\id, \rho_2}} \arrow[uu] & & {\theset{\id, \rho_1 \circ \rho_2}} \arrow[lluu] & & \QQ(\sqrt 2) \arrow[rrdd] & & \QQ(\sqrt 5) \arrow[dd] & & \QQ(\sqrt{10}) \arrow[lldd] \\ & & & & & & & & & & \\ & & \theset{\id} \arrow[lluu] \arrow[uu] \arrow[rruu] & & & & & & \RR & & \end{tikzcd} \end{center} \hypertarget{fundamental-theorem-of-galois-theory}{% \subsection{Fundamental Theorem of Galois Theory}\label{fundamental-theorem-of-galois-theory}} Recall that \(\definedas \Gal(K/E)\). 
\textbf{Theorem (Fundamental Theorem of Galois Theory):} Let \(\mathcal D\) be the collection of subgroups of \(\Gal(K/F)\) and \(\mathcal C\) be the collection of subfields \(E\) such that \(F \leq E \leq K\). Define a map \begin{align*} \lambda: \mathcal C &\to \mathcal D \\ \lambda(E) &\definedas \theset{\sigma \in \Gal(K/F) \mid \sigma(e) = e ~\forall e\in E} .\end{align*} Then \(\lambda\) is a bijective map, and \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(\lambda(E) = \Gal(K/E)\) \item \(E = K_{\lambda(E)}\) \item If \(H \leq \Gal(K/F)\) then \begin{align*} \lambda(K_H) = H \end{align*} \item \([K: E] = \abs{\lambda(E)}\) and \begin{align*} [E: F] = [\Gal(K/F): \lambda(E)] \end{align*} \item \(E\) is normal over \(F \iff \lambda(E) \normal \Gal(K/F)\), and in this case \begin{align*} \Gal(E/F) \cong \Gal(K/F) / \Gal(K/E) .\end{align*} \item \(\lambda\) is order-reversing, i.e. \begin{align*} E_1 \leq E_2 \implies \lambda(E_2) \leq \lambda(E_1) .\end{align*} \end{enumerate} \emph{Proof of 1:} Proved earlier. \(\qed\) \emph{Proof of 2:} We know that \(E \leq L_{\Gal(K/E)}\). Let \(\alpha \in K\setminus E\); we want to show that \(\alpha\) is not fixed by all automorphisms in \(\Gal(K/E)\). We build the following tower: \begin{center} \begin{tikzcd} K \arrow[rr, "\tau'", dotted] & & K \\ E(\alpha) \arrow[rr, "\tau"] \arrow[u] & & E(\beta) \arrow[u] \\ E \arrow[rr, "\id"] \arrow[u] & & E \arrow[u] \\ F \arrow[rr, "\id"] \arrow[u] & & F \arrow[u] \end{tikzcd} \end{center} This uses the isomorphism extension theorem, and the fact that \(K\) is normal over \(F\). If \(\beta\neq \alpha\), then \(\beta\) must be a conjugate of \(\alpha\), so \(\tau'(\alpha) \neq \alpha\) while \(\tau' \in \Gal(K/E)\). \(\qed\) \begin{quote} \textbf{Claim:} \(\lambda\) is injective. \emph{Proof:} Suppose \(\lambda(E_1) = \lambda(E_2)\). Then by (2), \(E_1 = K_{\lambda(E_1)} = K_{\lambda(E_2)} = E_2\). 
\(\qed\) \end{quote} \emph{Proof of 3:} We want to show that if \(H\leq \Gal(K/F)\) then \(\lambda(K_H) = H\). We know \(H \leq \lambda(K_H) = \Gal(K/K_H) \leq \Gal(K/F)\), so suppose \(H \lneq \lambda(K_H)\). Since \(K\) is a finite, separable extension, \(K = K_H(\alpha)\) for some \(\alpha \in K\). Let \begin{align*} n = [K: K_H] = {K: K_H} = \abs{\Gal(K/K_H)} .\end{align*} Since \(H \lneq \lambda(K_H)\), we have \(\abs{H} < n\). So denote \(H = \theset{\sigma, \sigma_2, \cdots}\) and let define \begin{align*} f(x) = \prod_i (x - \sigma_i(\alpha)) .\end{align*} We then have \begin{itemize} \item \(\deg f = \abs{H}\) \item The coefficients of \(f\) are symmetric polynomials in the \(\sigma_i(\alpha)\) and are fixed under any \(\sigma\in H\) \item \(f(x) \in K_H(\alpha)[x]\) \item \(f(\alpha) = 0\) since \(\sigma_i(\alpha) = \alpha\) for every \(i\). \end{itemize} This is a contradiction, so we must have \begin{align*} [K_H: K] = n = \deg \min(\alpha, K_H) \leq \deg f = \abs{H} .\end{align*} \(\qed\) \begin{quote} Assuming (3), \(\lambda\) is surjective, so suppose \(H < \Gal(K/F)\). Then \(\lambda(K_H) = H \implies \lambda\) is surjective. \end{quote} \emph{Proof of 4:} \begin{align*} \abs{\lambda(E)} &= \abs{\Gal(K/E)} =_{\text{splitting field}} [K: E] \\ [E: F] &=_{\text{separable}} \{E: F\} =_{\text{previous part}} [\Gal(K/F): \lambda(E)] .\end{align*} \emph{Proof of 5:} We have \(F\leq E \leq K\) and \(E\) is separable over \(F\), so \(E\) is normal over \(F \iff E\) is a splitting field over \(F\). That is, every extension \(E'/E\) maps \(K\) to itself, since \(K\) is normal. \begin{center} \begin{tikzcd} K & & K \\ E \arrow[u] & & E' \arrow[u] \\ F \arrow[rr, "id"] \arrow[u] & & F \arrow[u] \end{tikzcd} \end{center} So \(E\) is normal over \(F \iff\) for all \(\sigma \in \Gal(K/F), \sigma(\alpha) \in E\) for all \(\alpha \in E\). 
By a previous property, \(E = K_{\Gal(K/E)}\), and so \begin{align*} \sigma(\alpha) \in E &\iff \tau(\sigma(\alpha)) = \sigma(\alpha) &\quad \forall \tau \in \Gal(K/E) \\ &\iff (\sigma\inv \tau \sigma) (\alpha) = \alpha S&\quad \forall \tau \in \Gal(K/E) \\ &\iff \sigma\inv \tau\sigma \in \Gal(K/E) \\ &\iff \Gal(K/E) \normal \Gal(K/F) .\end{align*} Now assume \(E\) is a normal extension of \(F\), and let \begin{align*} \phi: \Gal(K/F) &\to \Gal(E/F) \\ \sigma &\mapsto \restrictionof{\sigma}{E} .\end{align*} Then \(\phi\) is well-defined precisely because \(E\) is normal over \(F\), and we can apply the extension theorem: \begin{center} \begin{tikzcd} K & & K \\ E \arrow[u] \arrow[rr, "\tau"] & & E \arrow[u] \\ F \arrow[rr, "\id"] \arrow[u] & & F \arrow[u] \end{tikzcd} \end{center} \(\phi\) is surjective by the extension theorem, and \(\phi\) is a homomorphism, so consider \(\ker \phi\). Let \(\phi(\sigma) = \restrictionof{\sigma}{E} = \id\). Then \(\phi\) fixes elements of \(E \iff \sigma \in \Gal(K/E)\), and thus \(\ker \phi = \Gal(K/E)\). \(\qed\) \emph{Proof of 6:} \begin{align*} E_1 \leq E_2 \iff &\Gal(K/E_2) \leq &\Gal(K/E_1) \\ & \shortparallel &\shortparallel \\ &\lambda(E_2) \leq &\lambda(E_1) .\end{align*} \begin{quote} Example: \(K = \QQ(\sqrt[3] 2, \zeta_3)\). Then \(\min(\zeta, \QQ) = x^2 + x + 1\) and \(\Gal(K/\QQ) = S_3\). There is a subgroup of order 2, \(E = \Gal(K/\QQ(\sqrt[3] 2)) \leq \Gal(K/\QQ)\), but \(E\) doesn't correspond to a normal extension of \(F\), so this subgroup is not normal. On the other hand, \(\Gal(\QQ(\zeta_3), \QQ) \normal \Gal(K/ \QQ)\). \end{quote} \hypertarget{tuesday-october-15th}{% \section{Tuesday October 15th}\label{tuesday-october-15th}} \hypertarget{cyclotomic-extensions}{% \subsection{Cyclotomic Extensions}\label{cyclotomic-extensions}} \textbf{Definition:} Let \(K\) denote the splitting field of \(x^n-1\) over \(F\). Then \(K\) is called the \textbf{\(n\)th cyclotomic extension of \(F\)}. 
If we set \(f(x) = x^n-1\), then \(f'(x) = nx^{n-1}\). So if \(\ch F\) does not divide \(n\), then the splitting field is separable. So this splitting field is in fact normal. Suppose that \(\ch F\) doesn't divide \(n\), then \(f(x)\) has \(n\) zeros, and let \(\zeta_1, \zeta_2\) be two zeros. Then \((\zeta_1 \zeta_2)^n = \zeta_1^n \zeta_2^n = 1\), so the product is a zero as well, and the roots of \(f\) form a subgroup in \(K\units\). So let's specialize to \(F = \QQ\). The roots of \(f\) are the \(n\)th roots of unity, i.e.~\(\zeta_n = e^{2\pi i / n}\), and are given by \(\theset{\zeta_n, \zeta_n^2, \zeta_n^3, \cdots, \zeta_n^{n-1}}\). The \emph{primitive} roots of unity are given by \(\theset{\zeta_n^m \suchthat \gcd(m, n) = 1}\). \textbf{Definition:} Let \begin{align*} \Phi_n(x) = \prod_{i=1}^{\varphi(n)} (x-\alpha_i) ,\end{align*} where this product runs over all of the primitive \(n\)th roots of unity. Let \(G\) be \(\Gal(K/\QQ)\). Then any \(\sigma\in G\) will permute the primitive \(n\)th roots of unity. Moreover, it \emph{only} permutes primitive roots, so every \(\sigma\) fixes \(\Phi_n(x)\). But this means that the coefficients must lie in \(\QQ\). Since \(\zeta\) generates all of the roots of \(\Phi_n\), we in fact have \(K = \QQ(\zeta)\). But what is the group structure of \(G\)? Since any automorphism is determined by where it sends a generator, we have automorphisms \(\tau_m(\zeta) = \zeta^m\) for each \(m\) such that \(\gcd(m, n) = 1\). But then \(\tau_{m_1} \circ \tau_{m_2} = \tau_{m_1 + m_2}\), and so \(G \cong G_m \leq \ZZ_n\) as a ring, where \begin{align*} G_m = \theset{[m] \suchthat \gcd(m, n) = 1} \end{align*} and \(\abs G = \varphi(n)\). \begin{quote} Note that as a \emph{set}, there are the units \(\ZZ_n\units\). \end{quote} \textbf{Theorem:} The Galois group of the \(n\)th cyclotomic extension over \(\QQ\) has \(\varphi(n)\) elements and is isomorphic to \(G_m\). \textbf{Special case}: \(n=p\) where \(p\) is a prime. 
Then \(\phi(p) = p-1\), and \begin{align*} \Phi_p(x) = \frac{x^p - 1}{x-1} = x^{p-1} + x^{p-2} + \cdots + x + 1 .\end{align*} Note that \(\ZZ_p\units\) is in fact cyclic, although this may not always happen. In this case, we have \(\Gal(K/\QQ) \cong \ZZ_p\units\). \hypertarget{construction-of-n-gons}{% \subsection{Construction of n-gons}\label{construction-of-n-gons}} To construct the vertices of an n-gon, we will need to construct the angle \(2\pi/n\), or equivalently, \(\zeta_n\). Note that if \([\QQ(\zeta_n) : \QQ] \neq 2^\ell\) for some \(\ell\in\NN\), then the \(n\dash\)gon is \emph{not} constructible. \emph{Example:} An 11-gon. Noting that \([\QQ(\zeta_{11}) : \QQ] = 10 \neq 2^\ell\), the 11-gon is not constructible. Since this is only a sufficient condition, we'll refine this. \textbf{Definition:} A prime of the form \(p = 2^{2^k}+1\) are called \textbf{Fermat primes}. \textbf{Theorem:} The regular \(n\dash\)gon is constructible \(\iff\) all odd primes dividing \(n\) are \emph{Fermat primes} \(p\) where \(p^2\) does not divide \(n\). \emph{Example:} Consider \begin{align*} \Phi_5(x) = x^4 + x^3 + x^2 + x + 1 .\end{align*} Then take \(\zeta = \zeta_5\); we then obtain the roots as \(\theset{1, \zeta, \zeta^2, \zeta^3, \zeta^4}\) and \(\QQ(\zeta)\) is the splitting field. Any automorphism is of the form \(\sigma_r: \zeta \mapsto \zeta^r\) for \(r=1,2,3,4\). So \(\abs{\Gal(K/\QQ)} = 4\), and is cyclic and thus isomorphic to \(\ZZ_4\). Corresponding to \(0 \to \ZZ_2 \to \ZZ_4\), we have the extensions \begin{align*} \QQ \to \QQ(\zeta^2) \to \QQ(\zeta) .\end{align*} How can we get a basis for the degree 2 extension \(\QQ(\zeta^2)/\QQ\)? 
Let \begin{align*} \lambda(E) = \theset{\sigma \in \Gal(\QQ(\zeta)/\QQ) \suchthat \sigma(e) = e ~\forall e\in E } ,\end{align*} \(\lambda(K_H) = H\) where \(H\) is a subgroup of \(\Gal(\QQ(\zeta)/\QQ)\), and \begin{align*} K_H = \theset{x\in K \suchthat \sigma(x) = x ~\forall \sigma\in H} .\end{align*} Note that if \(\ZZ_4 = \generators{\psi}\), then \(\ZZ_2 \leq \ZZ_4\) is given by \(\ZZ_2 = \generators{\psi^2}\). We can compute that if \(\psi(\zeta) = \zeta^2\), then \begin{align*} \psi^2(\zeta) &= \zeta\inv \\ \psi^2(\zeta^2) &= \zeta^{-2}\\ \psi^2(\zeta^3) &= \zeta^{-3} .\end{align*} Noting that \(\zeta_4\) is a linear combination of the other \(\zeta\)s, we have a basis \(\theset{1, \zeta, \zeta^2, \zeta^3}\). Then you can explicitly compute the fixed field by writing out \begin{align*} \sigma(a + b\zeta + c\zeta^2 + d\zeta^3) = a + b\sigma(\zeta) + c\sigma(\zeta^2) + \cdots ,\end{align*} gathering terms, and seeing how this restricts the coefficients. In this case, it yields \(\QQ(\zeta^2 + \zeta^3)\). \hypertarget{the-frobenius-automorphism}{% \subsection{The Frobenius Automorphism}\label{the-frobenius-automorphism}} \textbf{Definition:} Let \(p\) be a prime and \(F\) be a field of characteristic \(p>0\). Then \begin{align*} \sigma_p: F &\to F \\ \sigma_p(x) &= x^p \end{align*} is denoted the \emph{Frobenius map}. \textbf{Theorem:} Let \(F\) be a finite field of characteristic \(p > 0\). Then \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item \(\phi_p\) is an automorphism, and \item \(\phi_p\) fixes \(F_{\sigma_p} = \ZZ_p\). \end{enumerate} \emph{Proof of part 1:} Since \(\sigma_p\) is a field homomorphism, we have \begin{align*} \sigma_p(x+y) = (x+y)^p = x^p + y^p \text{ and } \sigma(xy) = (xy)^p = x^p y^p \end{align*} Note that \(\sigma_p\) is injective, since \(\sigma_p(x) =0 \implies x^p=0 \implies x=0\) since we are in a field. Since \(F\) is finite, \(\sigma_p\) is also surjective, and is thus an automorphism. 
\emph{Proof of part 2:} If \(\sigma(x) = x\), then \begin{align*} x^p = x \implies x^p-x = 0 ,\end{align*} which implies that \(x\) is a root of \(f(x) = x^p - x\). But these are exactly the elements in the prime ring \(\ZZ_p\). \(\qed\) \hypertarget{thursday-october-17th}{% \section{Thursday October 17th}\label{thursday-october-17th}} \hypertarget{example-galois-group-computation}{% \subsection{Example Galois Group Computation}\label{example-galois-group-computation}} \emph{Example:} What is the Galois group of \(x^4-2\) over \(\QQ\)? First step: find the roots. We can find directly that there are 4 roots given by \begin{align*} \theset{\pm \sqrt[4] 2, \pm i \sqrt[4] 2} \definedas \theset{r_i} .\end{align*} The splitting field will then be \(\QQ(\sqrt[4] 2, i)\), which is separable because we are in characteristic zero. So this is a normal extension. We can find some automorphisms: \begin{align*} \sqrt[4] 2 \mapsto r_i, \quad i \mapsto \pm i .\end{align*} So \(\abs G = 8\), and we can see that \(G\) can't be abelian because this would require every subgroup to be abelian and thus normal, which would force every intermediate extension to be normal. But the intermediate extension \(\QQ(\sqrt[4] 2)/\QQ\) is not a normal extension since it's not a splitting field. So the group must be \(D_4\). \(\qed\) \hypertarget{insolubility-of-the-quintic}{% \subsection{Insolubility of the Quintic}\label{insolubility-of-the-quintic}} \hypertarget{symmetric-functions}{% \subsubsection{Symmetric Functions}\label{symmetric-functions}} Let \(F\) be a field, and let \begin{align*} F(y_1, \cdots , y_n) = \theset{\frac{f(y_1, \cdots, y_n)}{g(y_1, \cdots, y_n)} \suchthat f, g \in F[y_1, \cdots, y_n]} \end{align*} be the set of \emph{rational} functions over \(F\). Then \(S_n \actson F(y_1, \cdots, y_n)\) by permuting the \(y_i\), i.e. 
\begin{align*} \sigma \left(\frac{ f(y_1, \cdots, y_n) }{ g(y_1, \cdots, y_n) }\right) = \frac{ f(\sigma(y_1), \cdots, \sigma(y_n)) }{ g(\sigma(y_1), \cdots, \sigma(y_n)) } .\end{align*} \textbf{Definition:} A function \(f \in F(\alpha_1, \cdots, \alpha_n)\) is \textbf{symmetric} \(\iff\) under this action, \(\sigma\actson f = f\) for all \(\sigma \in S_n\). \emph{Examples:} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item \(f(y_1, \cdots, y_n) = \prod y_i\) \item \(f(y_1, \cdots, y_n) = \sum y_i\). \end{enumerate} \hypertarget{elementary-symmetric-functions}{% \subsubsection{Elementary Symmetric Functions}\label{elementary-symmetric-functions}} Consider \(f(x) \in F(y_1, \cdots, y_n)[x]\) given by \(\prod (x-y_i)\). Then \(\sigma f = f\), so \(f\) is a symmetric function. Moreover, all coefficients are fixed by \(S_n\). So the coefficients themselves are symmetric functions. Concretely, we have \begin{longtable}[]{@{}ll@{}} \toprule Coefficient & Term\tabularnewline \midrule \endhead 1 & \((-1)^n\)\tabularnewline \(x^{n-1}\) & \(-y_1 - y_2 - \cdots - y_n\)\tabularnewline \(x^{n-2}\) & \(y_1y_2 + y_1y_3 + \cdots + y_2y_3 + \cdots\)\tabularnewline \bottomrule \end{longtable} The coefficient of \(x^{n-i}\) is referred to as the \emph{\(i\)th elementary symmetric function}. Consider an intermediate extension \(E\) given by joining all of the elementary symmetric functions: \includegraphics{figures/2019-10-17-09:56.png}\\ Let \(K\) denote the base field with \emph{all} symmetric functions adjoined; then \(K\) is an intermediate extension, and we have the following results: \textbf{Theorem}: \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(E \leq K\) is a field extension. \item \(E \leq F(y_1, \cdots, y_n)\) is a finite, normal extension since it is the splitting field of \(f(x) = \prod (x-y_i)\), which is separable. \end{enumerate} We thus have \begin{align*} [F(y_1, \cdots, y_n): E] \leq n! 
< \infty .\end{align*} \emph{Proof:} We'll show that in fact \(E = K\), so all symmetric functions are generated by the elementary symmetric functions. By definition of symmetric functions, \(K\) is exactly the fixed field \(F(y_1, \cdots, y_n)_{S_n}\), and \(\abs S_n = n!\). So we have \begin{align*} n! &= \abs{ \Gal(F(y_1, \cdots, y_n / K))} \\ & \leq \{F(y_1, \cdots, y_n) : K\} \\ & \leq [F(y_1, \cdots, y_n): K] .\end{align*} But now we have \begin{align*} n! \leq [F(y_1, \cdots, y_n):K] \leq [F(y_1, \cdots, y_n) : E] \leq n! \end{align*} which forces \(K=E\). \(\qed\) \textbf{Theorem}: \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item Every symmetric function can be written as a combination of sums, products, and possibly quotients of elementary symmetric functions. \item \(F(y_1, \cdots, y_n)\) is a finite normal extension of \(F(s_1, \cdots, s_n)\) of degree \(n!\). \item \(\Gal(F(y_1, \cdots, y_n) / F(s_1, \cdots, s_n)) \cong S_n\). \end{enumerate} We know that every group \(G \injects S_n\) by Cayley's theorem. So there exists an intermediate extension \begin{align*} F(s_1, \cdots, s_n) \leq L \leq F(y_1, \cdots, y_n) \end{align*} such that \(G = \Gal(F(y_1, \cdots, y_n) / L)\). \begin{quote} Open question: which groups can be realized as Galois groups over \(\QQ\)? Old/classic question, possibly some results in the other direction (i.e.~characterizations of which groups \emph{can't} be realized as such Galois groups). \end{quote} \hypertarget{extensions-by-radicals}{% \subsubsection{Extensions by Radicals}\label{extensions-by-radicals}} Let \(p(x) = \sum a_i x^i \in \QQ[x]\) be a polynomial of degree \(n\). Can we find a formula for the roots as a function of the coefficients, possibly involving radicals? \begin{itemize} \item For \(n = 1\) this is clear \item For \(n=2\) we have the quadratic formula. \item For \(n = 3\), there is a formula by work of Cardano. \item For \(n = 4\), this is true by work of Ferrari. 
\item For \(n \geq 5\), there can \textbf{not} be a general equation. \end{itemize} \textbf{Definition:} Let \(K \geq F\) be a field extension. Then \(K\) is an \textbf{extension of \(F\) by radicals} (or a \textbf{radical extension}) \(\iff\) \(K = \alpha_1, \cdots, \alpha_n\) for some \(\alpha_i\) such that \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item Each \(\alpha_i^{m_i} \in F\) for some \(m_i > 0\). \item For each \(i\), \(\alpha_i^{\ell_i} \in F(\alpha_1, \cdots, \alpha_{i-1})\) for some \(\ell_i < m_i\) (?). \end{enumerate} \textbf{Definition:} A polynomial \(f(x) \in F[x]\) is \textbf{solvable by radicals} over \(F\) \(\iff\) the splitting field of \(f\) is contained in some radical extension. \emph{Example:} Over \(\QQ\), the polynomials \(x^5-1\) and \(x^3-2\) are solvable by radicals. Recall that \(G\) is \emph{solvable} if there exists a normal series \begin{align*} 1 \normal H_1 \normal H_2 \cdots \normal H_n \normal G \text{ such that } H_n/H_{n-1} \text{ is abelian } \forall n .\end{align*} \hypertarget{the-splitting-field-of-xn-a-is-solvable}{% \subsubsection{\texorpdfstring{The Splitting Field of \(x^n-a\) is Solvable}{The Splitting Field of x\^{}n-a is Solvable}}\label{the-splitting-field-of-xn-a-is-solvable}} \textbf{Lemma}: Let \(\ch F = 0\) and \(a\in F\). If \(K\) is the splitting field of \(p(x) = x^n-a\), then \(\Gal(K/F)\) is a solvable group. \emph{Example:} Let \(p(x) = x^4-2 / \QQ\), which had Galois group \(D_4\). \emph{Proof:} Suppose that \(F\) contains all \(n\)th roots of unity, \(\theset{1, \zeta, \zeta^2, \cdots, \zeta^[n-1]}\) where \(\zeta\) is a primitive \(n\)th root of unity. If \(\beta\) is any root of \(p(x)\), then \(\zeta^i\beta\) is also a root for any \(1\leq i \leq n-1\). This in fact yields \(n\) distinct roots, and is thus all of the them. Since the splitting field \(K\) is of the form \(F(\beta)\), then if \(\sigma \in \Gal(K/F)\), then \(\sigma(\beta) = \zeta^i \beta\) for some \(i\). 
Then if \(\tau \in \Gal(K/F)\) is any other automorphism, then \(\tau(\beta) = \zeta^k \beta\) and thus (exercise) the Galois group is abelian and thus solvable. Suppose instead that \(F\) does not contain all \(n\)th roots of unity. So let \(F' = F(\zeta)\), so \(F \leq F(\zeta) = F' \leq K\). Then \(F \leq F(\zeta)\) is a splitting field (of \(x^n-1\)) and separable since we are in characteristic zero and this is a finite extension. Thus this is a normal extension. We thus have \(\Gal(K/F) / \Gal(K/F(\zeta)) \cong \Gal(F(\zeta)/ F)\). We know that \(\Gal(F(\zeta)/ F)\) is abelian since this is a cyclotomic extension, and so is \(\Gal(K/F(\zeta))\). We thus obtain a normal series \begin{align*} 1 \normal \Gal(K/F(\zeta)) \normal \Gal(K/F) \end{align*} Thus we have a solvable group. \(\qed\) \hypertarget{tuesday-october-22nd}{% \section{Tuesday October 22nd}\label{tuesday-october-22nd}} \hypertarget{certain-radical-extensions-are-solvable}{% \subsection{Certain Radical Extensions are Solvable}\label{certain-radical-extensions-are-solvable}} Recall the definition of an extension being \emph{radical} (see above). We say that a polynomial \(f(x) \in K[x]\) is \emph{solvable by radicals} iff its splitting field \(L\) is a radical extension of \(K\). \textbf{Lemma:} Let \(F\) be a field of characteristic zero. If \(K\) is a splitting field of \(f(x) = x^n - a \in F[x]\), then \(\Gal(K/F)\) is a solvable group. \textbf{Theorem:} Let \(F\) be characteristic zero, and suppose \(F \leq E \leq K \leq \overline F\) be algebraic extensions where \(E/F\) is normal and \(K\) a radical extension of \(F\). Moreover, suppose \([K:F] < \infty\). Then \(\Gal(E/F)\) is solvable. \emph{Proof:} The claim is that \(K\) is contained in some \(L\) where \(F \subset L\), \(L\) is a finite normal radical extension, and \(\Gal(L/F)\) is solvable.
Since \(K\) is a radical extension of \(F\), we have \(K = F(\alpha_1, \cdots, \alpha_n)\) and \(\alpha_i^{n_i} \in F(\alpha_1, \cdots, \alpha_{i-1})\) for each \(i\) and some \(n_i \in \NN\). Let \(L_1\) be the splitting field of \(f_1(x) = x^{n_1} - \alpha_1^{n_1}\), then by the previous lemma, \(L_1\) is a normal extension and \(\Gal(L_1/F)\) is a solvable group. Inductively continue this process, and letting \begin{align*} f_2(x) = \prod_{\sigma \in \Gal(L_1/F)} \left( x^{n_2} - \sigma(\alpha_2)^{n_2} \right) \in F[x] .\end{align*} Note that the action of the Galois group on this polynomial is stable. Let \(L_2\) be the splitting field of \(f_2\), then \(L_2\) is a finite normal radical extension. Then \begin{align*} \frac{ \Gal(L_2/F) }{ \Gal(L_2/L_1) } \cong \Gal(L_1/F) ,\end{align*} which is solvable, and the denominator in this quotient is solvable, so the total group must be solvable as well. \(\qed\) \hypertarget{proof-insolubility-of-the-quintic}{% \subsection{Proof: Insolubility of the Quintic}\label{proof-insolubility-of-the-quintic}} \textbf{Theorem (Insolubility of the quintic):} Let \(y_1, \cdots, y_n\) be independent transcendental elements in \(\RR\), then the polynomial \(f(x) = \prod (x-y_i)\) is not solvable by radicals over \(\QQ(s_1, \cdots, s_n)\) where the \(s_i\) are the elementary symmetric polynomials in \(y_i\). \begin{quote} So there are no polynomial relations between the transcendental elements. \end{quote} \emph{Proof:} Let \(n\geq 5\) and suppose the \(y_i\) are transcendental and algebraically independent over \(\QQ\). Then consider \begin{align*} s_1 &= \sum y_i \\ s_2 &= \sum_{i < j} y_i y_j \\ \cdots \\ s_n &= \prod_i y_i .\end{align*} Then \(\QQ(y_1, \cdots, y_n)/ \QQ(s_1, \cdots, s_n)\) would be a normal extension precisely if \(A_n \normal S_n\) (by previous theorem). For \(n\geq 5\), \(A_n\) is simple, and thus \(S_n\) is not solvable in this range.
Thus the polynomial is not solvable by radicals, since the splitting field of \(f(x)\) is \(\QQ(y_1, \cdots, y_n)\). \(\qed\) \hypertarget{rings-and-modules}{% \subsection{Rings and Modules}\label{rings-and-modules}} Recall that a ring is given by \((R, +, \cdot)\), where \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item \((R, +)\) is an abelian group, \item \((R, \cdot)\) is a monoid, \item The distributive laws hold. \end{enumerate} An \emph{ideal} is a certain type of subring that allows taking quotients, and is defined by \(I \normal R \iff I\leq R\) and \(RI, IR \subseteq I\). The quotient is given by \(R/I = \theset{r + I \suchthat r\in R}\), and the ideal property is what makes this well-defined. Much like groups, we have some notion of homomorphism \(\phi: R\to R'\), where \(\phi(ax+y) = \phi(a)\phi(x) + \phi(y)\). \hypertarget{modules}{% \subsubsection{Modules}\label{modules}} We want to combine the following two notions: \begin{itemize} \item Groups acting on sets, and \item Vector spaces \end{itemize} \textbf{Definition:} Let \(R\) be a ring and \(M\) an abelian group. Then if there is a map \begin{align*} R\cross M &\to M \\ (r,m) &\mapsto rm .\end{align*} such that \(\forall s,r_1,r_2 \in R\) and \(m_1,m_2 \in M\) we have \begin{itemize} \tightlist \item \((sr_1 + r_2)(m_1 + m_2) = sr_1m_1 + sr_1m_2 + r_2m_1 + r_2 m_2\) \item \(1\in R \implies 1m = m\). \end{itemize} then \(M\) is said to be an \textbf{\(R\dash\)module.} \begin{quote} Think of \(R\) like the group acting by scalar multiplication, and \(M\) the set of vectors with vector addition. \end{quote} \emph{Examples:} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(R = k\) a field, then a \(k\dash\)module is a vector space. \item \(M = G\) an abelian group, then \(G\) is a \(\ZZ\dash\)module where \begin{align*} n\actson a \definedas \sum_{i=1}^n a .\end{align*} \end{enumerate} \begin{quote} (In fact, these two notions are equivalent.)
\end{quote} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \setcounter{enumi}{2} \item \(I \normal R\), then \(M \definedas R/I\) is a ring, which has an underlying abelian group, so \(M\) is an \(R\dash\)module where \begin{align*} R\actson M \text{ by } r\actson(s+I) \definedas (rs) + I .\end{align*} \item For \(M\) an abelian group, \(R \definedas \mathrm{End}(M) = \hom_{\text{AbGrp}}(M, M)\) is a ring, and \(M\) is a left \(R\dash\)module given by \begin{align*} f\actson m \definedas f(m) .\end{align*} \end{enumerate} \textbf{Definition:} Let \(M, N\) be left \(R\dash\)modules. Then \(f: M \to N\) is an \(R\dash\)module homomorphism \(\iff\) \begin{align*} f(rm_1 + m_2) = rf(m_1) + f(m_2) .\end{align*} \textbf{Definition:} \emph{Monomorphisms} are injective maps, \emph{epimorphisms} are surjections, and \emph{isomorphisms} are both. \textbf{Definition}: A \emph{submodule} \(N\leq M\) is a subset that is closed under all module operations. We can consider images, kernels, and inverse images, so we can formulate homomorphism theorems analogous to what we saw with groups/rings: \textbf{Theorem:} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item If \(M \mapsvia{f} N\) in \(R\dash\)mod, then \begin{align*} M / \ker(f) \cong \im(f) .\end{align*} \item Let \(M, N \leq L\), then \(M+N \leq L\) as well, and \begin{align*} \frac{M}{M\intersect N} \cong \frac{M+N}{N} .\end{align*} \item If \(N\leq M\leq L\), then \begin{align*} \frac{L/N}{M/N} \cong \frac{L}{M} \end{align*} \end{enumerate} \begin{quote} Note that we can always quotient, since there's an underlying abelian group, and thus the ``normality''/ideal condition is always satisfied for submodules. Just consider \begin{align*} M/N \definedas \theset{m + N \suchthat m\in M} ,\end{align*} then \(R\actson (M/N)\) in a well-defined way that gives \(M/N\) the structure of an \(R\dash\)module as well.
\end{quote} \hypertarget{thursday-october-24}{% \section{Thursday October 24}\label{thursday-october-24}} \hypertarget{conjugates-1}{% \subsection{Conjugates}\label{conjugates-1}} Let \(E\geq F\). Then \(\alpha, \beta \in E\) are \textbf{conjugate} iff \(\min(\alpha, F) = \min(\beta, F)\). \emph{Example:} \(a \pm bi \in \CC\). \textbf{Theorem:} Let \(F\) be a field and \(\alpha, \beta \in \overline F\) with \(\deg \min (\alpha, F) = \deg \min (\beta, F)\), so \begin{align*} [F(\alpha): F] = [F(\beta): F] .\end{align*} Then \(\alpha, \beta\) are conjugates \(\iff\) \(F(\alpha) \cong F(\beta)\) under the \emph{conjugation map}, \begin{align*} \psi: F(\alpha) &\to F(\beta) \\ \sum_{i=0}^{n-1} a_i \alpha^i &\mapsto \sum_{i=0}^{n-1} a_i \beta^i .\end{align*} \emph{Proof:} \(\impliedby\): Suppose that \(\psi\) is an isomorphism. Let \(\min(\alpha, F) = p(x) = \sum c_i x^i\) where each \(c_i \in F\). Then \begin{align*} 0 = \psi(0) = \psi(p(\alpha)) = p(\beta) \implies \min(\beta, F) \divides \min(\alpha, F) .\end{align*} Applying the same argument to \(q(x) = \min(\beta, F)\) yields \(\min(\beta, F) = \min(\alpha, F)\). \(\implies\): Suppose \(\alpha, \beta\) are conjugates. \emph{Exercise:} Check that \(\psi\) is surjective and \begin{align*} \psi(x+y) = \psi(x) + \psi(y) \\ \psi(xy) = \psi(x) \psi(y) .\end{align*} Let \(z = \sum a_i \alpha^i\). Supposing that \(\psi(z) = 0\), we have \(\sum a_i \beta^i = 0\). By linear independence, this forces \(a_i = 0\) for all \(i\), and thus \(z=0\). So \(\psi\) is injective. \(\qed\) \textbf{Corollary:} Let \(\alpha \in \overline F\) be algebraic. Then \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item Any \(\phi: F(\alpha) \injects \overline F\) such that \(\phi(f) = f\) for all \(f\in F\) must map \(\alpha\) to a conjugate. \item If \(\beta \in \overline F\) is a conjugate of \(\alpha\), then there exists an isomorphism \(\phi: F(\alpha) \to F(\beta) \subseteq \overline F\) such that \(\phi(f) = f\) for all \(f\in F\).
\end{enumerate} \emph{Proof of 1:} Let \(\min(\alpha, F) = p(x) = \sum a_i x^i\). Note that \(0 = \psi(p(\alpha)) = p(\psi(\alpha))\), and since \(p\) was irreducible, \(p\) must also be the minimal polynomial of \(\psi(\alpha)\). Thus \(\psi(\alpha)\) is a conjugate of \(\alpha\). \(\qed\) \emph{Proof of 2:} \(F(\alpha)\) is generated by \(F\) and \(\alpha\), and \(\psi\) is completely determined by where it sends \(F\) and \(\alpha\). This shows uniqueness. \(\qed\) \textbf{Corollary:} Let \(f(x) \in \RR[x]\) and suppose \(f(a+bi)= 0\). Then \(f(a-bi) = 0\). \emph{Proof:} Both \(i, -i\) are conjugates and \(\min(i, \RR) = \min(-i, \RR) = x^2 + 1 \in \RR[x]\). We then have a map \begin{align*} \psi: \RR[i] &\to \RR[-i] \\ a+bi &\mapsto a + b(-i) .\end{align*} So if \(f(a+bi) = 0\), then \(0 = \psi(f(a+bi)) = f(\psi(a+bi)) = f(a-bi)\). \(\qed\) \hypertarget{october-27th}{% \section{October 27th}\label{october-27th}} \hypertarget{modules-1}{% \subsection{Modules}\label{modules-1}} Let \(R\) be a ring and \(M\) be an \(R\dash\)module. \textbf{Definition:} For a subset \(X\subseteq M\), we can define the \emph{submodule generated by \(X\)} as \begin{align*} \generators{X} \definedas \intersect_{X \subseteq N \leq M} N \subseteq M .\end{align*} Then \(M\) is generated by \(X\) iff \(M = \generators{X}\).
As a special case, when \(X = \theset{m}\) consists of a single element, we write \begin{align*} \generators{m} = Rm \definedas \theset{rm \suchthat r\in R} .\end{align*} In general, we have \begin{align*} \generators{X} = \theset{\sum r_i x_i \mid r_i \in R, x_i \in X} .\end{align*} \hypertarget{direct-products-and-direct-sums}{% \subsection{Direct Products and Direct Sums}\label{direct-products-and-direct-sums}} \textbf{Definition:} Let \(\theset{M_i}\) be a finite collection of \(R\dash\)modules, and let \begin{align*} N = \bigoplus M_i = \theset{\sum m_i \mid m_i \in M_i} \end{align*} with multiplication given by \(\gamma \sum m_i = \sum \gamma m_i\) denote the \textbf{direct sum}. For an infinite collection, we require that all but finitely many terms are zero. \textbf{Definition:} Let \(N = \prod M_i\) denote the \textbf{direct product}, where we now drop the condition that all but finitely many terms are zero. When the indexing set is finite, \(\bigoplus M_i \cong \prod M_i\). In general, \(\bigoplus M_i \injects \prod M_i\). Note that the natural inclusions \begin{align*} \iota_j: M_j \injects \prod M_i \end{align*} and projections \begin{align*} \pi_j: \prod M_i \surjects M_j \end{align*} are both \(R\dash\)module homomorphisms. Theorem: \(M \cong \bigoplus M_i\) iff there exist maps \(\pi_j: M \to M_j\) and \(\iota_j: M_j \to M\) such that \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \begin{align*} \pi_j \circ \iota_k = \begin{cases} \id_{M_j} & j=k \\ 0 & \text{else}\end{cases} \end{align*} \item \(\sum_j \iota_j \circ \pi_j = \id_M\) \end{enumerate} \textbf{Remark:} Let \(M, N\) be \(R\dash\)modules. Then \(\hom_{R-\text{mod}}(M, N)\) is an abelian group.
\hypertarget{internal-direct-sums}{% \subsection{Internal Direct Sums}\label{internal-direct-sums}} For a collection of submodules of \(M\) given by \(\theset{M_i}\), denote the \emph{internal direct sum} \begin{align*} \sum M_i \definedas \theset{m_1 + m_2 + \cdots \mid m_i \in M_i} \end{align*} iff it satisfies the following conditions: \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(M = \sum_i M_i\) \item \(M_i \intersect \sum_{j\neq i} M_j = \theset{0}\) for each \(i\). \end{enumerate} \hypertarget{exact-sequences}{% \subsection{Exact Sequences}\label{exact-sequences}} \textbf{Definition:} A sequence of the form \begin{align*} 0 \to M_1 \mapsvia{i} M_2 \mapsvia{p} M_3 \to 0 \end{align*} where \begin{itemize} \item \(i\) is a monomorphism \item \(p\) is an epimorphism \item \(\im i = \ker p\) \end{itemize} is said to be \textbf{short exact}. \emph{Examples}: \begin{itemize} \item \begin{align*} 0 \to 2\ZZ \injects \ZZ \surjects \ZZ/2\ZZ \to 0 \end{align*} \item For any epimorphism \(\pi: M\to N\), \begin{align*} 0 \to \ker \pi \to M \to N \to 0 \end{align*} \item \begin{align*} 0 \to M_1 \to M_1 \oplus M_2 \to M_2 \to 0 \end{align*} \end{itemize} In general, any sequence \begin{align*} \cdots \to M_i \mapsvia{f_i} M_{i+1} \mapsvia{f_{i+1}} \cdots \end{align*} is \textbf{exact} iff \(\im f_i = \ker f_{i+1}\). \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item If \(\alpha, \gamma\) are monomorphisms then \(\beta\) is a monomorphism. \end{enumerate} \hypertarget{tuesday-october-29th}{% \section{Tuesday October 29th}\label{tuesday-october-29th}} \hypertarget{exact-sequences-1}{% \subsection{Exact Sequences}\label{exact-sequences-1}} \textbf{Lemma (Short Five):} Consider a diagram of the following form: \begin{center}\includesvg[width=\linewidth]{a759b2d9d1e1bc93ce916c766a1615c329007618}\end{center} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(\alpha, \gamma\) monomorphisms implies \(\beta\) is a monomorphism.
\item \(\alpha, \gamma\) epimorphisms implies \(\beta\) is an epimorphism. \item \(\alpha, \gamma\) isomorphisms implies \(\beta\) is an isomorphism. \end{enumerate} Moreover, (1) and (2) together imply (3). \emph{Proof:} Exercise. \emph{Example proof of (1)}: Suppose \(\alpha, \gamma\) are monomorphisms. \begin{itemize} \tightlist \item Let \(n\in N\) with \(\beta(n) = 0\), then \(g' \circ \beta(n) = 0\). \item \(\implies \gamma \circ g (n) = 0\). \item \(\implies g(n) = 0\) \item \(\implies \exists m\in M\) such that \(f(m) = n\) \item \(\implies \beta \circ f (m) = \beta(n)\) \item \(\implies f' \alpha(m) = \beta (n) = 0\) \item \(\implies \alpha(m) = 0\), since \(f'\) is injective \item \(\implies m=0\), since \(\alpha\) is injective, and thus \(n=f(m) = 0\). \end{itemize} \(\qed\) \textbf{Definition:} Two exact sequences are \emph{isomorphic} iff in the following diagram, \(f,g,h\) are all isomorphisms: \begin{center} \begin{tikzcd} 0 \arrow[r] & M \arrow[dd, "f"] \arrow[r] & N \arrow[dd, "g"] \arrow[r] & Q \arrow[dd, "h"] \arrow[r] & 0 \\ & & & & \\ 0 \arrow[r] & M \arrow[r] & N \arrow[r] & Q \arrow[r] & 0 \end{tikzcd} \end{center} \textbf{Theorem:} Let \(0 \to M_1 \mapsvia f M_2 \mapsvia g M_3 \to 0\) be a SES. Then TFAE: \begin{itemize} \item There exists an \(R\dash\)module homomorphism \(h: M_3 \to M_2\) such that \(g\circ h = \id_{M_3}\). \item There exists an \(R\dash\)module homomorphism \(k: M_2 \to M_1\) such that \(k\circ f = \id_{M_1}\). \item The sequence is isomorphic to \(0 \to M_1 \to M_1 \oplus M_3 \to M_3 \to 0\). \end{itemize} \emph{Proof:} Define \(\phi: M_1 \oplus M_3 \to M_2\) by \(\phi(m_1 + m_3) = f(m_1) + h(m_3)\).
We need to show that the following diagram commutes: \begin{center} \begin{tikzcd} 0 \arrow[r] & M_1 \arrow[dd, "\id", latex'-latex',double,thin] \arrow[r] & M_1 \oplus M_3 \arrow[r] & M_3 \arrow[dd, "\id", latex'-latex',double,thin] \arrow[r] & 0 \\ & & & & \\ 0 \arrow[r] & M_1 \arrow[r] & M_2 \arrow[uu, "\phi"'] \arrow[r] & M_3 \arrow[r] & 0 \end{tikzcd} \end{center} We can check that \begin{align*} (g\circ \phi)(m_1 + m_2) = g( f(m_1)) + g(h(m_2)) = m_2 = \pi(m_1 + m_2).\end{align*} This yields \(1 \implies 3\), and \(2 \implies 3\) is similar. To see that \(3 \implies 1, 2\), we attempt to define \(k, h\) in the following diagram: \begin{center} \begin{tikzcd} 0 \arrow[r] & M_1 \arrow[r] \arrow[dd, "\id", latex'-latex',double,thin] & M_1 \oplus M_3 \arrow[r] \arrow[l, "\pi_1"', bend right] & M_3 \arrow[dd, "\id", latex'-latex',double,thin] \arrow[l, "\iota_2"', bend right] \arrow[r] & 0 \\ & & & & \\ 0 \arrow[r] & M_1 \arrow[r] & M_2 \arrow[r] \arrow[uu, "\phi"'] \arrow[l, "k", bend left] & M_3 \arrow[r] \arrow[l, "h", bend left] \arrow[r] & 0 \end{tikzcd} \end{center} So define \(k = \pi_1 \circ \phi\inv\) and \(h = \phi \circ \iota_2\). It can then be checked that \begin{align*} g \circ h = g \circ \phi \circ \iota_2 = \pi_2 \circ \iota_2 = \id_{M_3} .\end{align*} \(\qed\) \hypertarget{free-modules}{% \subsection{Free Modules}\label{free-modules}} \begin{quote} Moral: A \emph{free module} is a module with a basis. \end{quote} \textbf{Definition:} A subset \(X = \theset{x_i}\) is \emph{linearly independent} iff \begin{align*} \sum r_i x_i = 0 \implies r_i = 0 ~\forall i .\end{align*} \textbf{Definition:} A subset \(X\) \emph{spans} \(M\) iff \begin{align*} m\in M \implies m = \sum_{i=1}^n r_i x_i \quad \text{ for some }r_i \in R,~x_i \in X .\end{align*} \textbf{Definition:} A subset \(X\) is a basis \(\iff\) it is a linearly independent spanning set. 
\emph{Example:} \(\ZZ_6\) is an abelian group and thus a \(\ZZ\dash\)module, but not free because \(3 \actson [2] = [6] = 0\), so there are torsion elements. This contradicts linear independence for any subset. \textbf{Theorem (Characterization of Free Modules):} Let \(R\) be a unital ring and \(M\) a unital \(R\dash\)module (so \(1\actson m = m\)). TFAE: \begin{itemize} \item There exists a nonempty basis of \(M\). \item \(M = \oplus_{i\in I} R\) for some index set \(I\). \item There exists a non-empty set \(X\) and a map \(\iota: X \injects M\) such that given \(f: X \to N\) for \(N\) any \(R\dash\) module, \(\exists! \tilde f: M \to N\) such that the following diagram commutes. \end{itemize} \begin{center} \begin{tikzcd} M \arrow[rrdd, "\exists! \tilde f", dotted] & & \\ & & \\ X \arrow[rr, "f"] \arrow[uu, "\iota", hook] & & N \end{tikzcd} \end{center} \textbf{Definition:} An \(R\dash\)module is \emph{free} iff any of 1,2, or 3 hold. \emph{Proof of \(1 \implies 2\):} Let \(X\) be a basis for \(M\), then define \(M \to \oplus_{x\in X} Rx\) by \(\phi(m) = \sum r_i x_i\). It can be checked that \begin{itemize} \item This is an \(R\dash\)module homomorphism, \item \(\phi(m) = 0 \implies r_j = 0 ~\forall j \implies m = 0\), so \(\phi\) is injective, \item \(\phi\) is surjective, since \(X\) is a spanning set. \end{itemize} So \(M \cong \bigoplus_{x\in X} Rx\), so it only remains to show that \(Rx \cong R\). We can define the map \begin{align*} \pi_x: R &\to Rx \\ r &\mapsto rx .\end{align*} Then \(\pi_x\) is onto, and is injective exactly because \(X\) is a linearly independent set. Thus \(M \cong \oplus R\). \(\qed\) \emph{Proof of \(1 \implies 3\):} Let \(X\) be a basis, and suppose there are two maps \(X \mapsvia{\iota} M\) and \(X \mapsvia{f} M\). 
Then define \begin{align*} \tilde f: M &\to N \\ \sum_i r_i x_i &\mapsto \sum_i r_i f(x_i) .\end{align*} This is clearly an \(R\dash\)module homomorphism, and the diagram commutes because \((\tilde f \circ \iota)(x) = f(x)\). This is unique because \(\tilde f\) is determined precisely by \(f(X)\). \(\qed\) \emph{Proof of \(3 \implies 2\):} We use the usual ``2 diagram'' trick to produce maps \begin{align*} \tilde f: M \to \bigoplus_{x\in X} R \\ \tilde g: \bigoplus_{x\in X}R \to M .\end{align*} Then commutativity forces \begin{align*} \tilde f \circ \tilde g = \tilde g \circ \tilde f = \id .\end{align*} \(\qed\) \emph{Proof of \(2 \implies 1\):} We have \(M = \oplus_{i\in I} R\) by (2). So there exists a map \begin{align*} \psi: \oplus_{i\in I} R \to M ,\end{align*} so let \(X \definedas \theset{\psi(1_i) \mid i\in I}\), which we claim is a basis. To see that \(X\) is a basis, suppose \(\sum r_i \psi(1_i) = 0\). Then \(\psi(\sum r_i 1_i) = 0\) and thus \(\sum r_i 1_i = 0\) and \(r_i = 0\) for all \(i\). Checking that it's a spanning set: Exercise. \(\qed\) \textbf{Corollary:} Every \(R\dash\)module is the homomorphic image of a free module. \emph{Proof:} Let \(M\) be an \(R\dash\)module, and let \(X\) be any set of generators of \(M\). Then \(\bigoplus_{x\in X} R\) is free, and there is a map \(X \injects M\), so the universal property provides a map \begin{align*} \tilde f: \bigoplus_{x\in X} R \to M ,\end{align*} which is surjective since \(X\) generates \(M\). \(\qed\) \emph{Examples:} \begin{itemize} \item \(\ZZ_n\) is \textbf{not} a free \(\ZZ\dash\)module for any \(n\). \item If \(V\) is a vector space over a field \(k\), then \(V\) is a free \(k\dash\)module (even if \(V\) is infinite dimensional). \item Every nonzero submodule of a free module over a PID is free. \end{itemize} \textbf{Some facts:} Let \(R = k\) be a field (or potentially a division ring).
\begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item Every maximal linearly independent subset is a basis for \(V\). \item Every vector space has a basis. \item Every linearly independent set is contained in a basis. \item Every spanning set contains a basis. \item Any two bases of a vector space have the same cardinality. \end{enumerate} \textbf{Theorem (Invariant Dimension):} Let \(R\) be a commutative ring and \(M\) a free \(R\dash\)module. If \(X_1, X_2\) are bases for \(M\), then \(\abs{X_1} = \abs{X_2}\). Any ring satisfying this condition is said to have the \textbf{invariant dimension property}. \begin{quote} Note that it's difficult to say much more about generic modules. For example, even a finitely generated module may \emph{not} have an invariant number of generators. \end{quote} \hypertarget{tuesday-november-5th}{% \section{Tuesday November 5th}\label{tuesday-november-5th}} \hypertarget{free-vs-projective-modules}{% \subsection{Free vs Projective Modules}\label{free-vs-projective-modules}} Let \(R\) be a PID. Then any nonzero submodule of a free module over a PID is free, and any projective module over \(R\) is free. Recall that a module \(M\) is \textbf{projective} \(\iff M\) is a direct summand of a free module. In general, \begin{itemize} \item Free \(\implies\) projective, but \item Projective \(\centernot\implies\) free. \end{itemize} \emph{Example:} Consider \(\ZZ_6 = \ZZ_2 \oplus \ZZ_3\) as a \(\ZZ_6\dash\)module. Is \(\ZZ_2\) free as a \(\ZZ_6\dash\)module? Note that \(\ZZ_2\) is a direct summand and thus projective, but \(\ZZ_2\) is not free as a \(\ZZ_6\dash\)module. What fails here is that \(\ZZ_6\) is not a PID, since it is not a domain. \hypertarget{annihilators}{% \subsection{Annihilators}\label{annihilators}} \textbf{Definition:} Let \(M\) be an \(R\dash\)module and \(m\in M\), then define \begin{align*} \mathrm{Ann}_m \definedas \theset{r\in R \suchthat r.m = 0 } \normal R.
\end{align*} We can then define a map \begin{align*} \phi: R &\to R.m \\ r &\mapsto r.m .\end{align*} Then \(\ker \phi = \mathrm{Ann}_m\), and \(R/\mathrm{Ann}_m \cong R.m\). We can also define \begin{align*} M_t \definedas \theset{m\in M \suchthat \mathrm{Ann}_m \neq 0} \leq M. \end{align*} \textbf{Lemma:} Let \(R\) be a PID and \(p\) a prime element. Then \begin{itemize} \tightlist \item If \(p^i m = 0\) then \(\mathrm{Ann}_m = (p^j)\) where \(0\leq j\leq i\). \item If \(\mathrm{Ann}_m = (p^i)\), then \(p^jm \neq 0\) for any \(j < i\). \end{itemize} \emph{Proof of (1):} Since we are in a PID and the annihilator is an ideal, we have \(\mathrm{Ann}_m \definedas (r)\) for some \(r\in R\). Then \(p^i \in (r)\), so \(r \divides p^i\). But \(p\) was prime, so up to scaling by units, we have \(r = p^j\) for some \(j \leq i\). \(\qed\) \emph{Proof of (2):} Towards a contradiction, suppose that \(\mathrm{Ann}_m = (p^i)\) and \(p^jm = 0\) for some \(j < i\). Then \(p^j \in \mathrm{Ann}_m = (p^i)\), so \(p^i \divides p^j\). But this forces \(i \leq j\), a contradiction. \(\qed\) \emph{Some terminology:} \begin{itemize} \item \(\mathrm{Ann}_m\) is the \textbf{order ideal} of \(m\). \item \(M_t\) is the \textbf{torsion submodule} of \(M\). \item \(M\) is \textbf{torsion} iff \(M = M_t\). \item \(M\) is \textbf{torsion free} iff \(M_t = 0\). \item If \(\mathrm{Ann}_m = (r)\), then \(m\) is said to have \textbf{order \(r\)}. \item \(Rm\) is the \textbf{cyclic module} generated by \(m\). \end{itemize} \textbf{Theorem:} A finitely generated \emph{torsion-free} module over a PID is free. \emph{Proof:} Let \(M = \generators{X}\) for some finite generating set. We can assume \(M \neq (0)\). Since \(M\) is torsion-free, if \(m\neq 0 \in M\), then \(rm = 0\) iff \(r=0\).
So choose \(S = \theset{x_1, \cdots , x_n} \subseteq X\) to be a maximal linearly independent subset of generators, so \begin{align*} \sum r_i x_i = 0 \implies r_i = 0 ~\forall i .\end{align*} Consider the submodule \(F \definedas \generators{x_1, \cdots, x_n} \leq M\); then \(S\) is a basis for \(F\) and thus \(F\) is free. The claim is that \(M \cong F\). Supposing otherwise, let \(y\in X\setminus S\). Then \(S \union \theset{y}\) can not be linearly independent, so there exists \(r_y, r_i \in R\) such that \begin{align*} r_y y + \sum r_i x_i = 0 .\end{align*} Thus \(r_y y = - \sum r_i x_i\), where \(r_y \neq 0\). Since \(\abs X < \infty\), let \begin{align*} r = \prod_{y \in X\setminus S} r_y .\end{align*} Then \(rX = \theset{rx \suchthat x\in X} \subseteq F\), and \(rM \leq F\). Now using the particular \(r\) we've just defined, define a map \begin{align*} f: M &\to M \\ m &\mapsto rm .\end{align*} Then \(\im f = r.M\), and since \(M\) is torsion-free, \(\ker f = (0)\). So \(M \cong rM \subseteq F\) and \(M\) is free. \(\qed\) \textbf{Theorem:} Let \(M\) be a finitely generated module over a PID \(R\). Then \(M\) can be decomposed as \begin{align*} M \cong M_t \oplus F \end{align*} where \(M_t\) is torsion and \(F\) is free of finite rank, and \(F \cong M/M_t\). \begin{quote} Note: we also have \(M/F \cong M_t\) since this is a direct sum. \end{quote} \emph{Proof:} \emph{Part 1: \(M/M_t\) is torsion free.} Suppose that \(r(m + M_t) = M_t\), so that \(r\) acting on a coset is the zero coset. Then \(rm + M_t = M_t\), so \(rm \in M_t\), so there exists some \(r'\) such that \(r'(rm) = 0\) by definition of \(M_t\). But then \((r'r)m = 0\), so in fact \(m\in M_t\) and thus \(m + M_t = M_t\), making \(M/M_t\) torsion free. \emph{Part 2: \(F \cong M/M_t\).} We thus have a SES \begin{align*} 0 \to M_t \to M \to M/M_t \definedas F \to 0 ,\end{align*} and since we've shown that \(F\) is torsion-free, by the previous theorem \(F\) is free.
Moreover, every SES with a free module in the right-hand slot splits: \begin{center} \begin{tikzcd} & & & & & & X \arrow[dd, "\iota", hook] \arrow[lldd, "f", tail] & & \\ & & & & & & & & \\ 0 \arrow[rr] & & M_t \arrow[rr] & & M \arrow[rr, "f"'] & & F \arrow[rr] \arrow[ll, "h" description, dotted, bend right] & & 0 \end{tikzcd} \end{center} For \(X = \theset{x_j}\) a generating set of \(F\), we can choose elements \(\theset{y_i} \in \pi\inv(\iota(X))\) to construct a set map \(f: X \to M\). By the universal property of free modules, we get a map \(h: F \to M\). It remains to check that this is actually a splitting, but we have \begin{align*} \pi \circ h (x_j) = \pi(h(\iota(x_j))) = \pi(f(x_j)) = \pi(y_j) = x_j. \end{align*} \textbf{Lemma:} Let \(R\) be a PID, and \(r\in R\) factor as \(r = \prod p_i^{k_i}\) as a prime factorization. Then \begin{align*} R/(r) \cong \bigoplus R/(p_i^{k_i}). \end{align*} Since \(R\) is a UFD, suppose that \(\gcd(s ,t) = 1\). Then the claim is that \begin{align*} R/(st) = R/(s) \oplus R/(t) ,\end{align*} which will prove the lemma by induction. Define a map \begin{align*} \alpha: R/(s) \oplus R/(t) &\to R/(st) \\ (x + (s), y+(t)) &\mapsto tx + sy + (st) .\end{align*} \emph{Exercise}: Show that this map is well-defined. Since \(\gcd(s, t) = 1\), there exist \(u, v\) such that \(su + vt = 1\). Then for any \(r\in R\), we have \begin{align*} rsu + rvt = r ,\end{align*} so for any given \(r\in R\) we can pick \(x = rv\) and \(y = ru\) so that \(tx + sy = r(tv + su) = r\). As a result, the map \(\alpha\) is onto. Now suppose \(tx + sy \in (st)\); then \(tx + sy = stz\). We have \(su + vt = 1\), and thus \begin{align*} utx + usy = ustz \implies utx + (y-tvy) = ustz .\end{align*} We can thus write \begin{align*} y = ustz - utx + tvy \in (t) .\end{align*} Similarly, \(x\in (s)\), so \(\ker \alpha = 0\).
\(\qed\) \hypertarget{classification-of-finitely-generated-modules-over-a-pid}{% \subsection{Classification of Finitely Generated Modules Over a PID}\label{classification-of-finitely-generated-modules-over-a-pid}} \textbf{Theorem (Classification of Finitely Generated Modules over a PID):} Let \(M\) be a finitely generated \(R\dash\)module where \(R\) is a PID. Then \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \tightlist \item \begin{align*} M \cong F \oplus \bigoplus_{i=1}^t R/(r_i) \end{align*} where \(F\) is free of finite rank and \(r_1 \divides r_2 \divides \cdots \divides r_t\). The rank and list of ideals occurring is uniquely determined by \(M\). The \(r_i\) are referred to as the \textbf{invariant factors}. \end{enumerate} \begin{enumerate} \def\labelenumi{\alph{enumi}.} \setcounter{enumi}{1} \tightlist \item \begin{align*} M \cong F \oplus \bigoplus_{i=1}^k R/(p_i^{s_i}) \end{align*} where \(F\) is free of finite rank and \(p_i\) are primes that need not be distinct. The rank and ideals are uniquely determined by \(M\). The \(p_i^{s_i}\) are referred to as \textbf{elementary divisors}. \end{enumerate} \hypertarget{thursday-november-7th}{% \section{Thursday November 7th}\label{thursday-november-7th}} \hypertarget{projective-modules}{% \subsection{Projective Modules}\label{projective-modules}} \textbf{Definition:} A \textbf{projective} module \(P\) over a ring \(R\) is an \(R\dash\)module such that the following diagram commutes: \begin{center} \begin{tikzcd} & & P \arrow[dd, "f"] \arrow[lldd, "\exists \phi", dashed] \\ & & \\ M \arrow[rr, "g"] & & N \end{tikzcd} \end{center} i.e.~for every surjective map \(g:M \surjects N\) and every map \(f: P \to N\) there exists a lift \(\phi: P \to M\) such that \(g \circ \phi = f\). \textbf{Theorem}: Every free module is projective.
\emph{Proof:} Suppose \(M \surjects N \to 0\) and \(F \mapsvia{f} N\), so we have the following situation: \begin{center} \begin{tikzcd} & & x \arrow[d, hook] \arrow[llddd, dotted] & & \\ & & F \arrow[dd, "f"] \arrow[lldd, "\exists \phi", dashed] & & \\ & & & & \\ M \arrow[rr, "g", two heads] & & N \arrow[rr, dotted] & & 0 \end{tikzcd} \end{center} For every \(x\in X\), there exists an \(m_x \in M\) such that \(g(m_x) = f(i(x))\). By freeness, there exists a \(\phi: F \to M\) such that this diagram commutes. \(\qed\) \textbf{Corollary:} Every \(R\dash\)module is the homomorphic image of a projective module. \emph{Proof:} If \(M\) is an \(R\dash\)module, then \(F \surjects M\) where \(F\) is free, but free modules are projective. \(\qed\) \textbf{Theorem:} Let \(P\) be an \(R\dash\)module. Then TFAE: \begin{enumerate} \def\labelenumi{\alph{enumi}.} \item \(P\) is projective. \item Every SES \(0 \to M \to N \to P \to 0\) splits. \item There exists a free module \(F\) such that \(F = P \oplus K\) for some other module \(K\). \end{enumerate} \emph{Proof:} \(a \implies b\): We set up the following situation, where \(s\) is produced by the universal property: \begin{center} \begin{tikzcd} & & & P \arrow[d, "\id", two heads, hook] \arrow[ld, "\exists s"] & \\ 0 \arrow[r] & M \arrow[r] & N \arrow[r, two heads] & P \arrow[r] & 0 \end{tikzcd} \end{center} \(\qed\) \(b \implies c\): Choose a free module \(F\) surjecting onto \(P\), giving a SES \(0 \to K \to F \to P \to 0\) where \(K\) is the kernel. This splits by (b), so \(F \cong K \oplus P\) by a previous theorem. \(\qed\) \(c\implies a\): We have the following situation: \begin{center} \begin{tikzcd} & & F = P \oplus K \arrow[dd, "\pi", bend right] \arrow[lldddd, "\exists h", dotted] \\ & & \\ & & P \arrow[uu, "\iota", bend right] \arrow[dd, "f"] \arrow[lldd, "\phi = h \circ \iota", dotted] \\ & & \\ M \arrow[rr, two heads] & & N \end{tikzcd} \end{center} By the previous argument, there exists an \(h: F\to M\) such that \(g\circ h = f \circ \pi\). Set \(\phi = h\circ \iota\).
\emph{Exercise}: Check that \(g\circ \phi = f\). \(\qed\) \textbf{Theorem:} \(\bigoplus P_i\) is projective \(\iff\) each \(P_i\) is projective. \emph{Proof:} \(\implies\): Suppose \(\oplus P_i\) is projective. Then there exists some \(F = K \oplus \bigoplus P_i\) where \(F\) is free. But then \(P_i\) is a direct summand of \(F\), and is thus projective. \(\impliedby\): Suppose each \(P_i\) is projective. Then there exists \(F_i = P_i \oplus K_i\), so \(F \definedas \bigoplus F_i = \bigoplus (P_i \oplus K_i) = \bigoplus P_i \oplus \bigoplus K_i\). So \(\bigoplus P_i\) is a direct summand of a free module, and thus projective. \(\qed\) \begin{quote} Note that a direct sum has \emph{finitely many} nonzero terms. Can use the fact that a direct sum of free modules is still free by taking a union of bases. \end{quote} \emph{Example of a projective module that is not free:} Take \(R = \ZZ_6\), which is not a PID and not a domain. Then \(\ZZ_6 = \ZZ_2 \oplus \ZZ_3\), and \(\ZZ_2, \ZZ_3\) are projective \(R\dash\)modules. However, neither is free as a \(\ZZ_6\dash\)module: a nonzero free \(\ZZ_6\dash\)module has order a power of \(6\), while \(\ZZ_2\) and \(\ZZ_3\) have orders \(2\) and \(3\) respectively. \hypertarget{endomorphisms-as-matrices}{% \subsection{Endomorphisms as Matrices}\label{endomorphisms-as-matrices}} \begin{quote} See section 7.1 in Hungerford \end{quote} Let \(M_{m, n}(R)\) denote \(m\times n\) matrices with coefficients in \(R\). This is an \(R\dash R\) bimodule, and since \(R\) is not necessarily a commutative ring, these two module actions may not be equivalent. If \(m=n\), then \(M_{n,n}(R)\) is a ring under the usual notions of matrix addition and multiplication. \textbf{Theorem:} Let \(V, W\) be vector spaces where \(\dim V = m\) and \(\dim W = n\). Let \(\hom_k(V, W)\) be the set of linear transformations between them. Then \(\hom_k(V, W) \cong M_{m, n}(k)\) as \(k\dash\)vector spaces. \emph{Proof:} Choose bases of \(V, W\). 
Then consider \begin{align*} T: V \to W \\ v_1 \mapsto \sum_{i=1}^n a_{1, i} ~w_i \\ v_2 \mapsto \sum_{i=1}^n a_{2, i} ~w_i \\ \vdots \end{align*} This produces a map \begin{align*} f: \hom_k(V, W) &\to M_{m, n}(k) \\ T &\mapsto (a_{i, j}) ,\end{align*} which is a matrix. \begin{quote} \emph{Exercise: Check that this is bijective.} \end{quote} \(\qed\) \textbf{Theorem:} Let \(M, N\) be free left \(R\dash\)modules of rank \(m, n\) respectively. Then \(\hom_R(M, N) \cong M_{m, n}(R)\) as \(R\dash R\) bimodules. \emph{Notation:} Suppose \(M, N\) are free \(R\dash\)modules, then denote \(\beta_m, \beta_n\) be fixed respective bases. We then write \([T]_{\beta_m, \beta_n} \definedas (a_{i, j})\) to be its \emph{matrix representation}. \textbf{Theorem}: Let \(R\) be a ring and let \(V, W, Z\) be three free left \(R\dash\)modules with bases \(\beta_v, \beta_w, \beta_z\) respectively. If \(T: V \to W, S: W\to Z\) are \(R\dash\)module homomorphisms, then \(S \circ T: V \to Z\) exists and \begin{align*} [S \circ T]_{\beta_v, \beta_z} = [T]_{\beta_v, \beta_w} [S]_{\beta_w, \beta_z} \end{align*} \emph{Proof:} Exercise. Show that \begin{align*} (S \circ T)(v_i) = \sum_j^t \sum_k^m a_{ik} b_{kj} z_j .\end{align*} \(\qed\) \hypertarget{matrices-and-opposite-rings}{% \subsection{Matrices and Opposite Rings}\label{matrices-and-opposite-rings}} Suppose \(\Gamma: \hom_R(V, V) \to M_n(R)\) and \(V\) is a free left \(R\dash\)module. By the theorem, we have \(\Gamma(T \circ S) = \Gamma(S) \Gamma(T)\). We say that \(\Gamma\) is an \textbf{anti-homomorphism}. To address this mixup, given a ring \(R\) we can define \(R^{op}\) which has the same underlying set of \(R\) but with the modified multiplication \begin{align*} x \cdot y \definedas yx \in R .\end{align*} If \(R\) is commutative, then \(R \cong R^{op}\). \(\qed\) \textbf{Theorem}: Let \(R\) be a unital ring and \(V\) an \(R\dash\)module. Then \(\hom_R(V, V) \cong M_n(R^{op})\) as rings. 
\emph{Proof}: Since \(\Gamma(S \circ T) = \Gamma(T) \Gamma(S)\), define a map \begin{align*} \Theta: M_{n, n}(R) &\to M_{n, n}(R^{op}) \\ A &\mapsto A^t .\end{align*} Then \begin{align*} \Theta(AB) = (AB)^t = B^t A^t = \Theta(B) \Theta(A) ,\end{align*} so \(\Theta\) is an anti-isomorphism. Thus \(\Theta\circ \Gamma\) is an anti-anti-homomorphism, i.e.~a usual homomorphism. \(\qed\) \textbf{Definition:} A matrix \(A\) is \textbf{invertible} iff there exists a \(B\) such that \(AB = BA = \id_n\). \textbf{Proposition:} Let \(R\) be a unital ring and \(V, W\) free \(R\dash\)modules with \(\dim V = n, \dim W = m\). Then \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(T \in \hom_R(V, W)\) is an isomorphisms iff \([T]_{\beta_v, \beta_w}\) is invertible. \item \([T\inv]_{\beta_v, \beta_w} = [T]_{\beta_v, \beta_w}\inv\). \end{enumerate} \textbf{Definition:} We'll say that two matrices \(A, B\) are \textbf{equivalent} iff there exist \(P, Q\) invertible such that \(PAQ = B\). \hypertarget{tuesday-november-12th}{% \section{Tuesday November 12th}\label{tuesday-november-12th}} \hypertarget{equivalence-and-similarity}{% \subsection{Equivalence and Similarity}\label{equivalence-and-similarity}} Recall from last time: If \(V, W\) are free left \(R\dash\)modules of ranks \(m,n\) respectively with bases \(\beta_v, \beta_w\) respectively, then \begin{align*} \hom_R(V, W) \cong M_{m, n}(R) .\end{align*} \textbf{Definition:} Two matrices \(A, B \in M_{m \times n}(R)\) are \textbf{equivalent} iff \begin{align*} \exists P \in \GL(m, R),~ \exists Q \in \GL(n, R) \quad \text{ such that } \quad A = PBQ .\end{align*} \textbf{Definition:} Two matrices \(A, B \in M_m(R)\) are \textbf{similar} iff \begin{align*} \exists P \in \GL(m, R) \quad \text{ such that } \quad A = P\inv B P .\end{align*} \textbf{Theorem:} Let \(T: V\to W\) be an \(R\dash\)module homomorphism. 
Then \(T\) has an \(m\times n\) matrix relative to other bases for \(V, W\) \(\iff\) \begin{align*} B = P [T]_{\beta_v, \beta_w} Q .\end{align*} \emph{Proof}: \(\implies\): Let \(\beta_v', \beta_w'\) be other bases. Then we want \(B = [T]_{\beta_v', \beta_w'}\), so just let \begin{align*} P = [\id]_{\beta_v', \beta_v} \quad Q = [\id]_{\beta_w, \beta_w'} .\end{align*} \(\qed\) \(\impliedby\): Suppose \(B = P [T]_{\beta_v, \beta_w} Q\) for some \(P, Q\). Let \(g: V\to V\) be the transformation associated to \(P\), and \(h: W \to W\) associated to \(Q\inv\). Then \begin{align*} P &= [\id]_{g(\beta_v), \beta_v} \\ \implies Q\inv &= [\id]_{h(\beta_w), \beta_w} \\ \implies Q &= [\id]_{\beta_w, h(\beta_w)} \\ \implies B &= [T]_{g(\beta_v), h(\beta_w)} .\end{align*} \(\qed\) \textbf{Corollary:} Let \(V\) be a free \(R\dash\)module and \(\beta_v\) a basis of size \(n\). Then \(T: V\to V\) has an \(n\times n\) matrix relative to \(\beta_v\) relative to another basis \(\iff\) \begin{align*} B = P [T]_{\beta_v, \beta_v} P\inv .\end{align*} \begin{quote} Note how this specializes to the case of linear transformations, particularly when \(B\) is diagonalizable. \end{quote} \hypertarget{review-of-linear-algebra}{% \subsection{Review of Linear Algebra:}\label{review-of-linear-algebra}} Let \(D\) be a division ring. Recall the notions of rank and nullity, and the statement of the rank-nullity theorem. Note that we can always factor a linear transformation \(\phi: E\to F\) as the following short exact sequence: \begin{align*} 0 \to \ker \phi \to E \mapsvia{\phi} \im \phi \to 0, \end{align*} and since every module over a division ring is free, this sequence splits and \(E \cong \ker\phi \oplus \im \phi\). Taking dimensions yields the rank-nullity theorem. Let \(A\in M_{m, n}(D)\) and define \begin{itemize} \item \(R(A) \in D^n\) is the span of the rows of \(A\), and \item \(C(A) \in D^m\) is the span of the columns of \(A\). 
\end{itemize} Recall that finding a basis of the \textbf{row space} involves doing Gaussian Elimination and taking the rows which have nonzero pivots. For a basis of the \textbf{column space}, you take the corresponding columns in the \emph{original} matrix. \begin{quote} Note that in this case, \(\dim R(A) = \dim C(A)\), and in fact these are always equal. \end{quote} \textbf{Theorem (Rank and Equivalence):} Let \(\phi: V\to W\) be a linear transformation and \(A\) be the matrix of \(\phi\) relative to \(\beta_v, \beta_w\). Then \(\dim \im \phi = \dim C(A) = \dim R(A)\). \emph{Proof}: Construct the matrix \(A = [\phi]_{\beta_v, \beta_w}\). Then \(\phi: V \to W\) descends to a map \(A: D^m \to D^n\). Writing the matrix \(A\) out and letting a row vector \(v\in D^m\) act on \(A\) from the \emph{left} yields a column vector \(Av \in D^n\). But then \(\im \phi\) corresponds to \(R(A)\), and so \begin{align*} \dim \im \phi = \dim R(A) = \dim C(A) .\end{align*} \(\qed\) \hypertarget{canonical-forms}{% \subsection{Canonical Forms}\label{canonical-forms}} Let \(1 \leq r \leq \min(m, n)\), and define \(E_r\) to be the \(m\times n\) matrix with the \(r\times r\) identity matrix in the top-left block. \textbf{Theorem}: Let \(A, B \in M_{m,n}(D)\). Then \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(A\) is equivalent to \(E_r \iff \rank A = r\) \begin{itemize} \tightlist \item That is, \(\exists P,Q\) such that \(E_r = PAQ\) \end{itemize} \item \(A\) is equivalent to \(B\) iff \(\rank A = \rank B\). \item \(E_r\) for \(r = 0, 1, \cdots, \min(m,n)\) is a complete set of representatives for the relation of matrix equivalence on \(M_{m, n}(D)\). \end{enumerate} Let \(X = M_{m, n}(D)\) and \(G = \GL_m(D) \cross \GL_n(D)\), then \begin{align*} G \actson X \text{ by } (P, Q) \actson A \definedas PAQ\inv .\end{align*} Then the orbits under this action are exactly \(\theset{E_r \mid 0 \leq r \leq \min(m, n)}\). 
\emph{Proof}: Note that 2 and 3 follow from 1, so we'll show 1. \(\implies\): Let \(A\) be an \(m\times n\) matrix for some linear transformation \(\phi: D^m \to D^n\) relative to some basis. Assume \(\rank A = \dim \im \phi = r\). We can find a basis such that \(\phi(u_i) = v_i\) for \(1 \leq i \leq r\), and \(\phi(u_i) = 0\) otherwise. Relative to this basis, \([\phi] = E_r\). But then \(A\) is equivalent to \(E_r\). \(\impliedby\): If \(A = PE_r Q\) with \(P, Q\) invertible, then \(\dim \im A = \dim \im E_r\), and thus \(\rank A = \rank E_r = r\). How do we do this? Recall the row operations: \begin{itemize} \item Interchange rows \item Multiply a row by a unit \item Add one row to another \end{itemize} But each corresponds to left-multiplication by an elementary matrix, each of which is invertible. If you proceed this way until the matrix is in RREF, you produce \(P \prod P_i A\). You can now multiply on the \emph{right} by elementary matrices to do column operations and move all pivots to the top-left block, which yields \(E_r\). \(\qed\) \textbf{Theorem:} Let \(A \in M_{m, n}(R)\) where \(R\) is a PID. Then \(A\) is equivalent to a matrix with \(L_r\) in the top-left block, where \(L_r\) is a diagonal matrix with \(L_{ii} = d_i\) such that \(d_1 \divides d_2 \divides \cdots \divides d_r\). Each \((d_i)\) is uniquely determined by \(A\). \hypertarget{thursday-november-14th}{% \section{Thursday November 14th}\label{thursday-november-14th}} \hypertarget{equivalence-to-canonical-forms}{% \subsection{Equivalence to Canonical Forms}\label{equivalence-to-canonical-forms}} Let \(D\) be a division ring and \(k\) a field. Recall that a matrix \(A\) is \emph{equivalent} to \(B \iff \exists P, Q\) such that \(PBQ=A\). From a previous theorem, if \(\rank(A) = r\), then \(A\) is equivalent to a matrix with \(I_r\) in the top-left block. \textbf{Theorem:} Let \(A\) be a matrix over a PID \(R\). 
Then \(A\) is equivalent to a matrix with \(L_r\) in the top-left corner, where \(L_r = \mathrm{diag}(d_1, d_2, \cdots, d_r)\) and \(d_1 \divides d_2 \divides \cdots \divides d_r\), and the \(d_i\) are uniquely determined. \textbf{Theorem:} Let \(A\) be an \(n\times n\) matrix over a division ring \(D\). TFAE: \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(\rank A = n\). \item \(A\) is equivalent to \(I_n\). \item \(A\) is invertible. \end{enumerate} \(1\implies 2\): Use Gaussian elimination. \(2\implies 3\): \(A = PI_n Q = PQ\) where \(P, Q\) are invertible, so \(PQ = A\) is invertible. \(3\implies 1\): If \(A\) is invertible, then \(A: D^n \to D^n\) is bijective and thus surjective, so \(\dim \im A = n\). \begin{quote} Note: the image is now \emph{row space} because we are taking \emph{left} actions. \end{quote} \(\qed\) \hypertarget{determinants}{% \subsection{Determinants}\label{determinants}} \textbf{Definition:} Let \(M_1, \cdots, M_n\) be \(R\dash\)modules, and then \(f: \prod M_i \to R\) is \(n\dash\)linear iff \begin{align*} f( m_1, m_2, \cdots, rm_k + sm_k', \cdots, m_n ) = \\ r f( m_1, \cdots, m_k, \cdots, m_n ) + sf(m_1, \cdots, m_k', \cdots, m_n ) .\end{align*} \emph{Example:} The inner product is a 2-linear form. \textbf{Definition:} \(f\) is \textbf{symmetric} iff \begin{align*} f(m_1, \cdots, m_n) = f(m_{\sigma(1)}, \cdots, m_{\sigma(n)}) ~~\forall \sigma \in S_n .\end{align*} \textbf{Definition:} \(f\) is \textbf{skew-symmetric} iff \begin{align*} f(m_1, \cdots, m_n) = \mathrm{sgn}(\sigma) f(m_{\sigma(1)}, \cdots, m_{\sigma(n)}) ~~\forall \sigma \in S_n ,\end{align*} where \begin{align*} \mathrm{sgn}(\sigma) = \begin{cases} 1 & \sigma \text{ is even } \\ -1 & \sigma \text{ is odd } \end{cases} .\end{align*} \textbf{Definition:} \(f\) is \textbf{alternating} iff \begin{align*} m_i = m_j \text{ for some pair } (i, j) \implies f(m_1, \cdots, m_n) = 0 .\end{align*} \textbf{Theorem:} Let \(f\) be an \(n\dash\)linear form. 
If \(f\) is alternating, then \(f\) is skew-symmetric. \emph{Proof:} It suffices to show the \(n=2\) case. We have \begin{align*} 0 &= f(m_1 + m_2, m_1 + m_2) \\ &= f(m_1, m_1) + f(m_1, m_2) + f(m_2, m_1) + f(m_2, m_2) \\ &= f(m_1, m_2) + f(m_2, m_1)\\ \implies f(m_1, m_2) &= - f(m_2, m_1) .\end{align*} \(\qed\) \textbf{Theorem:} Let \(R\) be a unital commutative ring and let \(r\in R\) be arbitrary. Then \begin{align*} \exists! f: \bigoplus_{i=1}^n R^n \to R ,\end{align*} where \(f\) is an alternating \(R\dash\)form such that \(f(\vector e_1, \cdots, \vector e_n) = r\), where \(\vector e_i = [0, 0, \cdots, 0, 1, 0, \cdots, 0, 0]\). \begin{quote} \(R^n\) is a free module, so \(f\) can be identified with a matrix once a basis is chosen. \end{quote} \emph{Proof}: \emph{Existence:} Let \(x_i = [a_{i1}, a_{i2}, \cdots, a_{in}]\) and define \begin{align*} f(x_1, \cdots, x_n) = \sum_{\sigma \in S_n} \mathrm{sgn}(\sigma) r \prod_i a_{i \sigma(i)} .\end{align*} \emph{Exercise:} Check that \(f(\vector e_1, \cdots, \vector e_n) = r\) and \(f\) is \(n\dash\)linear. Moreover, \(f\) is alternating. Consider \(f(x_1, \cdots, x_n)\) where \(x_i = x_j\) for some \(i\neq j\). Letting \(\rho = (i, j)\), we can write \(S_n = A_n \disjoint A_n \rho\). If \(\sigma\) is even, then the summand is \begin{align*} (+1)r a_{1\sigma(1)} \cdots a_{n\sigma(n)} .\end{align*} Since \(x_i = x_j\), we'll have \(\prod_k a_{ik} = \prod a_{jk}\). Then consider applying \(\sigma \rho\). We have \begin{align*} -r \prod a_{i\sigma(i)} &= -r a_{1\sigma(1)} \cdots \mathbf{a}_{j \sigma(j)} \cdots \mathbf{a}_{i \sigma(i)} \cdots a_{n, \sigma(n)} \\ &= -r a_{1\sigma(1)} \cdots \mathbf{a}_{i \sigma(i)} \cdots \mathbf{a}_{j \sigma(j)} \cdots a_{n, \sigma(n)} ,\end{align*} which permutes the \(i,j\) terms. So these two terms cancel, and the remaining terms are untouched. \emph{Uniqueness}: Let \(x_i = \sum_j a_{ij} \vector e_j\). 
Then \begin{align*} f(x_1, \cdots, x_n) &= f(\sum_{j_1} a^1_j \vector e_j, \cdots, \sum_{j_n} a^n_j \vector e_j) \\ &= \sum_{j_1} \cdots \sum_{j_n} f(\vector e_{j_1}, \cdots, \vector e_{j_n} ) a_{1, j_1} \cdots a_{n, j_n} \\ &= \sum_{\sigma \in S_n} \mathrm{sgn}(\sigma) f(\vector e_1, \cdots, \vector e_n) a_{1, \sigma(1)} \cdots a_{n, \sigma(n)} \\ &= \sum_{\sigma \in S_n} \mathrm{sgn}(\sigma) r a_{1, \sigma(1)} \cdots a_{n, \sigma(n)} .\end{align*} \(\qed\) \textbf{Definition:} Let \(R\) be a commutative unital ring and define \(\mathrm{det}: M_n(R) \to R\) is the unique \(n\dash\)alternating form with \(\det(I) = 1\), and is called the \emph{determinant}. \textbf{Theorem:} Let \(A, B \in M_{n}(R)\). Then \begin{enumerate} \def\labelenumi{\alph{enumi}.} \item \(\abs{AB} = \abs A \abs B\) \item \(A\) is invertible \(\iff \abs{A} \in R\units\) \item \(A \sim B \implies \abs A = \abs B\). \item \(\abs{A^t} = \abs A\). \item If \(A\) is triangular, then \(\abs A\) is the product of the diagonal entries. \end{enumerate} \emph{Proof of a:} Let \(B\) be fixed. Let \(\Delta_B: M_n(R) \to R\) be defined as \(C \mapsto \abs{CB}\). Then this is an alternating form, so by the theorem, \(\Delta_B = r \mathrm{det}\). But then \(\Delta_B(C) = r\abs{C}\), so \(r\abs{C} = \abs{CB}\). So pick \(C = I\), then \(r = \abs{B}\). \(\qed\) \emph{Proof of b:} Suppose \(A\) is invertible. Then \(AA\inv = I\), so \(\abs{AA\inv} = \abs{A}\abs{A\inv} = 1\), which shows that \(\abs A\) is a unit. \(\qed\) \emph{Proof of c:} Let \(A = PBP\inv\). Then \begin{align*} \abs A = \abs{PBP\inv} = \abs P \abs B \abs{P\inv} = \abs{P} \abs{P\inv} \abs B = \abs B .\end{align*} \(\qed\) \emph{Proof of d:} Let \(A = (a_{ij})\), so \(B = (b_{ij}) = (a_{ji})\). 
Then \begin{align*} \abs{A^t} &= \sum_{\sigma} \mathrm{sgn}(\sigma) \prod_k b_{k \sigma(k)} \\ &= \sum_\sigma \mathrm{sgn}(\sigma) \prod_k a_{\sigma(k) k} \\ &= \sum_{\sigma\inv} \mathrm{sgn}(\sigma) \prod_k a_{k \sigma\inv(k)} \\ &= \sum_\sigma \mathrm{sgn}(\sigma) \prod_k a_{k \sigma(k)} \\ &= \abs {A} .\end{align*} \(\qed\) \emph{Proof of e:} Let \(A\) be upper-triangular. Then \begin{align*} \abs A = \sum_\sigma \mathrm{sgn}(\sigma) \prod_k a_{k \sigma(k)} = a_{11} a_{22} \cdots a_{nn} .\end{align*} \(\qed\) Next time: \begin{itemize} \tightlist \item Calculate determinants \begin{itemize} \tightlist \item Gaussian elimination \item Cofactors \end{itemize} \item Formulas for \(A\inv\) \item Cramer's rule \end{itemize} \hypertarget{tuesday-november-19th}{% \section{Tuesday November 19th}\label{tuesday-november-19th}} \hypertarget{determinants-1}{% \subsection{Determinants}\label{determinants-1}} Let \(A\in M_n(R)\), where \(R\) is a commutative unital ring. Given \(A = (a_{ij})\), recall that \begin{align*} \det A = \sum_{\sigma \in S_n} \mathrm{sgn}(\sigma) \prod a_{i, \sigma(i)} .\end{align*} This satisfies a number of properties: \begin{itemize} \item \(\det(AB) = \det A \det B\) \item \(A\) invertible \(\implies\) \(\det A\) is a unit in \(R\) \item \(A \sim B \implies \det(A) = \det(B)\) \item \(\det A^t = \det A\) \item \(A\) is triangular \(\implies \det A = \prod a_{ii}\). \end{itemize} \hypertarget{calculating-determinants}{% \subsubsection{Calculating Determinants}\label{calculating-determinants}} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \textbf{Gaussian Elimination} \begin{enumerate} \def\labelenumii{\alph{enumii}.} \item \(B\) is obtained from \(A\) by interchanging rows: \(\det B = -\det A\) \item \(B\) is obtained from \(A\) by multiplying \(\det B = r \det A\) \item \(B\) is obtained from \(A\) by adding a scalar multiple of one row to another: \(\det B = \det A\). 
\end{enumerate} \item \textbf{Cofactors} Let \(A_{ij}\) be the \((n-1)\times (n-1)\) minor obtained by deleting row \(i\) and column \(j\), and \(C_{ij} = (-1)^{i+j} \det A_{ij}\). Then \textbf{(theorem)} \(\det A = \sum_{j=1}^n a_{ij} C_{ij}\) by expanding along either a row or column. \end{enumerate} \textbf{Theorem}: \begin{align*} A \mathrm{Adj}(A) = \det (A) I_n ,\end{align*} where \(\mathrm{Adj}(A) = (C_{ij})^t\). If \(\det A\) is a unit, then \(A\inv = \mathrm{Adj}(A) / \det(A)\). \hypertarget{decomposition-of-a-linear-transformation}{% \subsubsection{Decomposition of a Linear Transformation:}\label{decomposition-of-a-linear-transformation}} Let \(\phi: V \to V\) be a linear transformation of vector spaces, and let \(R = \hom_k(V, V)\). Then \(R\) is a ring. Let \(f(x) = \sum a_j x^j \in k[x]\) be an arbitrary polynomial. Then for \(\phi \in R\), it makes sense to evaluate \(f(\phi)\) where \(\phi^n\) denotes an \(n\dash\)fold composition, and \(f(\phi): V \to V\). \textbf{Lemma:} \begin{itemize} \item There exists a unique monic polynomial \(q_\phi(x) \in k[x]\) such that \(q_\phi(\phi) = 0\) and \(f(\phi) = 0 \implies q_\phi \divides f\). \(q_\phi\) is referred to as the \textbf{minimal polynomial} of \(\phi\). \item The exact same conclusion holds with \(\phi\) replaced by a matrix \(A\), yielding \(q_A\). \item If \(A\) is the matrix of \(\phi\) relative to a fixed basis, then \(q_\phi = q_A\). \end{itemize} \emph{Proof of a and b:} Fix \(\phi\), and define \begin{align*} \Gamma: k[x] &\to \hom_k(V, V) \\ f &\mapsto f(\phi) .\end{align*} Since \(\dim_k \hom_k(V, V) = (\dim_k V)^2 < \infty\) and \(\dim_k k[x] = \infty\), we must have \(\ker \Gamma \neq 0\). Since \(k[x]\) is a PID, we have \(\ker \Gamma = (q)\) for some \(q\in k[x]\). Then if \(f(\phi) = 0\), we have \(f(x) \in \ker \Gamma \implies q \divides f\). We can then rescale \(q\) to be monic, which makes it unique. \begin{quote} Note: for (b), just replace \(\phi\) with \(A\) everywhere. 
\end{quote} \(\qed\) \emph{Proof of c:} Suppose \(A = [\phi]_\mathcal{B}\) for some fixed basis \(\mathcal B\). Then \(\hom_k(V, V) \cong M_n(k)\), so we have the following commutative diagram: \begin{center} \begin{tikzcd} {k[x]} \arrow[rr, "\Gamma_\phi"] \arrow[rrdd, "\Gamma_A"] & & {\hom_k(V, V)} \arrow[dd, "\cong", two heads, hook] \\ & & \\ & & M_n(k) \end{tikzcd} \end{center} \(\qed\) \hypertarget{finitely-generated-modules-over-a-pid}{% \subsubsection{Finitely Generated Modules over a PID}\label{finitely-generated-modules-over-a-pid}} Let \(M\) be a finitely generated module over \(R\) a PID. Then \begin{align*} M &\cong F \oplus \bigoplus_{i=1}^n R/(r_i) \quad r_1 \divides r_2 \divides \cdots \divides r_n \\ M &\cong F \oplus \bigoplus_{i=1}^n R/(p_i^{s_i}) \quad p_i \text{ not necessarily distinct primes. } .\end{align*} Letting \(R = k[x]\) and \(\phi: V\to V\) with \(\dim_k V < \infty\), \(V\) becomes a \(k[x]\dash\)module by defining \begin{align*} f(x) \actson \vector v \definedas f(\phi)(\vector v) \end{align*} Note that \(W\) is a \(k[x]\dash\)submodule iff \(\phi(W) \subseteq W\). Let \(v\in V\), and \(\generators{v} = \theset{\phi^i(v) \suchthat i = 0,1,2,\cdots}\) is the \textbf{cyclic submodule generated by \(v\)}, and we write \(\generators{v} = k[x].v\). \textbf{Theorem:} Let \(\phi: V\to V\) be a linear transformation. Then \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item There exist cyclic \(k[x]\dash\)submodules \(V_i\) such that \(V = \bigoplus_{i=1}^t V_i\), where for each \(i\) the restriction \(\restrictionof{\phi}{V_i}: V_i \to V_i\) has minimal polynomial \(q_i\), and \(q_1 \divides q_2 \divides \cdots \divides q_t\). \item There exist cyclic \(k[x]\dash\)submodules \(V_j\) such that \(V = \bigoplus_{j=1}^\nu V_j\) and \(p_j^{m_j}\) is the minimal polynomial of \(\phi: V_j \to V_j\). \end{enumerate} \emph{Proof:} Apply the classification theorem to write \(V = \bigoplus R/(r_i)\) as an invariant factor decomposition. 
Then \(R/(q_i) \cong V_i\), some vector space, and since there is a direct sum decomposition, the invariant factors are minimal polynomials for \(\phi_i: V_i \to V_i\), and thus \(k[x]/(q_i)\). \(\qed\) \hypertarget{canonical-forms-for-matrices}{% \subsubsection{Canonical Forms for Matrices}\label{canonical-forms-for-matrices}} We'll look at \begin{itemize} \item Rational Canonical Form \item Jordan Canonical Form \end{itemize} \textbf{Theorem}: Let \(\phi: V\to V\) be linear, then \(V\) is a cyclic \(k[x]\dash\)module and \(\phi: V\to V\) has minimal polynomial \(q(x) = \sum_j a_j x^j\) iff \(\dim V = n\) and \(V\) has an ordered basis of the form \begin{align*} [\phi]_{\mathcal{B}} = \left[ \begin{array}{ccccc} 0 & 1 & 0 & \cdots & 0 \\ 0 & 0 & 1 & \cdots & 0 \\ \vdots & \vdots & \vdots & \cdots & \vdots \\ -a_0 & -a_1 & -a_2 & \cdots & -a_{n-1} \end{array} \right] \end{align*} with ones on the super-diagonal. \emph{Proof:} \(\impliedby\): Let \(V = k[x].v = \generators{v, \phi(v), \cdots, \phi^{n-1}(v)}\) where \(\deg q(x) = n\). The claim is that this is a linearly independent spanning set. Linear independence: suppose \(\sum_{j=0}^{n-1} k_j \phi^j(v) = 0\) with some \(k_j \neq 0\). Then \(f(x) = \sum k_j x^j\) is a polynomial where \(f(\phi) = 0\), but this contradicts the minimality of \(q(x)\). But then we have \(n\) linearly independent vectors in \(V\) which is dimension \(n\), so this is a spanning set. \(\implies\): We can just check where basis elements are sent. Set \(\mathcal{B} = \theset{v, \phi(v), \cdots, \phi^{n-1}(v)}\). 
Then \begin{align*} v &\mapsto \phi(v) \\ \phi(v) &\mapsto \phi^2(v) \\ &\vdots \\ \phi^{n-1}(v) &\mapsto \phi^n(v) = -\sum a_i \phi^i(v) .\end{align*} \(\impliedby\) Fix a basis \(B = \theset{v_1, \cdots, v_n}\) and \(A = [\phi]_B\), then \begin{align*} v_1 &\mapsto v_2 = \phi(v_1) \\ v_2 &\mapsto v_3 = \phi^2(v_1) \\ &\vdots \\ v_{n-1} &\mapsto v_n = \phi^{n-1}(v_1) .\end{align*} and \begin{align*} \phi^n(v_1) = -a_0 v_1 - a_1 \phi(v_1) - \cdots - a_{n-1} \phi^{n-1}(v_1) .\end{align*} Thus \(V = k[x].v_1\), since \(\dim V = n\) with \(\theset{v_1, \phi(v_1), \cdots, \phi^{n-1}(v_1)}\) as a basis. \(\qed\) \hypertarget{thursday-november-21}{% \section{Thursday November 21}\label{thursday-november-21}} \hypertarget{cyclic-decomposition}{% \subsection{Cyclic Decomposition}\label{cyclic-decomposition}} Let \(\phi: V\to V\) be a linear transformation; then \(V\) is a \(k[x]\) module under \(f(x) \actson v \definedas f(\phi)(v)\). By the structure theorem, since \(k[x]\) is a PID, we have an invariant factor decomposition \(V = \bigoplus V_i\) where each \(V_i\) is a cyclic \(k[x]\dash\)module. If \(q_i\) is the minimal polynomial for \(\phi_i: V_i \to V_i\), then \(q_{i} \divides q_{i+1}\) for all \(i\). We also have an elementary divisor decomposition where \(p_i^{m_i}\) are the minimal polynomials for \(\phi_i\). \begin{quote} Note: one is only for the restriction to the subspaces? Check. \end{quote} Recall that if \(\phi\) has minimal polynomial \(q(x)\) and \(\dim V = n\), there exists a basis \(B\) of \(V\) such that \([\phi]_B\) is given by the \textbf{companion matrix} of \(q(x)\). This is the \textbf{rational canonical form}. \textbf{Corollary:} Let \(\phi: V\to V\) be a linear transformation. 
Then \(V\) is a cyclic \(k[x]\dash\)module and \(\phi\) has minimal polynomial \((x-b)^n \iff \dim V = n\) and there exists a basis such that \begin{align*} [\phi]_B = \left[\begin{array}{cccccc} b & 1 & 0 & \cdots & 0 & 0 \\ 0 & b & 1 & \cdots & 0 & 0 \\ \vdots & \vdots & \ddots & \ddots & \ddots & \vdots \\ 0 & 0 & 0 & \cdots & b & 1 \\ 0 & 0 & 0 & \cdots & 0 & b \end{array}\right] .\end{align*} This is the \textbf{Jordan Canonical form}. \begin{quote} Note that if \(k\) is not algebraically closed, we can only reduce to RCF. If \(k\) \emph{is} closed, we can reduce to JCF, which is slightly nicer. \end{quote} \emph{Proof:} Let \(\delta = \phi - b \cdot\id_V\). Then \begin{itemize} \item \(q(x)\) is the minimal polynomial for \(\phi \iff x^n\) is the minimal polynomial for \(\delta\). \item A priori, \(V\) has two \(k[x]\) structures -- one given by \(\phi\), and one by \(\delta\). \item \emph{Exercise}: \(V\) is cyclic with respect to the \(\phi\) structure \(\iff\) \(V\) is cyclic with respect to the \(\delta\) structure. \end{itemize} Then the matrix \([\delta]_B\) relative to an ordered basis for \(\delta\) has only zeros on the diagonal and ones on the super-diagonal, and \([\phi]_B\) is the same but with \(b\) on the diagonal. \(\qed\) \textbf{Lemma:} Let \(\phi: V\to V\) with \(V = \bigoplus_i^t V_i\) as \(k[x]\dash\)modules. Then \(M_i\) is a matrix of \(\restrictionof{\phi}{V_i}: V_i \to V_i\) relative to some basis for \(V_i \iff\) the matrix of \(\phi\) wrt some ordered basis is given by \begin{align*} \left[ \begin{array}{cccc} M_1 & & & \\ & M_2 & & \\ & & \ddots & \\ & & & M_t \end{array}\right] .\end{align*} \emph{Proof}: \(\implies\): Suppose \(B_i\) is a basis for \(V_i\) and \([\phi]_{B_i} = M_i\). Then let \(B = \union_i B_i\); then \(B\) is a basis for \(V\) and the matrix is of the desired form. \(\impliedby\): Suppose that we have a basis \(B\) and \([\phi]_B\) is given by a block diagonal matrix filled with blocks \(M_i\). Suppose \(\dim M_i = n_i\). 
If \(B = \theset{v_1, v_2, \cdots, v_n}\), then take \(B_1 = \theset{v_1, \cdots, v_{n_1}}\) and so on. Then \([\phi_i]_{B_i} = M_i\) as desired. \(\qed\) \emph{Application:} Let \(V = \bigoplus V_i\) with \(q_i\) the minimal polynomials of \(\phi: V_i \to V_i\) with \(q_i \divides q_{i+1}\). Then there exists a basis where \([\phi]_B\) is block diagonal with blocks \(M_i\), where each \(M_i\) is in rational canonical form with minimal polynomial \(q_i(x)\). If \(k\) is algebraically closed, we can obtain elementary divisors \(p_i(x) = (x - b_i)^{m_i}\). Then there exists a similar basis where now each \(M_i\) is a \emph{Jordan block} with \(b_i\) on the diagonals and ones on the super-diagonal. Moreover, in each case, there is a basis such that \(A = P [M_i] P\inv\) (where \(M_i\) are the block matrices obtained). When \(A\) is diagonalizable, \(P\) contains the eigenvectors of \(A\). \textbf{Corollary:} Two matrices are similar \(\iff\) they have the same invariant factors and elementary divisors. \emph{Example:} Let \(\phi: V\to V\) have invariant factors \(q_1(x) = (x-1)\) and \(q_2(x) = (x-1)(x-2)\). Then \(\dim V = 3\), \(V = V_1 \oplus V_2\) where \(\dim V_1 = 1\) and \(\dim V_2 = 2\). We thus have \begin{align*} [\phi]_B = \left(\begin{array}{ccc} 1 & 0 & 0 \\ 0 & 0 & 1 \\ 0 & -2 & 3 \end{array}\right) .\end{align*} Moreover, we have \begin{align*} V \cong \frac{k[x]}{(x-1)} \oplus \frac{k[x]}{(x-1)(x-2)} \cong \frac{k[x]}{(x-1)} \oplus \frac{k[x]}{(x-1)} \oplus \frac{k[x]}{(x-2)} ,\end{align*} so the elementary divisors are \(x-1, x-1, x-2\). \begin{quote} Invariant factor decompositions should correspond to rational canonical form blocks, and elementary divisors should correspond to Jordan blocks. \end{quote} \textbf{Theorem:} Let \(A\) be an \(n\times n\) matrix over \(k\). 
Then the matrix \(xI_n - A \in M_n(k[x])\) is equivalent in \(k[x]\) to a diagonal matrix \(D\) with non-zero entries \(f_1, f_2, \cdots f_t \in k[x]\) such that the \(f_i\) are monic and \(f_i \divides f_{i+1}\). The non-constant polynomials among the \(f_i\) are the invariant factors of \(A\). \emph{Proof (Sketch)}: Let \(V = k^n\) and \(\phi: k^n \to k^n\) correspond to \(A\) under the fixed standard basis \(\theset{e_i}\). Then \(V\) has a \(k[x]\dash\)module structure induced by \(\phi\). Let \(F\) be the free \(k[x]\) module with basis \(\theset{u_i}_{i=1}^n\), and define the maps \begin{align*} \pi: F &\to k^n \\ u_i &\mapsto e_i \end{align*} and \begin{align*} \psi: F &\to F \\ u_i &\mapsto xu_i - \sum_j a_{ij} u_j .\end{align*} Then \(\psi\) relative to the basis \(\theset{u_i}\) is \(xI_n - A\). Then \emph{(exercise)} the sequence \begin{align*} F \mapsvia{\psi} F \mapsvia{\pi} k^n \to 0 \end{align*} is exact, \(\im \pi = k^n\), and \(\im \psi = \ker \pi\). We then have \(k^n \cong F/\ker \pi = F / \im \psi\), and since \(k[x]\) is a PID, \begin{align*} xI_n - A \sim D \definedas \left[\begin{array}{cc} L_r & 0 \\ 0 & 0 \end{array}\right] .\end{align*} where \(L_r\) is diagonal with \(f_i\)s where \(f_i \divides f_{i+1}\). However, \(\det(xI_n - A) \neq 0\) because \(x I_n - A\) is a monic polynomial of degree \(n\). But \(\det{xI_n - A} = \det(D)\), so this means that \(L_r\) must take up the entire matrix of \(D\), so there is no zero in the bottom-right corner. So \(L_r = D\), and \(D\) is the matrix of \(\psi\) with respect to \(B_1 = \theset{v_i}\) and \(B_2 = \theset{w_i}\) with \(\psi(v_i) = f_i w_i\). Thus \begin{align*} \im \psi = \bigoplus_{i=1}^n k[x] f_i w_i. 
\end{align*} But then \begin{align*} V = k^n \cong F/ \im \psi &\cong \frac{k[x] w_1 \oplus \cdots \oplus k[x] w_n} {k[x] f_1 w_1 \oplus \cdots \oplus k[x] f_n w_n} \\ &\cong \bigoplus_{i=1}^n k[x]/(f_i) .\end{align*} \(\qed\) \hypertarget{tuesday-november-26th}{% \section{Tuesday November 26th}\label{tuesday-november-26th}} \hypertarget{minimal-and-characteristic-polynomials}{% \subsection{Minimal and Characteristic Polynomials}\label{minimal-and-characteristic-polynomials}} \textbf{Theorem} \begin{enumerate} \def\labelenumi{\alph{enumi}.} \item ? (Todo) \item \textbf{(Cayley Hamilton)} If \(p_\phi\) is the \emph{characteristic} polynomial of a linear transformation \(\phi\), then \(p_\phi(\phi) = 0\) \item For any \(f(x) \in k[x]\) that is irreducible, \(f(x) \divides p_\phi(x) \iff f(x) \divides q_\phi(x)\). \end{enumerate} \emph{Proof of (a):} ? \(\qed\) \emph{Proof of (b):} If \(q_\phi(x) \divides p_\phi(x)\) and \(q_\phi(\phi) = 0\), then \(p_\phi(\phi) = 0\) as well. \(\qed\) \emph{Proof of (c):} We have \(f(x) \divides q_\phi(x) \implies f(x) \divides p_\phi(x)\) and \(f(x) \divides p_\phi(x) \implies f(x) \divides q_i(x)\) for some \(i\), and so \(f(x) \divides q_\phi(x)\). \(\qed\) \hypertarget{eigenvalues-and-eigenvectors}{% \subsection{Eigenvalues and Eigenvectors}\label{eigenvalues-and-eigenvectors}} \textbf{Definition:} Let \(\phi: V\to V\) be a linear transformation. Then \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item An \textbf{eigenvector} is a vector \(\vector v \neq \vector 0\) such that \(\phi(\vector v) = \lambda \vector v\) for some \(\lambda \in k\). \item If such a \(\vector v\) exists, then \(\lambda\) is called an \textbf{eigenvalue} of \(\phi\). \end{enumerate} \textbf{Theorem:} The eigenvalues of \(\phi\) are the roots of \(p_\phi(x)\) in \(k\). 
\emph{Proof:} Let \([\phi]_B = A\), then \begin{align*} &p_A(\lambda) = p_\phi(\lambda) = \det(\lambda I - A) = 0 \\ &\iff \exists \vector v\neq \vector 0 \text{ such that } (\lambda I - A)\vector v = \vector 0 \\ &\iff \lambda I\vector v = A \vector v \\ &\iff A\vector v = \lambda \vector v \\ &\iff \lambda \text{ is an eigenvalue and } \vector v \text{ is an eigenvector} .\end{align*} \(\qed\) \hypertarget{tuesday-december-3rd}{% \section{Tuesday December 3rd}\label{tuesday-december-3rd}} \hypertarget{similarity-and-diagonalizability}{% \subsection{Similarity and Diagonalizability}\label{similarity-and-diagonalizability}} Recall that \(A \sim B \iff A = PBP\inv\). \emph{Fact:} If \(T:V \to V\) is a linear transformation and \(\mathcal{B}, \mathcal{B}'\) are bases where \([T]_{\mathcal{B}} = A\) and \([T]_{\mathcal{B}'} = B\), then \(A \sim B\). \textbf{Theorem:} Let \(A\) be an \(n\times n\) matrix. Then \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(A\) is similar to a diagonal matrix / diagonalizable \(\iff A\) has \(n\) linearly independent eigenvectors. \item In this case, \(A = PDP\inv\) where \(D\) is diagonal and \(P = [\vector{v_1}, \vector{v_2}, \cdots, \vector{v_n}]\) with the \(\vector{v_i}\) linearly independent. \end{enumerate} \emph{Proof:} Consider \(AP = PD\), then \(AP\) has columns \(A\vector{v_i}\) and \(PD\) has columns \(\lambda_i \vector{v_i}\). \(\qed\) \emph{Corollary:} If \(A\) has \(n\) distinct eigenvalues, then \(A\) is diagonalizable. \emph{Examples:} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item Let \begin{align*} A = \left[\begin{array}{ccc} 4 & 0 & 0 \\ -1 & 4 & 0 \\ 0 & 0 & 5 \end{array}\right] \end{align*} \(A\) has eigenvalues \(4,5\), and it turns out that \(A\) is defective. Note that \(\dim \Lambda_4 + \dim \Lambda_5 = 2 < 3\), so the eigenvectors can't form a basis of \(\RR^3\). \item \begin{align*} A = \left[\begin{array}{ccc} 4 & 2 & 2 \\ 2 & 4 & 2 \\ 2 & 2 & 4 \end{array}\right] \end{align*} \(A\) has eigenvalues \(2, 8\). 
\(\Lambda_2 = \spanof_\RR\theset{[-1, 1, 0]^t, [-1, 0, 1]^t}\) and \(\Lambda_8 = \spanof_\RR\theset{[1,1,1]^t}\). These vectors become the columns of \(P\), which is (by no coincidence!) an orthogonal matrix, since \(A\) was symmetric. \end{enumerate} \emph{Exercise}: \begin{align*} \left[\begin{array}{ccc} 0 & 4 & 2 \\ -1 & -4 & -1 \\ 0 & 0 & -2 \end{array}\right] .\end{align*} Find \(J = JCF(A)\) (so \(A = PJP\inv\)) and compute \(P\). \textbf{Definition:} Let \(A = (a_{ij})\), then define the \emph{trace} of \(A\) by \(\tr(A) = \sum_i a_{ii}\). The trace satisfies several properties: \begin{itemize} \item \(\tr(A+B) = \tr(A) + \tr(B)\), \item \(\tr(kA) = k\tr(A)\), \item \(\tr(AB) = \tr(BA)\). \end{itemize} \textbf{Theorem:} Let \(T: V\to V\) be a linear transformation with \(\dim V < \infty\), \(A = [T]_{\mathcal{B}}\) with respect to some basis, and \(p_T(x)\) be the characteristic polynomial of \(A\). Then \begin{align*} p_T(x) &= x^n + c_{n-1}x^{n-1} + \cdots + c_1 x + c_0, \\ c_0 &= (-1)^n \det(A), \\ c_{n-1} &= -\tr(A) .\end{align*} \emph{Proof:} We have \(p_T(0) = \det(0 I_n - A) = \det(-A) = (-1)^n \det(A)\). Compute \(p_T(x)\) by expanding \(\det(xI - A)\) along the first row. The first term looks like \(\prod (x-a_{ii})\), and no other term contributes to the coefficient of \(x^{n-1}\). \(\qed\) \textbf{Definition:} A \emph{Lie Algebra} is a vector space with an operation \([\wait, \wait]: V\cross V \to V\) satisfying \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item Bilinearity, \item \([x, x] = 0\), \item The Jacobi identity \([x, [y, z]] + [y, [z, x]] + [z, [x, y]] = 0\). \end{enumerate} \emph{Examples:} \begin{enumerate} \def\labelenumi{\arabic{enumi}.} \item \(L = \liegl(n, \CC) = \) all \(n \times n\) matrices over \(\CC\) (not just the invertible ones, which instead form the \emph{group} \(GL(n, \CC)\)) with \([A, B] = AB - BA\). 
\item \(L = \liesl(n, \CC) = \theset{A \in \liegl(n, \CC) \mid \tr(A) = 0}\) with the same operation, and it can be checked that \begin{align*} \tr([A, B]) = \tr(AB - BA) = \tr(AB) - \tr(BA) = 0 .\end{align*} \end{enumerate} \begin{quote} This turns out to be a \emph{simple} Lie algebra, and simple Lie algebras over \(\CC\) can be classified using root systems and Dynkin diagrams -- this one is given by type \(A_{n-1}\). \end{quote} \hypertarget{preface}{% \section{Preface}\label{preface}} These are notes live-tex'd from a graduate Algebra course taught by Dan Nakano at the University of Georgia in Fall 2019. As such, any errors or inaccuracies are almost certainly my own. \medskip \begin{flushright} D. Zack Garza, \today \\ \currenttime \end{flushright} \newpage \newpage \section{Indices} \listoftodos[List of Todos] % Hook into amsthm environments to list them. \renewcommand{\listtheoremname}{Definitions} \listoftheorems[ignoreall,show={definition}, numwidth=3.5em] \renewcommand{\listtheoremname}{Theorems} \listoftheorems[ignoreall,show={theorem,proposition}, numwidth=3.5em] \renewcommand{\listtheoremname}{Exercises} \listoftheorems[ignoreall,show={exercise}, numwidth=3.5em] \listoffigures \printbibliography[title=Bibliography] \end{document}