\newcommand{\cat}[1]{\mathsf{#1}} \newcommand{\Sets}[0]{{\mathsf{Set}}} \newcommand{\Set}[0]{{\mathsf{Set}}} \newcommand{\sets}[0]{{\mathsf{Set}}} \newcommand{\set}{{\mathsf{Set}}} \newcommand{\Poset}[0]{\mathsf{Poset}} \newcommand{\GSets}[0]{{G\dash\mathsf{Set}}} \newcommand{\Groups}[0]{{\mathsf{Group}}} \newcommand{\Grp}[0]{{\mathsf{Grp}}} \newcommand{\cC}[0]{{\mathsf{C}}} \newcommand{\Ar}[0]{{\mathsf{Ar}}} \newcommand{\Mot}[0]{{\mathsf{Mot}}} \newcommand{\SW}[0]{{\mathsf{SW}}} \newcommand{\cof}[0]{{\mathsf{cof}}} \newcommand{\fib}[0]{{\mathsf{fib}}} % Modifiers \newcommand{\der}[0]{{\mathsf{d}}} \newcommand{\dg}[0]{{\mathsf{dg}}} \newcommand{\comm}[0]{{\mathsf{C}}} \newcommand{\pre}[0]{{\mathsf{pre}}} \newcommand{\fn}[0]{{\mathsf{fn}}} \newcommand{\smooth}[0]{{\mathsf{sm}}} \newcommand{\Aff}[0]{{\mathsf{Aff}}} \newcommand{\Ab}[0]{{\mathsf{Ab}}} \newcommand{\Add}[0]{{\mathsf{Add}}} \newcommand{\Assoc}[0]{\mathsf{Assoc}} \newcommand{\Ch}[0]{\mathsf{Ch}} \newcommand{\Coh}[0]{{\mathsf{Coh}}} \newcommand{\Comm}[0]{\mathsf{Comm}} \newcommand{\Cor}[0]{\mathsf{Cor}} \newcommand{\Corr}[0]{\mathsf{Cor}} \newcommand{\Fin}[0]{{\mathsf{Fin}}} \newcommand{\Free}[0]{\mathsf{Free}} \newcommand{\Tors}[0]{\mathsf{Tors}} \newcommand{\Perf}[0]{\mathsf{Perf}} \newcommand{\Unital}[0]{\mathsf{Unital}} \newcommand{\eff}[0]{\mathsf{eff}} \newcommand{\Dc}[0]{\mathbf{D}} \newcommand{\derivedcat}[1]{\Dc {#1}} \newcommand{\Db}[0]{ \mathsf{D}^b } \newcommand{\db}[0]{\Db} \newcommand{\bderivedcat}[1]{\Db {#1}} \newcommand{\Const}[0]{\mathsf{Const}} \newcommand{\Cx}[0]{\mathsf{Ch}} \newcommand{\Stable}[0]{\mathsf{Stab}} \newcommand{\ChainCx}[1]{\mathsf{Ch}\qty{ #1 }} \newcommand{\Vect}[0]{{ \mathsf{Vect}}} \newcommand{\kvect}[0]{{ \mathsf{Vect}\slice{k}}} \newcommand{\loc}[0]{{\mathsf{loc}}} \newcommand{\locfree}[0]{{\mathsf{locfree}}} \newcommand{\Bun}{{\mathsf{Bun}}} \newcommand\Prinbun{{ \mathsf{PrinBun}}} \newcommand{\bung}{{\mathsf{Bun}_G}} % Rings \newcommand{\Local}[0]{\mathsf{Local}} \newcommand{\Fieldsover}[1]{{ \mathsf{Fields}_{#1}}} \newcommand{\Field}[0]{\mathsf{Field}} \newcommand{\Number}[0]{\mathsf{Number}} \newcommand{\Numberfield}[0]{\Field\slice{\QQ}} \newcommand{\NF}[0]{\Numberfield} \newcommand{\Art}[0]{\mathsf{Art}} \newcommand{\Global}[0]{\mathsf{Global}} \newcommand{\Ring}[0]{\mathsf{Ring}} \newcommand{\Mon}[0]{\mathsf{Mon}} \newcommand{\CMon}[0]{\mathsf{CMon}} \newcommand{\CRing}[0]{\mathsf{CRing}} \newcommand{\DedekindDomain}[0]{\mathsf{DedekindDom}} \newcommand{\IntDomain}[0]{\mathsf{IntDom}} \newcommand{\Dom}[0]{\mathsf{Dom}} \newcommand{\Domain}[0]{\mathsf{Domain}} \newcommand{\DVR}[0]{\mathsf{DVR}} \newcommand{\Dedekind}[0]{\mathsf{Dedekind}} % Modules \newcommand{\Quat}[0]{{\mathsf{Quat}}} \newcommand{\torsors}[1]{{\mathsf{#1}\dash\mathsf{Torsors}}} \newcommand{\torsorsright}[1]{\mathsf{Torsors}\dash\mathsf{#1}} \newcommand{\torsorsleft}[1]{\mathsf{#1}\dash\mathsf{Torsors}} \newcommand{\bimod}[2]{({#1}, {#2})\dash\mathsf{biMod}} \newcommand{\bimods}[2]{({#1}, {#2})\dash\mathsf{biMod}} \newcommand{\Dmod}[0]{{ \mathcal{D}\dash\mathsf{Mod}}} \newcommand{\Mod}[0]{{\mathsf{Mod}}} \newcommand{\modsleft}[1]{ {}_{#1}\Mod} \newcommand{\modsright}[1]{ \Mod_{#1}} \newcommand{\mods}[1]{ \modsleft{#1}} \newcommand{\modr}[0]{ \modsright{R}} \newcommand{\rmod}[0]{ \modsleft{R}} \newcommand{\zmod}[0]{ \modsleft{\ZZ}} \newcommand{\qmod}[0]{ \modsleft{\QQ}} \newcommand{\kmod}[0]{ \modsleft{k}} \newcommand{\fmod}[0]{ \modsleft{ \FF }} \newcommand{\oxmods}[0]{ \mods{\OO_X}} 
\newcommand{\stmods}[1]{{\mathsf{#1}\dash\mathsf{stMod}}} \newcommand{\grmods}[1]{{\mathsf{#1}\dash\mathsf{grMod}}} \newcommand{\grMod}[0]{{\mathsf{grMod}}} \newcommand{\comods}[1]{{\mathsf{#1}\dash\mathsf{coMod}}} \newcommand{\lmod}[0]{{\mathsf{L}\dash\mathsf{Mod}}} \newcommand{\amod}[0]{{\mathsf{A}\dash\mathsf{Mod}}} \newcommand{\gmod}[0]{{\mathsf{G}\dash\mathsf{Mod}}} \newcommand{\gr}[0]{{\mathsf{gr}\,}} \newcommand{\mmod}[0]{{\dash\mathsf{Mod}}} \newcommand{\Rep}[0]{{\mathsf{Rep}}} \newcommand{\Irr}[0]{{\mathsf{Irr}}} \newcommand{\Adm}[0]{{\mathsf{Adm}}} \newcommand{\semisimp}[0]{{\mathsf{ss}}} % Vector Spaces and Bundles \newcommand{\VectBundle}[0]{{ \Bun\qty{\GL_r}}} \newcommand{\VectBundlerk}[1]{{ \Bun\qty{\GL_{#1}}}} \newcommand{\VectSp}[0]{{ \VectSp }} \newcommand{\VectBun}[0]{{ \VectBundle }} \newcommand{\VectBunrk}[1]{{ \VectBundlerk{#1}}} \newcommand{\Bung}[0]{{ \Bun\qty{G}}} % Algebras \newcommand{\Alg}[0]{ \mathsf{Alg}} \newcommand{\algs}[1]{{ {}_{#1} \Alg }} \newcommand{\Hopf}[0]{\mathsf{Hopf}} \newcommand{\alg}[0]{\Alg} \newcommand{\scalg}[0]{\mathsf{sCAlg}} \newcommand{\cAlg}[0]{{\mathsf{cAlg}}} \newcommand{\calg}[0]{\mathsf{CAlg}} \newcommand{\liegmod}[0]{{\mathfrak{g}\dash\mathsf{Mod}}} \newcommand{\liealg}[0]{{\mathsf{Lie}\dash\Alg}} \newcommand{\Lie}[0]{\mathsf{Lie}} \newcommand{\kalg}[0]{ {}_{k} \Alg } \newcommand{\kAlg}[0]{\kalg} \newcommand{\falg}[0]{ {}_{\FF} \Alg } \newcommand{\ralg}[0]{ {}_{R} \Alg } \newcommand{\rAlg}[0]{ \ralg } \newcommand{\zalg}[0]{ {}_{\ZZ} \Alg } \newcommand{\CCalg}[0]{ {}_{\\CC} \Alg } \newcommand{\dga}[0]{{\mathsf{dg\Alg}}} \newcommand{\cdga}[0]{{ \mathsf{c}\dga }} \newcommand{\dgla}[0]{{\dg\Lie\Alg }} \newcommand{\Poly}[0]{{\mathsf{Poly}}} \newcommand{\Hk}[0]{{\mathsf{Hk}}} \newcommand{\Asm}[0]{{\mathsf{Asm}}} \newcommand{\kSch}[0]{{\mathsf{Sch}_{/k}}} \newcommand{\Grpd}[0]{{\mathsf{Grpd}}} \newcommand{\inftyGrpd}[0]{{ \underset{\infty}{ \Grpd }}} \newcommand{\Algebroid}[0]{{\mathsf{Algd}}} % Schemes and Sheaves \newcommand{\Loc}[0]{\mathsf{Loc}} \newcommand{\Locsys}[0]{\mathsf{LocSys}} \newcommand{\Ringedspace}[0]{\mathsf{RingSp}} \newcommand{\RingedSpace}[0]{\mathsf{RingSp}} \newcommand{\LRS}[0]{\Loc\RingedSpace} \newcommand{\IndCoh}[0]{{\mathsf{IndCoh}}} \newcommand{\dbcoh}[0]{\mathsf{D}^b\mathsf{Coh}} \newcommand{\DbCoh}[0]{\dbcoh} \newcommand{\DCoh}[0]{\mathsf{D}\mathsf{Coh}} \newcommand{\dcoh}[0]{\DCoh} \newcommand{\QCoh}[0]{{\mathsf{QCoh}}} \newcommand{\qcoh}[0]{\QCoh} \newcommand{\Ind}[0]{{\mathsf{Ind}}} \newcommand{\Pro}[0]{{\mathsf{Pro}}} \newcommand{\Cov}[0]{{\mathsf{Cov}}} \newcommand{\sch}[0]{{\mathsf{Sch}}} \newcommand{\presh}[0]{ \underset{ \mathsf{pre}} {\mathsf{Sh}}} \newcommand{\prest}[0]{ {\underset{ \mathsf{pre}} {\mathsf{St}} }} \newcommand{\Descent}[0]{{\mathsf{Descent}}} \newcommand{\Desc}[0]{{\mathsf{Desc}}} \newcommand{\FFlat}[0]{{\mathsf{FFlat}}} \newcommand{\Perv}[0]{\mathsf{Perv}} \newcommand{\smsch}[0]{{ \smooth\Sch }} \newcommand{\Sch}[0]{{\mathsf{Sch}}} \newcommand{\Schf}[0]{{\mathsf{Schf}}} \newcommand{\Sh}[0]{{\mathsf{Sh}}} \newcommand{\St}[0]{{\mathsf{St}}} \newcommand{\Stacks}[0]{{\mathsf{St}}} \newcommand{\Var}[0]{{\mathsf{Var}}} \newcommand{\Vark}[0]{{ \Var_{/k}}} \newcommand{\kvar}[0]{{ \Var_{/k}}} \newcommand{\Open}[0]{{\mathsf{Open}}} % Homotopy \newcommand{\CW}[0]{{\mathsf{CW}}} \newcommand{\sset}[0]{{\mathsf{sSet}}} \newcommand{\sSet}[0]{{\mathsf{sSet}}} \newcommand{\ssets}[0]{\mathsf{sSet}} \newcommand{\hoTop}[0]{{\mathsf{hoTop}}} \newcommand{\hoType}[0]{{\mathsf{hoType}}} 
\newcommand{\ho}[0]{{\mathsf{ho}}} \newcommand{\SHC}[0]{{\mathsf{SHC}}} \newcommand{\SH}[0]{{\mathsf{SH}}} \newcommand{\Spaces}[0]{{\mathsf{Spaces}}} \newcommand{\GSpaces}[1]{{G\dash\mathsf{Spaces}}} \newcommand{\Spectra}[0]{{\mathsf{Sp}}} \newcommand{\Sp}[0]{{\mathsf{Sp}}} \newcommand{\Top}[0]{{\mathsf{Top}}} \newcommand{\Bord}[0]{{\mathsf{Bord}}} \newcommand{\TQFT}[0]{{\mathsf{TQFT}}} \newcommand{\Kc}[0]{{\mathsf{K^c}}} \newcommand{\triang}[0]{{\mathsf{triang}}} \newcommand{\TTC}[0]{{\mathsf{TTC}}} \newcommand{\dchrmod}{{\derivedcat{\Ch(\rmod)}}} % Infty Cats \newcommand{\Finset}[0]{{\mathsf{FinSet}}} \newcommand{\Cat}[0]{\mathsf{Cat}} \newcommand{\Fun}[0]{{\mathsf{Fun}}} \newcommand{\Kan}[0]{{\mathsf{Kan}}} \newcommand{\Monoid}[0]{\mathsf{Mon}} \newcommand{\Arrow}[0]{\mathsf{Arrow}} \newcommand{\quasiCat}[0]{{ \mathsf{quasiCat}} } \newcommand{\inftycat}[0]{{ \underset{\infty}{ \Cat} }} \newcommand{\inftycatn}[1]{{ \underset{(\infty, {#1})}{ \Cat} }} \newcommand{\core}[0]{{ \mathsf{core}}} \newcommand{\Indcat}[0]{ \mathsf{Ind}} % New? \newcommand{\Prism}[0]{\mathsf{Prism}} \newcommand{\Solid}[0]{\mathsf{Solid}} \newcommand{\WCart}[0]{\mathsf{WCart}} % Motivic \newcommand{\Torsor}[1]{{\mathsf{#1}\dash\mathsf{Torsor}}} \newcommand{\Torsorleft}[1]{{\mathsf{#1}\dash\mathsf{Torsor}}} \newcommand{\Torsorright}[1]{{\mathsf{Torsor}\dash\mathsf{#1}}} \newcommand{\Quadform}[0]{{\mathsf{QuadForm}}} \newcommand{\HI}[0]{{\mathsf{HI}}} \newcommand{\DM}[0]{{\mathsf{DM}}} \newcommand{\hoA}[0]{{\mathsf{ho}_*^{\scriptstyle \AA^1}}} \newcommand\Tw[0]{\mathsf{Tw}} \newcommand\SB[0]{\mathsf{SB}} \newcommand\CSA[0]{\mathsf{CSA}} \newcommand{\CSS}[0]{{ \mathsf{CSS}} } % Unsorted \newcommand{\FGL}[0]{\mathsf{FGL}} \newcommand{\FI}[0]{{\mathsf{FI}}} \newcommand{\CE}[0]{{\mathsf{CE}}} \newcommand{\Fuk}[0]{{\mathsf{Fuk}}} \newcommand{\Lag}[0]{{\mathsf{Lag}}} \newcommand{\Mfd}[0]{{\mathsf{Mfd}}} \newcommand{\Riem}[0]{\mathsf{Riem}} \newcommand{\Wein}[0]{{\mathsf{Wein}}} \newcommand{\gspaces}[1]{{#1}\dash{\mathsf{Spaces}}} \newcommand{\deltaring}[0]{{\delta\dash\mathsf{Ring}}} \newcommand{\terminal}[0]{{ 0_{\scriptscriptstyle \uparrow}}} \newcommand{\initial}[0]{{ \mathscr \emptyset^{\scriptscriptstyle \downarrow}}} % Universal guys \newcommand{\coeq}[0]{\operatorname{coeq}} \newcommand{\cocoeq}[0]{\operatorname{eq}} \newcommand{\dgens}[1]{\gens{\gens{ #1 }}} \newcommand{\ctz}[1]{\, {\converges{{#1} \to\infty}\longrightarrow 0} \, } \newcommand{\conj}[1]{{\overline{{#1}}}} \newcommand{\complex}[1]{{ {#1}_{\scriptscriptstyle \bullet}} } \newcommand{\cocomplex}[1]{ { {#1}^{\scriptscriptstyle \bullet}} } \newcommand{\bicomplex}[1]{{ {#1}_{\scriptscriptstyle \bullet, \bullet}} } \newcommand{\cobicomplex}[1]{ { {#1}^{\scriptscriptstyle \bullet, \bullet}} } \newcommand{\floor}[1]{{\left\lfloor #1 \right\rfloor}} \newcommand{\ceiling}[1]{{\left\lceil #1 \right\rceil}} \newcommand{\fourier}[1]{\widehat{#1}} \newcommand{\embedsvia}[1]{\xhookrightarrow{#1}} \newcommand{\openimmerse}[0]{\underset{\scriptscriptstyle O}{\hookrightarrow}} \newcommand{\weakeq}[0]{\underset{\scriptscriptstyle W}{\rightarrow}} \newcommand{\fromvia}[1]{\xleftarrow{#1}} \newcommand{\generators}[1]{\left\langle{#1}\right\rangle} \newcommand{\freeon}[1]{ \left[ {#1} \right] } \newcommand{\gens}[1]{\left\langle{#1}\right\rangle} \newcommand{\globsec}[1]{{{\Gamma}\qty{#1} }} \newcommand{\Globsec}[1]{{{\Gamma}\qty{#1} }} \newcommand{\langL}[1]{ {}^{L}{#1} } \newcommand{\modulo}[0]{{ \bigg/ }} \newcommand{\equalsbecause}[1]{\overset{#1}{=}} 
\newcommand{\congbecause}[1]{\overset{#1}{\cong}} \newcommand{\congas}[1]{\underset{#1}{\cong}} \newcommand{\isoas}[1]{\underset{#1}{\cong}} \newcommand{\pwiso}[0]{\underset{\mathrm{pw}}{\cong}} \newcommand{\addbase}[1]{{ {}_{\pt} }} \newcommand{\ideal}[1]{\mathcal{#1}} \newcommand{\adjoin}[1]{ { \left[ \scriptstyle {#1} \right] } } \newcommand{\polynomialring}[1]{ { \left[ {#1} \right] } } \newcommand{\htyclass}[1]{ { \left[ {#1} \right] } } \newcommand{\qtext}[1]{{\quad \operatorname{#1} \quad}} \newcommand{\abs}[1]{{\left\lvert {#1} \right\rvert}} \newcommand{\stack}[1]{\mathclap{\substack{ #1 }}} \newcommand{\freezmod}[1]{\ZZ\left[ {#1} \right] } \newcommand{\functionfield}[1]{ { \left( {#1} \right) } } \newcommand{\rff}[1]{ \functionfield{#1} } \newcommand{\fps}[1]{{\llbracket #1 \rrbracket }} \newcommand{\formalseries}[1]{ \fps{#1} } \newcommand{\formalpowerseries}[1]{ \fps{#1} } \newcommand{\powerseries}[1]{ \fps{#1} } \newcommand\fls[1]{(\hspace{-0.25em}( #1 )\hspace{-0.22em}) } \newcommand\laurent[1]{\fls{#1}} \newcommand\laurentseries[1]{\fls{#1}} \newcommand\lshriek[0]{{}_{!}} \newcommand\pushf[0]{{}^{*}} \newcommand{\nilrad}[1]{{\sqrt{0_{#1}} }} \newcommand{\jacobsonrad}[1]{{J ({#1}) }} \newcommand{\localize}[1]{ \left[ { \scriptstyle { {#1}\inv} } \right] } \newcommand{\primelocalize}[1]{ \left[ { \scriptstyle { { ({#1}^c) }\inv} } \right]} \newcommand{\plocalize}[1]{\primelocalize{#1}} \newcommand{\sheafify}[1]{ \left( #1 \right)^{\scriptscriptstyle \mathrm{sh}} } \newcommand{\complete}[1]{{ {}_{ \hat{#1} } }} \newcommand{\takecompletion}[1]{{ \overbrace{#1}^{\widehat{\hspace{4em}}} }} \newcommand{\pcomplete}[0]{{ {}^{ \wedge }_{p} }} \newcommand{\kv}[0]{{ k_{\hat{v}} }} \newcommand{\Lv}[0]{{ L_{\hat{v}} }} \newcommand{\twistleft}[2]{{ {}^{#1} #2 }} \newcommand{\twistright}[2]{{ #2 {}^{#1} }} \newcommand{\liesover}[1]{{ {}_{/ {#1}} }} \newcommand{\liesabove}[1]{{ {}_{/ {#1}} }} \newcommand{\slice}[1]{_{/ {#1}}} \newcommand{\coslice}[1]{_{{#1/}}} \newcommand{\quotright}[2]{ {}^{#1}\mkern-2mu/\mkern-2mu_{#2} } \newcommand{\quotleft}[2]{ {}_{#2}\mkern-.5mu\backslash\mkern-2mu^{#1} } \newcommand{\invert}[1]{{ \left[ { \scriptstyle \frac{1}{#1} } \right] }} \newcommand{\symb}[2]{{ \qty{ #1 \over #2 } }} \newcommand{\squares}[1]{{ {#1}_{\scriptscriptstyle \square} }} \newcommand{\shift}[2]{{ \Sigma^{\scriptstyle[#2]} #1 }} \newcommand\cartpower[1]{{ {}^{ \scriptscriptstyle\times^{#1} } }} \newcommand\disjointpower[1]{{ {}^{ \scriptscriptstyle\coprod^{#1} } }} \newcommand\sumpower[1]{{ {}^{ \scriptscriptstyle\oplus^{#1} } }} \newcommand\prodpower[1]{{ {}^{ \scriptscriptstyle\times^{#1} } }} \newcommand\tensorpower[2]{{ {}^{ \scriptstyle\otimes_{#1}^{#2} } }} \newcommand\tensorpowerk[1]{{ {}^{ \scriptscriptstyle\otimes_{k}^{#1} } }} \newcommand\derivedtensorpower[3]{{ {}^{ \scriptstyle {}_{#1} {\otimes_{#2}^{#3}} } }} \newcommand\smashpower[1]{{ {}^{ \scriptscriptstyle\smashprod^{#1} } }} \newcommand\wedgepower[1]{{ {}^{ \scriptscriptstyle\smashprod^{#1} } }} \newcommand\fiberpower[2]{{ {}^{ \scriptscriptstyle\fiberprod{#1}^{#2} } }} \newcommand\powers[1]{{ {}^{\cdot #1} }} \newcommand\skel[1]{{ {}^{ (#1) } }} \newcommand\transp[1]{{ \, {}^{t}{ \left( #1 \right) } }} \newcommand{\inner}[2]{{\left\langle {#1},~{#2} \right\rangle}} \newcommand{\inp}[2]{{\left\langle {#1},~{#2} \right\rangle}} \newcommand{\poisbrack}[2]{{\left\{ {#1},~{#2} \right\} }} \newcommand\tl[2]{{ #1_1, \cdots, #1_{#2} }} \newcommand\tlz[2]{{ #1_0, \cdots, #1_{#2} }} \newcommand\tsl[3]{ \ts{ {#1}_{#2}, \cdots, 
{#1}_{#3} } } \newcommand\tlset[2]{ \ts{ {#1}_{1}, \cdots, {#1}_{#2} } } \newcommand{\cofinal}[0]{\mathsf{\emptyset} } \newcommand{\final}[0]{\ts{\pt}} \newcommand{\roof}[3]{ #1 {\, \scriptstyle {}^\swarrow\, } #2 {\, \scriptstyle {}^\searrow\,} #3 } \newcommand\tmf{ \mathrm{tmf} } \newcommand\taf{ \mathrm{taf} } \newcommand\TAF{ \mathrm{TAF} } \newcommand\TMF{ \mathrm{TMF} } \newcommand\String{ \mathrm{String} } \newcommand{\BO}[0]{{\B \Orth}} \newcommand{\EO}[0]{{\mathsf{E} \Orth}} \newcommand{\BSO}[0]{{\B\SO}} \newcommand{\ESO}[0]{{\mathsf{E}\SO}} \newcommand{\BG}[0]{{\B G}} \newcommand{\EG}[0]{{\mathsf{E} G}} \newcommand{\BP}[0]{{\operatorname{BP}}} \newcommand{\BU}[0]{\B{\operatorname{U}}} \newcommand{\MO}[0]{{\operatorname{MO}}} \newcommand{\MSO}[0]{{\operatorname{MSO}}} \newcommand{\MSpin}[0]{{\operatorname{MSpin}}} \newcommand{\MSp}[0]{{\operatorname{MSpin}}} \newcommand{\MString}[0]{{\operatorname{MString}}} \newcommand{\MStr}[0]{{\operatorname{MString}}} \newcommand{\MU}[0]{{\operatorname{MU}}} \newcommand{\KO}[0]{{\operatorname{KO}}} \newcommand{\KU}[0]{{\operatorname{KU}}} \newcommand{\smashprod}[0]{\wedge} \newcommand{\ku}[0]{{\operatorname{ku}}} \newcommand{\hofib}[0]{{\operatorname{hofib}}} \newcommand{\cofib}[0]{{\operatorname{cofib}}} \newcommand{\hocofib}[0]{{\operatorname{hocofib}}} \DeclareMathOperator{\Suspendpinf}{{\Sigma_+^\infty}} \newcommand{\Loop}[0]{{\Omega}} \newcommand{\Loopinf}[0]{{\Omega}^\infty} \newcommand{\Suspend}[0]{{\Sigma}} % Names and Symbols \newcommand*\dif{\mathop{}\!\operatorname{d}} \newcommand*{\horzbar}{\rule[.5ex]{2.5ex}{0.5pt}} \newcommand*{\vertbar}{\rule[-1ex]{0.5pt}{2.5ex}} \newcommand\Fix{ \mathrm{Fix}} \newcommand\CS{ \mathrm{CS}} \newcommand\FP{ \mathrm{FP}} \newcommand\places[1]{ \mathrm{Pl}\qty{#1}} \newcommand\Ell{ \mathrm{Ell}} \newcommand\homog{ { \mathrm{homog} }} \newcommand\Kahler[0]{\operatorname{Kähler}} \newcommand\aug{\fboxsep=-\fboxrule\!\!\!\fbox{\strut}\!\!\!} \newcommand\compact[0]{\operatorname{cpt}} \newcommand\hyp[0]{{\operatorname{hyp}}} \newcommand\jan{\operatorname{Jan}} \newcommand\curl{\operatorname{curl}} \newcommand\EZ{\operatorname{EZ}} \newcommand\depth{\operatorname{depth}} \newcommand\Sim{\operatorname{Sim}} \newcommand\kbar{{ \overline{k} }} \newcommand\ksep{{ k\sep }} \newcommand\mypound{\scalebox{0.8}{\raisebox{0.4ex}{\#}}} \newcommand\rref{\operatorname{RREF}} \newcommand\RREF{\operatorname{RREF}} \newcommand{\Tatesymbol}{\operatorname{TateSymb}} \newcommand\tilt[0]{ {}^{ \flat }} \newcommand\vecc[2]{\textcolor{#1}{\textbf{#2}}} \newcommand{\Af}[0]{{\mathbf{A}}} \newcommand{\Ag}[0]{{\mathcal{A}_g}} \newcommand{\Mg}[0]{{\mathcal{M}_g}} \newcommand{\mgbar}[0]{\bar{\Mg}} \newcommand{\Mgbar}[0]{\bar{\Mg}} \newcommand{\agbar}[0]{\bar{\Ag}} \newcommand{\Ahat}[0]{\hat{ \operatorname{A}}_g } \newcommand{\Ann}[0]{\operatorname{Ann}} \newcommand{\sinc}[0]{\operatorname{sinc}} \newcommand{\Banach}[0]{\mathcal{B}} \newcommand{\Arg}[0]{\operatorname{Arg}} \newcommand{\BB}[0]{{\mathbb{B}}} \newcommand{\Betti}[0]{{\operatorname{Betti}}} \newcommand{\CC}[0]{{\mathbf{C}}} \newcommand{\CCstar}[0]{{\CC\units }} \newcommand{\cstar}[0]{{\CC\units }} \newcommand{\CF}[0]{\operatorname{CF}} \newcommand{\CH}[0]{{\operatorname{CH}}} \newcommand{\Chow}[0]{{\operatorname{Ch}}} \newcommand{\CP}[0]{{\mathbf{CP}}} \newcommand{\CY}{{ \text{CY} }} \newcommand{\Cl}[0]{ \operatorname{Cl}} \newcommand{\Crit}[0]{\operatorname{Crit}} \newcommand{\DD}[0]{{\mathbb{D}}} \newcommand{\DSt}[0]{{ \operatorname{DSt}}} 
\newcommand{\Def}{\operatorname{Def}} \newcommand{\Diffeo}[0]{{\operatorname{Diffeo}}} \newcommand{\Diff}[0]{\operatorname{Diff}} \newcommand{\Disjoint}[0]{\coprod} \newcommand{\resprod}[0]{\prod^{\res}} \newcommand{\restensor}[0]{\bigotimes^{\res}} \newcommand{\Disk}[0]{{\operatorname{Disk}}} \newcommand{\Dist}[0]{\operatorname{Dist}} \newcommand{\EE}[0]{{\mathbb{E}}} \newcommand{\EKL}[0]{{\mathrm{EKL}}} \newcommand{\vir}[0]{{\mathrm{vir}}} \newcommand{\Nil}[0]{{\mathrm{Nil}}} \newcommand{\QH}[0]{{\mathrm{QH}}} \newcommand{\AMGM}[0]{{\mathrm{AMGM}}} \newcommand{\Hasse}[0]{{\mathrm{Hasse}}} \newcommand{\resultant}[0]{{\mathrm{res}}} \newcommand{\tame}[0]{{\mathrm{tame}}} \newcommand{\primetop}[0]{{\scriptscriptstyle \mathrm{prime-to-}p}} \newcommand{\VHS}[0]{{\mathrm{VHS} }} \newcommand{\ZVHS}[0]{{ \ZZ\mathrm{VHS} }} \newcommand{\CR}[0]{{\mathrm{CR}}} \newcommand{\unram}[0]{{\scriptscriptstyle\mathrm{un}}} \newcommand{\Emb}[0]{{\operatorname{Emb}}} \newcommand{\minor}[0]{{\operatorname{minor}}} \newcommand{\Et}{\text{Ét}} \newcommand{\trace}{\operatorname{tr}} \newcommand{\Trace}{\operatorname{Trace}} \newcommand{\Kl}{\operatorname{Kl}} \newcommand{\Rel}{\operatorname{Rel}} \newcommand{\Norm}{\operatorname{Nm}} \newcommand{\Extpower}[0]{\bigwedge\nolimits} \newcommand{\Extalgebra}[0]{\cocomplex{\bigwedge}} \newcommand{\Extalg}[0]{\Extalgebra} \newcommand{\Extcomplex}[0]{\cocomplex{ \Extalgebra}} \newcommand{\Extprod}[0]{\bigwedge\nolimits} \newcommand{\Ext}{ \operatorname{Ext}} \newcommand{\FFbar}[0]{{\bar\FF}} \newcommand{\FFpn}[0]{{\FF_{p^n}}} \newcommand{\FFp}[0]{{\FF_p}} \newcommand{\FF}[0]{{ \mathbf{F} }} \newcommand{\FS}{{ \text{FS} }} \newcommand{\Fil}[0]{{\operatorname{Fil}}} \newcommand{\Flat}[0]{{\operatorname{Flat}}} \newcommand{\Fpbar}[0]{\bar{\FF_p}} \newcommand{\Fpn}[0]{{\FF_{p^n} }} \newcommand{\Fppf}[0]{\mathrm{\operatorname{Fppf}}} \newcommand{\Fp}[0]{{\FF_p}} \newcommand{\Frac}[0]{\operatorname{Frac}} \newcommand{\GF}[0]{{\mathbf{GF}}} \newcommand{\GG}[0]{{\mathbf{G}}} \newcommand{\GL}[0]{\operatorname{GL}} \newcommand{\GW}[0]{{\operatorname{GW}}} \newcommand{\Gal}[0]{{ \mathsf{Gal}}} \newcommand{\bigo}[0]{{ \mathsf{O}}} \newcommand{\Gl}[0]{\operatorname{GL}} \newcommand{\Gr}[0]{{\operatorname{Gr}}} \newcommand{\GGr}[0]{ {\mathbb{Gr}}} \newcommand{\HC}[0]{{\operatorname{HC}}} \newcommand{\HFK}[0]{\operatorname{HFK}} \newcommand{\HF}[0]{\operatorname{HF}} \newcommand{\HHom}{\mathscr{H}\kern-2pt\operatorname{om}} \newcommand{\HH}[0]{{\mathbb{H}}} \newcommand{\HP}[0]{{\operatorname{HP}}} \newcommand{\HT}[0]{{\operatorname{HT}}} \newcommand{\HZ}[0]{{H\ZZ}} \newcommand{\Hilb}[0]{\operatorname{Hilb}} \newcommand{\Homeo}[0]{{\operatorname{Homeo}}} \newcommand{\Honda}[0]{\mathrm{\operatorname{Honda}}} \newcommand{\Hsh}{{ \mathcal{H} }} \newcommand{\Id}[0]{\operatorname{Id}} \newcommand{\Intersect}[0]{\bigcap} \newcommand{\JCF}[0]{\operatorname{JCF}} \newcommand{\RCF}[0]{\operatorname{RCF}} \newcommand{\Jac}[0]{\operatorname{Jac}} \newcommand{\II}[0]{{\mathbb{I}}} \newcommand{\KK}[0]{{\mathbb{K}}} \newcommand{\KH}[0]{ \K^{\scriptscriptstyle \mathrm{H}}} \newcommand{\KMW}[0]{ \K^{\scriptscriptstyle \mathrm{MW}}} \newcommand{\KMimp}[0]{ \hat{\K}^{\scriptscriptstyle \mathrm{M}}} \newcommand{\KM}[0]{ \K^{\scriptstyle\mathrm{M}}} \newcommand{\Kah}[0]{{ \operatorname{Kähler} }} \newcommand{\LC}[0]{{\mathrm{LC}}} \newcommand{\LL}[0]{{\mathbb{L}}} \newcommand{\Log}[0]{\operatorname{Log}} \newcommand{\MCG}[0]{{\operatorname{MCG}}} \newcommand{\MM}[0]{{\mathcal{M}}} 
\newcommand{\mbar}[0]{\bar{\mathcal{M}}} \newcommand{\MW}[0]{\operatorname{MW}} \newcommand{\Mat}[0]{\operatorname{Mat}} \newcommand{\NN}[0]{{\mathbb{N}}} \newcommand{\NS}[0]{{\operatorname{NS}}} \newcommand{\OO}[0]{{\mathcal{O}}} \newcommand{\OP}[0]{{\mathbb{OP}}} \newcommand{\OX}[0]{{\mathcal{O}_X}} \newcommand{\Obs}{\operatorname{Obs}} \newcommand{\obs}{\operatorname{obs}} \newcommand{\Ob}[0]{{\operatorname{Ob}}} \newcommand{\Op}[0]{{\operatorname{Op}}} \newcommand{\Orb}[0]{{\mathrm{Orb}}} \newcommand{\Conj}[0]{{\mathrm{Conj}}} \newcommand{\Orth}[0]{{\operatorname{O}}} \newcommand{\PD}[0]{\mathrm{PD}} \newcommand{\PGL}[0]{\operatorname{PGL}} \newcommand{\GU}[0]{\operatorname{GU}} \newcommand{\PP}[0]{{\mathbf{P}}} \newcommand{\PSL}[0]{{\operatorname{PSL}}} \newcommand{\Pic}[0]{\operatorname{Pic}} \newcommand{\Num}[0]{\operatorname{Num}} \newcommand{\Pin}[0]{{\operatorname{Pin}}} \newcommand{\Places}[0]{{\operatorname{Places}}} \newcommand{\Presh}[0]{\presh} \newcommand{\QHB}[0]{\operatorname{QHB}} \newcommand{\PHS}[0]{\operatorname{PHS}} \newcommand{\QHS}[0]{\mathbf{Q}\kern-0.5pt\operatorname{HS}} \newcommand{\QQpadic}[0]{{ \QQ_p }} \newcommand{\ZZelladic}[0]{{ \ZZ_\ell }} \newcommand{\QQ}[0]{{\mathbf{Q}}} \newcommand{\QQbar}[0]{{ \bar{ \mathbf{Q} } }} \newcommand{\Quot}[0]{\operatorname{Quot}} \newcommand{\RP}[0]{{\mathbf{RP}}} \newcommand{\RR}[0]{{\mathbf{R}}} \newcommand{\Rat}[0]{\operatorname{Rat}} \newcommand{\Reg}[0]{\operatorname{Reg}} \newcommand{\Ric}[0]{\operatorname{Ric}} \newcommand{\SF}[0]{\operatorname{SF}} \newcommand{\SL}[0]{{\operatorname{SL}}} \newcommand{\SNF}[0]{\mathrm{SNF}} \newcommand{\SO}[0]{{\operatorname{SO}}} \newcommand{\Symp}[0]{{\operatorname{Sp}}} \newcommand{\SU}[0]{{\operatorname{SU}}} \newcommand{\F}[0]{{\operatorname{F}}} \newcommand{\Sgn}[0]{{ \Sigma_{g, n} }} \newcommand{\Sm}[0]{{\operatorname{Sm}}} \newcommand{\SpSp}[0]{{\mathbb{S}}} \newcommand{\Spec}[0]{\operatorname{Spec}} \newcommand{\Spf}[0]{\operatorname{Spf}} \newcommand{\Spc}[0]{\operatorname{Spc}} \newcommand{\spc}[0]{\operatorname{Spc}} \newcommand{\Spinc}[0]{\mathrm{Spin}^{{ \scriptscriptstyle \mathbf C} }} \newcommand{\Spin}[0]{{\operatorname{Spin}}} \newcommand{\Mp}[0]{{\operatorname{Mp}}} \newcommand{\Herm}[0]{{\mathsf{Herm}}} \newcommand{\coinv}[0]{{\operatorname{coinv}}} \newcommand{\Sq}[0]{\operatorname{Sq}} \newcommand{\Stab}[0]{{\operatorname{Stab}}} \newcommand{\stable}[0]{\mathrm{st}} \newcommand{\unstable}[0]{\mathrm{unst}} \newcommand{\Sum}[0]{ \sum } \newcommand{\Syl}[0]{{\operatorname{Syl}}} \newcommand{\Sym}[0]{\operatorname{Sym}} \newcommand{\Tor}[0]{\operatorname{Tor}} \newcommand{\Homcx}[0]{\operatorname{Hom}^{\bullet}} \newcommand{\Tr}[0]{\operatorname{Tr}} \newcommand{\Ug}[0]{{\mathcal{U}(\mathfrak{g}) }} \newcommand{\Uh}[0]{{\mathcal{U}(\mathfrak{h}) }} \newcommand{\Union}[0]{\bigcup} \newcommand{\U}[0]{{\operatorname{U}}} \newcommand{\Wedge}[0]{\bigwedge} \newcommand{\Wittvectors}[0]{{\mathbb{W}}} \newcommand{\ZHB}[0]{\operatorname{ZHB}} \newcommand{\ZHS}[0]{\ZZ\operatorname{HS}} \newcommand{\ZZG}[0]{{\ZZ G}} \newcommand{\ZZH}[0]{{\ZZ H}} \newcommand{\ZZlocal}[1]{{ \ZZ_{\hat{#1}} }} \newcommand{\plocal}[0]{{ \scriptsize {}_{ \localize{p} } }} \newcommand{\ZZpadic}[0]{{ \ZZ_{\hat p} }} \newcommand{\ZZladic}[0]{{ \ZZ_{\hat \ell} }} \newcommand{\ZZpcomplete}[0]{{ \ZZpadic }} \newcommand{\ZZplocal}[0]{{ L_p \ZZ }} \newcommand{\ZZprof}[0]{{ \hat{\ZZ} }} \newcommand{\QQladic}[0]{{ \QQ_\ell }} \newcommand{\CCpadic}[0]{{ \CC_p }} \newcommand{\ZZ}[0]{{\mathbf{Z}}} 
\newcommand{\ZZbar}[0]{{ \bar{ \ZZ } }} \newcommand{\ZZhat}[0]{{ \widehat{ \ZZ } }} \newcommand{\Zar}[0]{{\mathrm{Zar}}} \newcommand{\ZpZ}[0]{\ZZ/p} \newcommand{\abuts}[0]{\Rightarrow} \newcommand{\ab}[0]{{\operatorname{ab}}} \newcommand{\Rees}[0]{{\operatorname{Rees}}} \newcommand{\actsonl}[0]{\curvearrowleft} \newcommand{\actson}[0]{\curvearrowright} \newcommand{\adjoint}[0]{\dagger} \newcommand{\adj}[0]{\operatorname{adj}} \newcommand{\ad}[0]{ { \operatorname{ad}}} \newcommand{\Ad}[0]{{ \operatorname{Ad} }} \newcommand{\afp}[0]{A_{/\FF_p}} \newcommand{\annd}[0]{{\operatorname{ and }}} \newcommand{\ann}[0]{\operatorname{Ann}} \newcommand{\csch}[0]{\operatorname{csch}} \newcommand{\arccot}[0]{\operatorname{arccot}} \newcommand{\arccsc}[0]{\operatorname{arccsc}} \newcommand{\arcsec}[0]{\operatorname{arcsec}} \newcommand{\bP}[0]{\operatorname{bP}} \newcommand{\barz}{\bar{z}} \newcommand{\bbm}[0]{{\mathbb{M}}} \newcommand{\bd}[0]{{\del}} \newcommand{\bigast}[0]{{\mathop{\text{\Large $\ast$}}}} \newcommand{\bmgn}[0]{{ \bar{\mathcal{M}}_{g, n} }} \newcommand{\bundle}[1]{\mathcal{#1}} \newcommand{\by}[0]{\times} \newcommand{\candim}[0]{\operatorname{candim}} \newcommand{\chp}[0]{\operatorname{ch. p}} \newcommand{\ch}[0]{\operatorname{ch}} \newcommand{\AJ}[0]{\operatorname{AJ}} \newcommand{\chr}[0]{\operatorname{ch}} \newcommand{\characteristic}[0]{\operatorname{ch}} \newcommand{\character}[0]{\operatorname{ch}} \newcommand{\cl}[0]{{ \operatorname{cl}}} \newcommand{\codim}[0]{\operatorname{codim}} \newcommand{\cohdim}[0]{\operatorname{cohdim}} \newcommand{\coim}[0]{\operatorname{coim}} \newcommand{\coker}[0]{\operatorname{coker}} \newcommand{\cok}[0]{\operatorname{coker}} \newcommand{\cone}[0]{\operatorname{cone}} \newcommand{\conjugate}[1]{{\overline{{#1}}}} \newcommand{\connectsum}[0]{\mathop{ \Large\mypound }} \newcommand{\const}[0]{{\operatorname{const.}}} \newcommand{\converges}[1]{\overset{#1}} \newcommand{\convergesto}[1]{\overset{#1}\too} \newcommand{\convolve}[0]{\ast} \newcommand{\correspond}[1]{\theset{\substack{#1}}} \newcommand{\covers}[0]{\rightrightarrows} \newcommand{\cocovers}[0]{\leftleftarrows} \newcommand{\coveredby}[0]{\leftleftarrows} \newcommand{\projresolve}[0]{\rightrightarrows} \newcommand{\injresolve}[0]{\leftleftarrows} \newcommand{\covol}[0]{\operatorname{covol}} \newcommand{\cpt}[0]{{ \operatorname{compact} }} \newcommand{\crit}[0]{\operatorname{crit}} \newcommand{\cross}[0]{\times} \newcommand{\dR}[0]{\mathrm{dR}} \newcommand{\dV}{\,dV} \newcommand{\dash}[0]{{\hbox{-}}} \newcommand{\da}[0]{\coloneqq} \newcommand{\ddd}[2]{{\frac{d #1}{d #2}\,}} \newcommand{\ddim}[0]{\operatorname{ddim}} \newcommand{\area}[0]{\operatorname{area}} \newcommand{\ddt}{\tfrac{\dif}{\dif t}} \newcommand{\ddx}{\tfrac{\dif}{\dif x}} \newcommand{\dd}[2]{{\frac{\partial #1}{\partial #2}\,}} \newcommand{\definedas}[0]{\coloneqq} \newcommand{\del}[0]{{\partial}} \newcommand{\diagonal}[1]{\Delta} \newcommand{\Diagonal}[1]{\Delta} \newcommand{\diag}[0]{\operatorname{diag}} \newcommand{\diam}[0]{{\operatorname{diam}}} \newcommand{\diff}[0]{\operatorname{Diff}} \newcommand{\discriminant}[0]{{\Delta}} \newcommand{\disc}[0]{{\operatorname{disc}}} \newcommand{\disjoint}[0]{{\textstyle\coprod}} \newcommand{\dist}[0]{\operatorname{dist}} \newcommand{\dlog}[0]{\operatorname{dLog}} \newcommand{\logd}[0]{{ \del^{\scriptsize \log} }} \newcommand{\dom}[0]{\operatorname{dom}} \newcommand{\BM}[0]{{\operatorname{BM}}} \newcommand{\dual}[0]{ {}^{ \vee }} \newcommand{\shriek}[0]{ { ! 
}} \newcommand{\dmu}{\,d\mu} \newcommand{\dr}{\,dr} \newcommand{\dtau}{\,d\tau} \newcommand{\ds}{\,ds} \newcommand{\dtheta}{\,d\theta} \newcommand{\dt}{\,dt} \newcommand{\du}{\,du} \newcommand{\dw}{\,dw} \newcommand{\dxi}{\,d\xi} \newcommand{\dx}{\,dx} \newcommand{\dm}{\,dm} \newcommand{\dA}{\,dA} \newcommand{\dy}{\,dy} \newcommand{\dn}{\,dn} \newcommand{\dalpha}{\,d\alpha} \newcommand{\dzbar}{\,d\bar{z}} \newcommand{\dzeta}{\,d\zeta} \newcommand{\dz}{\,dz} \newcommand{\drho}{\,d\rho} \newcommand{\embeds}[0]{\hookrightarrow} \newcommand{\eo}[0]{{\operatorname{eo}}} \newcommand{\eps}[0]{{\varepsilon}} \newcommand{\essdim}[0]{\operatorname{essdim}} \newcommand{\et}{\text{ét}} \newcommand{\eul}[0]{{\operatorname{eul}}} \newcommand{\evalfrom}[0]{\Big|} \newcommand{\ext}{\operatorname{Ext}} \newcommand{\ff}[0]{\operatorname{ff}} \newcommand{\fppf}[0]{{\operatorname{fppf}}} \newcommand{\fpqc}[0]{{\operatorname{fpqc}}} \newcommand{\fp}[0]{{ \operatorname{fp} }} \newcommand{\fqr}[0]{{\FF_{q^r}}} \newcommand{\fq}[0]{{\FF_{q}}} \newcommand{\fqbar}[0]{\bar{\FF_{q}}} \newcommand{\freeprod}[0]{\ast} \newcommand{\from}[0]{\leftarrow} \newcommand{\gal}[0]{{ \operatorname{Gal}}} \newcommand{\gl}[0]{{\mathfrak{gl}}} \newcommand{\gp}[0]{ {\operatorname{gp} }} \newcommand{\grad}[0]{\operatorname{grad}} \newcommand{\graded}[0]{\operatorname{gr}} \newcommand{\grdim}[0]{{\operatorname{gr\,dim}}} \newcommand{\height}[0]{\operatorname{ht}} \newcommand{\homotopic}[0]{\simeq} \newcommand{\id}[0]{\operatorname{id}} \newcommand{\im}[0]{\operatorname{im}} \newcommand{\decreasesto}[0]{\searrow} \newcommand{\increasesto}[0]{\nearrow} \newcommand{\injectivelim}[0]{\varinjlim} \newcommand{\injects}[0]{\hookrightarrow} \newcommand{\interior}[0]{^\circ} \newcommand{\intersect}[0]{\cap} \newcommand{\Todd}[0]{\operatorname{Td}} \newcommand{\into}[0]{\to} \newcommand{\inverselim}[0]{\varprojlim} \newcommand{\inv}[0]{^{-1}} \newcommand{\ip}[2]{{\left\langle {#1},~{#2} \right\rangle}} \newcommand{\kG}[0]{{kG}} \newcommand{\kfq}[0]{K_{/\FF_q}} \newcommand{\kk}[0]{{\mathbf{k}}} \newcommand{\ko}[0]{{\operatorname{ko}}} \newcommand{\krulldim}[0]{\operatorname{krulldim}} \newcommand{\ks}[0]{\operatorname{ks}} \newcommand{\kxn}[0]{k[x_1, \cdots, x_{n}]} \newcommand{\kxnz}[0]{k[x_0, \cdots, x_{n}]} \newcommand{\kx}[1]{k[x_1, \cdots, x_{#1}]} \newcommand{\lci}[0]{\mathrm{lci}} \newcommand{\SC}[0]{\mathrm{SC}} \newcommand{\tb}[0]{\mathrm{tb}} \newcommand{\lcm}[0]{\operatorname{lcm}} % Lie theory \newcommand{\liealgk}[0]{{ \liealg_{/k} }} \newcommand{\liea}[0]{{\mathfrak{a}}} \newcommand{\lieb}[0]{{\mathfrak{b}}} \newcommand{\mfe}[0]{{\mathfrak{e}}} \newcommand{\lied}[0]{{\mathfrak{d}}} \newcommand{\lief}[0]{{\mathfrak{f}}} \newcommand{\liegl}[0]{{\mathfrak{gl}}} \newcommand{\liep}[0]{{\mathfrak{p}}} \newcommand{\liesu}[0]{{\mathfrak{su}}} \newcommand{\lieg}[0]{{\mathfrak{g}}} \newcommand{\lieh}[0]{{\mathfrak{h}}} \newcommand{\liel}[0]{{\mathfrak{l}}} \newcommand{\lien}[0]{{\mathfrak{n}}} \newcommand{\lieo}[0]{{\mathfrak{o}}} \newcommand{\lieq}[0]{{\mathfrak{q}}} \newcommand{\lier}[0]{{\mathfrak{r}}} \newcommand{\lies}[0]{{\mathfrak{s}}} \newcommand{\liesl}[0]{{\mathfrak{sl}}} \newcommand{\lieso}[0]{{\mathfrak{so}}} \newcommand{\liesp}[0]{{\mathfrak{sp}}} \newcommand{\liet}[0]{{\mathfrak{t}}} \newcommand{\lieu}[0]{{\mathfrak{u}}} \newcommand{\lieW}[0]{{\mathfrak{W}}} \newcommand{\liey}[0]{{\mathfrak{y}}} \newcommand{\mfX}[0]{{\mathfrak{X}}} \newcommand{\mfS}[0]{{\mathfrak{S}}} \newcommand{\gitquot}{{ \mathbin{/\mkern-6mu/}}} 
\newcommand{\modmod}{\gitquot} \newcommand{\htyquot}{\gitquot} \newcommand{\lk}[0]{\operatorname{lk}} \newcommand{\rot}[0]{\operatorname{rot}} \newcommand{\mTHH}[0]{{\operatorname{THH}}} \newcommand{\mHH}[0]{{\operatorname{HH}}} \newcommand{\TCH}[0]{{\operatorname{TCH}}} \newcommand{\TC}[0]{{\operatorname{TC}}} \newcommand{\THC}[0]{{\operatorname{THC}}} \newcommand{\THoH}[0]{{\operatorname{THH}}} \newcommand{\HoH}[0]{{\operatorname{HH}}} \newcommand{\TP}[0]{{\operatorname{TP}}} \newcommand{\TT}[0]{{\mathbb{T}}} \newcommand{\mapscorrespond}[2]{\mathrel{\operatorname*{\rightleftharpoons}_{#2}^{#1}}} \newcommand{\mapstofrom}[0]{\rightleftharpoons} \newcommand{\ofrom}[0]{\overset{\circ}{\leftarrow}} \newcommand{\oto}[0]{\overset{\circ}{\rightarrow}} \newcommand{\wdot}[0]{{ \mathsf{w}_{\cdot} }} \newcommand{\sdot}[0]{{ \mathsf{S}_{\cdot} }} \newcommand{\mapstovia}[1]{\xmapsto{#1}} \newcommand{\mapsvia}[1]{\xrightarrow{#1}} \newcommand{\torational}[0]{\dashrightarrow} \newcommand{\birational}[0]{\overset{\sim}{\torational}} \newcommand{\birationaliso}[0]{\overset{\sim}{\torational}} \newcommand{\sbirational}[0]{\overset{\sim_{ \stab} }{\torational}} \newcommand{\isomorphic}{{ \, \mapsvia{\sim}\, }} \newcommand{\iso}{ \isomorphic } \newcommand{\qiso}{ \homotopic } \newcommand{\isovia}[1]{\underset{#1}{\iso}} \newcommand{\isoin}[1]{\overset{#1}{\iso}} \newcommand{\injectsvia}[1]{\xhookrightarrow{#1}} \newcommand{\injectsfrom}[0]{\hookleftarrow} \newcommand{\embedsdense}[0]{{ \underset{ {\scriptscriptstyle\mathrm{dense}} }{\hookrightarrow} }} \newcommand{\injectsfromvia}[1]{\xhookleftarrow{#1}} \newcommand{\maps}[0]{\operatorname{Maps}} \newcommand{\mat}[0]{\operatorname{Mat}} \newcommand{\maxspec}[0]{{\operatorname{maxSpec}}} \newcommand{\mcTop}[0]{\mathcal{T}\mathsf{op}} \newcommand{\mca}[0]{{\mathcal{A}}} \newcommand{\mcb}[0]{{\mathcal{B}}} \newcommand{\mcc}[0]{{\mathcal{C}}} \newcommand{\mcd}[0]{{\mathcal{D}}} \newcommand{\mce}[0]{{\mathcal{E}}} \newcommand{\mcf}[0]{{\mathcal{F}}} \newcommand{\mcg}[0]{{\mathcal{G}}} \newcommand{\mch}[0]{{\mathcal{H}}} \newcommand{\mci}[0]{{\mathcal{I}}} \newcommand{\mcj}[0]{{\mathcal{J}}} \newcommand{\mck}[0]{{\mathcal{K}}} \newcommand{\mcl}[0]{{\mathcal{L}}} \newcommand{\mcm}[0]{{\mathcal{M}}} \newcommand{\mcn}[0]{{\mathcal{N}}} \newcommand{\mco}[0]{{\mathcal{O}}} \newcommand{\mcp}[0]{{\mathcal{P}}} \newcommand{\mcr}[0]{{\mathcal{R}}} \newcommand{\mcs}[0]{{\mathcal{S}}} \newcommand{\mct}[0]{{\mathcal{T}}} \newcommand{\mcu}[0]{{\mathcal{U}}} \newcommand{\mcv}[0]{{\mathcal{V}}} \newcommand{\mcw}[0]{{\mathcal{W}}} \newcommand{\mcx}[0]{{\mathcal{X}}} \newcommand{\mcX}[0]{{\mathcal{X}}} \newcommand{\mcy}[0]{{\mathcal{Y}}} \newcommand{\mcz}[0]{{\mathcal{Z}}} \newcommand{\mfa}[0]{{\mathfrak{a}}} \newcommand{\mfb}[0]{{\mathfrak{b}}} \newcommand{\mfc}[0]{{\mathfrak{c}}} \newcommand{\mff}[0]{{\mathfrak{f}}} \newcommand{\mfi}[0]{{\mathfrak{I}}} \newcommand{\mfh}[0]{{\mathfrak{h}}} \newcommand{\mfm}[0]{{\mathfrak{m}}} \newcommand{\mfn}[0]{{\mathfrak{n}}} \newcommand{\mfp}[0]{{\mathfrak{p}}} \newcommand{\mfo}[0]{{\mathfrak{o}}} \newcommand{\mfq}[0]{{\mathfrak{q}}} \newcommand{\mfr}[0]{{\mathfrak{r}}} \newcommand{\mfs}[0]{{\mathfrak{s}}} \newcommand{\mfy}[0]{{\mathfrak{Y}}} \newcommand{\mfx}[0]{{\mathfrak{X}}} \newcommand{\mg}[0]{{ \mathcal{M}_{g} }} \newcommand{\mgn}[0]{{ \mathcal{M}_{g, n} }} \newcommand{\Mgn}[0]{{ \mathcal{M}_{g, n} }} \newcommand{\Mell}[0]{{ \mathcal{M}_{\mathrm{ell}} }} \newcommand{\cusp}[0]{{ \mathrm{cusp} }} \newcommand{\Noeth}[0]{{ \mathrm{Noeth} }} 
\newcommand{\minpoly}[0]{{\operatorname{minpoly}}} \newcommand{\mltext}[1]{\left\{\begin{array}{c}#1\end{array}\right\}} \newcommand{\mm}[0]{{\mathfrak{m}}} \newcommand{\mot}[0]{{ \mathrm{mot}}} \newcommand{\cell}[0]{{ \mathrm{cell}}} \newcommand{\mspec}[0]{\operatorname{mSpec}} \newcommand{\mproj}[0]{\operatorname{mProj}} \newcommand{\ms}[0]{\xrightarrow{\sim}} \newcommand{\multinomial}[1]{\left(\!\!{#1}\!\!\right)} \newcommand{\mult}[0]{{\operatorname{mult}}} \newcommand{\mveq}[0]{{\mapsvia{\sim}}} \newcommand{\mviso}[0]{{\mapsvia{\sim}}} \newcommand{\nd}[0]{\operatorname{nd}} \newcommand{\nil}[0]{{\operatorname{nil}}} \newcommand{\nonzero}[0]{^{\bullet}} \newcommand{\normalneq}{\mathrel{\reflectbox{$\trianglerightneq$}}} \newcommand{\normal}[0]{{~\trianglelefteq~}} \newcommand{\norm}[1]{{\left\lVert {#1} \right\rVert}} \newcommand{\normm}[1]{{\left\lVert \left\lVert {#1} \right\rVert\right\rVert}} \newcommand{\notimplies}[0]{\centernot\implies} \newcommand{\onto}[0]{\twoheadhthtarrow} \newcommand{\open}[1]{\overset{\circ}{#1}} \newcommand{\op}[0]{^{\operatorname{op}}} \newcommand{\ord}[0]{{\operatorname{Ord}}} \newcommand{\order}[0]{{\operatorname{Ord}}} \newcommand{\oriented}[0]{{ \operatorname{oriented} }} \newcommand{\orr}[0]{{\operatorname{ or }}} \newcommand{\padic}[0]{p\dash\text{adic}} \newcommand{\pic}[0]{{\operatorname{Pic}}} \newcommand{\pnorm}[2]{{\left\lVert {#1} \right\rVert}_{#2}} \newcommand{\poly}[0]{\mathrm{poly}} \newcommand{\prim}[0]{{\operatorname{prim}}} \newcommand{\projectivelim}[0]{\varprojlim} \newcommand{\pr}[0]{{\operatorname{pr}}} \newcommand{\pt}[0]{{\operatorname{pt}}} \newcommand{\qc}[0]{{\operatorname{qc}}} \newcommand{\qst}[0]{{\quad \operatorname{such that} \quad}} \newcommand{\rank}[0]{\operatorname{rank}} \newcommand{\realpart}[1]{{\mathcal{Re}({#1})}} \newcommand{\red}[0]{{ \text{red} }} \newcommand{\reg}[0]{\mathrm{reg}} \newcommand{\reldim}[0]{\operatorname{reldim}} \newcommand{\hilbdim}[0]{\operatorname{hilbdim}} \newcommand{\gendim}[0]{\operatorname{gendim}} \newcommand{\restrictionof}[2]{ {\left.{{#1}} \right|_{{#2}} }} \newcommand{\rk}[0]{{\operatorname{rank}}} \newcommand{\rotate}[2]{{\style{display: inline-block; transform: rotate(#1deg)}{#2}}} \newcommand{\ro}[2]{{ \left.{{#1}} \right|_{{#2}} }} \newcommand{\selfmap}[0]{{\circlearrowleft}} \newcommand{\semidirect}[0]{\rtimes} \newcommand{\wreath}[0]{\wr} \newcommand{\size}[0]{{\sharp}} \newcommand{\sep}[0]{{ {}^{ \operatorname{sep} } }} \newcommand{\sgn}[0]{\operatorname{sgn}} \newcommand{\shom}{ {\mathcal{H}}\kern-0.5pt{\operatorname{om}}} \newcommand{\signature}[0]{\operatorname{sig}} \newcommand{\sign}[0]{\operatorname{sign}} \newcommand{\Sing}[0]{{\operatorname{Sing}}} \newcommand{\sing}[0]{{\mathrm{sing}}} \newcommand{\Pet}[0]{{\mathrm{Pet}}} \newcommand{\CM}[0]{{\mathrm{CM}}} \newcommand{\slope}[0]{{\mathrm{slope}}} \newcommand{\smpt}[1]{\setminus\theset{#1}} \newcommand{\smts}[1]{\setminus\theset{ #1 }} \newcommand{\smz}[0]{\setminus\theset{0}} \newcommand{\sm}[0]{\setminus} \newcommand{\spec}[0]{\operatorname{Spec}} \newcommand{\stab}[0]{{\operatorname{Stab}}} \newcommand{\stirlingfirst}[2]{\genfrac{[}{]}{0pt}{}{#1}{#2}} \newcommand{\stirling}[2]{\genfrac\{\}{0pt}{}{#1}{#2}} \newcommand{\surjectsvia}[2][]{ \xrightarrow[#1]{#2} { \mathrel{\mkern-16mu}\rightarrow }\, } \newcommand{\surjects}[0]{\twoheadrightarrow} \newcommand{\syl}[0]{{\operatorname{Syl}}} \newcommand{\sym}[0]{\operatorname{Sym}^*} \newcommand{\Cox}[0]{\operatorname{Cox}} \newcommand{\Eff}[0]{\operatorname{Eff}} 
\newcommand{\Symalg}[0]{\sym} \newcommand{\symalg}[0]{\sym} \newcommand{\td}[0]{\mathrm{td}} \newcommand{\Li}[0]{\mathrm{Li}} \newcommand{\VMHS}[0]{\mathrm{VMHS}} \newcommand{\MHS}[0]{\mathrm{MHS}} \newcommand{\Tensor}[0]{\bigotimes} \newcommand{\tensor}[0]{\otimes} \newcommand{\boxtensor}[0]{\boxtimes} \newcommand{\Ltensor}[0]{ \overset{\mathbb{L}}{ \otimes}} \newcommand{\dtensor}[0]{ \overset{\mathbb{L}}{ \otimes}} \newcommand{\tgn}[0]{{ \mathcal{T}_{g, n} }} \newcommand{\theset}[1]{\left\{{#1}\right\}} \newcommand{\thetaref}[0]{{ \theta_{\mathrm{Ref} } }} \newcommand{\thevector}[1]{{\left[ {#1} \right]}} \newcommand{\thinrank}[0]{T_n\dash\operatorname{rank}} \newcommand{\too}[1]{{\xrightarrow{#1}}} \newcommand{\tors}[0]{{\operatorname{tors}}} \newcommand{\tor}[0]{\operatorname{Tor}} \newcommand{\transverse}[0]{\pitchfork} \newcommand{\trdeg}[0]{\operatorname{trdeg}} \newcommand{\trianglerightneq}{\mathrel{\ooalign{\raisebox{-0.5ex}{\reflectbox{\rotatebox{90}{$\nshortmid$}}}\cr$\triangleright$\cr}\mkern-3mu}} \newcommand{\tr}[0]{{\mathrm{tr}}} \newcommand{\tspt}[0]{{\{\operatorname{pt}\}}} \newcommand{\ts}[1]{\left\{{#1}\right\}} \newcommand{\tv}[1]{{\left[ {#1} \right]}} \newcommand{\txand}[0]{{\operatorname{ and }}} \newcommand{\txor}[0]{{\operatorname{ or }}} \newcommand{\type}[0]{{\operatorname{type}}} \newcommand{\uniformlyconverges}[0]{\rightrightarrows} \newcommand{\union}[0]{\cup} \newcommand{\unital}[0]{{\operatorname{unital}}} \newcommand{\units}[0]{^{\times}} \newcommand{\up}[0]{\uparrow} \newcommand{\vhat}[1]{\widehat{ \vector{#1} }} \newcommand{\vol}[0]{\operatorname{vol}} \newcommand{\wait}[0]{{-}} \newcommand{\wt}[0]{{\operatorname{wt}}} \newcommand{\zar}[0]{{\mathrm{zar}}} \newcommand{\zbar}{\bar{z}} \newcommand{\zlnz}[0]{\ZZ/\ell^n\ZZ} \newcommand{\zlz}[0]{\ZZ/\ell\ZZ} \newcommand{\znz}[0]{\ZZ/n\ZZ} \newcommand{\zpz}[0]{\ZZ/p\ZZ} \newcommand{\divergence}[0]{{ \nabla\cdot }} \newcommand{\normalizer}[0]{{ N }} \newcommand{\fracId}[0]{{ \ddot{\Id} }} \newcommand{\Ord}[0]{{ \mathrm{Ord} }} \newcommand{\join}[0]{{ \ast }} \renewcommand{\AA}[0]{{\mathbf{A}}} \renewcommand{\SS}[0]{{\mathbb{S}}} \renewcommand{\ss}[0]{{\mathrm{ss}}} \renewcommand{\bar}[1]{\overline{#1}} \renewcommand{\det}{\operatorname{det}} \renewcommand{\div}[0]{\operatorname{Div}} \newcommand{\Div}[0]{\operatorname{Div}} \newcommand{\CDiv}[0]{\operatorname{CDiv}} \renewcommand{\hat}[1]{\widehat{#1}} \renewcommand{\labelitemiii}{$\diamondsuit$} \renewcommand{\labelitemiv}{$\diamondsuit$} \renewcommand{\mid}[0]{\mathrel{\Big|}} \renewcommand{\mod}{\operatorname{mod}} \renewcommand{\qed}[0]{\hfill\blacksquare} \renewcommand{\too}[0]{\longrightarrow} \renewcommand{\vector}[1]{\mathbf{#1}} \newcommand{\sheafhom}[0]{\mathop{\mathcal{H}\! 
\mathit{om}}} % Declare math operators \DeclareMathOperator*{\spanof}{span} \DeclareMathOperator*{\Mor}{Mor} \DeclareMathOperator*{\Sec}{Sec} \DeclareMathOperator*{\supp}{supp} \DeclareMathOperator*{\eq}{=} \DeclareMathOperator*{\hocolim}{hocolim} \DeclareMathOperator{\Index}{Index} \DeclareMathOperator{\aut}{Aut} \DeclareMathOperator{\Aut}{Aut} \DeclareMathOperator{\Inn}{Inn} \DeclareMathOperator{\Out}{Out} \DeclareMathOperator{\BiHol}{BiHol} \DeclareMathOperator{\Br}{Br} \DeclareMathOperator{\Curv}{Curv} \DeclareMathOperator{\Deck}{Deck} \DeclareMathOperator{\Der}{Der} \DeclareMathOperator{\Exists}{\exists} \DeclareMathOperator{\Forall}{\forall} \DeclareMathOperator{\Forget}{Forget} \DeclareMathOperator{\Frame}{Frame} \DeclareMathOperator{\Fr}{Fr} \DeclareMathOperator{\Griff}{Griff} \DeclareMathOperator{\Hol}{Hol} \DeclareMathOperator{\Ld}{{\mathbb{L} }} \DeclareMathOperator*{\Map}{Maps} \DeclareMathOperator*{\Maps}{Maps} \DeclareMathOperator{\OFrame}{OFrame} \DeclareMathOperator{\Prin}{Prin} \DeclareMathOperator{\Cart}{Cart} \DeclareMathOperator{\Proj}{Proj} \DeclareMathOperator{\RHom}{\mathbb{R}Hom} \DeclareMathOperator{\Rad}{Rad} \DeclareMathOperator{\Rd}{{\mathbb{R} }} \DeclareMathOperator{\Res}{Res} \DeclareMathOperator{\Symb}{Symb} \DeclareMathOperator{\Taut}{Taut} \DeclareMathOperator{\Th}{Th} \DeclareMathOperator{\triv}{triv} \DeclareMathOperator{\Triv}{Triv} \DeclareMathOperator{\UFrame}{UFrame} \DeclareMathOperator{\Isom}{Isom} \DeclareMathOperator{\codom}{codom} \DeclareMathOperator{\coh}{coh} \DeclareMathOperator{\coinfl}{coinf} \DeclareMathOperator{\colspace}{colspace} \DeclareMathOperator{\cores}{coRes} \DeclareMathOperator{\hd}{Head} \DeclareMathOperator{\hilb}{Hilb} \DeclareMathOperator{\infl}{inf} \DeclareMathOperator{\len}{len} \DeclareMathOperator{\length}{length} \DeclareMathOperator{\nullity}{nullspace} \DeclareMathOperator{\nullspace}{nullspace} \DeclareMathOperator{\per}{per} \DeclareMathOperator{\prin}{prin} \DeclareMathOperator{\projection}{Proj} \DeclareMathOperator{\proj}{proj} \DeclareMathOperator{\range}{range} \DeclareMathOperator{\rowspace}{rowspace} \DeclareMathOperator{\soc}{Soc} \DeclareMathOperator{\submfds}{SubMfds} \newcommand{\st}[0]{{~\mathrel{\Big\vert}~}} \newcommand{\suchthat}[0]{\st} \newcommand{\Suchthat}[0]{\middle\vert} \newcommand{\delbar}[0]{{ \bar{\del}}} \newcommand{\containedin}[0]{\subseteq} \newcommand{\contains}[0]{\supseteq} \newcommand{\containing}[0]{\supseteq} \newcommand{\iscontainedin}[0]{\supseteq} \newcommand{\rad}[1]{\sqrt{#1}} \newcommand{\thecat}[1]{\mathbf{#1}} \newcommand{\sheaf}[1]{\operatorname{\mathcal{#1}}} \newcommand{\IC}[0]{\sheaf{IC}} \newcommand{\rightderive}[0]{{\mathbf{R}}} % More involved commands \newcommand\rrarrows{\rightrightarrows} \newcommand\rrrarrows{ \mathrel{\substack{\textstyle\rightarrow\\[-0.6ex] \textstyle\rightarrow \\[-0.6ex] \textstyle\rightarrow}} } \newcommand\ul[1]{\underline{#1}} \newcommand\constantsheaf[1]{\underline{#1}} \newcommand\holomorphic[0]{\text{holo}} \newcommand\hol[0]{\text{hol}} \newcommand\cts[0]{\text{cts}} \newcommand\std[0]{\text{std}} \newcommand\Fl[0]{\operatorname{Fl}} \newcommand\Mero[0]{\operatorname{Mero}} \newcommand\Bl[0]{\operatorname{Bl}} \newcommand\res[0]{\operatorname{res}} \newcommand\coind[0]{\operatorname{coInd}} \newcommand\ind[0]{\operatorname{Ind}} \newcommand\Sel[0]{\operatorname{Sel}} \newcommand\Frob[0]{\operatorname{Frob}} \newcommand\transfer[0]{\operatorname{transfer}} \newcommand\modiso[0]{{_{\scriptstyle / \sim} }} 
\newcommand\even[0]{\text{even}} \newcommand\odd[0]{\text{odd}} \newcommand\hodgestar[0]{\star} \newcommand\dirac[0]{\mkern-3mu \not{ \partial}} \newcommand\laplacian[0]{\Delta} \newcommand\Laplacian[0]{\Delta} \newcommand\stardstar[0]{\hodgestar \mathrm{d} \mkern-1mu \hodgestar} \newcommand\covariant[0]{\nabla} \newcommand\ol[1]{\overline{#1}} \newcommand\univcover[1]{\overline{#1}} \newcommand\closure[1]{\overline{#1}} \newcommand\capprod{\frown} \newcommand\cupprod{\smile} \newcommand\Path{\mathcal{P}} \newcommand\gradient{\nabla} \newcommand\cechH[0]{{\check{H}}} \newcommand\Hc[0]{{\check{H}}} \newcommand\Cc[0]{{\check{C}}} \newcommand\cupp[0]{\smile} \newcommand\capp[0]{\frown} \newcommand\sig[0]{\operatorname{sig}} \newcommand\ev[0]{\operatorname{ev}} \newcommand\coev[0]{\operatorname{coev}} \newcommand\period[0]{\operatorname{period}} \newcommand{\fractional}[1]{\theset{#1}} \newcommand{\fractionalpart}[1]{\theset{#1}} \newcommand{\integerpart}[1]{\left[ {#1}\right] } \newcommand{\zadjoin}[1]{\ZZ\left[ {#1} \right]} \newcommand{\wedgeprod}[0]{\vee} \newcommand{\Prod}[0]{\displaystyle\prod} \newcommand{\Wedgepower}[0]{\bigwedge\nolimits} \def\multichoose#1#2{{\left(\kern-.3em\left(\genfrac{}{}{0pt}{}{#1}{#2}\right)\kern-.3em\right)}} \def\rising#1#2{ \qty{#1}^{ (#2) }} \def\falling#1#2{ \qty{#1}_{ (#2) }} \newcommand\elts[2]{{ {#1}_1, {#1}_2, \cdots, {#1}_{#2}}} \newcommand\tselts[2]{{ \theset{ {#1}_1, {#1}_2, \cdots, {#1}_{#2} } }} \newcommand\mix[1]{\overset{\scriptscriptstyle {#1} }{\times}} \newcommand\fiberproduct[1]{\underset{\scriptscriptstyle {#1} }{\times}} \newcommand\fiberprod[1]{ \fiberproduct{#1}} \newcommand\basechange[1]{{ \fiberproduct{k} {#1} }} \newcommand\fprod[1]{{ \fiberproduct{#1} }} \newcommand\smallprod[0]{{ \scriptscriptstyle\prod }} \newcommand\eqLH[0]{{ \equalsbecause{\scriptscriptstyle\text{LH}} }} \newcommand\drcomplex[0]{{\cocomplex{\Omega}}} \newcommand{\cxH}[0]{{\complex{H}}} \newcommand{\ccxH}[0]{{\cocomplex{H}}} \newcommand{\chern}[0]{{\mathrm{ch}}} \newcommand{\qsymb}[2]{{ \left( {#1} \over {#2} \right) }} \newcommand{\spinornorm}[0]{{\mathrm{spinornorm}}} \newcommand{\RT}[0]{{\mathrm{RT}}} \newcommand{\amp}[0]{{\mathrm{amp}}} \newcommand{\Tot}[0]{{ \operatorname{Tot} }} \newcommand{\Ram}[0]{ \operatorname{Ram}} \newcommand{\weight}[0]{{ \operatorname{weight} }} \newcommand{\Verts}[0]{{ \operatorname{Verts} }} \newcommand{\Endo}[0]{{ \operatorname{End} }} \newcommand{\Hom}[0]{{ \operatorname{Hom} }} %\DeclareMathOperator*{\Hom}{Hom} \newcommand{\PV}[0]{{ \operatorname{PV} }} \newcommand{\Totsum}[0]{\Tot^{\oplus}} \newcommand{\Totprod}[0]{\Tot^{\Pi}} \newcommand{\xpn}[0]{ { x^{p^n} }} \newcommand{\Qbar}[0]{{ \bar{ \mathbf{Q} } }} % mathrms \newcommand{\ptd}{{\scriptstyle { \ast } }} \newcommand{\charpoly}[0]{{\mathrm{charpoly}}} \newcommand{\Sw}[0]{{\mathrm{Sw}}} \newcommand{\Sieg}[0]{{\mathrm{Sieg}}} \newcommand{\Inertia}[0]{{\mathrm{In}}} \newcommand{\generic}[0]{{\mathrm{gen}}} \newcommand{\Conf}[0]{{\mathrm{Conf}}} \newcommand{\Sub}[0]{{\mathrm{Sub}}} \newcommand{\fin}[0]{{\mathrm{fin}}} \newcommand{\can}[0]{{\mathrm{can}}} \newcommand{\ess}[0]{{\mathrm{ess}}} \newcommand{\fd}[0]{{\mathrm{fd}}} \newcommand{\fg}[0]{{\mathrm{fg}}} \newcommand{\free}[0]{{\mathrm{free}}} \newcommand{\lf}[0]{{\mathrm{lf}}} \newcommand{\holonomy}[0]{{\mathrm{holon}}} \newcommand{\qproj}[0]{{\mathrm{qproj}}} \newcommand{\irr}[0]{{\mathrm{irr}}} \newcommand{\ft}[0]{{\mathrm{ft}}} \newcommand{\smol}[0]{{\mathrm{small}}} \newcommand{\alev}[0]{{\,\mathrm{a.e.}}} 
\newcommand{\semisimple}[0]{{\mathrm{ss}}} \newcommand{\semisimplification}[0]{{\mathrm{ss}}} \newcommand{\val}[0]{{\operatorname{val}}} \newcommand{\gon}[0]{{\dash\mathrm{gon}}} \newcommand{\semi}[0]{{\mathrm{semi}}} \newcommand{\inc}[0]{{\mathrm{inc}}} \newcommand{\Ball}[0]{{B}} \newcommand{\hq}[0]{{/}} \newcommand{\unioninfty}[0]{{\union\ts{\infty}}} \newcommand{\dualnumbers}[0]{{ [\eps] / \eps^2 }} \newcommand{\crys}[0]{{\mathrm{crys}}} \newcommand{\Xff}[0]{{X_\mathrm{FF}}} \newcommand{\an}[0]{{\mathrm{an}}} \newcommand{\prop}[0]{{\mathrm{prop}}} \newcommand{\ram}[0]{{\mathrm{ram}}} \newcommand{\Nis}[0]{{\mathrm{Nis}}} \newcommand{\perf}[0]{{\mathrm{perf}}} \newcommand{\exist}[0]{{\exists}} \newcommand{\quillenplus}[0]{{ {}^{+} }} \newcommand{\glue}[1]{{ \Disjoint_{#1} }} \newcommand{\normcomplex}[1]{{\norm{\complex{#1}}}} \newcommand{\nerve}[1]{{ \mathcal{N}({#1}) }} \newcommand{\realize}[1]{{ \abs{#1} }} \newcommand{\opcat}[1]{{ {#1}\op }} \newcommand{\intcl}[0] { \operatorname{cl}^{\mathrm{int}}} \newcommand{\algcl}[0] { \operatorname{cl}^{\mathrm{alg}}} \newcommand{\sepcl}[0] { \operatorname{cl}^{\mathrm{sep}}} \newcommand{\B}[0]{{\mathbf{B}}} \newcommand{\E}[0]{{\mathbf{E}}} \newcommand{\T}[0]{{\mathbf{T}}} \newcommand{\TX}[0]{{\T X}} \newcommand{\TM}[0]{{\T M}} \newcommand{\K}[0]{{\mathsf{K}}} \newcommand{\G}[0]{{\mathsf{G}}}
%\newcommand{\H}[0]{{\mathsf{H}}}
\newcommand{\D}{{ \mathsf{D} }} \newcommand{\mH}{{ \mathsf{H} }} \newcommand{\BGL}[0]{ \mathbf{B}\mkern-3mu \operatorname{GL}} \newcommand{\proportional}{ \propto } \newcommand{\asymptotic}{ \ll }
% More complicated math commands
\newcommand{\RM}[1]{%
\textup{\uppercase\expandafter{\romannumeral#1}}%
}
\DeclareMathOperator{\righttriplearrows} {{\; \tikz{ \foreach \y in {0, 0.1, 0.2} { \draw [-stealth] (0, \y) -- +(0.5, 0);}} \; }} \DeclareMathOperator*{\mapbackforth}{\rightleftharpoons} \newcommand{\fourcase}[4]{ \begin{cases}{#1} & {#2} \\ {#3} & {#4}\end{cases} } \newcommand{\cvec}[2]{{ \begin{bmatrix} {#1} \\ {#2} \end{bmatrix} }} \newcommand{\matt}[4]{{ \begin{bmatrix} {#1} & {#2} \\ {#3} & {#4} \end{bmatrix} }} \newcommand{\mattt}[9]{{ \begin{bmatrix} {#1} & {#2} & {#3} \\ {#4} & {#5} & {#6} \\ {#7} & {#8} & {#9} \end{bmatrix} }} \newcommand\stacksymbol[3]{ \mathrel{\stackunder[2pt]{\stackon[4pt]{$#3$}{$\scriptscriptstyle#1$}}{ $\scriptscriptstyle#2$}} } \newcommand{\textoperatorname}[1]{ \operatorname{\textnormal{#1}} }
% idk man
\newcommand\caniso[0]{{ \underset{\can}{\iso} }} \renewcommand{\ae}[0]{{ \text{a.e.} }} \newcommand\eqae[0]{\underset{\ae}{=}} \newcommand{\sech}[0]{{ \mathrm{sech} }} \newcommand{\Cone}[0]{{ \mathrm{Cone} }} \newcommand{\Cyl}[0]{{ \mathrm{Cyl} }}
%\newcommand{\strike}[1]{{\enclose{\horizontalstrike}{#1}}}
\DeclarePairedDelimiter{\ceil}{\lceil}{\rceil}

# Finite-dimensional Semisimple Lie Algebras over $\CC$ (Wednesday, August 17)

## Humphreys 1.1

:::{.remark}
Main goal: understand semisimple finite-dimensional Lie algebras over $\CC$. These are extremely well-understood, but there are open problems in infinite-dimensional representations, representations over other fields, and Lie superalgebras.
:::

:::{.remark}
Recall that an associative algebra is a ring with the structure of a $k\dash$vector space, while *algebra* alone generally means a not-necessarily-associative algebra. Given any associative algebra $A$, one can define a new bilinear product
\[
[\wait, \wait]: A\tensor_k A &\to A \\
a\tensor b &\mapsto ab-ba
,\]
called the **commutator bracket**. This yields a new algebra $A_L$, which is an example of a Lie algebra.
:::
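:::{.example title="Commutator brackets of matrix units"}
A quick sanity check of this construction in $A \da \Mat_{2\times 2}(\FF)$, using the matrix units $e_{ij}$ (a 1 in position $(i,j)$ and zeros elsewhere):
\[
[e_{11}, e_{12}] = e_{11}e_{12} - e_{12}e_{11} = e_{12} - 0 = e_{12},
\qquad
[e_{12}, e_{21}] = e_{11} - e_{22}
.\]
In particular the bracket is not identically zero, so $A_L$ records exactly the failure of commutativity in $A$.
:::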
:::{.definition title="Lie algebra"}
$L\in \mods{\FF}$ with an operation $[\wait, \wait]: L\times L\to L$ (called the **bracket**) is a **Lie algebra** if

1. $[\wait,\wait]$ is bilinear,
2. $[x, x] = 0$ for all $x\in L$, and
3. the Jacobi identity holds: $[x[yz]] + [y[zx]] + [z[xy]] = 0$.
:::

:::{.exercise title="?"}
Check that $[ab]\da ab-ba$ satisfies the Jacobi identity.
:::

:::{.remark}
\envlist

- Expanding $[x+y, x+y] = 0$ yields $[xy] = -[yx]$. Note that this is equivalent to axiom 2 when $\characteristic \FF\neq 2$ (given axiom 1).
- The Jacobi identity can be rewritten as $[x[yz]] = [[xy]z] + [y[xz]]$, where the second term is an error term measuring the failure of associativity of the bracket. Note that this is essentially the Leibniz rule: bracketing against $x$ acts as a derivation.
:::

:::{.definition title="Abelian Lie algebras"}
A Lie algebra $L\in\Lie\Alg\slice\FF$ is **abelian** if $[xy]=0$ for all $x,y\in L$.
:::

:::{.definition title="Morphisms of Lie algebras"}
A **morphism** in $\Lie\Alg\slice\FF$ is a morphism $\phi\in \mods{\FF}(L, L')$ satisfying $\phi( [xy] ) = [ \phi(x) \phi(y) ]$.
:::

:::{.exercise title="?"}
Check that if $\phi$ has an inverse in $\mods{\FF}$, then $\phi$ automatically has an inverse in $\Lie\Alg\slice \FF$.
:::

:::{.definition title="Subobjects"}
A vector subspace $K\leq L$ is a **Lie subalgebra** if $[xy]\in K$ for all $x,y\in K$.
:::

:::{.remark}
\envlist

- Note that any nonzero $x\in L$ determines a 1-dimensional Lie subalgebra $K\da \FF\cdot x$, which is in fact abelian.
- A big source of Lie algebras: left-invariant vector fields on a Lie group.
- We'll restrict to finite-dimensional algebras for the remainder of the class.
:::

## Humphreys 1.2: Linear Lie Algebras

:::{.remark}
For $V\in \mods{\FF}$, the endomorphism algebra $A\da \Endo_\FF(V)$ is an associative algebra over $\FF$. Thus it can be made into a Lie algebra $\liegl(V) \da A_L$ by defining $[xy] = xy-yx$ as above.
:::

:::{.definition title="Linear Lie algebras"}
Any subalgebra $K\leq \liegl(V)$ is a **linear Lie algebra**.
:::

:::{.remark}
After picking a basis for $V$, there is a noncanonical isomorphism $\Endo_\FF(V) \cong \Mat_{n\times n}(\FF)$ where $n\da \dim_\FF V$. The resulting Lie algebra is $\liegl_n(\FF) \da \Mat_{n\times n}(\FF)_L$.
:::

:::{.fact}
By the Ado--Iwasawa theorem, any finite-dimensional Lie algebra is isomorphic to some linear Lie algebra.
:::

:::{.example title="?"}
The upper triangular matrices form a subalgebra $\liet_n(\FF) \leq \liegl_n(\FF)$.[^commutator_triangle] This is sometimes called the Borel and denoted $\lieb$. There is also a subalgebra $\lien_n(\FF)$ of strictly upper triangular matrices. The diagonal matrices form a maximal torus/Cartan subalgebra $\lieh_n(\FF)$, which is abelian.

[^commutator_triangle]: You get something interesting if you take the commutator bracket of two upper triangular matrices: the diagonals cancel, so $[\liet_n(\FF), \liet_n(\FF)] \subseteq \lien_n(\FF)$.
:::
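:::{.example title="Brackets in $\liet_2(\FF)$"}
For a concrete instance of the footnote above, bracket two elements of $\liet_2(\FF)$:
\[
\left[ \matt a b 0 c, \matt {a'} {b'} 0 {c'} \right]
= \matt {aa'} {ab' + bc'} 0 {cc'} - \matt {a'a} {a'b + b'c} 0 {c'c}
= \matt 0 {ab' + bc' - a'b - b'c} 0 0 \in \lien_2(\FF)
.\]
The diagonal parts of the two products agree and cancel, so the bracket of two upper triangular matrices is strictly upper triangular.
:::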
:::{.example title="Classical Lie algebras"}
\envlist

- Type $A_n \leadsto \liesl_{n+1}(\FF)$, the special linear Lie algebra of traceless matrices.
- Type $B_n \leadsto \lieso_{2n+1}(\FF)$, the odd orthogonal Lie algebra.
- Type $C_n \leadsto \liesp_{2n}(\FF)$, the symplectic Lie algebra.
- Type $D_n \leadsto \lieso_{2n}(\FF)$, the even orthogonal Lie algebra.
- The remaining three are defined by matrices satisfying $sx = -x^t s$, where $s$ is one of the following:
  - $\mattt 1 0 0 0 0 {I_n} 0 {I_n} 0$ corresponding to $\lieso_{2n+1}$,
  - $\matt 0 {I_n} {-I_n} 0$ corresponding to $\liesp_{2n}$,
  - $\matt 0 {I_n} {I_n} 0$ corresponding to $\lieso_{2n}$.

These can be viewed as the matrices of a nondegenerate bilinear form: writing $N$ for the size of the matrices, the matrices act on $V \da \FF^N$, which carries the bilinear form $f: V\times V\to \FF$ given by $f(v, w) = v^t s w$. The form is symmetric for $\lieso$ and skew-symmetric for $\liesp$. The equation $sx=-x^ts$ is the infinitesimal version of preserving the bilinear form $s$.

Note that these are the Lie algebras of the Lie groups $G = \SO_{2n+1}(\FF), \Symp_{2n}(\FF), \SO_{2n}(\FF)$, where $G = \ts{g\in \GL_N(\FF) \st f(gv, gw) = f(v, w) \,\,\forall v,w\in \FF^N}$. The defining condition is equivalent to $f(gv, w) = f(v, g\inv w)$.
:::

:::{.remark}
Philosophy: passing from $G$ to $\lieg$ sends products to sums. For example, the condition $\det(g) = 1$ cutting out $\SL_n$ becomes the condition $\tr(x) = 0$ cutting out $\liesl_n$, via $\det(e^x) = e^{\tr(x)}$.
:::

:::{.exercise title="?"}
Check that the conditions defining $\lieso_n(\FF)$ and $\liesp_n(\FF)$ (i.e. $sx = -x^ts$) yield Lie algebras.
:::

# Friday, August 19

## Humphreys 1.3

:::{.definition title="Derivations"}
Let $A\in \Alg\slice\FF$, not necessarily associative (e.g. a Lie algebra). An **$\FF\dash$derivation** is an $\FF\dash$linear map $D: A \to A$ such that $D(ab) = D(a)b + aD(b)$. Equipped with the commutator bracket, the derivations form a Lie algebra $\Der_\FF(A) \leq \liegl_\FF(A)$.[^derivs]

[^derivs]: The usual product somehow involves "second-order terms", while the commutator product cancels higher order terms to give something first-order.
:::

:::{.warnings}
If $D, D'$ are derivations, then the composition $D\circ D'$ is *not* generally a derivation.
:::

:::{.definition title="Adjoint"}
If $L\in \Lie\Alg\slice\FF$, for $x\in L$ fixed define the **adjoint operator**
\[
\ad_x: L &\to L \\
y &\mapsto [x, y]
.\]
Note that $\ad_x\in \Der_\FF(L)$ by the Jacobi identity. Any derivation of this form is an **inner derivation**, and all other derivations are **outer derivations**.
:::

:::{.remark}
Given a subalgebra $K \leq L$ and $x\in K$, we'll want to distinguish $\ad_L x$ and $\ad_K x$ (which may differ). For example, $\liegl_n(\FF) \geq \lieb = \lieh \oplus \lien$, where $\lieb$ are the upper triangular, $\lieh$ the diagonal, and $\lien$ the strictly upper triangular matrices. If $x\in \lieh$ then $\ad_\lieh x = 0$ since $\lieh$ is abelian, but generally $\ad_\lieg x \neq 0$: e.g. $[\diag(h_1, \cdots, h_n), e_{ij}] = (h_i - h_j)e_{ij}$.
:::
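:::{.remark}
To see why the composition in the warning above fails while the commutator succeeds, expand the Leibniz rule twice:
\[
(D\circ D')(ab) = (DD')(a)\, b + D'(a) D(b) + D(a) D'(b) + a\, (DD')(b)
,\]
so the cross terms $D'(a)D(b) + D(a)D'(b)$ obstruct the Leibniz rule for $D\circ D'$. These cross terms are symmetric in $D$ and $D'$, so they cancel in $[D, D'] \da DD' - D'D$, which is therefore again a derivation.
:::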
[^well_def]: One should check that this is well-defined.
:::

:::{.exercise title="?"}
Is $\lieh\normal \lieb$?
:::

:::{.definition title="Simple Lie algebras"}
A Lie algebra $L$ is **simple** if $L\neq 0$, $\Id(L) = \ts{0, L}$, and $[L, L] \neq 0$. Note that the condition $[LL] \neq 0$ only rules out the 1-dimensional Lie algebra: if $[L, L] = 0$ then any subspace $0 < K < L$ satisfies $[L,K] = 0 \subseteq K$ and is thus an ideal, so $\Id(L) = \ts{0, L}$ already forces $\dim L \leq 1$.
:::

:::{.example title="?"}
Let $L = \liesl_2(\CC)$, so $\trace(x) = 0$. This has standard basis
\[
x = \matt 0100, \qquad y = \matt 0010,\qquad h = \matt 100{-1}. \\
[xy]=h,\quad [hx] = 2x,\quad [hy] = -2y
.\]
:::

:::{.exercise title="?"}
Prove that $\liesl_2(\CC)$ is simple.
:::

:::{.exercise title="?"}
Show that for $K\leq L$, the normalizer $N_L(K)$ is the largest subalgebra of $L$ in which $K$ is an ideal.
:::

:::{.exercise title="?"}
Show that $\lieh \subseteq \lieg\da \liesl_n(\CC)$ is a self-normalizing subalgebra of $\lieg$.

> Hint: use $[h, e_{ij}] = (h_i - h_j) e_{ij}$ where $h = \diag(h_1,\cdots, h_n)$.
> The standard basis is $\lieh = \gens{e_{11} - e_{22}, e_{22} - e_{33}, \cdots, e_{n-1, n-1} - e_{n,n} }$.
:::

:::{.exercise title="?"}
What is $\dim \liesl_3(\CC)$? What are bases for $\lieg$ and $\lieh$?
:::

## Humphreys 2.2

:::{.remark}
Notes:

- Let $L, L'\in \cat C \da \Lie\Alg\slice \FF$, $\phi \in \cat C(L, L')$, $\ker \phi = \ts{x\in L\st \phi(x) = 0} \normal L$.
- Note that if $x\in \ker \phi, y\in L$ then $\phi([xy]) = [\phi(x)\phi(y)] = [0\phi(y)] = 0$.
- A **representation** of $L$ is some $\phi\in \cat C(L, \liegl(V))$ for some $V\in \Vect\slice \FF$.
- The usual 3 isomorphism theorems for groups hold for Lie algebras.
- $\ad: L\to \liegl(L)$ where $x\mapsto \ad x$ is a representation.
- $\ker \ad = Z(L)$, so if $L$ is simple then $Z(L) = 0$ and $\ad$ is injective. Thus any simple Lie algebra is linear.
- Compare to: any finite dimensional Lie algebra is linear.
:::

# Monday, August 22

## Humphreys 2.3: Automorphisms

:::{.remark}
Let $L\in \Lie\Alg\slice\FF$; then $\Aut(L)$ is the group of isomorphisms $L \iso L$. An important example: if $L$ is linear and $g\in \GL(V)$ satisfies $gLg\inv = L$, then $x\mapsto gxg\inv$ is an automorphism. This holds for example if $L = \liegl_n(\FF)$ or $\liesl_n(\FF)$.

Assume $\characteristic \FF = 0$ and let $x\in L$ with $\ad x$ nilpotent, say $(\ad x)^k=0$. Then the power series expansion $e^{\ad x} = \sum_{n\geq 0} {(\ad x)^n \over n!}$ is a polynomial in $\ad x$.
:::

:::{.claim}
$e^{\ad x}\in \Aut(L)$ is an automorphism. More generally, $e^\delta\in \Aut(L)$ for $\delta$ any nilpotent derivation.
:::

:::{.lemma title="Generalized Leibniz rule"}
\[
\delta^n(xy) = \sum_{i=0}^n {n\choose i} \delta^{n-i}(x) \delta^{i}(y)
.\]
:::

:::{.remark}
One can prove this by induction. Then check that $\exp(\delta)(xy) = \exp(\delta)(x)\exp(\delta)(y)$, and writing $\exp(\delta) = 1+\eta$ there is an inverse $1-\eta +\eta^2 +\cdots \pm \eta^{k-1}$. Automorphisms of the form $\exp(\delta)$ for $\delta$ a nilpotent derivation are called **inner automorphisms**, and all others are **outer automorphisms**.
:::

# Solvable and Nilpotent Lie Algebras (Wednesday, August 24)

## Humphreys 3.1

:::{.definition title="Derived series"}
Recall that if $L \subseteq \lieg$ is any subset, the **derived algebra** $[LL]$ is the span of $[xy]$ for $x,y\in L$. This is the analog of needing to take products of commutators to generate a commutator subgroup for groups.
Define the **derived series** of $L$ as
\[
L^{(0)} = L, \quad L^{(1)} = [L^{(0)}, L^{(0)}] = [LL], \quad \cdots, \quad L^{(i+1)} = [L^{(i)} L^{(i)}]
.\]
:::

:::{.proposition title="?"}
These are all ideals.
:::

:::{.proof title="?"}
By induction on $i$ -- it STS that $[x[ab]] \in L^{(i)}$ for $a,b \in L^{(i-1)}$ and $x\in L$. Use the Jacobi identity and the induction hypothesis that $L^{(i-1)} \normal L$:
\[
[x,[ab]] = [[xa]b] + [a[xb]] \in [L^{(i-1)}, L^{(i-1)}] + [L^{(i-1)}, L^{(i-1)}] = L^{(i)}
.\]
:::

:::{.definition title="Solvable"}
If $L^{(n)} = 0$ for some $n\geq 0$, then $L$ is called **solvable**.
:::

:::{.remark}
Note that

- $L$ abelian implies solvable, since $L^{(1)} = 0$.
- $L$ simple implies non-solvable, since this forces $L^{(1)} = L$.
:::

:::{.exercise title="?"}
Let $\lieb \da \lieb_n(\FF)$ be the upper triangular matrices; show that $\lieb$ is solvable.

> Use that $[\lieb \lieb] = \lien$ is strictly upper triangular since diagonals cancel.
> More generally, bracketing matrices with $n$ diagonals of zeros yields matrices with about $2^n$ diagonals of zeros.
:::

:::{.proposition title="?"}
Let $L\in \Lie\Alg\slice\FF$, then

- $L$ solvable implies solvability of all subalgebras and homomorphic images of $L$.
- If $I\normal L$ and $L/I$ are solvable, then $L$ is solvable.
- If $I,J\normal L$ are solvable then $I+J$ is solvable.[^hint_solvable]

[^hint_solvable]: Use the third isomorphism theorem.
:::

:::{.exercise title="?"}
Prove these.
:::

:::{.definition title="Radical and semisimple"}
Every $L\in \Lie\Alg\slice\FF^{\fd}$ has a unique maximal solvable ideal, the sum of all solvable ideals, called the **radical** of $L$, denoted $\rad(L)$. $L$ is **semisimple** if $\rad(L) = 0$.
:::

:::{.exercise title="?"}
Prove that any simple algebra is semisimple, and in general $L/\rad(L)$ is semisimple (if nonzero).
:::

:::{.proposition title="?"}
Assuming $\liesl_n(\CC)$ is simple, $R\da \rad(\liegl_n(\CC)) = Z(\lieg) = \CC \id_n$ for $\lieg\da\liegl_n(\CC)$.
:::

:::{.proof title="?"}
$\supseteq$: The center is an abelian ideal, hence solvable, and the radical is the sum of all solvable ideals, so $\CC\id_n \subseteq Z(\lieg) \subseteq R$.

$\subseteq$: Suppose $\CC\id_n \subsetneq R$ is proper, so there is a non-scalar matrix $x\in R$. Write $x = aI_n + y$ for $a = \tr(x)/n$ and $0\neq y\in \liesl_n(\CC)$ traceless. Consider $I \da \gens{x} \normal \liegl_n(\CC)$, the ideal generated by $x$: the span of $x$, all brackets $[zx]$ for $z\in \lieg$, and their iterated brackets containing $x$, e.g. $[z_1[z_2x]]$. Since $R$ is an ideal containing $x$, we have $I \subseteq R$, so $I$ is solvable (subalgebras of solvable algebras are solvable). Note that $[zx]=[zy]$ since $aI_n$ is central. Since $y \neq 0$ and $\liesl_n(\CC)$ is simple, $\gens{y}_{\liesl_n(\CC)} = \liesl_n(\CC)$, and thus $\liesl_n(\CC) \subseteq I$. But $\liesl_n(\CC)$ is not solvable: by simplicity $\liesl_n(\CC)^{(1)} = \liesl_n(\CC)$, so its derived series never terminates, contradicting solvability of $I$.

(One could go further: if $a\neq 0$ then $aI_n = x - y\in I$ since $y\in \liesl_n(\CC) \subseteq I$, so $I_n\in I$ and $\CC\cdot I_n \subseteq I$, forcing $I = \lieg$ since every matrix in $\liegl_n(\CC)$ is a scalar multiple of the identity plus a traceless matrix -- again contradicting solvability, as $\lieg^{(1)} = [\lieg\lieg] = \liesl_n(\CC)$ and the derived series never terminates.)
$\contradiction$
:::

## Humphreys 3.2

:::{.definition title="Lower central series and nilpotent algebras"}
The **descending/lower central series** of $L$ is defined as
\[
L^0 = L, \quad L^1 = [LL], \quad \cdots, \quad L^i = [L, L^{i-1}]
.\]
$L$ is **nilpotent** if $L^n=0$ for some $n$.
:::

:::{.exercise title="?"}
Check that $L^i \normal L$.
:::

:::{.exercise title="?"}
Show that $L$ nilpotent is equivalent to there existing a finite $n$ such that for any set of elements $\ts{x_i}_{i=1}^n$,
\[
(\ad_{x_1} \circ \ad_{x_2} \circ \cdots \circ \ad_{x_n})(y) = 0 \qquad \forall y\in L
.\]
:::

# Friday, August 26

## 3.2: Engel's theorem

:::{.proposition title="Nilpotent implies solvable"}
Recall $L$ is nilpotent if $L^{n} = 0$ for some $n\geq 0$ (the descending central series) where $L^{i+1} = [LL^{i}]$. Equivalently, $\prod_{i\leq n} \ad_{x_i} =0$ for any $\ts{x_i}_{i\leq n} \subseteq L$. Note that $L^{i} \contains L^{(i)}$ by induction on $i$ -- these coincide for $i=0,1$, and one can check
\[
L^{(i+1)} = [L^{(i)} L^{(i)}] \subseteq [L L^{i}] = L^{i+1}
.\]
:::

:::{.example title="Solvable does not imply nilpotent"}
$\lieb_n$ is solvable but not nilpotent, since $\lieb_n^1 = \lien_n$ and $\lieb_n^2 = [\lieb_n, \lien_n] = \lien_n$, so the lower central series stabilizes at $\lien_n \neq 0$ and never reaches zero.
:::

:::{.example title="?"}
$\lien_n$ is nilpotent, since the number of zero diagonals adds when taking brackets $[LL^{i}]$.
:::

:::{.warnings}
$\lieh$ is also nilpotent, since any abelian algebra is nilpotent.
:::

:::{.proposition title="?"}
Let $L\in\Lie\Alg\slice\FF$, then

- If $L$ is nilpotent then any subalgebra or homomorphic image of $L$ is nilpotent.
- If $L/Z(L)$ is nilpotent then $L$ is nilpotent.[^lift_series]
- If $L\neq 0$ is nilpotent, then $Z(L) \neq 0$.[^how_to_prove]

[^how_to_prove]: If $L^{n-1} \neq 0$ but $L^n=0$, then $[LL^{n-1}] = L^n = 0$ and thus $0 \neq L^{n-1} \subseteq Z(L)$.

[^lift_series]: Lift a series for the quotient, which is eventually in $Z(L)$ since it was zero in the quotient, and then bracketing with $Z(L)$ terminates.
:::

:::{.exercise title="?"}
Show that if $L/I$ and $I\normal L$ are nilpotent, then $L$ need not be nilpotent.
:::

:::{.remark}
Distinguish $\Endo(L)$, whose algebra structure is given by associative multiplication, from $\liegl(L)$ with the bracket multiplication.
:::

:::{.definition title="ad-nilpotent"}
An element $x\in L$ is **ad-nilpotent** if $\ad_x \in \Endo(L)$ is a nilpotent endomorphism.
:::

:::{.remark}
If $L$ is nilpotent then $x\in L$ is ad-nilpotent by taking $x_i = x$ for all $i$. It turns out that the converse is true:
:::

:::{.theorem title="Engel's theorem"}
If all $x\in L$ are ad-nilpotent, then $L$ is nilpotent.
:::

:::{.proof title="?"}
To be covered in an upcoming section.
:::

:::{.lemma title="?"}
Let $x\in \liegl(V)$ be a nilpotent linear transformation for $V$ finite-dimensional. Then
\[
\ad_x: \liegl(V)&\to \liegl(V) \\
y &\mapsto x\circ y - y\circ x
\]
is a nilpotent operator.
:::

:::{.proof title="?"}
Let \( \lambda_x, \rho_x \in \Endo(\liegl(V)) \) be left and right multiplication by $x$, which are commuting nilpotent operators. The binomial theorem shows that if $D_1, D_2$ are any two commuting nilpotent endomorphisms of a vector space, then $D_1\pm D_2$ is again nilpotent. But then one can write $\ad_x = \lambda_x - \rho_x$.
:::

:::{.remark}
If $x\in \liegl(V)$ is nilpotent then so is $\ad_x$. Conversely, if all $\ad_x$ for $x\in L \leq \liegl(V)$ are nilpotent operators then $L$ is nilpotent by Engel's theorem.
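A numerical illustration of the lemma (a sketch of our own, not from the text): realizing $\ad_x = \lambda_x - \rho_x$ as an $n^2 \times n^2$ matrix and checking nilpotency directly.

```python
import numpy as np

n = 3
x = np.diag(np.ones(n - 1), k=1)  # a single nilpotent Jordan block: x^n = 0
I = np.eye(n)

# With row-major vectorization, vec(xy - yx) = (x kron I - I kron x^t) vec(y),
# i.e. ad_x = lambda_x - rho_x as an n^2 x n^2 matrix.
ad_x = np.kron(x, I) - np.kron(I, x.T)

# lambda_x and rho_x commute and are nilpotent, so ad_x is nilpotent;
# expanding binomially, (ad_x)^(2n-1) = 0 since lambda_x^n = rho_x^n = 0.
assert np.allclose(np.linalg.matrix_power(ad_x, 2 * n - 1), 0)
```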
:::

:::{.warnings}
The converse of the above lemma is not necessarily true: $x$ being ad-nilpotent does not imply that $x$ is nilpotent. As a counterexample, take $x=I_n\in \liegl_n(\CC)$: then $\ad_x = 0$, but $x^k=x$ for any $k\geq 1$.
:::

## 3.3: Proof of Engel's theorem

:::{.remark}
The following is related to the classical linear algebra theorem that commuting operators admit a simultaneous eigenvector:
:::

:::{.theorem title="?"}
Let $L$ be a Lie subalgebra of $\liegl(V)$ for $V$ finite-dimensional. If $L$ consists of nilpotent endomorphisms, then there exists a nonzero $v\in V$ such that $Lv=0$.
:::

:::{.proof title="?"}
Proceed by induction on $\dim L$ (assuming the result holds for *all* vector spaces $V$). The case $\dim L = 1$ is clear: $L$ is spanned by a single nilpotent operator, whose characteristic polynomial is $f(t) = t^{\dim V}$, so $0$ is an eigenvalue (in any field) and there is at least one eigenvector.

For $\dim L > 1$, suppose $K \leq L$ is a proper Lie subalgebra. By hypothesis, $K$ consists of nilpotent elements in $\Endo(V)$, so apply the previous lemma to see that $\ad(K) \subseteq \Endo(L)$ acts by nilpotent endomorphisms of $L$, since they are restrictions to $L$ of nilpotent endomorphisms of $\liegl(V)$. Since $[KK] \subseteq K$, we can view $\ad(K) \subseteq \Endo(L/K)$ where $L/K$ is a vector space. By the IH applied to $\ad(K) \leq \liegl(L/K)$ -- a Lie algebra of dimension at most $\dim K < \dim L$ consisting of nilpotent endomorphisms -- one can find a nonzero $x+K \in L/K$ such that $\ad(K)(x+K)=0$. Hence one can find an $x\in L\sm K$ such that for all $y\in K$ one has $[yx] \in K$, so $x\in N_L(K)\sm K$. Thus $K \subsetneq N_L(K)$ is a proper containment.

> To be continued.
:::

# Monday, August 29

## Continuation of proof and corollaries

:::{.remark}
Recall: we were proving that if $L \leq \liegl(V)$ with $V$ finite dimensional and $L$ consists of nilpotent endomorphisms, then there exists a common eigenvector $v$, so $Lv = 0$.
:::

:::{.proof title="continued"}
We're inducting on $\dim L$ (over all $V$). Assuming $\dim L > 1$, we showed that proper subalgebras are strictly contained in their normalizers:
\[
K \lneq L \implies K \subsetneq N_L(K)
.\]

Let $K$ be a maximal proper subalgebra of $L$; then $N_L(K) = L$ by maximality, and thus $K$ is a proper ideal of $L$. Then $L/K$ is a Lie algebra of some dimension, which must be 1 -- otherwise the preimage in $L$ of any 1-dimensional subspace of $L/K$ (automatically a subalgebra, since $K$ is an ideal) would be a subalgebra of $L$ properly between $K$ and $L$. Thus $K$ is a codimension 1 ideal in $L$, and choosing any $z\in L\sm K$ yields a decomposition $L = K \oplus \FF z$ as vector spaces.

Let $W \da \ts{v\in V\st Kv=0}$, then $W\neq 0$ by the IH.

:::{.claim}
$W$ is an $L\dash$stable subspace. To see this, let $x\in L, y\in K, w\in W$. A useful trick:
\[
y.(x.w) = x.(y.w) - [xy].w = 0
,\]
since $y.w = 0$ and $[xy]\in K \normal L$ also kills $w$.
:::

Since $z\actson W$ nilpotently, choose an eigenvector $v$ for $z$ in $W$ for the eigenvalue zero. Then $z.v=0$, so $Lv=0$.
:::

:::{.corollary title="Engel's theorem"}
If all elements of a Lie algebra $L$ are ad-nilpotent, then $L$ is nilpotent as an algebra.
:::

:::{.proof title="?"}
Induct on $\dim L$. Note that $\ad(L) \leq \liegl(L)$ consists of nilpotent endomorphisms. Use the theorem to pick a nonzero $x\in L$ such that $\ad(L).x = 0$, i.e. $[L, x] = 0$, i.e. $x\in Z(L)$, and thus $Z(L)$ is nonzero. Now $\dim L/Z(L) < \dim L$, and a fortiori its elements are still ad-nilpotent, so $L/Z(L)$ is nilpotent.
By proposition 3.2b, $L$ is nilpotent.[^ses_works]

[^ses_works]: Note that for arbitrary SESs, the 2-out-of-3 property does not hold for nilpotency, but for the special case of a quotient by the center it does.
:::

:::{.corollary title="?"}
Let $0\neq L \leq \liegl(V)$ with $\dim V < \infty$ be a Lie algebra of nilpotent endomorphisms (as in the theorem).[^assumption_nilp_endo] Then $V$ has a basis in which the matrices of $L$ are all strictly upper triangular.

[^assumption_nilp_endo]: Note that the assumption is not that $L$ is a nilpotent algebra, but rather the stronger assumption on endomorphisms.
:::

:::{.proof title="?"}
Induct on $\dim V$. Use the theorem to pick a nonzero $v_1$ with $Lv_1=0$. Consider $W\da V/\FF v_1$, and view $L \subseteq \Endo(V)$ as a subspace of $\Endo(W)$ -- these are still nilpotent endomorphisms. By the IH, $W$ has a basis $\ts{\bar v_i}_{2\leq i \leq n}$ with respect to which the matrices of $L$ (viewed as a subspace of $\Endo(W)$) are strictly upper triangular. Let $\ts{v_i} \subseteq V$ be preimages of the $\bar v_i$; together with $v_1$, this gives a basis with the desired properties. This results in a matrix of the following form:

![](figures/2022-08-29_09-38-58.png)
:::

## Chapter 4: Theorems of Lie and Cartan

:::{.remark}
From now on, assume $\FF = \bar\FF$ is algebraically closed and $\characteristic(\FF) = 0$.
:::

## 4.1: Lie's Theorem

:::{.theorem title="Lie's Theorem"}
Let $L\neq 0$ be a solvable Lie subalgebra of $\liegl(V)$ with $\dim V < \infty$. Then $V$ contains a common eigenvector for all of $L$.
:::

:::{.proof title="?"}
Induct on $\dim L$. If $\dim L = 1$, then $L$ is spanned by a single linear operator $x$, and over an algebraically closed field $x$ has at least one eigenvector.

For $\dim L > 1$, take the following strategy:

1. Identify $K\normal L$ proper of codimension 1,
2. By IH, find a common eigenvector for $K$,
3. Verify that $L$ stabilizes "the" subspace of all such common eigenvectors (much harder than before!),
4. In this subspace, find an eigenvector for some $z\in L\sm K$ with $L = K \oplus \FF z$.

Off we go!

**Step 1**: Since $L$ is solvable, we have $[LL]$ properly contained in $L$. In $L/[LL]$ choose any codimension 1 subspace -- it is an ideal since the quotient is abelian, and it lifts to a codimension 1 ideal $K \subset L$.

**Step 2**: Since subalgebras of solvable algebras are again solvable, $K$ is solvable. By the IH, pick a common nonzero eigenvector $v$ for $K$. There exists a linear map $\lambda: K\to \FF$ such that $x.v = \lambda(x) v$ for all $x\in K$. Let $W \da \ts{v\in V \st y.v = \lambda(y) v\,\,\forall y\in K}$, which is nonzero.

**Step 3**: Note $L.W \subseteq W$. Let $w\in W, x\in L, y\in K$; we WTS $y.(x.w) = \lambda(y)x.w$. Write
\[
y.(x.w) &= x.(y.w) - [xy].w \\
&= \lambda(y)(x.w) - \lambda([xy])w
,\]
where the second line follows since $[xy]\in K$. We then need \( \lambda([xy]) = 0 \) for all $x\in L$ and $y\in K$.

Since $\dim V < \infty$, choose $n$ minimal such that $\ts{w, x.w, x^2.w,\cdots, x^n.w}$ is linearly dependent. Set $W_i \da \spanof_\FF\ts{w, x.w, \cdots, x^{i-1}.w}$, so $W_0 = 0, W_1 = \spanof_\FF\ts{w}$, and so on, noting that

- $\dim W_n = n$,
- $W_{n+k} = W_n$ for all $k\geq 0$,
- $x.W_n \subseteq W_n$.

:::{.claim}
For all $y\in K$,
\[
y.x^i.w = \lambda(y) x^i.w \mod W_i
.\]
:::

To be continued!
:::

# Wednesday, August 31

## Section 4.1, continuing the proof

:::{.remark}
Recall $\dim L, \dim V < \infty$, $\FF$ is algebraically closed, and $\characteristic \FF = 0$. For $L \leq \liegl(V)$ solvable, we want a common eigenvector $v\in V$ for $L$.
Steps for the proof:

1. Find a $K\normal L$ proper of codimension 1.
2. Set $W = \ts{v\in V \st x.v = \lambda(x) v \,\forall x\in K}\neq 0$ for some linear \( \lambda: K\to \FF \).
3. Show $L.W \subseteq W$; we needed to show \( \lambda([LK] ) = 0 \).
:::

:::{.proof title="Continued"}
**Step 3**: Fix $x\in L, w\in W$ and $n$ minimal such that $\ts{x^i w}_{i\leq n}$ is linearly dependent. For $i\geq 0$ set $W_i = \FF\gens{w, xw, \cdots, x^{i-1}w}$. Then $\dim W_n = n$, $W_n = W_{n+i}$ for $i\geq 0$, and $xW_n \subseteq W_n$.

:::{.claim}
For all $y\in K$,
\[
yx^i .w = \lambda(y) x^i w \mod W_i
.\]
:::

:::{.proof title="?"}
This is proved by induction on $i$, where $i=0$ follows from how $W$ is defined. For $i\geq 1$, use the commuting trick:
\[
yx^i . w &= yxx^{i-1}w \\
&= (xy - [xy]) x^{i-1} w \\
&= x(y x^{i-1} w) - [xy]x^{i-1}w \\
&\equiv \lambda(y) x^i w - \lambda([xy])x^{i-1} w \mod W_{i} \\
&\equiv \lambda(y) x^i w \mod W_i
,\]
where the fourth line applies the inductive hypothesis to both terms (using $x.W_{i-1} \subseteq W_i$ and $W_{i-1} \leq W_i$), and the last line uses $x^{i-1}w \in W_i$.
:::

Given this claim, for $i=n$ this says that the matrix of any $y\in K$ with respect to the basis $\ts{x^iw}_{0\leq i \leq n-1}$ of $W_n$ is upper triangular with diagonal entries all equal to \( \lambda(y) \). Thus $\trace\qty(\ro{y}{W_n}) = n \lambda(y)$, and so $[xy]\actson W_n$ with trace $n \lambda([xy])$.

On the other hand, $x,y$ both act on $W_n$ (e.g. by the formula in the claim for $yx^i.w$) and so
\[
\ro{[xy]}{W_n} = \ro{xy}{W_n} - \ro{yx}{W_n}
,\]
thus $\trace\qty(\ro{[xy]}{W_n}) = 0$. Since $\FF$ has characteristic zero, $n \lambda([xy]) = 0 \implies \lambda([xy]) = 0$.

**Step 4**: By step 1, $L = K \oplus \FF z$ for some $z\in L\sm K$. Viewing $z: W\to W$ and using $\FF = \bar\FF$, $z$ has an eigenvector $v\in W$. Since $v\in W$, it is also a common eigenvector for $K$, and thus an eigenvector for all of $L$ by linearity.
:::

:::{.corollary title="A: Lie's theorem"}
Let $L\leq \liegl(V)$ be a solvable subalgebra; then $L$ stabilizes some flag in $V$. In particular, there exists a basis for $V$ with respect to which the matrices in $L$ are all upper triangular.
:::

:::{.remark}
Recall that for $V \in\Vect\slice \FF$, a complete flag is an element of
\[
\Fl(V) \da \ts{ 0 = V^0 \subsetneq V^1 \subsetneq \cdots \subsetneq V^n = V \st \dim V^i = i}
.\]
A subalgebra $L$ **stabilizes** a flag if $LV^i \subseteq V^i$ for all $i$, which implies there is a compatible basis (obtained by extending one vector at a time from a basis for $V^1$) for which $L$ acts by upper triangular matrices.
:::

:::{.proof title="?"}
Use the theorem and induct on $n=\dim V$ as in Engel's theorem -- find a common eigenvector spanning $V^1$; since $L$ stabilizes $V^1$, one gets an action $L\actson V/V^1$ of smaller dimension. Then just lift the resulting flag through the quotient.
:::

:::{.corollary title="B"}
Let $L$ be a solvable Lie algebra; then there exists a chain of ideals
\[
0 = L_0 \subsetneq L_1 \subsetneq \cdots \subsetneq L_n = L
\]
such that $\dim L_i = i$.
\label{thm:corB}
:::

:::{.proof title="?"}
Consider $\ad L \leq \liegl(L)$. Apply Lie's theorem to get a flag of subspaces $L_i$ with $(\ad L)L_i \subseteq L_i \iff [LL_i] \subseteq L_i$, making each $L_i\normal L$ an ideal.
:::

:::{.corollary title="C"}
Let $L$ be solvable; then $x\in [LL]\implies \ad_L x$ is nilpotent. Hence $[LL]$ is nilpotent by Engel's theorem.
:::

:::{.proof title="?"}
Find a flag of ideals by \autoref{thm:corB} and let $\ts{x_1,\cdots, x_n}$ be a compatible basis. Then the matrices $\ts{\ad_x\st x\in L}$ are all upper triangular.
If $x\in [LL]$, by linearity it suffices to consider $x = [yz]$ for some $y,z\in L$. Then
\[
\ad_x = [\ad_y \ad_z] = \ad_y\ad_z - \ad_z \ad_y
\]
will be strictly upper triangular (since these are upper triangular and the commutator cancels diagonals) and hence nilpotent.
:::

## Section 4.3

:::{.remark}
We'll come back to 4.2 next time. For this section, assume $\FF = \bar\FF$ and $\characteristic \FF = 0$. **Cartan's criterion** for a semisimple $L$ (i.e. $\Rad(L) = 0$) involves the **Killing form**, a certain nondegenerate bilinear form on $L$. Recall that if $L$ is solvable then $[LL]$ is nilpotent, or equivalently every $x\in [LL]$ is ad-nilpotent.
:::

:::{.lemma title="Checking nilpotency with a trace condition (technical)"}
Let $A \subseteq B$ be subspaces of $\liegl(V)$ (really $\Endo(V)$ as a vector space) with $V$ finite-dimensional. Let
\[
M\da\ts{w\in \liegl(V) \st [wB] \subseteq A}
\]
and suppose some $w\in M$ satisfies $\trace(wz) = 0$ for all $z\in M$. Then $w$ is nilpotent.
:::

:::{.proof title="?"}
Later!
:::

:::{.definition title="Bilinear form terminology"}
A **bilinear form** is a map
\[
\beta(\wait, \wait): L\times L\to \FF
,\]
which is **symmetric** if $\beta(x,y) = \beta(y,x)$ and **associative** if $\beta([xy], z) = \beta(x, [yz])$ for all $x,y,z\in L$. The **radical** of $\beta$ is
\[
\Rad(\beta) \da \ts{w\in L\st \beta(w, L) = 0}
,\]
and $\beta$ is **nondegenerate** if $\Rad(\beta) = 0$.
:::

:::{.example title="?"}
For $L = \liegl(V)$, take $\beta(x,y)\da \trace(xy)$. One can check this is symmetric, bilinear, and associative -- associativity follows from
\[
[xy]z &= xyz-yxz\\
x[yz] &= xyz - xzy
,\]
since $yxz = y(xz)$ and $xzy = (xz)y$ have the same trace, using $\trace(AB) = \trace(BA)$.
:::

:::{.proposition title="?"}
If $\beta$ is associative, then $\Rad(\beta) \normal L$.
:::

:::{.proof title="?"}
Let $z\in \Rad(\beta)$ and $x,y\in L$. To see that $[zx]\in \Rad(\beta)$, check
\[
\beta([zx], y) = \beta(z, [xy]) = 0
\]
since $z\in \Rad(\beta)$. Thus $[zx] \in \Rad(\beta)$.
:::

# Friday, September 02

## 4.2: Jordan-Chevalley decomposition

:::{.remark}
Let $\FF = \bar\FF$ of arbitrary characteristic and $V\in\Vect\slice\FF^\fd$ with $x\in \Endo_\FF(V)$. The JCF of $x$ yields a decomposition $x = D+N$ where $D$ is diagonalizable, $N$ is nilpotent, and $D, N$ commute. Recall $x$ is semisimple (diagonalizable) iff the minimal polynomial of $x$ has distinct roots.
:::

:::{.proposition title="?"}
If $x\in \Endo(V)$,

a. There is a decomposition $x = x_s + x_n$ where $x_s$ is semisimple and $x_n$ is nilpotent. This is unique subject to the condition that $x_s, x_n$ commute.
b. There are polynomials $p(T), q(T)$ without constant terms with $x_s = p(x), x_n = q(x)$. In particular, $x_s, x_n$ commute with any endomorphism which commutes with $x$.
:::

:::{.lemma title="?"}
Let $x\in \liegl(V)$ with Jordan decomposition $x = x_s + x_n$. Then $\ad_x = \ad_{x_s} + \ad_{x_n}$ is the Jordan decomposition of $\ad_x$ in $\Endo(\Endo(V))$.
:::

:::{.proof title="?"}
If $x\in \liegl(V)$ is semisimple then so is $\ad_x$, since the eigenvalues of $\ad_x$ are differences of eigenvalues of $x$. I.e., if $\ts{v_1,\cdots, v_n}$ is an eigenbasis for $V$ with $x.v_i = a_i v_i$, then in this basis we have $[x e_{ij}] = (a_i - a_j) e_{ij}$, so $\ts{e_{ij}}$ is an eigenbasis for $\ad_x$.

If $x$ is nilpotent then $\ad_x$ is nilpotent, since $\ad_x(y) = \lambda_x(y) - \rho_x(y)$ where \( \lambda, \rho \) are left/right multiplication, and differences of commuting nilpotents are nilpotent.
One can check $[\ad_{x_s}, \ad_{x_n}] = \ad_{[x_s x_n]} = 0$ since $x_s, x_n$ commute.
:::

:::{.remark}
One can show that if $L$ is semisimple then $\ad(L) = \Der(L)$, which is used to show that if $L$ is an arbitrary *semisimple* Lie algebra then every $x\in L$ admits a decomposition with

- $x = x_s + x_n$,
- $[x_s x_n] = 0$,
- $\ad_{x_s}$ semisimple and $\ad_{x_n}$ nilpotent.

This gives a notion of semisimple and nilpotent parts for elements of semisimple Lie algebras not of the form $\liegl(V)$.
:::

:::{.lemma title="?"}
Let $U\in \Alg\slice{\FF}^\fd$; then $\Der(U)$ is closed under taking semisimple and nilpotent parts.
:::

:::{.proof title="?"}
Let \( \delta\in \Der(U) \) and write $\delta = \sigma + \nu$ for the Jordan decomposition of $\delta$ in $\Endo(U)$. It STS $\sigma$ is a derivation, so for $a\in \FF$ define
\[
U_a \da \ts{x\in U \st (\delta - a)^k x = 0 \,\,\text{for some } k}
.\]
Note $U = \bigoplus _{a\in \Lambda} U_a$ where $\Lambda$ is the set of eigenvalues of $\delta$, which are also the eigenvalues of $\sigma$ -- this is because $\sigma, \nu$ are commuting operators with $\nu$ nilpotent.

:::{.claim}
For any $a,b\in \FF$, $U_a U_b \subseteq U_{a+b}$.
:::

Assuming this, it STS \( \sigma(xy) = \sigma(x)y + x \sigma(y) \) when $x\in U_a, y\in U_b$ where $a,b$ are eigenvalues. Since $xy\in U_{a+b}$ by the claim and $\sigma$ acts on $U_{a+b}$ by the scalar $a+b$, we have \( \sigma(xy) = (a+b)xy \) and thus
\[
\sigma(x)y + x \sigma(y) = axy + xby = (a+b)xy
.\]
So \( \sigma\in \Der(U) \).
:::

:::{.proof title="of claim"}
A sub-claim, provable by induction on $n$:
\[
(\delta - (a+b)\cdot 1)^n (xy) = \sum_{0\leq i\leq n} {n\choose i} \qty((\delta - a\cdot 1)^{n-i}x) \cdot \qty((\delta- b\cdot 1)^i y)
.\]
:::

# Wednesday, September 07

## 4.3: Cartan's criterion for semisimplicity

:::{.remark}
For the rest of the course, $V$ is a vector space of finite dimension. Goal: get a criterion for semisimplicity.
:::

:::{.theorem title="Cartan's criterion for linear Lie algebras"}
Let $L\leq \liegl(V)$ be a linear Lie algebra and suppose $\trace(xz)=0$ for all $x\in [LL]$ and $z\in L$. Then $L$ is solvable.
:::

:::{.lemma title="?"}
Let $A \subseteq B$ be subspaces of $\Endo(V) = \liegl(V)$ and define
\[
M = \ts{w\in \liegl(V) \st [w, B] \subseteq A}
.\]
Suppose that $w\in M$ satisfies $\trace(wz) = 0$ for all $z\in M$. Then $w$ is nilpotent.
:::

:::{.proof title="of Cartan, assuming the lemma"}
To show $L$ is solvable, it STS that $[LL]$ is nilpotent: nilpotent implies solvable, and if $[LL] = L^{(1)}$ is solvable then so is $L$. By Engel's theorem, it STS to show each $w\in [LL]$ is ad-nilpotent. Since $L \leq \liegl(V)$, it STS to show each $w\in [LL]$ is a nilpotent endomorphism.

As in the setup of the lemma, set $B = L, A = [LL]$, so that
\[
M \da \ts{z\in \liegl(V) \st [zL] \subseteq [LL] } \contains L \contains [LL]
.\]
Let $w\in [LL] \subseteq M$; then $\trace(wz) = 0$ for all $z\in L$, but we need to know this for all $z\in M$. Let $z\in M$ be arbitrary; by linearity of the trace it STS to check $\trace(wz) = 0$ for generators $w = [xy]$ of $[LL]$, where $x,y\in L$. We thus WTS $\trace([xy]z) = 0$:
\[
\trace([xy]z) &= \trace(x [yz] ) \\
&=\trace([yz] x) \\
&= 0
,\]
where $[yz]\in [ML] \subseteq [LL]$ since $z\in M$, so the last trace vanishes by the hypothesis on $L$. By the lemma, $w$ is nilpotent.
:::

:::{.corollary title="Cartan's criterion for general Lie algebras"}
Let $L\in \Lie\Alg$ with $\trace(\ad_x \ad_y) = 0$ for all $x \in [LL]$ and $y\in L$. Then $L$ is solvable.
:::

:::{.proof title="of corollary"}
Use $\ad: L\to \liegl(L)$, a morphism of Lie algebras.
Its image $\ad(L)$ is solvable by Cartan's criterion above, and $\ker \ad = Z(L)$ is abelian and hence a solvable ideal.[^derived_alg_term] Since $L/Z(L) \cong \ad(L)$ and $Z(L)$ are both solvable, $L$ is solvable.

[^derived_alg_term]: The derived series terminates immediately for an abelian algebra.
:::

:::{.proof title="of lemma"}
Let $w = s + n$ be the Jordan-Chevalley decomposition of $w$. Choose a basis for $V$ such that this is the JCF of $w$, i.e. $s = \diag(a_1,\cdots, a_n)$ and $n$ is strictly upper triangular. Idea: show $s=0$ by showing $A\da \QQ\gens{a_1,\cdots, a_n} = 0$, by showing $A\dual = 0$, i.e. any $\QQ\dash$linear functional $f: A\to \QQ$ is zero. If $\sum a_i f(a_i) = 0$ then
\[
0 = f\qty(\sum a_i f(a_i)) = \sum f(a_i)^2 \implies f(a_i) = 0 \,\,\forall i
,\]
so we'll show $\sum a_i f(a_i) = 0$.

Let $y = \diag( f(a_1), \cdots, f(a_n) )$. Then $\ad_y$ is a polynomial in $\ad_s$ without constant term, explicitly constructed using Lagrange interpolation (see the exercise below). Since $\ad_s$ is itself a polynomial in $\ad_w$ with zero constant term, and since $\ad_w(B) \subseteq A$, we have $\ad_s(B) \subseteq A$, and the same is thus true for $\ad_y$. So $y\in M$ and $w\in M$, and applying the trace condition in the lemma with $z\da y$ we get
\[
0 = \trace(wy) = \sum a_i f(a_i)
,\]
noting that $w$ is upper triangular and $y$ is diagonal. So $s=0$ and $w=n$ is nilpotent.
:::

:::{.exercise title="?"}
Show $\ad_y$ is a polynomial in $\ad_s$.
:::

:::{.remark}
Recall that $\Rad L$ is the unique maximal (not necessarily proper) solvable ideal of $L$. This exists, e.g. because sums of solvable ideals are solvable. Note that $L$ is semisimple iff $\Rad L = 0$.
:::

:::{.definition title="Killing form"}
Let $L\in \Lie\Alg^\fd$ and define the **Killing form**
\[
\kappa: L\cross L &\to \FF \\
\kappa(x, y) &= \trace(\ad_x \circ \ad_y)
.\]
This is an associative[^assoc_invt] bilinear form on $L$.

[^assoc_invt]: Associative is $f([xy]z) = f(x [yz])$, sometimes called *invariant*.
:::

:::{.example title="?"}
Let $L = \CC\gens{x, y}$ with $[xy] = x$. In this ordered basis,
\[
\ad_x = \matt 0 1 0 0 \qquad \ad_y = \matt {-1} 0 0 0
,\]
and one can check $\kappa(x,x) = \kappa(x, y) = \kappa(y, x) = 0$ and $\kappa(y,y) = 1$. Moreover $\Rad \kappa = \CC\gens{x}$.

> See the text for $\kappa$ defined on $\liesl_2$.
:::

:::{.lemma title="?"}
Let $I \normal L$. If $\kappa$ is the Killing form of $L$ and $\kappa_I$ that of $I$, then
\[
\kappa_I = \ro{\kappa}{I\times I}
.\]
:::

:::{.proof title="?"}
Let $x\in I$; then $\ad_x(L) \subseteq I$ since $I$ is an ideal. Choosing a basis for $I$ and extending it to $L$ yields a matrix of the following shape:

![](figures/2022-09-07_09-56-24.png)

So if $x,y\in I$, we have
\[
\kappa(x,y) &= \trace(\ad_x \circ \ad_y) \\
&= \trace(\ad_{I, x} \circ \ad_{I, y}) \\
&= \kappa_I(x, y)
.\]
:::

# Friday, September 09

## 5.1: The Killing form

:::{.remark}
For the rest of the course: $k = \kbar$ and $\characteristic k = 0$. Theorem from last time: $L$ is semisimple iff its Killing form $\kappa(x, y) \da \trace(\ad_x \ad_y)$ is nondegenerate.
:::

:::{.proof title="?"}
Let $S = \Rad(\kappa)$; then $S \normal L$, which is easy to check using "invariance" (associativity) of the form. Given $s,s'\in S$, the restricted form satisfies $\kappa_S(s, s') = \trace(\ad_{S, s} \ad_{S, s'}) = \trace(\ad_{L, s} \ad_{L, s'})$, as proved in the previous lemma. But this equals $\kappa(s, s') = 0$. In particular this holds for $s\in [SS]$, so by (the corollary of) Cartan's criterion, $S$ is solvable as a Lie algebra and thus a solvable ideal in $L$.
$\implies$: Since $\Rad(L)$ is the sum of all solvable ideals, we have $S \subseteq \Rad(L)$; but $L$ is semisimple, so $\Rad(L) = 0$ and thus $S=0$.

$\impliedby$: Assume $S=0$. If $I \normal L$ is a nonzero solvable ideal, then $I^{(n)} = 0$ and $I^{(n-1)} \neq 0$ for some $n$, and then $I^{(n-1)}$ is a nonzero abelian ideal -- since we want to show $\Rad(L) = 0$, we don't want this to happen! Thus it STS to show every abelian ideal is contained in $S$.

So let $I \normal L$ be an abelian ideal, $x\in I$, $y\in L$. Set $A_{xy} \da \ad_x \ad_y$ and consider
\[
A_{xy}^2 = (\ad_x \ad_y)^2: L \mapsvia{\ad_y} L \mapsvia{\ad_x} I \mapsvia{\ad_y} I \mapsvia{\ad_x} 0
,\]
which is zero since $[II] =0$. Thus $A_{xy}$ is a nilpotent endomorphism, and nilpotent endomorphisms are always traceless, so $0 = \trace(\ad_x \ad_y) = \kappa(x, y)$ for all $y\in L$, and so $x\in S$. Thus $I \subseteq S$.
:::

:::{.warnings}
$\Rad(\kappa) \subseteq \Rad(L)$ always, but the reverse containment is not always true -- see exercise 5.4.
:::

## 5.2: Simple Ideals of $L$

:::{.definition title="Direct sums of Lie algebras"}
Let $L_i\in \Lie\Alg\slice k$; then their **direct sum** is the product $L_1 \times L_2$ with bracket
\[
[x_1 \oplus x_2, y_1 \oplus y_2] \da [x_1 y_1] \oplus [x_2 y_2]
.\]
:::

:::{.remark}
In particular, $[L_1, L_2] = 0$, and thus any ideal $I_1 \normal L_1$ yields an ideal $I_1 \oplus 0 \normal L_1 \oplus L_2$. Moreover, if $L = \bigoplus I_i$ is a vector space direct sum of ideals of $L$, this is automatically a Lie algebra direct sum, since $[I_i, I_j] \subseteq I_i \intersect I_j = 0$ for all $i\neq j$.
:::

:::{.warnings}
This is *not* true for subalgebras! Also, in this theory, one should be careful about whether direct sums are as vector spaces or (in the stronger sense) as Lie algebras.
:::

:::{.theorem title="?"}
Let $L$ be a finite-dimensional semisimple Lie algebra. Then there exist ideals $I_1, \cdots, I_t$ of $L$ such that $L = \bigoplus I_i$ with each $I_i$ simple as a Lie algebra. Moreover, every simple ideal is one of the $I_i$.
:::

:::{.proof title="?"}
Let $I \normal L$ and define
\[
I^\perp \da \ts{x\in L \st \kappa(x, I) = 0 }
,\]
the orthogonal complement of $I$ with respect to $\kappa$. This is an ideal by the associativity of $\kappa$. Set $J\da I \intersect I^\perp \normal L$; then $\kappa([JJ], J) = 0$, so by Cartan's criterion $J$ is a solvable ideal, and thus $J = 0$ since $L$ is semisimple.

From the Erdmann--Wildon lemma in the appendix (posted on ELC, lemma 16.11), $\dim L = \dim I + \dim I^\perp$, and so $L = I \oplus I^\perp$; now induct on $\dim L$ to get the decomposition when $L$ is not simple. The summands are semisimple ideals, since solvable ideals in $I, I^\perp$ remain solvable in $L$.

Finally let $I\normal L$ be simple; then $[I, L] \subseteq I$ is an ideal (in both $L$ and $I$), which is nonzero since otherwise $I \subseteq Z(L) = 0$. Since $I$ is simple, this forces $[I, L] = I$. Writing $L = \bigoplus I_i$ as a sum of simple ideals, we have
\[
I = [I, L] = [I, \bigoplus I_i] = \bigoplus [I, I_i]
,\]
and by simplicity only one term can be nonzero, so $I = [I, I_j]$ for some $j$. Since $I_j$ is an ideal, $[I, I_j] \subseteq I_j$, and by simplicity of $I_j$ we have $I = I_j$.
:::

:::{.corollary title="?"}
Let $L$ be semisimple; then $L = [LL]$, and all ideals and homomorphic images (but not necessarily subalgebras) of $L$ are again semisimple. Moreover, every ideal of $L$ is a sum of simple ideals $I_j$ of $L$.
:::

:::{.proof title="?"}
Take the canonical decomposition $L = \bigoplus I_i$ and check
\[
[L, L] = [\bigoplus I_i, \bigoplus I_j] = \bigoplus [I_i, I_i] = \bigoplus I_i
,\]
where in the last step we've used that $[I_i I_i] = I_i$, since each $I_i$ is simple (in particular non-abelian).

Given $J \normal L$, write $L = J \oplus J^\perp$, both of which are semisimple as Lie algebras. In particular, if $\phi: L\to L'$, set $J \da \ker \phi \normal L$. Then $\im \phi \cong L/J \cong J^\perp$ as Lie algebras, using the orthogonal decomposition, so $\im \phi$ is semisimple.

Finally if $J \normal L$ then $L = J \oplus J^\perp$ with $J$ semisimple, so by the previous theorem $J$ decomposes as $J = \oplus K_i$ with $K_i$ (simple) ideals in $J$ -- but these are (simple) ideals in $L$ as well since the sum is direct. Thus the $K_i$ are a subset of the $I_j$, since these are the only simple ideals of $L$.
:::

# Monday, September 12

:::{.remark}
Question from last time: does $L$ always factor as $\Rad(L) \oplus L_{\ss}$ with $L_{\ss}$ semisimple? Not always; instead there is a semidirect product decomposition $L = \Rad(L) \semidirect \lies$ where $\lies$ is the *Levi subalgebra*.

Consider $L = \liegl_n$: then $\Rad(L) \neq \lieh$ since $[h, e_{ij}] = (h_i - h_j)e_{ij}$, and in fact this forces $\Rad(L) = \CC I_n = Z(L)$, with complementary subalgebra $\lies = \liesl_n$. Note that $\liegl_n = \CC I_n \oplus \liesl_n$ where $\liesl_n = [L L]$, a direct sum, and $\liegl_n$ is **reductive**.
:::

## 5.3: Inner Derivations

:::{.theorem title="In semisimples, every derivation is inner"}
If $L$ is semisimple then $\ad(L) = \Der L$.
:::

:::{.proof title="?"}
We know $\ad(L) \leq \Der L$ is a subalgebra, and $L$ semisimple implies $0 = Z(L) = \ker \ad$, so $\ad: L\iso \ad(L)$ is an isomorphism and $\ad(L)$ is semisimple. The Killing form of a semisimple Lie algebra is always nondegenerate, so $\kappa_{\ad(L)}$ is nondegenerate, while $\kappa_{\Der L}$ may a priori be degenerate.

Recall that $\ad(L) \normal \Der L$, so $[\Der L, \ad(L)] \subseteq \ad(L)$. Define $\ad(L)^\perp \normal \Der L$ to be the orthogonal complement of $\ad(L)$ in $\Der(L)$ with respect to $\kappa_{\Der L}$, which is an ideal by the associative property.

Claim: $\ad(L) \intersect \ad(L)^\perp = 0$. This follows readily from the fact that $\kappa_{\ad(L)}$ is nondegenerate: by the lemma on restricting Killing forms to ideals, the intersection lies in $\Rad(\kappa_{\ad(L)}) = 0$.

Note that $\ad(L), \ad(L)^\perp$ are both ideals, so $[\ad(L), \ad(L)^\perp] \subseteq \ad(L) \intersect \ad(L)^\perp = 0$. Let $\delta \in \ad(L)^\perp$ and $x\in L$; then $0 = [\delta, \ad_x] = \ad_{ \delta(x) }$, where the last equality follows from an earlier exercise. Since $\ad$ is injective, $\delta(x) = 0$ for all $x$, and so $\delta = 0$; thus $\ad(L)^\perp = 0$.

So we have $\Rad \kappa_{\Der L} \subseteq \ad(L)^\perp = 0$, since any derivation $\kappa\dash$orthogonal to all derivations is in particular orthogonal to inner derivations, and thus $\kappa_{\Der L}$ is nondegenerate. Finally, we can write $\Der L = \ad(L) \oplus \ad(L)^\perp = \ad(L) \oplus 0 = \ad(L)$.
:::

## 5.4: Abstract Jordan Decompositions

:::{.remark}
Earlier: if $A\in \Alg\slice\FF^\fd$, not necessarily associative, then $\Der A$ contains the semisimple and nilpotent parts of all of its elements. Applying this to $A = L$ for $L$ semisimple yields $\Der L = \ad(L) \cong L$, and $\ad_x = s + n$ with $s, n \in \ad(L)$. So write $s = \ad_{x_s}$ and $n = \ad_{x_n}$; then $\ad_x = \ad_{x_s} + \ad_{x_n} = \ad_{x_s + x_n}$, so $x = x_s + x_n$ by injectivity of $\ad$, yielding a definition of the semisimple and nilpotent parts of $x$.
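As a concrete sketch of the linear-algebra computation that the abstract decomposition mirrors (our own illustration; `jordan_form` is sympy's built-in), one can extract the semisimple and nilpotent parts from the JCF:

```python
from sympy import Matrix, diag

x = Matrix([[2, 1, 0],
            [0, 2, 0],
            [0, 0, 3]])

P, J = x.jordan_form()                        # x = P * J * P^{-1}
D = diag(*[J[i, i] for i in range(J.rows)])   # diagonal part of J

s = P * D * P.inv()   # semisimple part x_s
n = x - s             # nilpotent part x_n

assert s * n == n * s                  # the parts commute
assert n ** 3 == Matrix.zeros(3, 3)    # n is nilpotent
```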
If $L \leq \liegl(V)$, it turns out that these coincide with the usual decomposition -- this is proved in section 6.4.
:::

## 6.1: Modules (Chapter 6: Complete Reducibility of Representations)

:::{.definition title="$L\dash$modules and representations"}
Let $L \in \Lie\Alg\slice \CC^\fd$; then a **representation** of $L$ on $V$ is a homomorphism of Lie algebras $\phi: L \to \liegl(V)$. A vector space $V\in\Vect\slice \CC$ with an *action* of $L$, i.e. an operation
\[
L\times V &\to V \\
(x, v) &\mapsto x.v
,\]
is an **$L\dash$module** iff for all $a,b\in \CC, x,y\in L, v,w\in V$,

- (M1) $(ax+by).v = a(x.v) + b(y.v)$.
- (M2) $x.(av+bw) = a(x.v) + b(x.w)$.
- (M3) $[xy].v = x.(y.v) - y.(x.v)$.
:::

:::{.remark}
An $L\dash$module $V$ is equivalent to a representation of $L$ on $V$. If $\phi: L \to \liegl(V)$ is a representation, define $x.v \da \phi(x)v \da \phi(x)(v)$. Conversely, for $V\in \mods{L}$ define $\phi: L\to \liegl(V)$ by $\phi(x)(v) \da x.v$.
:::

:::{.example title="?"}
$L\in \mods{L}$ using $\ad$; this yields the **adjoint representation**.
:::

:::{.definition title="Morphism of $L\dash$modules"}
A **morphism** of $L\dash$modules is a linear map $\psi: V\to W$ such that $\psi(x.v) = x.\psi(v)$ for all $x\in L, v\in V$. It is an isomorphism of $L\dash$modules iff it is an isomorphism of the underlying vector spaces.[^turns_out_linear] In this case we say $V, W$ are *equivalent* representations.

[^turns_out_linear]: It turns out that the inverse map of vector spaces $\psi\inv: W\to V$ is again a morphism of $L\dash$modules.
:::

:::{.example title="?"}
Let $L = \CC x$ for $x\neq 0$, then

- What is a representation of $L$ on $V$? This amounts to picking an element of $\Endo(V)$.
- When are two $L\dash$module structures on $V$ equivalent? This happens iff the two linear transformations are conjugate in $\Endo(V)$.

Thus representations of $L$ on $V$ are classified by Jordan canonical forms when $V$ is finite dimensional.
:::

:::{.definition title="Submodules, irreducible/simple modules"}
For $V\in \mods{L}$, a subspace $W \subseteq V$ is a **submodule** iff it is an invariant subspace, so $x.w\in W$ for all $x\in L, w\in W$. $V$ is **irreducible** or **simple** if $V$ has exactly two invariant subspaces, $V$ and $0$.
:::

:::{.warnings}
Note that this rules out $0$ as being a simple module.
:::

:::{.definition title="Quotient modules"}
For $W\leq V \in \mods{L}$ a submodule, the **quotient module** $V/W$ has underlying vector space the quotient $V/W$, with action $x.(v+W) \da (x.v) + W$. This is well-defined precisely when $W$ is a submodule.
:::

:::{.warnings}
$I\normal L \iff \ad(I) \leq \ad(L)$, i.e. ideals correspond to submodules under the adjoint representation. However, being a simple Lie algebra is not the same as being a simple submodule: e.g. a 1-dimensional ideal is an irreducible submodule of $L$ but is not a simple Lie algebra.
:::

# Wednesday, September 14

## 6.1: Structure theory

:::{.definition title="Natural representation"}
Note that all of the algebras $\lieg$ we've considered naturally act on column vectors in some $\FF^n$ -- this is the **natural representation** of $\lieg$.
:::

:::{.example title="?"}
Letting $\lieb_n$ be the upper triangular matrices in $\liegl_n$, this acts on $\FF^n$. Taking a standard basis $\FF^n = V \da \gens{e_1,\cdots, e_n}_\FF$, one gets submodules $V_i = \gens{e_1,\cdots, e_i}_\FF$, which correspond to the upper triangular blocks obtained by truncating to the first $i$ columns of the matrix. This yields a submodule precisely because the lower-left block is zero.
:::

:::{.remark}
Let $\phi: L\to \liegl(V)$ be a representation, noting that $\Endo(V)$ is an associative algebra.
We can consider the associative unital algebra $A$ generated by the image $\phi(L)$. Note that the structure of $V$ as an $L\dash$module is the same as its $A\dash$module structure, so we can apply theorems/results from the representation theory of rings and algebras to study Lie algebra representations, e.g. the Jordan-Hölder theorem and Schur's lemma.
:::

:::{.definition title="Direct sums of $L\dash$modules"}
Given $V, W\in \mods{L}$, their vector space direct sum admits an $L\dash$module structure using $x.(v, w) \da (x.v, x.w)$, which we'll write as $x.(v+w) \da xv + xw$.
:::

:::{.definition title="Completely reducible modules"}
$V\in \mods{L}$ is **completely reducible** iff $V$ is a direct sum of irreducible $L\dash$modules. Equivalently, for each submodule $W\leq V$ there is a complementary submodule $W'$ such that $V = W \oplus W'$.
:::

:::{.warnings}
"Not irreducible" is strictly weaker than "completely reducible", since a submodule may not admit an invariant complement -- for example, the flag in the first example above.
:::

:::{.example title="?"}
The natural representation of $\lieh_n$ is completely reducible, decomposing as $V_1 \oplus V_2 \oplus \cdots \oplus V_n$ where $V_i = \FF e_i$.
:::

:::{.definition title="Indecomposable modules"}
A module $V$ is **indecomposable** iff $V\neq W \oplus W'$ for nonzero proper submodules $W, W' \leq V$. This is weaker than irreducibility.
:::

:::{.example title="?"}
Consider the natural representation $V$ for $L \da \lieb_n$. Every nonzero submodule of $V$ must contain $e_1$, so $V$ is indecomposable if $n\geq 1$.
:::

:::{.remark}
Recall that the **socle** of $V$ is the (direct) sum of all of its irreducible submodules. If $\soc(V)$ is simple (so there is exactly one irreducible submodule) then $V$ is indecomposable, since every nonzero summand must contain this simple submodule "at the bottom". For $L = \lieb_n$, note that $\soc(V) = \FF e_1$.
:::

:::{.remark}
For the remainder of chapters 6 and 7, we assume all modules are finite-dimensional over $\FF = \bar\FF$.
:::

:::{.theorem title="Jordan-Hölder"}
Let $L\in\Lie\Alg\slice\FF^\fd$ and $V\in \mods{L}^\fd$; then there exists a **composition series**, a sequence of submodules $0 = V_0 \subseteq V_1 \subseteq \cdots \subseteq V_n = V$ such that each composition factor (sometimes called a *section*) $V_i/V_{i-1}$ is irreducible/simple. Moreover, any two composition series admit the same composition factors with the same multiplicities, up to rearrangement and isomorphism.
:::

:::{.example title="?"}
If $V = W \oplus W'$ with $W, W'$ simple, there are two composition series:

- $0 \subseteq W \subseteq V$ with factors $W, V/W \cong W'$,
- $0 \subseteq W' \subseteq V$ with factors $W', V/W' \cong W$.

These aren't equal, since they're representations on different coset spaces, but they are isomorphic.
:::

:::{.theorem title="Schur's lemma"}
If $\phi: L\to \liegl(V)$ is an irreducible representation, then $\Endo_L(V)\cong \FF$.
:::

:::{.proof title="?"}
If $V$ is irreducible then every $f\in \mods{L}(V, V)$ is either zero or an isomorphism, since $\ker f \leq V$ and $f(V) \leq V$ are submodules. Thus $\Endo_L(V)$ is a division algebra over $\FF$, and the only such finite-dimensional algebra is $\FF$ itself since $\FF = \bar\FF$. Explicitly, let $f\in \Endo_L(V)$; since $\FF = \bar\FF$, $f$ has an eigenvalue $\lambda \in \FF$. Then $f - \lambda I \in \Endo_L(V)$ has a nontrivial kernel, the $\lambda\dash$eigenspace, so $f - \lambda I = 0 \implies f = \lambda I$.
:::

:::{.warnings}
Schur's lemma is not always true for Lie *superalgebras*.
:::

:::{.definition title="Trivial modules"}
The **trivial $L\dash$module** is $\FF \in \mods{L}$ equipped with the zero map \( \varphi: L\to \liegl(\FF) \), where $x.1 \da 0$ for all $x\in L$. Note that this is irreducible, and any two such 1-dimensional trivial modules are isomorphic by sending a basis $\ts{e_1}$ to $1\in \FF$. More generally, a module $V \in \mods{L}$ is trivial iff $x.v = 0$ for all $x\in L, v\in V$, and such a $V$ is completely reducible as a direct sum of copies of the above trivial module.
:::

:::{.definition title="Homs, Tensors, and duals"}
Let $V, W\in \mods{L}$, then the following are all $L\dash$modules:

- $V\tensor_\FF W$: the action is $x.(v \tensor w) = (x.v)\tensor w + v\tensor(x.w)$.[^deriv_group]
- $\Hom_\FF(V, W)$: the action is $(x.f)(v) = x.(f(v)) - f(x.v) \in W$.
- $V\dual \da \Hom_\FF(V, \FF)$: the action is a special case of the above since $x.w = 0$ for $w\in \FF$, so[^inverse_dif]
\[
(x.f)(v) = -f(x.v)
.\]

[^inverse_dif]: One might expect an inverse from group theory, which differentiates to a minus sign.

[^deriv_group]: Note that groups would act on each factor separately, and this is more like a derivative.
:::

:::{.remark}
These structures come from the Hopf algebra structure on the universal associative algebra $U(\lieg)$, called the **universal enveloping algebra**. Note that we also have
\[
\Hom_\FF(V, W)\isoas{\mods{L}} V\dual \tensor_\FF W
.\]
:::

# Friday, September 16

## 6.2: Casimir element of a representation

:::{.remark}
Last time: $L$ semisimple over $\CC$ implies $\kappa(x,y)\da \trace(\ad_x \ad_y)$ is nondegenerate. More generally, for any faithful representation $\phi: L\to \liegl(V)$ we can define a symmetric, associative bilinear form $\beta_\phi$ on $L$ by $\beta_\phi(x, y) = \trace(\phi(x) \phi(y))$; note that $\beta_{\ad} = \kappa$. Using Cartan's criterion as before, $\Rad(\beta_\phi)$ is a solvable ideal of $L$, hence zero, so $\beta_\phi$ is nondegenerate.

This defines an isomorphism $L \iso L\dual$ by $x\mapsto \beta(x, \wait)$, so given a basis $\mcb \da \ts{x_i}_{i\leq n}$ for $L$ there is a unique dual basis $\mcb' = \ts{y_i}_{i\leq n}$ for $L$ such that $\beta(x_i, y_j) = \delta_{ij}$. Note that under $L\iso L\dual$, the $y_i\in L$ correspond to the basis of $L\dual$ dual to $\ts{x_i}$.
:::

:::{.example title="?"}
For $L\da\liesl_2(\CC)$, the matrix of $\kappa$ is given by $\mattt 0 0 4 0 8 0 4 0 0$ with respect to the ordered basis $\mcb=\ts{x,h,y}$.[^humphreys_p22] Thus $\mcb' = \ts{{1\over 4}y, {1\over 8}h, {1\over 4}x}$.

[^humphreys_p22]: See Humphreys p.22.
:::

:::{.definition title="Casimir element"}
Now let $\phi: L\to \liegl(V)$ be a faithful representation. Fix a basis $\mcb$ of $L$ and define the **Casimir element**
\[
c_\phi = c_\phi(\beta) \da \sum_{i\leq n} \phi(x_i) \circ \phi(y_i) \in \Endo_\CC(V)
.\]
:::

:::{.remark}
One can show that $c_\phi$ commutes with $\phi(L)$. Supposing $\phi$ is irreducible, $\Endo_L(V) = \CC$ by Schur's lemma, so $c_\phi$ acts on $V$ as a scalar. Since a scalar operator has trace (scalar)$\cdot \dim V$, the scalar is determined by
\[
\trace(c_ \varphi) = \sum_{i\leq n} \trace( \varphi(x_i) \varphi(y_i) ) = \sum_{i\leq n} \beta(x_i, y_i) = n = \dim L \implies c_\phi = {\dim L \over \dim V} \id_V
.\]
In particular, $c_\phi$ is independent of the choice of $\mcb$. This will be used to prove Weyl's theorem, one of the main theorems of semisimple Lie theory over $\CC$.

If $L$ is semisimple and $\phi$ is not faithful, replace $L$ by $L/\ker \phi$.
Since $\ker \phi \normal L$ and $L$ is semisimple, $\ker \phi$ is a direct sum of certain simple ideals of $L$ and the quotient is isomorphic to the sum of the remaining ideals. This yields a representation $\bar\phi: L/\ker \varphi \to \liegl(V)$ which is faithful and can be used to define a Casimir operator.
:::

:::{.example title="?"}
Let $L = \liesl_2(\CC)$ and let $V = \CC^2$ be the natural representation, so $\phi: L\to \liegl(V)$ is the identity. Fix $\mcb = \ts{x,h,y}$; then $\beta(u, v) = \trace( u v )$ since $\phi(u) = u$ and $\phi(v) = v$. We get the following products $uv$ (omitted entries are determined by $\trace(uv) = \trace(vu)$):

|                  | $\matt 0100$  | $\matt 100{-1}$  | $\matt 0010$        |
|----------------- |-------------- |----------------- |-------------------- |
| $\matt 0100$     | 0             | $\matt 0{-1}00$  | $\matt 1000$        |
| $\matt 100{-1}$  |               | I                | $\matt 0 0 {-1} 0$  |
| $\matt 0010$     |               |                  | 0                   |

Thus $\beta = \mattt 001 020 100$, and $\mcb' = \ts{y, {1\over 2}h, x}$, so
\[
c_\phi = xy + {1\over 2}h^2 + yx = \matt 1000 + {1\over 2}I + \matt 0001 = {3\over 2}I = {\dim \liesl_2(\CC) \over \dim \CC^2} I
.\]
:::

## 6.3: Complete reducibility

:::{.lemma title="?"}
Let $\phi: L\to \liegl(V)$ be a representation of a semisimple Lie algebra; then $\phi(L) \subseteq \liesl(V)$. In particular, $L$ acts trivially on any 1-dimensional $L\dash$module, since a $1\times 1$ traceless matrix is zero. The proof follows from $L = [LL]$.
:::

:::{.warnings}
Arbitrary reductive Lie algebras can have nontrivial 1-dimensional representations.
:::

:::{.theorem title="Weyl's theorem"}
Let $\phi: L\to \liegl(V)$ be a finite-dimensional representation of a finite-dimensional semisimple Lie algebra over $\CC$. Then $\phi$ is completely reducible.
:::

:::{.warnings}
This is not true in characteristic $p$, nor for infinite-dimensional representations in characteristic 0.
:::

### Proof of Weyl's theorem

:::{.remark}
Replace $L$ by $L/\ker \phi$ if necessary to assume $\phi$ is faithful, since these yield the same module structures. Define a Casimir operator $c_\phi$ as before, and recall that complete reducibility of $V$ is equivalent to every $L\dash$submodule $W\leq V$ admitting a complementary submodule $W''$ such that $V = W \oplus W''$. We proceed by induction on $\dim V$, where the dimension 1 case is clear.

**Case I**: $\codim_V W = 1$, i.e. $\dim (V/W) = 1$. Take the SES $W \injects V \surjects V/W$.

**Case 1**: Suppose $W' \leq W$ is a proper nonzero $L\dash$submodule. Schematic:

![](figures/2022-09-16_10-02-33.png)

Using the 2nd isomorphism theorem, there is a SES $W/W' \injects V/W' \surjects V/W$. Since $\dim W' > 0$, $\dim V/W' < \dim V$, so by the IH there is a 1-dimensional complement to $W/W'$ in $V/W'$. This lifts to $\tilde W \leq V$ with $W' \leq \tilde W$ and $\dim \tilde W/W' = 1$, and moreover $V/W' = W/W' \oplus \tilde W/W'$.

Take the SES $W' \injects \tilde W \surjects \tilde W/W'$ with $\dim \tilde W < \dim V$. Apply the IH again to get a submodule $X \leq \tilde W \leq V$ with $\tilde W = W' \oplus X$. We'll continue by showing $V = W \oplus X$.
:::

# Monday, September 19

## Proof of Weyl's theorem (continued)

:::{.remark}
Recall: we're proving Weyl's theorem, i.e. every finite-dimensional representation of a semisimple Lie algebra over $\CC$ is completely reducible. Strategy: show every $W \leq_L V$ has a complement $W'' \leq_L V$ such that $V = W \oplus W''$; induct on $\dim V$.
:::

:::{.proof title="of Weyl's theorem, continued"}
**Case I**: $\dim V/W = 1$.

**Case 1**: $W$ is reducible.
We got $0 < W' < W < V$ (proper submodules), represented schematically by a triangle. We showed $V/W' \cong W/W' \oplus \tilde W/W'$, and

1. $\tilde W \intersect W \subseteq W'$,
2. $V= W + \tilde W + W' = W + \tilde W$ since $W' \subseteq W$,
3. $\tilde W = W' \oplus X$ for some submodule $X \leq_L \tilde W \leq_L V$.

Thus substituting point 3 into point 2 yields $V = W + \tilde W = W + W' +X = W + X$; we want to prove this sum is direct. Since $X$ is contained in $\tilde W$, we can write
\[
X \intersect W &= (X \intersect \tilde W) \intersect W \\
&= X \intersect (\tilde W \intersect W) \\
&\subseteq X \intersect W' \qquad\text{by point 1}\\
&= 0 \qquad \text{by point 3, the sum } W' \oplus X \text{ being direct}
,\]
so $V = W \oplus X$.

**Case 2**: $W$ is irreducible. Let $c_\phi$ be the Casimir element of $\phi$, and note that $c_\phi(W) \subseteq W$ since $c_\phi$ is built out of endomorphisms in $\phi(L)$ sending $W$ to $W$ ($W$ being a submodule). In fact, $\phi(L)(V) \subseteq W$, since $V/W$ is a 1-dimensional representation of the semisimple Lie algebra $L$, hence trivial. Thus $c_\phi(V) \subseteq W$, and so $\ker c_\phi \neq 0$ since $W < V$ is proper. Note also that $c_\phi$ commutes with everything in $\phi(L)$ on $V$, and so defines a morphism $c_\phi \in\mods{L}(V, V)$ with $\ker c_\phi \leq_L V$.

On the other hand, $c_\phi$ induces an element of $\Endo_{L}(W)$, and since $W$ is irreducible, $\ro{c_\phi}{W} = \lambda \id_W$ for some scalar $\lambda$. This scalar can't be zero: since $c_\phi(V) \subseteq W$, the trace of $c_\phi$ on $V$ is computed entirely on $W$, so $\lambda \dim W = \trace(c_\phi) = \dim L$ and $\lambda = {\dim L \over \dim W} > 0$. Hence $\ker c_\phi \intersect W = 0$. Since $\codim_V W = 1$, i.e. $\dim W = \dim V - 1$, rank-nullity forces $\dim \ker c_\phi = 1$, and we have a direct sum decomposition $V = W \oplus \ker c_\phi$.

> Use of the Casimir element in basic Lie theory: producing a complement to an irreducible submodule.

**Case II**: Suppose $0 < W < V$ with $W$ any nontrivial $L\dash$submodule; there is a SES \( W \injects V \surjects V/W \).

Consider $H \da \hom_\CC(V, W)$; then $H \in \lmod$ by $(x.f)(v) \da x.(f(v)) - f(x.v)$ for $f\in H, x\in L, v\in V$. Let $\mcv \da \ts{f \in H \st \ro f W = \alpha \id_W \,\text{ for some } \alpha \in \CC} \subseteq H$. For $f\in \mcv$ and $w\in W$, we have
\[
(x.f)(w) = x.f(w) - f(x.w) = \alpha x.w - \alpha x.w = 0
.\]
So let $\mcw \da \ts{f\in \mcv \st f(W) = 0} \subseteq \mcv$; then we've shown that $L.\mcv \subseteq \mcw$.

Now roughly, the complement is completely determined by the scalar. Rigorously, since $\dim \mcv/\mcw = 1$, any $f\in \mcv$ is determined modulo $\mcw$ by the scalar $\alpha$ with $\ro{f}{W} = \alpha \id_W$: we have $f-\alpha \chi_W \in \mcw$, where $\chi_W$ is any extension of $\id_W$ to $V$ which is zero on a complement of $W$, e.g. obtained by extending a basis of $W$ to $V$ and letting $\chi_W$ act by zero on the new basis elements.

Now $\mcw \injects \mcv \surjects \mcv/\mcw$ in $\lmod$ with $\codim_\mcv \mcw = 1$. By Case I, $\mcv = \mcw \oplus \mcw''$ for some complementary $L\dash$submodule $\mcw''$. Let $f: V\to W$ span $\mcw''$; then $\ro f W$ is a nonzero scalar -- a scalar since $f\in \mcv$, and nonzero since $f$ is in the complement of $\mcw$. By rescaling, we can assume the scalar is 1, so $\im f = W$, and by rank-nullity $\dim \ker f = \dim V - \dim W$. Thus $\ker f$ has the right dimension to be the desired complement.

To see that $\ker f$ is an $L\dash$submodule, note that $L.f \subseteq \mcw'' \intersect \mcw = 0$: indeed $L.f \subseteq \mcw''$ since $\mcw''$ is an $L\dash$submodule, and $L.f \subseteq \mcw$ since $f\in \mcv$ and $L.\mcv \subseteq \mcw$. Now if $x.f = 0$ then $x.(f(v)) = f(x.v)$ for all $v$, making $f$ a morphism of $L\dash$modules. Thus $W'' \da \ker f \leq_L V$, and since $\ro f W = \id_W$, we get $W \intersect W'' = 0$.
Since the dimensions add up correctly, we get $V = W \oplus W''$.
:::

## 6.4: Preservation of Jordan decomposition

:::{.theorem title="?"}
Let $L \leq \liegl(V)$ be a subalgebra with $L$ semisimple and finite-dimensional. Given $x\in L$, there are two decompositions: the usual JCF $x = s + n$, and the abstract decomposition $\ad_x = \ad_{x_s} + \ad_{x_n}$. $L$ contains the semisimple and nilpotent parts of all of its elements, and in particular the two above decompositions coincide.
:::

:::{.proof title="Idea"}
The proof is technical, but here's a sketch:

- Construct a subspace $L \leq L' \leq_L \liegl(V)$ such that $L'$ contains the semisimple and nilpotent parts of all of its elements, where $L\actson \liegl(V)$ by $\ad: L\to \liegl(\liegl(V))$.
- Check $L' \leq N_{\liegl(V)}(L)$ (the normalizer), so $[LL'] \subseteq L$.
- Show $L' = L$:
  - If $L'\neq L$, use Weyl's theorem to get a complement with $L' = L \oplus M$.
  - Check $[LM] \subseteq [LL'] \subseteq L$ and $[LM] \subseteq M$ since $M\leq_L L'$, forcing $[LM] \subseteq L \intersect M = 0$.
  - Using Weyl's theorem, split all of $V$ into a sum of irreducibles, bracket against the irreducibles, and use specific properties of this $L'$ to show $M = 0$.
- Since $s, n\in L$ when $x=s+n$ and $\ad_x = \ad_s + \ad_n = \ad_{x_s} + \ad_{x_n}$, the result follows from uniqueness of the abstract Jordan decomposition: $s=x_s, n=x_n$ (using that $\ad$ is injective when $L$ is semisimple since $Z(L) = 0$).
:::

:::{.corollary title="?"}
If $L \in \Lie\Alg\slice \CC^{\ss}$ (not necessarily linear) and $\phi: L\to \liegl(V)$ is a finite-dimensional representation, then writing $x=s+n$ for the abstract Jordan decomposition, $\phi(x) = \phi(s) + \phi(n)$ is the usual Jordan decomposition of $\phi(x)\in \liegl(V)$.
:::

:::{.proof title="Sketch"}
Consider $\ad_{\phi(L)}\phi(s)$ and $\ad_{\phi(L)}\phi(n)$, which are respectively semisimple (acting on a space decomposing into a direct sum of eigenspaces) and nilpotent, and which commute, yielding the abstract Jordan decomposition of $\ad_{\phi(x)}$. Now apply the theorem.
:::

# Ch. 7: Representations of $\liesl_2(\CC)$ (Wednesday, September 21)

:::{.remark}
If $L\in \Lie\Alg^{\ss}$ and $s\in L$ is a semisimple element, then $\phi(s)$ is semisimple in any finite-dimensional representation $\phi$ of $L$. In particular, taking the natural representation of a linear $L$, this yields a semisimple operator. For the same reason, $\ad_s$ is semisimple. Similar statements hold for nilpotent elements.
:::

## 7.1: Weights and Maximal Vectors

:::{.remark}
Let $L \da \liesl_2(\CC)$, then recall

- $x = \matt 0100$
- $h = \matt 100{-1}$
- $y= \matt 0010$
- $[xy] = h$
- $[hx] = 2x$
- $[hy] = -2y$

Goal: classify $\lmod^\fd$. By Weyl's theorem, they're all completely reducible, so it's enough to describe the simple objects.
:::

:::{.definition title="Weight decomposition and weight spaces"}
Note that $L \leq \liegl_2(\CC) = \liegl(\CC^2)$, and since $h$ is semisimple, $\phi(h)$ acts semisimply on any finite-dimensional representation $V$ with $\phi: L\to \liegl(V)$. I.e. $\phi(h)$ acts diagonally on $V$. Thus $V = \bigoplus _{ \lambda} V_{ \lambda}$, a direct sum of eigenspaces for the $\phi(h)$ action, where
\[
V_\lambda \da \ts{v\in V \st h.v = \lambda v, \lambda\in \CC}
.\]
If $V_\lambda \neq 0$ we say $\lambda$ is a **weight** of $h$ in $V$ and $V_\lambda$ is the corresponding **weight space**.
:::

:::{.lemma title="?"}
If $v\in V_ \lambda$, then $x.v \in V_ {\lambda+2}$ and $y.v \in V_{ \lambda- 2}$.
:::

:::{.exercise title="?"}
Prove this using the commutation relations.
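For a numerical sanity check of the lemma (not a substitute for the exercise; a sketch of our own using the adjoint representation, where the weights of $h$ are $2, 0, -2$):

```python
import numpy as np

# ad matrices for sl2 in the ordered basis (x, h, y):
ad_x = np.array([[0., -2., 0.],
                 [0.,  0., 1.],
                 [0.,  0., 0.]])
ad_h = np.diag([2., 0., -2.])
ad_y = np.array([[ 0., 0., 0.],
                 [-1., 0., 0.],
                 [ 0., 2., 0.]])

# The bracket relations hold: [ad_x, ad_y] = ad_h, etc.
comm = lambda a, b: a @ b - b @ a
assert np.allclose(comm(ad_x, ad_y), ad_h)
assert np.allclose(comm(ad_h, ad_x), 2 * ad_x)
assert np.allclose(comm(ad_h, ad_y), -2 * ad_y)

# v = y spans the (-2)-eigenspace of ad_h; x.v lands in the 0-eigenspace.
v = np.array([0., 0., 1.])   # the basis vector y, of weight -2
assert np.allclose(ad_h @ v, -2 * v)
w = ad_x @ v                 # x.v = h, which has weight -2 + 2 = 0
assert np.allclose(ad_h @ w, 0 * w)
```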
::: :::{.definition title="Highest weights"} Note that if $V$ is finite-dimensional then there can not be infinitely many nonzero $V_\lambda$, so there exists a $\lambda\in \CC$ such that $V_{ \lambda} \neq 0$ but $V_{ \lambda+ 2} = 0$. We call $\lambda$ a **highest weight** (h.w.) of $V$ (which will turn out to be unique) and any nonzero $v\in V$ a **highest weight vector**. ::: ## 7.2: Classification of Irreducible $\liesl_2(\CC)\dash$Modules :::{.lemma title="?"} Let $V \in \lmod^{\fd, \irr}$ and let $v_0\in V_{ \lambda}$ be a h.w. vector. Set $v_{-1} = 0$ for $i\geq 0$ and $v_i \da {1\over i!}y^i v_0$, then for $i\geq 0$, 1. $h.v_i = (\lambda- 2i)v_i$ 2. $y.v_i = (i+1)v_{i+1}$ 3. $x.v_i = ( \lambda- i + 1) v_{i-1}$. ::: :::{.proof title="?"} In parts: 1. By the lemma and induction on $i$. 2. Clear! 3. Follows from $i x.v_i = x.(y.v_{i-1}) = y.(x.v_{i-1}) + [xy].v_{i-1}$ and induction on $i$. ::: :::{.remark} Some useful facts: - The nonzero $v_i$ are linearly independent since they are eigenvectors of $h$ with different eigenvalues -- this is a linear algebra fact. - The subspace of $V$ spanned by the nonzero $v_i$ is an $L\dash$submodule, but since $V$ is irreducible the $v_i$ must form a basis for $V$. - Since $V$ is finite-dimensional, there must be a smallest $m\geq 0$ such that $v_m \neq 0$ but $v_{m+1} = 0$, and thus $v_{m+k} = 0$ for all $k$. Thus $\dim_\CC V = m+1$ with basis $\ts{v_0, v_1, \cdots, v_m}$. - Since $v_{m+1} = 0$, we have $0 = x.v_{m+1} = ( \lambda- m) v_m$ where $v_m\neq 0$, so $\lambda = m \in \ZZ_{\geq 0}$. Thus its highest weight is a non-negative integer, equal to 1 less than the dimension. We'll reserve $\lambda$ to denote a highest weight and $\mu$ an arbitrary weight. - Thus the weights of $V$ are $\ts{m, m-2, \cdots, \star, \cdots, -m+2, -m}$ where $\star = 0$ or $1$ depending on if $m$ is even or odd respectively, each occurring with multiplicity one (using that $\dim V_{\mu} = 1$ if $\mu$ is a weight of $V$). ::: :::{.theorem title="?"} Let $V \in \lmod^{\fd, \irr}$ for $L\da \liesl_2(\CC)$, then 1. Relative to $h$, $V$ is a direct sum of weight spaces $V_\mu$ for $\mu \in \ts{m, m-2,\cdots, -m+2, -m}$ where $m+1=\dim V$ and each weight space is 1-dimensional. 2. $V$ has a unique (up to nonzero scalar multiples) highest weight vector whose weight (the highest weight of $V$) is $m\in \ZZ_{\geq 0}$ 3. The action $L\actson V$ is given explicitly as in the lemma if the basis is chosen in a prescribed fashion. In particular, there exists a unique finite-dimensional irreducible $\liesl_2\dash$module of dimension $m+1$ up to isomorphism. ::: :::{.corollary title="?"} Let $V \in \lmod^{\fd, \irr}$, then the eigenvalues of $h\actson V$ are all integers, and each occurs along with its negative with the same multiplicity. Moreover, in a decomposition of $V$ in a direct sum of irreducibles, the number of simple summands is $\dim V_0 + \dim V_1$. ::: :::{.remark} Existence of irreducible highest weight modules of highest weight $m \geq 0$: - $m=0$: take the trivial representation $V=\CC$. - $m=1$: take $V = \CC^2$ with the natural representation. - $m=2$: take $V = L$ with the adjoint representation. ::: :::{.remark} The formula in the lemma can be used to construct an irreducible representation of $L$ having highest weight \( \lambda= m \) for any $m\in \ZZ_{\geq 0}$, which is unique up to isomorphism and denoted $L(m)$ (or $V(m)$ in Humphreys) which has dimension $m+1$. 
In fact, the formulas can be used to define an infinite-dimensional representation of $L$ with highest weight $\lambda$ for any $\lambda\in \CC$, which is denoted $M( \lambda)$ -- we just don't decree that $v_{m+1} = 0$, yielding a basis $\ts{v_0, v_1,\cdots}$. This yields a decomposition into 1-dimensional weight spaces $M( \lambda) = \oplus _{i=1}^\infty M_{ \lambda-2i}$ (**Verma modules**) where $M_{ \lambda-2i} = \gens{v_i}_\CC$. ::: # Ch. 8: Root space decompositions (Friday, September 23) :::{.remark} Recall that the relations from last time can produce an infinite-dimensional module with basis $\ts{v_0,v_1,\cdots}$. Note that if \( m\in \ZZ_{\geq 0} \), then \( x.v_{m+1} = ( \lambda- m ) v_m = 0 \). This says that one can't raise $v_{m+1}$ back to $v_m$, so $\ts{v_{m+1},v_{m+2} \cdots}$ spans a submodule isomorphic to $M(-m-2)$. Quotienting yields $L(m) \da M(m) / M(-m-2)$, also called $V(m)$, spanned by $\ts{v_0, \cdots, v_m}$. Note that $M(-m-2)$ and $L(m)$ are irreducible. ::: :::{.remark} Let $L\in \Lie\Alg\slice\CC^{\fd, \ss}$ for this chapter. ::: ## 8.1: Maximal toral subalgebras and roots :::{.remark} Let $L\ni x = x_s + x_n \in L + L$ be the abstract Jordan decomposition. then if $x=x_n$ for every $x\in L$ then $L$ is nilpotent which contradicts Engel's theorem. Thus there exists some $x=x_s\neq 0$. ::: :::{.definition title="Toral subalgebras"} A **toral subalgebra** is any nonzero subalgebra spanned by semisimple elements. ::: :::{.example title="?"} The algebraic torus $(\CC\units)^n$ which has Lie algebra $\CC^n$, thought of as diagonal matrices. ::: :::{.lemma title="?"} Let $H$ be a maximal toral subalgebra of $L$. Any toral subalgebra $H \subseteq L$ is abelian. ::: :::{.proof title="?"} Let $T \leq L$ be toral and let $x\in T$ be a basis element. Since $x$ is semisimple, it STS $\ad_{T, x} = 0$. Semisimplicity of $x$ implies $\ad_{L, x}$ diagonalizable, so we want to show $\ad_{T, x}$ has no non-zero eigenvalues. Suppose that there exists a nonzero $y\in T$ such that $\ad_{T, x}(y) = \lambda y$ for \( \lambda \neq 0 \). Then $\ad_{T, y}(x) = [yx] = -[xy] = - \ad_{T, x} = - \lambda y\neq 0$, and since $\ad_{T, y}(y) = -[yy] = 0$, $y$ is an eigenvector with eigenvalue zero. Since $\ad_{T, y}$ is also diagonalizable and $x\in T$, write $x$ as a linear combination of eigenvectors for it, say $x = \sum a_i v_i$. Then $\ad_{T, y}(x) = \sum \lambda_i a_i v_i$ and the terms with $\lambda_i = 0$ vanish, and the remaining element is a sum of eigenvectors for $\ad_{T, y}$ with nonzero eigenvalues. $\contradiction$ ::: :::{.example title="?"} If $L = \liesl_n(\CC)$ then define $H$ to be the set of diagonal matrices. Then $H$ is toral and in fact maximal: if $L = H \oplus \CC z$ for some $z\in L\sm H$ then one can find an $h\in H$ such that $[hz] \neq 0$, making it nonabelian, but toral subalgebras must be abelian. ::: :::{.definition title="Roots"} Recall that a commuting family of diagonalizable operators on a finite-dimensional vector space can be simultaneously diagonalized. Letting $H \leq L$ be maximal toral, this applies to $\ad_L(H)$, and thus there is a basis in which all operators in $\ad_L(H)$ are diagonal. Set $L_{ \alpha} \da \ts{x\in L \st [hx] = \alpha(h) x\,\,\forall h\in H}$ where \( \alpha: H\to \CC \) is linear and thus an element of $H\dual$. Note that $L_0 = C_L(H)$, and the set $\Phi\da \ts{\alpha\in H\dual \st \alpha\neq 0, L_\alpha\neq 0}$ is called the **roots** of $H$ in $L$, and $L_\alpha$ is called a **root space**. 
Note that $L_0$ is not considered a root space. This induces a **root space decomposition** \[ L = C_L(H) \oplus _{ \alpha\in \Phi } L_\alpha .\] ::: :::{.remark} Note that for classical algebras, we'll show $C_L(H) = H$ and corresponds to the standard bases given early in the book. ::: :::{.example title="?"} Type $A_n$ yields $\liesl_{n+1}(\CC)$ and $\dim H = n$ for $H$ defined to be the diagonal traceless matrices. Define $\eps_i\in H\dual$ as $\eps_i \diag(a_1,\cdots, a_{n+1}) \da a_i$, the $\Phi \da \ts{\eps_i - \eps_j \st 1\leq i\neq j\leq ,n+1}$ and $L_{\eps_i - \eps_j} = \CC e_{ij}$. Why: \[ [h, e_{ij}] = \left[ \sum a_k e_{kk}, e_{ij}\right] = a_i e_{ii} e_{ij} - a_j e_{ij} e_{jj} = (a_i - a_j) e_{ij} \da (\eps_i -\eps_j)(h) e_{ij} .\] ::: # Monday, September 26 ## 8.1 Continued :::{.remark} A Lie algebra is **toral** iff every element is semisimple -- this exists because any semisimple Lie algebra contains at least one semisimple element and you can take its span. Let $H$ be a fixed maximal toral subalgebra, then we have a root space decomposition \[ L = C_L(H) \oplus \bigoplus _{\alpha\in \Phi \subseteq H\dual} L_\alpha, \qquad L_\alpha \da \ts{ x\in L \st [hx] = \alpha(h) x \,\forall h\in H} .\] Let $L$ be semisimple and finite dimensional over $\CC$ from now on. ::: :::{.proposition title="?"} 1. \[ [L_\alpha, L_\beta] \subseteq L_{ \alpha + \beta} \qquad \forall \alpha, \beta\in H\dual .\] 2. \[ x\in L_{\alpha}, \alpha\neq 0 \implies \ad_x \text{ is nilpotent} .\] 3. If \( \alpha, \beta\in H\dual \) and \( \alpha + \beta\neq 0 \) then $L_ \alpha \perp L_ \beta$ relative to $\kappa_L$, the Killing form for $L$. ::: :::{.proof title="?"} \envlist 1. Follows from the Jacobi identity. 2. Follows from (1), that $\dim L < \infty$, and the root space decomposition. This is because $L_{\alpha}\neq L_{\alpha + \beta}\neq L_{\alpha + 2\beta} \neq \cdots$ and there are only finitely many $\beta$ to consider. 3. If \( \alpha + \beta\neq 0 \) then $\exists h\in H$ such that $(\alpha + \beta)(h) \neq 0$. For $x\in L_\alpha, y\in L_ \beta$, \[ \alpha(h) \kappa(x,y) &= \kappa([hx], y) \\ &= -\kappa([xh], y) \\ &= -\kappa(x, [hy]) \\ &= -\beta(h)\kappa(x,y) \\ &\implies ( \alpha + \beta)(h) \kappa(x,y) = 0 \\ &\implies \kappa(x,y)=0 \text{ since } (\alpha + \beta)(h) \neq 0 .\] ::: :::{.corollary title="?"} $\ro{\kappa}{L_0}$ is nondegenerate, since $L_0 \perp L_{\alpha}$ for all $\alpha\in \Phi$, but $\kappa$ is nondegenerate. Moreover, if $L_{ \alpha} \neq 0$ then $L_{- \alpha}\neq 0$ by (3) and nondegeneracy. ::: ## 8.2: $C_L(H)$ :::{.proposition title="?"} Let $H \leq L$ be maximal toral, then $H = C_L(H)$. ::: :::{.proof title="?"} Skipped, about 1 page of dense text broken into 7 steps. Uses the last corollary along with Engel's theorem. ::: :::{.observation} If $L$ is a classical Lie algebra over $\CC$, we choose $H$ to be diagonal matrices in $L$, and $x\in L\sm H$ is non-diagonal, then there exists an $h\in H$ such that $[hx]\neq 0$. Note that toral implies abelian and nonabelian implies nontoral, thus there is no abelian subalgebra of $L$ properly containing $H$ -- adding any nontoral element at all to $H$ makes it nonabelian. This same argument shows $C_L(H) = H$ since nothing else commutes with $H$. This implies that $L = H \oplus_{ \alpha \in \Phi} L_\alpha$. ::: :::{.corollary title="?"} $\ro{\kappa}{H}$ is nondegenerate. 
::: :::{.remark} As a result, $\kappa$ induces an isomorphism $H \iso H\dual$ by $h\mapsto \kappa(h, \wait)$ and $H\dual \iso H$ by $\phi\mapsto t_\phi$, the unique element such that $\kappa(t_\phi, \wait) = \phi(\wait)$. In particular, given \( \alpha\in \Phi \subset H\dual \) there is some $t_\alpha\in H$. The next 3 sections are about properties of $\Phi$: - Orthogonality, - Integrality, - Rationality. ::: ## 8.3: Orthogonality properties of $\Phi$. :::{.proposition title="Big!"} \envlist a. $\Phi$ spans $H\dual$. b. If $\alpha\in \Phi$ is a root then $-\alpha\in \Phi$ is also a root. c. Let \( \alpha\in \Phi, x\in L_{ \alpha}, y\in L_{- \alpha} \), then $[xy] = \kappa(x, y) t_\alpha$. d. If $\alpha\in \Phi$ then $[L_{ \alpha}, L_{- \alpha}] = \CC t_\alpha$ is 1-dimensional with basis $t_\alpha$. e. For any \( \alpha \in \Phi \), we have \( \alpha(t_\alpha) = \kappa(t_ \alpha, t_ \alpha) \neq 0 \). f. (Important) If \( \alpha\in \Phi, x_ \alpha\in L_{ \alpha}\smz \) then there exists some \( y_ \alpha L_{- \alpha} \) in the opposite root space such that \( x_ \alpha, y_ \alpha, h_ \alpha \da [x_ \alpha, y_ \alpha] \) span a 3-dimensional subalgebra $\liesl(\alpha) \leq L$ isomorphic to $\liesl_2(\CC)$. g. $h_\alpha = {2 t_\alpha \over \kappa(t_ \alpha, t_ \alpha)}$, \( \alpha(h_ \alpha) = 2, h_{ \alpha} = h_{- \alpha} \). ::: :::{.proof title="?"} \envlist a. If it does not span, choose $h \in H\smz$ with \( \alpha(h) = 0 \) for all \( \alpha\in \Phi \). Then $[h, L_ \alpha] = 0$ for all $\alpha$, but $[h H] = 0$ since $H$ is abelian. Using the root space decomposition, $[h L] =0$ and so $h\in Z(L) = 0$ since $L$ is semisimple. $\contradiction$ b. Follows from proposition 8.2 and $\kappa(L_ \alpha, L_ \beta) = 0$ when \( \beta\neq \alpha \). c. Let $h\in H$, then \[ \kappa(h, [xy]) &= \kappa([hx], y) \\ &= \alpha(h) \kappa(x, y)\\ &= \kappa(t_\alpha, h) \kappa(x, y) \\ &= \kappa( \kappa(x, y) t_ \alpha, h) \\ &= \kappa( h, \kappa(x, y) t_ \alpha) \\ &\implies \kappa(h, [xy] - \kappa(x,y)t_\alpha) = 0 \\ &\implies [xy] = \kappa (x,y) t_ \alpha ,\] where we've used that $[xy]\in H$ and $\kappa$ is nondegenerate on $H$ and $[L_{ \alpha}, L_{ - \alpha}] \subseteq L_0 = H$. d. (c) shows $[L_{ \alpha}, L_{ - \alpha}]$ is spanned by $t_ \alpha$ if it is nonzero. Let $x\in L_ \alpha\smz$, then if $\kappa(x, L_{ - \alpha}) = 0$ then $\kappa$ would have to be degenerate, a contradiction. So there is some $y\in L_{ - \alpha}$ with \( \kappa(x, y) \neq 0 \). Moreover $t_\alpha\neq 0$ since $\alpha\neq 0$ and $\alpha\mapsto t_\alpha$ is an isomorphism. Thus $[xy] = \kappa(x,y) t_ \alpha$. e. Suppose \( \alpha(t_\alpha) = \kappa(t_{ \alpha}, t_{ \alpha}) = 0\), then for $x\in L_{\alpha}, y\in L_{ - \alpha}$, we have $[t_ \alpha, x] = \alpha(t_ \alpha)x = 0$ and similarly $[t_ \alpha, y] = 0$. As before, find $x\in L_{ \alpha}, y\in L_{ - \alpha}$ with $\kappa(x,y)\neq 0$ and scale one so that $\kappa(x, y) = 1$. Then by (c), $[x, y] = t_ \alpha$, so combining this with the previous formula yields that $S \da \gens{x, y, t_ \alpha}$ is a 3-dimensional solvable subalgebra.[^dne_omg] Taking $\ad: L\injects \liegl(L)$, which is injective by semisimplicity, and similarly $\ro{\ad}{S}: S\injects \ad(S) \leq \liegl(L)$. We'll use Lie's theorem to show everything here is a commutator of upper triangular, thus strictly upper triangular, thus nilpotent and reach a contradiction. [^dne_omg]: Note that this don't actually exist! We're in the middle of a contradiction. 
::: # Wednesday, September 28 ## Continued proof Recall the proposition from last time: :::{.proposition title="Big!"} \envlist a. $\Phi$ spans $H\dual$. b. If $\alpha\in \Phi$ is a root then $-\alpha\in \Phi$ is also a root. c. Let \( \alpha\in \Phi, x\in L_{ \alpha}, y\in L_{- \alpha} \), then $[xy] = \kappa(x, y) t_\alpha$. d. If $\alpha\in \Phi$ then $[L_{ \alpha}, L_{- \alpha}] = \CC t_\alpha$ is 1-dimensional with basis $t_\alpha$. e. For any \( \alpha \in \Phi \), we have \( \alpha(t_\alpha) = \kappa(t_ \alpha, t_ \alpha) \neq 0 \). f. (Important) If \( \alpha\in \Phi, x_ \alpha\in L_{ \alpha}\smz \) then there exists some \( y_ \alpha L_{- \alpha} \) in the opposite root space such that \( x_ \alpha, y_ \alpha, h_ \alpha \da [x_ \alpha, y_ \alpha] \) span a 3-dimensional subalgebra $\liesl(\alpha) \leq L$ isomorphic to $\liesl_2(\CC)$. g. $h_\alpha = {2 t_\alpha \over \kappa(t_ \alpha, t_ \alpha)}$, \( \alpha(h_ \alpha) = 2, h_{ \alpha} = h_{- \alpha} \). ::: :::{.proof title="continued"} **Part e**: We have \( \alpha(t_ \alpha ) = \kappa(t_ \alpha, t_ \alpha) \), so suppose this is zero. Pick $x\in L_{ \alpha}, y\in L_{ - \alpha}$ such that $\kappa(x, y) = 1$, then - $[t_ \alpha, x] = 0$, - $[t_ \alpha, y] = 0$, - $[x, y] = t_ \alpha$. Set $S \da \liesl( \alpha) \da \CC\gens{x,y,t_ \alpha}$ and restrict $\ad: L\injects \liegl(L)$ to $S$. Then $\ad(S) \cong S$ by injectivity, and this is a solvable linear subalgebra of $\liegl(L)$. Apply Lie's theorem to choose a basis for $L$ such that the matrices for $\ad(S)$ are upper triangular. Then use that $\ad_L([SS]) = [\ad_L(S) \ad_L(S)]$, which is strictly upper triangular and thus nilpotent. In particular, $\ad_L (t_ \alpha)$ is nilpotent, but since $t_\alpha\in H$ which is semisimple, so $\ad_L( t_ \alpha)$ is semisimple. The only things that are semisimple *and* nilpotent are zero, so $\ad_L( t_ \alpha) = 0 \implies t_\alpha = 0$. This contradicts that $\alpha\in H\dual\smz$. $\contradiction$ **Part f**: Given $x_ \alpha\in L_ \alpha\smz$, choose $y_ \alpha \in L_{ - \alpha}$ and rescale it so that \[ \kappa(x_ \alpha, y _{\alpha} ) = {2\over \kappa(t_ \alpha, t_ \alpha)} .\] Set \( h_ \alpha \da {2t_ \alpha\over \kappa(t_ \alpha, t_ \alpha) } \), then by (c), $[x_ \alpha, y_ \alpha] = \kappa( x_ \alpha, y_ \alpha) t_ \alpha = h_ \alpha$. So \[ [ h_ \alpha, x_ \alpha] = {2\over \alpha(t_ \alpha) }[ t_ \alpha, x_ \alpha] = {2\over \alpha(t_ \alpha)} \alpha(t_ \alpha) x_ \alpha = 2x_ \alpha ,\] and similarly $[h_ \alpha, y_ \alpha] = -2 y_ \alpha$. Now the span $\gens{x_ \alpha, h_ \alpha, y_ \alpha} \leq L$ is a subalgebra with the same multiplication table as $\liesl_2(\CC)$, so $S \cong \liesl_2(\CC)$. **Part g**: Note that we would have $h_{ - \alpha} = {2 t_{ - \alpha} \over \kappa( t_ { - \alpha}, t_{- \alpha} ) } = - h_ \alpha$ if $t_{ \alpha} = t_{ - \alpha}$. This follows from the fact that $H\dual \iso H$ sends $\alpha\mapsto t_\alpha, -\alpha\mapsto t_{- \alpha}$, but by linearity $-\alpha\mapsto -t_{ \alpha}$. ::: :::{.corollary title="?"} $L$ is generated as a Lie algebra by the root spaces $\ts{L_ \alpha\st \alpha\in \Phi}$. ::: :::{.proof title="?"} It STS $H \subseteq \gens{\ts{L_\alpha}_{\alpha\in \Phi}}$. Given $\alpha\in \Phi$, \[ \exists x_\alpha\in L_ \alpha, y_ \alpha\in L_{- \alpha} \quad\text{such that}\quad \gens{x_ \alpha, y_ \alpha, h_ \alpha\da [x_ \alpha, y_ \alpha] } \cong \liesl_2(\CC) .\] Note any $h_\alpha\in \CC\units t_ \alpha$ corresponds via $\kappa$ to some $\alpha\in H\dual$. 
By (a), $\Phi$ spans $H\dual$, so $\ts{t_\alpha}_{\alpha\in \Phi}$ spans $H$. ::: ## 8.4: Integrality properties of $\Phi$ :::{.remark} Any $\alpha\in \Phi$ yields $\liesl(\alpha) \cong \liesl(\alpha)$, and in fact that the generators entirely determined by the choice of $x_\alpha$. View $L\in\mods{\liesl(\alpha)}$ via $\ad$. ::: :::{.lemma title="?"} If $M \leq L \in \mods{\liesl(\alpha)}$ then all eigenvalues of $h_\alpha\actson M$ are integers. ::: :::{.proof title="?"} Apply Weyl's theorem to decompose $M$ into a finite direct sum of irreducibles in $\mods{ \liesl_2(\CC) }$. The weights of $h_\alpha$ are of the form $m, m-2,\cdots, -m+2, -m\in \ZZ$.[^hwcomplex] [^hwcomplex]: This fails for infinite dimensional modules, e.g. Verma modules. The highest weight can be any complex number. ::: :::{.example title="?"} Let $M = H + \liesl(\alpha) \leq L \in \mods{\liesl( \alpha)}$, which one can check is actually a submodule since bracketing either lands in $\liesl(\alpha)$ or kills elements. What does Weyl's theorem say about this submodule? There is some intersection. Set $K \da \ker\alpha \subseteq H$, so $\codim_H K = 1$ by rank-nullity. Note that $h_\alpha \not\in K$, so $M = K \oplus \liesl(\alpha)$. Moreover $\liesl(\alpha)\actson K$ by zero, since bracketing acts by $\alpha$ which vanishes on $K$. So $K\cong \CC\sumpower{n+1}$ decomposes into trivial modules. ::: :::{.example title="?"} Let $\beta\in \Phi\union\ts{0}$ and define $M \da \bigoplus _{c\in \CC} L_{\beta + c\alpha}$, then $L \leq L \in \mods{\liesl( \alpha)}$. It will turn out that $L_{\beta+ c \alpha} \neq 0 \iff c\in [-r, q] \subseteq \ZZ$ with $r, q\in \ZZ_{\geq 0}$. ::: :::{.proposition title="A"} Let $\alpha\in \Phi$, then the root spaces $\dim L_{\pm \alpha} = 1$, and the only multiples of $\alpha$ which are in $\Phi$ are $\pm \alpha$. ::: :::{.proof title="?"} Note $L_\alpha$ can only pair with $L_{- \alpha}$ to give a nondegenerate Killing form. Set \[ M \da \bigoplus _{c\in \CC} L_{c \alpha} = H \oplus \bigoplus _{c \alpha\in \Phi} L_{c \alpha} .\] By Weyl's theorem, this decomposes into irreducibles. This allows us to take a complement of the decomposition from before to write $M = H \bigoplus \liesl(\alpha) \oplus W$, and we WTS $W = 0$ since this contains all $L_{c \alpha}$ where $c\neq \pm 1$. Since $H \subseteq K \oplus \liesl( \alpha)$, we have $W \intersect H = 0$. If $c_ \alpha$ is a root of $L$, then \( h_ \alpha \) has $(c \alpha)(h_ \alpha) = 2c$ as an eigenvalue, which must be an integer by a previous lemma. So $c\in \ZZ$ or $c\in \ZZ + {1\over 2}$. Suppose $W\neq 0$, and let $V(s)$ (or $L(s)$ in modern notation) be an irreducible $\liesl( \alpha)\dash$submodule of $W$ for $s\in \ZZ_{\geq 0}$. If $s$ is even, $V(s)$ contains an eigenvector $w$ for $h_ \alpha$ of eigenvalue zero by applying $y_\alpha$ $s/2$ times. We can then write $w = \sum_{c\in \CC} v_{c \alpha}$ with $v_{ ca } \in L_{ c \alpha}$, and by finiteness of direct sums we have $v_{c \alpha} = 0$ for almost every $c\in \CC$. Then \[ 0 &= [h_ \alpha, w] \\ &= \sum_{c\in \CC} [h_ \alpha, v_{ c \alpha} ] \\ &= \sum_{c\in \CC} (c\alpha)( h _{\alpha} )v_{c \alpha} \\ &= \sum 2c v_{c \alpha} \\ &\implies v_{c \alpha} = 0 \text{ when } c\neq 0 ,\] forcing $w\in H$, the zero eigenspace. But $w\in W$, so $w\in W \intersect H = 0$. $\contradiction$ ::: # Friday, September 30 ## 8.4 :::{.proposition title="A"} $\alpha\in \Phi\implies \dim L_{\pm \alpha} = 1$, and $\alpha, \lambda \alpha\in \Phi\implies \lambda = \pm 1$. 
::: :::{.proof title="of proposition A"} Consider $M \da \bigoplus \bigoplus _{ c\in \CC} L_{c \alpha} \leq L\in\mods{\liesl( \alpha)}$. Write \( \liesl(\alpha) = \gens{ x_ \alpha, h_ \alpha, y_ \alpha} \), we decomposed $M = K \oplus \liesl(\alpha) \oplus W$ where $\ker \alpha \leq H$ and $W \intersect H = 0$. WTS: $W = 0$. So far, we've shown that if $L(s) \subseteq W$ for $s\in \ZZ_{\geq 0}$ (which guarantees finite dimensionality), then $s$ can't be even -- otherwise it has a weight zero eigenvector, forcing it to be in $H$, but $W \intersect H = 0$. Aside: $\alpha\in \Phi \implies 2\alpha\not\in \Phi$, since it would have weight $(2\alpha)(h_ \alpha) = 2\alpha h_ \alpha = 4$, but weights in irreducible modules have the same parity as the highest weight and no such weights exist in $M$ (only $0, \pm 2$ in $K \oplus \liesl_2(\alpha)$ and only odd in $W$). Suppose $L(s) \subseteq W$ and $s\geq 1$ is odd. Then $L(s)$ has a weight vector for $h_ \alpha$ of weight 1. This must come from $c=1/2$, since $(1/2) \alpha (h_ \alpha) = (1/2) 2 = 1$, so this is in $L_{\alpha/2}$. However, by the aside, if $\alpha\in \Phi$ then $\alpha/2\not\in\Phi$. Thus it $W$ can't contain any odd roots or even roots, so $W = 0$. Note also that $L_{\pm\alpha}\not\subset K \oplus W$, forcing it to be in $\liesl( \alpha)$, so $L_{ \alpha} = \gens{x_ \alpha}$ and $L_{- \alpha} = \gens{y _{\alpha} }$. ::: :::{.proposition title="B"} Let \( \alpha, \beta\in \Phi \) with \( \beta\neq \pm \alpha \) and consider $\beta + k \alpha$ for $n\in \ZZ$. a. $\beta(h_ \alpha) \in \ZZ$. b. $\exists r,q\in \ZZ_{\geq 0}$ such that for $k\in \ZZ$, the combination $\beta + k \alpha\in \Phi \iff k \in [-r, q] \ni 0$. The set $\ts{ \beta + k \alpha \st k\in [-r, q]} \subseteq \Phi$ is the **$\alpha\dash$root string through $\beta$**. c. If \( \alpha + \beta\in \Phi \) then \( [L_ \alpha L_ \beta ] = L_{ \alpha + \beta} \). d. \(\beta- \beta(h_ \alpha) \alpha\in \Phi \). ::: :::{.proof title="?"} Consider \[ M \da \bigoplus _{k\in \ZZ} L_{ \beta + k \alpha} \leq L \quad\in\mods{\liesl( \alpha)} .\] a. $\beta(h _{\alpha} )$ is the eigenvalues of $h_ \alpha$ acting on $L_ \beta$. But by the lemma, $\beta(h_ \alpha)\in \ZZ$. b. By the previous proposition, $\dim L_{ \beta+ k \alpha} = 1$ if nonzero, and the weight of $h_\alpha$ acting on it is $\beta( h _{\alpha} ) + 2k$ all different for distinct $k$. By $\liesl_2\dash$representation theory, we know the multiplicities of various weight spaces as the sum of dimensions of the zero and one weight spaces, and thus $M$ is a single irreducible $\liesl(\alpha)\dash$module. So write $M - L(d)$ for some $d\in \ZZ_{\geq 0}$, then $h_ \alpha\actson M$ with eigenvalues $\ts{d,d-2,\cdots, -d+2, -d}$. But $h_ \alpha\actson M$ with eigenvalues $\beta( h_ \alpha) + 2k$ for those $k\in \ZZ$ with $L_{\beta + k \alpha}\neq 0$. Since the first list is an unbroken string of integers of the same parity, thus the $k$ that appear must also be an unbroken string. Define $r$ and $q$ by setting $d = \beta(h_\alpha) + 2q$ and $-d =\beta( h_ \alpha ) - 2r$ to obtain $[-r, q]$. Adding these yields $0 = 2\beta( h_ \alpha) + 2q-2r$ and $r-q = \beta(h_ \alpha)$. d. Let $M\cong L(d) \in \mods{\liesl(\alpha)}$ and $x_\beta \in L_ \beta\smz \subseteq M$ with $x_ \alpha\in L_{ \alpha}$. If $[x_ \alpha x_ \beta] = 0$ then $x_ \beta$ is a maximal $\liesl(\alpha)\dash$vector in $L(d)$ and thus $d = \beta(h_ \alpha)$. But $\alpha + \beta\in \Phi \implies \beta)(h_ \alpha) + 2$ is a weight in $M$ bigger than $d$, a contradiction. 
Thus $\alpha + \beta\in \Phi \implies [x_ \alpha x_ \beta] \neq 0$. Since this bracket spans and $\dim L_{ \alpha + \beta} = 1$, so $[L_ \alpha L_ \beta] = L_{ \alpha + \beta}$. e. Use that $q\geq 0, r\geq 0$ to write $-r \leq -r+q \leq q$. Then \[ \beta - \beta(h_ \alpha) \alpha = \beta - (r-q) \alpha = \beta + (-r+q \alpha)\da \beta + \ell\alpha \] where $\ell\in [-r, q]$. Thus $\beta + \ell\alpha\in \Phi$ is an unbroken string by (b). ::: :::{.question} Is it true that $\bigoplus_{k\in \ZZ} L_{\beta+ k \alpha} = \bigoplus _{c\in \CC} L_{\beta + c\alpha}$? The issue is that $c\in \ZZ + {1\over 2}$ is still possible. ::: ## 8.5: Rationality properties of $\Phi$ :::{.remark} Recall that $\kappa$ restrict to a nondegenerate bilinear form on $H$ inducing $H\dual\iso H$ via $\phi\mapsto t_\phi$ where $\kappa(t_\phi, \wait) = \phi(\wait)$. Transfer to a nondegenerate symmetric bilinear form on $H\dual$ by $(\lambda, \mu) \da \kappa(t_\lambda, t_\mu)$. By prop 8.3 we know $H\dual$ is spanned by $\Phi$, so choose a $\CC\dash$basis $\ts{ \alpha_1,\cdots, \alpha_n} \subseteq \Phi$. Given $\beta\in\Phi$, write $\beta = \sum c_i \alpha_i$ with $c_i\in \CC$. ::: :::{.claim} $c_i \in \QQ$ for all $i$! ::: # Monday, October 03 ## Integrality and Rationality Properties :::{.claim} Setup: - Decompose $L = H \oplus \bigoplus _{ \alpha\in \Phi} L_{ \alpha}$ - Use the isomorphism \[ H &\iso H\dual \\ \varphi &\mapsfrom t_{ \varphi} \] to define $(\lambda, \mu) \da \kappa(t_ \lambda, t_ \mu)$ on $H$. - Choose a basis $\ts{ \alpha_i} \subseteq \Phi \subseteq H\dual$ - For any $\beta \in \Phi$, write \( \beta= \sum c_i \alpha_i \) with $c_i\in \CC$. Then \[ c_i\in \QQ .\] ::: :::{.proof title="?"} Write \( ( \beta, \alpha_j) = \sum c_i (\alpha_i, \alpha_j) \) and m \[ {2 (\beta, \alpha_j) \over (\alpha_j, \alpha_j) } = \sum c_i {2 (\alpha_i, \alpha_j) \over (\alpha_j, \alpha_j) } ,\] where the LHS is in $\ZZ$, as is $2( \alpha_i, \alpha_j) \over (\alpha_j, \alpha_j)$. On the other hand \[ {2 (\beta, \alpha_j) \over (\alpha_j, \alpha_j) } = {2 (t_ \beta, t_{\alpha_j} ) \over \kappa(t_{ \alpha_j}, \kappa_{ \alpha_j} ) } = \kappa(t_ \beta, h_{\alpha_j} ) = \beta(h_{ \alpha_j}) \] using that \( ( \alpha_j, \alpha_j) = \kappa( t_{ \alpha_j}, t_{ \alpha_j} )\neq 0 \) from before.[^more_gen] Since $\ts{ \alpha_i}$ is a basis for $H\dual$ and $(\wait, \wait)$ is nondegenerate, the matrix $[ ( \alpha_i, \alpha _j) ]_{1\leq i, j\leq n}$ is invertible. Thus so is $\left[ 2 ( \alpha_i, \alpha_j) \over (\alpha_j, \alpha_j ) \right]_{1\leq i,j \leq n}$, since it's given by multiplying each column as a nonzero scalar, and one can solve for the $c_i$ by inverting it. This involves denominators coming from the determinant, which is an integer, yielding entries in $\QQ$. [^more_gen]: More generally, \[ {2 (\lambda, \alpha)\over (\alpha, \alpha) } = \lambda(h_ \alpha) \qquad\forall \alpha\in \Phi .\] ::: :::{.remark} Given \( \lambda, \mu \in H\dual \) then \[ (\lambda, \mu) = \kappa(t_ \lambda, t_\mu) = \Trace(\ad_{t_ \lambda} \circ \ad_{t_\mu} ) = \sum_{ \alpha\in \Phi} \alpha(t_ \lambda) \cdot \alpha(t_\mu) ,\] using that both ads are diagonal in this basis, so their product is given by the products of their diagonal entries. 
One can write this as $\sum_{ \alpha\in \Phi} \kappa(t_ \alpha, t_ \lambda) \kappa(t_ \alpha, t_\mu)$, so we get a formula \[ ( \lambda, \mu ) = \sum_{ \alpha\in \Phi} ( \alpha, \lambda) (\alpha, \mu), \qquad (\lambda, \lambda) = \sum_{ \alpha\in \Phi} (\alpha, \lambda)^2 .\] Setting $\lambda = \beta$ and dividing by $(\beta, \beta)^2$ yields \[ {1\over (\beta, \beta)} = \sum_{ \alpha\in \Phi} {(\alpha, \beta)^2 \over (\beta, \beta)^2} \in {1\over 4}\ZZ ,\] since $(\alpha, \beta)\over (\beta, \beta)\in {1\over 2} \ZZ$. So $(\beta, \beta)\in \QQ$ and thus $(\alpha, \beta)\in \QQ$ for all \( \alpha, \beta\in \Phi \). It follows that the pairings \( (\lambda, \mu) \) on the $\QQ\dash$subspace $\EE_\QQ$ of $H\dual$ spanned by $\ts{ \alpha_i}$ are all rational. ::: :::{.claim} $(\wait, \wait)$ on $\EE_\QQ$ is still nodegenerate ::: :::{.proof title="?"} If \( \lambda\in \EE_\QQ, ( \lambda, \mu) =0 \forall \mu\in \EE_\QQ \), then $( \lambda, \alpha_i) = 0 \forall i \implies (\lambda, \nu) = 0 \forall \nu\in H\dual \implies \lambda= 0$. ::: :::{.remark} Similarly, $(\lambda, \lambda) = \sum_{ \alpha\in \Phi \subseteq \EE_\QQ} ( \alpha, \lambda)^2$ is a sum of squares of rational numbers, and is thus non-negative. Since $( \lambda, \lambda) = 0 \iff \lambda= 0$, the form on $\EE_\QQ$ is positive definite. Write $\EE \da \EE_\QQ \tensor_\QQ \RR = \RR\ts{\alpha_i}$, then $(\wait, \wait)$ extends in the obvious way to an $\RR\dash$values positive definite bilinear form on $\EE$, making it a real inner product space. ::: :::{.theorem title="?"} Let $L, H, \Phi, \EE\slice\RR$ be as above, then a. $\Phi$ is a finite set which spans $\EE$ and does not contain zero. b. If $\alpha\in \Phi$ then $-\alpha\in \Phi$ and thus is the only other scalar multiple in $\Phi$. c. If \( \alpha, \beta \in \Phi \) then \[ \beta - \beta(h_ \alpha) \alpha = \beta - {2 (\beta, \alpha) \over ( \alpha, \alpha) } \alpha\in \Phi ,\] which only depends on $\EE$. Note that this swaps $\pm \alpha$. d. If \( \alpha, \beta \in \Phi \) then $\beta(h_\alpha) = {2(\beta, \alpha) \over (\alpha, \alpha)}\in \ZZ$. Thus $\Phi$ satisfies the axioms of a **root system** in $\EE$. ::: :::{.example title="?"} Recall that for $\liesl_3(\CC)$, $\kappa(x,y) = 6 \Trace(xy)$. Taking the standard basis $\ts{v_i} \da \ts{x_i, h_i, y_i \da x_i^t}$, the matrix $\Trace(v_i v_j)$ is of the form \[ \mattt 0 0 I 0 A 0 I 0 0\qquad A \da \matt 2 {-1} {-1} 2 .\] This is far from the matrix of an inner product, but the middle block corresponds to the form restricted to $H$, which is positive definite. One can quickly check this is positive definite by checking positivity of the upper-left $k\times k$ minors, which here yields $\det(2) = 2, \det A = 4-1 = 3$. ::: ## Part III: Root Systems ## Ch. 9, Axiomatics. 
9.1: Reflections in a Euclidean Space :::{.remark} Let $\EE$ be a fixed real finite-dimensional Euclidean space with inner product $(\alpha, \beta)$, we consider property (c) from the previous theorem: \[ \beta - {2( \beta, \alpha) \over (\alpha, \alpha)} \in \Phi \qquad\forall \alpha, \beta\in \Phi .\] ::: :::{.definition title="Reflections"} A **reflection** in $\EE$ is an invertible linear map on an $n\dash$dimensional Euclidean space that fixes pointwise a hyperplane $P$ (of dimension $n-1$) and sending any vector $v\perp P$ to $-v$: \begin{tikzpicture} \fontsize{45pt}{1em} \node (node_one) at (0,0) { \import{/home/zack/SparkleShare/github.com/Notes/Class_Notes/2022/Fall/LieAlgebras/sections/figures}{2022-10-03_09-52.pdf_tex} }; \end{tikzpicture} ::: :::{.remark} If $\sigma$ is a reflection sending \( \alpha\mapsto - \alpha \), then \[ \sigma_\alpha(\beta) = \beta - {2( \beta, \alpha) \over (\alpha, \alpha)} \alpha \qquad \forall \beta\in \EE .\] One can check that $\sigma_\alpha^2 = \id$. Some notes on notation: - Humphreys writes $\inp{ \beta}{ \alpha} \da {2 ( \beta, \alpha) \over (\alpha, \alpha)}$ This is linear in $\beta$ but not in $\alpha$! - More modern: $(\beta, \alpha\dual) \da \inp{ \beta}{\alpha}$ where \( \alpha\dual \da {2\alpha\over (\alpha, \alpha)} \) corresponds to $h_\alpha$. - Modern notation for the map: $s_\alpha$ instead of $\sigma_\alpha$. ::: # Wednesday, October 05 ## Reflections in $\EE^n$ :::{.remark} Recall the formula \[ s_\alpha( \lambda) = \lambda- (\lambda, \alpha\dual)\alpha, \qquad \alpha\dual \da {2\alpha\over (\alpha, \alpha)}, \alpha\neq 0 ,\] which is a reflection through the hyperplane $P_\alpha\da \alpha\perp$: \begin{tikzpicture} \fontsize{45pt}{1em} \node (node_one) at (0,0) { \import{/home/zack/SparkleShare/github.com/Notes/Class_Notes/2022/Fall/LieAlgebras/sections/figures}{2022-10-05_09-13.pdf_tex} }; \end{tikzpicture} ::: :::{.lemma title="?"} Let \( \Phi \subseteq \EE \) be a set that spans $\EE$, and suppose all of the reflections $s_\alpha$ for \( \alpha \in \Phi \) leave $\Phi$ invariant. If \( \sigma\in \GL(\EE) \) leaves $\Phi$ invariant, fixes a hyperplane $P$ pointwise, and sends some $\alpha\in \Phi\smz$ to $-\alpha$, then $\sigma = s_\alpha$ and $P = P_\alpha$. ::: :::{.proof title="?"} Let $\tau = \sigma s_ \alpha =\sigma s_{ \alpha}\inv \in \GL(\EE)$, noting that every $s_\alpha$ is order 2. Then $\tau( \Phi) = \Phi$ and $\tau( \alpha) = \alpha$, so $\tau$ acts as the identity on the subspace $\RR\alpha$ and the quotient space $\EE/\RR\alpha$ since there are two decompositions $\EE = P_ \alpha \oplus \RR\alpha = P \oplus \RR \alpha$ using $s_\alpha$ and $\sigma$ respectively. So $\tau - \id$ acts as zero on $\EE/\RR\alpha$, and so maps $\EE$ into $\RR\alpha$ and $\RR\alpha$ to zero, s $(\tau - \id)^2 = 0$ on $\EE$ and its minimal polynomial $m_\tau(t)$ divides $f(t) \da (t-1)^2$. Note that $\Phi$ is finite, so the vectors \( \beta, \tau \beta, \tau^2 \beta, \tau^3 \beta,\cdots \) can not all be distinct. Since $\tau$ is invertible we can assume $\tau^k \beta = \beta$ for some particular $k$. Taking the least common multiple of all such $k$ yields a uniform $k$ that works for all $\beta$ simultaneously, so $\tau^k \beta = \beta$ for all $\beta \in \Phi$. Since $\RR\Phi = \EE, \tau^k$ acts as $\id$ on all of $\EE$, so $\tau^k - 1 = 0$ and so $m_\tau(t) \divides t^k - 1$ for some $k$. Therefore $m_\tau(t) \divides \gcd( (t-1)^2, t^k-1 ) = t-1$, forcing $\tau = \id$ and $\sigma = s_ \alpha$ and $P = P_\alpha$. 
::: ## Abstract root systems :::{.definition title="Root systems"} A subset $\Phi \subseteq \EE$ of a real Euclidean space is a **root system** iff - R1: $\size \Phi < \infty$, $\RR\Phi = \EE$, and $0\not\in \Phi$, - R2: $\alpha\in \Phi \implies -\alpha\in \Phi$ and no other scalar multiples of $\alpha$ are in $\Phi$, - R3: If $\alpha\in \Phi$ then $s_\alpha( \Phi) = \Phi$, - R4: If $\alpha, \beta\in \Phi$ then \( (\beta, \alpha\dual) = {2(\beta, \alpha) \over ( \alpha, \alpha) } \in \ZZ \). Notably, $\ell\da \norm{\beta - s_\alpha\beta}$ is an integer multiple of $\alpha$: \begin{tikzpicture} \fontsize{45pt}{1em} \node (node_one) at (0,0) { \import{/home/zack/SparkleShare/github.com/Notes/Class_Notes/2022/Fall/LieAlgebras/sections/figures}{2022-10-05_09-33.pdf_tex} }; \end{tikzpicture} ::: :::{.definition title="Weyl groups"} The **Weyl group** $W$ associated to a root system $\Phi$ is the subgroup $\gens{s_\alpha, \alpha\in \Phi} \leq \GL(\EE)$. ::: :::{.remark} Note that $\size W < \infty$: $W$ permutes $\Phi$ by (R3), so there is an injective group morphism $W \injects \mathrm{Perm}(\Phi)$, which is a finite group -- this is injective because if $w\actson \Phi$ as $\id$, since $\RR \Phi = \EE$, by linearity $w\actson \EE$ by $\id$ and $w=\id$. Recalling that $s_ \alpha( \lambda) = \lambda- (\lambda, \alpha\dual) \alpha$, we have $(s_ \alpha(\lambda), s_ \alpha(\mu)) = ( \lambda, \mu)$ for all $\lambda, \mu \in \EE$. So in fact $W \leq \Orth(\EE) \leq \GL(\EE)$, which have determinant $\pm 1$ -- in particular, $\det s_\alpha = -1$ since it can be written as a block matrix $\diag(1, 1, \cdots, 1, -1)$ by choosing a basis for $P_\alpha$ and extending it by $\alpha$. > Note that one can classify finite subgroups of $\SO_n$. ::: :::{.example title="?"} Let $\Phi = \ts{ \eps_i - \eps_j \st 1\leq i,j \leq n+1, i\neq j}$ be a root system of type $A_n$ where $\ts{\eps_i}$ form the standard basis of $\RR^{n+1}$ with the standard inner product, so $(\eps_i, \eps_j) = \delta_{ij}$. One can compute \[ s_{\eps_i - \eps_j}(\eps_k) = \eps_k {2 (\eps_k, \eps_i - \eps_j) \over (\eps_i - \eps_j, \eps_i - \eps_j)}(\eps_i - \eps_j) = \eps_k - (\eps_k, \eps_i - \eps_j)(\eps_i - \eps_j) = \begin{cases} \eps_j & k=i \\ \eps_i & k=j \\ \eps_k & \text{otherwise}. \end{cases} = \eps_{(ij).k} \] where $(ij) \in S_{n+1}$ is a transposition, acting as a function on the index $k$. Thus there is a well-defined group morphism \[ W &\to S_{n+1} \\ s_{\eps_i - \eps_j} &\mapsto (ij) .\] This is injective since $w$ acting by the identity on every $\eps_k$ implies acting by the identity on all of $\EE$ by linearity, and surjective since transpositions generate $S_{n+1}$. So $W\cong S_{n+1}$, and $A_n$ corresponds to $\liesl_{n+1}(\CC)$ using that \[ [h, e_{ij}] = (h_i - h_j) e_{ij} = (\eps_i - \eps_j)(h) e_{ij} .\] In $G = \SL_n(\CC)$ one can define $N_G(T)/C_G(T)$ for $T$ a maximal torus. ::: :::{.exercise title="?"} What are the Weyl groups of other classical types? ::: :::{.lemma title="?"} Let $\Phi \subseteq \EE$ be a root system. If $\sigma\in \GL(\EE)$ leaves $\Phi$ invariant, then for all $\alpha\in \Phi$, \[ \sigma s_{ \alpha} \sigma = s_{ \sigma( \alpha)}, \qquad (\beta, \alpha\dual) = (\sigma( \beta), \sigma(\alpha)\dual) \,\,\forall \alpha, \beta\in \Phi .\] Thus conjugating a reflection yields another reflection. ::: :::{.proof title="?"} Note that \( \sigma s_ \alpha \sigma\inv \) sends \( \sigma( \alpha) \) to its negative and fixes pointwise the hyperplane $\sigma(P_\alpha)$. 
If \( \beta \in \Phi\) then \(s_{ \alpha}( \beta) \in \Phi \), so \( \sigma s_ \alpha ( \beta) \in \Phi \) and \[ (\sigma s_ \alpha \sigma\inv) ( \sigma( \beta)) = \sigma s_ \alpha(\beta) \in \sigma\Phi ,\] so \( \sigma s_ \alpha \sigma\inv \) leaves invariant the set $\ts{ \sigma( \beta) \st \beta\in \Phi} = \Phi$. By the previous lemma, it must equal $s_{ \sigma( \alpha)}$, and so \[ ( \sigma( \beta), \sigma( \alpha)\dual ) = (\beta, \alpha\dual) \] by applying both sides to $\sigma(\beta)$. ::: :::{.warnings} This does not imply that \( (\sigma( \beta), \sigma( \alpha) ) = (\beta, \alpha) \)! With the duals/checks, this bracket involves a ratio, which is preserved, but the individual round brackets are not. ::: # Friday, October 07 :::{.lemma title="?"} Let $\Phi \subseteq \EE$ be a root system with Weyl group $W$. If $\sigma\in \GL(\EE)$ leaves $\Phi$ invariant then \[ \sigma s_{\alpha} \sigma\inv = s_{ \sigma( \alpha)} \qquad\forall \alpha\in \Phi \] and \[ ( \beta, \alpha\dual) = ( \sigma(\beta), \sigma(\alpha)\dual ) \qquad \forall \alpha, \beta \in \Phi .\] ::: :::{.warnings} \[ ( \sigma( \beta), \sigma( \alpha) ) \neq (\beta, \alpha) ,\] i.e. the $(\wait)\dual$ is important here since it involves a ratio. Without the ratio, one can easily scale to make these not equal. ::: :::{.definition title="?"} Two root systems \( \Phi \subseteq \EE, \Phi' \subseteq \EE' \) are **isomorphic** iff there exists $\phi: \EE\to \EE'$ of vector spaces such that $\phi(\Phi) = \Phi'$ such that \[ (\varphi( \beta), \varphi(\alpha)\dual) = (\beta, \alpha) \da {2 (\beta, \alpha) \over (\alpha, \alpha)} \qquad\forall \alpha, \beta \in \Phi .\] ::: :::{.example title="?"} One can scale a root system to get an isomorphism: \begin{tikzpicture} \fontsize{45pt}{1em} \node (node_one) at (0,0) { \import{/home/zack/SparkleShare/github.com/Notes/Class_Notes/2022/Fall/Moduli/sections/figures}{2022-10-07_09-19.pdf_tex} }; \end{tikzpicture} ::: :::{.remark} Note that if $\phi: \Phi \iso \Phi'$ is an isomorphism, then \[ \varphi(s_{ \alpha}( \beta)) = s_{ \varphi( \alpha)}( \varphi(\beta)) \qquad \forall \alpha, \beta\in \Phi \implies \varphi \circ s_{ \alpha} \varphi\inv = s_{ \varphi( \alpha)} .\] So $\phi$ induces an isomorphism of Weyl groups \[ W &\iso W' \\ s_{\alpha} &\mapsto s_{ \varphi( \alpha)} .\] By the lemma, an automorphism of $\Phi$ is the same as an automorphism of $\EE$ leaving $\Phi$ invariant. In particular, $W\injects \Aut( \Phi)$. ::: :::{.definition title="Dual root systems"} If $\Phi \subseteq \EE$ is a root system then the **dual root system** is \[ \Phi\dual \da \ts{ \alpha\dual \st \alpha\in \Phi}, \qquad \alpha\dual \da {2\alpha\over (\alpha, \alpha)} .\] ::: :::{.exercise title="?"} Show that $\Phi\dual$ is again a root system in $\EE$. ::: :::{.remark} One can show $W( \Phi) = W( \Phi\dual)$ and $\inp {\lambda}{ \alpha\dual} \alpha\dual = \inp{ \lambda}{ \alpha} \alpha = (\lambda, \alpha\dual)$ for all \( \alpha\in \Phi, \lambda\in \EE \), so \( s_{\alpha\dual} = s_{\alpha} \) as linear maps on $\EE$. ::: ## 9.3: Example(s) :::{.definition title="Ranks of root systems"} Let \( \Phi \subseteq \EE \) be a root system, then $\ell \da \dim_\RR \EE$ is the **rank** of $\Phi$. 
::: :::{.remark} Rank 1 root systems are given by choice of $\alpha\in \RR$: \begin{tikzpicture} \fontsize{45pt}{1em} \node (node_one) at (0,0) { \import{/home/zack/SparkleShare/github.com/Notes/Class_Notes/2022/Fall/Moduli/sections/figures}{2022-10-07_09-31.pdf_tex} }; \end{tikzpicture} ::: :::{.remark} Recall ${2( \beta, \alpha) \over (\alpha, \alpha)} \in \ZZ$, and from linear algebra, $\inp{v}{w} = \norm v \cdot \norm w \cos( \theta)$ and $\norm{\alpha}^2 = ( \alpha, \alpha)$. We can thus write \[ \inp{ \beta}{ \alpha} = {2( \beta, \alpha) \over (\alpha, \alpha)} = 2{\norm \beta\over \norm \alpha} \cos (\theta), \qquad \inp \alpha \beta= 2{\norm \alpha\over \norm \beta} \cos( \theta) ,\] and so \[ \inp \alpha \beta\inp \beta \alpha = 4\cos^2( \theta) ,\] noting that $L_{ \alpha, \beta} \da \inp \alpha \beta, \inp \beta \alpha$ are integers of the same sign. If positive, this is in QI, and if negative QII. This massively restricts what the angles can be, since $0 \leq \cos^2( \theta) \leq 1$. First, an easy case: suppose $L_{ \alpha, \beta} = 4$, so $\cos^2( \theta) = 1\implies \cos( \theta) = \pm 1\implies \theta= 0, \pi$. - If $0$, then $\alpha,\beta$ are in the same 1-dimensional subspace and thus $\beta = \alpha$. In this case, \( \inp \beta \alpha = 2 = \inp \alpha \beta \). - If $\pi$, then $\alpha = - \beta$. Here $\inp \beta \alpha = -2$. So assume \( \beta\neq \pm \alpha \), and without loss of generality $\norm \beta\geq \norm \alpha$, or equivalently $\inp \alpha \beta \leq \inp \beta \alpha$. Note that if $\inp \alpha \beta\neq 0$ then \[ { \inp \beta \alpha\over \inp \alpha \beta} = {\norm{ \beta}^2 \over \norm{ \alpha}^2} .\] The other possibilities are as follows: | $\inp \alpha\beta$ | $\inp \beta\alpha$ | $\theta$ | $\norm{\beta}^2/\norm{\alpha}^2$ | |-------------------- |-------------------- |---------- |---------------------------------- | | 0 | 0 | $\pi/2$ | Undetermined | | 1 | 1 | $\pi/3$ | 1 | | -1 | -1 | $2\pi/3$ | 1 | | 1 | 2 | $\pi/4$ | 2 | | -1 | -2 | $3\pi/4$ | 2 | | 1 | 3 | $\pi/6$ | 3 | | -1 | -3 | $5\pi/6$ | 3 | Cases for the norm ratios: - $1: A_2$ - $2: B_2 = C_2$ - $3: G_2$ These are the only three irreducible rank 2 root systems. ::: :::{.lemma title="?"} Let \( \alpha, \beta\in\Phi \) lie in distinct linear subspaces of $\EE$. Then 1. If $(\alpha, \beta) > 0$, i.e. their angle is strictly acute, then $\alpha - \beta$ is a root 2. If $(\alpha, \beta) < 0$ then \( \alpha + \beta \) is a root. ::: :::{.proof title="?"} Note that (2) follows from (1) by replacing $\beta$ with $-\beta$. Assume $(\alpha, \beta) > 0$, then by the chart \( \inp \alpha \beta =1 \) or \( \inp \beta \alpha = 1 \). In the former case, \[ \Phi\ni s_{ \beta}( \alpha) = \alpha - \inp \alpha \beta \beta = \alpha- \beta .\] In the latter, \[ s_{ \alpha}(\beta) = \beta- \alpha \in \Phi\implies - (\beta- \alpha) = \alpha- \beta\in \Phi .\] ::: :::{.remark} Suppose $\rank( \Phi) = 2$. Letting \( \alpha\in \Phi \) be a root of shortest length, since $\RR\Phi = \EE$ there is some $\beta \in \EE$ not equal to $\pm \alpha$. Without loss of generality assume $\angle_{\alpha, \beta}$ is obtuse by replacing $\beta$ with $-\beta$ if necessary: \begin{tikzpicture} \fontsize{45pt}{1em} \node (node_one) at (0,0) { \import{/home/zack/SparkleShare/github.com/Notes/Class_Notes/2022/Fall/Moduli/sections/figures}{2022-10-07_09-57.pdf_tex} }; \end{tikzpicture} Also choose $\beta$ such that $\angle_{ \alpha, \beta}$ is maximal. 
**Case 0**: If $\theta = \pi/2$, one gets $\AA_1\times \AA_1$: \begin{tikzpicture} \fontsize{45pt}{1em} \node (node_one) at (0,0) { \import{/home/zack/SparkleShare/github.com/Notes/Class_Notes/2022/Fall/Moduli/sections/figures}{2022-10-07_09-59.pdf_tex} }; \end{tikzpicture} We'll continue this next time. ::: # Monday, October 10 ## Classification of Rank 2 Root Systems :::{.remark} If $\beta\neq \pm \alpha$, - \( (\alpha, \beta) > 0 \implies \alpha - \beta\in \Phi \) - \( (\alpha, \beta) < 0 \implies \alpha + \beta\in \Phi \) ::: :::{.remark} Rank 2 root systems: let $\alpha$ be a root of shortest length, and $\beta$ a root with angle $\theta$ between $\alpha,\beta$ with $\theta \geq \pi/2$ as large as possible. - If $\theta = \pi/2$: $A_1 \times A_1$. ![](figures/2022-10-10_09-16-04.png) - If $\theta = 2\pi/3$: $A_2$ ![](figures/2022-10-10_09-20-14.png) One can check $\inp \alpha \beta= 2(-1/2) = -1$ and $\inp {\alpha + \beta}{ \beta} = \inp \alpha \beta + \inp \beta \beta = -1 + 2 = 1$. - If $\theta = 3\pi/4$: $B_2$ - If $\theta = 5\pi/6$: $G_2$ One can check that using linearity of $\inp\wait\wait$ in the first variable that - $s_\alpha \beta = \beta + 3 \alpha$, - $s_\alpha(\beta+ \alpha) = \beta+ 2 \alpha$, - $s_ \beta(\beta+ 3 \alpha) = (\beta+ 3 \alpha) - \inp{ \beta+ 3 \alpha}{ \beta}= 2 \beta+ 3 \alpha \in \Phi$. ::: :::{.remark} Note that in each case one can see the root strings, defined as \[ R_\beta \da \ts{\beta+ k \alpha \st k\in \ZZ} \intersect \Phi .\] Let $r,q\in \ZZ_{\geq 0}$ be maximal such that $\beta-r \alpha, \beta + q \alpha\in \Phi$. The claim is that every such root string is unbroken. Suppose not, then there is some $k$ with $-r < k < q$ with $\beta + k \alpha \not\in \Phi$. One can then find a maximal $p$ and minimal $s$ with $p < s$ and $\beta+p \alpha \in \Phi$ but $\beta + (p+1) \alpha \not\in \Phi$, and similarly $\beta + (s-1)\alpha\not\Phi$ but $\beta + s \alpha\in \Phi$. By a previous lemma, $(\beta+ p \alpha, \alpha) \geq 0$ and similarly $(\beta+ s \alpha, \alpha) \leq 0$. Combining these, \[ p( \alpha, \alpha) \geq s (\alpha, \alpha) \implies p \geq s \text{ since } (\alpha, \alpha) > 0 \qquad\contradiction .\] The picture: \begin{tikzpicture} \fontsize{45pt}{1em} \node (node_one) at (0,0) { \import{/home/zack/SparkleShare/github.com/Notes/Class_Notes/2022/Fall/LieAlgebras/sections/figures}{2022-10-10_09-44.pdf_tex} }; \end{tikzpicture} So $s_\alpha$ reverses the root string, since it sends the line containing the root string to itself but reflects through $P_\alpha$. One can compute \[ \beta - r \alpha &= s_ \alpha(\beta + q \alpha) \\ &= (\beta+ q \alpha) - \inp{\beta+ q \alpha}{ \alpha} \alpha \\ &= (\beta+ q \alpha) - \inp{ \beta}{\alpha}ga - 2q \alpha \\ &= \beta - \qty{\inp \beta \alpha + q} \alpha ,\] so $r = \inp \beta \alpha$ and $r-q = \inp \beta \alpha = \beta(h_ \alpha)$ for a semisimple Lie algebra. Supposing \( \abs{\inp \beta \alpha} \leq 3 \). Choose $\beta$ in $R_\alpha$ such that $\beta-\alpha$ is not a root and $\beta$ is at the left end of the string and $r=0$: ![](figures/2022-10-10_09-51-39.png) Then $q = -\inp \beta \alpha$, so the root string contains at most 4 roots (for $\Phi$ of any rank). ::: ## Ch. 
10 Simple Roots, 10.1 Bases and Weyl Chambers :::{.definition title="Base (simple roots)"} A subset $\Delta \subseteq \Phi$ is a **base** (or more modernly a **set of simple roots**) if - B1: $\Delta$ is a basis for $\EE$, - B2: Each $\beta\in \Phi$ can be written as $\beta = \sum_{\alpha\in \Delta} k_\alpha \alpha$ with $k_\alpha\in \ZZ$ with either all $k_\alpha\in \ZZ_{\geq 0}$ or all $k_\alpha \in \ZZ_{\leq 0}$. ::: :::{.example title="?"} 1. The roots labeled $\alpha,\beta$ in the rank 2 cases were all simple systems. 2. For $A_n$, a base is $\ts{\eps_1 - \eps_2, \eps_2 - \eps_3, \cdots, \eps_{n} - \eps_{n+1} }$, where $\Phi = \ts{\eps_i - \eps_j \st i\neq j}$. ::: # Wednesday, October 12 :::{.remark} Today: finding bases for root systems. It's not obvious they always exist, but e.g. in the previous rank 2 examples, $\alpha,\beta$ formed a base. ::: :::{.definition title="Height, positive/negative roots"} Given a base $\Delta \subseteq \Phi$, the **height** of a root $\beta = \sum_{\alpha\in \Delta} k_ \alpha \alpha$ is \[ \height(\beta) \da \sum k_ \alpha .\] If all $k_\alpha \geq 0$, we say $\beta$ is **positive** and write $\beta\in \Phi^+$. Similarly, $\beta$ is **negative** iff $k_\alpha \leq 0$ for all $\alpha$, and we write $\beta\in \Phi^-$. This decomposes a root system into $\Phi = \Phi^+ \disjoint \Phi^-$, and moreover $-\Phi^+ = \Phi^-$. ::: :::{.remark} A choice of $\Delta$ determines a partial order on $\Phi$ which extends to $\EE$, where \( \lambda\geq \mu \iff \lambda- \mu \) is a non-negative integer linear combination of elements of $\Delta$. ::: :::{.lemma title="?"} If $\Delta \subseteq \Phi$ is a base and $\alpha, \beta\in \Delta$, then \[ \alpha\neq \beta\implies ( \alpha, \beta) \leq 0 \text{ and } \alpha- \beta\not\in \Phi .\] ::: :::{.proof title="?"} We have \( \alpha\neq \pm \beta \) since $\Delta$ is a linearly independent set. By a previous lemma, if $( \alpha, \beta) > 0$ then $\beta- \alpha \in \Phi$ by a previous lemma. $\contradiction$ ::: :::{.definition title="Regular"} An element $\gamma\in \EE$ is **regular** iff $\gamma \in \EE\sm \Union_{\alpha\in \Phi} P_ \alpha$ where $P_ \alpha= \alpha^\perp$, otherwise $\gamma$ is **singular**. ::: :::{.lemma title="?"} Regular vectors exist. ::: :::{.remark} The basic fact used is that over an infinite field, no vector space is the *union* of a finite number of proper subspaces. Note that this is a union, not a sum! Given a regular vector $\gamma\in \EE$, define \[ \Phi^+(\gamma) = \ts{ \alpha\in \Phi \st (\alpha, \gamma) > 0 } ,\] the roots on the positive side of the hyperplane $\alpha^\perp$: \begin{tikzpicture} \fontsize{45pt}{1em} \node (node_one) at (0,0) { \import{/home/zack/SparkleShare/github.com/Notes/Class_Notes/2022/Fall/LieAlgebras/sections/figures}{2022-10-12_09-31.pdf_tex} }; \end{tikzpicture} This decomposes $\Phi = \Phi^+(\gamma) \disjoint \Phi^-(\gamma)$ where $\Phi^-(\gamma) \da - \Phi^+(\gamma)$. Note that $\gamma$ lies on the positive side of $\alpha^\perp$ for every $\alpha\in \Phi^+(\gamma)$. ::: :::{.definition title="Decomposable roots"} A positive root $\beta\in \Phi^+$ is **decomposable** iff $\beta = \beta_1 + \beta_2$ for $\beta_i \in \Phi^+( \gamma)$. Otherwise $\beta$ is **indecomposable**. ::: :::{.theorem title="?"} There exists a base for $\Phi$. ::: :::{.theorem title="?"} Let \( \gamma\in \EE \) be regular. Then the set $\Delta(\gamma)$ of all indecomposable roots in $\Phi^+( \gamma)$ is a base for $\Phi$. Moreover, any base for $\Phi$ arises in this way. 
::: :::{.proof title="in 5 easy steps"} \envlist 1. Claim: each root in $\Phi^+( \gamma)$ is in $\ZZ_{\geq 0} \Delta( \gamma)$. The proof: if not, pick $\beta\in \Phi^+(\gamma)$ which cannot be written this way and choose it such that $(\beta, \gamma)$ is maximal (by finiteness). Since $\beta\not\in \Delta( \gamma)$, it is decomposable as $\beta = \beta_1 + \beta_2$ with \( \beta_i \in \Phi^+ \). Now \( (\beta, \gamma) = \sum (\beta_i, \gamma) \) is a sum of nonnegative numbers, so $(\beta_i, \gamma) < (\beta, \gamma)$ for $i=1,2$. By minimality, \( \beta_i\in \ZZ_{\geq 0 } \Delta(\gamma) \), but then by adding them we get \( \beta\in \ZZ_{\geq 0 } \Delta(\gamma) \). 2. Claim: if \( \alpha, \beta\in \Delta( \gamma) \) with \( \alpha\neq \beta \) then \( (\alpha, \beta) \leq 0 \). Note \( \alpha\neq - \beta \) since $(\alpha, \gamma), (\beta, \gamma) > 0$. By lemma 9.4, if $( \alpha, \beta) > 0$ then \( \alpha - \beta\in \Phi \) is a root. Then one of \( \alpha- \beta, \beta- \alpha\in \Phi^+( \gamma) \). In the first case, \( \beta + (\alpha- \beta ) = \alpha \), decomposing \( \alpha \). In the second, \( \alpha + (\beta- \alpha) = \beta \), again a contradiction. 3. Claim: \( \Delta\da \Delta( \gamma) \) is linearly independent. Suppose $\sum_{ \alpha\in \Delta} r_ \alpha \alpha = 0$ for some $r_ \alpha \in \RR$. Separate the positive terms $\alpha\in \Delta'$ and the remaining $\alpha\in \Delta''$ to write $\eps \da \sum_{ \alpha\in \Delta'}r_ \alpha \alpha = \sum_{ \beta\in \Delta''} t_\beta \beta$ where now $r_ \alpha, t_\beta > 0$. Use the two expressions for $\eps$ to write \[ (\eps, \eps) = \sum _{ \alpha\in \Delta', \beta\in \Delta''} r_ \alpha t_ \beta (\alpha, \beta) \leq 0 ,\] since $r_ \alpha t_ \beta >0$ and $(\alpha, \beta) \leq 0$. So $\eps = 0$, since $(\wait, \wait)$ is an inner product. Write $0 = (\gamma, \eps) = \sum_{ \alpha\in \Delta'} r_\alpha (\alpha, \delta)$ where $r_ \alpha > 0$ and $(\alpha, \gamma) > 0$, so it must be the case that $\Delta' = \emptyset$. Similarly $\Delta'' = \emptyset$, so $r_\alpha = 0$ for all \( \alpha\in \Delta \). 4. Claim: \( \Delta( \gamma) \) is a base for $\Phi$. Since $\Phi = \Phi^+(\gamma)\disjoint \Phi^-( \gamma)$, we have B2 by step 1. This also implies $\Delta( \gamma)$ is a basis for $\EE$, since we have linear independent by step 3. Thus $\ZZ \Delta ( \gamma) \contains \Phi$ and $\RR \Phi = \EE$. 5. Claim: every base of $\Phi$ is $\Delta( \gamma)$ for some regular $\gamma$. Given $\Delta$, choose $\gamma\in \EE$ such that $(\gamma, \alpha) > 0$ for all \( \alpha\in \Delta \). Then $\gamma$ is regular by B2. Moreover $\Phi^+ \subseteq \Phi^+( \gamma)$ and similarly $\Phi^- \subseteq \Phi^-( \gamma)$, and taking disjoint unions yields $\Phi$ for both the inner and outer sets, forcing them to be equal, i.e. $\Phi^{\pm} = \Phi^{\pm}( \gamma)$. One can check that $\Delta \subseteq \Delta( \gamma)$ using $\Phi^+ = \Phi^+( \gamma)$ and linear independence of $\Delta$ -- but both sets are bases for $\EE$ and thus have the same cardinality $\ell = \dim \EE$, making them equal. ::: # Bases $\Delta$ for $\Phi$ (Friday, October 14) :::{.remark} From a previous discussion: given a rank $n$ root system $\Phi$ with $n\geq 2$, is $\RR\gens{ \alpha, \beta} \intersect \Phi$ always a rank 2 root system? The answer is yes! This follows readily from just checking the axioms directly. 
::: :::{.remark} For a regular \( \gamma \in \EE\sm \Union_{\alpha\in \Phi} P_ \alpha \), define $\Phi^+(\gamma) \da \ts{ \beta\in \Phi \st (\beta, \gamma) > 0}$ and let $\Delta( \gamma)$ be the indecomposable elements of $\Phi^+( \gamma)$: \begin{tikzpicture} \fontsize{45pt}{1em} \node (node_one) at (0,0) { \import{/home/zack/SparkleShare/github.com/Notes/Class_Notes/2022/Fall/LieAlgebras/sections/figures}{2022-10-14_09-16.pdf_tex} }; \end{tikzpicture} ::: :::{.theorem title="?"} $\Delta( \gamma)$ is a base for $\Phi$, and every base is of this form. ::: :::{.definition title="?"} The connected components of $\EE \sm \Union_{ \alpha\in \Phi} P_ \alpha$ are called the **(open) Weyl chambers** of $\EE$. Each regular $\gamma$ belongs to some Weyl chamber, which we'll denote $C(\gamma)$. ::: :::{.remark} Note that $C(\gamma) = C(\gamma') \iff \gamma, \gamma'$ are on the same side of every root hyperplane $P_\alpha$ for $\alpha\in \Phi$, which happens iff $\Phi^+( \gamma) = \Phi^+(\gamma;) \iff \Delta( \gamma) = \Delta(\gamma')$, so there is a bijection \[ \correspond{ \text{Weyl chambers} } &\mapstofrom \correspond{ \text{Bases for $\Phi$} } .\] Note also that $W$ sends one Weyl chamber to another: any $s_ \alpha$ preserves the connected components $\EE\sm \Union_{ \beta\in \Phi} P_{\beta}$, so if $\gamma$ is regular and $\sigma\in W$ and $\sigma( \gamma) = \gamma'$ for some regular $\gamma'$, then $\sigma(C(\gamma)) = C(\gamma')$. $W$ also acts on bases for $\Phi$: if $\Delta$ is a base for $\Phi$, then $\sigma( \Delta)$ is still a basis for $\EE$ since $\sigma$ is an invertible linear transformation. Since $\sigma( \Phi) = \Phi$ by the axioms, any root $\alpha\in \Phi$ is of the form $\sigma(\beta)$ for some \( \beta\in \Phi \), but writing $\beta = \sum _{\alpha\in\Delta} k_ \alpha \alpha$ with all $k_\alpha$ the same sign, $\sigma( \beta) = \sum_{ \alpha\in \Delta} k_ \alpha \sigma( \alpha)$ is a linear combination of elements in $\sigma(\Delta)$ with coefficients of the same sign. The actions of $W$ on chambers and bases are compatible: if $\Delta = \Delta( \gamma)$ then \( \sigma( \Delta) = \Delta( \sigma( \gamma)) \), since $\sigma(\Phi^+( \gamma)) = \Phi^+( \sigma( \gamma))$ since $W \leq \Orth(\EE)$ and thus $( \sigma \gamma, \sigma \alpha) = (\gamma, \alpha)$. ::: :::{.lemma title="A"} Fix a base $\Delta \subset \Phi$, which decomposes $\Phi = \Phi^+ \disjoint \Phi^-$. If \( \beta\in \Phi^+ \sm \Delta \), then \( \beta- \alpha\in \Phi^+ \) for some $\alpha\in\Delta$. ::: :::{.proof title="?"} If $( \beta, \alpha)\leq 0$ for all $\alpha\in \Delta$, then the proof of theorem 10.1 would show $\beta = 0$ by taking $\eps \da \beta$ in step 3. One can then find an $\alpha\in \Delta$ with $( \beta, \alpha) > 0$, where clearly $\beta \neq \pm \alpha$. By lemma 9.4, $\beta- \alpha\in \Phi$. Why is this positive? Note that $\beta$ has at least one coefficient for a simple root (not the coefficient for $\alpha$) which is strictly positive, and thus all coefficients are $\geq 0$. This coefficient stays the same in $\beta- \alpha$, so its coefficients are all non-negative by axiom B2 and $\beta-\alpha\in \Phi^+$. ::: :::{.corollary title="?"} Each $\beta\in \Phi^+$ can be written as \( \alpha_1 + \cdots + \alpha_k \) where $\alpha_i\in \Delta$ not necessarily distinct, such that each truncated sum \( \alpha_1 + \cdots + \alpha_i \) for $1\leq i \leq k$ is a positive root. One proves this by induction on the height of $\beta$. 
::: :::{.lemma title="B"} Let $\alpha \in \Delta$, then $s_ \alpha$ permutes $\Phi^+ \smts{ \alpha}$. ::: :::{.proof title="?"} Let $\beta \in \Phi^+\smts{\alpha}$; if $\beta = \sum _{\gamma\in \Delta} k_ \gamma \gamma$ with $k_ \gamma \in \ZZ_{\geq 0}$, since $\beta\neq \alpha$, some $k_ \gamma > 0$ for some $\gamma \neq \alpha$. Using the formula $s_ \alpha( \beta) = \beta- \inp \beta \alpha \alpha$ still has coefficient $k_ \gamma$ for $\gamma$. Thus $s_ \alpha( \beta) \in \Phi^+$ and $s_ \alpha( \beta)\neq \alpha$ since $s_ \alpha(- \alpha) = \alpha$ and $s_\alpha$ is bijective, and so $s_\alpha(\beta)\in \Phi^+\smts{ \alpha}$. As a result, $s_\alpha$ permutes this set since it is invertible. ::: :::{.corollary title="?"} Let \[ \rho \da {1\over 2}\sum_{ \beta\in \Phi^+} \beta \quad\in \EE .\] Then $s_\alpha( \rho) = \rho- \alpha$ for all \( \alpha\in \Delta \), and $s_ \alpha( \rho) = \rho$. > Note that Humphreys uses $\delta$, but nobody uses this notation. ::: :::{.lemma title="C, The Deletion Condition"} Let \( \alpha_1,\cdots \alpha_t \in \Delta \) be not necessarily distinct simple roots, and write $s_i \da s_{\alpha_i}$. If $s_1 \cdots s_{t-1}(\alpha_t) < 0$, then for some $1\leq u \leq t$ one has \[ s_1\cdots s_t = s_1\cdots s_{u-1} s_{u+1} s_{t-1} ,\] so one can delete $s_u$ and $s_t$ to get a shorter product of reflections. ::: :::{.proof title="?"} For $0\leq i\leq t-1$, let $\beta_i \da s_{i+1} \cdots s_{t-1}( \alpha_t)$ and $\beta_{t-1} \da \alpha_t$. Since $\beta_0 \da s_1\cdots s_{t-1}( \alpha_t ) < 0$ and $\beta_{t-1} = \alpha_t > 0$, there must be a smallest index $u$ such that $\beta_u > 0$. Note that $u\geq 1$ since $\beta_0$ is negative. Then \[ s_u( \beta_u) &= s_u s_{u+1} \cdots s_{t-1}( \alpha_t) \\ &= \beta_{u-1} \\ & < 0 \] by choice of $u$. Noting that $\beta_u = s_{u+1}\cdots s_{t-1} (\alpha_t)$, by lemma B, $s_u = s_{\alpha_u}$ permutes roots other than $\alpha_u$ since $\beta_u > 0$ and $s_{\alpha_u}( \beta_u) < 0$. By lemma 9.2, write \[ s_{\alpha_u} = s_{\beta_u} = s_{ s_{u+1}\cdots s_{t-1}( \alpha_t ) } = (s_{u+1} \cdots s_{t-1}) s_{\alpha_u} (s_{u+1}\cdots s_{t-1})\inv .\] Multiply both sides on the left by $(s_1\cdots s_u)$ and on the right by $(s_{u+1}\cdots s_{t-1})$ to obtain \[ (s_1 \cdots s_{u-1})(s_{u+1}\cdots s_{t-1}) = (s_1\cdots s_u)(s_{u+1}\cdots s_t), \qquad s_t \da s_{\alpha_t} .\] ::: :::{.corollary title="?"} If $\sigma = s_1\cdots s_t$ is an expression for $w\in W$ in terms of simple reflections (which we don't yet know exists, but it does) with $t$ minimal, then $\sigma( \alpha_t) < 0$. ::: # Monday, October 17 ## 10.3: The Weyl group :::{.theorem title="?"} Fix a base for $\Phi$. a. ($W$ acts transitively on the set of Weyl chambers) If $\gamma\in \EE$ is regular (not on a root hyperplane), there exists $\sigma\in W$ such that $( \sigma(\delta), \alpha)> 0$ for all $\alpha\in \Delta$, i.e. $\sigma( \gamma) \in \mcc(\Delta)$, the dominant Weyl chamber relative to $\Delta$. b. ($W$ acts transitively on bases) If $\Delta'$ is another base for $\Phi$, then there exists $\sigma\in W$ such that $\sigma( \Delta') = \Delta$, so $W$ acts transitively on bases. c. (Every orbit of $W\Phi$ contains a simple root) If $\beta\in \Phi$ then there exists a $\sigma\in W$ such that $\sigma( \beta)\in \Delta$. d. ($W$ is generated by simple roots) $W = \gens{s_ \alpha \st \alpha\in \Delta}$ is generated by *simple* roots. e. (Stabilizers are trivial) If $\sigma( \Delta) = \Delta$ for some $\sigma\in W$, then $\sigma = 1$. 
::: :::{.proof title="?"} **Part c**: Set $W' \da \gens{s_ \alpha\st \alpha\in \Delta}$, we'll prove (c) with $W$ replaced $W'$, which is larger. First suppose \( \beta\in \Phi^+ \) and consider $W' \beta \intersect \Phi^+$. This is nonempty since it includes $\beta$ and is a finite set, so choose $\gamma$ in it of minimal height. Claim: $\height(\gamma) = 1$, making $\gamma$ simple. If not, supposing $\height( \gamma) > 1$, write $\gamma = \sum_{ \alpha\in \Delta} k_ \alpha \alpha$ with $k_ \alpha > 0$. Since $\gamma\neq 0$, we have $(\gamma, \gamma) > 0$, so substitute to yield \[ 0 < (\gamma, \gamma) = (\gamma, \sum_{\alpha \in \Delta} k_ \alpha \alpha) = \sum_{\alpha\in \Delta} k_ \alpha (\gamma, \alpha) ,\] so $(\gamma, \alpha)>0$ for some \( \alpha \in \Delta \), and $s_{ \alpha} \gamma = \gamma - \inp \gamma \alpha \alpha\in \Phi^+$ is positive where $\inp \gamma \alpha > 0$. This is a contradiction, since it has a smaller height. Note that if $\beta\in \Phi^-$ then $-\beta\in \Phi^+$ and there exists a $\sigma\in W'$ such that $\sigma( - \beta) = \alpha\in \Delta$. So $\sigma( \beta) = - \alpha$, and $s_ \alpha \sigma( \beta) = s_ \alpha( - \alpha) = \alpha \in \Delta$. **Part d**: Given $\beta$, pick $\sigma\in W'$ such that $\sigma\inv( \beta) = \alpha\in \Delta$. Then \[ s_ \beta = s_{ \sigma( \alpha)} = \sigma s_{ \alpha} \sigma\inv \in W' ,\] so $W \leq W' \leq W$, making $W = W'$. **Parts a and b**: Recall $\rho = {1\over 2}\sum _{ \beta\in \Phi^+}$ and choose $\sigma\in W$ such that $( \sigma(\delta), \rho)$ is maximal (picking from a finite set). Given $\alpha\in \Delta$, we have $s_\alpha \sigma\in W$, and so \[ ( \sigma(\delta), \rho) &\geq ( \sigma(\delta), \rho) \\ &= (\sigma(\delta), s_ \alpha \rho) \\ &= (\sigma(\delta), \rho - \alpha) \\ &= (\sigma(\delta), \rho ) - ( \sigma( \delta), \alpha) ,\] and so $( \sigma( \delta), \alpha)\geq 0$ for all $\alpha\in \Delta$. Importantly, $\gamma$ is regular, so this inequality is structure for all $\alpha\in \Delta$. So $W$ acts transitively on the Weyl chambers, and consequently on simple systems (i.e. bases for $\Phi$) by the discussion at the end of $\S 10.1$.2 **Part e**: Suppose $\sigma( \Delta) = \Delta$ and $\sigma \neq 1$, and write $\sigma = \prod_{1\leq i \leq t} s_i$ with $s_i \da s_{\alpha_i}$ for \( \alpha_i \in \Delta\) with $t \geq 1$ minimal. Note $\sigma( \Delta) = \Delta$ and $\alpha_t \in \Delta$, we have \( \sigma( \alpha_t) > 0 \) and $\prod_{1\leq i\leq t}(\alpha_t) = \prod_{1\leq i \leq t-1}s_i (-\alpha_t)$ so $\prod_{1\leq i\leq t-1} s_i(\alpha_t) < 0$. This fulfills the deletion condition, so $\prod_{1\leq i \leq t} = s_1\cdots \hat{s_u}\cdots \hat{s_t}$ which is of smaller length. ::: :::{.remark} In type $A_n$, $\size W(A_n) \approx n!$, and since bases biject with $W$ there are many choices of bases. ::: :::{.definition title="?"} Let $\Delta \subseteq \Phi$ be a base and write \( \sigma\in W \) as \( \sigma = \prod_{1\leq i \leq t} s_{\alpha_i} \) with $\alpha_i\in \Delta$ and $t$ minimal. We say this is a **reduced expression** for $\sigma$ and say $t$ is the **length** of $\sigma$, denoted $\ell( \sigma)$. By definition, $\ell(1) = 0$. ::: :::{.remark} Since $W \leq \GL(\EE)$, there is a map $\det: W\to \GL_1(\RR) = \RR\units$. The determinant of a reflection is $-1$ by writing it in a basis about the fixed hyperplane, and so $\det\sigma = (-1)^{\ell( \sigma)}$ and in fact $\det: W\to \ts{\pm 1}$. Thus $\ell( \sigma \sigma') \equiv \ell( \sigma) + \ell( \sigma)\mod 2$. 
Note also that if $\sigma' = s_ \alpha$ for $\alpha$ simple, then $\ell( \sigma s_{ \alpha}) = \ell( \sigma) \pm 1$. The proof: $\ell( \sigma s_ \alpha)\leq \ell( \sigma) + 1$, similarly for $\sigma s_ \alpha$, and use $\det( \sigma s_ \alpha) = - \det \sigma$. ::: :::{.warnings} Reduced expressions are not unique: for $A_2$, one has $s_ \alpha s_ \beta s_ \alpha = s_ \beta s_ \alpha s_ \beta$, and these two reflections do not commute. ::: :::{.remark} Some temporary notation for this section: for $\sigma\in W$, set \[ n( \sigma) \da \size( \Phi^- \intersect \sigma(\Phi^+)) ,\] the number of positive roots that $\sigma$ sends to negative roots. ::: :::{.lemma title="A"} For all $\sigma\in W$, \[ n( \sigma) = \ell( \sigma) .\] ::: :::{.proof title="?"} Induct on $\ell(\sigma)$: if zero, then $\sigma = 1$ and $n(1) = 0$ since it fixes all positive roots. If $\ell( \sigma ) = 1$ then \( \sigma = s_{ \alpha} \) for some simple $\alpha$, and we know from the last section that $\sigma$ permutes $\Phi^+\smts{ \alpha}$ and \( \sigma( \alpha) = - \alpha \), so $n( \sigma) = 1$. ::: # Wednesday, October 19 :::{.proof title="of lemma A, continued"} We're proving $\ell( \sigma) = n(\sigma) \da \size( \Phi^- \intersect \sigma(\Phi^-))$ by induction on $\ell(\sigma)$, where we already checked the zero case. Assume the result for all $\tau$ with $\ell( \tau) \leq \ell( \sigma)$ for $\tau \in W$. Write \( \sigma = s_1\cdots s_t \) with $s_i \da s_{ \alpha_i}, \alpha_i\in \Delta$ reduced. Set $\tau \da \sigma s_t = s_1\cdots s_{t-1}$ which is again reduced with $\ell(\tau) = \ell( \sigma) - 1$. By the deletion condition, $s_1 \cdots s_{t-1}( \alpha_t) > 0$, so $s_1\cdots s_{t-1}s_t (\alpha_t) = s_1 \cdots s_{t-1}(- \alpha_t) < 0$. Thus $n(\tau) = n( \sigma) - 1$, since $s_t$ permutes $\Phi^+\smts{\alpha_t}$, so \[ \ell( \sigma) - 1 = \ell( \tau) = n( \tau) = n( \sigma) -1 \implies \ell( \sigma) = n( \sigma) .\] ::: :::{.remark} This is useful for finding reduced expressions, or at least their length: just compute how many positive roots change sign under $\sigma$. Using the deletion condition and lemma A, it's clear that any expression for $\sigma$ as a product of simple reflections can be converted into a *reduced* expression by deleting pairs of simple reflections, and this terminates after finitely many steps. ::: :::{.lemma title="B"} Recall that the open Weyl chambers are the complements of hyperplanes. The closure of any Weyl chamber is a fundamental domain for the action $W\actson \EE$. ::: ## 10.4: Irreducible root systems :::{.definition title="Irreducible root systems"} A root system $\Phi \subseteq \EE$ is **irreducible** if it cannot be partitioned into mutually orthogonal nonempty subsets. Otherwise, $\Phi$ is **reducible**. ::: :::{.proposition title="?"} Let $\Delta \subseteq \Phi$ be a simple system. Then $\Phi$ is irreducible iff $\Delta$ is irreducible, i.e. $\Delta$ cannot be partitioned into nonempty orthogonal subsets. ::: :::{.proof title="?"} $\Phi$ reducible implies $\Delta$ reducible: write $\Phi = \Phi_1 \disjoint \Phi_2$ where $( \Phi_1, \Phi_2) = 0$; this induces a similar partition of $\Delta$. Then $(\Delta, \Phi_2) = 0 \implies (\EE, \Phi_2) = 0 \implies \EE = \emptyset$ using nondegeneracy of the bilinear form. $\contradiction$ Now $\Delta$ reducible implies $\Phi$ reducible: write $\Delta =\Delta_1 \disjoint \Delta_2$ with $(\Delta_1, \Delta_2) = 0$. Let $\Phi_i$ be the roots which are $W\dash$conjugate to an element of $\Delta_i$. 
Then elements in $\Phi_i$are obtained from $\Delta_i$ by adding and subtracting only elements of $\Delta_i$, so $(\Phi_1, \Phi_2) = 0$ and $\Phi = \Phi_1 \union \Phi_2$ by a previous lemma that every $\beta\in \Phi$ is conjugate to some $\alpha\in \Delta$. ::: :::{.lemma title="A"} Let $\Phi \contains \Delta$ be irreducible. Relative to the partial order $\leq$ on roots, there is a unique maximal root $\tilde \alpha$. In particular, if $\beta\in \Phi$ and $\beta\neq \tilde \alpha$, then $\height( \beta) < \height( \tilde \alpha)$ and $(\tilde \alpha, \alpha) \geq 0$ for all $\alpha\in \Delta$. Moreover, one can write $\tilde \alpha = \sum _{\alpha\in \Delta}$ with $k_\alpha > 0$, i.e. it is a sum where every simple root appears. ::: :::{.proof title="?"} **Existence**: Let $\tilde \alpha$ be any maximal root in the ordering. Given \( \alpha \in \Delta, (\tilde \alpha, \alpha) \geq 0 \) -- otherwise $s_ \alpha(\tilde \alpha)= \tilde \alpha-\inp{ \tilde \alpha}{\alpha} \alpha > \alpha$, a contradiction. $\contradiction$ Write $\tilde \alpha = \sum_{\alpha\in \Delta} k_ \alpha \alpha$ with $k_ \alpha \in \ZZ_{\geq 0}$, where it's easy to see these are all non-negative. Suppose some $k_\gamma = 0$, then $(\tilde \alpha, \gamma)\leq 0$ -- otherwise $s_\gamma( \tilde \alpha) = \tilde \alpha - \inp{\tilde \alpha}{\gamma} \gamma$ has both positive and negative coefficients, which is not possible. Since $(\tilde \alpha, \alpha) \geq 0$, we must have $( \tilde \alpha, \gamma) = 0$. So write \[ 0 = (\tilde \alpha, \gamma) = \sum_{ \alpha\in \Delta} k_ \alpha( \alpha, \gamma) \leq 0 ,\] so $( \alpha, \gamma)= 0$ whenever $k_\alpha \neq 0$, otherwise this expression would be strictly $< 0$. Thus \( \Delta = \Delta_1 \disjoint \Delta_2 \) where \( \Delta_1 = \ts{\alpha\in \Delta \st K_ \alpha\neq 0} \) and \( \Delta_2 = \ts{ \alpha\in \Delta\st k_ \alpha = 0 } \). This is an orthogonal decomposition of $\Delta$, since any $\gamma \in \Delta_2$ is orthogonal to any $\alpha\in \Delta_1$. Note that $\Delta_1\neq \empty$ since $\tilde\alpha \neq 0$, and if $\Delta_2\neq \empty$ then this is a contradiction, so $\Delta_2$ must be empty. So no such $\gamma$ exists. **Uniqueness**: let $\tilde \alpha$ be any maximal root in the ordering and let $\tilde \alpha'$ be another such root. Then $(\tilde \alpha, \tilde \alpha') = \sum_{\alpha\in \Delta} k_ \alpha (\alpha, \tilde \alpha')$ with $k_ \alpha > 0$ and $(\alpha, \tilde \alpha') \geq 0$. So $(\tilde \alpha, \tilde \alpha ') > 0$ since $\Delta$ is a basis for $\EE$ and anything orthogonal to a basis is zero by nondegeneracy of the form. Since $\tilde \alpha \neq 0$, it is not orthogonal to everything. By Lemma 9.4, either $\tilde \alpha, \tilde \alpha '$ are proportional (which was excluded in the lemma), in which case they are equal since they're both positive, or otherwise \( a \da \tilde \alpha - \tilde \alpha' \in \Phi \) is a root. In the latter case, $a > 0 \implies \tilde \alpha > \tilde \alpha'$ or $a< 0 \implies \tilde \alpha < \tilde \alpha'$, both contradicting maximality. ::: :::{.remark} If $\beta = \sum_{\alpha\in \Delta} m_ \alpha \alpha \in \Phi^+$, then $m_ \alpha \leq k_ \alpha$ for all $\alpha$ since $\beta \leq \alpha$. ::: :::{.lemma title="B"} If $\Phi$ is irreducible then $W$ acts irreducibly on $\EE$ (so there are no $W\dash$invariant subspaces). In particular, the $W\dash$orbit of a root spans $\EE$. ::: :::{.proof title="?"} Omitted. 
::: :::{.lemma title="C"} If $\Phi$ is irreducible, then at most two root lengths occur, denoted **long** and **short** roots. ::: :::{.proof title="?"} Omitted. ::: :::{.example title="?"} $B_2$ has 4 long roots and 4 short roots, since they fit in a square: \begin{tikzpicture} \fontsize{45pt}{1em} \node (node_one) at (0,0) { \import{/home/zack/SparkleShare/github.com/Notes/Class_Notes/2022/Fall/LieAlgebras/sections/figures}{2022-10-19_09-57.pdf_tex} }; \end{tikzpicture} Similarly $G_2$ has long and short roots, fitting into a star of David. ::: :::{.lemma title="D"} If $\Phi$ is irreducible then the maximal root $\tilde \alpha$ is a long root. ::: :::{.remark} There is also a unique maximal short root. ::: :::{.proof title="?"} Omitted. ::: # Friday, October 21 ## 11.1: The Cartan Matrix :::{.definition title="Cartan matrix"} Fix $\Delta \subseteq \Phi$ a rank $\ell$ root system with Weyl group $W$. Let $\Delta = \tsl \alpha 1 \ell$ and then the matrix $A$ where $A_{ij} = \inp {\alpha_i}{\alpha_j} = 2{\inp{\alpha_i}{\alpha_j} \over \inp{\alpha_j}{\alpha_j}}$ is the **Cartan matrix of $A$.** Note that changing the ordering of $\Delta$ permutes the rows and columns of $A$, but beyond this, $A$ does not depend on the choice of $\Delta$ since they are permuted by $W$ and $W$ preserves the inner products and thus the ratios defining the *Cartan numbers* $A_{ij}$. More $A\in \GL_\ell(\ZZ)$ since the inner product is nondegenerate and $\Delta$ is a basis for $\EE$. ::: :::{.example title="?"} Note that the diagonals are always 2. Some classical types: - $A_1 \times A_1: \matt 2002$ - $A_2: \matt 2 {-1} {-1} 2$ - $B_2: \matt 2 {-2}{-1} 2$ - $G_2: \matt 2 {-1}{-3} 2$. ::: :::{.remark} The Cartan matrix $A$ determines the root system $\Phi$ up to isomorphism: if $\Phi' \subseteq \EE'$ is another root system with base $\Delta' = \tsl{ \alpha'} 1 \ell$ with $A'_{ij} = A_{ij}$ for all $i, j$ then the bijection $\alpha_i \mapsto \alpha_i'$ extends to a bijection $\phi: \EE \iso \EE'$ sending $\Phi$ to $\Phi'$ which is an isometry, i.e. $\inp{\varphi(\alpha)}{\varphi( \beta)} = \inp \alpha \beta$ for all \( \alpha, \beta \in \Phi \). Since $\Delta, \Delta'$ are bases of $\EE$, this gives a vector space isomorphism $\phi(\alpha_i) \da \alpha_i'$. If $\alpha, \beta\in \Delta$ are simple, then \[ s_{\varphi( \alpha)}( \varphi( \beta)) &= \varphi( \beta)- \inp{\beta'}{\alpha'}\phi( \alpha) \\ &= \varphi( \beta)-\inp{ \beta}{ \alpha} \phi( \alpha) \\ &= \phi(\beta- \inp \beta \alpha \alpha) \\ &= \phi( s_ \alpha( \beta)) ,\] so this diagram commutes since these maps agree on the simple roots, which form a basis: \begin{tikzcd} \EE && \EE \\ \\ \EE && \EE \arrow["\phi", from=1-1, to=1-3] \arrow["{s_\alpha}", from=1-3, to=3-3] \arrow["\phi"', from=3-1, to=3-3] \arrow["{s_\alpha}"', from=1-1, to=3-1] \end{tikzcd} > [Link to Diagram](https://q.uiver.app/?q=WzAsNCxbMCwwLCJcXEVFIl0sWzIsMCwiXFxFRSJdLFswLDIsIlxcRUUiXSxbMiwyLCJcXEVFIl0sWzAsMSwiXFxwaGkiXSxbMSwzLCJzX1xcYWxwaGEiXSxbMiwzLCJcXHBoaSIsMl0sWzAsMiwic19cXGFscGhhIiwyXV0=) Since $W, W'$ are generated by reflections and $s_{ \varphi( \alpha)} = \varphi\circ s_ \alpha \circ \varphi\inv$ for $\alpha\in \Delta$, there is an isomorphism \[ W &\iso W \\ s_ \alpha &\mapsto s_{ \varphi( \alpha)} = \varphi s_ \alpha \varphi \quad \forall \alpha \in \Delta .\] If $\beta \in \Phi$, then $\beta = w( \alpha)$ for some $\alpha\in \Delta$ and $w\in W$ by theorem 10.3C. 
Thus $\phi( \beta) = ( \varphi \circ w \circ \varphi\inv)( \varphi( \alpha))\in \Phi'$ since \( \varphi\circ w \circ \varphi\inv \in W' \). Thus $\phi( \Phi) = \Phi'$. Using lemma 9.2, $s_{\varphi(\beta)} = \varphi s_ \beta \varphi\inv$, so $\phi$ preserves all of the Cartan integers $\inp \beta \gamma$ for all \( \gamma, \beta\in \Phi \). ::: :::{.remark} Read the last paragraph of $\S 11.1$ which gives an algorithm for constructing $\Phi^+$ from $\Delta$ and $A$. ::: ## 11.2: Coxeter graphs and Dynkin diagrams :::{.definition title="Coxeter graph"} If $\alpha\neq \beta\in \Phi^+$ then $\inp \beta \alpha\inp \alpha \beta = 0,1,2,3$ from the table several sections ago. Fix $\Delta = \tsl \alpha 1 \ell$, then the **Coxeter graph** $\Gamma$ of $\Phi$ is the graph with $\ell$ vertices $1,\cdots, \ell$ with vertices $i, j$ connected by $\inp {\alpha_i}{ \alpha_j} \inp {\alpha_j}{\alpha_i}$ edges. ::: :::{.example title="?"} Recall that the table was | $\inp \alpha \beta$ | $\inp \beta \alpha$ | |---|---| | 0 | 0 | | -1 | -1 | | -1 | -2 | | -1 | -3 | Here $\alpha$ is the shorter root., although without loss of generality in the first two rows we can rescale so that $\norm \alpha= \norm \beta$. The graphs for some classical types: ![](figures/2022-10-21_09-42-15.png) ::: :::{.remark} If $\Phi$ has roots all of the same length, the Coxeter graph determines the Cartan integers since $A_{ij} = 0, 1$ for $i\neq j$. If $i \to j$ is a subgraph of $\Gamma$ then $\inp{ \alpha_i}{ \alpha_j} = \inp{ \alpha_j}{\alpha_i} = -1$, so \( \alpha_i, \alpha_j \) have the same length. However, if there are roots of multiple lengths, taking the product to determine the number of edges loses information about which root is longer. ::: :::{.definition title="Dynkin diagram"} The **Dynkin diagram** of $\Phi$ is the Coxeter graph $\Gamma$ where for each multiple edge, there is an arrow pointing from the longer root to the shorter root. ::: :::{.example title="?"} In rank 2: ![](figures/2022-10-21_09-50-47.png) We also have the following diagram for $F_4$: ![](figures/2022-10-21_09-51-32.png) ::: :::{.remark} Note that $\Phi$ is irreducible iff $\Delta$ can not be partitioned into two proper nonempty orthogonal subsets iff the Coxeter graph is connected. In general, if $\Gamma$ has $t$ connected components, let $\Delta = \Disjoint_{1\leq i\leq t} \Delta_i$ be the corresponding orthogonal partition of simple roots. Let $\EE_i = \spanof_\RR\Delta_i$, then $\EE = \bigoplus_{1\leq i\leq t}\EE_i$ is an orthogonal direct sum decomposition into $W\dash$invariant subspaces, which follows from the reflection formula. Writing $\Phi_i = (\ZZ \Delta_I) \intersect \Phi$, one has $\Phi = \Disjoint_{1\leq i\leq t} \Phi_i$ since each root is $W\dash$conjugate to a simple root and $\ZZ\Delta_i$ is $W\dash$invariant and each $\Phi_i \subseteq \EE_i$ is itself a root system. Thus it's enough to classify irreducible root systems. ::: # Monday, October 24 :::{.remark} Classifying root systems: $\Delta \subseteq \Phi \subseteq \EE$ a base yields a decomposition - $\EE = \bigoplus_{i=1}^t \EE_i$, - $\Phi= \bigoplus_{i=1}^t \Phi_i$, - $\Delta = \bigoplus_{i=1}^t \Delta_i$, where are orthogonal direct sums with respect to $(\wait, \wait)$. Note that the sub-bases $\Delta_i$ biject with connected components of the Coxeter graph $\Gamma$ of $\Delta$. 
We saw $\inp{\alpha_i}{\alpha_j} \inp {\alpha_j}{\alpha_i} \in \ts{0,1,2,3}$ is the number of edges between nodes $i$ and $j$ in $\Gamma$, using that the first term is $4\cos^2(\theta)\in [0, 3] \intersect \ZZ$. It suffices to classify irreducible root systems, corresponding to connected Coxeter graphs. Recall arrows point from long to short roots. ::: :::{.theorem title="?"} If $\Phi$ is an irreducible root system of rank $\ell$, then its Dynkin diagram is one of the following: - The four infinite families, corresponding to classical types: ![](figures/2022-10-24_09-19-12.png) - Exceptional classes ![](figures/2022-10-24_09-19-36.png) Types ADE are called **simply laced** since they have no multiple edges. ::: :::{.remark} Idea: classify possible connected Coxeter graphs, ignoring relative root lengths. If \( \alpha, \beta \) are simple roots, note that for any $c$, \[ \inner{ \alpha }{ \beta } \inner{ \beta }{ \alpha } = {2(c \alpha, \beta)\over (\beta, \beta)} {2( \beta, c \alpha) \over (c \alpha, c \alpha)} \in \ts{0,1,2,3} ,\] so $\alpha \mapsto c\alpha$ leaves this number invariant and we can assume all simple roots are unit vectors. ::: :::{.definition title="?"} Let $\EE$ be a finite dimensional Euclidean space, then a subset $A =\ts{\tl \eps n }\subseteq \EE$ of linearly independent unit vectors satisfying - $(\eps_i, \eps_j) \leq 0$ for all $i,j$, - $4(\eps_i, \eps_j)^2 = 4\cos^2(\theta) \in \ts{0,1,2,3}$ for all $i\neq j$ where $\theta$ is the angle between $\eps_i$ and $\eps_j$ is called **admissible**. ::: :::{.example title="?"} Any base for a root system where each vector is normalized is admissible. ::: :::{.remark} To such an $A$ we associate a graph $\Gamma$ as before with vertices $1,\cdots, n$ where $i,j$ are joined by $4(\eps_i, \eps_j)^2$ edges. We'll determine all connected graphs $\Gamma$ that can occur, since these include all connected Coxeter graphs. ::: ## Proof of classification :::{.proof title="Sketch"} An easy 10 steps: 1. If some $\eps_i$ are discarded, the remaining ones still form an admissible set in $\EE$ whose graph is obtained from $\Gamma$ by omitting the corresponding discarded vertices. 2. The number of pairs of vertices in $\Gamma$ connected by at least one edge is strictly less than $n$. Proof: Set $\eps \da\sum_{i=1}^n \eps_i$, which is nonzero by linear independence. Then $0 < (\eps, \eps) = n + \sum_{i 0$ so $\sum_{i=1}^k (\eps, \eta_i)^2 < 1$ and thus $\sum_{i=1}^k 4 (\eps, \eta_i)^2 < 4$. But this sum is the number of edges incident to $\eps$ in $\Gamma$. 5. The only connected graph which contains a triple edge is the Coxeter graph of $G_2$ by (4), since the triple edge forces each vertex to already have 3 incident edges. 6. Let $\ts{\eps_1,\cdots, \eps_k} \subseteq A$ have a simple chain $\cdot \to \cdot \to \cdots \to \cdot$ as a subgraph. If $A' \da \ts{A\smts{\tl \eps k}} \union\ts{\eps}$ where $\eps \da \sum_{i=1}^k \eps_i$, then $A'$ is admissible. The corresponding graph $\Gamma'$ is obtained by shrinking the chain to a point, where any edge that was incident to any vertex in the chain is now incident to $\eps$, with the same multiplicity. Proof: number the vertices in the chain $1,\cdots, k$. Linear independence of $A'$ is clear. Note $4(\eps_i, \eps_{i+1})^2 = 1\implies 2(\eps_i, \eps_{i+1}) \implies (\eps, \eps) = k + 2 \sum_{i< j} (\eps_i, \eps_j) = k + (-1)(k-1) = 1$. 
Any $\eta\in A\smts{\tl \eps k}$ is connected to at most one of $\tl \eps k$ since this would otherwise form a cycle, so $(\eta, \eps) = (\eta, \eps_i)$ for a single $i$. So $4(\eta, \eps)^2 = 4(\eta, \eps_i)^2 \in \ts{0,1,2,3}$ and $(\eta, \eps) = (\eta, \eps_i) \leq 0$, which verifies all of the admissibility criteria. 7. $\Gamma$ contains no graphs of the following forms: ![](figures/2022-10-24_10-01-11.png) Proof: collapsing the chain in the middle produces a vertex with 4 incident edges. ::: # Wednesday, October 26 > Missed first 15m! :::{.proposition title="Step 8"} Any connected $\Gamma$ is one of the following types: ![](figures/2022-10-26_09-28-53.png) ::: :::{.proposition title="Step 9"} The only connected $\Gamma$ graphs of the second type in Step 8 are either $F_4$ or $B_\ell = C_\ell$. Compute \[ (\eps, \eps) = \sum_{i=1}^k i^2 - \sum_{i=1}^{p-1} i(i+1) \\ = p^2 - \sum_{i=1}^{p-1} i \\ = p^2 - {p(p-1)\over 2} \\ = {p(p+1)\over 2} ,\] and similarly $(\eta, \eta) = {q(q+1)\over 2}$. Note $4(\eps_p, \eta_q)^2 = 2$, so $(\eps,\eta)^2 = p^2 q^2 (\eps_p, \eta_q)^2 = {p^2q^2\over 2}$. By Cauchy-Schwarz, $(\eps, \eta)^2< (\eps, \eps) (\eta, \eta)$, where the inequality is strict since $\eta, \eps$ are linearly independent. Then check \[ {p^2 q^2\over 2} &< {p(p+1)\over 2} \cdot {q(q+1)\over 2} \\ {p1\over 2} &< {p+1\over 2}\cdot {q+1\over 2} \\ 2pq &< pq + p + q + 1 ,\] and so combining these yields $pq-p-q+1 < 2$ and thus \[ (p-1)(q-1) < 2 .\] Since $p\geq q\geq 1$, this yields two possible cases: - $p=q=2 \leadsto F_4$ - $q=1, p\in \ZZ_{\geq 0} \leadsto B_\ell = C_\ell$. ::: :::{.proposition title="Step 10"} The only connected $\Gamma$ of type (d) are $D_\ell, E_6, E_7, E_8$. Set $\eps \da \sum i\eps_i$, $\eta \da \sum i\eta_i$, and $\zeta = \sum i \zeta_i$. Note that $\eps, \eta, \zeta$ mutually orthogonal by inspecting the graph, and $\psi$ is not in their span. Let $\theta_1$ (resp. $\theta_2, \theta_3$) be the angles between $\eps$ (resp. $\eta, \zeta$) and $\psi$. Since $\eps,\eta,\zeta$ are linearly independent, the idea is to apply Gram-Schmidt to $\ts{\eps,\eta,\zeta,\psi}$ without normalizing. The first 3 are already orthogonal, so we get a new orthogonal basis $\ts{\psi_1 \da \eps, \psi_2\da \eta, \psi_3\da \zeta, \psi_0}$ where $(\psi_0, \psi) \neq 0$. We can expand $\psi$ in this basis to write $\psi = \sum_{i=0}^3 \qty{\psi, {\psi_i\over \norm{\psi_i}}} {\psi_i \over \norm{\psi_i}}$. Note that $(\psi, \psi) = 1$, and consequently $\sum_{i=1}^3 \qty{\psi, {\psi_i \over\norm{\psi_i}}}^2 < 1 \implies \sum_{i=1}^3 \cos^2(\theta_i) < 1$. So \[ \cos^2(\theta_1) + \cos^2( \theta_2) + \cos^2( \theta_3) < 1 .\] As in Step (9), $(\eps, \eps) = {p(p-1)\over 2}$ and similarly for $\eta,\zeta$, and so \[ \cos^1( \theta_1) = {(\eps, \psi)^2 \over (\eps, \eps) (\psi, \psi)} = {(p-1)^2 (\eps_{p-1}, \psi )^2 \over {p(p-1)\over 2} \cdot 1 } \\ = {p-1\over p} {1/4\over 1/2} \\ = {1\over 2}\qty{1-{1\over p}} ,\] where we've used that $4(\eps_{p-1},\psi ) = 1$. Similarly (and summarizing), \[ \cos^2(\theta_1) &= {1\over 2}\qty{1-{1\over p}} \\ \cos^2(\theta_2) &= {1\over 2}\qty{1-{1\over q}} \\ \cos^2(\theta_3) &= {1\over 2}\qty{1-{1\over r}} \\ \\ &\implies {1\over 2}\qty{ 1 - {1\over p} + 1 - {1\over q} + 1 - {1\over r}} < 1 \\ &\implies p\inv + q\inv +r\inv > 1 .\] and since $p\geq q\geq r\geq 2 \implies p\inv \leq q\inv \leq r\inv \leq 2\inv$, we have ${3\over r} > 1$ by replacing $p,q$ with $r$ above. 
So $r < 3$, forcing $r=2$, and there is only one "top leg" in the graph for (d) above. We also have \[ {2\over q} \geq {1\over p} + {1\over q} > {1\over 2}, \qquad (\star) .\] so $q<4$ forces $q=2,3$. - If $q=2$, then $(\star)$ is true for any $p\geq 2$, and the bottom leg has two vertices and this yields type $D_\ell$. - If $q=3$ then ${1\over p} > {1\over 2}-{1\over 3} = {1\over 6}$ implies $p < 6$, forcing $p=3,4,5$ corresponding to $E_6, E_7, E_8$. ::: :::{.remark} Note that the diagrams we've constructed are the only possible Coxeter graphs of a root system, since normalizing any set of simple roots yields an admissible set. This proves one direction of a correspondence, but what are all possible Dynkin diagrams? Note that types $B_\ell, C_\ell$ have the same underlying Coxeter graph, and only differ by directions on the multi-edges. ::: :::{.question} Does every connected Dynkin diagram correspond to an irreducible root system. Yes: types $A,B,C,D$ can be constructed from root systems in classical Lie algebras, and the corresponding Dynkin diagrams can be constructed directly. The 5 exceptional types must be constructed directly. ::: :::{.question} Does each irreducible root system occur as the root system of some semisimple Lie algebra over $\CC$? The answer is of course: yes! ::: > Next time: starting Ch. V. # Part V: Existence Theorem. Ch. 17: The Universal Enveloping Algebra (Monday, October 31) ## 17.1: The tensor algebra and symmetric algebra :::{.remark} Let $\FF$ be an arbitrary field, not necessarily characteristic zero, and let $L\in \Lie\Alg\slice\FF$ be an arbitrary Lie algebra, not necessarily finite-dimensional. Recall that the tensor algebra $T(V)$ is the $\ZZ_{\geq 0}$ graded unital algebra where $\gr_n T(V) = T^n(V) \da V\tensorpower{\FF}{n}$ where $T^0(V) \da \FF$. Note $T(V) = \bigoplus _{n\geq 0} T^n(V)$. If $V$ has a basis $\ts{x_k}_{k\in K}$ then $T(V) \cong \FF\gens{x_k \st k\in K}$, a polynomial ring in the noncommuting variables $x_k$. Degree $n$ monomials in this correspond to pure tensors with $n$ components in $T(V)$. There is an $\FF\dash$linear map $V \mapsvia{i} T(V)$, and $T(V)$ satisfies a universal property: given any linear map $\phi\in \mods{\FF}(V, A)$ where $A$ has the structure of an associative algebra, there exists a unique $\psi\in \Assoc\Alg\slice\FF(T(V), A)$ making the diagram commute: \begin{tikzcd} V && {T(V)} \\ \\ && A \arrow["i", from=1-1, to=1-3] \arrow["{\exists !}", dashed, from=1-3, to=3-3] \arrow["\phi"', from=1-1, to=3-3] \end{tikzcd} > [Link to Diagram](https://q.uiver.app/?q=WzAsMyxbMCwwLCJWIl0sWzIsMCwiVChWKSJdLFsyLDIsIkEiXSxbMCwxLCJpIl0sWzEsMiwiXFxleGlzdHMgISIsMCx7InN0eWxlIjp7ImJvZHkiOnsibmFtZSI6ImRhc2hlZCJ9fX1dLFswLDIsIlxccGhpIiwyXV0=) In fact, one can explicitly write $\psi$ as $\psi(x_{k_1}\tensor \cdots x_{k_n}) = \phi(x_k)\cdots \phi(x_{k_n})$ using the multiplication in $A$. The **symmetric algebra** and **exterior algebra** are defined as \[ S(V) \da T(V)/\gens{x\tensor y -y\tensor x \st x,y\in V}, \Extalg(V) \da T(V)/\gens{x\tensor y + y\tensor x \st x,y\in V} .\] ::: :::{.definition title="The Universal Enveloping Algebra"} Let $L\in \Lie\Alg\slice \FF$ with basis $\ts{x_k}_{k\in K}$. A **universal enveloping algebra** for $L$ is a pair $(U, i)$ where $U$ is a unital associative $\FF\dash$algebra and $i: L \to U_L$ (where $U_L$ is $U$ equipped with the commutator bracket multiplication) is a morphism of Lie algebras, i.e. 
\[ i([xy]) = i(x) i(y) - i(y) i(x) = [i(x) i(y) ] \quad \forall x,y\in L .\] It satisfies a universal property: for any unital associative algebra $A$ receiving a Lie algebra morphism $j: L\to A_L$, there is a unique $\phi$ in the following: \begin{tikzcd} \Lie\Alg && \Assoc\Alg \\ L && U \\ \\ && A \arrow["i", from=2-1, to=2-3] \arrow["{\exists ! \phi}", dashed, from=2-3, to=4-3] \arrow["j"', from=2-1, to=4-3] \end{tikzcd} > [Link to Diagram](https://q.uiver.app/?q=WzAsNSxbMiwxLCJVIl0sWzIsMywiQSJdLFsyLDAsIlxcQXNzb2NcXEFsZyJdLFswLDAsIlxcTGllXFxBbGciXSxbMCwxLCJMIl0sWzQsMCwiaSJdLFswLDEsIlxcZXhpc3RzICEgXFxwaGkiLDAseyJzdHlsZSI6eyJib2R5Ijp7Im5hbWUiOiJkYXNoZWQifX19XSxbNCwxLCJqIiwyXV0=) ::: :::{.remark} Uniqueness follows from the usual proof for universal objects. Existence: let \[ U(L) \da T(L) / J, \qquad J \da \gens{x\tensor y - y\tensor x - [xy] \st x,y\in L} .\] Warning: $J$ is a two-sided ideal, but is not homogeneous! One can form the required map: \begin{tikzcd} && {T(L)} \\ L \\ && {U(L)} \\ \\ && A \arrow["{i_*}"', from=2-1, to=1-3] \arrow["\pi", from=1-3, to=3-3] \arrow["{\exists ! \psi}", dashed, from=3-3, to=5-3] \arrow["{j\in \Lie\Alg(L, A)}"', from=2-1, to=5-3] \arrow["i"', from=2-1, to=3-3] \end{tikzcd} > [Link to Diagram](https://q.uiver.app/?q=WzAsNCxbMCwxLCJMIl0sWzIsMCwiVChMKSJdLFsyLDIsIlUoTCkiXSxbMiw0LCJBIl0sWzAsMSwiaV8qIiwyXSxbMSwyLCJcXHBpIl0sWzIsMywiXFxleGlzdHMgISBcXHBzaSIsMCx7InN0eWxlIjp7ImJvZHkiOnsibmFtZSI6ImRhc2hlZCJ9fX1dLFswLDMsImpcXGluIFxcTGllXFxBbGcoTCwgQSkiLDJdLFswLDIsImkiLDJdXQ==) This satisfies $\psi(x\tensor y - y\tensor x - [xy]) = j(x) j(y) - j(y)j(x) - j([xy]) = 0$ using the properties of $j$. $\phi$ is unique because $U(L)$ is generated by 1 and $\im i$, since $T(L)$ is generated by 1 and the image of $L = T^1(L)$. ::: :::{.remark} If $L$ is abelian, $U(L) = S(L)$ is the symmetric algebra. Note that $J \subseteq \bigoplus _{n\geq 1} T^n(L)$ so $\FF = T^0(L)$ maps isomorphically into $U(L)$ under $\pi$. So $\FF\injects U(L)$, meaning $U(L) \neq 0$, although we don't yet know if $L$ injects into $U(L)$. ::: :::{.theorem title="Poincaré-Birkhoff-Witt (PBW) Theorem"} Let $L$ be a Lie algebra with basis $\ts{x_k}_{k\in K}$, and filter $T(L)$ by $T_m \da \bigoplus _{i\leq m} T^i (L)$. Then $T_m$ is the span of words of length at most $m$ in the basis elements $x_k$. Note $T_m \cdot T_n \subseteq T_{m+n}$, and the projection $\pi: T(L) \surjects U(L)$ induces an increasing filtration $U_0 \subseteq U_1 \subseteq \cdots$ of $U(L)$. Let $G^m \da U_m / U_{m-1}$ be the $m$th graded piece. The product on $U(L)$ induces a well-defined product $G^m \times G^n \to G^{m+n}$ since $U_{m-1}\times U_{n-1} \subseteq U_{m+n-2} \subseteq U_{m+n-1}$. Extending this bilinearly to \( \bigoplus _{m\geq 0} G^m \) to form the associated graded algebra of $U(L)$. > Note that this construction generally works for any filtered algebra where the multiplication is compatible with the filtration. ::: :::{.example title="?"} Let $L \da \liesl_2(\CC)$ with ordered basis $\ts{x,h,y}$. Then $y\tensor h\tensor x\in T^3(L)$ -- denote the image of this monomial in $U(L)$ by $yhx \in U_3$. We can reorder this: \[ yhx &= hyx + [yh]x &= hyx + 2yx \qquad \in U_3 + U_2 \\ ,\] so in $G^3$ we have $yhx=hyx$. This is a general feature: reordering introduces error terms of lower degree, which are quotiented out. 
Continuing, \[ hyx + 2yx &= hxy + h[yx] + 2xy + 2[yx] \\ &= hxy - h^2 + 2xy - 2h \\ &= xhy + [hx]y - h^2 + 2xy - 2h \\ &= xhy + 2xy - h^2 + 2xy - 2h \\ &= xhy + 4xy - h^2 - 2h \qquad \in U_3 + U_2 + U_2 + U_2 .\] ::: # Wednesday, November 02 :::{.remark} Clarification from last time: for $L\in \Lie\falg$ over $\FF$ an arbitrary field: \begin{tikzcd} && {T(L)} & {\trianglerighteq J \da \gens{x\tensor y-y\tensor x -[xy] \st x,y\in L = T^1(L)}} \\ L \\ && {U(L)} \\ && { A} \arrow["{i_0}", from=2-1, to=1-3] \arrow["i"', from=2-1, to=3-3] \arrow["\pi", from=1-3, to=3-3] \arrow["{\exists \psi}"', dashed, from=3-3, to=4-3] \arrow["{\exists ! \phi}"', curve={height=-30pt}, dashed, from=1-3, to=4-3] \arrow["j"', from=2-1, to=4-3] \end{tikzcd} > [Link to Diagram](https://q.uiver.app/?q=WzAsNSxbMCwxLCJMIl0sWzIsMiwiVShMKSJdLFsyLDAsIlQoTCkiXSxbMywwLCJcXHRyaWFuZ2xlcmlnaHRlcSBKIFxcZGEgXFxnZW5ze3hcXHRlbnNvciB5LXlcXHRlbnNvciB4IC1beHldIFxcc3QgeCx5XFxpbiBMID0gVF4xKEwpfSJdLFsyLDMsIiBBIl0sWzAsMiwiaV8wIl0sWzAsMSwiaSIsMl0sWzIsMSwiXFxwaSJdLFsxLDQsIlxcZXhpc3RzIFxccHNpIiwyLHsic3R5bGUiOnsiYm9keSI6eyJuYW1lIjoiZGFzaGVkIn19fV0sWzIsNCwiXFxleGlzdHMgISBcXHBoaSIsMix7ImN1cnZlIjotNSwic3R5bGUiOnsiYm9keSI6eyJuYW1lIjoiZGFzaGVkIn19fV0sWzAsNCwiaiIsMl1d) Then $i$ is a Lie algebra morphism since $i([xy]) = i(x)i(y) - i(x)i(y) = [i(x) i(y)]$. We know \[ 0 &= \pi(x\tensor y - y\tensor x - [xy]) \\ &= \pi(i_0(x) i_0(y) - i_0(y) i_0(x) - i_0([xy])) \\ &= i(x) i(y) - i(y) i(x) - i([xy]) .\] ::: :::{.remark} Recall that we filtered $0 \subseteq U_1 \subseteq \cdots \subseteq U(L)$ and defined the associated graded $G^m = U_m / U_{m-1}$ and $G(L) \da \bigoplus _{m\geq 0} G^m$, and we saw by example that $yhx = hyx = xhy$ in $G^3(\liesl_2)$. There is a projection map $T^m(L) \to U(L)$ whose image is contained in $U_m$, so there is a composite map \[ T^m(L) \to U_m \to U_m/U_{m-1} = G^m .\] Since $T(L) = \bigoplus _{m\geq 0} T^m(L)$, these can be combined into an algebra morphism $T(L) \to G(L)$. It's not hard to check that this factors through $S(L) \da T(L)/\gens{x\tensor y - y\tensor x\st x,y\in L}$ since $x\tensor y = y\tensor x + [xy]$ and the $[xy]$ term is in lower degree. So this induces $w: S(L) \to G(L)$, and the PBW theorem states that this is an isomorphism of graded algebras. ::: :::{.corollary title="C"} Let $\ts{x_k}_{k\in K}$ be an ordered basis for $L$, then the collection of monomials $x_{k_1}\cdots x_{k_m}$ for $m\geq 0$ where $k_1\leq \cdots k_m$ is a basis for $U(L)$. ::: :::{.proof title="?"} The collection of such monomials of length exactly $n$ forms a basis for $S^m(L)$, and via $w$, a basis for $G^m(L)$. In particular these monomials form a linearly independent in $U_m/U_{m-1}$, since taking quotients can only increase linear dependencies, and hence these are linearly independent in $U_m$ and $U(L)$. By induction on $m$, $U_{m-1}$ has a basis consisting of all monomials of length $\leq m-1$. We can then get a basis of $U_m$ by adjoining to this basis of $U_{m-1}$ any preimages in $U_m$ of basis elements for the quotient $U_m/U_{m-1}$. So a basis for $U_m$ is all ordered monomials of length $\leq m$. Since $U(L) = \union_{m\geq 0} U_m$, taking the union of bases over all $m$ yields the result. ::: :::{.corollary title="B"} The canonical map $i: L\to U(L)$ is injective. This follows from taking $m=1$ in the previous corollary. 
::: :::{.corollary title="D"} Let $H\leq L$ be a Lie subalgebra and extend an ordered basis for $H$, say $\ts{h_1, h_2,\cdots}$, and extend it to an ordered basis $\ts{h_1,h_2,\cdots, x_1,x_2,\cdots}$. Then the injection $H\injects L$ induces an injective morphism $U(H) \injects U(L)$. Moreover, $U(L)\in \modsleft{U(H)}^\free$ with a basis of monomials $x_{k_1}x_{k_2}\cdots x_{k_m}$ for $m\geq 0$. This follows directly from corollary C. ::: :::{.remark} We'll skip 17.4, which proves the PBW theorem. The hard part: linear independence, which is done by constructing a representation of $U(L)$ in another algebra. ::: ## 17.5: Free Lie algebras :::{.definition title="Free Lie algebras"} Let $L \in \Lie\falg$ which is generated as a Lie algebras (so allowing commutators) by a subset $X \subseteq L$.[^sl2_free_gen] We say $L$ is **free on $X$** and write $L = L(X)$ if for any set map $\phi: X\to M$ with $M\in \Lie\falg$ there exists an extension: \begin{tikzcd} & {} && \Lie\falg \\ \Set & X && L \\ \\ &&& M \arrow[hook, from=2-2, to=2-4] \arrow["{\exists !\psi}", dashed, from=2-4, to=4-4] \arrow["\phi"', from=2-2, to=4-4] \end{tikzcd} > [Link to Diagram](https://q.uiver.app/?q=WzAsNixbMSwxLCJYIl0sWzMsMSwiTCJdLFszLDMsIk0iXSxbMywwLCJcXExpZVxcZmFsZyJdLFsxLDBdLFswLDEsIlxcU2V0Il0sWzAsMSwiIiwwLHsic3R5bGUiOnsidGFpbCI6eyJuYW1lIjoiaG9vayIsInNpZGUiOiJ0b3AifX19XSxbMSwyLCJcXGV4aXN0cyAhXFxwc2kiLDAseyJzdHlsZSI6eyJib2R5Ijp7Im5hbWUiOiJkYXNoZWQifX19XSxbMCwyLCJcXHBoaSIsMl1d) [^sl2_free_gen]: Note that $\liesl_2$ has a basis $\ts{x,h,y}$ but is freely generated by $x,y$ since $h=[xy]$. ::: :::{.remark} Existence: - Let $V = V(X)$ be the free $\fmod$ on $X$. - Let $L(X) \leq T(V)_L$[^notationsubL] be the Lie subalgebra generated by $X$ (or equivalently by $V$), which has elements like \[ x,y,z\in X,\qquad x\tensor y-y\tensor x, \qquad z(x\tensor y-y\tensor x) - (x\tensor y - y\tensor x)z,\,\cdots .\] - Check that $L(X)$ is free on $X$ by letting $\phi: X\to M$ for $M$ a Lie algebra, then by the universal property of $V(X)$ we get a unique linear map $\tilde \phi: V(X) \to M$ extending $\phi$: \begin{tikzcd} {V(X)} && {T(V)} \\ \\ M \\ \\ {U(M)} \arrow["\phi", from=1-1, to=3-1] \arrow[hook, from=3-1, to=5-1] \arrow[hook, from=1-1, to=1-3] \arrow["{\exists ! \tilde \phi \text{ (an algebra morphism)}}", dashed, from=1-3, to=5-1] \end{tikzcd} > [Link to Diagram](https://q.uiver.app/?q=WzAsNCxbMCwwLCJWKFgpIl0sWzIsMCwiVChWKSJdLFswLDIsIk0iXSxbMCw0LCJVKE0pIl0sWzAsMiwiXFxwaGkiXSxbMiwzLCIiLDAseyJzdHlsZSI6eyJ0YWlsIjp7Im5hbWUiOiJob29rIiwic2lkZSI6InRvcCJ9fX1dLFswLDEsIiIsMix7InN0eWxlIjp7InRhaWwiOnsibmFtZSI6Imhvb2siLCJzaWRlIjoidG9wIn19fV0sWzEsMywiXFxleGlzdHMgISBcXHRpbGRlIFxccGhpIFxcdGV4dHsgKGFuIGFsZ2VicmEgbW9ycGhpc20pfSIsMCx7InN0eWxlIjp7ImJvZHkiOnsibmFtZSI6ImRhc2hlZCJ9fX1dXQ==) One checks that $\tilde \phi$ restricts to a Lie algebra morphisms $\tilde \phi: L(X) \to U(M)$ whose image is the Lie subalgebra of $U(M)$ generated by $M$ -- but this subalgebra is precisely $M$, since e.g. $U(M)\ni x\tensor y - y\tensor x = [xy]\in M$. Thus we can view $\tilde \phi$ as a map $\tilde \phi: L(X)\to M$. [^notationsubL]: $W_L$ is $W$ made into a Lie algebra via $[xy] = xy-yx$. ::: :::{.remark} One can check that $U(L(X)) = T(V(X))$. 
::: # $\S 18$: Generators and Relations for Simple Lie Algebras (Friday, November 04) :::{.remark} Recall that the free Lie algebra of a set $X$, $L(X)$ satisfies a universal property: \begin{tikzcd} X && {L(X)} \\ \\ && M \arrow["\iota", hook, from=1-1, to=1-3] \arrow["{\forall \phi}"', from=1-1, to=3-3] \arrow["{\exists ! \tilde \phi}", dashed, from=1-3, to=3-3] \end{tikzcd} > [Link to Diagram](https://q.uiver.app/?q=WzAsMyxbMCwwLCJYIl0sWzIsMCwiTChYKSJdLFsyLDIsIk0iXSxbMCwxLCJcXGlvdGEiLDAseyJzdHlsZSI6eyJ0YWlsIjp7Im5hbWUiOiJob29rIiwic2lkZSI6InRvcCJ9fX1dLFswLDIsIlxcZm9yYWxsIFxccGhpIiwyXSxbMSwyLCJcXGV4aXN0cyAhIFxcdGlsZGUgXFxwaGkiLDAseyJzdHlsZSI6eyJib2R5Ijp7Im5hbWUiOiJkYXNoZWQifX19XV0=) ::: ## $\S 18.1$: Relations satisfied by $L$ :::{.definition title="Relations"} Given an arbitrary $L\in \liealg$ and fix a set $X$ of generators for $L$ and form $LX()$, then there is a Lie algebra morphism $\pi: L(X) \surjects L$ which is surjective since $X$ generates $L$. Defining $R\da \ker \pi$, one has $L \cong L(X)/R$, so $R$ is called the **ideal of relations**. ::: :::{.remark} Let $L \in \liealg^{\fd, \ss}_{\CC}$, let $H \subseteq L$ be a maximal toral subalgebra, and $\Phi$ its root system. Fix a base $\Delta = \ts{ \alpha_1, \cdots, \alpha_\ell} \subseteq \Phi$. Recall \[ \inp{ \alpha_j}{ \alpha_i} \da {2 (\alpha_j, \alpha_i) \over (\alpha_i, \alpha_i)} = \alpha_j(h_i),\qquad g_i \da h_{\alpha_i} = {2 \alpha_i \over (\alpha_i, \alpha_i)} .\] The root strings are of the form $\beta - r \alpha, \cdots, \beta, \cdots, \beta+ q \alpha$ where $r-q = \beta(h_ \alpha)$. For any $i$ we can fix a standard $\liesl_2$ triple $\ts{x_i, h_i, y_i}$ such that $x_i\in L_{ \alpha_i}, y_i \in L_{- \alpha_i}, h_i\in [x_i y_i]$. ::: :::{.proposition title="Serre relations"} $L$ is generated as a Lie algebra by the $3\ell$ generators $X\da \ts{x_i, h_i, y_i \st 1\leq i\leq \ell}$ subject to at least the following relations: - S1: $[h_i h_j] = 0$, - S2: $[x_i y_j] = \delta_{ij} h_i$, - S3: $[h_i x_j] = \inp{ \alpha_j}{\alpha_i} x_j$ and $[h_i y_j] = - \inp{ \alpha_j}{ \alpha_i} y_j$. - $\text{S}_{ij}^+$: $\ad_{x_i}^{-\inp{ \alpha_j}{\alpha_i} + 1}(x_j) = 0$ for $i\neq j$ - $\text{S}_{ij}^-$: $\ad_{y_i}^{-\inp{ \alpha_j}{\alpha_i} + 1}(x_j) = 0$ for $i\neq j$ ::: :::{.proof title="?"} Recall that differences of simple roots are never roots, since the coefficients have mixed signs. Since \( \alpha_i - \alpha_j \not\in \Phi \), we have $[x_i y_j] = 0$ for $i\neq j$ since it would have to be in $L_{\alpha_i - \alpha_j}$. Consider the $\alpha_i$ root string through $\alpha_j$: we have $r=0$ from above, and the string is \[ \alpha, \alpha+ \alpha_i, \cdots, \alpha_j - \inp{\alpha_j}{\alpha_i} \alpha_i \] since $L_\beta = 0$ for $\beta \da \alpha_j - \qty{ \inp{\alpha_j}{ \alpha_i} + 1} \alpha_i$. The relations for $S_{ij}^\pm$ follow similarly. ::: :::{.remark} Note that these relations are all described in a way that only involves the Cartan matrix of $\Phi$, noting that changing bases only permutes its rows and columns. ::: :::{.theorem title="Serre"} These five relations form a complete set of defining relations for $L$, i.e. $L \cong L(X)/R$ where $R$ is the ideal generated by the Serre relations above. Moreover, given a root system $\Phi$ and a Cartan matrix, one can define a Lie algebra using these generators and relations that is finite-dimensional, simple, and has root system $\Phi$. 
::: ## $\S 18.2$: Consequences of Serre relations S1, S2, S3 :::{.remark} Fix an irreducible root system $\Phi$ of rank $\ell$ with Cartan matrix $A$. Let $\hat L \da L(\hat X)$ where $\hat X \da \ts{\hat x_i, \hat h_i, \hat y_i \st 1\leq i\leq \ell}$. Let $\hat K \normal \hat L$ be the 2-sided ideal generated by the relations S1, S2, S3. Let $L_0 \da \hat L/\hat K$ and write $\pi$ for the quotient map $\hat L\to L_0$ -- note that $L_0$ is infinite-dimensional, although it's not yet clear that $L_0\neq 0$. We'll study $L_0$ by defining a representation of it, which is essentially the adjoint representation of $L_0$ acting on $\ts{y_i}$. ::: :::{.remark} Recall that a representation of $M\in \liealg$ is a morphism $\phi\in \liealg(M, \liegl(V))$ for $V\in\fmod$. This yields a diagram \begin{tikzcd} {} && \Assoc\Alg \\ M && {U(M)} \\ \\ && {\Endo(V)} \arrow["\iota", hook, from=2-1, to=2-3] \arrow["\phi"', from=2-1, to=4-3] \arrow["{\exists !\phi}", dashed, from=2-3, to=4-3] \end{tikzcd} > [Link to diagram](https://q.uiver.app/?q=WzAsNSxbMCwxLCJNIl0sWzIsMSwiVShNKSJdLFsyLDMsIlxcZW5kbyhWKSJdLFsyLDAsIlxcQXNzb2NcXEFsZyJdLFswLDBdLFswLDEsIlxcaW90YSIsMCx7InN0eWxlIjp7InRhaWwiOnsibmFtZSI6Imhvb2siLCJzaWRlIjoidG9wIn19fV0sWzAsMiwiXFxwaGkiLDJdLFsxLDIsIlxcZXhpc3RzICFcXHBoaSIsMCx7InN0eWxlIjp7ImJvZHkiOnsibmFtZSI6ImRhc2hlZCJ9fX1dXQ==) Conversely given an algebra morphism $\tilde \phi: U(M) \to \Endo(V)$, and restricting $\tilde \phi$ to $M \subseteq U(M)$ gives a Lie algebra morphism $\phi: M\to \Endo(V) = \liegl(V)$. This representations of $M$ (using $\liegl(V)$) correspond to associative algebra representations of $U(M)$ (using $\Endo(V)$). Since $U(M) = T(V(\hat X))$, using the various universal properties, having a representation $V$ of $\hat L$ is equivalent to having a set map $\hat X\to \Endo(V)$, i.e. elements of $\hat X$ should act linearly on $V$. ::: :::{.remark} Let $V$ be the tensor algebra on a vector space with basis $\ts{v_1,\cdots, v_\ell}$, thinking of each $v_i$ being associated to $\hat y_i$. Write $v_1 v_2\cdots v_t \da v_1 \tensor v_2\tensor\cdots\tensor v_t$, and define elements of $\Endo(V)$ by - $\hat h_j \cdot 1 \da 0$, - $\hat h_j \cdot v_{i_1}\cdots v_{i_t} \da -\qty{ \inp{ \alpha_{i_1}}{\alpha_j} + \cdots \inp{\alpha_{i_t}}{\alpha_j} }v_{i_1}\cdots v_{i_t}$, - $\hat{y}_j\cdot v_{i_1}\cdots v_{i_t} \da v_j v_{i_1} \cdots v_{i_t}$ for $t\geq 0$, - $\hat x_j \cdot 1 \da 0$, - $\hat x_j \cdot v_i \da 0$ for all $i$, - $\hat x_j \cdot v_{i_1}\cdots v_{i_t} \da v_{i_1} \qty{ \hat x_j v_{i_2} \cdots v_{i_t}} - \delta_{i_1, j}\qty{\inp{\alpha_{i_2} }{\alpha_j} + \cdots + \inp{\alpha_{i_2} }{\alpha_j} }v_{i_2}\cdots v_{i_t}$ for $t\geq 2$. ::: # Monday, November 07 :::{.remark} Last time: constructing a semisimple Lie algebra that has a given root system. Setup: - $\Delta = \ts{\alpha_1,\cdots, \alpha_l}$. - $\hat L$ the free Lie algebra on $\ts{\hat x_i, \hat h_i, \hat y_i}_{1\leq i\leq l}$. - $\hat K$ the ideal generated by the Serre relations. - $L_0 \da \hat L/\hat K$ with quotient map $\pi$. - $\hat \phi: \hat L\to \liegl(V)$ a representation we constructed. - $\hat H$ the free Lie algebra on $\ts{h_i}$. 
::: :::{.theorem title="?"} $\hat K \subseteq \ker \hat\phi$, so $\hat\phi$ induces a representation $\phi$ of $L_0$ on $\liegl(V)$ \begin{tikzcd} {\hat L} && {\liegl(V)} \\ \\ & {L_0} \arrow["\hat\phi", from=1-1, to=1-3] \arrow["\pi"', from=1-1, to=3-2] \arrow["{\exists \phi}"', dashed, from=3-2, to=1-3] \end{tikzcd} > [Link to Diagram](https://q.uiver.app/?q=WzAsMyxbMCwwLCJcXGhhdCBMIl0sWzIsMCwiXFxsaWVnbChWKSJdLFsxLDIsIkxfMCJdLFswLDEsIlxcaGF0XFxwaGkiXSxbMCwyLCJcXHBpIiwyXSxbMiwxLCJcXGV4aXN0cyBcXHBoaSIsMix7InN0eWxlIjp7ImJvZHkiOnsibmFtZSI6ImRhc2hlZCJ9fX1dXQ==) ::: :::{.proof title="?"} Straightforward but tedious checking of all relations, e.g. \[ \hat\phi(\hat h_i) \circ \hat\phi(\hat x_j) - \hat \phi(\hat x_j )\hat\phi(\hat h_i) = \inp{ \alpha_j}{\alpha_i} \hat \phi(\hat x_j) .\] ::: :::{.theorem title="?"} In $L_0$, the $h_i$ form a basis for an $\ell\dash$dimensional abelian subalgebra $H$ of $L_0$, and moreover $L_0 = Y \oplus H \oplus X$ where $Y, X$ are the subalgebras generated by the $x_i$ and $y_i$ respectively. ::: :::{.proof title="?"} **Steps 1 and 2**: :::{.claim} $\pi(\hat H) = H$ is $\ell\dash$dimensional. ::: Clearly the $\hat h_i$ span an $\ell\dash$dimensional subspace of $\hat L$, so we need to show that $\pi$ restricts to an isomorphism $\pi: \hat H\iso H$. Suppose $\hat h \da \sum_{j=1}^\ell c_j \hat h_j \in \ker \pi$, so $\hat\phi (\hat h) = 0$. Thus \[ 0 = \hat h \cdot v_i = \sum_{j} c_j \hat{h}_j \cdot v_i = - \sum_j c_j \inp{ \alpha_i}{\alpha_j} = - \sum_j a_{ij} c_j \qquad \forall i ,\] so $A\vector c = 0$ where $A$ is the Cartan matrix, and so $\vector c = \vector 0$ since $A$ is invertible (since it was essentially a Gram matrix). **Step 3**: Now $\sum \FF x_i + \sum \FF h_i + \sum \FF y_i \mapsvia{\pi} L_0$ maps isomorphically to $L_0$, and S2, S3 show that for each $i$. Then $\FF x_i + \FF h_i + \FF y_i$ is a homomorphic image of $\liesl_2$, which is simple if $\characteristic \FF \neq 2$. Note $\pi( \hat h_i) = h_i \neq 0$ in $L_0$ by (1), so this subspace of $L_0$ is isomorphic to $\liesl_2(\FF)$. In particular $\ts{x_i, h_i, y_i}$ is linearly independent in $L_0$ for each fixed $i$. Supposing $0 = \sum_{j=1}^\ell (a_j x_j + b_j h_j + c_j y_j)$, applying $\ad_{L_0, h_i}$ for each $i$ to obtain \[ 0 = \sum_{j=1}^\ell \qty{ a_j\inp{\alpha_j}{\alpha_i} x_j + b_j 0 - c_j \inner{ \alpha_j }{ \alpha_i } y_j } = \sum_{j=1}^\ell \inner{ \alpha_j }{ \alpha_i } (a_j x_j - x_y y_j) ,\] and by invertibility of $A$ we have $a_j x_j - c_j y_j = 0$ for each $j$. So $a_j = c_j = 0$ for all $j$, and $\sum b_j h_j = 0$ implies $b_j = 0$ for all $j$ from (1). **Step 4**: $H = \sum_{j=1}^\ell \FF h_j$ is an $\ell\dash$dimensional abelian subalgebra of $L_0$ by (1) and S1. **Step 5**: Write $[x_{i_1}\cdots x_{i_t}] \da [x_{i_1} [ x_{i_2} [ \cdots [x_{i_{t-1}} x_{i_t}] \cdots ]] \in X$ for an iterated bracket, taken by convention to be bracketing from the right. We have \[ \ad_{L_0, h_j}([x_{i_1} \cdots x_{i_t}] ) = \qty{ \inner{ \alpha_{i_1} }{ \alpha_j } + \cdots + \inner{ \alpha_{i_t} }{ \alpha_j } } [x_{i_1} \cdots x_{i_t}] \qquad t\geq 1 ,\] and similarly for $[y_{i_1}\cdots y_{i_t}]$. **Step 6**: For $t\geq 2$, $[y_j [ x_{i_1} \cdots x_{i_t} ] ] \in X$, and similarly with the roles of $x_i, y_i$ reversed. This follows from the fact that $\ad_{L_0, y_j}$ acts by derivations, and using S2 and S3. **Step 7**: It follows from steps 4 through 6 that $Y+H+X$ is a subalgebra of $L_0$. 
One shows that $[[ x_{i_1} \cdots x_{i_t}], [y_{i_1} \cdots y_{i_t} ]]$, which comes down to the Jacobi identity and induction on $s+t$. E.g. \[ [ [x_1 x_2], [y_3 y_4] ] = [x_1[ x_2 [y_3 y_4 ] ] ] - [x_2 [x_1 [y_3 y_4]] \in [x_1, \FF y_3 + \FF y_4] + \cdots \in H + \cdots ,\] which lands in $H$ since there are as many $x_i$ as $y_i$, whereas if there are more $x_i$ than $y_i$ this lands in $X$, and so on. Since $Y+H+X$ is a subalgebra that contains the generators $x_i, h_i, y_i$ of $L_0$, it must be equal to $L_0$. **Step 8**: The decomposition $L_0 = X + H + Y$ is a direct sum decomposition of $L_0$ into submodule for the adjoint action of $H$. Use the computation in the previous step to see that every element of $X$ is a linear combination of elements $[x_{i_1}\cdots x_{i_t}]$ and similarly for $Y$. These are eigenvectors for the action of $\ad_H \actson L_0$ by (5), and eigenfunctions for $X$ have the form $\lambda = \sum_{i=1}^\ell c_i \alpha_i$ with $c_i \in \ZZ_{\geq 0}$. The \( \lambda_i \) is referred to as a **weight**, and $c_i$ is the number of times $i$ appears is an index in $i_1,\cdots, i_t$. So every weight space $X_\lambda$ is finite-dimensional, and the weights of $Y$ are $-\lambda$. Since the weights in $X, H, Y$ are all different, their intersections must be trivial and the sum is direct. ::: :::{.remark} $L_0 = Y \bigoplus H \bigoplus X$ is known as the **triangular decomposition**, where the $x_i$ are on the super diagonal and bracket to upper-triangular elements, and the $y_i$ are their transposes. ::: # Wednesday, November 09 :::{.remark} Progress so far: we start with the data of an irreducible root system $\Phi \contains \Delta = \tsl \alpha 1 \ell$ and Cartan matrix $A = (\inp{ \alpha_i}{\alpha_j})$ and Weyl group $W$. We set $L_0 \da {\gens{x_i, y_i, h_i \st 1\leq i\leq \ell} \over \gens{\text{S1, S2, S3}}} = Y \oplus H \oplus X$. Letting $h\in H$ act by $\ad_h$, we get weight spaces $(L_0)_{ \lambda} \da \ts{v\in L_0 \st [hv] = \lambda(h) v \, \forall h\in H}$. ::: ## $\S$ 18.3: Serre's Theorem :::{.remark} For $i\neq j$, set \[ y_{ij} \da (\ad_{y_i})^{- \inp{\alpha_j}{ \alpha_i} + 1}(y_j) ,\] and similarly for $x_{ij}$. Recall \( \alpha_i(h_j) \da \inner{ \alpha_i }{ \alpha_j } \). ::: :::{.lemma title="A"} \[ \ad_{x_k}(y_{ij}) = 0 \qquad \forall i\neq j, \forall k .\] ::: :::{.proof title="?"} **Case 1**: $k\neq i$. In this case, $[x_k y_i] = 0$ and thus \[ (\ad_{x_k}) (\ad_{y_i})^{- \inner{ \alpha_j }{ ga_i } +1 }(y_j) = (\ad_{y_i})^{- \inner{ \alpha_j }{ \alpha_i } +1 } (\ad_{x_k})(y_j) .\] - Case i, $k\neq j$: then $(\ad_{x_k})(y_j) = 0$. - Case ii, $k=j$: then $(\ad_{x_j})(y_j) = h_j$ and $(\ad_{y_i})(h_j) = \inner{ \alpha_i }{ \alpha_j } y_i$. - Case a, \( \inner{ \alpha_j }{ \alpha_i } \neq 0 \), then \[ (\ad_{y_i})^{- \inner{ \alpha_j }{ \alpha_i } +1 } (\ad_{x_j}) (y_j) = \inner{ \alpha_i }{ \alpha_j } (\ad_{y_i})^{- \inner{ \alpha_j }{ \alpha_i } }(y_i) = 0 .\] - Case b, \( \inner{ \alpha_j }{ \alpha_i } = 0 \), then \( \inner{ \alpha_i }{ \alpha_j } =0 \). In this case we have $(\ad_{y_i})^1 (h_j) = \inner{ \alpha_i }{ \alpha_j } y_i = 0$. **Case 2**: $k=i$. In this case, we saw that for any fixed $i$, $\ts{x_i, h_i, y_i}$ spans a standard $\liesl_2$ triple in $L_0$, so consider the $\liesl_2\dash$submodule of $Y_J \leq L_0$ generated by $y_j$. Since $i\neq j$, we know $[x_i y_j] = 0$, so $y_j$ is a maximal vector for $Y_j$ with weight $m \da - \inner{ \alpha_j }{ \alpha_i }$. 
One can show by induction on $t$ that the following formula holds: \[ (\ad_{x_i}) (\ad_{y_i})^t (y_j) = t (m-t+1) (\ad_{y_i})^{t-1} (y_j) \qquad t\geq 1 .\] So in particular $(\ad_{x_i}) (\ad_{y_i})^{m+1}(y_j) = 0$, and the LHS is $y_{ij}$. ::: :::{.definition title="Locally nilpotent and the exponential"} An endomorphism $x\in \Endo(V)$ is **locally nilpotent** if for all $v\in V$ there exists some $n$ depending on $v$ such that $x^n \cdot v = 0$. If $x$ is locally nilpotent, then define the **exponential** as \[ \exp(x) = \sum_{k\geq 0} {1\over k!} x^k = 1 + x + {1\over 2}x^2 + \cdots \qquad \in \Endo(V) ,\] which is in fact an automorphism of $V$ since its inverse is $\exp(-x)$. ::: :::{.lemma title="B"} Suppose $\ad_{x_i}, \ad_{y_i}$ are locally nilpotent on $L_0$ and define \[ \tau_i \da \exp(\ad_{x_i})\circ \exp(\ad_{-y_i}) \circ \exp(\ad_{x_i}) .\] Then $\tau_i((L_0)_\lambda) = (L_0)_{s_i (\lambda)}$ where $s_i \da s_{ \alpha_i} \in W$ for $\alpha_i\in \Phi$. Here \( \lambda\in H\dual\cong \CC\gens{ \alpha_1, \cdots, \alpha_\ell} \) since \( H = \CC\gens{h_1,\cdots, h_{\ell}} \), using that $A$ is invertible. We use the formula \( s_{\alpha_i}(\alpha_j) = \alpha_j - \inner{ \alpha_j }{ \alpha_i } \alpha_i \) and extending linearly to $H\dual$ as done previously. ::: :::{.proof title="?"} Omitted. See $\S 14.3$ and $\S 2.3$ for a very similar calculation. ::: :::{.theorem title="Serre"} The Lie algebra $L$ generated by the $3\ell$ elements $\ts{x_i, h_i, y_i}_{1\leq i\leq \ell}$ subject to relations S1-S3 and the remaining two relations $S_{ij}^{\pm}$ (which hold in any finite dimensional semisimple Lie algebra) is a finite dimensional semisimple Lie algebra with maximal torus spanned by $\ts{h_i}_{1\leq i\leq \ell}$ and with corresponding root system $\Phi$. ::: ## Proof of Serre's Theorem :::{.proof title="?"} By definition, $L \da L_0/K$ where $K\normal L_0$ is generated by the elements $x_{ij}, y_{ij}$ where $i\neq j$. Recall that $X, Y\leq L_0$ are the subalgebras generated by the $x_i$ and $y_i$ respectively, so let $I$ (resp. $J$) be the ideal in $X$ (resp. $Y$) generated by the $x_{ij}$ (resp. $y_{ij}$) for $i\neq j$. Clearly $I, J \subseteq K$. :::{.claim title="Step 1"} \[ I, J \normal L_0 .\] ::: :::{.proof title="of Step 1"} We'll prove this for $J$, and $I$ is similar. Note $J\normal Y$ and write $J = \gens{y_{ij} \st i\neq j}$. Fix $1\leq k\leq \ell$, then $(\ad_{y_k}) (y_{ij}) \in J$ by definition. Recall $y_{ij} = (\ad_{y_i})^{- \inner{ \alpha_i }{ \alpha_j } +1 }(y_{ij})$. Note $(\ad_{h_k})(y_{ij}) = c_{ijk} y_{ij}$ for some constant $c_{ijk} \in \ZZ$, and $(\ad_{x_k})(y_{ij}) = 0$ by lemma A above. Since $x_k, h_k, y_k$ generate $L_0$, we have $[L_0, y_{ij}] \subseteq J$. Using the Jacobi identity and that $\ad_z$ is a Lie algebra derivation for $z\in L_0$, it follows that $[L_0, J] \subseteq J$. > This essentially follows from $[h_\ell, Y] \subseteq Y$ and $[x_\ell, Y] \subseteq H + Y$, and bracketing these against $y_{ij}$ lands in $J$. ::: :::{.claim title="Step 2"} \[ K = I + J .\] ::: :::{.proof title="of Step 2"} We have $I+J \subseteq K$, but $I+J \normal L_0$ by claim 1 and it contains the generators of $K$ -- since $K$ is the smallest such ideal, $K \subseteq I+J$. ::: :::{.observation title="Step 3"} We have a decomposition $L_0 = Y \oplus H \oplus X$ as modules under $\ad_H$, and $K = J \oplus 0 \oplus I$. Taking the quotient yields $L \da L_0/K = Y/J \oplus H \oplus X/I \da N^- \oplus H \oplus N^+$. 
:::

:::{.observation title="Step 4"}
As in the proof last time, $\ts{x_i, h_i, y_i} \subseteq L$ spans a copy of $\liesl_2$. We deduce that $\sum_{1\leq i \leq \ell} \FF x_i + \FF h_i + \FF y_i \subseteq L_0$ maps isomorphically into $L$, so we can identify $x_i, h_i, y_i$ with their images in $L$, which are still linearly independent and still generate $L$ as a Lie algebra.
:::

:::{.observation title="Step 5"}
For \( \lambda\in H\dual \), set \( L_ \lambda\da \ts{z\in L \st [hz] = \lambda(h) z\, \forall h\in H } \) and write \( \lambda > 0 \iff \lambda \in \ZZ_{\geq 0} \Delta \), and similarly define $\lambda < 0$. View \( \alpha_i \in H\dual \), extended linearly as before. Note $H = L_0, N^+ = \sum_{\lambda> 0} L_{\lambda}, N^- = \sum_{\lambda<0} L_ \lambda$, and thus
\[
L = N^- \oplus H \oplus N^+ ,\]
which is a direct sum since the eigenvalues in different parts are distinct.
:::
:::

# Serre's Theorem, Continued (Friday, November 11)

:::{.proof title="of Serre's theorem, continued"}
Recall that we have
\[
L = N^- \oplus H \oplus N^+ \da Y/\gens{(S_{ij}^-)} \oplus \CC\gens{h_1,\cdots, h_\ell} \oplus X/\gens{ (S_{ij}^+) } .\]

:::{.remark title="Step 6"}
For $1\leq i\leq \ell$, note $\ad_{L, x_i}$ (and similarly $\ad_{L, y_i}$) is locally nilpotent on $L$. Let $M \subseteq L$ be the subspace of elements on which $\ad_{x_i}$ acts nilpotently. By the Leibniz rule, $(\ad_{x_i})^{m+n}([uv]) = 0$ whenever $(\ad_{x_i})^m(u) = 0$ and $(\ad_{x_i})^n(v) = 0$, so $M \leq L$ is a Lie subalgebra. By the Serre relations, $(\ad_{x_i})^2(h_j) = 0$ and $(\ad_{x_i})^3(y_j) = 0$, so the generators of $L$ are in $M$ and thus $L = M$.
:::

:::{.remark title="Step 7"}
Defining $\tau_i \da \exp(\ad_{x_i}) \circ \exp(\ad_{-y_i}) \circ \exp(\ad_{x_i}) \in \Aut(L)$, by lemma (B) we have $\tau_i(L_ \lambda) = L_{s_i \lambda}$ where $s_i \da s_{\alpha_i}$ and \( \lambda\in H\dual \).
:::

:::{.remark title="Step 8"}
Let \( \lambda, \mu \in H\dual \) and suppose \( w \lambda = \mu \) for some $w\in W$; we want to show $\dim L_ \lambda = \dim L_{ \mu}$. Note $W$ is generated by simple reflections $s_i$, so it STS this when $w=s_i$, whence it follows from lemma (B).
:::

:::{.remark title="Step 9"}
Clearly $\dim (L_0)_{\alpha_i} = 1$ since it's spanned by $x_i$, and $\dim (L_0)_{k \alpha_i} = 0$ for $k\neq 0, \pm 1$; so $\dim L_{\alpha_i} \leq 1$ and $\dim L_{k \alpha_i} = 0$ for $k\neq 0, \pm 1$. Since $x_i\in L_{\alpha_i}$ has a nonzero image in $L$, $\dim L_{\alpha_i} = 1$.
:::

:::{.remark title="Step 10"}
If \( \beta\in \Phi \), conjugate it to a simple root using \( \beta = w \alpha_i \) with $w\in W, \alpha_i \in \Delta$. By step 8, $\dim L_{ \beta} = 1$ and $L_{k \beta} = 0$ for $k\neq 0, \pm 1$.
:::

:::{.remark title="Step 11"}
Suppose \( L_ \lambda \neq 0 \) where \( \lambda\neq 0 \). Then \( \lambda\in \ZZ_{\geq 0} \Delta \) or \( \ZZ_{\leq 0} \Delta \), i.e. all coefficients have the same sign. Suppose \( \lambda\not\in \Phi \); then \( \lambda\in \ZZ \Phi \) by (10), and exercise 10.10 yields $\exists w\in W$ such that $w \lambda \in \ZZ \Delta$ has both positive and negative coefficients. Thus $w \lambda$ cannot be a weight, and by step 8, $0 = \dim L_{w \lambda} = \dim L_ \lambda$.
:::

:::{.remark title="Step 12"}
Writing $L = N^- \oplus H \oplus N^+$ with $H = L_0$, $N^+ = \sum_{ \lambda > 0} L_{ \lambda} = \sum_{\beta\in \Phi^+} L_{ \beta}$, and $N^- = \sum_{ \lambda < 0} L_{ \lambda} = \sum_{ \beta\in \Phi^-} L_ \beta$, by step 10 we can conclude $\dim L = \ell + \size\Phi < \infty$. This shows that $H$ is toral, i.e.
its elements are ad-semisimple.
:::

:::{.remark title="Step 13"}
We have that $L$ is a finite-dimensional Lie algebra. To show semisimplicity, we need to know $L$ has no nonzero solvable ideals, and as before it's ETS $L$ has no nonzero *abelian* ideals. Suppose \( A \subseteq L \) is an abelian ideal; we WTS $A = 0$. Since $[H,A] \subseteq A$ and $H\actson L$ diagonally, $H\actson A$ diagonally as well and thus
\[
A = (A \intersect H) \oplus \bigoplus _{\alpha\in \Phi} (A \intersect L_{ \alpha}) .\]
If \( A \intersect L_{\alpha} \neq 0 \) for some $\alpha$, then \( A \intersect L_{ \alpha} = L_{\alpha} \), which is 1-dimensional.

> Note: the argument in Humphreys here may not be quite right, so we have to do something different.

In this case, $\exists w\in W$ such that $w \alpha = \alpha_i \in \Delta$ as in step 8, so write $w = s_{i_1}\cdots s_{i_t}$ and set $\tau \da \tau_{i_1}\cdots \tau_{i_t}$, so that $\tau (L_ \alpha) = L_{w \alpha} = L_{\alpha_i}$. Set $A' \da \tau(A)$; then $A'\normal L$ is necessarily an abelian ideal and $A' \intersect L_{\alpha_i} = L_{\alpha_i}$. So we can replace $\alpha$ by a simple root and replace $A$ by $A'$. Then $x_i\in L_{\alpha_i} \subseteq A' \normal L$, so $A'\ni -[y_i, x_i] = h_i$; but $[h_i, x_i] = 2x_i \neq 0$, contradicting that $A'$ is abelian. $\contradiction$

Otherwise $A \intersect L_{\alpha} = 0$ for every $\alpha\in \Phi$, so $A' \da A \subseteq H$. Note $[A', L_{\alpha_j}] \subseteq A' \subseteq H$ and $[A', L_{\alpha_j}] \subseteq L_{\alpha_j}$, forcing $[A', L_{\alpha_j}] = 0$; but $H\actson L_{\alpha_j}$ with eigenvalue \( \alpha_j \), and thus $A' \subseteq \intersect _{j=1}^\ell \ker \alpha_j = 0$ since the \( \alpha_j \) span $H\dual$. So $A' = 0$ and $L$ is semisimple.
:::

:::{.remark title="Step 14"}
Since $L = H \oplus \bigoplus _{\alpha\in \Phi} L_{ \alpha}$, it's easy to check that $C_L(H) = H$ by considering what happens when bracketing against any nonzero element in \( \bigoplus L_ \alpha \). Thus $H$ is a maximal toral subalgebra with corresponding root system $\Phi$.
:::
:::

:::{.remark}
Next: part VI on representation theory, although we'll first cover $\S 13$ on weights, especially $\S 13.1, \S 13.2$. Goal: Weyl's character formula.
:::

# Part 5: Representation Theory (Monday, November 14)

## $\S 13$ Abstract theory of integral weights

:::{.definition title="Integral weights and the root lattice"}
Let $\EE \contains \Phi \contains \Delta$ with Weyl group $W$. An element $\lambda\in \EE$ is an **integral weight** if $\inp \lambda \beta = (\lambda, \beta\dual)\in \ZZ$ for all $\beta \in \Phi$, where $\beta\dual \da {2\beta \over (\beta, \beta)}$. We write the set of all integral weights as $\Lambda$, and write $\Lambda_r \da \ZZ \Phi$ for the **root lattice**.
:::

:::{.remark}
Recall $\Delta\dual \da \ts{\alpha\dual \st \alpha\in \Delta}$ is a base for $\Phi\dual = \ts{ \beta\dual \st \beta \in \Phi }$, and so
\[
\lambda \in \Lambda\iff (\lambda, \alpha\dual) = \inp \lambda \alpha \in \ZZ \,\,\forall \alpha \in \Delta .\]
:::

:::{.definition title="Dominant weights"}
A weight \( \lambda\in \Lambda \) is **dominant** iff $\inp \lambda \alpha \geq 0$ for all \( \alpha\in \Delta \), and we denote the set of all such dominant weights $\Lambda^+$. The weight $\lambda$ is **strongly dominant** if $\inp \lambda \alpha > 0$ for all \( \alpha \in \Delta\). Writing $\Delta = \tsl \alpha 1 \ell$, let \( \ts{ \lambda_i }_{1\leq i \leq \ell} \) be the dual basis for $\EE$ relative to $\inp\wait\wait$, so $\inp{ \lambda_i}{\alpha_j} = \delta_{ij}$. The $\lambda_i$ are referred to as the **fundamental dominant weights**, written $\lambda_i = \omega_i = \varpi_i$.
:::

:::{.remark}
If \(\lambda \in \Lambda \) then one can write \( \lambda = \sum_{i=1}^\ell m_i \lambda_i \) where $m_i \da \inp{ \lambda}{\alpha_i}$, so $\Lambda$ is a $\ZZ\dash$lattice with lattice basis $\ts{\lambda_i}_{1\leq i\leq \ell}$ containing the root lattice as a sublattice; in fact $\Lambda_r = \ZZ \Delta$. Writing the Cartan matrix as $A = (\inp{ \alpha_i}{\alpha_j})$, we have $\alpha_i = \sum_{j=1}^\ell \inp{\alpha_i}{\alpha_j} \lambda_j$ coming from the $i$th row of $A$. So this matrix expresses how to write the simple roots in terms of the fundamental dominant weights, and inverting it allows writing the fundamental weights in terms of the simple roots.
:::

:::{.fact}
The entries of $A\inv$ are all nonnegative rational numbers, so each fundamental dominant weight is a nonnegative rational linear combination of simple roots.
:::

:::{.example title="?"}
For $A_3$ one has $A = \mattt 2 {-1} 0 {-1} 2 {-1} 0 {-1} 2$, so
\[
\alpha_1 &= 2 \lambda_1 - \lambda_2 \\
\alpha_2 &= - \lambda_1 + 2 \lambda_2 - \lambda_3 \\
\alpha_3 &= - \lambda_2 + 2 \lambda_3 .\]
:::

:::{.definition title="Fundamental group"}
The quotient \( \Lambda/ \Lambda_r \) is called the **fundamental group** of $\Phi$, and the index $f\da [\Lambda: \Lambda_r]$ is called its **index of connection**.
:::

:::{.remark}
The index is generally small:

- $A_\ell$ has $f=\ell + 1$,
- $f=1$ is obtained from $F_4, G_2, E_8$,
- $f=2$ is obtained from types $B, C, E_7$,
- $f=3$: $E_6$,
- $f=4$: type $D$.
:::

## $\S 13.2$ Dominant weights

:::{.remark}
Note that
\[
s_i \lambda_j = \lambda_j - \inp{ \lambda_j}{\alpha_i} \alpha_i = \lambda_j - \delta_{ij} \alpha_i ,\]
so $\Lambda$ is invariant under $W$. In fact, any sublattice of \( \Lambda \) containing \( \Lambda_r \) is $W\dash$invariant.
:::

:::{.lemma title="A"}
Each integral weight is $W\dash$conjugate to exactly one dominant weight. If $\lambda$ is dominant, then $w \lambda\leq \lambda$ for all $w\in W$, and if $\lambda$ is strongly dominant then $w \lambda= \lambda\iff w=1$.
:::

:::{.proof title="Sketch"}
Most of this follows from Theorem 10.3, exercise 10.14, lemma 10.3B, and corollary 10.2C, along with induction on $\ell(w)$. We'll omit the details.
:::

:::{.remark}
The ordering $\leq$ on $\Lambda$ is not well-behaved with respect to dominant weights: one can have $\mu \leq \lambda$ with $\mu\in \Lambda^+$ dominant but $\lambda\not\in \Lambda^+$ not dominant.
:::

:::{.example title="?"}
Let $\Phi$ be indecomposable of rank 2 (e.g. of type $A_2$) with simple roots $\alpha, \beta$. Then $0\in \Lambda^+$ is dominant and $0 < \alpha$, but $\alpha \in \Delta$ is not dominant: $(\alpha,\beta) < 0 \implies \ip\alpha \beta< 0$.
:::

:::{.lemma title="?"}
Let \( \lambda \in \Lambda^+ \) be dominant; then the number of dominant $\mu \in \Lambda^+$ with $\mu\leq \lambda$ is finite.
:::

:::{.proof title="?"}
Let \( \lambda, \mu\in \Lambda^+ \) and write \( \lambda - \mu \) as a nonnegative integer linear combination of simple roots. Note
\[
0 \leq (\lambda+ \mu, \lambda- \mu ) = (\lambda, \lambda ) - (\mu, \mu) = \norm{ \lambda}^2 - \norm{\mu}^2 ,\]
so $\mu$ lies in the compact set of vectors of length at most $\norm{\lambda}$, and also in the discrete set \( \Lambda^+ \). The intersection of a compact set and a discrete set is always finite.
:::
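:::{.remark}
This finiteness is easy to verify by computer. The following is a minimal sketch (not from the lecture; the type $A_2$ Cartan matrix below is assumed data) enumerating the dominant weights below a given dominant weight, working in the basis of fundamental weights, where the rows of $A$ express the simple roots:

```python
# A sketch for type A2: enumerate dominant mu <= lam, where
# mu = lam - sum_i k_i alpha_i with each k_i a nonnegative integer.
import numpy as np
from itertools import product

A = np.array([[2, -1], [-1, 2]])  # Cartan matrix of A2; row i gives alpha_i in the lambda_j basis

def dominant_weights_below(lam, max_k=10):
    """Dominant mu <= lam, with weights written in fundamental-weight coordinates."""
    lam = np.array(lam)
    found = set()
    for ks in product(range(max_k + 1), repeat=2):
        mu = lam - np.array(ks) @ A      # subtract k_1 alpha_1 + k_2 alpha_2
        if all(m >= 0 for m in mu):      # dominant iff every <mu, alpha_i> >= 0
            found.add(tuple(int(m) for m in mu))
    return sorted(found)

print(dominant_weights_below([1, 1]))    # for lam = rho: [(0, 0), (1, 1)]
```
:::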
## $\S 13.3$ The weight $\rho$

:::{.definition title="$\rho$"}
\[
\rho \da {1\over 2} \sum_{\alpha\in \Phi^+} \alpha .\]
:::

:::{.remark}
This section shows \( \rho = \sum_{i = 1}^\ell \lambda_i \) and \( \norm{\lambda+ \rho}^2 \geq\norm{w \lambda+ \rho}^2 \) when $\lambda$ is the unique dominant weight in the orbit $W \lambda$.
:::

## $\S 13.4$: Saturated sets of weights

:::{.remark}
This section will be used later to analyze the set of weights in a finite-dimensional module for a semisimple Lie algebra over $\CC$.
:::

## $\S 20$: Weights and maximal vectors.

:::{.remark}
Let $L$ be finite-dimensional semisimple over $\CC$ containing a maximal toral subalgebra $H$. This corresponds to $\Phi \contains \Delta$ with Weyl group $W$ and \( \Phi \subseteq \EE = \RR \Phi \).
:::

## $\S 20.1$

:::{.definition title="Weight spaces and weights for $L\dash$modules"}
Let $V$ be a finite-dimensional $L\dash$module. By corollary 6.4, $H\actson V$ semisimply (diagonally) and we can simultaneously diagonalize to get a decomposition
\[
V= \bigoplus _{\lambda\in H\dual} V_{\lambda}, \qquad V_{ \lambda} \da \ts{v\in V \st h.v = \lambda(h) v\,\,\forall h\in H} .\]
If $V_{\lambda}\neq 0$ then $\lambda$ is a **weight**.
:::

:::{.example title="?"}
If $\phi = \ad$ and $V=L$, then $L = H \oplus \bigoplus _{\alpha\in \Phi} L_{\alpha}$ where $H = L_0$.
:::

:::{.warnings}
If $\dim V = \infty$, $V_{ \lambda}$ still makes sense but $V$ may no longer decompose as a direct sum of its weight spaces. E.g. take $V = \mcu(L)$ and the left regular representation given by left-multiplication in the algebra $\mcu(L) \actson \mcu(L)$. This restricts to an action of $L \subseteq \mcu(L)$, the *regular action* of $L$ on $\mcu(L)$. Note that there are no eigenvectors: taking a PBW basis of monomials $\prod h_i^{n_i} \cdot \prod_{\alpha\in \Phi} x_{\alpha}^{n_ \alpha}$, left multiplication strictly increases monomial degrees, and thus there are no eigenspaces. So $V_ \lambda= 0$ for all $\lambda$, i.e. there are no weight spaces at all.
:::

# Wednesday, November 16

:::{.remark}
Let $L\in \Lie\Alg^{\fd, \semisimple}$ containing $H$ with $\Phi, \Delta, W$ as usual. Recall that $V\in \mods{L}^{\fd} \implies V = \bigoplus _{\lambda\in H\dual} V_{ \lambda}$ where $V_{ \lambda} \da \ts{v\in V\st h.v = \lambda(h)v\, \forall h\in H}$, which is called a **weight space** when \( V_{\lambda}\neq 0 \). Note that if $V$ is *any* representation of $L$ (not necessarily finite-dimensional), $V' \da \bigoplus _{ \lambda\in H\dual} V_{ \lambda} \leq V$ is always an $L\dash$submodule. The sum is still direct since the terms correspond to eigenspaces with distinct eigenvalues. Note that if $h\in H, x\in L_{\alpha}, v\in V_{ \lambda}$, then
\[
h.(x.v) &= x.(h.v) + [hx].v \\
&= \lambda(h) x.v + \alpha(h) x.v \\
&= (\lambda+ \alpha)(h) x.v ,\]
so $L_{ \alpha}. V_{ \lambda} \subseteq V_{ \lambda+ \alpha}$.
:::

:::{.lemma title="?"}
Let $V\in \mods{L}$, then

a. $L_{\alpha}$ maps $V_{ \lambda}$ into $V_{ \lambda + \alpha}$,
b. The sum $V' \da \sum _{ \lambda\in H\dual} V_{ \lambda}$ is direct and $V' \leq V$ is an $L\dash$submodule,
c. If $\dim V < \infty$ then $V = V'$.
:::

## $\S 20.2$: Highest weight modules

:::{.definition title="Maximal vectors"}
A **maximal vector** in an $L\dash$module $V$ is a nonzero weight vector $v\in V_{ \lambda}$ such that $L_ \alpha.v = 0$ for all positive roots \( \alpha \in \Phi^+ \). Equivalently, $L_ \alpha .v = 0$ for all \( \alpha\in \Delta \).
:::

:::{.definition title="Highest weight vectors"}
A **highest weight vector** is a nonzero $v\in V_{ \lambda}$ where \( \lambda \) is maximal among all weights of $V$ with respect to the ordering $\leq$ corresponding to the choice of $\Delta$.
:::

:::{.observation}
If $v$ is a highest weight vector then $v$ is necessarily a maximal vector, since \( \lambda+ \alpha > \lambda \), but the converse is not necessarily true.
:::

:::{.warnings}
I.e., the weight of a maximal vector need not be a highest weight.
:::

:::{.example title="?"}
In $\S 18$, $L$ is constructed using the Serre relations to get $L_0 \surjects L$, where $L$ involved $(S_{ij}^{\pm})$ and $L_0$ involved S1-S3. Recalling $(\ad_{y_i})^{m+1}(y_j) = y_{ij}$ where $m \da -\inp{\alpha_j}{\alpha_i}$: since $x_k . y_{ij} = 0$, $y_{ij}$ is a maximal vector in $L_0$, but it is *not* a highest weight vector, since $\mathrm{wt}(y_{ij}) = -(m+1) \alpha_i - \alpha_j < - \alpha_j = \mathrm{wt}(y_j)$ and so its weight is not maximal.
:::

:::{.example title="?"}
View $L\in \mods{L}$ via $\ad_L$; then $\S 10.4$ shows that there is a unique highest root $\tilde \alpha$ satisfying \( \tilde \alpha \geq \alpha \) for all \( \alpha\in \Phi \). Any nonzero $v\in L_{\tilde \alpha}$ is a highest weight vector for the adjoint representation.
:::

:::{.definition title="Borel subalgebras"}
A **Borel subalgebra** of $L$ is a maximal solvable subalgebra $B\leq L$.
:::

:::{.proposition title="?"}
$B \da H \oplus \bigoplus _{\alpha\in \Phi^+} L_ \alpha$ is a Borel subalgebra of $L$.
:::

:::{.proof title="?"}
If \( \alpha, \beta\in \Phi^+ \) then $[L_{ \alpha}, L_{ \beta}] \subseteq L_{\alpha + \beta}$, which is zero unless $\alpha+\beta$ is again a root in $\Phi^+$, so $B\leq L$ is a subalgebra. One has $B^{(i)} \da [B^{(i-1)}, B^{(i-1)}] \subseteq \sum_{\height( \beta) \geq 2^{i-1} } L_{\beta}$, since bracketing elements of $H$ together will vanish (since $H$ is abelian) and bracketing height 1 roots yields height 2, bracketing height 2 yields height 4, and so on. Thus $B$ is a solvable subalgebra, since the heights of roots are uniformly bounded above by a finite number.

To see that it's maximal, note that any subalgebra $B' \leq L$ properly containing $B$ must also contain some $L_{ - \alpha}$ for some $\alpha\in \Phi^+$. But then $B' \contains L_{ - \alpha} \oplus [ L_{ - \alpha }, L_{ \alpha}] \oplus L_{ \alpha} \cong \liesl_2(\CC)$, which is not solvable, so $B'$ cannot be solvable.
:::

:::{.remark}
Let $V\in \lmod^\fd$; then $V\in \mods{B}$ by restriction, and by Lie's theorem $V$ must have a common eigenvector $v$ for the action of $B$. Since $B\contains H$, $v$ is a weight vector, and $[B, B] = \bigoplus _{ \alpha\in \Phi^+} L_ \alpha$ acts on $v$ by commutators of operators acting by scalars, which commute -- thus $[B,B]$ acts by zero on $v$, making $v$ a maximal vector in $V$. So any finite-dimensional $L\dash$module has a maximal vector.
:::

:::{.definition title="Highest weight modules"}
A module $V\in \lmod$, possibly infinite-dimensional, is a **highest weight module** if there exists a \( \lambda \in H\dual \) and a nonzero vector $v^+ \in V_{ \lambda}$ such that $V$ is generated as an $L\dash$module by $v^+$, i.e. $U(L).v^+ = V$.
:::

:::{.remark}
Let $x_ \alpha \in L_ \alpha, y_ \alpha\in L_{- \alpha}, h_{\alpha} = [x_ \alpha, y_ \alpha]$ be a fixed standard $\liesl_2$ triple in $L$.
:::

:::{.theorem title="?"}
Let $V\in \lmod$ be a highest weight module with maximal vector $v^+ \in V_{ \lambda}$. Write \( \Phi^+ = \tsl \beta 1 m \), \( \Delta = \tsl \alpha 1 \ell \), then
a. $V$ is spanned by the vectors $y_{\beta_1}^{i_1}\cdots y_{ \beta_m}^{i_m} .v^+$ for $i_j\in \ZZ_{\geq 0}$. In particular, $V = \bigoplus _{ \mu\in H\dual} V_ \mu$.
b. The weights of $V$ are of the form $\mu = \lambda- \sum_{i=1}^\ell k_i \alpha_i$ with $k_i\in \ZZ_{ \geq 0}$, and all weights $\mu$ satisfy $\mu \leq \lambda$.
c. For each $\mu\in H\dual$, $\dim V_{ \mu} < \infty$, and for the highest weight \( \lambda \) one has $\dim V_{ \lambda} = 1$, spanned by $v^+$.
d. Each $L\dash$submodule $W$ of $V$ is a direct sum of its weight spaces.
e. $V$ is indecomposable in $\lmod$, with a unique maximal proper submodule and a corresponding unique irreducible quotient.
f. Every nonzero homomorphic image of $V$ is also a highest weight module of the same highest weight.
:::

:::{.proof title="Sketch"}
\envlist

a. Use the PBW theorem, extending a basis of $B\leq L$ to a basis of $L$ in which the $B$ basis elements come second. Writing $L = N^- \oplus B$, one can decompose $U(L) = U(N^-) \tensor_\CC U(B)$ and get $U(L) v^+ = U(N^-)U(B)v^+ = U(N^-)U(H \oplus N^+)v^+ = U(N^-)v^+$, since $U(H \oplus N^+)$ acts on $v^+$ by scalars.
b. Writing the $\beta$ in terms of the $\alpha$ yields this expression.
c. Clear.

We'll finish the rest next time.
:::

# $\S21.2$: A sufficient condition for finite-dimensionality (Monday, November 21)

:::{.remark}
Last time: if $V\in \lmod^\fd$ then $V = L( \lambda)$ for some dominant weight $\lambda\in \Lambda^+$, yielding a necessary condition for finite-dimensionality. Today: a sufficient condition.
:::

:::{.lemma title="?"}
Write \( \Delta = \tsl \alpha 1 \ell \) and set $x_i \da x_{ \alpha_i}, y_i \da y_{\alpha_i}$. For $k\geq 0$ and $1\leq i,j\leq \ell$, the following relations hold in $U(L)$:

a. $[x_j, y_i^{k+1}] = 0$ for $i\neq j$,
b. $[h_j, y_i^{k+1}] = - (k+1) \alpha_i(h_j) y_i^{k+1}$,
c. $[x_i, y_i^{k+1}] = (k+1) y_i^k(h_i - k\cdot 1)$.
:::

:::{.proof title="of (c)"}
Use that $\ad$ acts by derivations:
\[
[x_i, y_i^{k+1}] &= x_i y_i^{k+1} - y_i^{k+1} x_i \\
&= x_i y_i y_i^k - y_i x_i y_i^k + y_i x_i y_i^k - y_i y_i^k x_i \\
&= [x_i y_i] y_i^k + y_i[x_i y_i^k] \\
&= h_i y_i^k + y_i [x_i y_i^k] \\
&= \qty{ y_i^k h_i - k \alpha_i (h_i) y_i^k } + y_i \qty{k y_i^{k-1} (h_i - (k-1)\cdot 1) } \qquad\text{by (b) and induction} \\
&= y_i^k h_i - 2k y_i^k + ky_i^k (h_i - (k-1)\cdot 1) \\
&= (k+1)y_i^k h_i - (2k + k(k-1)) y_i^k \\
&= (k+1) y_i^k h_i - k(k+1) y_i^k \\
&= (k+1)y_i^k (h_i - k\cdot 1) .\]
:::

:::{.theorem title="?"}
Given $V\in \lmod$, let $\Pi(V) \da \ts{ \lambda\in H\dual \st V_ \lambda\neq 0}$ be the set of weights of $V$. If \( \lambda\in \Lambda^+ \) is a dominant weight, then $V \da L(\lambda)\in \lmod^\irr$ is finite-dimensional, and $\Pi(V)$ is permuted by $W$ with $\dim V_ \mu = \dim V_ { \sigma \mu}$ for all $\sigma\in W$.
:::

:::{.proof title="?"}
The main work is showing the last part involving equality of dimensions. It STS this for a simple reflection $s_i \da s_{\alpha_i}$, since any $\sigma$ is a product of such reflections. Let $\phi: L\to \liegl(V)$ be the representation associated to $V$ -- the strategy is to show that $\phi(x_i)$ and $\phi(y_i)$ are locally nilpotent endomorphisms of $V$. Let $v^+ \in V_ \lambda\smz$ be a fixed maximal vector and set $m_i \da \lambda(h_i)$, so $h_i.v^+ = m_i v^+$.

1. Set $w \da y_i^{m_i+1}. v^+$; the claim is that $w=0$. Supposing not, we'll show $w$ is a maximal vector of weight not equal to \( \lambda \), and thus not a scalar multiple of $v^+$. We have $\wt( w) = \lambda- (m_i +1) \alpha_i < \lambda$ (a strict inequality).
   If $j\neq i$ then $x_j.w = x_j y_i^{m_i + 1} v^+ = y_i^{m_i + 1} x_j v^+$ by part (a) of the lemma above, and this is zero since $v^+$ is a highest weight vector and thus maximal (recalling that these are distinct notions). Otherwise
\[
x_i. w &= x_i y_i^{m_i + 1} v^+ \\
&= y_i^{m_i+1} x_i v^+ + (m_i + 1) y_i^{m_i} (h_i - m_i\cdot 1)v^+ \\
&= 0 + (m_i+1)y_i^{m_i}(m_i - m_i) v^+ = 0 .\]
So $w$ is a maximal vector of weight distinct from $\lambda$, contradicting corollary 20.2 since this would generate a proper submodule. $\contradiction$

2. Let $S_i = \gens{x_i, y_i, h_i} \cong \liesl_2$; the claim is that $v^+, y_i v^+, \cdots, y_i^{m_i} v^+$ span a nonzero finite-dimensional $S_i\dash$submodule of $V$. The span is closed under (the action of) $h_i$ since all of these are eigenvectors for $h_i$; it is closed under $y_i$ since $y_i$ sends each spanning vector to the next and annihilates $y_i^{m_i} v^+$ by step 1; and it is closed under $x_i$ by part (c) of the lemma (since $x_i$ sends each spanning vector into the span of the previous one).

3. The sum of two finite-dimensional $S_i\dash$submodules of $V$ is again a finite-dimensional $S_i\dash$submodule, so let $V'$ be the sum of all finite-dimensional $S_i\dash$submodules of $V$ (which is not obviously finite-dimensional, since we don't yet know if $V$ is finite-dimensional). The claim is that $V'$ is a nonzero $L\dash$submodule of $V$. Let $w\in V'$; then $w$ lies in a finite sum, so there exists a finite-dimensional $S_i\dash$submodule $W$ of $V$ with $w\in W$. Construct $U \da W + \sum_{ \alpha \in \Phi} x_ \alpha . W$ where $x_ \alpha \da y_{- \alpha}$ if $\alpha\in \Phi^-$, which is a finite-dimensional vector subspace of $V$. Check that
\[
h_i. (x_ \beta . W) &= x_ \beta.(h_i.W) + [h_i x_ \beta].W \subseteq x_ \beta.W \subseteq U \\
x_i. (x_ \beta.W) &= x_ \beta.(x_i.W) + [x_i x_ \beta] .W \subseteq x_ \beta .W + x _{ \beta + \alpha_i}.W \subseteq U \\
y_i.(x_ \beta. W) &= x_{ \beta}.(y_i.W) + [y_i x_ \beta].W \subseteq x_ \beta. W + x_{ \beta - \alpha_i}.W \subseteq U ,\]
and so $U$ is a finite-dimensional $S_i\dash$submodule of $V$, and thus $U \subseteq V'$. So if $w\in V'$ then $x_ \alpha .w \in V'$ for all \( \alpha\in \Phi \), and $V'$ is stable under a set of generators for $L$, making $V' \leq V$ an $L\dash$submodule. Since $V'\neq 0$ (it contains at least the highest weight space), it must be all of $V$ since $V$ is irreducible.

4. Given an arbitrary $v\in V$, apply the argument for $w$ in step 3 to show that there exists a finite-dimensional $S_i\dash$submodule $W \subseteq V$ with $v\in W$. The elements $x_i, y_i$ act nilpotently on any finite-dimensional $S_i\dash$module, so in particular they act nilpotently on $v$, and we get local nilpotence.

5. Now $\tau_i \da e^{\phi(x_i)} \circ e^{\phi(-y_i)} \circ e^{\phi(x_i)}$ is well-defined. As seen before, $\tau_i(V_ \mu) = V_{s_i \mu}$, i.e. $\tau_i$ is an automorphism that behaves like $s_i$, and so $\dim V_ \mu = \dim V_{ \sigma \mu}$ for all $\mu\in \Pi(V)$ and $\sigma\in W$.

   Now any $\mu\in \Pi(V)$ is conjugate under $W$ to a unique dominant weight $\mu^+$, and by the above $\mu^+\in \Pi(V)$; since $V = L(\lambda)$ has only weights $\leq \lambda$, we have $\mu^+ \leq \lambda$. Note $\lambda \in \Lambda^+$ is dominant, and so by 13.2B there are only finitely many such weights $\mu^+$. Now there are only finitely many $W\dash$conjugates of the finitely many possibilities for $\mu^+$, so $\size \Pi(V) < \infty$. By the general theory of highest weight modules, all weight spaces $V_{\mu} \leq L( \lambda)$ are finite-dimensional.
Since $\dim V_\mu < \infty$ for all $\mu\in\Pi(V)$ and $\size \Pi(V) < \infty$, we have $\dim V < \infty$.
:::

:::{.remark}
Skipping the next two sections: $\S 21.3$ on weight strings and weight diagrams, and $\S 21.4$ on generators and relations for $L( \lambda)$ for \( \lambda\in \Lambda^+ \) dominant.
:::

# Monday, November 28

:::{.remark}
Setup: $L\in \liealg^{\fd, \semisimple}\slice \CC$ containing $H$ a maximal toral subalgebra, $\Phi \contains \Phi^+ = \tlset \beta m \contains \Delta = \tlset \alpha \ell$ with Weyl group $W$, and we have $x_i \in L_{ \alpha_i}, y_i\in L_{- \alpha_i}, h_i = [ x_i y_i]$. For $\beta\in \Phi^+$, we also wrote $x_ \beta\in L_ \beta, y_ \beta\in L_{- \beta}$. There is also a Borel $B = H \oplus \bigoplus _{\beta > 0} L_ \beta$ with $H \subseteq B \subseteq L$.

We saw that if $V\in \lmod^\fd$ is irreducible then $V = \bigoplus _{\mu\in H\dual} V_ \mu$ and $V$ is a highest weight module of highest weight $\lambda$, where $\lambda \in \Lambda^+ = \ts{\nu \in H\dual \st \nu(h_i) \in \ZZ_{\geq 0} \,\forall 1\leq i\leq \ell}$. Writing $M( \lambda)$ for the Verma module $\mcu(L) \tensor_{\mcu(B)} \CC_{ \lambda}$, there is a unique irreducible quotient $M( \lambda) \surjects L( \lambda)$ with highest weight $\lambda$. It turns out that $L ( \lambda)$ is finite-dimensional if \( \lambda\in \Lambda^+ \).
:::

:::{.question}
How can we understand $L( \lambda)$ for \( \lambda\in \Lambda^+ \) better? What is $\dim L( \lambda)$? What is $\dim L( \lambda)_\mu$ (i.e. the dimensions of weight spaces)?
:::

## $\S 22$ Multiplicity Formulas; $\S 22.5$ Formal characters

:::{.remark}
Let \( \Lambda \subseteq H\dual \) be the lattice of integral weights. Note that \( \lambda(h_\beta) \in \ZZ \,\, \forall \beta\in \Phi \iff \lambda(h_i) \in \ZZ \) for all $1\leq i\leq \ell$. Since \( \Lambda\in \zmod \) there is a group algebra $\ZZ[ \Lambda]$, the free $\ZZ\dash$module with basis $e( \lambda)$ for \( \lambda\in \Lambda \), also written $e_ \lambda$ or $e^{ \lambda}$. This has a ring structure given by linearly extending $e( \lambda)\cdot e( \mu) \da e( \lambda+ \mu)$, so
\[
\qty{ \sum a_ \lambda e( \lambda) } \cdot \qty{\sum b_ \mu e( \mu) } = \sum c_ \sigma e( \sigma), \qquad c_ \sigma\da \sum_{ \lambda+ \mu = \sigma} a_ \lambda b _{\mu} .\]

Note that if $V\in \lmod^\fd$ then $V = \bigoplus _{ \mu\in H\dual} V_ \mu$, where $V_{ \mu}\neq 0\implies \mu \in \Lambda$. In this case we can define the **formal character**
\[
\character_V \da \sum_{ \mu\in \Lambda} (\dim V_ \mu) e( \mu) \in \ZZ[ \Lambda ] .\]
:::

:::{.proposition title="?"}
Let $V, W\in \lmod^\fd$, then
\[
\character_{V\tensor W} = \character_V \cdot \character_W .\]
:::

:::{.proof title="?"}
Take dimensions in the formula $(V\tensor W)_ \sigma = \bigoplus_{ \lambda+ \mu = \sigma} V_ \lambda\tensor W_ \mu$.
:::

:::{.remark}
For \( \lambda\in \Lambda^+ \) we have the module $L( \lambda)$ (which Humphreys denotes $V( \lambda)$), and we write
\[
\character_ \lambda\da\character_{L( \lambda)} = \sum_{\mu} m_ \lambda(\mu) e( \mu)
\]
where $m_{\lambda}( \mu) \da \dim L( \lambda)_ \mu\in \ZZ_{\geq 0}$.
:::

:::{.remark}
We now involve the Weyl group to make more progress: let $W$ be the Weyl group of $(L, H)$, then $W\actson \ZZ[ \Lambda]$ by $w . e( \mu) \da e(w \mu)$ for $w\in W, \mu\in \Lambda$. So $W\to \Aut_{\Ring}(\ZZ[ \Lambda])$, and recalling that $\dim L( \lambda)_ \mu = \dim L(\lambda)_{ w \mu}$, we have
\[
w.
\character_{\lambda} = \sum_\mu m_ \lambda( \mu) e(w \mu) = \sum_\mu m_ \lambda(w \mu) e(w \mu) = \character_{\lambda} ,\]
so the $\character_ \lambda$ are $W\dash$invariant elements of the group algebra.
:::

:::{.proposition title="?"}
Let $f\in \ZZ[ \Lambda]^W$ be a $W\dash$invariant element; then $f$ can be written uniquely as a $\ZZ\dash$linear combination of the $\character_ \lambda$ for \( \lambda \in \Lambda^+ \), i.e. these form a $\ZZ\dash$basis:
\[
\ZZ[ \Lambda]^W = \gens{\character_ \lambda\st \lambda\in \Lambda^+}_\ZZ .\]
:::

:::{.proof title="?"}
Recall every \( \lambda\in \Lambda \) is conjugate to a unique dominant weight in \( \Lambda^+ \). Since $f$ is $W\dash$invariant, we can write it as
\[
f = \sum_{ \lambda\in \Lambda^+} c( \lambda) \qty{\sum_{w\in W} e( w \lambda)}
\]
where $c( \lambda)\in \ZZ$ and almost all are zero. Given \( \lambda\in \Lambda^+ \) with $c( \lambda)\neq 0$, a previous lemma (13.2B) shows that $\size\ts{ \mu\in \Lambda^+ \st \mu\leq \lambda} < \infty$ by a compactness argument. Set
\[
M_f \da \Union_{c( \lambda) \neq 0,\, \lambda\in \Lambda^+} \ts{ \mu\in \Lambda^+ \st \mu\leq \lambda} ,\]
the set of all of the possible dominant weights that could appear; then $\size M_f < \infty$ since this is a finite union of finite sets.

Choose \( \lambda\in \Lambda^+ \) maximal with respect to the property that $c( \lambda)\neq 0$, and set $f' \da f- c( \lambda) \character_ \lambda$. Note that $f'$ is again $W\dash$invariant, since $f$ and $\character_ \lambda$ are both $W\dash$invariant, and $M_{ \character_ \lambda} \subseteq M_f$. However \( \lambda \not\in M_{f'} \) by maximality, since we've subtracted $c( \lambda) e(\lambda)$ off, so $\size M_{f'} < \size M_f$. Inducting on $\size M_f$, $f'$ is a $\ZZ\dash$linear combination of the $\ch_{\lambda'}$ for $\lambda' \in \Lambda^+$, and thus so is $f$. One checks the base case $L(0) \cong \CC$, where everything acts with weight zero. Uniqueness is relegated to exercise 22.8.
:::

:::{.question}
Is there an explicit formula for $\character_ \lambda$ for $\lambda \in \Lambda^+$? An intermediate goal will be to understand characters of Verma modules $\character_{M( \lambda)}$ -- note that this isn't quite well-defined yet, since $M( \lambda)$ is an infinite-dimensional module and thus the character has infinitely many terms and is not an element of $\ZZ[ \Lambda]$.
:::

## $\S 23.2$ Characters and Verma Modules

:::{.question}
Let $\mcz(L) \subseteq \mcu(L)$ be the center of $\mcu(L)$, not to be confused with $Z(L) \subseteq L$, which is zero since $L$ is semisimple (as $Z(L)$ is an abelian ideal). How does $\mcz(L) \actson M( \lambda)$?
:::

:::{.remark}
Note $M( \lambda) = \mcu(L) \tensor_{\mcu(B)} \CC_{ \lambda} \cong \mcu(N^-) \tensor_\CC \CC_ \lambda$, and write $v^+$ for a nonzero highest weight vector of $M( \lambda)$. Let $z\in \mcz(L)$ and $h\in H$, then
\[
h.(z.v^+) = z.(h.v^+) = z.( \lambda(h) v^+ ) = \lambda(h)\, z.v^+ ,\]
and $x_ \alpha.(z.v^+) = z.(x_ \alpha.v^+) = 0$ for all \( \alpha\in \Phi^+ \), so $z.v^+$ is a maximal vector in $M( \lambda)$ of weight \( \lambda \) (or zero), i.e. there exists $\chi_ \lambda(z)\in \CC$ such that
\[
z.v^+ = \chi_ \lambda( z) v^+
\]
since $\dim M( \lambda)_ \lambda = 1$. Thus there is an algebra morphism
\[
\chi_ \lambda: \mcz(L) \to \CC .\]
Pick a PBW basis for the Verma module $M(\lambda)$, then
\[
z. y_{\beta_1}^{i_1}\cdots y_{\beta_m }^{i_m} v^+ = y_{\beta_1}^{i_1}\cdots y_{\beta_m }^{i_m} z.
v^+ = \chi_ \lambda(z) y_{\beta_1}^{i_1}\cdots y_{\beta_m }^{i_m} v^+ ,\]
so $z.m = \chi_ \lambda(z)m$ for all $m\in M( \lambda)$, and thus $\mcz(L)\actson M(\lambda)$ by the character $\chi_ \lambda$. Consequently, $\mcz(L)$ acts on any subquotient of $M( \lambda)$ by $\chi_\lambda$ as well.
:::

:::{.question}
When is $\chi_ \lambda= \chi_ \mu$ for two integral weights \( \lambda, \mu\in \Lambda \)?
:::

# Wednesday, November 30

:::{.remark}
Recall: $\mcz(L) \da Z(\mcu(L))$ acts by a character $\chi_ \lambda: \mcz(L) \to \CC$ on $M( \lambda)$, and thus on any of its subquotients. For \( \lambda, \mu\in \Lambda \subseteq H\dual \), when is $\chi_ \lambda = \chi_ \mu$?
:::

:::{.definition title="Linkage"}
Two weights \( \lambda, \mu\in H\dual \) are **linked** iff $\exists w\in W$ such that $\mu + \rho = w ( \lambda+ \rho)$, where $\rho \da {1\over 2}\sum_{ \beta\in \Phi^+} \beta$. In this case, write $\mu \sim \lambda$ for this equivalence relation, i.e. $\mu = w( \lambda+ \rho) - \rho$. We'll write $w\cdot \lambda \da w( \lambda+ \rho) - \rho$ and call this the **dot action** of $W$ on $H\dual$.
:::

:::{.warnings}
This defines an action of a group on a set, but it is **not** a linear action.
:::

:::{.proposition title="?"}
Let \( \lambda\in \Lambda, \alpha\in \Delta \), suppose $m\da \lambda(h_ \alpha) = \inp \lambda \alpha\in \ZZ_{\geq 0}$, and let $v^+$ be a highest weight vector of $M( \lambda)$. Then $w\da y_ \alpha^{m+1}\cdot v^+$ is a maximal vector in $M( \lambda)$ of weight \( \lambda - (m+1 ) \alpha \).
:::

:::{.proof title="?"}
The proof that $w$ is a maximal vector is step 1 in theorem 21.2, which showed $\dim L( \lambda) < \infty$ (using lemma 21.2 and commutator relations in $\mcu(L)$). Then check that
\[
\weight(w) = \lambda - (m+1) \alpha = \lambda- \qty{ \inp \lambda \alpha + 1} \alpha .\]
In fact, for any \( \lambda\in H\dual, \alpha\in \Delta \) we can define
\[
\mu \da \lambda- \qty{ \inp \lambda \alpha + 1} \alpha ,\]
so in our case $\weight(w) = \mu$. Note that
\[
\mu &= \lambda- \inp{\lambda} \alpha \alpha- \alpha \\
&= s_ \alpha( \lambda) + (s_ \alpha( \rho) - \rho) \\
&= s_ \alpha(\lambda+ \rho) - \rho\\
&= s_ \alpha \cdot \lambda .\]

Now $w$ generates a highest weight module $W\leq M( \lambda)$ of highest weight $\mu = s_ \alpha \cdot \lambda$. Note that $M( \mu)$ is the universal highest weight module with highest weight $\mu$, i.e. there is a surjection $M( \mu) \surjects W$: the $B\dash$module morphism
\[
\CC_ \mu &\to W \\
1 &\mapsto w
\]
yields an $L\dash$module morphism $\mcu(L) \tensor_{\mcu(B)} \CC_ \mu \surjects W$. So $W$ is a nonzero quotient of $M( \mu)$ and $\mcz(L)\actson W$ by $\chi_ \mu$. On the other hand $W\leq M( \lambda)$, and so $\mcz(L)\actson W$ by $\chi_ \lambda$, yielding $\chi_ \lambda= \chi_ \mu$.

So we conclude that if $\mu = s_ \alpha \cdot \lambda$ with $\inp \lambda \alpha\in \ZZ_{\geq 0}$, then $\chi_ \mu = \chi_ \lambda$.
:::

:::{.corollary title="?"}
Let \( \lambda\in \Lambda, \alpha \in \Delta, \mu = s_ \alpha \cdot \lambda \). Then $\chi_ \mu = \chi_ \lambda$.
:::

:::{.proof title="?"}
$\mu = s_ \alpha \cdot \lambda = \lambda- (m+1) \alpha$ where $m \da \inp \lambda \alpha$.

- Case 1: $m=-1$. Then $\mu = \lambda$ and we're done.
- Case 2: $m\geq 0$. Then $\chi_ \lambda= \chi_ \mu$ by the proposition.
- Case 3: $m\leq -2$. Then
\[
\inp \mu \alpha &= \inp{ \lambda- (m+1) \alpha}{\alpha} \\
&= m-2(m+1) \\
&= -m-2 \geq 0 .\]
We also have $\mu = s_ \alpha\cdot \lambda\implies s_ \alpha\cdot \mu= s_ \alpha^2 \cdot \lambda= \lambda$ by applying the dot action on both sides.
So apply the proposition with the roles of \( \mu \) and \( \lambda \) swapped to get a maximal vector in $M( \mu)$ of weight $\lambda$, and conclude $\chi_ \lambda = \chi_ \mu$ as before.
:::

:::{.corollary title="?"}
Let \( \lambda, \mu\in \Lambda \). If $\mu\sim \lambda$ then $\chi_ \lambda=\chi_ \mu$.
:::

:::{.proof title="?"}
Say $\mu = w\cdot \lambda$ for $w\in W$; then write $w = s_{i_1}\cdots s_{i_t}$ and use induction on $t$, where the base case is the previous corollary.
:::

:::{.theorem title="Harish-Chandra ($\S 23.2$)"}
If \( \lambda, \mu \) satisfy $\chi_ \lambda= \chi_ \mu$, then \( \lambda\sim \mu \).
:::

:::{.remark}
Goal: find $\character_ \lambda\da \character_{L( \lambda)} \da \sum_{ \mu\in \Lambda} (\dim L( \lambda)_\mu ) e( \mu) \in \ZZ[ \Lambda]$ for \( \lambda\in \Lambda^+ \).
:::

## $\S 24$: Formulas of Weyl, Kostant, Steinberg; $\S 24.1$ functions on $H\dual$

:::{.remark}
View $\ZZ[ \Lambda]$ as finitely-supported $\ZZ\dash$valued functions on \( \Lambda \), with elements $f = \sum_{ \lambda\in \Lambda} a_ \lambda e( \lambda)$ regarded as functions $f(\mu) \da a_\mu$; thus $e( \lambda)( \mu) = \delta_{\lambda \mu }$. The point of this maneuver: Verma modules are infinite-dimensional, but $\ZZ[ \Lambda]$ only handles finite sums. For finitely-supported $f,g: \Lambda\to \ZZ$, define $(f\convolve g) ( \sigma) = \sum_{ \lambda+ \mu = \sigma} f( \lambda) g( \mu)$.

Consider the set $\mcx$ of functions $H\dual \to \CC$ whose support is contained in a finite union of sets of the form $\lambda_{ \leq} \da \ts{ \lambda - \sum_{ \beta\in \Phi^+} k_ \beta \beta\st k_ \beta\in \ZZ_{\geq 0}}$. One can show $\mcx$ is closed under convolution, and $\mcx$ becomes a commutative associative algebra containing $\ZZ[ \Lambda]$ as a subring. Note that $\supp(f\convolve g) \subseteq ( \lambda+ \mu)_{\leq }$ when $\supp f \subseteq \lambda_\leq$ and $\supp g \subseteq \mu_\leq$.

![](figures/2022-11-30_09-56-16.png)

Write $e( \lambda)$ as $e_{ \lambda}$ for $\lambda \in H\dual$, regarded as a function $e_ \lambda:H\dual \to \CC$ where $e_ \lambda( \mu) = \delta_{ \lambda \mu}$ is the characteristic function of $\lambda$, and note that $e_ \lambda \convolve e_ \mu = e_ { \lambda+ \mu}$.

Let $p \da \character_{M(0)}: H\dual \to \CC$, then
\[
M(0) = \mcu(L) \tensor_{\mcu(B)} \CC_0 \cong \mcu(N^-) \tensor_\CC \CC_0 \quad\in \mods{H}, \mods{N^-} .\]
By PBW, $\mcu(N^-)$ has a basis $y_{\beta_1}^{i_1}\cdots y_{ \beta_m}^{i_m}$ for $i_j\in \ZZ_{\geq 0}$, where $\Phi^+ = \tsl \beta 1 m$. The weights of $M(0)$ are $\mu = - \sum_{j=1}^m i_j \beta_j$, so $\mu\in 0_{\leq}$. Note $\character_{M(0)}( \mu) = \dim M(0)_ \mu$, and so $\character_{M(0)}\in \mcx$, i.e. $p\in \mcx$.
:::

# Friday, December 02

## Convolution Formulas

:::{.remark}
Last time:
\[
\mcx \da \ts{f: H\dual\to \CC \st \supp f \subseteq \Union_{i=1}^n (\lambda_i - \ZZ_{\geq 0} \Phi^+) } ,\]
which is a commutative associative unital algebra under convolution, where $e_{ \lambda}(\mu) = \delta_{ \lambda \mu}$ for $\mu\in H\dual$ and $e_ \lambda\convolve e_ \mu = e_{ \lambda+ \mu}$ with $e_0 = 1$.

We have $\ch_{M(0)}$, which records the weights $\mu = -\sum_{j=1}^m i_j \beta_j$ with $i_j\in \ZZ_{\geq 0}$, and
\[
\dim M(0)_\mu = \ch_{M(0)}(\mu) = \size \ts{\vector i \in \ZZ_{\geq 0}^m \st \sum i_j \beta_j = -\mu } \da p(\mu) ,\]
which is the **Kostant function**; $p(-\mu)$ is the Kostant partition function of $\mu$, which records the number of ways of writing $\mu$ as a $\ZZ_{\geq 0}\dash$linear combination of positive roots. We'll regard finding such a count as a known or easy problem, since this can be done by enumeration.
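For instance, the following minimal sketch (not from the lecture; the type $A_2$ data is an assumption, with positive roots $\beta_1 = \alpha_1$, $\beta_2 = \alpha_2$, $\beta_3 = \alpha_1 + \alpha_2$ and weights written in the simple-root basis) computes $p(\mu)$ by brute-force enumeration:

```python
# A brute-force sketch of the Kostant function p for type A2.
from itertools import product

pos_roots = [(1, 0), (0, 1), (1, 1)]   # alpha_1, alpha_2, alpha_1 + alpha_2

def p(mu, max_i=10):
    """Count exponent vectors (i_1, ..., i_m) in Z_{>=0}^m with sum_j i_j beta_j = -mu."""
    target = tuple(-c for c in mu)
    count = 0
    for i in product(range(max_i + 1), repeat=len(pos_roots)):
        s = tuple(sum(ij * b[k] for ij, b in zip(i, pos_roots)) for k in range(2))
        if s == target:
            count += 1
    return count

print(p((-1, -1)))  # -mu = beta_1 + beta_2 = beta_3, two ways, so p = 2
print(p((0, 0)))    # only the empty sum, so p = 1
```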
Define the **Weyl function** $q\da \prod_{\alpha \in \Phi^+} (e_{\alpha\over 2} - e_{-{\alpha \over 2}})$, where the product is convolution. For \( \alpha\in \Phi^+ \), define $f_ \alpha:H\dual \to \ZZ$ by
\[
f_ \alpha( \lambda) \da
\begin{cases}
1 & \lambda = -k \alpha \text{ for some } k\in \ZZ_{\geq 0} \\
0 & \text{otherwise}.
\end{cases}
\]
We can regard this as an infinite sum $f_ \alpha = e_0 + e_{ - \alpha} + e_{ -2 \alpha} + \cdots = \sum_{k\geq 0} e_{ -k \alpha}$.
:::

:::{.lemma title="A"}
\envlist

a. $p = \prod_{ \alpha\in \Phi^+} f_ \alpha$,
b. $(e_0 - e_{- \alpha}) \convolve f_ \alpha = e_0$,
c. $q = e_\rho \convolve \prod_{ \alpha\in \Phi^+} (e_0 - e_{ - \alpha})$.
:::

:::{.proof title="of lemma A"}
**Part a**: The coefficient of $e_\mu$ in $\prod_{j=1}^m f_{ \beta_j}$ is the convolution
\[
\sum_{i_1,\cdots, i_m \in \ZZ_{\geq 0},\, -\sum i_j \beta_j = \mu } f_{\beta_1}(-i_1 \beta_1) \cdots f_{ \beta_m }(-i_m \beta_m) = p(\mu) .\]

**Part b**:
\[
(e_0 - e_{- \alpha}) \convolve (e_0 + e_{- \alpha} + e_{-2 \alpha} + \cdots) = e_0 + e_{ - \alpha} - e_{ - \alpha}+ e_{ -2 \alpha}- e_{ -2 \alpha} +\cdots = e_0 ,\]
noting the telescoping. This can be checked rigorously by regarding these as functions instead of series.

**Part c**: Recall $\rho = \sum_{ \alpha\in \Phi^+} {1\over 2}\alpha$, so $e_\rho = \prod_{ \alpha\in \Phi^+} e_{\alpha\over 2}$. Thus the RHS is
\[
\prod_{ \alpha\in \Phi^+} \qty{e_{\alpha\over 2} \convolve (e_0 - e_{- \alpha}) } = \prod_{\alpha\in \Phi^+}(e_{\alpha\over 2} - e_{- {\alpha\over 2}} ) \da q .\]
Note that $q\neq 0$ since $q(\rho) = 1$.
:::

:::{.lemma title="B"}
Let $w\in W$; recalling that $w.e_{\alpha} = e_{w \alpha}$,
\[
wq = (-1)^{\ell(w)} q .\]
:::

:::{.proof title="of lemma B"}
ETS for $w = s_\alpha$ with $\alpha \in \Delta$. Recall $s_ \alpha$ permutes \( \Phi^+ \smts{\alpha} \) and $s_ \alpha(\alpha) = - \alpha$, so $s_ \alpha$ permutes the factors $(e_{ \beta\over 2} - e_{-{\beta\over 2}})$ for $\beta\neq \alpha$ and negates the factor $e_{\alpha\over 2} - e_{- {\alpha\over 2}}$, giving $s_\alpha q = -q$.
:::

:::{.lemma title="C"}
$q$ is invertible:
\[
q\convolve p \convolve e_{-\rho} = e_0, \quad \text{i.e. } q\inv = p \convolve e_{-\rho} .\]
:::

:::{.proof title="of lemma C"}
Use lemma A:
\[
q\convolve p \convolve e_{-\rho}
&= e_\rho \convolve \qty{ \prod_{ \alpha\in \Phi^+} (e_0 - e_{ - \alpha} ) } \convolve p \convolve e_{ - \rho} \qquad \text{by (c)} \\
&= \qty{ \prod_{ \alpha\in \Phi^+} (e_0 - e_{ - \alpha} ) } \convolve p \\
&= \prod_{ \alpha\in \Phi^+} \qty{ (e_0 - e_{ - \alpha}) \convolve f_{ \alpha} } \qquad \text{by (a)}\\
&= \prod_{ \alpha\in \Phi^+} e_0 \qquad \text{by (b)}\\
&= e_0 .\]
:::

:::{.lemma title="D"}
For \( \lambda, \mu\in H\dual \),
\[
\ch_{M( \lambda)}( \mu) = p( \mu - \lambda) = (p \convolve e_ \lambda)( \mu) \implies \ch_{M( \lambda)} = p \convolve e_{ \lambda} .\]
:::

:::{.proof title="of lemma D"}
$M( \lambda)$ has basis $y_{\beta_1}^{i_1}\cdots y_{ \beta_m}^{i_m} \cdot v^+$ where $v^+$ is a highest weight vector of weight \( \lambda \). Note that
\[
\mu = \lambda- \sum_{j=1}^m i_j \beta_j \iff \mu- \lambda= - \sum_{j=1}^m i_j \beta_j ,\]
and so $\dim M(\lambda)_ \mu = p( \mu- \lambda)$. Now check $(p\convolve e_ \lambda)( \mu) = p( \mu - \lambda) e_ \lambda(\lambda) = p( \mu- \lambda)$.
:::
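:::{.remark}
These convolution identities are easy to experiment with by computer. Below is a minimal sketch (not from the lecture) for type $A_1$, under the assumption that a weight $\mu = k\cdot{\alpha\over 2}$ is encoded by the integer $k$; elements of $\mcx$ become dicts, and lemma C checks out up to truncation of the infinite support of $p$:

```python
# Model the convolution algebra for type A1 and check q * p * e_{-rho} = e_0.
from collections import defaultdict

def conv(f, g):
    """Convolution of finitely-supported functions, (f*g)(s) = sum_{a+b=s} f(a)g(b)."""
    h = defaultdict(int)
    for a, ca in f.items():
        for b, cb in g.items():
            h[a + b] += ca * cb
    return {k: v for k, v in h.items() if v != 0}

e = lambda k: {k: 1}               # the characteristic function e_mu, with mu <-> k
N = 20                             # truncation depth for the infinite support of p
q = {1: 1, -1: -1}                 # q = e_{alpha/2} - e_{-alpha/2}, with alpha/2 <-> 1
p = {-2 * k: 1 for k in range(N)}  # p = ch M(0) = sum_{k >= 0} e_{-k alpha}, truncated

result = conv(conv(q, p), e(-1))   # q * p * e_{-rho}, with rho = alpha/2 <-> 1
print({k: v for k, v in result.items() if k > -2 * (N - 1)})  # {0: 1}, i.e. e_0
```
:::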
:::{.lemma title="E"}
\[
q \convolve \ch_{M( \lambda)} = e_{ \lambda+ \rho} .\]
:::

:::{.proof title="of lemma E"}
\[
\text{LHS} \equalsbecause{D} q \convolve p \convolve e_ \lambda \equalsbecause{C} e_ \rho \convolve e_ \lambda = e_{ \lambda+ \rho} .\]
:::

## $\S 24.2$ Kostant's Multiplicity Formula

:::{.remark}
Recall that characters of Verma modules are essentially known. For \( \lambda\in \Lambda^+ \) we have $\ch_{ \lambda} \da \ch_{L( \lambda)}$, recalling that $L( \lambda)$ is a finite-dimensional irreducible representation. Goal: express this as a finite $\ZZ\dash$linear combination of certain $\ch_{M( \mu)}$.

Fix \( \lambda\in H\dual \) and let $\mcm_ \lambda$ be the collection of all $L\dash$modules $V$ satisfying the following:

1. $V = \bigoplus _{ \mu\in H\dual} V_ \mu$,
2. $\mcz(L)\actson V$ by $\chi_ \lambda$,
3. $\ch_V \in \mcx$.

Note that any highest weight module of highest weight \( \lambda \) is in $\mcm_ \lambda$, and $\mcm_\lambda$ is closed under submodules, quotients, and finite direct sums. The Harish-Chandra theorem implies that
\[
\mcm_ \lambda = \mcm_{\mu} \iff \lambda\sim \mu\iff \mu = w\cdot \lambda \text{ for some } w\in W .\]
:::

:::{.lemma title="?"}
If $0\neq V \in \mcm_ \lambda$ then $V$ has a maximal vector.
:::

:::{.proof title="?"}
By (3), the weights of $V$ lie in a finite number of cones $\lambda_i - \ZZ_{\geq 0} \Phi^+$. So if $\mu$ is a weight of $V$ and $\alpha\in \Phi^+$, then $\mu + k \alpha$ is not a weight of $V$ for $k\gg 0$. Iterating this (the weights remain in the same finite union of cones), there exists a weight $\mu$ such that $\mu + \alpha$ is not a weight for any $\alpha\in \Phi^+$. Then any $0\neq v\in V_ \mu$ is a maximal vector.
:::

:::{.definition title="?"}
For $\lambda \in H\dual$, set
\[
\Theta( \lambda) \da \ts{ \mu\in H\dual \st \mu\sim \lambda\, \text{ and } \, \mu\leq \lambda} ,\]
which by the Harish-Chandra theorem is a subset of $W\cdot \lambda$, which is a finite set.
:::

:::{.remark}
The following tends to hold in any setting with "standard" modules, e.g. quantum groups or superalgebras:
:::

:::{.proposition title="?"}
Let $\lambda\in H\dual$, then

a. $M( \lambda)$ has a composition series,
b. Each composition factor of $M( \lambda)$ is of the form $L( \mu)$ for some $\mu\in \Theta( \lambda)$. Define $[M (\lambda): L( \mu)]$ to be the multiplicity of $L(\mu)$ as a composition factor of $M( \lambda)$.
c. $[M( \lambda): L( \lambda)] = 1$.
:::

:::{.proof title="?"}
By induction on the number of weights linked to $\lambda$ that occur in the module (counted with multiplicities). If $M( \lambda)$ is irreducible then it's an irreducible highest weight module, and these are unique up to isomorphism, so $M( \lambda) = L( \lambda)$ and we're done. Otherwise $M( \lambda)$ has a proper nonzero submodule $V$, and $V\in \mcm_ \lambda$ by closure under submodules. By the lemma, $V$ has a maximal vector of some weight $\mu$, which must be strictly less than $\lambda$, i.e. $\mu < \lambda$. As before, $\chi_{ \mu} = \chi_ \lambda$ and thus $\mu\in \Theta( \lambda)$.

Consider $V$ and $M( \lambda)/ V$ -- each lies in $\mcm_ \lambda$, and either has fewer weights linked to \( \lambda \) than $M( \lambda)$ has, or else has exactly the same set of weights linked to $\lambda$, just with smaller multiplicities. By induction each of these has a composition series, and these can be pieced together into a series for $M( \lambda)$ since they fit into a SES.
:::

# Monday, December 05

## Kostant's Character Formula

:::{.remark}
Last time: $\mcm_ \lambda$, defined as a certain category of $L\dash$modules for \( \lambda\in H\dual \), and $\Theta( \lambda) \da \ts{ \mu\in H\dual \st \mu \sim \lambda,\,\, \mu\leq \lambda} \subseteq W\cdot \lambda$.

Proposition from last time:

a. $M(\lambda)$ has a composition series,
b. Each composition factor of $M(\lambda)$ is of the form $L( \mu)$ for some $\mu \in \Theta( \lambda)$,
c. $[M (\lambda): L( \lambda) ] = 1$.

Note that the character of $M( \lambda)$ is the sum of the characters of its composition factors.
:::

:::{.proof title="of proposition"}
**Part b**: Each composition factor of $M( \lambda)$ is in $\mcm_ \lambda$, hence by the lemma has a maximal vector. Since it's irreducible, it is a highest weight module $L( \mu)$ for some \( \mu\in \Theta( \lambda) \).

**Part c**: $[M( \lambda) : L( \lambda)] = 1$ since $\dim M( \lambda)_ \lambda = 1$ and all other weights are strictly less than $\lambda$.
:::

:::{.remark}
Order $\Theta( \lambda) = \ts{ \mu_1, \cdots, \mu_t}$ such that $\mu_i \leq \mu_j \implies i\leq j$. In particular, $\mu_t = \lambda$. By the proposition, $\ch_{M( \mu_j)}$ is a $\ZZ_{\geq 0}\dash$linear combination of the $\ch_{L(\mu_i)}$ with $i\leq j$, and the coefficient of $\ch_{L(\mu_j)}$ is 1. Recording multiplicities in a matrix, we get the following:

\begin{tikzcd}
 & {\ch_{M(\mu_j)}} &&&&& {\,} \\
{\ch_{L(\mu_i)}} && 1 & \bullet & \bullet & \bullet \\
 && 0 & 1 & \bullet & \bullet \\
 && 0 & 0 & \ddots & \bullet \\
 && 0 & 0 & 0 & 1 \\
 & {\,}
\arrow[no head, from=1-2, to=6-2]
\arrow[no head, from=1-2, to=1-7]
\end{tikzcd}

> [Link to Diagram](https://q.uiver.app/?q=WzAsMjAsWzIsMSwiMSJdLFszLDIsIjEiXSxbNCwzLCJcXGRkb3RzIl0sWzUsNCwiMSJdLFs1LDMsIlxcYnVsbGV0Il0sWzQsMiwiXFxidWxsZXQiXSxbMywxLCJcXGJ1bGxldCJdLFs0LDEsIlxcYnVsbGV0Il0sWzUsMSwiXFxidWxsZXQiXSxbNSwyLCJcXGJ1bGxldCJdLFsyLDIsIjAiXSxbMiwzLCIwIl0sWzMsMywiMCJdLFsyLDQsIjAiXSxbMyw0LCIwIl0sWzQsNCwiMCJdLFswLDEsIlxcY2hfe0woXFxtdV9pKX0iXSxbMSwwLCJcXGNoX3tNKFxcbXVfail9Il0sWzEsNSwiXFwsIl0sWzYsMCwiXFwsIl0sWzE3LDE4LCIiLDAseyJzdHlsZSI6eyJoZWFkIjp7Im5hbWUiOiJub25lIn19fV0sWzE3LDE5LCIiLDIseyJzdHlsZSI6eyJoZWFkIjp7Im5hbWUiOiJub25lIn19fV1d)

This is an upper triangular unipotent matrix, and thus invertible.
:::

:::{.corollary title="?"}
Let $\lambda \in H\dual$, then
\[
\ch_{L( \lambda)} = \sum_{ \mu \in \Theta( \lambda)} c( \mu) \ch_{M( \mu)}, \qquad c( \mu)\in \ZZ, \, c( \lambda) = 1 .\]
:::

:::{.remark}
Assume \( \lambda\in \Lambda^+ \), and recall:

- $\mcx = \ts{f: H\dual \to \CC \st \supp f \subseteq \Union_{i=1}^n (\lambda_i - \ZZ_{\geq 0}\Phi^+)}$,
- $p=\ch_{M(0)}$, with $p(\mu) = \size\ts{ \vector i = \tv{i_1,\cdots,i_m} \in \ZZ_{\geq 0}^m \st \mu = -\sum_{1\leq j\leq m} i_j \beta_j}$,
- $q\da \prod_{ \alpha\in \Phi^+} (e_{\alpha\over 2} - e_{-{\alpha\over 2}})$,
- $wq = (-1)^{\ell(w)} q$ for $w\in W$,
- $q \convolve p \convolve e_{-\rho} = e_0$,
- $\ch_{M(\lambda)} = p \convolve e_ \lambda$,
- $q \convolve \ch_{M( \lambda) } = e_{ \lambda+ \rho}$,
- $\ch_ \lambda\da \ch_{L( \lambda)} = \sum_{ \mu\in \Theta( \lambda)} c( \mu) \ch_{M( \mu)}$,
- $q\convolve \ch_{L( \lambda)} = \sum c( \mu)\, q \convolve \ch_{M( \mu)} = \sum c( \mu) e_{ \mu + \rho}$. $\qquad\star_1$
Fixing $w\in W$, we have
\[
\sum_{ \mu\in \Theta( \lambda) } c( \mu) e_{w (\mu+ \rho)} &= w( q \convolve \ch_ \lambda) \\
&= wq \convolve w \ch_{ \lambda} \\
&= (-1)^{\ell(w)} q \convolve \ch_ \lambda \qquad \text{since $\ch_ \lambda$ is $W\dash$invariant} \\
&= (-1)^{\ell(w)} \sum_ \mu c( \mu) e_{ \mu+ \rho} .\]

Since \( \lambda\in \Lambda^+ \), $W$ acts simply transitively on $\Theta( \lambda) + \rho \da \ts{\nu+\rho \st \nu\in \Theta( \lambda) }$. Note \( \mu \sim \lambda\iff \mu+ \rho = w( \lambda+ \rho) \) for some $w\in W$, which is unique since \( \lambda+ \rho \) is strongly/strictly dominant, and lemma 13.2A shows its stabilizer is trivial. So $\stab_W( \lambda+ \rho) = \ts{1}$. The equation $\mu + \rho = w( \lambda+ \rho)$ implies $\mu + \rho \leq \lambda+ \rho$, since applying an element of $W$ to a dominant weight goes down in the partial order. Thus any such $\mu$ lies in $\Theta(\lambda)$, and $\Theta( \lambda)$ consists of precisely those $\mu$ satisfying this equation, so
\[
\Theta( \lambda) = W\cdot \lambda .\]

Continuing the computation: take $\mu = \lambda$ on the LHS and compare coefficients of $e_{w(\lambda+\rho)}$, where on the RHS $\mu$ is determined by $\mu + \rho = w( \lambda+ \rho)$, so
\[
c( \lambda) e_{w( \lambda+ \rho)} = (-1)^{\ell(w)} c( \mu) e_{ \mu+ \rho}\implies c(\mu) = (-1)^{\ell(w)} c( \lambda) .\]
Substituting this into $\star_1$ and using $c( \lambda) = 1$ yields
\[
q \convolve \ch_ \lambda = \sum_{w\in W} (-1)^{\ell(w)} e_{w ( \lambda+ \rho)} \qquad \star_2 ,\]
so
\[
\ch_ \lambda &= q \convolve p \convolve e_{ - \rho} \convolve \ch_ \lambda\\
&= p \convolve e_{- \rho} \convolve \sum_{w\in W} (-1)^{\ell(w)} e_{w (\lambda+ \rho)} \\
&= \sum_{w\in W} (-1)^{\ell(w)} p \convolve e_{w( \lambda+ \rho) - \rho} \\
&= \sum_{w\in W} (-1)^{\ell(w)} p \convolve e_{w\cdot \lambda} .\]

This yields the following:
:::

:::{.theorem title="Kostant"}
For \( \lambda\in \Lambda^+ \) dominant, the weight multiplicities in $L( \lambda)$ are given by
\[
\dim L( \lambda)_\mu \da m_ \lambda( \mu) = \sum_{w\in W} (-1)^{\ell(w)} p( \mu+ \rho - w( \lambda+ \rho)) = \sum_{w\in W} (-1)^{\ell(w)} p( \mu - w\cdot \lambda) .\]
:::

## $\S 24.3$ Weyl's Character Formula

:::{.lemma title="?"}
\[
q = \sum_{w\in W} (-1)^{\ell(w)} e_{w \rho} .\]
:::

:::{.proof title="?"}
Take $\lambda = 0$ in $\star_2$, and use that $\ch_0 = e_0$ where $L(0) \cong \CC$.
:::

:::{.theorem title="Weyl's Character Formula"}
Let $\lambda \in \Lambda^+$, then
\[
\qty{ \sum_{w\in W} (-1)^{ \ell(w)} e_{w\rho} } \convolve \ch_{L( \lambda)} = \sum_{w\in W} (-1)^{\ell(w)} e_{w( \lambda+ \rho)} .\]
:::

:::{.proof title="?"}
Apply $\star_2$ and the lemma.
:::

:::{.corollary title="Weyl's Dimension Formula"}
\[
\dim L( \lambda) = { \prod_{ \alpha\in \Phi^+} \inp { \lambda+ \rho}{ \alpha} \over\prod_{\alpha\in \Phi^+} \inp \rho \alpha } = \sum_{ \mu \in \Pi( \lambda)} m_ \lambda( \mu) .\]
:::

# Tuesday, December 06

## Weyl Dimension Formula

:::{.remark}
Last time:

- $q \da \prod_{ \alpha\in \Phi^+} (e_{\alpha\over 2} - e_{-{\alpha\over 2}}) = \sum_{w\in W} (-1)^{\ell(w)} e_{w \rho}$.
- The WCF: $q\convolve \ch_{ \lambda} = \sum_{w\in W} (-1)^{\ell(w)} e_{w ( \lambda+ \rho)}$.
- An alternative writing of the WCF:
\[
\ch_ \lambda= { \sum_{w\in W} (-1)^{\ell(w)} e_{w (\lambda+ \rho)} \over \sum_{w\in W} (-1)^{\ell(w)} e_{w \rho}} ,\]
where the denominator is called the *Weyl denominator*.
:::

:::{.corollary title="Weyl dimension formula"}
\[
\dim L( \lambda) = { \prod_{ \alpha\in \Phi^+} \inp{ \lambda+ \rho}{ \alpha} \over \prod_{ \alpha\in \Phi^+} \inp \rho \alpha } ,\]
which is a quotient of two integers.
:::
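:::{.remark}
For concreteness, here is a minimal computational sketch (not from the lecture) of the dimension formula in type $A_2$, under the assumption that all roots have the same length, so that $\inp{\lambda}{\alpha} = \sum_i c_i m_i$ for $\alpha = \sum_i c_i \alpha_i$ and $\lambda = \sum_i m_i \lambda_i$:

```python
# Evaluate Weyl's dimension formula for sl_3 (type A2): weights in the
# fundamental-weight basis, positive roots in the simple-root basis.
from fractions import Fraction

pos_roots = [(1, 0), (0, 1), (1, 1)]   # alpha_1, alpha_2, alpha_1 + alpha_2

def pairing(weight, root):
    """<mu, alpha> in simply-laced A2: root coefficients dotted with weight coords."""
    return sum(c * m for c, m in zip(root, weight))

def weyl_dim(m1, m2):
    lam_rho, rho = (m1 + 1, m2 + 1), (1, 1)   # rho = lambda_1 + lambda_2
    num = den = Fraction(1)
    for a in pos_roots:
        num *= pairing(lam_rho, a)
        den *= pairing(rho, a)
    return num / den

print(weyl_dim(1, 0))   # 3: the standard representation of sl_3
print(weyl_dim(1, 1))   # 8: the adjoint representation
```
:::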
:::{.exercise title="?"}
Show that $W$ always has an equal number of even and odd elements, so $\sum_{w\in W} (-1)^{\ell(w)} = 0$.
:::

:::{.proof title="?"}
Note $\ch_{ \lambda} = \sum _{\mu} \dim L( \lambda)_ \mu\, e_ \mu\in \ZZ[ \Lambda]$, and $\dim L( \lambda) = \sum_{ \mu\in \Lambda} m_ \lambda( \mu)$. Viewing $\ch_ \lambda: \Lambda\to \ZZ$ as the restriction of a function $H\dual \to \CC$, $\dim L( \lambda)$ is the sum of all values of $\ch_ \lambda$.

Work in the $\CC\dash$subalgebra $\mcx_0$ of $\mcx$ generated by the characteristic functions $S \da \ts{e_ \mu\st \mu\in \Lambda}$; this equals the span of $S$ since $e_{ \mu} \convolve e_{\nu} = e_{ \mu+ \nu}$. We have a map
\[
v: \mcx_0 &\to \CC \\
f &\mapsto \sum_{ \mu\in \Lambda} f( \mu) ,\]
which makes sense since $\size \supp f < \infty$; note that $v(f\convolve g) = v(f)v(g)$. This function sums the values we're after, so the goal is to compute $v( \ch_ \lambda)$. By the exercise, attempting to apply this directly to the numerator and denominator of the WCF yields $0/0$, and we get around this by using a variant of L'Hopital's rule.

Define $\del_ \alpha (e_ \mu) = ( \mu, \alpha)e_ \mu$, extended linearly to $\mcx_0$. In the basis $S$ this operator is diagonal, and it is a derivation relative to convolution:
\[
\del_ \alpha \qty{ e_ \mu \convolve e_{ \nu} }
&= \del_ \alpha( e_{ \mu+ \nu} ) \\
&= ( \mu+ \nu, \alpha)e_{ \mu+ \nu} \\
&= \qty{ (\mu, \alpha) e_ \mu} \convolve e_ \nu + e_ \mu \convolve \qty{ (\nu, \alpha) e_ \nu} \\
&= (\del_ \alpha e_ \mu) \convolve e_ \nu + e_ \mu \convolve (\del _{\alpha} e _{\nu}) .\]
Moreover these operators commute, i.e. $\del_ \alpha \del_ \beta = \del _{\beta} \del _{\alpha}$. Set $\del \da \prod_{ \alpha\in \Phi^+} \del_ \alpha$ where the product here is composition, and view $\del$ as an $m$th order differential operator.

Write $\omega( \lambda+ \rho)\da \sum_{w\in W} (-1)^{\ell(w)} e_{w (\lambda+ \rho)}$ for \( \lambda\in \Lambda^+ \), so $q = \omega ( \rho)$. Rewriting the WCF we have
\[
\omega( \rho) \convolve \ch_ \lambda = \omega( \lambda+ \rho) \qquad \star_1 ,\]
i.e.
\[
\prod_{ \alpha\in \Phi^+}\qty{e_{ \alpha\over 2} - e_{- {\alpha\over 2}}} \convolve \ch_ \lambda = \omega( \lambda+ \rho) .\]

We now apply $\del$ to both sides, followed by $v$. Note that if two factors of $\del$ hit the same factor on the LHS, then some factor $(e_{\alpha\over 2} - e_{-{ \alpha\over 2}})$ receives no derivative; since $v(e_{\alpha\over 2} - e_{-{ \alpha\over 2}}) = 0$ and $v$ is multiplicative, such terms vanish after applying $v$, and the same happens for any term in which a factor of $\del$ is applied to $\ch_\lambda$. So after applying $v$, the total result is zero unless all of the factors of $\del$ are applied to the factors of $q$, one apiece. So applying $v\circ \del$ to $\star_1$ gives
\[
v( \del\, \omega( \rho))\, v( \ch_ \lambda) = v( \del\, \omega( \lambda+ \rho)) .\]

We can compute
\[
v( \del\, \omega ( \rho)) = v\qty{ \del \sum_{w\in W} (-1)^{\ell(w)} e_{ w \rho} } = \sum_{w\in W} (-1)^{\ell(w)} v(\del( e_{ w \rho} ) ) .\]
We have
\[
v ( \del( e_{ w \rho}))
&= v\qty{ \qty{ \prod_{\alpha\in \Phi^+} \del_\alpha } e_{ w \rho} } \\
&= v\qty{\prod_{ \alpha\in \Phi^+} (w \rho, \alpha) e_{ w \rho} } \\
&= \prod_{ \alpha\in \Phi^+} ( w \rho, \alpha) \\
&= \prod_{ \alpha\in \Phi^+} ( \rho, w\inv \alpha) \qquad \star_2 .\]
Note that $w\inv \cdot \Phi^+$ is a permutation of $\Phi^+$, just potentially with some signs changed -- in fact, exactly $n(w\inv)$ of them, the number of positive roots sent to negative roots, and $n(w\inv) = \ell(w\inv) = \ell(w)$.
Thus $\star_2$ is equal to
\[
(-1)^{\ell(w)} \prod_{ \alpha\in \Phi^+}( \rho, \alpha) .\]
Continuing the computation, we have
\[
v( \del\, \omega( \rho))
&= \sum_{w\in W} (-1)^{\ell(w)} (-1)^{\ell(w)} \prod_{ \alpha\in \Phi^+}( \rho, \alpha) \\
&= \size W \prod_{ \alpha\in \Phi^+}( \rho, \alpha) ,\]
which is the LHS. Similarly for the RHS,
\[
v(\del\, \omega( \lambda+ \rho)) = \size W \prod_{ \alpha\in \Phi^+} (\lambda+ \rho, \alpha) .\]
Taking the quotient yields
\[
\dim L(\lambda) = v(\ch_\lambda) = {\size W \prod_{ \alpha\in \Phi^+} (\lambda+ \rho, \alpha) \over \size W \prod_{ \alpha\in \Phi^+}( \rho, \alpha) } = {\prod_{ \alpha\in \Phi^+} (\lambda+ \rho, \alpha) \over \prod_{ \alpha\in \Phi^+}( \rho, \alpha) } .\]
Multiplying the numerator and denominator by $\prod_{ \alpha\in \Phi^+} {2\over (\alpha, \alpha)}$ yields
\[
\dim L( \lambda) = {\prod_{ \alpha\in \Phi^+} \inp{ \lambda+ \rho}{\alpha} \over \prod_{ \alpha\in \Phi^+} \inp{\rho}{\alpha}} .\]
:::

:::{.remark}
If $\alpha\in \Phi^+$, using that $\Delta\dual$ is a base for $\Phi\dual$, one can write \( \alpha\dual = \sum_{i=1}^\ell c_i^{ \alpha} \alpha_i\dual \) for some $c_{i}^{\alpha} \in \ZZ_{\geq 0}$ and $\lambda = \sum_{i=1}^\ell m_i \lambda_i$ for $m_i \in \ZZ_{ \geq 0}$; using that $( \rho, \alpha_i\dual) = \inp{ \rho}{ \alpha_i} = 1$, one can rewrite the dimension formula in terms of the integers $c_i^{\alpha}$ and $m_i$.
:::

## New Directions

:::{.remark}
Where you could go after studying semisimple finite-dimensional Lie algebras over $\CC$:

- Infinite-dimensional representations of such algebras, e.g. the Verma modules $M( \lambda)$. One has a SES $K( \lambda) \injects M( \lambda) \surjects L( \lambda)$, which doesn't split since $M( \lambda)$ is indecomposable.
- Category $\OO$, expressing characters of simples in terms of characters of Vermas.
- Parabolic versions of Verma modules: we've looked at modules induced from $B = H + N$, but one could look at parabolics $P = U + \sum L_i$.
- Coxeter groups, i.e. groups generated by reflections, including Weyl groups. These can be infinite -- which ones are finite?
- Quantize Coxeter groups to get Hecke algebras, which are algebras over $\CC[q, q\inv]$. See Humphreys.
- Representations of Lie groups over $\RR$, semisimple algebraic groups, representations of finite groups of Lie type (see the classification of finite simple groups, e.g. algebraic groups over finite fields).
- Characteristic $p$ representation theory, which is much more difficult.
- Infinite-dimensional Lie algebras over $\CC$, e.g. affine/Kac-Moody algebras, constructed using the Serre relations on generalized Cartan matrices. See also current algebras, loop algebras.
- Quantum groups (quantized enveloping algebras), closely tied to modular representation theory.
:::

# Useful Tricks

- $[[xy]z] = [x[yz]] + [[xz]y] = [x[yz]] - [y[xz]]$.
- $xz.w = zx.w + [xz].w$.
- If $N, M$ are upper triangular, $[NM]$ has zeros along the diagonal.

# Summary of main results

## Computation

- Bracketing elementary matrices:
\[
[e_{ij}, e_{kl}] = \delta_{jk} e_{il} - \delta_{li} e_{kj} .\]

## Classical Algebras

- $\liesl_2(\FF)$ has dimension 3, corresponds to type $A_1$, and is generated by
\[
x=\left(\begin{array}{ll}0 & 1 \\ 0 & 0\end{array}\right), \quad
h=\left(\begin{array}{cc} 1 & 0 \\ 0 & -1 \end{array}\right), \quad
y=\left(\begin{array}{ll}0 & 0 \\ 1 & 0\end{array}\right),
\\
[x, y]=h, \quad[h, x]=2 x, \quad[y, h]=2 y .\]
- $\liesl_n(\FF)$ has dimension $n^2-1$ and corresponds to type $A_{n-1}$.

## Definitions

- $N_L(S) = \ts{x\in L \st [xS] \subseteq S}$.
- $C_L(S) = \ts{x\in L \st [x S] = 0}$.
- $L$ is simple iff $[LL] \neq 0$ and $\Id(L) = \ts{0, L}$ (the first condition excludes the one-dimensional algebra). - $L$ is solvable iff $L^{(n+1)} \da [ L^{(n)} L^{(n)}] \converges{n\to\infty}\to 0$. - $L$ is semisimple iff $\Rad(L) \da \sum_{I \normal L \text { solvable}} I = 0$. - $L$ is nilpotent iff $L^{n+1} \da [L L^{n}] \converges{n\to\infty}\to 0$. - The Killing form is $\kappa(x,y)\da \Trace(\ad_x \circ \ad_y)$. - Checking $\lmod$ actions: - $( \lambda_1 x + \lambda_2 y).\vector v = \lambda_1 (x.\vector v) + \lambda_2(y.\vector v)$ - $x.(\lambda_1 \vector v+ \lambda_2 \vector w) = \lambda_1 (x.\vector v) + \lambda_2(x.\vector w)$ - $[xy].\vector v = x.(y.\vector v) - y.(x.\vector v)$. This is the axiom that introduces weird formulas for homs/tensors/duals. - Duals: $(x.f)(\vector v) \da -f(x.\vector v)$ - Tensors: $x.(\vector v\tensor \vector w) \da \qty{ (x.\vector v)\tensor \vector w} + \qty{\vector v\tensor (x.\vector w)}$. - The Casimir element: for $\phi: L\to \liegl(V)$ an irreducible representation of a semisimple algebra $L$, define $\beta(x, y) \da \Trace(\phi(x)\circ \phi(y))$. Pick a basis and dual basis $\ts{e_i}, \ts{e_i\dual}$ with respect to $\beta$ and define $c_\phi \da \sum \phi(e_i)\phi(e_i\dual) \in \Endo(V)$. - This is an endomorphism of $V$ commuting with the $L\dash$action which has nonzero trace. ## Results - Engel's theorem: if every $x\in L$ is ad-nilpotent then $L$ is nilpotent. - Since conversely (for free) $L$ nilpotent implies $\ad_x$ is nilpotent for every $x$, this becomes an iff: $L$ is nilpotent iff every $x\in L$ is ad-nilpotent. - Lie's theorem: if $L\leq \liegl(V)$ is solvable then $L$ stabilizes a flag in $V$ (i.e. $L$ has an upper triangular basis). - Cartan's criterion: if $L\leq \liegl(V)$ and $\Trace(xy) = 0$ for all $x\in [LL]$ and $y\in L$, then $L$ is solvable. - $L$ is semisimple iff $\kappa_L$ is nondegenerate. - If $L$ is semisimple, it decomposes as $L = \bigoplus L_i$ where the $L_i$ are uniquely determined simple ideals, and every simple ideal of $L$ is one such $L_i$. - $\ker (L \mapsvia{\ad_L} \liegl(L)) = Z(L)$ and simple algebras are centerless, so any simple Lie algebra is isomorphic to a linear Lie algebra, i.e. a subalgebra of some $\liegl(V)$, namely $\im \ad_L \leq \liegl(L)$. - Schur's lemma: if $L \mapsvia{\phi} \liegl(V)$ is an irreducible representation, then $C_{\liegl(V)}(\phi(L)) = \CC I$, i.e. the only endomorphisms of $V$ commuting with every $\phi(x)$ are scalar operators. Equivalently, $\Endo_L(V) \cong \CC$. - Weyl's theorem: if $L$ is semisimple and $\phi: L\to \liegl(V)$ is a finite-dimensional representation then $\phi$ is completely reducible. --- title: "Problem Sets: Lie Algebras" subtitle: "Problem Set 1" author: - name: D. Zack Garza affiliation: University of Georgia email: dzackgarza@gmail.com date: Fall 2022 order: 1 --- # Problem Set 1 ## Section 1 :::{.problem title="Humphreys 1.1"} Let $L$ be the real vector space $\mathbf{R}^{3}$. Define $[x y]=x \times y$ (cross product of vectors) for $x, y \in L$, and verify that $L$ is a Lie algebra. Write down the structure constants relative to the usual basis of $\mathbf{R}^{3}$. ::: :::{.solution} It suffices to check the 3 axioms given in class that define a Lie algebra: - **L1 (Bilinearity)**: This can be quickly seen from the formula \[ a\cross b = \norm{a}\cdot \norm{b}\sin(\theta_{ab}) \hat{n}_{ab} \] where $\hat{n}_{ab}$ is the vector orthogonal to both $a$ and $b$ given by the right-hand rule.
The scalar homogeneity follows readily from a direct computation: \[ (ra)\cross (tb) &= \norm{ra} \cdot \norm{tb} \sin(\theta_{ra, tb}) \hat{n}_{ra, tb} \\ &= (rt) \norm{a} \cdot \norm{b} \sin(\theta_{a, b}) \hat{n}_{a, b} \\ &= (rt)\qty{a\cross b} ,\] where we've used the fact that the angle between $a$ and $b$ is the same as the angle between any of their (positive) scalar multiples, as is their normal. Additivity in each slot is the distributivity of the cross product over vector addition, which can be checked directly on coordinates. - **L2**: that $a\times a = 0$ readily follows from the same formula, since $\sin( \theta_{a, a}) = \sin(0) = 0$. - **L3 (The Jacobi identity)**: This is most easily seen from the "BAC - CAB" formula, \[ a\times (b\times c) = b\inner{a}{c} - c\inner{a}{b} .\] We proceed by expanding the Jacobi expression: \[ a\times(b\times c) + c\times (a\times b) + b\times (c\times a) &= {\color{blue} b\inner{a}{c} } - {\color{red} c\inner{a}{b} }\\ &\quad + {\color{green} a \inner{ c }{ b } } - {\color{blue} b \inner{ c }{ a } } \\ &\quad + {\color{red} c \inner{ a }{ b } } - {\color{green} a \inner{ b }{ c } } \\ &= 0 .\] For the structure constants, let $\ts{e_1, e_2, e_3}$ be the standard Euclidean basis for $\RR^3$; we can then write $e_i\times e_j = \sum_{k=1}^3 c_{ij}^k e_k$ and we would like to determine the $c_{ij}^k$. One can compute the following multiplication table: | $e_i\cross e_j$ | $e_1$ | $e_2$ | $e_3$ | |----------------- |-------- |-------- |-------- | | $e_1$ | $0$ | $e_3$ | $-e_2$ | | $e_2$ | $-e_3$ | $0$ | $e_1$ | | $e_3$ | $e_2$ | $-e_1$ | $0$ | Thus the structure constants are given by the totally antisymmetric Levi-Civita symbol, \[ c_{ij}^k = \eps_{ijk} \da \begin{cases} 0 & \text{if any index $i,j,k$ is repeated} \\ \sgn \sigma_{ijk} & \text{otherwise}, \end{cases} \] where $\sigma_{ijk} \in S_3$ is the permutation sending $(1,2,3)$ to $(i, j, k)$ and $\sgn \sigma$ is the sign homomorphism. :::{.remark} An example to demonstrate how the Levi-Civita symbol works: \[ e_1\times e_2 = c_{12}^1 e_1 + c_{12}^2 e_2 + c_{12}^3 e_3 = 0 e_1 + 0 e_2 + 1e_3 \] since the first two terms have a repeated index and \[ c_{12}^3 = \eps_{123} = \sgn \sigma_{123} = \sgn(\id) = 1 ,\] while for instance $c_{13}^2 = \eps_{132} = \sgn\, (2\,3) = -1$, using that $\sgn \sigma = (-1)^m$ where $m$ is the number of transpositions needed to write $\sigma$. ::: ::: :::{.problem title="Humphreys 1.6"} Let $x \in \liegl_n(\FF)$ have $n$ distinct eigenvalues $a_{1}, \ldots, a_{n}$ in $\FF$. Prove that the eigenvalues of $\ad_x$ are precisely the $n^{2}$ scalars $a_{i}-a_{j}$ for $1 \leq i, j \leq n$, which of course need not be distinct. ::: :::{.solution} For a fixed $n$, let $e_{ij} \in \liegl_n(\FF)$ be the matrix with a 1 in the $(i, j)$ position and zeros elsewhere. We will use the following fact: \[ e_{ij} e_{kl} = \delta_{jk} e_{il} ,\] where $\delta_{jk} = 1 \iff j=k$, which implies that \[ [e_{ij} e_{kl} ] = e_{ij}e_{kl} - e_{kl}e_{ij} = \delta_{jk} e_{il} - \delta_{li}e_{kj} .\] Suppose without loss of generality[^diag_wlog] that $x$ is diagonal and of the form $x = \diag(a_1, a_2, \cdots, a_n)$. Then each $e_{ij}$ is an eigenvector for left-multiplication by $x$, since a direct check via matrix multiplication shows $xe_{ij} = a_i e_{ij}$. We claim that every $e_{ij}$ is an eigenvector of $\ad_x$ with eigenvalue $a_i - a_j$. Noting that the $e_{ij}$ are also eigenvectors for right-multiplication, satisfying $e_{ij}x = a_j e_{ij}$, one readily computes \[ \ad_x e_{ij} \da [x, e_{ij}] = xe_{ij} - e_{ij} x = a_i e_{ij} - a_j e_{ij} = (a_i - a_j)e_{ij} ,\] showing that each of the $n^2$ (not necessarily distinct) scalars $a_i - a_j$ is an eigenvalue.
Since $\ad_x$ expanded in the basis $\ts{e_{ij}}_{1\leq i, j \leq n}$ is an $n^2\times n^2$ matrix, and the $e_{ij}$ form an eigenbasis, this exhausts all possible eigenvalues. [^diag_wlog]: If $x$ is not diagonal, one can use that $x$ is diagonalizable over $\FF$ since $x$ has distinct eigenvalues in $\FF$. So one can reduce to the diagonal case by a change-of-basis of $\FF^n$ that diagonalizes $x$. ::: :::{.problem title="Humphreys 1.9, one Lie type only"} When $\characteristic \FF =0$, show that each classical algebra $L=\mathrm{A}_{\ell}, \mathrm{B}_{\ell}, \mathrm{C}_{\ell}$, or $\mathrm{D}_{\ell}$ is equal to $[L L]$. (This shows again that each algebra consists of trace 0 matrices.) ::: :::{.solution} We will check this for type $A_n$, corresponding to $L \da \liesl_{n+1}$. Since $[LL] \subseteq L$, it suffices to show $L \subseteq [LL]$, and we can further reduce to writing every basis element of $L$ as a commutator in $[LL]$. Note that $L$ has a standard basis given by the matrices - $\ts{e_{ij} \st i < j}$ spanning $\lien^+$ (containing the $x_i \da e_{i, i+1}$), - $\ts{h_i \da e_{ii} - e_{i+1, i+1} \st 1\leq i \leq n}$ spanning $\lieh$, and - $\ts{e_{ij} \st i > j}$ spanning $\lien^-$ (containing the $y_i \da e_{i+1, i}$). Considering the equation $[e_{ij} e_{kl} ] = \delta_{jk} e_{il} - \delta_{li}e_{kj}$, one can choose $j=k$ to preserve the first term and $l\neq i$ to kill the second term. So letting $i\neq j$ and choosing any $t\neq i, j$ (possible since there are $n+1 \geq 3$ indices), both $e_{it}$ and $e_{tj}$ are off-diagonal and hence lie in $L$, and we have \[ [e_{it} e_{tj}] = \delta_{tt} e_{ij} - \delta_{ji}e_{tt} = e_{ij} ,\] yielding all of the off-diagonal basis elements, in particular the $x_i$ and $y_i$. (For $\liesl_2$, where no such $t$ exists, one instead uses $x = {1\over 2}[hx]$, $y = -{1\over 2}[hy]$, and $h = [xy]$.) But in fact we are done, using the fact that $h_i = [x_i y_i]$. ::: :::{.problem title="Humphreys 1.11"} Verify that the commutator of two derivations of an $\FF\dash$algebra is again a derivation, whereas the ordinary product need not be. ::: :::{.solution} We want to show that $[\Der(L) \Der(L)] \subseteq \Der(L)$, so let $f,g\in \Der(L)$. The result follows from a direct computation; letting $D \da [fg]$, we have \[ D(ab) = [fg](ab) &= (fg-gf)(ab) \\ &= fg(ab) - gf(ab) \\ &= f\qty{g(a)b + ag(b) } - g\qty{ f(a)b + af(b)} \\ &= {\color{blue} (fg)(a)b } + {\color{red} g(a)f(b)} + {\color{red} f(a)g(b) } + {\color{green} a (fg)(b)} \\ &\quad - {\color{blue} (gf)(a) b } - {\color{red} f(a)g(b)} - {\color{red} g(a)f(b) } - {\color{green} a(gf)(b)} \\ &= {\color{blue} [fg](a) b} + {\color{green} a [fg](b) } \\ &= D(a)b + aD(b) .\] To see that ordinary products of derivations need not be derivations, consider the operators $D_x \da \dd{}{x}, D_y \da \dd{}{y}$ acting as derivations on the $\RR\dash$algebra $\RR[x,y]$. Take $f(x,y) = x+y$ and $g(x,y) = xy$, so that $fg = x^2 y+ xy^2$. Then $[D_x D_y] = 0$ since mixed partial derivatives are equal, but if the ordinary product $D_x D_y$ were a derivation, the Leibniz rule would force \[ D_x D_y(fg) = \qty{D_x D_y f}g + f\qty{D_x D_y g} = 0\cdot g + f\cdot 1 = x + y ,\] whereas in fact \[ D_x D_y (fg) = D_x \qty{x^2 + 2xy} = 2x + 2y \neq x + y .\] ::: :::{.problem title="Humphreys 1.12"} Let $L$ be a Lie algebra over an algebraically closed field $\FF$ and let $x \in L$. Prove that the subspace of $L$ spanned by the eigenvectors of $\ad_x$ is a subalgebra. ::: :::{.solution} Let $E_x \subseteq L$ be the subspace spanned by eigenvectors of $\ad_x$; it suffices to show $[E_x E_x] \subseteq E_x$, and by bilinearity of the bracket it is enough to check this on eigenvectors. So let $y_i \in E_x$ be eigenvectors, so that $\ad_x(y_i) = \lambda_i y_i$ for some scalars \( \lambda_i \in \FF \); we want to show $\ad_x([y_1 y_2]) = \lambda_{12} [y_1 y_2]$ for some scalar \( \lambda_{12} \).
Note that the Jacobi identity is equivalent to $\ad$ acting as a derivation with respect to the bracket, i.e. \[ \ad_x([yz]) = [\ad_x(y) z] + [y \ad_x(z)] \implies [x[yz]] = [[xy]z] + [y[xz]] .\] The result then follows from a direct computation: \[ \ad_x([y_1y_2]) &= [[xy_1]y_2] + [y_1 [xy_2]] \\ &= [ \lambda_1 y_1 y_2] + [y_1 \lambda_2 y_2] \\ &= (\lambda_1 + \lambda_2)[y_1 y_2] .\] ::: ## Section 2 :::{.problem title="Humphreys 2.1"} Prove that the set of all inner derivations $\ad_x, x \in L$, is an ideal of $\Der L$. ::: :::{.solution} It suffices to show $[\Der(L) \Inn(L)] \subseteq \Inn(L)$, so let $f\in \Der(L)$ and $\ad_x \in \Inn(L)$. The result follows from the following check: \[ [f\ad_x](l) &= (f\circ \ad_x)(l) - (\ad_x \circ f)(l) \\ &= f([xl]) - [x f(l)] \\ &= [f(x) l] + [x f(l)] - [x f(l)] \\ &= [f(x) l] \\ &= \ad_{f(x)}(l), \qquad \text{and } \ad_{f(x)} \in \Inn(L) .\] ::: :::{.problem title="Humphreys 2.2"} Show that $\mathfrak{s l}_n( \FF)$ is precisely the derived algebra of $\mathfrak{g l}_n( \FF)$ (cf. Exercise 1.9). ::: :::{.solution} We want to show $\liegl_n(\FF)^{(1)} \da [\liegl_n(\FF) \liegl_n(\FF)] = \liesl_n(\FF)$. $\subseteq$: This is immediate from the fact that for any matrices $A$ and $B$, \[ \tr([AB]) = \tr(AB -BA) = \tr(AB) - \tr(BA) = \tr(AB) - \tr(AB) = 0 .\] $\supseteq$: From a previous exercise, we know that $[\liesl_n(\FF) \liesl_n(\FF)] = \liesl_n(\FF)$, and since $\liesl_n(\FF) \subseteq \liegl_n(\FF)$ we have \[ \liesl_n(\FF) = \liesl_n(\FF)^{(1)} \subseteq \liegl_n(\FF)^{(1)} .\] ::: :::{.problem title="Humphreys 2.5"} Suppose $\operatorname{dim} L=3$ and $L=[L L]$. Prove that $L$ must be simple. Observe first that any homomorphic image of $L$ also equals its derived algebra. Recover the simplicity of $\mathfrak{s l}_2( \FF)$ when $\characteristic \FF \neq 2$. ::: :::{.solution} Let $I\normal L$ be a nonzero proper ideal; then $\dim L/I < \dim L$ forces $\dim L/I \in \ts{1, 2}$. Since $L\surjects L/I$, the latter is the homomorphic image of a Lie algebra and thus $(L/I)^{(1)} = L/I$ by the hint. Note that in particular, $L/I$ is not abelian. We proceed by cases: - $\dim L/I = 1$. - In this case, $L/I = \FF x$ is generated by a single element $x$. Since $[xx] = 0$ in any Lie algebra, we have $(\FF x)^{(1)} = 0$, contradicting that $L/I$ is not abelian. $\contradiction$ - $\dim L/I = 2$: Write $L/I = \FF x + \FF y$ for distinct generators $x,y$, and consider the multiplication table for the bracket. - If $[xy] = 0$, then $L/I$ is abelian, a contradiction. $\contradiction$ - Otherwise, without loss of generality $[xy] =x$ as described at the end of section 1.4. In this case, $(L/I)^{(1)} \subseteq \FF x \subsetneq L/I$, again a contradiction. $\contradiction$ So no such ideals $I$ can exist, forcing $L$ to be simple. Applying this to $L \da \liesl_2(\FF)$, we have $\dim_\FF \liesl_2(\FF) = 2^2-1 = 3$, and from a previous exercise we know $\liesl_2(\FF)^{(1)} = \liesl_2(\FF)$, so the above argument applies and shows simplicity. ::: :::{.problem title="Humphreys 2.10"} Let $\sigma$ be the automorphism of $\mathfrak{s l}_2(\FF)$ defined in (2.3). Verify that - $\sigma(x)=-y$, - $\sigma(y)=-x$, - $\sigma(h)=-h$. Note that this automorphism is defined as \[ \sigma = \exp(\ad_x)\circ \exp(\ad_{-y}) \circ \exp(\ad_x) .\] ::: :::{.solution} We recall that $\exp \ad_x(y) \da \sum_{n\geq 0}{1\over n!} \ad_x^n(y)$, where the exponent denotes an $n\dash$fold composition of operators.
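Before expanding these series by hand, it is worth noting that the entire verification can be checked mechanically. Here is a minimal sketch (our own aside, not part of the exercise) in SymPy, writing $\ad_x$ and $\ad_y$ as $3\times 3$ matrices in the ordered basis $(x, h, y)$, using the bracket table recalled below:

```python
# Numeric check of sigma = exp(ad_x) exp(ad_{-y}) exp(ad_x) on sl_2.
# Columns are the images of the basis vectors (x, h, y).
from sympy import Matrix

ad_x = Matrix([
    [0, -2, 0],   # ad_x(x) = 0, ad_x(h) = -2x, ad_x(y) = h
    [0,  0, 1],
    [0,  0, 0],
])
ad_y = Matrix([
    [ 0, 0, 0],   # ad_y(x) = -h, ad_y(h) = 2y, ad_y(y) = 0
    [-1, 0, 0],
    [ 0, 2, 0],
])
# These operators are nilpotent, so the exponentials are finite sums.
sigma = ad_x.exp() * (-ad_y).exp() * ad_x.exp()
assert sigma * Matrix([1, 0, 0]) == Matrix([0, 0, -1])  # sigma(x) = -y
assert sigma * Matrix([0, 1, 0]) == Matrix([0, -1, 0])  # sigma(h) = -h
assert sigma * Matrix([0, 0, 1]) == Matrix([-1, 0, 0])  # sigma(y) = -x
```

The hand computation below arrives at the same conclusion.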
To compute these power series, first note that $\ad_t(t) = 0$ for $t=x,y,h$ by axiom **L2**, so \[ (\exp \ad_t)(t) = 1(t) + \ad_t(t) + {1\over 2} \ad_t^2(t) + \cdots = 1(t) = t \] where $1$ denotes the identity operator. It is worth noting that if $\ad_t^n(t') = 0$ for some $n$ and some fixed $t,t'$, then it is also zero for all higher $n$ since each successive term involves bracketing with the previous term: \[ \ad^{n+1}_t(t') = [t\, \ad_t^n(t')] = [t\, 0] = 0 .\] We first compute some individual nontrivial terms that will appear in $\sigma$. The first order terms are given by standard formulas, which we collect into a multiplication table for the bracket: | | $x$ | $h$ | $y$ | |----- |------ |------- |------- | | $x$ | $0$ | $-2x$ | $h$ | | $h$ | $2x$ | $0$ | $-2y$ | | $y$ | $-h$ | $2y$ | $0$ | We can thus read off the following: - $\ad_x(y) = h$ - $\ad_x(h) = -2x$ - $\ad_{-y}(x) = [-y x] = [xy] = h$ - $\ad_{-y}(h) = [-yh] = [hy] = -2y$ For reference, we compute and collect higher order terms: - $\ad_x^n(y)$: - $\ad_x^1(y) = h$ from above, - $\ad_x^2(y) = \ad_x([xy]) = \ad_x(h) = [xh] = -[hx] = -2x$, - $\ad_x^3(y) = \ad_x(-2x) = 0$, so $\ad_x^{\geq 3}(y) = 0$. - $\ad_x^n(h)$: - $\ad_x^1(h) = -2x$ from above, - $\ad_x^2(h) = \ad_x(-2x) = 0$, so $\ad_x^{\geq 2}(h) = 0$. - $\ad_{-y}^n(x)$: - $\ad_{-y}^1(x) = h$ from above, - $\ad_{-y}^2(x) = \ad_{-y}(h) = [-yh] = [hy] = -2y$, - $\ad_{-y}^3(x) = \ad_{-y}(-2y) = 0$, so $\ad_{-y}^{\geq 3}(x) = 0$. - $\ad_{-y}^n(h)$: - $\ad_{-y}^1(h) = -2y$ from above, and so $\ad_{-y}^{\geq 2}(h) = 0$. Finally, we can compute the individual terms of $\sigma$: \[ (\exp \ad_x)(x) &= x \\ \\ (\exp \ad_x)(h) &= 1(h) + \ad_x(h) \\ &= h + (-2x) \\ &= h-2x \\ \\ (\exp \ad_x)(y) &= 1(y) + \ad_x(y) + {1\over 2}\ad_x^2(y) \\ &= y + h + {1\over 2}(-2x) \\ &= y+h-x \\ \\ (\exp \ad_{-y})(x) &= 1(x) + \ad_{-y}(x) + {1\over 2} \ad^2_{-y}(x) \\ &= x + h +{1\over 2}(-2y) \\ &= x+h-y \\ \\ (\exp \ad_{-y})(h) &= 1(h) + \ad_{-y}(h) \\ &= h - 2y \\ \\ (\exp \ad_{-y})(y) &= y ,\] and assembling everything together yields \[ \sigma(x) &= (\exp \ad_x \circ \exp \ad_{-y} \circ \exp \ad_x)(x) \\ &= (\exp \ad_x \circ \exp \ad_{-y})(x) \\ &= (\exp \ad_x)(x+h-y) \\ &= (x) + (h-2x) - (y+h-x) \\ &= -y \\ \\ \sigma(y) &= (\exp \ad_x \circ \exp \ad_{-y} \circ \exp \ad_x)(y) \\ &= (\exp \ad_x \circ \exp \ad_{-y} )(y+h-x) \\ &= \exp \ad_x\qty{(y) + (h-2y) - (x+h-y) } \\ &= \exp \ad_x\qty{-x} \\ &= -x \\ \\ \sigma(h) &= (\exp \ad_x \circ \exp \ad_{-y} \circ \exp \ad_x)(h) \\ &= (\exp \ad_x \circ \exp \ad_{-y} )(h-2x) \\ &= (\exp \ad_x )( (h-2y) - 2(x+h-y) ) \\ &= (\exp \ad_x )(-2x -h ) \\ &= -2(x) - (h-2x) \\ &= -h .\] ::: --- title: "Problem Sets: Lie Algebras" subtitle: "Problem Set 2" author: - name: D. Zack Garza affiliation: University of Georgia email: dzackgarza@gmail.com date: Fall 2022 order: 2 --- # Problem Set 2 ## Section 3 :::{.problem title="Humphreys 3.1"} Let $I$ be an ideal of $L$. Then each member of the derived series or descending central series of $I$ is also an ideal of $L$. ::: :::{.solution} To recall definitions: - The derived series of $L$ is $L \da L^{(0)} \contains L^{(1)} \da [LL] \contains L^{(2)} \da [[LL] [LL]] \contains \cdots$ and termination implies solvability. - The descending central series of $L$ is $L \contains L^1 \da [LL] \contains L^2 \da [L [LL]] \contains \cdots$, and termination implies nilpotency (and hence solvability since $[LL] \subseteq L \implies L^{(i)} \subseteq L^i$). - $I \normal L \iff [L,I] \subseteq I$.
For the derived series, inductively suppose $I \da I^{(i)}$ is an ideal, so $[L I] \subseteq I$. We then want to show $I^{(i+1)} \da [I, I]$ is an ideal, so $[L, [I, I] ] \subseteq [I, I]$. Letting $l\in L$, and $i,j\in I$, one can use the Jacobi identity, antisymmetry of the bracket, and the fact that $[I, I] \da I^{(i+1)} \subseteq I$ to write \[ [L, [I, I]] &\ni [l[ij]] \\ &= [[li]j] - [ [lj] i] \\ &\in [[L,I], I] - [[L,I], I] \\ &\subseteq [[L,I], I] \subseteq [I,I] .\] Similarly, for the lower central series, inductively suppose $I\da I^i$ is an ideal, so $[L, I] \subseteq I$; we want to show $[L, [L, I]] \subseteq [L, I]$. Again using the Jacobi identity and antisymmetry, we have \[ [L, [L, I]] &\ni [l_1, [l_2, i]] \\ &= [[i, l_1], l_2] - [[l_2, l_1], i] \\ &\in [[I,L], L] + [ [L, L], I] \\ &\subseteq [I, L] + [L, I] \subseteq [L, I] .\] ::: :::{.problem title="Humphreys 3.4"} Prove that $L$ is solvable (resp. nilpotent) if and only $\ad(L)$ is solvable (resp. nilpotent). ::: :::{.solution} $\implies$: By the propositions in Section 3.1 (resp. 3.2), the homomorphic image of any solvable (resp. nilpotent) Lie algebra is again solvable (resp. nilpotent). $\impliedby$: There is an exact sequence \[ 0 \to Z(L) \to L \mapsvia{\ad} \ad(L) \to 0 ,\] exhibiting $\ad(L)\cong L/Z(L)$. Thus if $\ad(L)$ is solvable, noting that centers are always solvable, we can use the fact that the 2-out-of-3 property for short exact sequences holds for solvability. Moreover, by the proposition in Section 3.2, if $L/Z(L)$ is nilpotent then $L$ is nilpotent. ::: :::{.problem title="Humphreys 3.6"} Prove that the sum of two nilpotent ideals of a Lie algebra $L$ is again a nilpotent ideal. Therefore, $L$ possesses a unique maximal nilpotent ideal. Determine this ideal for the nonabelian 2-dimensional algebra $\FF x + \FF y$ where $[xy]=x$, and the 3-dimensional algebra $\FF x + \FF y + \FF z$ where - $[xy] = z$ - $[xz]=y$ - $[yz] = 0$ ::: :::{.solution} To see that sums of nilpotent ideals are nilpotent, suppose $I^N = J^M = 0$ are nilpotent ideals. Then $(I+J)^{M+N} \subseteq I^N + J^M = 0$: expanding, each term is an iterated bracket of $M+N+1$ elements of $I \cup J$, hence contains more than $N$ elements of $I$ or more than $M$ elements of $J$, and by the absorbing property of ideals such a term lies in $I^N$ or $J^M$ respectively. One can now construct a maximal nilpotent ideal in $L$ by defining $M$ as the sum of all nilpotent ideals in $L$. That this is unique is clear, since $M$ is nilpotent, so if $M'$ is another maximal nilpotent ideal then $M \subseteq M'$ and $M' \subseteq M$. Consider the 2-dimensional algebra $L \da \FF x + \FF y$ where $[xy] = x$ and let $I$ be the maximal nilpotent ideal. Note that $L$ is not nilpotent since $L^k = \FF x$ for all $k\geq 1$: indeed $L^1 = \FF x$ and $[L, \FF x] = \FF x$ (since all brackets are either zero or $\pm x$). However, this also shows that the subalgebra $\FF x$ is an ideal, and is in fact a nilpotent ideal since $[\FF x, \FF x] = 0$. Although $\FF y$ is a nilpotent subalgebra, it is not an ideal since $[L, \FF y] = \FF x$. So $I$ is at least 1-dimensional, since it contains $\FF x$, and at most 1-dimensional, since it is not all of $L$, forcing $I = \FF x$. Consider now the 3-dimensional algebra $L \da \FF x + \FF y + \FF z$ with the multiplication table given in the problem statement above. Note that $L$ is not nilpotent, since $L^1 = \FF y + \FF z = L^k$ for all $k\geq 2$. This follows from computing $[L, \FF y + \FF z]$: bracketing $x$ against $y$ and $z$ yields $z$ and $y$ respectively, hitting both generators, but no bracket produces $x$.
So, similarly to the previous algebra, the subspace $J \da \FF y + \FF z$ is an ideal, and it is nilpotent since all brackets between $y$ and $z$ vanish (it is in fact abelian). By similar dimensional considerations, $J$ must equal the maximal nilpotent ideal. ::: :::{.problem title="Humphreys 3.10"} Let $L$ be a Lie algebra, $K$ an ideal of $L$ such that $L / K$ is nilpotent and such that $\ro{\ad_x}{K}$ is nilpotent for all $x \in L$. Prove that $L$ is nilpotent. ::: :::{.solution} Suppose that $M \da L/K$ is nilpotent, so the lower central series terminates and $M^m = 0$ for some $m$. Unwinding definitions, every $m\dash$fold iterated bracket of elements of $L$ lands in $K$; in particular, $(\ad_x)^m(L) \subseteq K$ for every $x\in L$. By hypothesis $\ro{\ad_x}{K}$ is nilpotent, say $\qty{\ro{\ad_x}{K}}^t = 0$, and so \[ (\ad_x)^{m+t}(L) \subseteq (\ad_x)^t(K) = 0 .\] Thus every $x\in L$ is ad-nilpotent, and Engel's theorem forces $L$ to be nilpotent. ::: ## Section 4 :::{.problem title="Humphreys 4.1"} Let $L= \liesl(V)$. Use Lie's Theorem to prove that $\operatorname{Rad} L=Z(L)$; conclude that $L$ is semisimple. > Hint: observe that $\operatorname{Rad} L$ lies in each maximal solvable subalgebra $B$ of $L$. Select a basis of $V$ so that $B=L \cap \mathfrak{t}(n, \FF)$, and notice that $B^t$ is also a maximal solvable subalgebra of $L$. Conclude that $\operatorname{Rad} L \subset L \cap \mathfrak{d}(n, \FF)$ (diagonal matrices), then that $\operatorname{Rad} L=Z(L)$. ::: :::{.solution} Let $R = \Rad(L)$ be the radical (maximal solvable ideal) of $L$. Using the hint, if $S \leq L$ is a maximal solvable subalgebra then it must contain $R$. By (a corollary of) Lie's theorem, $S$ stabilizes a flag and thus there is a basis with respect to which all elements of $S$ (and thus $R$) are upper triangular, i.e. $S \subseteq \lieb$. However, taking the transpose of every element of $S$ again yields a maximal solvable subalgebra, which is lower triangular in the same basis and thus contained in $\lieb^-$; since $R$ lies in every maximal solvable subalgebra, $R \subseteq \lieb \intersect \lieb^- = \lieh$, which consists of just diagonal matrices. We have $Z(L) \subseteq R$ since centers are solvable ideals, and the claim is that conversely $R \subseteq Z(L)$. It suffices to show that $R$ consists of scalar matrices, since scalar matrices commute with everything and thus lie in $Z(L)$. This follows by letting $\ell = \sum a_i e_{i,i}$ be an element of $\Rad(L)$ and bracketing elements of $\liesl_n(\FF)$ against it. Bracketing elementary matrices $e_{i, j}$ with $i\neq j$ yields \[ [e_{i,j}, \ell] = a_j e_{i, j} - a_i e_{i, j} ,\] which must be an element of $\Rad(L)$ (an ideal) and thus diagonal, which forces $a_j = a_i$ for all $i, j$. To conclude that $L$ is semisimple, note that a scalar traceless matrix is necessarily zero, and so $\Rad(L) = Z(\liesl(V)) = 0$. This suffices since $\Rad(L) = 0 \iff L$ is semisimple. ::: :::{.problem title="Humphreys 4.3, Failure of Lie's theorem in positive characteristic"} Consider the $p \times p$ matrices: \[ x=\left[\begin{array}{cccccc} 0 & 1 & 0 & . & . & 0 \\ 0 & 0 & 1 & 0 & . & 0 \\ . & . & \cdot & \cdot & \cdot & \cdot \\ 0 & \cdot & \cdot & \cdot & \cdot & 1 \\ 1 & . & \cdot & \cdot & \cdots & 0 \end{array}\right] ,\qquad y = \diag(0,1,2,3,\cdots,p-1) .\] Check that $[x, y]=x$, hence that $x$ and $y$ span a two dimensional solvable subalgebra $L$ of $\mathfrak{g l}(p, F)$. Verify that $x, y$ have no common eigenvector.
::: :::{.solution} Note that left-multiplication by $x$ cycles all rows of a matrix up by one position, while right-multiplication by $x$ cycles the columns to the right. Thus \[ xy - yx &= \begin{bmatrix} 0 & 1 & 0 & 0 & 0 \\ 0 & 0 & 2 & 0 & 0 \\ 0 & \vdots & \vdots & \ddots & 0 \\ 0 & 0 & \cdots & 0 & p-1 \\ 0 & 0 & \cdots & 0 & 0 \end{bmatrix} - \begin{bmatrix} 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 1 & 0 & 0 \\ \vdots & \cdots & 0 & 2 & 0 \\ 0 & 0 & \cdots & 0 & 3 \\ p-1 & 0 & \cdots & 0 & 0 \end{bmatrix} \\ &= \begin{bmatrix} 0 & 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 0 & 1 \\ -(p-1) & 0 & 0 & 0 & 0 \end{bmatrix}\\ &\equiv \begin{bmatrix} 0 & 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 0 & 1 \\ 0 & 0 & 0 & 0 & 0 \end{bmatrix} \pmod p \\ &= x .\] Thus $L \da \FF x + \FF y$ is a solvable subalgebra, since $L^{(1)} = \FF x$ and so $L^{(2)} = 0$. Moreover, note that every standard basis vector $e_i$ (indexing from $0$ to $p-1$) is an eigenvector for $y$ since $y(e_i) = i e_i$, and since the eigenvalues $0, 1, \cdots, p-1$ are distinct, any common eigenvector would have to be a scalar multiple of some $e_i$. But no $e_i$ is an eigenvector for $x$, since $x(e_i) = e_{i-1}$ for $1\leq i \leq p-1$ and $x(e_0) = e_{p-1}$, i.e. $x$ cycles the various basis vectors. ::: :::{.problem title="Humphreys 4.4"} For arbitrary $p$, construct a counterexample to Corollary $\mathrm{C}$[^cor_c] as follows: Start with $L \subset \liegl_p(\FF)$ as in Exercise 3. Form the vector space direct sum $M=L \oplus \FF^p$, and make $M$ a Lie algebra by decreeing that $\FF^p$ is abelian, while $L$ has its usual product and acts on $\FF^p$ in the given way. Verify that $M$ is solvable, but that its derived algebra $\left(=\FF x+ \FF^p\right)$ fails to be nilpotent. [^cor_c]: Corollary C states that if $L$ is solvable then every $x\in L^{(1)}$ is ad-nilpotent, and thus $L^{(1)}$ is nilpotent. ::: :::{.solution} For pairs $A_1 \oplus v_1$ and $A_2 \oplus v_2$ in $M$, we'll interpret the given definition of the bracket as \[ [A_1 \oplus v_1, A_2 \oplus v_2] \da [A_1, A_2] \oplus (A_1(v_2) - A_2(v_1)) ,\] where $A_i(v_j)$ denotes evaluating an endomorphism $A\in \liegl_p(\FF)$ on a vector $v\in \FF^p$. We also define $L = \FF x + \FF y$ with $x$ and $y$ the given matrices in the previous problem, and note that $L$ is solvable with derived series \[ L = \FF x \oplus \FF y \contains L^{(1)} = \FF x \contains L^{(2)} = 0 .\] Consider the derived series of $M$ -- by inspecting the above definition, we have \[ M^{(1)} \subseteq L^{(1)} \oplus \FF^p = \FF x \oplus \FF^p .\] Moreover, we have \[ M^{(2)} \subseteq L^{(2)} \oplus \FF^p = 0 \oplus \FF^p ,\] which follows from considering bracketing two elements in $M^{(1)}$: set $w_{ij} \da A_i(v_j) - A_j(v_i)$, then \[ [ [A_1, A_2] \oplus w_{1,2}, \,\, [A_3, A_4] \oplus w_{3, 4} ] \hspace{16em} \\ = [ [A_1, A_2], [A_3, A_4] ] \oplus [A_1, A_2](w_{3, 4}) - [A_3, A_4](w_{1, 2}) .\] We can then see that $M^{(3)} = 0$, since for any $w_i \in \FF^p$, \[ [0 \oplus w_1, \, 0 \oplus w_2] = 0 \oplus 0(w_2)-0(w_1) = 0 \oplus 0 ,\] and so $M$ is solvable. Now consider its derived subalgebra $M^{(1)} = \FF x \oplus \FF^p$. If this were nilpotent, every element would be ad-nilpotent, but let $v = \tv{1,1,\cdots, 1}$ and consider $\ad_{x \oplus 0}$. We have \[ \ad_{x \oplus 0}(0 \oplus v) = [x \oplus 0, 0 \oplus v] = 0 \oplus xv = 0 \oplus v ,\] where we've used that $x$ acts on the left on vectors by cycling the entries. Thus $\ad_{x \oplus 0}^n (0 \oplus v) = 0 \oplus v$ for all $n\geq 1$ and $x \oplus 0 \in M^{(1)}$ is not ad-nilpotent.
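Both key computations above are easy to sanity-check numerically; a small sketch (our own aside, with the arbitrary choice $p = 5$) verifying $[x, y] = x$ over $\FF_p$ and that $x$ fixes the all-ones vector:

```python
# Check [x, y] = x mod p, and that x fixes v = (1, ..., 1)^t.
from sympy import Matrix, diag, ones, zeros

p = 5
x = zeros(p, p)
for i in range(p - 1):
    x[i, i + 1] = 1          # ones on the superdiagonal
x[p - 1, 0] = 1              # and a one in the bottom-left corner
y = diag(*range(p))          # y = diag(0, 1, ..., p-1)

comm = (x * y - y * x).applyfunc(lambda t: t % p)
assert comm == x             # so [x, y] = x in gl_p(F_p)

v = ones(p, 1)
assert x * v == v            # x cycles the (equal) entries of v, so
                             # ad_{x + 0} fixes 0 + v and is not nilpotent
```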
::: --- title: "Problem Sets: Lie Algebras" subtitle: "Problem Set 3" author: - name: D. Zack Garza affiliation: University of Georgia email: dzackgarza@gmail.com date: Fall 2022 order: 3 --- # Problem Set 3 ## Section 5 :::{.problem title="5.1"} Prove that if $L$ is nilpotent then the Killing form of $L$ is identically zero. ::: :::{.solution} If $L$ is nilpotent then $L^n = 0$ for some $n$, where $L^{k+1} \da [L\, L^k]$. For any $x, y\in L$, the composite $\ad_x \circ \ad_y$ pushes down the lower central series: \[ (\ad_x \circ \ad_y)(L^k) = [x, [y, L^k]] \subseteq [L, [L, L^k]] = L^{k+2} .\] Iterating, $(\ad_x \circ \ad_y)^m(L) \subseteq L^{2m} = 0$ once $2m \geq n$, so $\ad_x \circ \ad_y$ is a nilpotent endomorphism of $L$. Nilpotent endomorphisms are always traceless, so \[ \kappa(x, y) = \Trace(\ad_x \circ \ad_y) = 0 \] for all $x, y\in L$, i.e. the Killing form is identically zero. ::: :::{.problem title="5.7"} Relative to the standard basis of $\liesl_3(\FF)$, compute $\det \kappa$. What primes divide it? > Hint: use 6.7, which says $\kappa_{\liesl_n}(x, y) = 2n \Trace(xy)$. ::: :::{.solution} We have the following standard basis: \[ x_1=\left[\begin{array}{ccc} \cdot & 1 & \cdot \\ \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot \end{array}\right] \quad &x_2=\left[\begin{array}{ccc} \cdot & \cdot & 1 \\ \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot \end{array}\right] &x_3=\left[\begin{array}{ccc} \cdot & \cdot & \cdot \\ \cdot & \cdot & 1 \\ \cdot & \cdot & \cdot \end{array}\right]\\ h_1=\left[\begin{array}{ccc} 1 & \cdot & \cdot \\ \cdot & -1 & \cdot \\ \cdot & \cdot & \cdot \end{array}\right] \quad &h_2=\left[\begin{array}{ccc} \cdot & \cdot & \cdot \\ \cdot & 1 & \cdot \\ \cdot & \cdot & -1 \end{array}\right] &\\ y_1=\left[\begin{array}{ccc} \cdot & \cdot & \cdot \\ 1 & \cdot & \cdot \\ \cdot & \cdot & \cdot \end{array}\right] \quad &y_2=\left[\begin{array}{lll} \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot \\ 1 & \cdot & \cdot \end{array}\right] &y_3=\left[\begin{array}{ccc} \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot \\ \cdot & 1 & \cdot \end{array}\right] .\] For notational convenience, let $\ts{v_1,\cdots, v_8}$ denote this ordered basis. Direct computations show - $[x_1 v_1] = [x_1 x_1] = 0$ - $[x_1 v_2] = [x_1 x_2] = 0$ - $[x_1 v_3] = [x_1 x_3] = e_{13} = x_2 = v_2$ - $[x_1 v_4] = [x_1 h_1] = -2e_{12} = -2x_1 = -2 v_1$ - $[x_1 v_5] = [x_1 h_2] = e_{12} = x_1 = v_1$ - $[x_1 v_6] = [x_1 y_1] = e_{11} - e_{22} = h_1 = v_4$ - $[x_1 v_7] = [x_1 y_2] = -e_{32} = -y_3 = -v_8$ - $[x_1 v_8] = [x_1 y_3] = 0$ Let $E_{ij}$ denote the elementary $8\times 8$ matrices with a 1 in the $(i, j)$ position.
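All of these brackets, and the Gram matrix $\beta$ computed below, can also be generated mechanically. A sketch of one way to do this in SymPy (the helpers `e`, `coords`, and `ad` are ours, not Humphreys'):

```python
# Build the matrices ad_{v_i} for sl_3 in the ordered basis fixed above,
# then the Killing form beta(v_i, v_j) = Trace(ad_{v_i} ad_{v_j}).
from sympy import Matrix, zeros

def e(i, j):
    m = zeros(3, 3); m[i - 1, j - 1] = 1; return m

basis = [e(1, 2), e(1, 3), e(2, 3),             # x1, x2, x3
         e(1, 1) - e(2, 2), e(2, 2) - e(3, 3),  # h1, h2
         e(2, 1), e(3, 1), e(3, 2)]             # y1, y2, y3

def coords(m):
    # Coordinates of a traceless 3x3 matrix in this basis; the diagonal
    # part a*h1 + b*h2 = diag(a, -a+b, -b) has a = m[0,0], b = -m[2,2].
    return [m[0, 1], m[0, 2], m[1, 2], m[0, 0], -m[2, 2],
            m[1, 0], m[2, 0], m[2, 1]]

def ad(v):
    # Column j is the coordinate vector of [v, v_j].
    return Matrix([coords(v * b - b * v) for b in basis]).T

ads = [ad(b) for b in basis]
beta = Matrix(8, 8, lambda i, j: (ads[i] * ads[j]).trace())
assert beta.det() == -2**8 * 3**9
```

The assertion at the end matches the determinant computed below.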
We then have, for example, \[ \ad_{x_1} &= 0 + 0 + E_{2,3} -2 E_{1, 4} + E_{1, 5} + E_{4, 6} - E_{8, 7} + 0 \\ &= \left(\begin{array}{rrrrrrrr} \cdot & \cdot & \cdot & -2 & 1 & \cdot & \cdot & \cdot \\ \cdot & \cdot & 1 & \cdot & \cdot & \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot & 1 & \cdot & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & -1 & \cdot \end{array}\right) .\] The remaining computations can be readily automated on a computer, yielding the following matrices for the remaining $\ad_{v_i}$: - $\ad_{x_1} = 0 + 0 + E_{2,3} -2 E_{1, 4} + E_{1, 5} + E_{4, 6} - E_{8, 7} + 0$ - $\ad_{x_2} = 0 + 0 + 0 -E_{2, 4} -E_{2, 5} - E_{3, 6} + (E_{4, 7} +E_{5, 7}) + E_{1, 8}$ - $\ad_{x_3} = -E_{2,1} + 0 + 0 + E_{3, 4} -2 E_{3, 5} + 0 + E_{6, 7} + E_{5, 8}$ - $\ad_{h_1} = 2E_{1,1} + E_{2, 2} - E_{3, 3} + 0 + 0 -2 E_{6, 6} - E_{7,7} + E_{8, 8}$ - $\ad_{h_2} = -E_{1, 1} + E_{2,2} +2 E_{3,3} + 0 + 0 + E_{6,6} - E_{7,7} -2 E_{8,8}$ - $\ad_{y_1} = -E_{4, 1} + E_{3, 2} + 0 + 2E_{6, 4} - E_{6, 5} + 0 + 0 - E_{7, 8}$ - $\ad_{y_2} = E_{8,1} - (E_{4, 2} + E_{5, 2}) - E_{6, 3} + E_{7, 4} + E_{7, 5} +0 +0 + 0$ - $\ad_{y_3} = 0 - E_{1, 2} - E_{5, 3} - E_{8, 4} +2 E_{8, 5} + E_{7, 6} + 0 + 0$ Now forming the matrix $(\beta)_{ij} \da \Trace(\ad_{v_i} \ad_{v_j})$ yields \[ \beta = \left(\begin{array}{rrrrrrrr} \cdot & \cdot & \cdot & \cdot & \cdot & 6 & \cdot & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & 6 & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & 6 \\ \cdot & \cdot & \cdot & 12 & -6 & \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot & -6 & 12 & \cdot & \cdot & \cdot \\ 6 & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot \\ \cdot & 6 & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot \\ \cdot & \cdot & 6 & \cdot & \cdot & \cdot & \cdot & \cdot \end{array}\right) ,\] whence $\det(\beta) = -(6\cdot 6\cdot 6)^2(12^2-6^2) = - 2^8\, 3^9$, so the primes dividing it are exactly $2$ and $3$. ::: ## Section 6 :::{.problem title="6.1"} Using the standard basis for $L=\mathfrak{s l}_2(\FF)$, write down the Casimir element of the adjoint representation of $L$ *(cf. Exercise 5.5)*. Do the same thing for the usual (3-dimensional) representation of $\mathfrak{s l}_3(\FF)$, first computing dual bases relative to the trace form. ::: :::{.solution} A computation shows that in the basis $\ts{e_i} \da \ts{x,h,y}$, the Killing form is represented by \[ \beta = \mattt 004 080 400 \implies \beta^{-T} = \mattt 00{1\over 4} 0 {1\over 8}0 {1\over 4} 0 0 ,\] yielding the dual basis $\ts{e_i\dual}$ read from the columns of $\beta^{-T}$: - $x\dual = {1\over 4}y$, - $h\dual = {1\over 8} h$, - $y\dual = {1\over 4}x$. Thus letting $\phi = \ad$, we have \[ c_\phi &= \sum \phi(e_i)\phi(e_i\dual) \\ &= \ad(x) \ad(x\dual) + \ad(h) \ad(h\dual) + \ad(y)\ad(y\dual) \\ &= \ad(x) \ad(y/4) + \ad(h) \ad(h/8) + \ad(y)\ad(x/4) \\ &= {1\over 4} \ad_x \ad_y + {1\over 8} \ad_h^2 + {1\over 4} \ad_y \ad_x .\] For $\liesl_3$, first take the ordered basis $\ts{v_1,\cdots, v_8} = \ts{x_1, x_2, x_3, h_1, h_2, y_1, y_2, y_3}$ as in the previous problem.
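(A sketch of the computer computation used in the rest of this solution, reusing the `basis` list from the 5.7 sketch above; the names are again ours:)

```python
# Trace-form Gram matrix, dual basis, and Casimir element for the
# standard (3-dimensional) representation of sl_3.
from sympy import Matrix, Rational, eye, zeros

T = Matrix(8, 8, lambda i, j: (basis[i] * basis[j]).trace())
Tinv = T.inv()                      # T is symmetric, so T^{-T} = T^{-1}
dual = [sum((Tinv[j, i] * basis[j] for j in range(8)), zeros(3, 3))
        for i in range(8)]          # dual basis w.r.t. the trace form
casimir = sum((basis[i] * dual[i] for i in range(8)), zeros(3, 3))
assert casimir == Rational(8, 3) * eye(3)
```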
So we form the matrix $(\beta)_{ij} \da \Trace(v_i v_j)$ by computing various products and traces on a computer to obtain \[ \beta = \left(\begin{array}{rrrrrrrr} \cdot & \cdot & \cdot & \cdot & \cdot & 1 & \cdot & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & 1 & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & 1 \\ \cdot & \cdot & \cdot & 2 & -1 & \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot & -1 & 2 & \cdot & \cdot & \cdot \\ 1 & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot \\ \cdot & 1 & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot \\ \cdot & \cdot & 1 & \cdot & \cdot & \cdot & \cdot & \cdot \end{array}\right) \implies \beta^{-T} = \left(\begin{array}{rrrrrrrr} \cdot & \cdot & \cdot & \cdot & \cdot & 1 & \cdot & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & 1 & \cdot \\ \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & 1 \\ \cdot & \cdot & \cdot & \frac{2}{3} & \frac{1}{3} & \cdot & \cdot & \cdot \\ \cdot & \cdot & \cdot & \frac{1}{3} & \frac{2}{3} & \cdot & \cdot & \cdot \\ 1 & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot \\ \cdot & 1 & \cdot & \cdot & \cdot & \cdot & \cdot & \cdot \\ \cdot & \cdot & 1 & \cdot & \cdot & \cdot & \cdot & \cdot \end{array}\right) ,\] which yields the dual basis - $x_i\dual = y_i$ - $h_1\dual = {2\over 3}h_1 + {1\over 3}h_2$ - $h_2\dual = {1\over 3}h_1 + {2\over 3}h_2$ - $y_i\dual = x_i$ We can thus compute the Casimir element of the standard representation $\phi$ on a computer as \[ c_{\phi} &= \sum_i \phi(x_i)\phi(x_i\dual) + \phi(h_1)\phi(h_1\dual) + \phi(h_2)\phi(h_2\dual) + \sum_i \phi(y_i)\phi(y_i\dual) \\ &= \sum_i x_i y_i + h_1 h_1\dual + h_2 h_2\dual + \sum_i y_i x_i \\ &= \sum_i \qty{x_i y_i + y_i x_i} + \qty{h_1 h_1\dual + h_2 h_2\dual} \\ &= 2I + {2\over 3}I \\ &= {8\over 3}I .\] ::: :::{.problem title="6.3"} If $L$ is solvable, every irreducible representation of $L$ is one dimensional. ::: :::{.solution} Let $\phi: L\to \liegl(V)$ be an irreducible finite-dimensional representation of $L$. By Lie's theorem, $L$ stabilizes a flag in $V$, say $F^\bullet = F^1 \subset \cdots \subset F^n = V$ where $F^k = \gens{v_1,\cdots, v_k}$ for some basis $\ts{v_i}_{i\leq n}$. Since $\phi$ is irreducible, the only $L\dash$invariant subspaces of $V$ are $0$ and $V$ itself. But $F^1$ is a nonzero $L\dash$invariant subspace, forcing $F^1 = V$. Thus $V$ is 1-dimensional. ::: :::{.problem title="6.5"} A Lie algebra $L$ for which $\operatorname{Rad} L=Z(L)$ is called reductive.[^reductive_examples] (a) If $L$ is reductive, then $L$ is a completely reducible ad $L$-module. [^6.5a_hint] In particular, $L$ is the direct sum of $Z(L)$ and $[L L]$, with $[L L]$ semisimple. (b) If $L$ is a classical linear Lie algebra (1.2), then $L$ is semisimple. *(Cf. Exercise 1.9.)* (c) If $L$ is a completely reducible $\ad(L)\dash$module, then $L$ is reductive. (d) If $L$ is reductive, then all finite dimensional representations of $L$ in which $Z(L)$ is represented by semisimple endomorphisms are completely reducible. [^reductive_examples]: Examples: $L$ abelian, $L$ semisimple, $L=\mathfrak{g l}_n(\FF)$. [^6.5a_hint]: If ad $L \neq 0$, use Weyl's Theorem. ::: :::{.solution .foldopen} **Part 1**: If $\ad(L) \neq 0$, as hinted, we can attempt to apply Weyl's theorem to the representation $\phi: \ad(L)\to \liegl(L)$: if we can show $\ad(L)$ is semisimple, then $\phi$ (and thus $L$) will be a completely reducible $\ad(L)\dash$module. Assume $L$ is reductive, so $\ker(\ad) = Z(L) = \Rad(L)$, and by the first isomorphism theorem $\ad(L) \cong L/\Rad(L)$.
We can now use the fact stated in Humphreys on page 11 that for an arbitrary Lie algebra $L$, the quotient $L/\Rad(L)$ is semisimple -- this follows from the fact that $\Rad(L/\Rad(L)) = 0$, since a nonzero solvable ideal of the quotient would pull back to a solvable ideal of $L$ strictly containing $\Rad(L)$, contradicting the maximality of $\Rad(L)$. Thus $\ad(L)$ is semisimple, and Weyl's theorem implies that $L$ is a completely reducible $\ad(L)\dash$module. To show that $L = Z(L) \oplus [LL]$, we first show that it decomposes as a sum $L = Z(L) + [LL]$, and then that the intersection is trivial, so the sum is direct. Since $L/Z(L) \cong \ad(L)$ is semisimple, it equals its own derived algebra: a semisimple algebra is a direct sum of simple ideals, each of which equals its own derived algebra. Thus \[ L/Z(L) = [L/Z(L),\, L/Z(L)] = \qty{[LL] + Z(L)}/Z(L) ,\] and so $L = [LL] + Z(L)$ as an $\ad(L)\dash$module, although the sum need not a priori be direct. To show that it is, note that $Z(L) \leq L$ is an $\ad(L)\dash$invariant submodule, and by complete reducibility it has an $\ad(L)\dash$invariant complement $W$, so $L = W \oplus Z(L)$. Moreover $[LL] = [W \oplus Z(L),\, W \oplus Z(L)] = [WW] \subseteq W$, and combining this with $L = [LL] + Z(L)$ forces $W = [LL]$ and $L = [LL] \oplus Z(L)$. Finally, to see that $[LL]$ is semisimple, note that the above decomposition allows us to write $[LL] \cong L/Z(L) = L/\Rad(L)$, and $\Rad(L/\Rad(L)) = 0$, so $\Rad([LL]) = 0$. **Part 2**: Omitted for time. **Part 3**: Omitted for time. **Part 4**: Omitted for time. ::: :::{.problem title="6.6"} Let $L$ be a simple Lie algebra. Let $\beta(x, y)$ and $\gamma(x, y)$ be two symmetric associative bilinear forms on $L$. If $\beta, \gamma$ are nondegenerate, prove that $\beta$ and $\gamma$ are proportional. > Hint: Use Schur's Lemma. ::: :::{.solution} The strategy will be to define an irreducible $L\dash$module $V$ and use the two bilinear forms to produce an element of $\Endo_L(V)$, which will be 1-dimensional by Schur's lemma. The representation we'll take will be $\phi \da \ad: L\to \liegl(L)$, and since $L$ is simple, $\ker \ad = 0$ since otherwise it would yield a nontrivial ideal of $L$. Since this is a faithful representation, we will identify $L$ with its image $V \da \ad(L) \subseteq \liegl(L)$ and regard $V$ as an $L\dash$module; note that $V$ is irreducible, since the $L\dash$submodules of the adjoint representation are precisely the ideals of $L$, and $L$ is simple. As a matter of notation, let $\beta_x(y) \da \beta(x, y)$ and similarly $\gamma_x(y) \da \gamma(x, y)$, so that $\beta_x, \gamma_x$ can be regarded as linear functionals on $V$ and thus elements of $V\dual$. This gives an $\FF\dash$linear map \[ \Phi_1: V &\to V\dual \\ x &\mapsto \beta_x ,\] which we claim is an $L\dash$module morphism. Assuming this for the moment, note that by the general theory of bilinear forms on vector spaces, since $\beta$ and $\gamma$ are nondegenerate, the assignments $x\mapsto \beta_x$ and $x\mapsto \gamma_x$ induce vector space isomorphisms $V\iso V\dual$. Accordingly, for any linear functional $f\in V\dual$, there is a unique element $z(f) \in V$ such that $f(v) = \gamma(z(f), v)$. So define a map using the representing element for $\gamma$: \[ \Phi_2: V\dual &\to V \\ f &\mapsto z(f) ,\] which we claim is also an $L\dash$module morphism.
We can now define their composite \[ \Phi \da \Phi_2 \circ \Phi_1: V &\to V \\ x &\mapsto z(\beta_x) ,\] which sends an element $x\in V$ to the element $z = z(\beta_x) \in V$ such that $\beta_x(\wait) = \gamma_z(\wait)$ as functionals. An additional claim is that $\Phi$ commutes with the image $V \da \ad(L) \subseteq \liegl(L)$. Given this, by Schur's lemma we have $\Phi\in \Endo_L(V) = \FF$ (where we've used that a composition of morphisms is again a morphism) and so $\Phi = \lambda \id_L$ for some scalar $\lambda\in \FF$. To see why this implies the result, we have equalities of functionals \[ \beta(x, \wait) &= \beta_x(\wait) \\ &= \gamma_{z(\beta_x) }(\wait) \\ &= \gamma( z(\beta_x), \wait)\\ &= \gamma( \Phi(x), \wait) \\ &= \gamma(\lambda x, \wait) \\ &= \lambda\gamma(x, \wait) ,\] and since this holds for all $x$ we have $\beta(\wait, \wait) = \lambda \gamma(\wait, \wait)$ as desired. --- :::{.claim} $\Phi_1$ is an $L\dash$module morphism. ::: :::{.proof title="?"} We recall that a morphism of $L\dash$modules $\phi: V\to W$ is an $\FF\dash$linear map satisfying \[ \phi(\ell .\vector x) = \ell.\phi(\vector x) \qquad\forall \ell\in L,\,\forall \vector x\in V .\] In our case, the left-hand side is \[ \Phi_1(\ell . \vector x) \da \Phi_1( \ad_\ell(\vector x) ) = \Phi_1([\ell,\vector x]) = \beta_{[\ell, \vector x]} = \beta( [\ell, \vector x], \wait) \] and the right-hand side is \[ \ell.\Phi_1(\vector x) \da \ell.\beta_{\vector x} \da (\vector y\mapsto -\beta_{\vector x}( \ell. \vector y)) = (\vector y\mapsto -\beta_{\vector x}( [\ell, \vector y] )) = -\beta(\vector x, [\ell, \wait]) .\] By anticommutativity of the bracket, along with $\FF\dash$linearity and associativity of $\beta$, we have \[ \beta([\ell, \vector x], \vector y) = -\beta([\vector x, \ell], \vector y) = -\beta(\vector x, [\ell, \vector y]) \qquad \forall \vector y\in V \] and so the above two sides do indeed coincide. ::: :::{.claim} $\Phi_2$ is an $L\dash$module morphism. ::: :::{.proof title="?"} Omitted for time, proceeds similarly. ::: :::{.claim} $\Phi$ commutes with $\ad(L)$. ::: :::{.proof title="?"} Letting $x\in L$, we want to show that $\Phi \circ \ad_x = \ad_x \circ \Phi \in \liegl(L)$, i.e. that these two endomorphisms of $L$ commute. Fixing $\ell\in L$, the LHS expands to \[ \Phi(\ad_x(\ell)) = z(\beta_{\ad_x(\ell) }) = z(\beta_{[x\ell]}) ,\] while the RHS is \[ \ad_x(\Phi(\ell)) = \ad_x(z(\beta_\ell)) = [x, z(\beta_\ell)] .\] Recalling that $\Phi(t) = z(\beta_t)$ is defined to be the unique element of $L$ such that $\beta(t, \wait) = \gamma(z(\beta_t), \wait)$, for the above two to be equal it suffices to show that \[ \beta([x, \ell], \wait) = \gamma( [x, z(\beta_\ell)], \wait) \] as linear functionals. Starting with the RHS of this expression, we have \[ \gamma( [ x, z(\beta_\ell) ], \wait ) &= -\gamma( [z(\beta_\ell), x], \wait ) \quad\text{by antisymmetry}\\ &= -\gamma(z(\beta_\ell), [x, \wait]) \quad\text{by associativity of }\gamma \\ &= -\beta(\ell, [x, \wait]) \quad\text{by definition of } z(\beta_\ell) \\ &= -\beta([\ell, x], \wait) \\ &= \beta([x, \ell], \wait) .\] ::: ::: :::{.problem title="6.7"} It will be seen later on that $\mathfrak{sl}_n(\FF)$ is actually simple. Assuming this and using Exercise 6, prove that the Killing form $\kappa$ on $\mathfrak{s l}_n(\FF)$ is related to the ordinary trace form by $\kappa(x, y)=2 n \operatorname{Tr}(x y)$.
::: :::{.solution} By the previous exercise, the trace pairing $(x,y)\mapsto \Trace(xy)$ is related to the Killing form by $\kappa(x,y) = \lambda \Trace(xy)$ for some scalar $\lambda$ -- here we've used the fact that since $\liesl_n(\FF)$ is simple, $\Rad(\Trace) = 0$ and thus the trace pairing is nondegenerate. Since the scalar only depends on the bilinear forms and not on any particular inputs, it suffices to compute it for any pair $(x, y)$ with $\Trace(xy) \neq 0$, and in fact we can take $x=y$. For $\liesl_n$, we can take advantage of the fact that in the standard basis, $\ad_{h_i}$ will be diagonal for any standard generator $h_i\in \lieh$, making $\Trace( \ad_{h_i}^2)$ easier to compute for general $n$. Take the standard $h_{1} \da e_{11} - e_{22}$, and consider the matrix of $\ad_{h_1}$ in the ordered basis $\ts{x_1,\cdots, x_k, h_1,\cdots, h_{n-1}, y_1,\cdots, y_k }$ which has $k + (n-1) + k = n^2-1$ elements where $k= (n^2-n)/2$. We'll first compute $\kappa(h_1, h_1)$ with respect to this basis. In order to compute the various $[h_1, v_i]$, we recall the formula $[e_{ij}, e_{kl}] = \delta_{jk} e_{il} - \delta_{li} e_{kj}$. Applying this to $h_{1}$ yields \[ [h_{1}, e_{ij}] = [e_{11} - e_{22}, e_{ij}] = [e_{11}, e_{ij}] - [e_{22}, e_{ij}] = (\delta_{1i} e_{1j} - \delta_{j1} e_{i1}) - (\delta_{2i} e_{2j} - \delta_{j2} e_{i2}) .\] We proceed to check all of the possibilities for the results as $i, j$ vary with $i\neq j$ using the following schematic: \[ \left[\begin{array}{ c | c | c } \cdot & a & R_1 \, \cdots \\ \hline b & \cdot & R_2\, \cdots \\ \hline \overset{C_1}{\vdots} & \overset{C_2}{\vdots} & M \end{array}\right] .\] The possible cases are: - $a: i=1, j=2 \implies [h_1, e_{ij}] = 2e_{12}$, covering 1 case. - $b: i=2, j=1 \implies [h_1, e_{ij}] = -2 e_{21}$, covering 1 case. - $R_1: i=1, j>2 \implies [h_1, e_{ij}] = e_{1j}$, covering $n-2$ cases. - $R_2: i=2, j>2 \implies [h_1, e_{ij}] = -e_{2j}$, covering $n-2$ cases. - $C_1: j=1, i>2 \implies [h_1, e_{ij}] = -e_{i1}$, covering $n-2$ cases. - $C_2: j=2, i>2 \implies [h_1, e_{ij}] = e_{i2}$, covering $n-2$ cases. - $M: i, j > 2 \implies [h_1, e_{ij}] = 0$, covering the remaining cases. Thus the matrix of $\ad_{h_1}$ is diagonal in this basis, with $4(n-2)$ entries equal to $\pm 1$, the entries $2, -2$ (from $e_{12}$ and $e_{21}$), and zeros elsewhere; hence $\ad_{h_1}^2$ is diagonal with $4(n-2)$ ones and $4, 4$, yielding \[ \Trace(\ad_{h_1}^2) = 4(n-2) + 2(4) = 4n .\] On the other hand, computing the standard trace form yields \[ \Trace(h_1^2) = \Trace(\diag(1,1,0,0,\cdots)) = 2 ,\] and so \[ \Trace(\ad_{h_1}^2) = 4n = 2n \cdot 2 = 2n\cdot \Trace(h_1^2) \implies \lambda = 2n .\] ::: --- title: "Problem Sets: Lie Algebras" subtitle: "Problem Set 4" author: - name: D. Zack Garza affiliation: University of Georgia email: dzackgarza@gmail.com date: Fall 2022 order: 4 --- # Problem Set 4 ## Section 7 :::{.problem title="Humphreys 7.2"} $M=\mathfrak{sl}(3, \FF)$ contains a copy of $L\da \liesl(2, \FF)$ in its upper left-hand $2 \times 2$ position. Write $M$ as direct sum of irreducible $L$-submodules ($M$ viewed as $L$ module via the adjoint representation): \[ V(0) \oplus V(1) \oplus V(1) \oplus V(2) .\] ::: :::{.solution .foldopen} Noting that - $\dim V(m) = m +1$ - $\dim \liesl_3(\FF) = 8$ - $\dim(V(0) \oplus V(1) \oplus V(1) \oplus V(2)) = 1+2+2+3 = 8$, it suffices to find distinct highest weight elements of weights $0,1,1,2$ and take the irreducible submodules they generate.
As long as the spanning vectors coming from the various $V(n)$ are all distinct, they will span $M$ as a vector space by the above dimension count and individually span the desired submodules. Taking the standard basis $\ts{v_1,\cdots, v_8} \da \ts{x_1, x_2, x_3, h_1, h_2, y_1, y_2, y_3}$ for $\liesl_3(\FF)$ with $y_i = x_i^t$, note that the image of the inclusion $\liesl_2(\FF) \injects \liesl_3(\FF)$ can be identified with the span of $\ts{w_1,w_2,w_3} \da \ts{x_1, h_1, y_1}$ and it suffices to consider how these $3\times 3$ matrices act. Since any highest weight vector must be annihilated by the $x_1\dash$action, to find potential highest weight vectors one can compute the matrix of $\ad_{x_1}$ in the above basis and look for zero columns: \[ \ad_{x_1} = \left(\begin{array}{rrrrrrrr} 0 & 0 & 0 & -2 & 1 & 0 & 0 & 0 \\ 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & -1 & 0 \end{array}\right) .\] Thus $\ts{v_1 = x_1, v_2 = x_2, v_8 = y_3}$ are the only basis vectors annihilated by $\ad_{x_1}$, and hence the only options for highest weight vectors of nonzero weight. Computing the matrix of $\ad_{h_1}$, one can read off the weights of each: \[ \ad_{h_1} = \left(\begin{array}{rrrrrrrr} 2 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & -1 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & -2 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & -1 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \end{array}\right) .\] Thus the candidates for highest-weight vectors are: - $x_1$ for $V(2)$, - $x_2$ for one copy of $V(1)$, - $y_3$ for the other copy of $V(1)$, - a weight-zero vector for $V(0)$: reading off the columns of $\ad_{x_1}$ above, $\ad_{x_1}(a h_1 + b h_2) = (-2a + b)x_1$, so the only weight-zero line annihilated by $x_1$ is spanned by $h_1 + 2h_2$. We can now repeatedly apply the $y_1\dash$action to obtain the other vectors in each irreducible module. For $V(2)$: - $v_0 = x_1$ which has weight 2, - $v_1 = y_1.v_0 = [y_1, x_1] = -h_1$ which has weight 0, - $v_2 = {1\over 2} y_1^2.v_0 = {1\over 2} [y_1, [y_1, x_1]] = -y_1$ which has weight -2. Since $h_1$ already appears in this submodule, the $V(0)$ summand must be spanned by an independent weight-zero vector, namely $h_1 + 2h_2$. Continuing with $V(1)$: - $v_0 = x_2$ which has weight 1, - $v_1 = y_1.v_0 = [y_1, x_2] = x_3$ which has weight -1. For the other $V(1)$: - $v_0 = y_3$ with weight 1, - $v_1 = -y_2$ with weight -1. For $V(0)$: - $v_0 = h_1 + 2h_2$, which is annihilated by $x_1$, $h_1$, and $y_1$. We see that we get a full basis $\ts{x_1, h_1, y_1, x_2, x_3, y_3, y_2, h_1 + 2h_2}$ of $\liesl_3(\FF)$ this way with no redundancy, yielding the desired direct sum decomposition. ::: :::{.problem title="Humphreys 7.5"} Suppose char $\FF=p>0, L=\mathfrak{s l}(2, \FF)$. Prove that the representation $V(m)$ of $L$ constructed as in Exercise 3 or 4 is irreducible so long as the highest weight $m$ is strictly less than $p$, but reducible when $m=p$. > Note: this corresponds to the formulas in lemma 7.2 parts (a) through (c), or by letting $L\actson \FF^2$ in the usual way and extending $L\actson \FF[x, y]$ by derivations, so $l.(fg) = (l.f)g + f(l.g)$ and taking the subspace of homogeneous degree $m$ polynomials $\gens{x^m, x^{m-1}y, \cdots, y^m}$ to get an irreducible module of highest weight $m$. ::: :::{.solution} The representation $V(m)$ in Lemma 7.2 is defined by the following three equations, where $v_0 \in V_{ m}$ is a highest weight vector and $v_k \da y^k v_0/k!$ (see the sketch after this list): 1. $h\cdot v_i=(m-2 i) v_i$ 2. $y \cdot v_i=(i+1) v_{i+1}$ 3. $x \cdot v_i=(m-i+1) v_{i-1}$.
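These relations are easy to realize concretely as matrices, which makes the mod-$p$ behavior used below transparent; a minimal sketch (our own, with the illustrative choice $p = m = 5$):

```python
# The operators of (1)-(3) as (m+1) x (m+1) matrices on v_0, ..., v_m
# over F_p, and a check that span(v_0, ..., v_{m-1}) is invariant.
from sympy import Matrix, zeros

p = m = 5
h = Matrix(m + 1, m + 1, lambda i, j: (m - 2*i) % p if i == j else 0)
x = zeros(m + 1, m + 1)
y = zeros(m + 1, m + 1)
for i in range(m):
    y[i + 1, i] = (i + 1) % p    # y.v_i = (i+1) v_{i+1}
    x[i, i + 1] = (m - i) % p    # x.v_{i+1} = (m-i) v_i

assert y[m, m - 1] == 0          # y.v_{m-1} = m v_m = 0 mod p
# Hence x, y, h all map span(v_0, ..., v_{m-1}) into itself: applied to
# v_0, ..., v_{m-1}, no operator produces a component on v_m.
for op in (x, y, h):
    assert all(op[m, j] == 0 for j in range(m))
```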
Supposing $m< p$, the vectors $\ts{v_0, v_1,\cdots, v_m}$ still span an irreducible $L\dash$module, just as in the characteristic zero case: every coefficient $(i+1)$ and $(m-i+1)$ appearing in (2) and (3) for $0\leq i\leq m-1$ lies in $\ts{1, \cdots, m}$ and is thus nonzero mod $p$, so from any nonzero vector of a submodule one can reach a nonzero multiple of $v_0$ by repeatedly applying $x$, and then recover every $v_i$ by applying $y$; hence there are no nontrivial proper $L\dash$submodules. However, if $m=p$, then note that $y.v_{m-1} = (m-1+1) v_m = m v_m = 0$ since $m = p \equiv 0$ in $\FF$, and consider the set $\ts{v_0, \cdots, v_{m-1}}$. This spans an $m\dash$dimensional subspace of $V$, and the equations above show it is invariant under the $L\dash$action, so it yields an $m\dash$dimensional submodule of $V(m)$. Since $\dim_\FF V(m) = m+1$, this is a nontrivial proper submodule, so $V(m)$ is reducible. ::: :::{.problem title="Humphreys 7.6"} Decompose the tensor product of the two $L$-modules $V(3), V(7)$ into the sum of irreducible submodules: $V(4) \oplus V(6) \oplus V(8) \oplus V(10)$. Try to develop a general formula for the decomposition of $V(m) \otimes V(n)$. ::: :::{.solution} By a theorem from class, we know the weight space decomposition of any irreducible finite-dimensional $\liesl_2(\CC)\dash$module $V$ takes the following form: \[ V = V_{-m} \oplus V_{-m+2} \oplus \cdots \oplus V_{m-2} \oplus V_m ,\] where $m$ is the highest weight and each weight space $V_{\mu}$ is 1-dimensional. In particular, since $V(m)$ is a highest-weight module of highest weight $m$, we can write \[ V(3) &= \hspace{5.6em} V_{-3} \oplus V_{-1} \oplus V_1 \oplus V_3 \\ V(7) &= V_{-7} \oplus V_{-5} \oplus V_{-3} \oplus V_{-1} \oplus V_1 \oplus V_3 \oplus V_{5} \oplus V_{7} ,\] and tensoring these together yields modules with weights between $-3 -7 = -10$ and $3+7 = 10$: \[ V(3) \tensor V(7) &= V_{-10} \oplus V_{-8}\sumpower{2} \oplus V_{-6}\sumpower{3} \oplus V_{-4}\sumpower{4} \oplus V_{-2}\sumpower{4} \\ \qquad& \oplus V_0\sumpower{4} \\ \qquad& \oplus V_2\sumpower{4} \oplus V_4\sumpower{4} \oplus V_6\sumpower{3} \oplus V_8\sumpower{2} \oplus V_{10} .\] This can be more easily parsed by considering formal characters: \[ \ch(V(3)) &= e^{-3} + e^{-1} + e^{1} + e^3 \\ \ch(V(7)) &= e^{-7} + e^{-5} + e^{-3} + e^{-1} + e^1 + e^3 + e^5 + e^7 \\ \ch(V(3) \tensor V(7)) &= \ch(V(3))\cdot \ch(V(7)) \\ \\ &= (e^{-10} + e^{10}) + 2(e^{-8} + e^{8}) + 3( e^{-6} + e^6) \\ &\qquad + 4( e^{-4} + e^4) + 4(e^{-2} + e^2) +4 \\ \\ &= (e^{-10} + e^{10}) + 2(e^{-8} + e^{8}) + 3( e^{-6} + e^6) \\ &\qquad + 4\ch(V(4)) ,\] noting that $\ch(V(4)) = e^{-4} + e^{-2} + 1 + e^{2} + e^{4}$ and collecting terms. To see that $V(3) \tensor V(7)$ decomposes as $V(4) \oplus V(6) \oplus V(8) \oplus V(10)$ one can check for equality of characters to see that the various weight spaces and multiplicities match up: \[ \ch(V(4) \oplus V(6) \oplus V(8) \oplus V(10)) &= \ch(V(4)) + \ch(V(6)) + \ch(V(8)) + \ch(V(10)) \\ \\ &= \qty{e^{-4} + \cdots + e^4} + \qty{e^{-6} + \cdots + e^6} \\ &\quad +\qty{e^{-8} + \cdots + e^8} + \qty{e^{-10} + \cdots + e^{10}} \\ \\ &= 2\ch(V(4)) + (e^{-6} + e^6) \\ &\,\, + \ch(V(4)) + (e^{-6} + e^6) + (e^{-8} + e^8) \\ &\,\,+ \ch(V(4)) + (e^{-6} + e^6) + (e^{-8} + e^8) + (e^{-10} + e^{10})\\ \\ &= 4\ch(V(4)) + 3(e^{-6} + e^6) \\ &\,\, + 2(e^{-8} + e^8) + (e^{-10} + e^{10}) ,\] which is equal to $\ch(V(3) \tensor V(7))$ from above.
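This character bookkeeping can also be automated; a small SymPy sketch (with $q^k$ standing in for the formal symbol $e^k$):

```python
# Verify ch(V(3)) * ch(V(7)) = ch(V(4)) + ch(V(6)) + ch(V(8)) + ch(V(10))
# as Laurent polynomials in q.
from sympy import expand, symbols

q = symbols('q')

def ch(m):
    # ch V(m) = q^{-m} + q^{-m+2} + ... + q^{m}
    return sum(q**k for k in range(-m, m + 1, 2))

lhs = expand(ch(3) * ch(7))
rhs = expand(ch(4) + ch(6) + ch(8) + ch(10))
assert expand(lhs - rhs) == 0
```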
More generally, for two such modules $V, W$ we can write \[ V\tensor_\FF W = \bigoplus _{\lambda \in\lieh\dual} \bigoplus _{\mu_1 + \mu_2 = \lambda} V_{\mu_1} \tensor_\FF W_{\mu_2} ,\] where we've used the following observation about the weight of $\lieh$ acting on a tensor product of weight spaces: supposing $v\in V_{\mu_1}$ and $w\in W_{\mu_2}$, \[ h.(v\tensor w) &= (hv)\tensor w + v\tensor(hw) \\ &= (\mu_1 v)\tensor w + v\tensor (\mu_2 w) \\ &= (\mu_1 v)\tensor w + (\mu_2 v)\tensor w \\ &= (\mu_1 + \mu_2)(v\tensor w) ,\] and so $v\tensor w \in V_{\mu_1 + \mu_2}$. Taking $V(m_1), V(m_2)$ with $m_1 \geq m_2$ and matching up weight multiplicities as above then yields the general (Clebsch--Gordan) formula \[ V(m_1) \tensor_\FF V(m_2) = \bigoplus_{k=0}^{m_2} V(m_1 + m_2 - 2k) ,\] i.e. the summands are $V(n)$ for $n = m_1 - m_2,\, m_1 - m_2 + 2,\, \cdots,\, m_1 + m_2$. ::: ## Section 8 :::{.problem title="Humphreys 8.9"} Prove that every three dimensional semisimple Lie algebra has the same root system as $\mathfrak{s l}(2, \FF)$, hence is isomorphic to $\mathfrak{s l}(2, \FF)$. ::: :::{.solution} There is a formula for the dimension of $L$ in terms of the rank of $\Phi$ and its cardinality, which is more carefully explained in the solution below for problem 8.10: \[ \dim \lieg = \rank \Phi + \size \Phi .\] Thus if $\dim L = 3$ then the only possibility is that $\rank \Phi = 1$ and $\size \Phi = 2$, using that $\rank \Phi \leq \size \Phi$ and that $\size \Phi$ is always even since each $\alpha\in \Phi$ can be paired with $-\alpha\in \Phi$. In particular, the root system $\Phi$ of $L$ must have rank 1, and there is a unique root system of rank 1 (up to equivalence), which corresponds to $A_1$ and $\liesl_2(\FF)$. By the remark in Humphreys at the end of 8.5, there is a 1-to-1 correspondence between pairs $(L, H)$ with $L$ a semisimple Lie algebra and $H$ a maximal toral subalgebra and pairs $(\Phi, \EE)$ with $\Phi$ a root system and $\EE\contains \Phi$ its associated Euclidean space. Using this classification, we conclude that $L\cong \liesl_2(\FF)$. ::: :::{.problem title="Humphreys 8.10"} Prove that no four, five or seven dimensional semisimple Lie algebras exist. ::: :::{.solution} We can first write \[ \lieg = \lien^- \oplus \lieh \oplus \lien^+,\qquad \lien^+ \da \bigoplus _{\alpha\in \Phi^+} \lieg_{ \alpha},\quad \lien^- \da \bigoplus _{\alpha\in \Phi^+} \lieg_{ -\alpha} .\] Writing $N\da \lien^+ \oplus \lien^- = \bigoplus _{\alpha\in \Phi} \lieg_{\alpha}$, we note that $\dim_\FF \lieg_{ \alpha} = 1$ for all $\alpha\in \Phi$. Thus $\dim_\FF N = \size \Phi$ and \[ \dim_\FF \lieg = \dim_\FF \lieh + \size \Phi .\] We can also use the fact that $\dim_\FF \lieh = \rank \Phi \da \dim_\RR \RR \Phi$, the dimension of the Euclidean space spanned by $\Phi$, and so we have a general formula \[ \dim_\FF \lieg = \rank \Phi + \size \Phi ,\] which we'll write as $d=r+f$. We can observe that $f\geq 2r$ since if $\mcb\da \ts{ \alpha_1, \cdots, \alpha_r}$ is a basis for $\Phi$, no $-\alpha_i$ is in $\mcb$ but $\ts{\pm \alpha_1, \cdots, \pm \alpha_r} \subseteq \Phi$ by the axiomatics of a root system. Thus \[ \dim_\FF \lieg = r+f \geq r + 2r = 3r .\] We can now examine the cases for which $d = r+f = 4,5,7$: - $r=1$: as shown in class, there is a unique root system $A_1$ of rank 1 up to equivalence, which satisfies $f=2$ and thus $d=3$, which is not a case we need to consider. - $r=2$: this yields $d \geq 3r = 6$, so this entirely rules out $d=4,5$ as possibilities for a semisimple Lie algebra.
Using that every $\alpha\in \Phi$ is one of a pair $+\alpha, -\alpha\in \Phi$, we in fact have that $f$ is always even -- in other words, $\Phi = \Phi^+ \disjoint \Phi^-$ with $\size \Phi^+ = \size \Phi^-$, so $f \da \size \Phi = 2\cdot \size \Phi^+$. Thus $d = r+f = 2 +f$ is even in this case, ruling out $d=7$ when $r=2$.
- $r\geq 3$: in this case we have $d \geq 3r = 9$, ruling out $d=7$ once and for all.
:::

---
title: "Problem Sets: Lie Algebras"
subtitle: "Problem Set 5"
author:
  - name: D. Zack Garza
    affiliation: University of Georgia
    email: dzackgarza@gmail.com
date: Fall 2022
order: 5
---

# Problem Set 5

## Section 9

:::{.proposition title="Humphreys 9.2"}
Prove that $\Phi\dual$ is a root system in $E$, whose Weyl group is naturally isomorphic to $\mathcal{W}$; show also that $\left\langle\alpha\dual, \beta\dual\right\rangle=\langle\beta, \alpha\rangle$, and draw a picture of $\Phi\dual$ in the cases $A_1, A_2, B_2, G_2$.
:::

:::{.solution}
We recall and introduce some notation: \[ \norm{ \alpha}^2 &\da (\alpha, \alpha) \\ \\ \inp{ \beta}{ \alpha} &\da {2 (\beta, \alpha)\over \norm{ \alpha}^2} \\ &= {2 (\beta, \alpha) \over ( \alpha, \alpha)} \\ \\ s_\alpha( \beta) &= \beta - {2 (\beta, \alpha) \over \norm{\alpha}^2 } \alpha \\ &= \beta - {2 (\beta, \alpha) \over (\alpha, \alpha) } \alpha \\ \\ \alpha\dual &\da {2 \over \norm{\alpha}^2} \alpha = {2\over (\alpha, \alpha)} \alpha .\]

:::{.claim}
\[ \inp{\alpha\dual}{\beta\dual} = \inp \beta \alpha .\]
:::

:::{.proof title="?"}
This is a computation: \[ \inp{ \alpha\dual} { \beta\dual} &= {2 (\alpha\dual, \beta\dual) \over \norm{\beta\dual}^2 } \\ &= {2 (\alpha\dual, \beta\dual) \over (\beta\dual, \beta\dual) } \\ &= {2\qty{ {2\alpha\over \norm{\alpha}^2}, {2 \beta\over \norm{\beta}^2} } \over \qty{{2 \beta\over \norm{\beta}^2}, {2 \beta\over \norm{\beta}^2} } } \\ &= {2^3 \norm{\beta}^4 (\alpha, \beta)\over 2^2 \norm{\alpha}^2 \norm{\beta}^2 (\beta, \beta)} \\ &= {2^3 \norm{\beta}^4 (\alpha, \beta)\over 2^2 (\alpha, \alpha) \norm{\beta}^2 \norm{\beta}^2} \\ &= {2( \alpha, \beta) \over (\alpha, \alpha)} \\ &= \inp{\beta}{ \alpha} .\]
:::

:::{.claim}
$\Phi\dual$ is a root system.
:::

:::{.proof title="?"}
The axioms can be checked individually:

- $R1$: there is a bijection of sets \[ \Phi &\iso \Phi\dual \\ \alpha &\mapsto \alpha\dual ,\] thus $\size \Phi\dual = \size \Phi < \infty$. To see that $\RR \Phi\dual = \EE$, for $\vector v\in \EE$, use the fact that $\RR \Phi = \EE$ to write $\vector v = \sum_{ \alpha\in \Phi} c_\alpha \alpha$, then \[ \vector v &= \sum_{ \alpha\in \Phi} c_ \alpha \alpha \\ &= \sum_{ \alpha\in \Phi} c_{\alpha} {\norm{ \alpha}^2\over 2}\cdot {2\over \norm{ \alpha}^2} \alpha \\ &\da \sum_{ \alpha\in \Phi} c_{\alpha} {\norm{ \alpha}^2\over 2} \alpha\dual \\ &= \sum_{\alpha\dual \in \Phi\dual} d_{\alpha\dual} \alpha\dual, \qquad d_{ \alpha\dual} \da {1\over 2}c_ \alpha \norm{ \alpha}^2 ,\] so $\vector v\in \RR \Phi\dual$. Finally, $\vector 0\not\in \Phi\dual$: if $\alpha \in \Phi$ then $\alpha\neq \vector 0$ and $2/(\alpha, \alpha) \neq 0$, so $\alpha\dual = {2\over (\alpha, \alpha)} \alpha \neq \vector 0$.
- $R2$: It suffices to show that if $\lambda \alpha\dual = \beta\dual \in \Phi\dual$ then $\lambda = \pm 1$, so that the only multiples of $\alpha\dual$ in $\Phi\dual$ are $\pm \alpha\dual$.
So suppose \( \lambda \alpha\dual = \beta\dual \), then \[ \lambda {2\over \norm{ \alpha}^2} \alpha = {2\over \norm{ \beta}^2} \beta \implies \beta = \lambda{\norm{\beta}^2 \over \norm{ \alpha}^2} \alpha \da \lambda' \alpha ,\] and since $\Phi$ satisfies $R2$, we have \( \lambda' = \pm 1 \) and $\beta = \pm \alpha$. In particular $\norm{\beta} = \norm{\alpha}$, so \[ \pm 1 = \lambda' = \lambda {\norm{ \beta}^2 \over \norm{ \alpha}^2} = \lambda{\norm{ \alpha}^2 \over \norm{ \alpha}^2} = \lambda .\] Finally, if \( \alpha = \beta \) then \( \alpha\dual = \beta\dual \) since $\Phi, \Phi\dual$ are in bijection.
:::

:::{.proof title="of R3 and R4"}
Continuing:

- $R3$: It suffices to show that if \( \alpha\dual, \beta\dual \in \Phi\dual \) then $s_{\alpha\dual}(\beta\dual) = \gamma\dual$ for some \( \gamma\dual\in \Phi \dual \). This follows from a computation: \[ s_{ \alpha\dual}(\beta\dual) &= \beta\dual - \inp{\beta\dual }{ \alpha\dual} \alpha\dual \\ &= \beta\dual - \inp{ \alpha}{ \beta} \alpha\dual \\ &= {2\beta\over \norm{ \beta}^2 }- \inp{ \alpha}{ \beta} {2 \alpha\over \norm{ \alpha}^2 } \\ &= {2\beta\over \norm{ \beta}^2 }- {2 (\alpha, \beta) \over (\beta, \beta) } {2 \alpha\over \norm{ \alpha}^2 } \\ &= {2\beta\over \norm{ \beta}^2 }- {2 (\alpha, \beta) \over \norm{\beta}^2 } {2 \alpha\over \norm{ \alpha}^2 } \\ &= {2\over \norm{\beta}^2} \qty{ \beta - {2 (\alpha, \beta) \over \norm{\alpha}^2 } \alpha } \\ &= {2\over ( \beta, \beta)} \qty{ \beta - {2 (\beta, \alpha) \over \norm{\alpha}^2 } \alpha } \\ &= {2\over ( \beta, \beta)} \sigma_{ \alpha}(\beta) \\ &= {2\over ( \sigma_{ \alpha}(\beta), \sigma_{ \alpha}(\beta) )} \sigma_{ \alpha}(\beta) \\ &\da (\sigma_ \alpha( \beta))\dual ,\] where we've used that \( \sigma_{\alpha} \) is an isometry with respect to the symmetric bilinear form $(\wait, \wait)$.
- $R4$: This follows directly from the formula proved in the claim at the beginning: \[ \inp{ \alpha\dual}{\beta\dual} = \inp{ \beta}{ \alpha}\in \ZZ ,\] since \( \alpha, \beta\in \Phi \) and $\Phi$ satisfies $R4$.
:::

:::{.claim}
There is an isomorphism of groups $\mcw(\Phi) \iso \mcw(\Phi\dual)$.
:::

:::{.proof title="?"}
There is a map of Weyl groups \[ \tilde \psi: \mcw(\Phi) &\iso \mcw(\Phi\dual) \\ s_ \alpha &\mapsto s_{\alpha\dual} .\] Since $\alpha\dual = {2\over (\alpha, \alpha)}\alpha$ is a positive scalar multiple of $\alpha$, the reflections $s_{\alpha\dual}$ and $s_\alpha$ fix the same hyperplane and negate the same line, so $s_{\alpha\dual} = s_\alpha$ as orthogonal transformations of $\EE$. Thus $\tilde\psi$ sends generators to generators and is a group morphism, with inverse determined by $s_{\alpha\dual} \mapsto s_{\alpha}$; this yields an isomorphism of groups. In fact $\mcw(\Phi)$ and $\mcw(\Phi\dual)$ coincide as subgroups of the isometry group of $\EE$.
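:::

:::{.remark}
Both the identity $\inp{\alpha\dual}{\beta\dual} = \inp{\beta}{\alpha}$ and the closure of $\Phi\dual$ under its own reflections can also be checked numerically on a concrete system such as $B_2$. A minimal sketch in Python using exact rational arithmetic (an illustration only; the helper names are ours):

```python
from fractions import Fraction
from itertools import product

def ip(u, v):
    # Standard inner product on R^2, kept exact via Fractions.
    return sum(Fraction(x) * Fraction(y) for x, y in zip(u, v))

def pair(b, a):
    # The Cartan integer <b, a> = 2(b, a)/(a, a).
    return 2 * ip(b, a) / ip(a, a)

def dual(a):
    # The dual root: a^vee = 2a/(a, a).
    c = 2 / ip(a, a)
    return tuple(c * Fraction(x) for x in a)

def reflect(b, a):
    # The reflection s_a(b) = b - <b, a> a.
    t = pair(b, a)
    return tuple(x - t * y for x, y in zip(b, a))

# Phi(B_2): short roots (±1, 0), (0, ±1) and long roots (±1, ±1).
Phi = [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (1, -1), (-1, 1), (-1, -1)]
Phi_dual = [dual(a) for a in Phi]

# <a^vee, b^vee> = <b, a> for all pairs of roots.
assert all(pair(dual(a), dual(b)) == pair(b, a) for a, b in product(Phi, repeat=2))

# Phi^vee is closed under each reflection s_{a^vee}, i.e. axiom R3 for the dual system.
assert all(reflect(b, a) in Phi_dual for a, b in product(Phi_dual, repeat=2))
```
:::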
:::{.remark}
The following are pictures of $\Phi\dual$ in the stated special cases:

- $A_1$: Writing $\Phi(A_1) = \ts{\alpha = e_1 - e_2, - \alpha = e_2 -e_1} \subseteq \RR^2$, we have $(\alpha, \alpha) = 2$ and thus $\alpha\dual = {2\over 2}\alpha = \alpha$, so $\Phi(A_1)\dual = \Phi(A_1)$: ![](figures/2022-10-31_22-18-46.png)
- $A_2$: Writing $\Phi(A_2) = \ts{e_1 - e_2, e_1 - e_3, e_2 - e_1, e_2 - e_3, e_3 - e_1, e_3 - e_2} \subseteq \RR^3$, noting that every root $\alpha$ satisfies $(\alpha, \alpha) = 2$, the dual system $\Phi(A_2)\dual = \Phi(A_2)$ again coincides with the original: ![](figures/2022-10-31_22-58-28.png)
- $B_2$: Let $\Phi(B_2) = \ts{e_1, -e_1, e_2, -e_2, e_1 + e_2, e_1 - e_2, -e_1 + e_2, -e_1 -e_2}$ with $\alpha = e_1$ the short root and $\beta = -e_1 + e_2$ the long root. Taking the dual doubles the short roots, $\pm e_1 \mapsto \pm 2e_1$ and $\pm e_2 \mapsto \pm 2e_2$, and fixes the long roots $\pm e_1 \pm e_2$, which become the short roots of the dual system; thus $\Phi(B_2)\dual$ is again a root system of type $B_2 \cong C_2$, with the roles of long and short roots exchanged: ![](figures/2022-10-31_23-18-28.png)
- $G_2$: $\Phi(G_2)$ is shown here in gray, with $\Phi(G_2)\dual$ in green: ![](figures/2022-10-31_23-45-39.png)
:::
:::

:::{.proposition title="Humphreys 9.3"}
In Table 1, show that the order of $\sigma_\alpha \sigma_\beta$ in $\mathcal{W}$ is (respectively) $2,3,4,6$ when $\theta=\pi / 2, \pi / 3$ (or $2 \pi / 3$), $\pi / 4$ (or $3 \pi / 4$), $\pi / 6$ (or $5 \pi / 6$).

> Note that $\sigma_\alpha \sigma_\beta=$ rotation through $2 \theta$.
:::

:::{.solution}
Given the hint, this is immediate: if $s_\alpha s_\beta = R_{2\theta}$ is a rigid rotation through an angle of $2\theta$, then it's clear that \[ R_{2 \cdot {\pi \over 2}}^2 = R_{2 \cdot {\pi \over 3}}^3 = R_{2\cdot {\pi \over 4}}^4 = R_{2\cdot {\pi \over 6}}^6 = \id ,\] since these are all rotations through an angle of $2\pi$. To prove the hint, note that in any basis, a reflection has determinant $-1$ since it fixes an $(n-1)\dash$dimensional subspace (the hyperplane $H_\alpha$ of reflection) and negates its 1-dimensional complement (generated by the normal to $H_\alpha$). On the other hand, $\det(s_\alpha s_\beta) = (-1)^2 = 1$, and $s_\alpha s_\beta$ is an isometry that fixes only the intersection $H_\alpha \cap H_\beta = \ts{\vector 0}$, so it must be a rotation. To see that this is a rotation through an angle of exactly $2\theta$, consider applying $s_\beta\circ s_\alpha$ to a point $P$, letting $H_\alpha, H_\beta$ be the corresponding hyperplanes. We then have the following geometric situation: ![](figures/2022-11-01_00-54-53.png) We then have $\theta_1 + \theta_2 = \theta$, noting that the angle between $\alpha$ and $\beta$ is equal to the angle between the hyperplanes $H_\alpha, H_\beta$. The total angle measure between $P$ and $s_\beta(s_\alpha(P))$ is then $2\theta_1 + 2\theta_2 = 2\theta$.
:::

:::{.proposition title="Humphreys 9.4"}
Prove that the respective Weyl groups of $A_1 \times A_1, A_2, B_2, G_2$ are dihedral of order $4,6,8,12$. If $\Phi$ is any root system of rank 2, prove that its Weyl group must be one of these.
:::

:::{.solution}
In light of the fact that \[ D_{2n} = \gens{s, r \st r^n = s^2 = 1, srs\inv = r^{-1}} \] where $r$ is a rotation and $s$ is a reflection, for the remainder of this problem, let $s \da s_\alpha$ and $r \da s_\alpha s_\beta$ after choosing roots $\alpha$ and $\beta$.

- $A_1\times A_1$: we have $\Phi(A_1 \times A_1) = \ts{\pm e_1, \pm e_2}$, and setting $\alpha = e_1, \beta = e_2$ yields $\theta = \pi/2$. We have \[ \mcw(A_1 \times A_1) = \ts{\id, s_{\alpha}, s_{\beta}, s_\alpha s_\beta} \] where $s_{\alpha}^2 = s_{\beta}^2 = 1$, $s_\alpha s_\beta$ is rotation through $2\theta = \pi$ radians, and $(s_\alpha s_\beta)^2 = \id$.
With $s = s_\alpha$ and $r = s_\alpha s_\beta$ as above, this yields $s^2 = r^2 = \id$ and $srs\inv = r = r\inv$, which are precisely the defining relations for $D_4 \cong \ZZ/2 \times \ZZ/2$.
- $A_2$: there is an inscribed triangle in the regular hexagon formed by the convex hull of the roots (see the dotted triangle below), and the reflections $s_\alpha$ about the hyperplanes $H_\alpha$ restrict to precisely the symmetries of this triangle, yielding $D_{6}$: ![](figures/2022-11-01_01-35-46.png) Alternatively, choose a simple system $\Delta = \ts{\alpha = e_1 - e_2, \beta = e_2 - e_3}$; then $\mcw(A_2) = \gens{s_\alpha, s_\alpha s_\beta}$. Since $s \da s_\alpha \implies s^2 = 1$ and $r\da s_\alpha s_\beta \implies r^3 = 1$ (here $\theta = 2\pi/3$), these satisfy the relations of $D_6$.
- $B_2$: there is similarly a square on which the hyperplane reflections act, highlighted with dotted lines here: ![](figures/2022-11-01_01-53-11.png) Since the $s_\alpha$ act faithfully as the symmetries of a square, we have $\mcw(B_2)\cong D_{8}$. Alternatively, take $\alpha = e_1$ and $\beta = -e_1 + e_2$ and set $s = s_\alpha, r = s_\alpha s_\beta$. Then $\mcw(B_2) = \gens{s, r}$, and since $s^2 = r^4 = e$ (here $\theta = 3\pi/4$) and $srs\inv = r\inv$, this yields precisely the relations for $D_{2n}$ with $n=4$.
- $G_2$: In this case, the convex hull of the short roots forms a hexagon, to which the hyperplane reflections restrict as symmetries: ![](figures/2022-11-01_02-01-32.png) This yields $\mcw(G_2)\cong D_{12}$. Alternatively, take $\alpha = e_1$ and $\beta$ the long root in quadrant II making an angle of $\theta = 5\pi/6$ with $\alpha$; setting $s = s_\alpha, r= s_\alpha s_\beta$ gives $s^2 = r^6 = 1$, and again the relations for $D_{2n}$ with $n=6$ are satisfied.

Finally, for any root system $\Phi$ of rank 2, we will have $\mcw(\Phi) = \gens{ s \da s_ \alpha, r\da s_ \alpha s _{\beta} }$ for a base $\ts{\alpha, \beta}$. Because $\theta$ is restricted to one of the angles in Table 1 in Humphreys $\S 9.4$, i.e. the angles discussed in problem 9.3 above, the order of $s$ is always 2 and the order of $r$ is one of $2, 3, 4, 6$. Since $srs\inv = srs = r\inv$ in all cases, this always yields a dihedral group of order $4, 6, 8$, or $12$.
:::

## Section 10

:::{.proposition title="Humphreys 10.1"}
Let $\Phi\dual$ be the dual system of $\Phi, \Delta\dual=\left\{\alpha\dual \mid \alpha \in \Delta\right\}$. Prove that $\Delta\dual$ is a base of $\Phi\dual$.

> Compare Weyl chambers of $\Phi$ and $\Phi\dual$.
:::

:::{.solution}
Suppose that $\Delta$ is a base of $\Phi$. We can use the fact that bases are in bijective correspondence with Weyl chambers via the correspondence \[ \Delta \to \mathrm{WC}(\Delta) \da \ts{v\in \EE\st (v, \delta) > 0 \,\, \forall \delta\in \Delta} ,\] sending $\Delta$ to all of the vectors making an acute angle with all simple vectors $\delta \in \Delta$, or equivalently the intersection of the positive half-spaces formed by the hyperplanes $H_{\delta}$ for $\delta\in \Delta$. The claim is that $\mathrm{WC}(\Delta\dual) = \mathrm{WC}(\Delta)$, i.e. the Weyl chamber is preserved under taking duals. This follows from the fact that if $v\in \mathrm{WC}(\Delta)$, then $(v,\delta)> 0$ for all $\delta\in \Delta$. Letting $\delta\dual \in \Delta\dual$, we have \[ (v,\delta\dual) = \qty{v, {2 \over (\delta, \delta)}\delta } = {2 (v, \delta) \over (\delta,\delta)} > 0 ,\] using that both $(v, \delta)$ and $(\delta, \delta)$ are positive.
Since this works for every $\delta\dual \in \Delta\dual$, this yields $v\in \mathrm{WC}(\Delta\dual)$, and a similar argument shows the reverse containment. So $\Delta\dual$ corresponds to a fundamental Weyl chamber and is thus a base of $\Phi\dual$.
:::

:::{.proposition title="Humphreys 10.9"}
Prove that there is a unique element $\sigma$ in $\mathcal{W}$ sending $\Phi^{+}$ to $\Phi^{-}$ (relative to $\Delta$). Prove that any reduced expression for $\sigma$ must involve all $\sigma_\alpha (\alpha \in \Delta)$. Discuss $\ell(\sigma)$.
:::

:::{.solution}
The existence and uniqueness of such an element follows directly from the fact that $W$ acts simply transitively on the set of bases: since $\Delta$ and $-\Delta$ are both bases, there is some $w_0\in W$ such that $w_0(\Delta) = - \Delta$ and consequently $w_0(\Phi^+) = \Phi^-$. Since $\ell(w) = n(w) \leq \size \Phi^+$ for any $w \in W$, where $n(w)$ denotes the number of positive roots sent to negative roots by $w$, and $n(w_0) = \size \Phi^+$ by construction, $w_0$ must be the longest element in $W$, i.e. $\ell(w_0)$ is maximal. Any reduced expression for $w_0$ must involve all $s_\alpha$ -- if not, say $s_\alpha$ does not occur in some reduced expression for $w_0$, then $w_0$ does not change the sign of $\alpha$, since every $s_\beta$ for $\beta\neq \alpha \in \Delta$ changes the sign only of $\beta$ and permutes $\Phi^+\sm\ts{\beta}$. However, in this case $w_0' \da w_0s_\alpha$ satisfies $n(w_0') = n(w_0) + 1 > \size \Phi^+$ since $w_0'$ additionally changes the sign of $\alpha$, which is impossible. Finally, we have $\ell(w_0) = n(w_0) = \size \Phi^+$.
:::

## Section 11

:::{.proposition title="Humphreys 11.3"}
Use the algorithm of (11.1) to write down all roots for $G_2$. Do the same for $C_3$: \[ \left(\begin{array}{rrr}2 & -1 & 0 \\ -1 & 2 & -1 \\ 0 & -2 & 2\end{array}\right) \]
:::

:::{.solution}
Note that it suffices to find all positive roots, since $\Phi = \Phi^+ \disjoint \Phi^-$ once a simple system $\Delta$ is chosen. Since $\size \Phi(G_2) = 12$, it thus suffices to find 6 positive roots. For $G_2$, the Dynkin diagram indicates one long and one short root, so let $\alpha$ be short and $\beta$ be long. In this system we have \[ \inp \alpha \alpha = \inp \beta \beta &= 2 \\ \inp \alpha \beta &= -1 \\ \inp \beta \alpha &= -3 .\]

- The $\beta$ root string through $\alpha$: since $\height(\alpha) = 1$ and $\beta-\alpha\not\in \Phi$, we have $r=0$. Since $q = -\inp \alpha \beta = -(-1) = 1$, we obtain the string $\alpha, \alpha + \beta$.
- The $\alpha$ root string through $\beta$: since $\height( \beta) = 1$ and $\alpha -\beta \not\in \Phi$ we have $r=0$ again. Here $q = - \inp \beta \alpha = - (-3) = 3$, and we obtain \( \beta, \beta+ \alpha, \beta + 2 \alpha, \beta+ 3 \alpha \).
- We know that the $\alpha$ root strings through any of the above roots will yield nothing new.
- The $\beta$ root strings through $\alpha + \beta, \beta + 2\alpha$ turn out to yield no new roots.
- The $\beta$ root string through $\beta + 3 \alpha$: since $(\beta + 3\alpha) - \beta = 3 \alpha\not\in\Phi$, using that among the multiples of $\alpha$ only $\pm \alpha\in \Phi$, we have $r=0$. Since \[ r-q = \inp {\beta + 3 \alpha}{ \beta} = \inp \beta \beta + 3 \inp \alpha \beta = 2 + 3(-1) =-1 ,\] we have $q=1$ and obtain $\beta+ 3 \alpha, 2\beta + 3 \alpha$.

Combining these yields 6 positive roots: \[ \Phi^+(G_2) = \ts{ \alpha, \alpha+ \beta, \beta, \beta+ 2 \alpha, \beta+ 3 \alpha, 2 \beta +3 \alpha} .\]

---

For $C_3$, there are $2\cdot 3^2 = 18$ total roots and thus 9 positive roots to find.
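As an independent check on both computations (the $G_2$ roots above and the $C_3$ roots below), the algorithm of (11.1) can be run mechanically from a Cartan matrix. A minimal sketch in Python -- an illustration with our own helper names, assuming Humphreys' convention that the Cartan matrix has entries $\inp{\alpha_i}{\alpha_j}$, and encoding each root by its vector of coefficients over the simple roots:

```python
def positive_roots(A):
    # Algorithm of Humphreys (11.1): build Phi^+ from the Cartan matrix
    # A[i][j] = <alpha_i, alpha_j>, working upward by height via root strings.
    n = len(A)
    simples = [tuple(int(j == i) for j in range(n)) for i in range(n)]
    roots = set(simples)
    frontier = list(simples)
    while frontier:
        new = []
        for b in frontier:
            for i in range(n):
                # r = number of steps the alpha_i-string extends below b.
                r, c = 0, b
                while True:
                    c = tuple(x - int(j == i) for j, x in enumerate(c))
                    if c not in roots:
                        break
                    r += 1
                # <b, alpha_i> = sum_j b_j <alpha_j, alpha_i> by linearity in
                # the first argument; then q = r - <b, alpha_i>, and b + alpha_i
                # is a root exactly when q > 0.
                q = r - sum(b[j] * A[j][i] for j in range(n))
                if q > 0:
                    up = tuple(x + int(j == i) for j, x in enumerate(b))
                    if up not in roots:
                        roots.add(up)
                        new.append(up)
        frontier = new
    return roots

G2 = [[2, -1], [-3, 2]]                     # alpha short, beta long
C3 = [[2, -1, 0], [-1, 2, -1], [0, -2, 2]]  # as in the problem statement
assert len(positive_roots(G2)) == 6
assert len(positive_roots(C3)) == 9
```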
Let $\alpha, \beta, \gamma$ be the three ordered simple roots, then the Cartan matrix specifies \[ \inp \alpha \alpha = \inp \beta \beta = \inp \gamma \gamma &= 2 \\ \inp \beta \alpha = \inp \alpha \beta &= -1 \\ \inp \alpha \gamma = \inp \gamma \alpha &= 0 \\ \inp \beta \gamma &= -1 \\ \inp \gamma \beta &= -2 .\]

- The $\alpha$ root string through $\beta$: $r=0,\, q = - \inp \beta \alpha = 1\leadsto \beta, \alpha + \beta$.
- The $\beta$ root string through $\gamma$: $r=0,\, q = - \inp \gamma \beta = 2 \leadsto \gamma, \gamma+ \beta, \gamma + 2 \beta$.
- The $\gamma$ root string through $\alpha$: here $r=q=0$ since $\inp \alpha \gamma = 0$, so $\alpha + \gamma\not\in\Phi$.
- The $\gamma$ root string through \( \alpha + \beta \): $r=0$ since \( \alpha + \beta - \gamma\not\in \Phi \), and $r-q = \inp{\alpha+ \beta}{ \gamma}= -1\implies q=1 \leadsto \alpha + \beta+ \gamma$.
- The $\beta$ root string through \( \alpha + \beta + \gamma \): here $r=0$ since $(\alpha + \beta + \gamma) - \beta = \alpha + \gamma \not\in \Phi$, and $r-q = \inp{\alpha+ \beta + \gamma}{ \beta} = -1 + 2 - 2 = -1 \implies q=1 \leadsto \alpha + 2 \beta + \gamma$.
- The $\alpha$ root string through \( \alpha + 2\beta + \gamma \): here $r=1$ since $(\alpha + 2\beta + \gamma) - \alpha = 2\beta + \gamma \in \Phi$, and $r-q = \inp{\alpha + 2\beta + \gamma}{\alpha} = 2 - 2 + 0 = 0 \implies q=1 \leadsto 2\alpha + 2\beta + \gamma$. One can check that all remaining root strings produce nothing new.

This yields 9 positive roots: \[ \Phi^+(C_3) = \ts{\alpha, \beta, \gamma, \alpha + \beta, \gamma + \beta, \gamma+ 2 \beta, \alpha+ \beta+ \gamma, \alpha+ 2 \beta + \gamma, 2\alpha + 2\beta + \gamma} .\]
:::

---
title: "Problem Sets: Lie Algebras"
subtitle: "Problem Set 6"
author:
  - name: D. Zack Garza
    affiliation: University of Georgia
    email: dzackgarza@gmail.com
date: Fall 2022
order: 6
---

# Problem Set 6

## Section 17

:::{.proposition title="17.1"}
:::

:::{.proposition title="17.3"}
:::

## Section 18

:::{.problem title="18.1"}
:::

:::{.problem title="18.4"}
:::

## Section 20

:::{.problem title="20.3"}
> Do one type that is not $A_n$.
:::

:::{.problem title="20.5"}
:::

:::{.problem title="20.8"}
:::