diff --git "a/books/cam/III_L/algebras.tex" "b/books/cam/III_L/algebras.tex"
new file mode 100644
--- /dev/null
+++ "b/books/cam/III_L/algebras.tex"
@@ -0,0 +1,3703 @@
+\documentclass[a4paper]{article}
+
+\def\npart {III}
+\def\nterm {Lent}
+\def\nyear {2017}
+\def\nlecturer {C.\ J.\ B.\ Brookes}
+\def\ncourse {Algebras}
+
+\input{header}
+
+\newcommand\GKdim{\mathrm{GK}\mdash\mathrm{dim}}
+\DeclareMathOperator\Dim{Dim}
+\newcommand\HH{H\!H}
+
+\begin{document}
+\maketitle
+{\small
+\setlength{\parindent}{0em}
+\setlength{\parskip}{1em}
+The aim of the course is to give an introduction to algebras. The emphasis will be on non-commutative examples that arise in representation theory (of groups and Lie algebras) and the theory of algebraic D-modules, though you will learn something about commutative algebras in passing.
+
+Topics we discuss include:
+
+\begin{itemize}
+ \item Artinian algebras. Examples, group algebras of finite groups, crossed products. Structure theory. Artin--Wedderburn theorem. Projective modules. Blocks. $K_0$.
+
+ \item Noetherian algebras. Examples, quantum plane and quantum torus, differential operator algebras, enveloping algebras of finite dimensional Lie algebras. Structure theory. Injective hulls, uniform dimension and Goldie's theorem.
+
+ \item Hochschild chain and cochain complexes. Hochschild homology and cohomology. Gerstenhaber algebras.
+
+ \item Deformation of algebras.
+
+ \item Coalgebras, bialgebras and Hopf algebras.
+\end{itemize}
+
+\subsubsection*{Pre-requisites}
+It will be assumed that you have attended a first course on ring theory, eg IB Groups, Rings and Modules. Experience of other algebraic courses such as II Representation Theory, Galois Theory or Number Fields, or III Lie algebras will be helpful but not necessary.
+}
+\tableofcontents
+
+\setcounter{section}{-1}
+\section{Introduction}
+We start with the definition of an algebra. Throughout the course, $k$ will be a field.
+\begin{defi}[$k$-algebra]\index{$k$-algebra}\index{algebra}
+ A (unital) associative $k$-algebra is a $k$-vector space $A$ together with a linear map $m: A \otimes A \to A$, called the product map, and linear map $u: k \to A$, called the unit map, such that
+ \begin{itemize}
+ \item The product induced by $m$ is associative.
+ \item $u(1)$ is the identity of the multiplication.
+ \end{itemize}
+\end{defi}
+In particular, we don't require the product to be commutative. We usually write $m(x \otimes y)$ as $xy$.
+
+\begin{eg}
+ Let $K/k$ be a finite field extension. Then $K$ is a (commutative) $k$-algebra.
+\end{eg}
+
+\begin{eg}
+ The $n\times n$ matrices $M_n(k)$ over $k$ form a non-commutative $k$-algebra.
+\end{eg}
+
+\begin{eg}
+ The quaternions $\H$ is an $\R$-algebra, with an $\R$-basis $1, i, j, k$, and multiplication given by
+ \[
+ i^2 = j^2 = k^2 = -1,\quad ij = k,\quad ji = -k.
+ \]
+ This is in fact a \term{division algebra} (or \term{skew field}), i.e.\ every non-zero element has a multiplicative inverse.
+\end{eg}
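+
+For instance, one can verify the division algebra property of $\H$ directly: writing $\bar{q} = a - bi - cj - dk$ for the conjugate of $q = a + bi + cj + dk$, we have $q\bar{q} = a^2 + b^2 + c^2 + d^2$, so every non-zero quaternion has inverse
+\[
+ q^{-1} = \frac{\bar{q}}{a^2 + b^2 + c^2 + d^2}.
+\]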
+
+\begin{eg}
+ Let $G$ be a finite group. Then the group algebra
+ \[
+ kG = \left\{\sum \lambda_g g: g \in G, \lambda_g \in k\right\}
+ \]
+ with the obvious multiplication induced by the group operation is a $k$-algebra.
+
+ These are the associative algebras underlying the representation theory of finite groups.
+\end{eg}
+
+Most of the time, we will just care about algebras that are finite-dimensional as $k$-vector spaces. However, often what we need for the proofs to work is not that the algebra is finite-dimensional, but just that it is \emph{Artinian}. These algebras are defined by some finiteness condition on the ideals.
+
+\begin{defi}[Ideal]\index{ideal}
+ A \emph{left ideal} $I$ of $A$ is a $k$-subspace of $A$ such that if $x \in A$ and $y \in I$, then $xy \in I$. A \emph{right ideal} is one where we require $yx \in I$ instead. An \emph{ideal} is something that is both a left ideal and a right ideal.
+\end{defi}
+Since the multiplication is not necessarily commutative, we have to make the distinction between left and right things. Most of the time, we just talk about the left case, as the other case is entirely analogous.
+
+The definition we want is the following:
+\begin{defi}[Artinian algebra]\index{Artinian algebra}\index{algebra!Artinian}
+ An algebra $A$ is \emph{left Artinian} if it satisfies the \term{descending chain condition} (\term{DCC}) on left ideals, i.e.\ if we have a descending chain of left ideals
+ \[
+ I_1 \geq I_2 \geq I_3 \geq \cdots,
+ \]
+ then there is some $N$ such that $I_{N + m} = I_{N}$ for all $m \geq 0$.
+
+ We say an algebra is \emph{Artinian} if it is both left and right Artinian.
+\end{defi}
+
+\begin{eg}
+ Any finite-dimensional algebra is Artinian.
+\end{eg}
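+
+For contrast, the polynomial algebra $k[X]$ is \emph{not} Artinian, since the descending chain of ideals
+\[
+ (X) \supsetneq (X^2) \supsetneq (X^3) \supsetneq \cdots
+\]
+never terminates.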
+
+The main classification theorem for Artinian algebras we will prove is the following result:
+\begin{thm}[Artin--Wedderburn theorem]\index{Artin--Wedderburn theorem}
+ Let $A$ be a left-Artinian algebra such that the intersection of the maximal left ideals is zero. Then $A$ is the direct sum of finitely many matrix algebras over division algebras.
+\end{thm}
+When we actually get to the theorem, we will rewrite this in a way that seems a bit more natural.
+
+One familiar application of this theorem is in representation theory. The group algebra of a finite group is finite-dimensional, and in particular Artinian. We will later see that Maschke's theorem is equivalent to saying that the hypothesis of the theorem holds. So this puts a very strong constraint on what the group algebra looks like.
+
+After studying Artinian rings, we'll talk about Noetherian algebras.
+\begin{defi}[Noetherian algebra]\index{Noetherian algebra}\index{algebra!Noetherian}
+ An algebra is \emph{left Noetherian} if it satisfies the \term{ascending chain condition} (\term{ACC}) on left ideals, i.e.\ if
+ \[
+ I_1 \leq I_2 \leq I_3 \leq \cdots
+ \]
+ is an ascending chain of left ideals, then there is some $N$ such that $I_{N + m} = I_N$ for all $m \geq 0$.
+
+ Similarly, we can define right Noetherian algebras, and say an algebra is \emph{Noetherian} if it is both left and right Noetherian.
+\end{defi}
+
+We can again look at some examples.
+\begin{eg}
+ Again all finite-dimensional algebras are Noetherian.
+\end{eg}
+
+\begin{eg}
+ In the commutative case, Hilbert's basis theorem tells us a polynomial algebra $k[X_1, \cdots, X_n]$ in finitely many variables is Noetherian. Similarly, the power series rings $k[[X_1, \cdots, X_n]]$ are Noetherian.
+\end{eg}
+
+\begin{eg}
+ The \term{universal enveloping algebra} of a finite-dimensional Lie algebra is Noetherian. These are the (associative!) algebras that underpin the representation theory of Lie algebras.
+\end{eg}
+
+\begin{eg}
+ Some differential operator algebras are Noetherian. We assume $\Char k = 0$. Consider the polynomial ring $k[X]$. We have operators ``multiplication by $X$'' and ``differentiate with respect to $X$'' on $k[X]$. We can form the algebra $k[X, \frac{\partial}{\partial X}]$ of differential operators on $k[X]$, with multiplication given by the composition of operators. This is called the \term{Weyl algebra} $A_1$. We will show that this is a non-commutative Noetherian algebra.
+\end{eg}
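+
+Concretely, the non-commutativity in $A_1$ comes from the product rule: for any $f \in k[X]$, we have
+\[
+ \left(\frac{\partial}{\partial X} \circ X\right) f = \frac{\partial}{\partial X}(Xf) = f + X \frac{\partial f}{\partial X},
+\]
+so as operators on $k[X]$, we get the relation $\frac{\partial}{\partial X} X - X \frac{\partial}{\partial X} = 1$.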
+
+\begin{eg}
+ Some group algebras are Noetherian. Clearly all group algebras of finite groups are Noetherian, but the group algebras of certain infinite groups are Noetherian. For example, we can take
+ \[
+ G = \left\{
+ \begin{pmatrix}
+ 1 & \lambda & \mu\\
+ 0 & 1 & \nu\\
+ 0 & 0 & 1
+ \end{pmatrix}: \lambda, \mu, \nu \in \Z
+ \right\},
+ \]
+ and this is Noetherian. However, we shall not prove this.
+\end{eg}
+
+We will see that all left Artinian algebras are left Noetherian. While there is a general theory of non-commutative Noetherian algebras, it is not as useful as the theory of \emph{commutative} Noetherian algebras.
+
+In the commutative case, we often look at $\Spec A$, the set of prime ideals of $A$. However, sometimes in the non-commutative case there are few prime ideals, and so $\Spec$ is not going to keep us busy.
+\begin{eg}
+ In the Weyl algebra $A_1$, the only prime ideals are $0$ and $A_1$.
+\end{eg}
+
+We will prove a theorem of Goldie:
+\begin{thm}[Goldie's theorem]\index{Goldie's theorem}
+ Let $A$ be a right Noetherian algebra with no non-zero ideals all of whose elements are nilpotent. Then $A$ embeds in a finite direct sum of matrix algebras over division algebras.
+\end{thm}
+
+Some types of Noetherian algebras can be thought of as non-commutative polynomial algebras and non-commutative power series, i.e.\ they are \emph{deformations} of the analogous commutative algebra. For example, we say $A_1$ is a deformation of the polynomial algebra $k[X, Y]$, where instead of having $XY - YX = 0$, we have $XY - YX = 1$. This also applies to enveloping algebras and Iwasawa algebras. We will study when one can deform the multiplication so that it remains associative, and this is bound up with the cohomology theory of associative algebras --- \emph{Hochschild cohomology}. The Hochschild complex has rich algebraic structure, and this will allow us to understand how we can deform the algebra.
+
+At the end, we shall quickly talk about bialgebras and Hopf algebras. In a bialgebra, one also has a comultiplication map $A \to A \otimes A$, which in representation theory is crucial in saying how to regard a tensor product of two representations as a representation.
+
+%We finish with a little bit of history. In 1890's, Hilbert proved some big theorems in complex polynomial algebras. In 1920's, Noether came and abstracted out the key properties that made Hilbert's work work, and came up with the notion of the Noetherian property. In 1930s and 1940s, the development of standard commutative algebra. In 1945 came Hochschild, and in 1960s came Goldi.e.\ In 1960 we had Gerstenhaber, who told us about the structure of the Hochschild complex. In the 1960s and 1970s, the study of enveloping algebras of differential operators started, and from the 1980s we started studying quantum groups, whose motivation came from mathematical physics, but got the algebraists excited.
+
+\section{Artinian algebras}
+\subsection{Artinian algebras}
+We continue writing down some definitions. We already defined left and right Artinian algebras in the introduction. Most examples we'll meet are in fact finite-dimensional vector spaces over $k$. However, there exist some more perverse examples:
+
+\begin{eg}
+ Let
+ \[
+ A = \left\{
+ \begin{pmatrix}
+ r & s\\
+ 0 & t
+ \end{pmatrix}: r \in \Q, s, t \in \R
+ \right\}
+ \]
+ Then this is right Artinian but not left Artinian over $\Q$. To see it is not left Artinian, note that there is an ideal
+ \[
+ I = \left\{
+ \begin{pmatrix}
+ 0 & s\\
+ 0 & 0
+ \end{pmatrix}: s \in \R
+ \right\} \cong \R
+ \]
+ of $A$, and a matrix $\begin{pmatrix}r & s\\0 & t\end{pmatrix}$ acts on this on the left by sending $\begin{pmatrix}0 & s'\\ 0 & 0\end{pmatrix}$ to $\begin{pmatrix}0 & rs'\\ 0 & 0\end{pmatrix}$, i.e.\ under the identification $I \cong \R$, it acts by multiplication by $r$. Since $\R$ is an infinite-dimensional $\Q$-vector space, one sees that there is an infinite strictly descending chain of left ideals contained in $I$.
+
+ The fact that it is right Artinian is a direct verification. Indeed, it is not difficult to enumerate all the right ideals, which is left as an exercise for the reader.
+\end{eg}
+
+As in the case of commutative algebra, we can study the modules of an algebra.
+\begin{defi}[Module]\index{module}\index{bimodule}
+ Let $A$ be an algebra. A \emph{left $A$-module} is a $k$-vector space $M$ and a bilinear map
+ \[
+ \begin{tikzcd}[cdmap]
+ A \otimes M \ar[r] & M\\
+ a \otimes m \ar[r, maps to] & am
+ \end{tikzcd}
+ \]
+ such that $(ab)m = a(bm)$ for all $a, b \in A$ and $m \in M$. Right $A$-modules are defined similarly.
+
+ An \emph{$A\mdash A$-bimodule} is a vector space $M$ that is both a left $A$-module and a right $A$-module, such that the two actions commute --- for $a, b \in A$ and $x \in M$, we have
+ \[
+ a(xb) = (ax)b.
+ \]
+\end{defi}
+
+\begin{eg}
+ The algebra $A$ itself is a left $A$-module. We write this as $_A A$, and call this the \term{left regular representation}. Similarly, the right action is denoted $A_A$. These two actions are compatible by associativity, so $A$ is an $A\mdash A$-bimodule.
+\end{eg}
+
+If we write $\End_k(A)$ for the $k$-linear maps $A \to A$, then $\End_k(A)$ is naturally a $k$-algebra by composition, and we have a $k$-algebra homomorphism $A \to \End_k(A)$ that sends $a \in A$ to multiplication by $a$ on the left. However, if we want to multiply on the right instead, it is no longer a $k$-algebra homomorphism $A \to \End_k(A)$. Instead, it is a map $A \to \End_k(A)^\op$, where
+\begin{defi}[Opposite algebra]\index{opposite algebra}\index{algebra!opposite}\index{$A^\op$}
+ Let $A$ be a $k$-algebra. We define the \emph{opposite algebra} $A^\op$ to be the algebra with the same underlying vector space, but with multiplication given by
+ \[
+ x \cdot y = yx.
+ \]
+ Here on the left we have the multiplication in $A^\op$ and on the right we have the multiplication in $A$.
+\end{defi}
+In general, a left $A$-module is a right $A^\op$-module.
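+
+Indeed, given a left $A$-module $M$, setting $m \cdot a = am$ gives
+\[
+ (m \cdot a) \cdot b = b(am) = (ba)m = m \cdot (ba),
+\]
+and $ba$ is exactly the product of $a$ and $b$ in $A^\op$. So this defines a right $A^\op$-module structure.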
+
+As in the case of ring theory, we can talk about prime ideals. However, we will adopt a slightly different definition:
+\begin{defi}[Prime ideal]\index{prime ideal}\index{ideal!prime}
+ An ideal $P$ is \emph{prime} if it is a proper ideal, and if $I$ and $J$ are ideals with $IJ \subseteq P$, then either $I \subseteq P$ or $J \subseteq P$.
+\end{defi}
+It is an exercise to check that this coincides in the commutative case with the definition using elements.
+
+\begin{defi}[Annihilator]\index{annihilator}
+ Let $M$ be a left $A$-module and $m \in M$. We define the \emph{annihilators} to be
+ \begin{align*}
+ \Ann(m) &= \{a \in A: am = 0\}\\
+ \Ann(M) &= \{a \in A: am = 0\text{ for all }m \in M\} = \bigcap_{m \in M} \Ann(m).
+ \end{align*}
+\end{defi}
+Note that $\Ann(m)$ is a left ideal of $A$, and is in fact the kernel of the $A$-module homomorphism $A \to M$ given by $x \mapsto xm$. We'll denote the image of this map by $Am$, a left submodule of $M$, and we have
+\[
+ \frac{A}{\Ann(m)} \cong Am.
+\]
+On the other hand, it is easy to see that $\Ann(M)$ is in fact a (two-sided) ideal.
+
+\begin{defi}[Simple module]\index{simple module}\index{irreducible module}\index{module!simple}\index{module!irreducible}
+ A non-zero module $M$ is \emph{simple} or \emph{irreducible} if the only submodules of $M$ are $0$ and $M$.
+\end{defi}
+
+It is easy to see that
+\begin{prop}
+ Let $A$ be an algebra and $I$ a left ideal. Then $I$ is a maximal left ideal iff $A/I$ is simple.
+\end{prop}
+
+\begin{eg}
+ $\Ann(m)$ is a maximal left ideal iff $Am$ is irreducible.
+\end{eg}
+
+\begin{prop}
+ Let $A$ be an algebra and $M$ a simple module. Then $M \cong A/I$ for some (maximal) left ideal $I$ of $A$.
+\end{prop}
+
+\begin{proof}
+ Pick any non-zero element $m \in M$, and define the $A$-module homomorphism $\varphi: A \to M$ by $\varphi(a) = am$. Then the image is a non-trivial submodule, and hence must be $M$. Then by the first isomorphism theorem, we have $M \cong A/\ker \varphi$.
+\end{proof}
+
+Before we start doing anything, we note the following convenient lemma:
+\begin{lemma}
+ Let $M$ be a non-zero finitely-generated $A$-module. Then $M$ has a maximal proper submodule $M'$.
+\end{lemma}
+
+\begin{proof}
+ Let $m_1, \cdots, m_k \in M$ be a minimal generating set. Then in particular $N = \bra m_1, \cdots, m_{k - 1}\ket$ is a proper submodule of $M$. Moreover, a submodule of $M$ containing $N$ is proper iff it does not contain $m_k$, and this property is preserved under increasing unions. So by Zorn's lemma, there is a maximal proper submodule.
+\end{proof}
+
+\begin{defi}[Jacobson radical]\index{Jacobson radical}\index{$J(A)$}
+ The \emph{Jacobson radical} $J(A)$ of $A$ is the intersection of all maximal left ideals.
+\end{defi}
+This is in fact an ideal, and not just a left one, because
+\[
+ J(A) = \bigcap \{\text{maximal left ideals}\} = \bigcap_{m \in M, M\text{ simple}} \Ann(m) = \bigcap_{M\text{ simple}} \Ann(M),
+\]
+which we have established is an ideal. Yet, it is still not clear that this is independent of us saying ``left'' instead of ``right''. In fact, it does not depend on this choice, and this follows from the Nakayama lemma:
+
+\begin{lemma}[Nakayama lemma]\index{Nakayama lemma}
+ The following are equivalent for a left ideal $I$ of $A$.
+ \begin{enumerate}
+ \item $I \leq J(A)$.
+ \item For any finitely-generated left $A$-module $M$, if $IM = M$, then $M = 0$, where $IM$ is the module generated by elements of the form $am$, with $a \in I$ and $m \in M$.
+ \item $G = \{1 + a: a \in I\} = 1 + I$ is a subgroup of the unit group of $A$.
+ \end{enumerate}
+\end{lemma}
+In particular, this shows that the Jacobson radical is the largest ideal satisfying (iii), which is something that does not depend on handedness.
+
+\begin{proof}\leavevmode
+ \begin{itemize}
+ \item (i) $\Rightarrow$ (ii): Suppose $I \leq J(A)$ and $M \not= 0$ is a finitely-generated $A$-module, and we'll see that $IM \lneq M$.
+
+ Let $N$ be a maximal submodule of $M$. Then $M/N$ is a simple module, so for any $\bar{m} \in M/N$, we know $\Ann(\bar{m})$ is a maximal left ideal. So $J(A) \leq \Ann(M/N)$. So $IM \leq J(A) M \leq N \lneq M$.
+ \item (ii) $\Rightarrow$ (iii): Assume (ii). We let $x \in I$ and set $y = 1 + x$. Hence $1 = y - x \in Ay + I$. Since $Ay + I$ is a left ideal, we know $Ay + I = A$. In other words, we know
+ \[
+ I \left(\frac{A}{Ay}\right) = \frac{A}{Ay}.
+ \]
+ Now using (ii) on the finitely-generated module $A/Ay$ (it is in fact generated by $1$), we know that $A/Ay = 0$. So $A = Ay$. So there exists $z \in A$ such that $1 = zy = z(1 + x)$. So $(1 + x)$ has a left inverse $z$, and $z$ lies in $G$, since we can write $z = 1 - zx$. Applying the same argument to $z$ shows that $z$ also has a left inverse, so $z$ and $1 + x$ are mutually inverse units. Hence $G$ is a subgroup of the unit group of $A$.
+
+ \item (iii) $\Rightarrow$ (i): Suppose $I_1$ is a maximal left ideal of $A$. Let $x \in I$. If $x \not \in I_1$, then $I_1 + Ax = A$ by maximality of $I_1$. So $1 = y + zx$ for some $y \in I_1$ and $z \in A$. So $y = 1 - zx \in G$. So $y$ is invertible. But $y \in I_1$. So $I_1 = A$. This is a contradiction. So in fact $I \leq I_1$, and this is true for all maximal left ideals $I_1$. Hence $I \leq J(A)$.\qedhere
+ \end{itemize}
+\end{proof}
+
+We now come to the important definition:
+\begin{defi}[Semisimple algebra]\index{semisimple algebra}\index{algebra!semisimple}
+ An algebra is \emph{semisimple} if $J(A) = 0$.
+\end{defi}
+We will very soon see that for Artinian algebras, being semi-simple is equivalent to a few other very nice properties, such as being completely reducible. For now, we shall content ourselves with some examples.
+
+\begin{eg}
+ For any $A$, we know $A/J(A)$ is always semisimple.
+\end{eg}
+
+We can also define
+\begin{defi}[Simple algebra]\index{simple algebra}\index{algebra!simple}
+ An algebra is \emph{simple} if the only ideals are $0$ and $A$.
+\end{defi}
+It is trivially true that any simple algebra is semi-simple --- the Jacobson radical is an ideal, and it is not $A$. A particularly important example is the following:
+
+\begin{eg}
+ Consider $M_n(k)$. We let $e_i$ be the matrix with $1$ in the $(i, i)$th entry and zero everywhere else. This is idempotent, i.e.\ $e_i^2 = e_i$. It is also straightforward to check that
+ \[
+ A e_i =
+ \left\{
+ \begin{pmatrix}
+ 0 & \cdots & 0 & a_1 & 0 & \cdots & 0\\
+ 0 & \cdots & 0 & a_2 & 0 & \cdots & 0\\
+ \vdots & \ddots & \vdots & \vdots & \vdots & \ddots & \vdots\\
+ 0 & \cdots & 0 & a_n & 0 & \cdots & 0
+ \end{pmatrix}
+ \right\}
+ \]
+ The non-zero column is, of course, the $i$th column. Similarly, $e_i A$ consists of the matrices that are zero outside the $i$th row. These are left ideals and right ideals respectively, and a standard computation with the matrix units shows that any non-zero two-sided ideal must contain all of them. So the only ideals are $0$ and $A$.
+
+ As a left $A$-module, we can decompose $A$ as
+ \[
+ _A A = \bigoplus_{i = 1}^n A e_i,
+ \]
+ which is a decomposition into \emph{simple} modules.
+\end{eg}
+
+\begin{defi}[Completely reducible]\index{completely reducible}
+ An $A$-module $M$ is \emph{completely reducible} iff it is a sum of simple modules.
+\end{defi}
+Here in the definition, we said ``sum'' instead of ``direct sum'', but the following proposition shows it doesn't matter:
+
+\begin{prop}
+ Let $M$ be an $A$-module. Then the following are equivalent:
+ \begin{enumerate}
+ \item $M$ is completely reducible.
+ \item $M$ is the direct sum of simple modules.
+ \item Every submodule of $M$ has a \term{complement}, i.e.\ for any submodule $N$ of $M$, there is a complement $N'$ such that $M = N \oplus N'$.
+ \end{enumerate}
+\end{prop}
+Often, the last condition is the most useful in practice.
+
+\begin{proof}\leavevmode
+ \begin{itemize}
+ \item (i) $\Rightarrow$ (ii): Let $M$ be completely reducible, and consider the set
+ \[
+ \left\{\{S_\alpha \leq M\} : S_\alpha\text{ are simple},\; \sum S_\alpha\text{ is a direct sum}\right\}.
+ \]
+ Notice this set is closed under increasing unions, since the property of being a direct sum is only checked on finitely many elements. So by Zorn's lemma, it has a maximal element, and we let $N$ be the sum of the $S_\alpha$ in this maximal element.
+
+ Suppose this were not all of $M$. Since $M$ is a sum of simple modules, there is some simple $S \leq M$ such that $S \not\subseteq N$. Then $S \cap N \subsetneq S$, so by simplicity, they intersect trivially. So $S + N$ is a direct sum, contradicting the maximality of our collection. So we must have $N = M$, and $M$ is the direct sum of simple modules.
+ \item (ii) $\Rightarrow$ (i) is trivial.
+ \item (i) $\Rightarrow$ (iii): Let $N \leq M$ be a submodule, and consider
+ \[
+ \left\{\{S_\alpha \leq M\} : S_\alpha\text{ are simple},\; N + \sum S_\alpha\text{ is a direct sum}\right\}.
+ \]
+ Again this set has a maximal element, and let $P$ be the direct sum of those $S_\alpha$. Again if $P \oplus N$ is not all of $M$, then pick an $S \leq M$ simple such that $S$ is not contained in $P \oplus N$. Then again $S \oplus P \oplus N$ is a direct sum, which is a contradiction.
+ \item (iii) $\Rightarrow$ (i): It suffices to show that if $N < M$ is a proper submodule, then there exists a simple module that intersects $N$ trivially. Indeed, we can take $N$ to be the sum of all simple submodules of $M$, and this forces $N = M$.
+
+ To do so, pick an $x \not \in N$, and let $P$ be a submodule of $M$ maximal among those satisfying $P \cap N = 0$ and $x \not \in N \oplus P$. Then $N \oplus P$ is a proper submodule of $M$. Let $S$ be a complement. We claim $S$ is simple.
+
+ If not, we can find a proper submodule $S'$ of $S$. Let $Q$ be a complement of $N \oplus P \oplus S'$. Then we can write
+ \[
+ \begin{array}{ccccccccc}
+ M &=& N &\oplus& P &\oplus& S' &\oplus& Q\\
+ x &=& n &+& p &+& s&+& q
+ \end{array}.
+ \]
+ By assumption, $s$ and $q$ are not both zero. We wlog assume $s$ is non-zero. Then $P \oplus Q$ is a larger submodule satisfying $(P \oplus Q) \cap N = 0$ and $x \not \in N \oplus (P \oplus Q)$. This is a contradiction. So $S$ is simple, and we are done.\qedhere
+ \end{itemize}
+\end{proof}
+
+Using these different characterizations, we can prove that completely reducible modules are closed under the familiar operations.
+\begin{prop}
+ Sums, submodules and quotients of completely reducible modules are completely reducible.
+\end{prop}
+
+\begin{proof}
+ It is clear by definition that sums of completely reducible modules are completely reducible.
+
+ To see that submodules of completely reducible modules are completely reducible, let $M$ be completely reducible, and $N \leq M$. We use the complement criterion: given a submodule $K \leq N$, pick a complement $P$ with $M = K \oplus P$. Then for any $n \in N$, we can write $n = k + p$ with $k \in K \leq N$, so that $p = n - k \in P \cap N$. Hence $N = K \oplus (P \cap N)$, and so every submodule of $N$ has a complement in $N$. So $N$ is completely reducible.
+
+ Finally, to see quotients are completely reducible, if $M$ is completely reducible and $N$ is a submodule, then we can write
+ \[
+ M = N \oplus P
+ \]
+ for some $P$. Then $M/N \cong P$, and $P$ is completely reducible.
+\end{proof}
+
+We will show that every left Artinian algebra is completely reducible over itself iff it is semi-simple. We can in fact prove a more general fact for $A$-modules. To do so, we need a generalization of the Jacobson radical.
+
+\begin{defi}[Radical]\index{radical}
+ For a module $M$, we write $\Rad(M)$ for the intersection of maximal submodules of $M$, and call it the \emph{radical} of $M$.
+\end{defi}
+Thus, we have $\Rad(_A A) = J(A) = \Rad(A_A)$.
+
+\begin{prop}
+ Let $M$ be an $A$-module satisfying the descending chain condition on submodules. Then $M$ is completely reducible iff $\Rad(M) = 0$.
+\end{prop}
+
+\begin{proof}
+ It is clear that if $M$ is completely reducible, then $\Rad(M) = 0$. Indeed, we can write
+ \[
+ M = \bigoplus_{\alpha \in \Lambda} S_\alpha,
+ \]
+ where each $S_\alpha$ is simple. Each $\bigoplus_{\beta \not= \alpha} S_\beta$ is then a maximal submodule, so
+ \[
+ \Rad(M) \leq \bigcap_{\alpha \in \Lambda} \left(\bigoplus_{\beta \in \Lambda \setminus \{\alpha\}} S_\beta\right) = \{0\}.
+ \]
+ Conversely, if $\Rad(M) = 0$, we note that since $M$ satisfies the descending chain condition on submodules, there must be a \emph{finite} collection $M_1, \cdots, M_n$ of maximal submodules whose intersection vanish. Then consider the map
+ \[
+ \begin{tikzcd}[cdmap]
+ M \ar[r] & \displaystyle\bigoplus_{i = 1}^n \frac{M}{M_i}\\
+ x \ar[r, maps to]& (x + M_1, x + M_2, \cdots, x + M_n)
+ \end{tikzcd}
+ \]
+ The kernel of this map is the intersection of the $M_i$, which is trivial. So this embeds $M$ as a submodule of $\bigoplus \frac{M}{M_i}$. But each $\frac{M}{M_i}$ is simple, so $M$ is a submodule of a completely reducible module, hence completely reducible.
+\end{proof}
+
+\begin{cor}
+ If $A$ is a semi-simple left Artinian algebra, then $_AA$ is completely reducible.
+\end{cor}
+
+\begin{cor}
+ If $A$ is a semi-simple left Artinian algebra, then every left $A$-module is completely reducible.
+\end{cor}
+
+\begin{proof}
+ Every $A$-module $M$ is a quotient of a direct sum of copies of $_AA$. Explicitly, we have a map
+ \[
+ \begin{tikzcd}[cdmap]
+ \displaystyle\bigoplus_{m \in M} {}_A A \ar[r] & M\\
+ (a_m) \ar[r, maps to] & \sum a_m m
+ \end{tikzcd}
+ \]
+ Then this map is clearly surjective, and thus $M$ is a quotient of $\bigoplus_M {}_AA$.
+\end{proof}
+
+If $A$ is not semi-simple, then it turns out it is rather easy to figure out the radical of $M$, at least if $M$ is finitely-generated.
+\begin{lemma}
+ Let $A$ be left Artinian, and $M$ a finitely-generated left $A$-module. Then $J(A) M = \Rad(M)$.
+\end{lemma}
+
+\begin{proof}
+ Let $M'$ be a maximal submodule of $M$. Then $M/M'$ is simple, and is in fact $A/I$ for some maximal left ideal $I$. Then we have
+ \[
+ J(A) \left(\frac{M}{M'}\right) = 0,
+ \]
+ since $J(A) \leq I$. Therefore $J(A) M \leq M'$. So $J(A)M \leq \Rad(M)$.
+
+ Conversely, we know $\frac{M}{J(A) M}$ is an $A/J(A)$-module, and is hence completely reducible as $A/J(A)$ is semi-simple (and left Artinian). Since an $A$-submodule of $\frac{M}{J(A) M}$ is the same as an $A/J(A)$-submodule, it follows that it is completely reducible as an $A$-module as well. So
+ \[
+ \Rad\left(\frac{M}{J(A)M}\right) = 0,
+ \]
+ and hence $\Rad(M) \leq J(A) M$.
+\end{proof}
+
+\begin{prop}
+ Let $A$ be left Artinian. Then
+ \begin{enumerate}
+ \item $J(A)$ is nilpotent, i.e.\ there exists some $r$ such that $J(A)^r = 0$.
+ \item If $M$ is a finitely-generated left $A$-module, then it is both left Artinian and left Noetherian.
+ \item $A$ is left Noetherian.
+ \end{enumerate}
+\end{prop}
+
+\begin{proof}\leavevmode
+ \begin{enumerate}
+ \item Since $A$ is left-Artinian, and $\{J(A)^r: r \in \N\}$ is a descending chain of ideals, it must eventually be constant. So $J(A)^r = J(A)^{r + 1}$ for some $r$. If this is non-zero, then again using the descending chain condition, we see there is a left ideal $I$ with $J(A)^r I \not= 0$ that is minimal with this property (one such ideal exists, say $J(A)$ itself).
+
+ Now pick $x \in I$ with $J(A)^r x \not = 0$. Since $J(A)^{2r} = J(A)^r$, it follows that $J(A)^r (J(A)^r x) \not= 0$. So by minimality, $J(A)^r x \geq I$. But the other inclusion clearly holds. So they are equal. So there exists some $a \in J(A)^r$ with $x = ax$. So
+ \[
+ (1 - a) x = 0.
+ \]
+ But $1 - a$ is a unit. So $x = 0$. This is a contradiction. So $J(A)^r = 0$.
+ \item Let $M_i = J(A)^i M$. Then $M_i/M_{i + 1}$ is annihilated by $J(A)$, and hence completely reducible (it is a module over semi-simple $A/J(A)$). Since $M$ is a finitely generated left $A$-module for a left Artinian algebra, it satisfies the descending chain condition for submodules (exercise), and hence so does $M_i/M_{i + 1}$. % fill in exercise
+
+ So we know $M_i/M_{i + 1}$ is a finite sum of simple modules, and therefore satisfies the ascending chain condition. So $M_i/M_{i + 1}$ is left Noetherian, and hence $M$ is (exercise).
+
+ \item Follows from (ii) since $A$ is a finitely-generated left $A$-module.\qedhere
+ \end{enumerate}
+\end{proof}
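+
+A concrete example to keep in mind here: let $A$ be the algebra of upper triangular $2 \times 2$ matrices over $k$. Then one can check that
+\[
+ J(A) = \left\{
+ \begin{pmatrix}
+ 0 & s\\
+ 0 & 0
+ \end{pmatrix}: s \in k
+ \right\},\quad J(A)^2 = 0,\quad \frac{A}{J(A)} \cong k \oplus k,
+\]
+illustrating both that $J(A)$ is nilpotent and that $A/J(A)$ is semi-simple.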
+
+%A last little lemma before doing Artin--Wedderburn:
+%\begin{lemma}
+% Let $A$ be left Artinian and suppose $J(A)$ is finitely-generated as a left $A$-module, and $_AA$ is completely reducible, then $A$ is semi-simple.
+%\end{lemma}
+%
+%\begin{proof}
+% If $_AA$ is completely reducible, then $J(A)$ has a complement. Write
+% \[
+% _A A = J(A) \oplus \frac{_AA}{J(A)}.
+% \]
+% Multiplying on the left by $J(A)$ gives $J(A) = J(A)^2$. Since $J(A)$ is nilpotent, this implies we must have $J(A) = 0$. % or by nakamyama?
+%\end{proof}
+
+
+\subsection{Artin--Wedderburn theorem}
+We are going to state the Artin--Wedderburn theorem for right (as opposed to left) things, because this makes the notation easier for us.
+\begin{thm}[Artin--Wedderburn theorem]\index{Artin--Wedderburn theorem}
+ Let $A$ be a semisimple right Artinian algebra. Then
+ \[
+ A = \bigoplus_{i = 1}^r M_{n_i}(D_i),
+ \]
+ for some division algebras $D_i$, and these factors are uniquely determined.
+
+ $A$ has exactly $r$ isomorphism classes of simple (right) modules $S_i$, and
+ \[
+ \End_A (S_i) = \{\text{$A$-module homomorphisms $S_i \to S_i$}\} \cong D_i,
+ \]
+ and
+ \[
+ \dim_{D_i}(S_i) = n_i.
+ \]
+ If $A$ is simple, then $r = 1$.
+\end{thm}
+If we had the left version instead, then we need to insert $\op$'s somewhere.
+
+Artin--Wedderburn is an easy consequence of a few straightforward lemmas. The key idea that leads to Artin--Wedderburn is the observation that the map $A \to \End_A(A_A)$ sending $a$ to left-multiplication by $a$ is an isomorphism of algebras. So we need to first understand endomorphism algebras, starting with Schur's lemma.
+
+\begin{lemma}[Schur's lemma]\index{Schur's lemma}
+ Let $M_1, M_2$ be simple right $A$-modules. Then either $M_1 \cong M_2$, or $\Hom_A(M_1, M_2) = 0$. If $M$ is a simple $A$-module, then $\End_A(M)$ is a division algebra.
+\end{lemma}
+
+\begin{proof}
+ A non-zero $A$-module homomorphism $M_1 \to M_2$ must be injective, as the kernel is a proper submodule of the simple module $M_1$. Similarly, the image has to be the whole of $M_2$, since the image is a non-zero submodule. So this must be an isomorphism, and in particular has an inverse. So the last part follows as well.
+\end{proof}
+
+As mentioned, we are going to exploit the isomorphism $A_A \cong \End_A(A_A)$. This is easy to see directly, but we can prove a slightly more general result, for the sake of it:
+\begin{lemma}\leavevmode
+ \begin{enumerate}
+ \item If $M$ is a right $A$-module and $e$ is an idempotent in $A$, i.e.\ $e^2 = e$, then $Me \cong \Hom_A(eA, M)$.
+ \item We have
+ \[
+ eAe \cong \End_A(eA).
+ \]
+ In particular, we can take $e = 1$, and recover $\End_A(A_A) \cong A$.
+ \end{enumerate}
+\end{lemma}
+
+\begin{proof}\leavevmode
+ \begin{enumerate}
+ \item We define maps
+ \[
+ \begin{tikzcd}[cdmap]
+ me \ar[r, maps to] & (ex \mapsto mex)\\
+ Me \ar[r, "f_1", yshift=2] & \Hom(eA, M) \ar[l, "f_2", yshift=-2]\\
+ \alpha(e) & \alpha \ar[l, maps to]
+ \end{tikzcd}
+ \]
+ We note that $\alpha(e) = \alpha(e^2) = \alpha(e) e \in Me$. So this is well-defined. By inspection, these maps are inverse to each other. So we are done.
+
+ Note that we might worry that we have to pick representatives $me$ and $ex$ for the map $f_1$, but in fact we can also write it as $f_1(a)(y) = ay$, since $e$ is idempotent. So we are safe.
+ \item Immediate from above by putting $M = eA$.\qedhere
+ \end{enumerate}
+\end{proof}
+
+\begin{lemma}
+ Let $M$ be a completely reducible right $A$-module. We write
+ \[
+ M = \bigoplus S_i^{n_i},
+ \]
+ where $\{S_i\}$ are distinct simple $A$-modules. Write $D_i = \End_A(S_i)$, which we already know is a division algebra. Then
+ \[
+ \End_A (S_i^{n_i}) \cong M_{n_i} (D_i),
+ \]
+ and
+ \[
+ \End_A(M) = \bigoplus M_{n_i} (D_i).
+ \]
+\end{lemma}
+
+\begin{proof}
+ The result for $\End_A(S_i^{n_i})$ is just the familiar fact that a homomorphism $S^n \to S^m$ is given by an $m \times n$ matrix of maps $S \to S$ (in the case of vector spaces over a field $k$, we have $\End(k) \cong k$, so they are matrices with entries in $k$). Then by Schur's lemma, we have
+ \[
+ \End_A(M) = \bigoplus_i \End_A(S_i^{n_i}) \cong \bigoplus_i M_{n_i}(D_i).\qedhere
+ \]
+\end{proof}
+
+We now prove Artin--Wedderburn.
+\begin{proof}[Proof of Artin--Wedderburn]
+ If $A$ is semi-simple, then it is completely reducible as a right $A$-module. So we have
+ \[
+ A \cong \End(A_A) \cong \bigoplus M_{n_i}(D_i).
+ \]
+ We now decompose each $M_{n_i}(D_i)$ into a sum of simple modules. We know each $M_{n_i}(D_i)$ is a non-trivial $M_{n_i}(D_i)$-module in the usual way, and the action of the other summands is trivial. We can simply decompose each $M_{n_i}(D_i)$ as the sum of submodules of the form
+ \[
+ \left\{
+ \begin{pmatrix}
+ 0 & 0 & \cdots & 0 & 0\\
+ \vdots & \vdots & \ddots & \vdots & \vdots\\
+ 0 & 0 & \cdots & 0 & 0\\
+ a_1 & a_2 & \cdots & a_{n_i - 1} & a_{n_i}\\
+ \vdots & \vdots & \ddots & \vdots & \vdots\\
+ 0 & 0 & \cdots & 0 & 0 \\
+ \end{pmatrix}
+ \right\}
+ \]
+ and there are $n_i$ components. We immediately see that if we write $S_i$ for this submodule, then we have
+ \[
+ \dim_{D_i}(S_i) = n_i.
+ \]
+ Finally, we have to show that every simple module $S$ of $A$ is one of the $S_i$. We simply have to note that if $S$ is a simple $A$-module, then there is a non-trivial map $f: A \to S$ (say by picking a non-zero $x \in S$ and defining $f(a) = xa$). Then in the decomposition of $A$ into a direct sum of simple submodules, there must be one factor $S_i$ such that $f|_{S_i}$ is non-trivial. Then by Schur's lemma, this is in fact an isomorphism $S_i \cong S$.
+\end{proof}
+
+This was for semi-simple algebras. For a general right Artinian algebra, we know that $A/J(A)$ is semi-simple and inherits the Artinian property. Then Artin--Wedderburn applies to $A/J(A)$.
+
+Some of us might be scared of division algebras. Sometimes, we can get away with not talking about them. If $A$ is not just Artinian, but finite-dimensional, then so are the $D_i$.
+
+Now pick an arbitrary $x \in D_i$. Then the sub-algebra of $D_i$ generated by $x$ is commutative. So it is in fact a subfield, and finite dimensionality means it is algebraic over $k$. Now if we assume that $k$ is algebraically closed, then $x$ must live in $k$. So we've shown that these $D_i$ must be $k$ itself. Thus we get
+
+\begin{cor}
+ If $k$ is algebraically closed and $A$ is a finite-dimensional semi-simple $k$-algebra, then
+ \[
+ A \cong \bigoplus M_{n_i}(k).
+ \]
+\end{cor}
+This is true, for example, when $k = \C$.
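+
+For example, the symmetric group $S_3$ has two $1$-dimensional representations and one irreducible $2$-dimensional representation over $\C$, and correspondingly
+\[
+ \C S_3 \cong \C \oplus \C \oplus M_2(\C),
+\]
+with $1^2 + 1^2 + 2^2 = 6 = |S_3|$ accounting for all the dimensions.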
+
+We shall end this section by applying our results to group algebras. Recall the following definition:
+\begin{defi}[Group algebra]\index{group algebra}\index{$kG$}
+ Let $G$ be a group and $k$ a field. The \emph{group algebra} of $G$ over $k$ is
+ \[
+ kG = \left\{\sum \lambda_g g: g \in G, \lambda_g \in k\right\}.
+ \]
+ This has a bilinear multiplication given by the obvious formula
+ \[
+ (\lambda_g g) (\mu_h h) = \lambda_g \mu_h (gh).
+ \]
+\end{defi}
+
+The first thing to note is that group algebras are almost always semi-simple.
+\begin{thm}[Maschke's theorem]\index{Maschke's theorem}
+ Let $G$ be a finite group, and suppose either $\Char k = 0$, or $\Char k = p$ with $p \nmid |G|$, so that $|G|$ is invertible in $k$. Then $kG$ is semi-simple.
+\end{thm}
+
+\begin{proof}
+ We show that any submodule $V$ of a $kG$-module $U$ has a complement. Let $\pi: U \to V$ be any $k$-vector space projection, and define a new map
+ \[
+ \pi' = \frac{1}{|G|} \sum_{g \in G} g\pi g^{-1}: U \to V.
+ \]
+ It is easy to see that this is a $kG$-module homomorphism $U \to V$, and is a projection. So we have
+ \[
+ U = V \oplus \ker \pi',
+ \]
+ and this gives a $kG$-module complement.
+\end{proof}
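+
+To spell out why $\pi'$ works: for $h \in G$ and $u \in U$, substituting $g' = h^{-1}g$ gives
+\[
+ \pi'(hu) = \frac{1}{|G|} \sum_{g \in G} g \pi(g^{-1}hu) = h \cdot \frac{1}{|G|} \sum_{g' \in G} g'\pi(g'^{-1}u) = h \pi'(u),
+\]
+and for $v \in V$, each term satisfies $g\pi(g^{-1}v) = g g^{-1}v = v$, so $\pi'|_V = \id$ and $\pi'$ is a projection onto $V$.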
+
+There is a converse to Maschke's theorem:
+\begin{thm}
+ Let $G$ be finite and $kG$ semi-simple. Then $\Char k \nmid |G|$.
+\end{thm}
+
+\begin{proof}
+ We note that there is a simple $kG$-module $S$, given by the trivial module. This is a one-dimensional $k$-vector space. We have
+ \[
+ D = \End_{kG}(S) = k.
+ \]
+ Now suppose $kG$ is semi-simple. Then by Artin--Wedderburn, $S$ occurs exactly once as a summand in the decomposition of $kG$, since its multiplicity is $\dim_D S = 1$.
+
+ Consider the following two ideals of $kG$: we let
+ \[
+ I_1 = \left\{\sum \lambda_g g \in kG: \sum \lambda_g = 0\right\}.
+ \]
+ This is in fact a two-sided ideal of $kG$. We also have the two-sided ideal spanned by the sum of all the group elements, given by
+ \[
+ I_2 = \left\{\lambda \sum_{g \in G} g \in kG: \lambda \in k\right\}.
+ \]
+ Now if $\Char k \mid |G|$, then the coefficients of $\lambda \sum g$ sum to $|G|\lambda = 0$, so $I_2 \subseteq I_1$. Since $kG$ is completely reducible, we can write
+ \[
+ kG = \frac{kG}{I_1} \oplus I_1 = \frac{kG}{I_1} \oplus I_2 \oplus \cdots.
+ \]
+ But we know $G$ acts trivially on $\frac{kG}{I_1}$ and $I_2$, and they both have dimension $1$. So the trivial module occurs at least twice as a summand of $kG$, and this gives a contradiction. So we must have $\Char k \nmid |G|$.
+\end{proof}
+
+We can do a bit more of representation theory. Recall that when $k$ is algebraically closed of characteristic zero, the number of simple $kG$-modules is the number of conjugacy classes of $G$. There is a more general result for a general characteristic $p$ field:
+\begin{thm}
+ Let $k$ be algebraically closed of characteristic $p$, and $G$ be finite. Then the number of simple $kG$-modules (up to isomorphism) is equal to the number of conjugacy classes of elements of order not divisible by $p$. These are known as the \term{$p$-regular elements}.
+\end{thm}
+
+We immediately deduce that
+\begin{cor}
+ If $|G| = p^r$ for some $r$ and $p$ is prime, then the trivial module is the only simple $kG$-module, when $\Char k = p$.
+\end{cor}
+Note that we can prove this directly rather than using the theorem, by showing that $I = \ker (kG \to k)$ is a nilpotent ideal, and annihilates all simple modules. % exercise
+
+\begin{proof}[Proof sketch of theorem]
+ The number of simple $kG$-modules is just the number of simple $kG/J(kG)$-modules, as $J(kG)$ acts trivially on every simple module. There is a useful trick to figure out the number of simple $A$-modules for a given semi-simple $A$. Suppose we have a decomposition
+ \[
+ A \cong \bigoplus_{i = 1}^r M_{n_i}(k).
+ \]
+ Then we know $r$ is the number of simple $A$-modules. We now consider $[A, A]$, the $k$-subspace generated by elements of the form $xy - yx$. Then we see that
+ \[
+ \frac{A}{[A, A]} \cong \bigoplus_{i = 1}^r \frac{M_{n_i}(k)}{[M_{n_i}(k), M_{n_i}(k)]}.
+ \]
+ Now by linear algebra, we know $[M_{n_i}(k), M_{n_i}(k)]$ is the trace zero matrices, and so we know
+ \[
+ \dim_k \frac{M_{n_i}(k)}{[M_{n_i}(k), M_{n_i}(k)]} = 1.
+ \]
+ Hence we know
+ \[
+ \dim \frac{A}{[A, A]} = r.
+ \]
+ Thus we need to compute
+ \[
+ \dim_k \frac{kG/J(kG)}{[kG/J(kG), kG/J(kG)]}
+ \]
+ We then note the following facts:
+ \begin{enumerate}
+ \item For a general algebra $A$, we have
+ \[
+ \frac{A/J(A)}{[A/J(A), A/J(A)]} \cong \frac{A}{[A, A] + J(A)}.
+ \]
+ \item Let $g_1, \cdots, g_m$ be conjugacy class representatives of $G$. Then
+ \[
+ \{g_i + [kG, kG]\}
+ \]
+ forms a $k$-vector space basis of $kG/[kG, kG]$. % exercise!
+ \item If $g_1, \cdots, g_r$ is a set of representatives of $p$-regular conjugacy classes, then
+ \[
+ \left\{g_i + \Big([kG, kG] + J(kG)\Big)\right\}
+ \]
+ form a basis of $kG/([kG, kG] + J(kG))$. % not exercise!
+ \end{enumerate}
+ Hence the result follows.
+\end{proof}
+One may find it useful to note that $[kG, kG] + J(kG)$ consists of the elements $x \in kG$ such that $x^{p^s} \in [kG, kG]$ for some $s$.
+
+In this proof, we look at $A/[A, A]$. However, the usual proof of the corresponding result in characteristic zero looks at the center $Z(A)$ instead. The relation between these two objects is that the first is the $0$th Hochschild \emph{homology} group of $A$, while the second is the $0$th Hochschild \emph{cohomology} group of $A$.
+
+\subsection{Crossed products}
+Number theorists are often interested in representations of Galois groups and $kG$-modules where $k$ is an algebraic number field, e.g.\ $\Q$. In this case, the $D_i$'s appearing in Artin--Wedderburn may be non-commutative.
+
+We have already met one case of a non-commutative division ring, namely the quaternions $\H$. This is in fact an example of a general construction.
+
+\begin{defi}[Crossed product]\index{crossed product}
+ The \emph{crossed product} of a $k$-algebra $B$ and a group $G$ is specified by the following data:
+ \begin{itemize}
+ \item A group homomorphism $\phi: G \to \Aut_k(B)$, written
+ \[
+ \phi_g(\lambda) = \lambda^g;
+ \]
+ \item A function
+ \[
+ \Psi: G \times G \to B.
+ \]
+ \end{itemize}
+ The crossed product algebra has underlying set
+ \[
+ \left\{\sum_{g \in G} \lambda_g g: \lambda_g \in B\right\},
+ \]
+ with operation defined by
+ \[
+ \lambda g \cdot \mu h = \lambda \mu^g \Psi(g, h) (gh).
+ \]
+ The function $\Psi$ is required to be such that the resulting product is associative.
+\end{defi}
+We should think of the $\mu^g$ as specifying what happens when we move $g$ past $\mu$, and then $\Psi(g, h) (gh)$ is the product of $g$ and $h$ in the crossed product.
+
+Usually, we take $B = K$, a Galois extension of $k$, and $G = \Gal(K/k)$. Then the action $\phi_g$ is the natural action of $G$ on the elements of $K$, and we restrict to maps $\Psi: G \times G \to K^\times$ only.
+
+\begin{eg}
+ Consider $B = K = \C$, and $k = \R$. Then $G = \Gal(\C/\R) \cong \Z/2\Z = \{e, g\}$, where $g$ is complex conjugation. The elements of $\H$ are of the form
+ \[
+ \lambda_e e + \lambda_g g,
+ \]
+ where $\lambda_e, \lambda_g \in \C$, and we will write
+ \[
+ 1 \cdot g = g,\quad i \cdot g = k,\quad 1 \cdot e = 1,\quad i \cdot e = i.
+ \]
+ Now we want to impose
+ \[
+ -1 = j^2 = 1g \cdot 1g = \Psi(g, g) e.
+ \]
+ So we set $\Psi(g, g) = -1$. We can similarly work out what we want the other values of $\Psi$ to be. % complete?
+\end{eg}
+Note that in general, crossed products need not be division algebras.
+
+The crossed product is not just a $k$-algebra. It has a natural structure of a \term{$G$-graded algebra}, in the sense that we can write it as a direct sum
+\[
+ BG = \bigoplus_{g \in G} Bg,
+\]
+and we have $Bg_1 \cdot Bg_2 \subseteq B g_1 g_2$.
+
+Focusing on the case where $K/k$ is a Galois extension, we use the notation $(K, G, \Psi)$, where $\Psi: G \times G \to K^\times$. Associativity of these crossed products is equivalent to a \term{$2$-cocycle condition} on $\Psi$, which you will be asked to make precise on the first example sheet.
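+
+For orientation (the details are left to the example sheet), expanding $(1g \cdot 1h) \cdot 1l$ and $1g \cdot (1h \cdot 1l)$ using the multiplication rule above leads to the identity
+\[
+ \Psi(g, h) \Psi(gh, l) = \Psi(h, l)^g \Psi(g, hl)
+\]
+for all $g, h, l \in G$, which is the usual $2$-cocycle condition.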
+
+Two crossed products $(K, G, \Psi_1)$ and $(K, G, \Psi_2)$ are isomorphic iff the map
+\[
+ \begin{tikzcd}[cdmap]
+ G \times G \ar[r] & K^\times\\
+ (g, h) \ar[r, maps to] & \Psi_1(g, h) (\Psi_2(g, h))^{-1}
+ \end{tikzcd}
+\]
+satisfies a $2$-coboundary condition, which is again left for the first example sheet. Therefore the second (group) cohomology
+\[
+ \frac{\{\text{$2$-cocycles}: G \times G \to K^\times\}}{\{\text{$2$-coboundaries}: G \times G \to K^\times\}}
+\]
+determines the isomorphism classes of (associative) crossed products $(K, G, \Psi)$.
+
+\begin{defi}[Central simple algebra]\index{central simple algebra}\index{algebra!central simple}
+ A \emph{central simple $k$-algebra} is a finite-dimensional $k$-algebra which is a simple algebra, and whose center is $Z(A) = k$.
+\end{defi}
+Note that the center of a simple algebra is always a field: if $z \in Z(A)$ is non-zero, then $zA$ is a non-zero ideal, hence all of $A$, and one checks that the resulting inverse of $z$ is again central. Hence any simple $k$-algebra can be made into a central simple algebra simply by enlarging the base field to $Z(A)$.
+
+\begin{eg}
+ $M_n(k)$ is a central simple algebra.
+\end{eg}
+
+The point of talking about these is the following result:
+\begin{fact}
+ Any central simple $k$-algebra is of the form $M_n(D)$ for some division algebra $D$ which is also a central simple $k$-algebra, and is a crossed product $(K, G, \Psi)$.
+\end{fact}
+Note that when $K = \C$ and $k = \R$, then the second cohomology group has $2$ elements, and we get that the only central simple $\R$-algebras are $M_n(\R)$ or $M_n(\H)$.
+
+For amusement, we also note the following theorem:
+
+\begin{fact}[Wedderburn]
+ Every finite division algebra is a field.
+\end{fact}
+
+\subsection{Projectives and blocks}
+In general, if $A$ is not semi-simple, then it is not possible to decompose $A$ as a direct sum of simple modules. However, what we can do is to decompose it as a direct sum of indecomposable projectives.
+
+We begin with the definition of a projective module.
+
+\begin{defi}[Projective module]\index{projective module}\index{module!projective}
+ An $A$-module $P$ is \emph{projective} if given modules $M$ and $M'$ and maps
+ \[
+ \begin{tikzcd}
+ & P \ar[d, "\alpha"]\\
+ M' \ar[r, two heads, "\theta"] & M \ar[r] & 0
+ \end{tikzcd},
+ \]
+ then there exists a map $\beta: P \to M'$ such that the following diagram commutes:
+ \[
+ \begin{tikzcd}
+ & P \ar[d, "\alpha"] \ar[ld, dashed, "\beta"']\\
+ M' \ar[r, two heads, "\theta"] & M \ar[r] & 0
+ \end{tikzcd}.
+ \]
+ Equivalently, if we have a short exact sequence
+ \[
+ \begin{tikzcd}
+ 0 \ar[r] & N \ar[r, hook] & M' \ar[r, two heads] & M \ar[r] & 0,
+ \end{tikzcd}
+ \]
+ then the sequence
+ \[
+ \begin{tikzcd}
+ 0 \ar[r] & \Hom(P, N) \ar[r, hook] & \Hom(P, M') \ar[r, two heads] & \Hom(P, M) \ar[r] & 0
+ \end{tikzcd}
+ \]
+ is exact.
+\end{defi}
+Note that we get exactness at $\Hom(P, N)$ and $\Hom(P, M')$ for any $P$ at all. Projective means it is also exact at $\Hom(P, M)$.
+
+\begin{eg}
+ Free modules are always projective.
+\end{eg}
+In general, projective modules are ``like'' free modules. We all know that free modules are nice, and most of the time, when we want to prove things about free modules, we are just using the property that they are projective. It is also possible to understand projective modules in an algebro-geometric way --- they are ``locally free'' modules.
+
+It is convenient to characterize projective modules as follows:
+\begin{lemma}
+ The following are equivalent:
+ \begin{enumerate}
+ \item $P$ is projective.
+ \item Every surjective map $\phi: M \to P$ splits, i.e.
+ \[
+ M \cong \ker \phi \oplus N
+ \]
+ where $N \cong P$.
+ \item $P$ is a direct summand of a free module.
+ \end{enumerate}
+\end{lemma}
+
+\begin{proof}\leavevmode
+ \begin{itemize}
+ \item (i) $\Rightarrow$ (ii): Consider the following lifting problem:
+ \[
+ \begin{tikzcd}
+ & P \ar[d, equals] \ar[dl, dashed]\\
+ M \ar[r, two heads, "\phi"] & P \ar[r] & 0
+ \end{tikzcd},
+ \]
+ The lifting gives an embedding of $P$ into $M$ that complements $\ker \phi$ (by the splitting lemma, or by checking it directly).
+ \item (ii) $\Rightarrow$ (iii): Every module admits a surjection from a free module (e.g.\ the free module generated by the elements of $P$).
+ \item (iii) $\Rightarrow$ (i): Since free modules are projective, it suffices to show that direct summands of projectives are projective. Suppose $P$ is projective, and
+ \[
+ P \cong A \oplus B.
+ \]
+ Then any diagram
+ \[
+ \begin{tikzcd}
+ & A \ar[d, "\alpha"]\\
+ M' \ar[r, two heads, "\theta"] & M \ar[r] & 0
+ \end{tikzcd},
+ \]
+ can be extended to a diagram
+ \[
+ \begin{tikzcd}
+ & A \oplus B \ar[d, "\tilde{\alpha}"]\\
+ M' \ar[r, two heads, "\theta"] & M \ar[r] & 0
+ \end{tikzcd},
+ \]
+ by sending $B$ to $0$. Then since $A \oplus B \cong P$ is projective, we obtain a lifting $A \oplus B \to M'$, and restricting to $A$ gives the desired lifting.\qedhere
+ \end{itemize}
+\end{proof}
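+
+A minimal example of a projective module that is not free: take $A = k \times k$. Then $P = k \times \{0\}$ is a direct summand of the free module $_AA$, hence projective, but it cannot be free, since any non-zero free $A$-module has $k$-dimension at least $2$.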
+
+Our objective is to understand the direct summands of a general Artinian $k$-algebra $A$, not necessarily semi-simple. Since $A$ is itself a free $A$-module, we know these direct summands are always projective.
+
+Since $A$ is not necessarily semi-simple, it is in general impossible to decompose it as a direct sum of simples. What we can do, though, is to decompose it as a direct sum of \emph{indecomposable modules}.
+
+\begin{defi}[Indecomposable]\index{indecomposable}
+ A non-zero module $M$ is \emph{indecomposable} if $M$ cannot be expressed as the direct sum of two non-zero submodules.
+\end{defi}
+Note that since $A$ is (left) Artinian, it can always be decomposed as a finite direct sum of indecomposable (left) submodules. Sometimes, we are also interested in decomposing $A$ as a direct sum of (two-sided) ideals. These are called blocks.
+
+\begin{defi}[Block]\index{block}
+ The \emph{blocks} are the direct summands of $A$ that are indecomposable as ideals.
+\end{defi}
+
+\begin{eg}
+ If $A$ is semi-simple Artinian, then Artin--Wedderburn tells us
+ \[
+ A = \bigoplus M_{n_i}(D_i),
+ \]
+ and the $M_{n_i}(D_i)$ are the blocks.
+\end{eg}
+
+We already know that every Artinian module can be decomposed as a direct sum of indecomposables. The first question to ask is whether this is unique. We note the following definitions:
+
+\begin{defi}[Local algebra]\index{local algebra}\index{algebra!local}
+ An algebra $A$ is \emph{local} if it has a unique maximal left ideal; this ideal is then $J(A)$, and it is also the unique maximal right ideal.
+\end{defi}
+If so, then $A/J(A)$ is a division algebra. This name, of course, comes from algebraic geometry (cf.\ local rings).
+
+\begin{defi}[Unique decomposition property]\index{unique decomposition property}
+ A module $M$ has the \emph{unique decomposition property} if $M$ is a finite direct sum of indecomposable modules, and if
+ \[
+ M = \bigoplus_{i = 1}^m M_i = \bigoplus_{i = 1}^n M_i',
+ \]
+ then $n = m$, and, after reordering, $M_i \cong M_i'$.
+\end{defi}
+
+We want to prove that $A$ as an $A$-module always has the unique decomposition property. The first step is the following criterion for determining unique decomposition property.
+
+\begin{thm}[Krull--Schmidt theorem]\index{Krull--Schmidt theorem}
+ Suppose $M$ is a finite direct sum of indecomposable $A$-modules $M_i$, with each $\End(M_i)$ local. Then $M$ has the unique decomposition property.
+\end{thm}
+
+\begin{proof} % return to this later
+ Let
+ \[
+ M = \bigoplus_{i = 1}^m M_i = \bigoplus_{i = 1}^n M_i'.
+ \]
+ We prove this by induction on $m$. If $m = 1$, then $M$ is indecomposable. Then we must have $n = 1$ as well, and the result follows.
+
+ For $m > 1$, we consider the maps
+ \[
+ \begin{tikzcd}
+ \alpha_i: M_i' \ar[r, hook] & M \ar[r, two heads] & M_1\\
+ \beta_i: M_1 \ar[r, hook] & M \ar[r, two heads] & M_i'
+ \end{tikzcd}
+ \]
+ We observe that
+ \[
+ \id_{M_1} = \sum_{i = 1}^n \alpha_i \circ \beta_i: M_1 \to M_1.
+ \]
+ Since $\End_A(M_1)$ is local, we know some $\alpha_i \circ \beta_i$ must be invertible, i.e.\ a unit, as they cannot all lie in the Jacobson radical. We may wlog assume $\alpha_1 \circ \beta_1$ is a unit. Then $\beta_1$ is injective, and $M_1' = \im \beta_1 \oplus \ker \alpha_1$; since $M_1'$ is indecomposable and $\im \beta_1 \not= 0$, we must have $\ker \alpha_1 = 0$. So both $\alpha_1$ and $\beta_1$ are isomorphisms, and $M_1 \cong M_1'$.
+
+ Consider the map $\id - \theta = \phi$, where
+ \[
+ \begin{tikzcd}
+ \theta: M \ar[r, two heads] & M_1 \ar[r, "\alpha_1^{-1}"] & M_1' \ar[r, hook] & M \ar[r, two heads] & \bigoplus_{i = 2}^m M_i \ar[r, hook] & M.
+ \end{tikzcd}
+ \]
+ Then $\phi(M_1') = M_1$. So $\phi|_{M_1'}$ looks like $\alpha_1$. Also
+ \[
+ \phi\left(\bigoplus_{i = 2}^m M_i\right) = \bigoplus_{i = 2}^m M_i,
+ \]
+ So $\phi|_{\bigoplus_{i = 2}^m M_i}$ looks like the identity map. So in particular, we see that $\phi$ is surjective. However, if $\phi(x) = 0$, this says $x = \theta(x)$, so
+ \[
+ x \in \bigoplus_{i = 2}^m M_i.
+ \]
+ But then $\theta(x) = 0$. Thus $x = 0$. Thus $\phi$ is an automorphism of $M$ with $\phi(M_1') = M_1$. So this gives an isomorphism between
+ \[
+ \bigoplus_{i = 2}^m M_i \cong \frac{M}{M_1} \cong \frac{M}{M_1'} \cong \bigoplus_{i = 2}^n M_i',
+ \]
+ and so we are done by induction.
+\end{proof}
+
+Now it remains to prove that the endomorphism rings are local. Recall the following result from linear algebra.
+\begin{lemma}[Fitting]
+ Suppose $M$ is a module with both the ACC and DCC on submodules, and let $f \in \End_A(M)$. Then for large enough $n$, we have
+ \[
+ M = \im f^n \oplus \ker f^n.
+ \]
+\end{lemma}
+
+\begin{proof}
+ By ACC and DCC, we may choose $n$ large enough so that
+ \[
+ f^n: f^n(M) \to f^{2n}(M)
+ \]
+ is an isomorphism, as if we keep iterating $f$, the image is a descending chain and the kernel is an ascending chain, and these have to terminate.
+
+ If $m \in M$, then we can write
+ \[
+ f^n(m) = f^{2n}(m_1)
+ \]
+ for some $m_1$. Then
+ \[
+ m = f^n(m_1) + (m - f^n(m_1)) \in \im f^n + \ker f^n,
+ \]
+ and also
+ \[
+ \im f^n \cap \ker f^n = \ker (f^n: f^n(M) \to f^{2n}(M)) = 0.
+ \]
+ So done.
+\end{proof}
+
+\begin{lemma}
+ Suppose $M$ is an indecomposable module satisfying ACC and DCC on submodules. Then $B = \End_A(M)$ is local.
+\end{lemma}
+
+\begin{proof}
+ Choose a maximal left ideal of $B$, say $I$. It's enough to show that if $x \not \in I$, then $x$ is left invertible. By maximality of $I$, we know $B = Bx + I$. We write
+ \[
+ 1 = \lambda x + y,
+ \]
+ for some $\lambda \in B$ and $y \in I$. Since $y \in I$, it has no left inverse. So it is not an isomorphism. By Fitting's lemma and the indecomposability of $M$, we see that $y^m = 0$ for some $m$. Thus
+ \[
+ (1 + y + y^2 + \cdots + y^{m - 1}) \lambda x = (1 + y + \cdots + y^{m - 1})(1 - y) = 1.
+ \]
+ So $x$ is left invertible.
+\end{proof}
+
+\begin{cor}
+ Let $A$ be a left Artinian algebra. Then $A$ has the unique decomposition property.
+\end{cor}
+
+\begin{proof}
+ We know $A$ is also left Noetherian, so $_AA$ satisfies both the ACC and the DCC on submodules. Hence it is a finite direct sum of indecomposables, and each summand again satisfies both chain conditions, so has local endomorphism algebra by the previous lemma. The Krull--Schmidt theorem then applies.
+\end{proof}
+
+So if $A$ is an Artinian algebra, we know $A$ can be uniquely decomposed as a direct sum of indecomposable projectives,
+\[
+ A = \bigoplus P_j.
+\]
+For convenience, we will work with right Artinian algebras and right modules instead of left ones. It turns out that instead of studying projectives in $A$, we can study idempotent elements.
+
+Recall that $\End(A_A) \cong A$. The projection onto $P_j$ is achieved by left multiplication by an idempotent $e_j$,
+\[
+ P_j = e_j A.
+\]
+The fact that $A$ decomposes as a \emph{direct sum} of the $P_j$ translates to the condition
+\[
+ \sum e_j = 1,\quad e_i e_j = 0
+\]
+for $i \not= j$.
+\begin{defi}[Orthogonal idempotent]\index{orthogonal idempotents}\index{idempotent!orthogonal}
+ A collection of idempotents $\{e_i\}$ is \emph{orthogonal} if $e_i e_j = 0$ for $i \not= j$.
+\end{defi}
+
+The indecomposability of $P_j$ is equivalent to $e_j$ being primitive:
+\begin{defi}[Primitive idempotent]\index{idempotent!primitive}\index{primitive idempotent}
+ An idempotent is \emph{primitive} if it cannot be expressed as a sum
+ \[
+ e = e' + e'',
+ \]
+ where $e', e''$ are orthogonal idempotents, both non-zero.
+\end{defi}
+We see that giving a direct sum decomposition of $A$ is equivalent to finding an orthogonal collection of primitive idempotents that sum to $1$. This is rather useful, because idempotents are easier to move around than projectives.
+
+Our current plan is as follows --- given an Artinian algebra $A$, we can quotient out by $J(A)$, and obtain a semi-simple algebra $A/J(A)$. By Artin--Wedderburn, we know how we can decompose $A/J(A)$, and we hope to be able to lift this decomposition to one of $A$. The point of talking about idempotents instead is that we know what it means to lift elements.
+\begin{prop}
+ Let $N$ be a nilpotent ideal in $A$, and let $f$ be an idempotent of $A/N \equiv \bar{A}$. Then there is an idempotent $e \in A$ with $f = \bar{e}$.
+\end{prop}
+In particular, we know $J(A)$ is nilpotent, and this proposition applies. The proof involves a bit of magic.
+\begin{proof}
+ We consider the quotients $A/N^i$ for $i \geq 1$. We will lift the idempotents successively as we increase $i$, and since $N$ is nilpotent, repeating this process will eventually land us in $A$.
+
+ Suppose we have found an idempotent $f_{i - 1} \in A/N^{i - 1}$ with $\bar{f}_{i - 1} = f$. We want to find $f_i \in A/N^i$ such that $\bar{f}_i = f$.
+
+ For $i > 1$, we let $x$ be an element of $A/N^i$ with image $f_{i - 1}$ in $A/N^{i - 1}$. Then since $x^2 - x$ vanishes in $A/N^{i - 1}$, we know $x^2 - x \in N^{i - 1}/N^i$. Then in particular,
+ \[
+ (x^2 - x)^2 = 0 \in A/N^i.\tag{$\dagger$}
+ \]
+ We let
+ \[
+ f_i = 3x^2 - 2x^3.
+ \]
+ Then by a direct computation using $(\dagger)$, we find $f_i^2 = f_i$, and $f_i$ has image $3f_{i - 1}^2 - 2 f_{i - 1}^3 = f_{i - 1}$ in $A/N^{i - 1}$ (alternatively, in characteristic $p$, we can use $f_i = x^p$). Since $N^k = 0$ for some $k$, this process gives us what we want.
+\end{proof}
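+Explicitly, the direct computation promised above is
+\[
+ f_i^2 - f_i = (3x^2 - 2x^3)^2 - (3x^2 - 2x^3) = (4x^2 - 4x - 3)(x^2 - x)^2 = 0,
+\]
+which is legitimate even in a non-commutative algebra, since all the terms are polynomials in the single element $x$.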
+
+Just being able to lift idempotents is not good enough. We want to lift decompositions as projective indecomposables. So we need to do better.
+\begin{cor}
+ Let $N$ be a nilpotent ideal of $A$. Let
+ \[
+ \bar{1} = f_1 + \cdots + f_r
+ \]
+ with $\{f_i\}$ orthogonal primitive idempotents in $A/N$. Then we can write
+ \[
+ 1 = e_1 + \cdots + e_r,
+ \]
+ with $\{e_i\}$ orthogonal primitive idempotents in $A$, and $\bar{e}_i = f_i$.
+\end{cor}
+
+\begin{proof}
+ We define a sequence $e_i' \in A$ inductively. We set
+ \[
+ e_1' = 1.
+ \]
+ Then for each $i > 1$, we pick $e_i'$ an idempotent lift of $f_i + \cdots + f_r$ inside $e_{i - 1}' A e_{i - 1}'$: by the inductive hypothesis, the image of $e_{i - 1}'$ in $A/N$ is $f_{i - 1} + \cdots + f_r$, so $f_i + \cdots + f_r$ lies in the image of $e_{i - 1}' A e_{i - 1}'$, and the proposition applies with the nilpotent ideal $e_{i - 1}' N e_{i - 1}'$. Then
+ \[
+ e_i' e_{i + 1}' = e_{i + 1}' = e_{i + 1}' e_i'.
+ \]
+ We let
+ \[
+ e_i = e_i'- e_{i + 1}',
+ \]
+ where we set $e_{r + 1}' = 0$, so that $e_r = e_r'$.
+ Then
+ \[
+ \bar{e}_i = f_i.
+ \]
+ Also, if $j > i$, then
+ \[
+ e_j = e_{i + 1}' e_j e_{i + 1}',
+ \]
+ and so
+ \[
+ e_i e_j = (e_i' - e_{i + 1}') e_{i + 1}' e_j e_{i + 1}' = 0.
+ \]
+ Similarly $e_j e_i = 0$.
+\end{proof}
+
+We now apply this lifting of idempotents to $N = J(A)$, which we know is nilpotent. We know $A/N$ is the direct sum of simple modules, and thus the decomposition corresponds to
+\[
+ \bar{1} = f_1 + \cdots + f_t \in A/J(A),
+\]
+and these $f_i$ are orthogonal primitive idempotents. Idempotent lifting then gives
+\[
+ 1 = e_1 + \cdots + e_t \in A,
+\]
+and these are orthogonal primitive idempotents. So we can write
+\[
+ A = \bigoplus e_i A = \bigoplus P_i,
+\]
+where $P_i = e_i A$ are indecomposable projectives, and $P_i/P_i J(A) = S_i$ is simple. By Krull--Schmidt, any indecomposable projective is isomorphic to one of these $P_i$.
+
+The final piece of the picture is to figure out when two indecomposable projectives lie in the same block. Recall that if $M$ is a right $A$-module and $e$ is idempotent, then
+\[
+ Me \cong \Hom_A(eA, M).
+\]
+In particular, if $M = fA$ for some idempotent $f$, then
+\[
+ \Hom(eA, fA) \cong fAe.
+\]
+However, if $e$ and $f$ are in different blocks, say $B_1$ and $B_2$, then
+\[
+ fAe \subseteq B_1 \cap B_2 = 0,
+\]
+since $B_1$ and $B_2$ are (two-sided!) ideals. So we know
+\[
+ \Hom(eA, fA) = 0.
+\]
+So if $\Hom(eA, fA) \not= 0$, then they are in the same block. The existence of a homomorphism can alternatively be expressed in terms of composition factors.
+
+We have seen that each indecomposable projective $P$ has a simple ``top''
+\[
+ P/PJ(A) \cong S.
+\]
+\begin{defi}[Composition factor]\index{composition factor}
+ A simple module $S$ is a composition factor of a module $M$ if there are submodules $M_1 \leq M_2 \leq M$ with
+ \[
+ M_2/M_1 \cong S.
+ \]
+\end{defi}
+Suppose $S$ is a composition factor of a module $M$. Then we have a diagram
+\[
+ \begin{tikzcd}
+ & P \ar[d] \ar[dl, dashed]\\
+ M_2 \ar[r, two heads ] & S \ar[r] & 0
+ \end{tikzcd}
+\]
+So by definition of projectivity, we obtain a non-zero diagonal map $P \to M_2 \leq M$ as shown.
+
+\begin{lemma}
+ Let $P$ be an indecomposable projective, and $M$ an $A$-module. Then $\Hom(P, M) \not= 0$ iff $P/P J(A)$ is a composition factor of $M$.
+\end{lemma}
+
+\begin{proof}
+ We have proven $\Rightarrow$. Conversely, suppose there is a non-zero map $f: P \to M$. Then it induces a surjection
+ \[
+ S = \frac{P}{PJ(A)} \to \frac{\im f}{(\im f)J(A)}.
+ \]
+ Now we cannot have $\im f = (\im f)J(A)$, or else we have $\im f = (\im f)J(A)^n = 0$ for sufficiently large $n$ since $J(A)$ is nilpotent. So this is a non-zero map from the simple module $S$, hence injective, and is thus an isomorphism. Taking $M_2 = \im f$ and $M_1 = (\im f)J(A)$ exhibits $S$ as a composition factor of $M$.
+\end{proof}
+
+We define a (directed) graph whose vertices are labelled by indecomposable projectives, and there is an edge $P_1 \to P_2$ if the top $S_1$ of $P_1$ is a composition factor of $P_2$.
+\begin{thm}
+ Indecomposable projectives $P_1$ and $P_2$ are in the same block if and only if they lie in the same connected component of the graph.
+\end{thm}
+
+\begin{proof}
+ It is clear that if $P_1$ and $P_2$ are in the same connected component, then they are in the same block.
+
+ Conversely, consider a connected component $X$, and consider
+ \[
+ I = \bigoplus_{P \in X} P.
+ \]
+ We show that this is in fact a left ideal, and hence a two-sided ideal. Consider any $x \in A$. Then for each $P \in X$, left-multiplication by $x$ gives a map $P \to A$, and if we decompose
+ \[
+ A = \bigoplus P_i,
+ \]
+ then this can be expressed as a sum of maps $f_i: P \to P_i$. By the lemma, such a map can be non-zero only if the top of $P$ is a composition factor of $P_i$, i.e.\ only if there is an edge from $P$ to $P_i$. So if $f_i \not= 0$, then $P_i \in X$. So left-multiplication by $x$ maps $I$ to itself, and it follows that $I$ is an ideal. Thus $A$ is the direct sum of the ideals arising from the connected components, and since each block is an indecomposable ideal summand, each block lies inside the ideal of a single component. So projectives in different components lie in different blocks.
+\end{proof}
+
+\subsection{\tph{$K_0$}{K0}{K0}}
+We now briefly talk about the notion of $K_0$.
+\begin{defi}[$K_0$]\index{$K_0$}
+ For any associative $k$-algebra $A$, consider the free abelian group with basis labelled by the isomorphism classes $[P]$ of finitely-generated projective $A$-modules, and introduce the relations
+ \[
+ [P_1] + [P_2] = [P_1 \oplus P_2].
+ \]
+ In other words, we take the quotient of the free abelian group by the subgroup generated by the elements
+ \[
+ [P_1] + [P_2] - [P_1 \oplus P_2].
+ \]
+ The resulting abelian group is $K_0(A)$.
+\end{defi}
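+\begin{eg}
+ For $A = k$, a finitely-generated projective module is just a finite-dimensional vector space, and $[P] \mapsto \dim_k P$ induces an isomorphism $K_0(k) \cong \Z$.
+\end{eg}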
+
+\begin{eg}
+ If $A$ is an Artinian algebra, then we know that any finitely-generated projective is a direct sum of indecomposable projectives, and this decomposition is unique by Krull--Schmidt. So
+ \[
+ K_0(A) = \left\{\parbox{7cm}{\centering abelian group generated by the isomorphism classes of indecomposable projectives}\right\}.
+ \]
+ So $K_0(A) \cong \Z^r$, where $r$ is the number of isomorphism classes of indecomposable projectives, which is the number of isomorphism classes of simple modules.
+
+ Here we're using the fact that two indecomposable projectives are isomorphic iff their simple tops are isomorphic.
+\end{eg}
+
+It turns out there is a canonical map $K_0(A) \to A/[A, A]$. Recall we have met $A/[A, A]$ when we were talking about the number of simple modules. We remarked that it was the $0$th Hochschild homology group, and when $A = kG$, there is a $k$-basis of $A/[A, A]$ given by $g_i + [A, A]$, where $g_i$ are conjugacy class representatives.
+
+To construct this canonical map, we first look at the trace map
+\[
+ M_n(A) \to A/[A, A].
+\]
+This is a $k$-linear map, invariant under conjugation. We also note that the canonical inclusion
+\begin{align*}
+ M_n(A) &\hookrightarrow M_{n + 1}(A)\\
+ X & \mapsto
+ \begin{pmatrix}
+ X & 0\\
+ 0 & 0
+ \end{pmatrix}
+\end{align*}
+is compatible with the trace map. We observe that the trace induces an isomorphism
+\[
+ \frac{M_n(A)}{[M_n(A), M_n(A)]} \to \frac{A}{[A, A]},
+\]
+by linear algebra.
+
+Now suppose $P$ is a finitely generated projective. Then $P$ is a direct summand of some $A^n$, and we can write
+\[
+ A^n = P \oplus Q,
+\]
+for some (necessarily projective) $Q$. Moreover, projection onto $P$ corresponds to an idempotent $e$ in $M_n(A) = \End_A(A^n)$, with
+\[
+ P = e(A^n),
+\]
+and we have
+\[
+ \End_A(P) = e M_n(A) e.
+\]
+Any other choice of idempotent yields an idempotent $e_1$ conjugate to $e$ in $M_{2n}(A)$. % exercise
+
+Therefore the trace of an endomorphism of $P$ is well-defined in $A/[A, A]$, independent of the choice of $e$. Thus we have a trace map
+\[
+ \End_A(P) \to A/[A, A].
+\]
+In particular, the trace of the identity map on $P$ is the trace of $e$. We call this the \emph{trace of $P$}\index{trace!of projective}.
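+For example, taking $P = A^n$ itself, with $e$ the identity of $M_n(A)$, the trace of the free module $A^n$ is $n \cdot 1 + [A, A]$.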
+
+Note that if we have finitely generated projectives $P_1$ and $P_2$, then we have
+\begin{align*}
+ P_1 \oplus Q_1 &= A^n\\
+ P_2 \oplus Q_2 &= A^m
+\end{align*}
+Then we have
+\[
+ (P_1 \oplus P_2) \oplus (Q_1 \oplus Q_2) = A^{m + n}.
+\]
+So we deduce that
+\[
+ \tr (P_1 \oplus P_2) = \tr P_1 + \tr P_2.
+\]
+\begin{defi}[Hattori--Stallings trace map]
+ The map $K_0(A) \to A/[A, A]$ induced by the trace is the \term{Hattori--Stallings trace map}.
+\end{defi}
+
+\begin{eg}
+ Let $A = kG$, and $G$ be finite. Then $A/[A, A]$ is a $k$-vector space with basis labelled by a set of conjugacy class representatives $\{g_i\}$. Then we know, for a finitely generated projective $P$, we can write
+ \[
+ \tr P = \sum r_P(g_i) g_i,
+ \]
+ where the $r_P(g_i)$ may be regarded as class functions. On the other hand, $P$ may be regarded as a $k$-vector space.
+
+ So there is a trace map
+ \[
+ \End_k(P) \to k,
+ \]
+ and also the ``character'' $\chi_P: G \to k$, where $\chi_P(g) = \tr g$. Hattori proved that if $C_G(g)$ is the centralizer of $g \in G$, then
+ \[
+ \chi_P(g) = |C_G(g)| r_P(g^{-1}).\tag{$*$}
+ \]
+ If $\Char k = 0$ and $k$ is algebraically closed, then we know $kG$ is semi-simple. So every finitely generated projective is a direct sum of simples, and
+ \[
+ K_0(kG) \cong \Z^r
+ \]
+ with $r$ the number of simples, and $(*)$ implies that the trace map
+ \[
+ \Z^r \cong K_0(kG) \to \frac{kG}{[kG, kG]} \cong k^r
+ \]
+ is the natural inclusion.
+\end{eg}
+This is the start of the theory of algebraic $K$-theory, which is a homology theory telling us about the endomorphisms of free $A$-modules. We can define $K_1(A)$ to be the abelianization of
+\[
+ \GL(A) = \lim_{n \to \infty} \GL_n(A).
+\]
+$K_2(A)$ tells us something about the relations required if you express $\GL(A)$ in terms of generators and relations. We're being deliberately vague. These groups are very hard to compute.
+
+Just as we saw in the $i = 0$ case, there are canonical maps
+\[
+ K_i(A) \to \HH_i(A),
+\]
+where $\HH_*$ is the Hochschild homology. The $i = 1$ case is called the \term{Dennis trace map}. These are analogous to the \emph{Chern maps} in topology.
+
+\section{Noetherian algebras}
+\subsection{Noetherian algebras}
+In the introduction, we met the definition of Noetherian algebras.
+\begin{defi}[Noetherian algebra]\index{Noetherian algebra}\index{algebra!Noetherian}
+ An algebra is \emph{left Noetherian} if it satisfies the \term{ascending chain condition} (\term{ACC}) on left ideals, i.e.\ if
+ \[
+ I_1 \leq I_2 \leq I_3 \leq \cdots
+ \]
+ is an ascending chain of left ideals, then there is some $N$ such that $I_{N + m} = I_N$ for all $m \geq 0$.
+
+ \emph{Right Noetherian} algebras are defined similarly, using right ideals, and we say an algebra is \emph{Noetherian} if it is both left and right Noetherian.
+\end{defi}
+We've also met a few examples. Here we are going to meet lots more. In fact, most of this first section is about establishing tools to show that certain algebras are Noetherian.
+
+One source of Noetherian algebras is via constructing polynomial and power series rings. Recall that in IB Groups, Rings and Modules, we proved the Hilbert basis theorem:
+\begin{thm}[Hilbert basis theorem]\index{Hilbert basis theorem}
+ If $A$ is Noetherian, then $A[X]$ is Noetherian.
+\end{thm}
+Note that our proof did not depend on $A$ being commutative. The same proof works for non-commutative rings. In particular, this tells us $k[X_1, \cdots, X_n]$ is Noetherian.
+
+It is also true that power series rings of Noetherian algebras are also Noetherian. The proof is very similar, but for completeness, we will spell it out completely.
+\begin{thm}
+ Let $A$ be left Noetherian. Then $A[[X]]$ is Noetherian.
+\end{thm}
+
+\begin{proof}
+ Let $I$ be a left ideal of $A[[X]]$. We'll show that if $A$ is left Noetherian, then $I$ is finitely generated. Let
+ \[
+ J_r = \{a: \text{there exists an element of $I$ of the form }aX^r + \text{higher degree terms}\}.
+ \]
+ We note that $J_r$ is a left ideal of $A$, and also note that
+ \[
+ J_0 \leq J_1 \leq J_2 \leq J_3 \leq \cdots,
+ \]
+ as we can always multiply by $X$. Since $A$ is left Noetherian, this chain terminates at $J_N$ for some $N$. Also, $J_0, J_1, J_2, \cdots, J_N$ are all finitely generated left ideals. We suppose $a_{i1}, \cdots, a_{is_i}$ generate $J_i$ for $i = 0, 1, \cdots, N$. These correspond to elements
+ \[
+ f_{ij}(X) = a_{ij} X^i + \text{higher order terms} \in I.
+ \]
+ We show that this finite collection generates $I$ as a left ideal. Take $f(X) \in I$, and suppose it looks like
+ \[
+ b_n X^n + \text{higher terms},
+ \]
+ with $b_n \not = 0$.
+
+ Suppose $n < N$. Then $b_n \in J_n$, and so we can write
+ \[
+ b_n = \sum c_{nj} a_{nj}.
+ \]
+ So
+ \[
+ f(X) - \sum c_{nj} f_{nj}(X) \in I
+ \]
+ has zero coefficient for $X^n$, and all other terms are of higher degree.
+
+ Repeating the process, we may thus assume wlog that $n \geq N$. So $f(X)$ is of the form $d_N X^N + $ higher degree terms. The same process gives
+ \[
+ f(X) - \sum c_{Nj} f_{Nj}(X)
+ \]
+ with terms of degree $N + 1$ or higher. We can repeat this yet again, using the fact that $J_{N + 1} = J_N$, to obtain
+ \[
+ f(X) - \sum c_{Nj} f_{Nj}(X) - \sum d_{N+1, j} X f_{Nj}(X) + \cdots,
+ \]
+ with terms of ever higher degree. Continuing indefinitely and collecting the coefficients into power series, we find
+ \[
+ f(X) = \sum e_j(X) f_{Nj}(X)
+ \]
+ for some $e_j(X) \in A[[X]]$. So $f$ is in the left ideal generated by our finite list, and hence $I$ is finitely generated.
+\end{proof}
+
+\begin{eg}
+ It is straightforward to see that quotients of Noetherian algebras are Noetherian. Thus, algebra images of the algebras $A[X]$ and $A[[X]]$ are also Noetherian.
+
+ For example, finitely-generated commutative $k$-algebras are always Noetherian. Indeed, if we have a generating set $x_i$ of $A$ as a $k$-algebra, then there is an algebra homomorphism
+ \[
+ \begin{tikzcd}[cdmap]
+ k[X_1, \cdots, X_n] \ar[r] & A\\
+ X_i \ar[r, maps to] & x_i
+ \end{tikzcd}
+ \]
+ which is surjective. So $A$ is an algebra image of a Noetherian algebra, and is hence Noetherian.
+\end{eg}
+
+We also saw previously that
+\begin{eg}
+ Any Artinian algebra is Noetherian.
+\end{eg}
+
+The next two examples we are going to see are less obviously Noetherian, and proving that they are Noetherian takes some work.
+
+\begin{defi}[$n$th Weyl algebra]\index{Weyl algebra}\index{$A_n(k)$}\index{$A_n$}
+ The \emph{$n$th Weyl algebra} $A_n(k)$ is the algebra generated by $X_1, \cdots, X_n, Y_1, \cdots, Y_n$ with relations
+ \[
+ Y_i X_i - X_i Y_i = 1,
+ \]
+ for all $i$, and everything else commutes.
+\end{defi}
+
+This algebra acts on the polynomial algebra $k[X_1, \cdots, X_n]$ with $X_i$ acting by left multiplication and $Y_i = \frac{\partial}{\partial X_i}$. Thus $k[X_1, \cdots, X_n]$ is a left $A_n(k)$-module. This is the prototype for thinking about differential operator algebras, and $D$-modules in general (which we will not talk about).
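+For instance, when $n = 1$, the defining relation holds in this representation by the product rule: for $f \in k[X_1]$, we have
+\[
+ (Y_1 X_1 - X_1 Y_1) \cdot f = \frac{\partial}{\partial X_1}(X_1 f) - X_1 \frac{\partial f}{\partial X_1} = f.
+\]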
+
+The other example we have is the universal enveloping algebra of a Lie algebra.
+\begin{defi}[Universal enveloping algebra]\index{universal enveloping algebra}\index{Lie algebra!universal enveloping algebra}
+ Let $\mathfrak{g}$ be a Lie algebra over $k$, and take a $k$-vector space basis $x_1, \cdots, x_n$. We form an associative algebra with generators $x_1, \cdots, x_n$ with relations
+ \[
+ x_i x_j - x_j x_i = [x_i, x_j],
+ \]
+ and this is the \emph{universal enveloping algebra} $\mathcal{U}(\mathfrak{g})$.
+\end{defi}
+
+\begin{eg}
+ If $\mathfrak{g}$ is abelian, i.e.\ $[x_i, x_j] = 0$ in $\mathfrak{g}$, then the enveloping algebra is the polynomial algebra in $x_1, \cdots, x_n$.
+\end{eg}
+
+\begin{eg}
+ If $\mathfrak{g} = \sl_2(k)$, then we have a basis
+ \[
+ e =
+ \begin{pmatrix}
+ 0 & 1\\
+ 0 & 0
+ \end{pmatrix},\quad
+ f =
+ \begin{pmatrix}
+ 0 & 0\\
+ 1 & 0
+ \end{pmatrix},\quad
+ h =
+ \begin{pmatrix}
+ 1 & 0\\
+ 0 & -1
+ \end{pmatrix}.
+ \]
+ They satisfy
+ \[
+ [e, f] = h,\quad [h, e] = 2e,\quad [h, f] = -2f.
+ \]
+\end{eg}
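+One can verify these relations directly. For instance,
+\[
+ he - eh =
+ \begin{pmatrix}
+ 0 & 1\\
+ 0 & 0
+ \end{pmatrix}
+ -
+ \begin{pmatrix}
+ 0 & -1\\
+ 0 & 0
+ \end{pmatrix}
+ = 2e.
+\]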
+
+To prove that $A_n(k)$ and $\mathcal{U}(\mathfrak{g})$ are Noetherian, we need some machinery that involves a bit of ``deformation theory''. The main strategy is to make use of a natural \emph{filtration} of the algebra.
+
+\begin{defi}[Filtered algebra]\index{filtered algebra}\index{algebra!filtered}\index{$\Z$-filtered algebra}
+ A \emph{($\Z$-)filtered algebra} $A$ is an algebra together with a collection of $k$-subspaces
+ \[
+ \cdots \leq A_{-1} \leq A_0 \leq A_1 \leq A_2 \leq \cdots
+ \]
+ such that $A_i \cdot A_j \subseteq A_{i + j}$ for all $i, j \in \Z$, and $1 \in A_0$.
+\end{defi}
+For example a polynomial ring is naturally filtered by the degree of the polynomial.
+
+The definition above was rather general, and often, we prefer to talk about more well-behaved filtrations.
+\begin{defi}[Exhaustive filtration]\index{filtration!exhaustive}\index{exhaustive filtration}
+ A filtration is \emph{exhaustive} if $\bigcup A_i = A$.
+\end{defi}
+
+\begin{defi}[Separated filtration]\index{separated filtration}\index{filtration!separated}
+ A filtration is \emph{separated} if $\bigcap A_i = \{0\}$.
+\end{defi}
+Unless otherwise specified, our filtrations are exhaustive and separated.
+
+For the moment, we will mostly be interested in positive filtrations.
+
+\begin{defi}[Positive filtration]\index{positive filtration}\index{filtration!positive}
+ A filtration is \emph{positive} if $A_i = 0$ for $i < 0$.
+\end{defi}
+
+Our canonical source of filtrations is the following construction:
+\begin{eg}
+ If $A$ is an algebra generated by $x_1, \cdots, x_n$, say, we can set
+ \begin{itemize}
+ \item $A_0$ is the $k$-span of $1$
+ \item $A_1$ is the $k$-span of $1, x_1, \cdots, x_n$
+ \item $A_2$ is the $k$-span of $1, x_1, \cdots, x_n, x_i x_j$ for $i, j \in \{1, \cdots, n\}$.
+ \end{itemize}
+ In general, $A_r$ is the $k$-span of all (non-commutative) polynomial expressions in the $x_i$ of degree $\leq r$.
+\end{eg}
+Of course, the filtration depends on the choice of the generating set.
+
+Often, to understand a filtered algebra, we consider a nicer object, known as the \emph{associated graded algebra}.
+\begin{defi}[Associated graded algebra]\index{associated graded algebra}
+ Given a filtration of $A$, the \emph{associated graded algebra} is the vector space direct sum
+ \[
+ \gr A = \bigoplus \frac{A_i}{A_{i - 1}}.
+ \]
+ This is given the structure of an algebra by defining multiplication by
+ \[
+ (a + A_{i - 1}) (b + A_{j - 1}) = ab + A_{i + j - 1} \in \frac{A_{i + j}}{A_{i + j - 1}}.
+ \]
+\end{defi}
+In our example of a finitely-generated algebra, the graded algebra is generated by $x_1 + A_0, \cdots, x_n + A_0 \in A_1/A_0$.
+
+The associated graded algebra has the natural structure of a graded algebra:
+\begin{defi}[Graded algebra]\index{graded algebra}\index{algebra!graded}\index{$\Z$-graded algebra}
+ A ($\Z$-)\emph{graded algebra} is an algebra $B$ that is of the form
+ \[
+ B = \bigoplus_{i \in \Z} B_i,
+ \]
+ where $B_i$ are $k$-subspaces, and $B_i B_j \subseteq B_{i + j}$. The $B_i$'s are called the \term{homogeneous components}\index{graded algebra!homogeneous components}\index{algebra!homogeneous components}.
+
+ A \term{graded ideal}\index{ideal!graded} is an ideal of the form
+ \[
+ \bigoplus J_i,
+ \]
+ where $J_i$ is a subspace of $B_i$, and similarly for left and right ideals.
+\end{defi}
+
+There is an intermediate object between a filtered algebra and its associated graded algebra, known as the \emph{Rees algebra}.
+\begin{defi}[Rees algebra]\index{Rees algebra}\index{filtered algebra!Rees algebra}\index{algebra!Rees algebra}
+ Let $A$ be a filtered algebra with filtration $\{A_i\}$. Then the \emph{Rees algebra} $\Rees(A)$ is the subalgebra $\bigoplus A_i T^i$ of the Laurent polynomial algebra $A[T, T^{-1}]$ (where $T$ commutes with $A$).
+\end{defi}
+
+Since $1 \in A_0 \subseteq A_1$, we know $T \in \Rees(A)$. The key observation is that
+\begin{itemize}
+ \item $\Rees(A)/(T) \cong \gr A$.
+ \item $\Rees(A)/(1 - T) \cong A$.
+\end{itemize}
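+To see how these two quotients work in the simplest case:
+\begin{eg}
+ Take $A = k[X]$, filtered by degree. Then $\Rees(A)$ is the subalgebra of $k[X][T, T^{-1}]$ generated by $T$ and $XT$, and since these two elements are algebraically independent, $\Rees(A)$ is a polynomial algebra in two variables. Quotienting by $(T)$ kills $T$ and leaves the class of $XT$, recovering $\gr A \cong k[\bar{X}]$, while quotienting by $(1 - T)$ identifies $XT$ with $X$ and recovers $A = k[X]$.
+\end{eg}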
+
+Since $A_n(k)$ and $\mathcal{U}(\mathfrak{g})$ are finitely-generated algebras, they come with a natural filtration induced by the generating set. It turns out, in both cases, the associated graded algebras are pretty simple.
+\begin{eg}
+ Let $A = A_n(k)$, with generating set $X_1, \cdots, X_n$ and $Y_1, \cdots, Y_n$. We take the filtration as for a finitely-generated algebra. Now observe that if $a_i \in A_i$, and $a_j \in A_j$, then
+ \[
+ a_i a_j - a_j a_i \in A_{i + j - 2}.
+ \]
+ So we see that $\gr A$ is commutative, and in fact
+ \[
+ \gr A_n(k) \cong k[\bar{X}_1, \cdots, \bar{X}_n, \bar{Y}_1, \cdots, \bar{Y}_n],
+ \]
+ where $\bar{X}_i$, $\bar{Y}_i$ are the images of $X_i$ and $Y_i$ in $A_1/A_0$ respectively. This is not hard to prove, but is rather messy. It requires a careful induction.
+\end{eg}
+
+\begin{eg}
+ Let $\mathfrak{g}$ be a Lie algebra, and consider $A = \mathcal{U}(\mathfrak{g})$. This has generating set $x_1, \cdots, x_n$, which is a vector space basis for $\mathfrak{g}$. Again using the filtration for finitely-generated algebras, we get that if $a_i \in A_i$ and $a_j \in A_j$, then
+ \[
+ a_i a_j - a_j a_i \in A_{i + j - 1}.
+ \]
+ So again $\gr A$ is commutative. In fact, we have
+ \[
+ \gr A \cong k[\bar{x}_1, \cdots, \bar{x}_n].
+ \]
+ The fact that this is a polynomial algebra amounts to the same as the \term{Poincar\'e-Birkhoff-Witt theorem}, which says that the ordered monomials $x_1^{r_1} x_2^{r_2} \cdots x_n^{r_n}$ give a $k$-vector space basis for $\mathcal{U}(\mathfrak{g})$.
+\end{eg}
+
+In both cases, we find that $\gr A$ are finitely-generated and commutative, and therefore Noetherian. We want to use this fact to deduce something about $A$ itself.
+
+\begin{lemma}
+ Let $A$ be a positively filtered algebra. If $\gr A$ is Noetherian, then $A$ is left Noetherian.
+\end{lemma}
+By symmetry, the same argument with right ideals shows that $A$ is also right Noetherian.
+
+\begin{proof}
+ Given a left ideal $I$ of $A$, we can form
+ \[
+ \gr I = \bigoplus \frac{I \cap A_i}{I \cap A_{i - 1}},
+ \]
+ where $I$ is filtered by $\{I \cap A_i\}$. By the isomorphism theorem, we know
+ \[
+ \frac{I \cap A_i}{I \cap A_{i - 1}} \cong \frac{(I \cap A_i) + A_{i - 1}}{A_{i - 1}} \subseteq \frac{A_i}{A_{i - 1}}.
+ \]
+ Then $\gr I$ is a left graded ideal of $\gr A$.
+
+ Now suppose we have a strictly ascending chain
+ \[
+ I_1 < I_2 < \cdots
+ \]
+ of left ideals. Since we have a positive filtration, for each $n$ there is a least $i$ such that $I_n \cap A_i \subsetneq I_{n + 1} \cap A_i$, and then $I_n \cap A_{i - 1} = I_{n + 1} \cap A_{i - 1}$. Thus
+ \[
+ \gr I_1 \subsetneq \gr I_2 \subsetneq \gr I_3 \subsetneq \cdots.
+ \]
+ This is a contradiction, since $\gr A$ is Noetherian. So $A$ must be left Noetherian.
+\end{proof}
+Positivity is needed to guarantee that the least $i$ above exists, i.e.\ that there is a transition from equality to strict inequality. If we have a $\Z$-filtered algebra instead, then we need to impose some completeness assumption, but we will not go into that.
+
+\begin{cor}
+ $A_n(k)$ and $\mathcal{U}(\mathfrak{g})$ are left/right Noetherian.
+\end{cor}
+
+\begin{proof}
+ $\gr A_n(k)$ and $\gr \mathcal{U}(\mathfrak{g})$ are commutative and finitely generated algebras.
+\end{proof}
+
+Note that there is an alternative filtration for $A_n(k)$ yielding a commutative associated graded algebra, by setting $A_0 = k[X_1, \cdots, X_n]$ and
+\[
+ A_1 = k[X_1, \cdots, X_n] + \sum_{j = 1}^n k[X_1, \cdots, X_n] Y_j,
+\]
+i.e.\ linear terms in the $Y$, and then keep on going. Essentially, we are filtering on the degrees of the $Y_i$ only. This also gives a polynomial algebra as the associated graded algebra. The main difference is that when we take the commutator, we don't go down by two degrees, but only one. Later, we will see this is advantageous when we want to get a Poisson bracket on the associated graded algebra.
+
+We can look at further examples of Noetherian algebras.
+\begin{eg}
+ The \term{quantum plane} \term{$k_q[X, Y]$} has generators $X$ and $Y$, with relation
+ \[
+ XY = q YX
+ \]
+ for some $q \in k^\times$. This thing behaves differently depending on whether $q$ is a root of unity or not.
+\end{eg}
+This quantum plane first appeared in mathematical physics.
+\begin{eg}
+ The \term{quantum torus} \term{$k_q[X, X^{-1}, Y, Y^{-1}]$} has generators $X$, $X^{-1}$, $Y$, $Y^{-1}$ with relations
+ \[
+ XX^{-1} = YY^{-1} = 1,\quad XY = q YX.
+ \]
+\end{eg}
+The word ``quantum'' in this context is usually thrown around a lot, and doesn't really mean much apart from non-commutativity, and there is very little connection with actual physics.
+
+These algebras are both left and right Noetherian. We cannot prove this by the filtration method we just used. We will need a version of Hilbert's basis theorem which allows twisting of the coefficients. This is left as an exercise on the second example sheet.
+
+In the examples of $A_n(k)$ and $\mathcal{U}(\mathfrak{g})$, the associated graded algebras are commutative. However, it turns out we can still capture the non-commutativity of the original algebra by some extra structure on the associated graded algebra.
+
+So suppose $A$ is a (positively) filtered algebra whose associated graded algebra $\gr A$ is commutative. Recall that the filtration has a corresponding Rees algebra, and we saw that $\Rees A / (T) \cong \gr A$. Since $\gr A$ is commutative, this means
+\[
+ [\Rees A, \Rees A] \subseteq (T).
+\]
+This induces a map $\Rees (A) \times \Rees (A) \to (T)/(T^2)$, sending $(r, s) \mapsto [r, s] + (T^2)$. Quotienting out by $(T)$ on the left, this gives a map
+\[
+ \gr A \times \gr A \to \frac{(T)}{(T^2)}.
+\]
+We can in fact identify the right hand side with $\gr A$ as well. Indeed, the map
+\[
+ \begin{tikzcd}[column sep=large]
+ \gr A \cong \displaystyle\frac{\Rees(A)}{(T)} \ar[r, "\text{mult. by $T$}"] & \displaystyle\frac{(T)}{(T^2)}
+ \end{tikzcd},
+\]
+is an isomorphism of $\gr A \cong \Rees A/(T)$-modules. We then have a bracket
+\[
+ \begin{tikzcd}[cdmap]
+ \{\ph, \ph\}: \gr A \times \gr A \ar[r] & \gr A\\
+ (\bar{r}, \bar{s}) \ar[r, maps to] & \{r, s\}
+ \end{tikzcd}.
+\]
+Note that in our original filtration of the Weyl algebra $A_n(k)$, since the commutator brings us down by two degrees, this bracket vanishes identically, but the alternative filtration does give a non-zero $\{\ph, \ph\}$.
+
+This $\{\ph, \ph\}$ is an example of a \emph{Poisson bracket}.
+
+\begin{defi}[Poisson algebra]\index{Poisson algebra}
+ An associative algebra $B$ is a \emph{Poisson algebra} if there is a $k$-bilinear bracket $\{\ph, \ph\}: B \times B \to B$ such that
+ \begin{itemize}
+ \item $B$ is a Lie algebra under $\{\ph, \ph\}$, i.e.
+ \[
+ \{r, s\} = - \{s, r\}
+ \]
+ and
+ \[
+ \{\{r, s\}, t\} + \{\{s, t\}, r\} + \{\{t, r\}, s\} = 0.
+ \]
+ \item We have the \term{Leibniz rule}
+ \[
+ \{r, st\} = s\{r, t\} + \{r, s\} t.
+ \]
+ \end{itemize}
+\end{defi}
+The second condition says $\{r, \ph\}: B \to B$ is a derivation.
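+For example, taking $A = A_1(k)$ with the alternative filtration described above, $\gr A = k[\bar{X}, \bar{Y}]$ becomes a Poisson algebra, with bracket determined by
+\[
+ \{\bar{Y}, \bar{X}\} = \overline{[Y, X]} = 1,
+\]
+together with antisymmetry and the Leibniz rule; up to sign conventions, this is the classical Poisson bracket of functions on the plane.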
+
+\subsection{More on \tph{$A_n(k)$}{An(k)}{An(k)} and \texorpdfstring{$\mathcal{U}(\mathfrak{g})$}{U(g)}}
+Our goal now is to study modules of $A_n(k)$. The first result tells us we must focus on infinite dimensional modules.
+\begin{lemma}
+ Suppose $\Char k = 0$. Then $A_n(k)$ has no non-zero modules that are finite-dimensional $k$-vector spaces.
+\end{lemma}
+
+\begin{proof}
+ Suppose $M$ is a finite-dimensional module. Then we've got an algebra homomorphism $\theta: A_n(k) \to \End_k(M) \cong M_m(k)$, where $m = \dim_k M$.
+
+ In $A_n(k)$, we have
+ \[
+ Y_1 X_1 - X_1 Y_1 = 1.
+ \]
+ Applying the trace map, we know
+ \[
+ \tr(\theta(Y_1) \theta(X_1) - \theta(X_1) \theta(Y_1)) = \tr I = m.
+ \]
+ But since the trace is cyclic, the left hand side vanishes. So $m = 0$ in $k$, and since $\Char k = 0$, this forces $m = 0$. So $M$ is trivial.
+\end{proof}
+A similar argument works for the quantum torus, but using determinants instead.
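+The assumption on the characteristic is genuinely needed. If $\Char k = p$, the trace argument above only shows that $p \mid m$, and indeed $k[X_1]/(X_1^p)$ is a non-zero $p$-dimensional $A_1(k)$-module, with $X_1$ acting by multiplication and $Y_1$ by formal differentiation.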
+
+We're going to make use of our associated graded algebras from last time, which are isomorphic to polynomial algebras. Given a filtration $\{A_i\}$ of $A$, we may filter a module with generating set $\mathcal{S}$ by setting
+\[
+ M_i = A_i \mathcal{S}.
+\]
+Note that
+\[
+ A_j M_i \subseteq M_{i + j},
+\]
+which allows us to form an \term{associated graded module}\index{$\gr M$}
+\[
+ \gr M = \bigoplus \frac{M_i}{M_{i - 1}}.
+\]
+This is a graded $\gr A$-module, which is finitely-generated if $M$ is. So we've got a finitely-generated graded module over a graded commutative algebra.
+
+To understand this further, we prove some results about graded modules over commutative algebras, which is going to apply to our $\gr A$ and $\gr M$.
+\begin{defi}[Poincar\'e series]\index{Poincar\'e series}
+ Let $V$ be a graded module over a graded algebra $S$, say
+ \[
+ V = \bigoplus_{i = 0}^\infty V_i.
+ \]
+ Then the \emph{Poincar\'e series} is
+ \[
+ P(V, t) = \sum_{i = 0}^\infty (\dim V_i) t^i.
+ \]
+\end{defi}
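+\begin{eg}
+ Grading $k[X]$ by degree, each homogeneous component is $1$-dimensional, so
+ \[
+ P(k[X], t) = \sum_{i = 0}^\infty t^i = \frac{1}{1 - t},
+ \]
+ and more generally $P(k[X_1, \cdots, X_n], t) = (1 - t)^{-n}$.
+\end{eg}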
+
+\begin{thm}[Hilbert-Serre theorem]\index{Hilbert-Serre theorem}
+ The Poincar\'e series $P(V, t)$ of a finitely-generated graded module
+ \[
+ V = \bigoplus_{i = 0}^\infty V_i
+ \]
+ over a finitely-generated commutative algebra
+ \[
+ S = \bigoplus_{i = 0}^\infty S_i
+ \]
+ with homogeneous generating set $x_1, \cdots, x_m$ is a rational function of the form
+ \[
+ \frac{f(t)}{\prod(1 - t^{k_i})},
+ \]
+ where $f(t) \in \Z[t]$ and $k_i$ is the degree of the generator $x_i$.
+\end{thm}
+
+\begin{proof}
+ We induct on the number $m$ of generators. If $m = 0$, then $S = S_0 = k$, and $V$ is therefore a finite-dimensional $k$-vector space. So $P(V, t)$ is a polynomial.
+
+ Now suppose $m > 0$. We assume the theorem is true for $m - 1$ generators. Multiplication by $x_m$ gives an exact sequence of graded modules
+ \[
+ \begin{tikzcd}
+ 0 \ar[r] & K \ar[r] & V \ar[r, "x_m"] & V \ar[r] & V/x_m V \ar[r] & 0
+ \end{tikzcd},
+ \]
+ where $K$ is the kernel of multiplication by $x_m$, and the middle map increases degrees by $k_m$. Both $K$ and $V/x_m V$ are finitely-generated graded modules (for $K$ we use that $V$ is a Noetherian module), and they are killed by $x_m$, so they are modules over the subalgebra generated by $x_1, \cdots, x_{m - 1}$. Counting dimensions degree by degree in this sequence, we obtain
+ \[
+ (1 - t^{k_m}) P(V, t) = P(V/x_m V, t) - t^{k_m} P(K, t),
+ \]
+ and applying the induction hypothesis to the right-hand side gives the result.
+\end{proof}
+
+\begin{cor}
+ Suppose in addition that the generators $x_1, \cdots, x_m$ all have degree $1$, and write
+ \[
+ P(V, t) = \frac{f(t)}{(1 - t)^d},\quad f(1) \not= 0,
+ \]
+ cancelling any common factors of $1 - t$. Then for all sufficiently large $i$, we have $\dim V_i = \phi(i)$ for some polynomial $\phi(t) \in \Q[t]$. In fact, we have
+ \[
+ \phi(t) = \frac{f(1)}{(d - 1)!} t^{d - 1} + \text{lower degree terms}.
+ \]
+ Since $f(1) \not= 0$, this has degree $d - 1$.
+
+ This implies that
+ \[
+ \sum_{j = 0}^i \dim V_j
+ \]
+ is eventually equal to a polynomial $\chi(t) \in \Q[t]$ of degree $d$.
+\end{cor}
+
+\begin{proof}
+ Expanding $(1 - t)^{-d} = \sum_{i \geq 0} \binom{i + d - 1}{d - 1} t^i$ and writing $f(t) = \sum_j f_j t^j$, we get, for $i \geq \deg f$,
+ \[
+ \dim V_i = \sum_j f_j \binom{i - j + d - 1}{d - 1},
+ \]
+ which is a polynomial in $i$ of degree $d - 1$ with leading coefficient $\sum_j f_j/(d - 1)! = f(1)/(d - 1)!$. Summing $\dim V_j$ over $j \leq i$ then gives a polynomial in $i$ of degree $d$.
+\end{proof}
+This $\phi(t)$ is the \term{Hilbert polynomial}, and $\chi(t)$ the \term{Samuel polynomial}. Some people call $\chi(t)$ the Hilbert polynomial instead, though.
+
+We now want to apply this to our cases of $\gr A$, where $A = A_n(k)$ or $\mathcal{U}(\mathfrak{g})$, filtered as before. Then we deduce that
+\[
+ \sum_{j = 0}^i \dim \frac{M_j}{M_{j - 1}} = \chi(i),
+\]
+for a polynomial $\chi(t) \in \Q[t]$ and all sufficiently large $i$. But we also know
+\[
+ \sum_{j = 0}^i \dim \frac{M_j}{M_{j - 1}} = \dim M_i.
+\]
+We are now in a position to make a definition.
+\begin{defi}[Gelfand-Kirillov dimension]\index{Gelfand-Kirillov dimension}\index{$\GKdim(M)$}\index{$d(M)$}
+ Let $A = A_n(k)$ or $\mathcal{U}(\mathfrak{g})$ and $M$ a finitely-generated $A$-module, filtered as before. Then the \emph{Gelfand-Kirillov dimension} $d(M)$ of $M$ is the degree of the Samuel polynomial of $\gr M$ as a $\gr A$-module.
+\end{defi}
+This makes sense because $\gr A$ is a commutative algebra in this case. \emph{A priori}, it seems like this depends on our choice of filtering on $M$, but actually, it doesn't. For a more general algebra, we can define the dimension as below:
+
+\begin{defi}[Gelfand-Kirillov dimension]\index{Gelfand-Kirillov dimension}\index{$\GKdim(M)$}\index{$d(M)$}
+ Let $A$ be a finitely-generated $k$-algebra, which is filtered as before, and a finitely-generated $A$-module $M$, filtered as before. Then the GK-dimension of $M$ is
+ \[
+ d(M) = \limsup_{n \to \infty} \frac{\log (\dim M_n)}{\log n}.
+ \]
+\end{defi}
+In the case of $A = A_n(k)$ or $\mathcal{U}(\mathfrak{g})$, this matches the previous definition. Again, this does not actually depend on the choice of generating sets.
+
+Recall we showed that (in characteristic $0$) no non-zero $A_n(k)$-module $M$ can have finite dimension as a $k$-vector space. So we know $d(M) > 0$. Also, we know that $d(M)$ is an integer for the cases $A = A_n$ or $\mathcal{U}(\mathfrak{g})$, since it is the degree of a polynomial. However, for a general algebra $A$ (taking $M = A$), we can get non-integral values. In fact, the values we can get are $0, 1, 2$, and then any real number $\geq 2$. We can also have $\infty$ if the $\limsup$ is infinite.
+
+\begin{eg}
+ If $A = kG$, then we have $\GKdim(kG) < \infty$ iff $G$ has a subgroup $H$ of finite index with $H$ embedding into the strictly upper triangular integral matrices, i.e.\ matrices of the form
+ \[
+ \begin{pmatrix}
+ 1 & * & \cdots & *\\
+ 0 & 1 & \cdots & *\\
+ \vdots & \vdots & \ddots & \vdots\\
+ 0 & 0 & \cdots & 1
+ \end{pmatrix}.
+ \]
+ This is a theorem of Gromov, and is quite hard to prove.
+\end{eg}
+
+\begin{eg}
+ We have $\GKdim(A)= 0$ iff $A$ is finite-dimensional as a $k$-vector space.
+
+ We have
+ \[
+ \GKdim(k[X]) = 1,
+ \]
+ and in general
+ \[
+ \GKdim(k[X_1, \cdots, X_n]) = n.
+ \]
+ Indeed, we have
+ \[
+ \dim_k(\text{polynomials of degree} \leq m) = \binom{m + n}{n}.
+ \]
+ So we have
+ \[
+ \chi(t) = \binom{t + n}{n}.
+ \]
+ This is of degree $n$, with leading coefficient $\frac{1}{n!}$.
+\end{eg}
+
+We can make the following definition, which we will not use again:
+\begin{defi}[Multiplicity]\index{multiplicity}
+ Let $A$ be a commutative algebra, and $M$ an $A$-module. The \emph{multiplicity} of $M$ with $d(M) = d$ is
+ \[
+ d! \times \text{leading coefficient of $\chi(t)$}.
+ \]
+\end{defi}
+On the second example sheet, we will see that the multiplicity is integral.
+
+We continue looking at more examples.
+\begin{eg}
+ We have $d(A_n(k)) = 2n$, and $d(\mathcal{U}(\mathfrak{g})) = \dim_k \mathfrak{g}$. Here we are using the fact that the associated graded algebras are polynomial algebras.
+\end{eg}
+
+\begin{eg}
+ We met $k[X_1, \cdots, X_n]$ as the ``canonical'' $A_n(k)$-module. The filtration of the module matches the one we used when thinking about the polynomial algebra as a module over itself. So we get
+ \[
+ d(k[X_1, \cdots, X_n]) = n.
+ \]
+\end{eg}
+
+\begin{lemma}
+ Let $M$ be a finitely-generated $A_n$-module. Then $d(M) \leq 2n$.
+\end{lemma}
+
+\begin{proof}
+ Take generators $m_1, \cdots, m_s$ of $M$. Then there is a surjective filtered module homomorphism
+ \[
+ \begin{tikzcd}[cdmap]
+ A_n \oplus \cdots \oplus A_n \ar[r] & M\\
+ (a_1, \cdots, a_s) \ar[r, maps to] & \sum a_i m_i
+ \end{tikzcd}
+ \]
+ It is easy to see that quotients can only reduce dimension, so
+ \[
+ \GKdim(M) \leq d(A_n \oplus \cdots \oplus A_n).
+ \]
+ But
+ \[
+ \chi_{A_n \oplus \cdots \oplus A_n} = s \chi_{A_n}
+ \]
+ has degree $2n$.
+\end{proof}
+
+More interestingly, we have the following result:
+\begin{thm}[Bernstein's inequality]\index{Bernstein's inequality}
+ Let $M$ be a non-zero finitely-generated $A_n(k)$-module, and $\Char k = 0$. Then
+ \[
+ d(M) \geq n.
+ \]
+\end{thm}
+
+\begin{defi}[Holonomic module]\index{holonomic module}
+ An $A_n(k)$ module $M$ is \emph{holonomic} iff $d(M) = n$.
+\end{defi}
+If we have a holonomic module, then we can quotient by a maximal submodule, and get a simple holonomic module. For a long time, people thought all simple modules were holonomic, until someone discovered a simple module that is not. In fact, most simple modules are not holonomic, but we somehow managed to believe otherwise for a long time.
+
+\begin{proof}
+ Take a generating set and form the canonical filtrations $\{A_i\}$ of $A_n(k)$ and $\{M_i\}$ of $M$. We let $\chi(t)$ be the Samuel polynomial. Then for large enough $i$, we have
+ \[
+ \chi(i) = \dim M_i.
+ \]
+ We claim that
+ \[
+ \dim A_i \leq \dim \Hom_k(M_i, M_{2i}) = \dim M_i \times \dim M_{2i}.
+ \]
+ Assuming this, for large enough $i$, we have
+ \[
+ \dim A_i \leq \chi(i) \chi(2i).
+ \]
+ But we know
+ \[
+ \dim A_i = \binom{i + 2n}{2n},
+ \]
+ which is a polynomial of degree $2n$. But $\chi(t) \chi(2t)$ is a polynomial of degree $2 d(M)$. So we get that
+ \[
+ n \leq d(M).
+ \]
+ So it remains to prove the claim. It suffices to prove that the natural map
+ \[
+ A_i \to \Hom_k (M_i, M_{2i}),
+ \]
+ given by multiplication is injective.
+
+ So we want to show that if $a \in A_i$ is non-zero, then $a M_i \not= 0$. We prove this by induction on $i$. When $i = 0$, then $A_0 = k$, and $M_0$ is a non-zero finite-dimensional $k$-vector space. Then the result is obvious.
+
+ If $i > 0$, we suppose the result is true for smaller $i$. We let $a \in A_i$ be non-zero. If $a M_i = 0$, then certainly $a \not\in k$. We express
+ \[
+ a = \sum c_{\boldsymbol\alpha\boldsymbol\beta} X_1^{\alpha_1} X_2^{\alpha_2} \cdots X_n^{\alpha_n} Y_1^{\beta_1} \cdots Y_n^{\beta_n},
+ \]
+ where $\boldsymbol\alpha = (\alpha_1, \cdots, \alpha_n)$, $\boldsymbol\beta = (\beta_1, \cdots, \beta_n)$, and $c_{\boldsymbol\alpha, \boldsymbol\beta} \in k$.
+
+ If possible, pick a $j$ such that $c_{\boldsymbol\alpha, \boldsymbol\beta} \not= 0$ for some $\boldsymbol\alpha, \boldsymbol\beta$ with $\alpha_j \not= 0$ (this happens when there is an $X$ involved). Then
+ \[
+ [Y_j, a] = \sum \alpha_j c_{\boldsymbol\alpha, \boldsymbol\beta} X_1^{\alpha_1} \cdots X_j^{\alpha_j - 1} \cdots X_n^{\alpha_n} Y_1^{\beta_1} \cdots Y_n^{\beta_n},
+ \]
+ and this is non-zero, and lives in $A_{i - 1}$.
+
+ If $a M_i = 0$, then certainly $a M_{i - 1} = 0$. Hence
+ \[
+ [Y_j, a] M_{i - 1} = (Y_j a - a Y_j) M_{i - 1} = 0,
+ \]
+ using the fact that $Y_j M_{i - 1} \subseteq M_i$. But $[Y_j, a]$ is a non-zero element of $A_{i - 1}$, so this contradicts the induction hypothesis.
+
+ If $a$ only has $Y$'s involved, then we do something similar using $[X_j, a]$.
+\end{proof}
+There is also a geometric way of doing this.
+
+We take $k = \C$. We know $\gr A_n$ is a polynomial algebra
+\[
+ \gr A_n = k[\bar{X}_1, \cdots, \bar{X}_n, \bar{Y}_1, \cdots, \bar{Y}_n],
+\]
+which may be viewed as the coordinate algebra of the cotangent bundle on affine $n$-space $\C^n$. The points of this correspond to the maximal ideals of $\gr A_n$. If $I$ is a left ideal of $A_n(\C)$, then we can form $\gr I$ and we can consider the set of maximal ideals containing it. This gives us the \term{characteristic variety} $\mathrm{Ch}(A_n/I)$.
+
+We saw that there was a Poisson bracket on $\gr A_n$, and this may be used to define a skew-symmetric form on the tangent space at any point of the cotangent bundle. In this case, this is a non-degenerate skew-symmetric form.
+
+We can consider the tangent space $U$ of $\mathrm{Ch} (A_n/I)$ at a non-singular point, and there's a theorem of Gabber (1981) which says that $U \supseteq U^\perp$, where $\perp$ is with respect to the skew-symmetric form. By non-degeneracy, we must have $\dim U \geq n$, and we also know that
+\[
+ \dim \mathrm{Ch}(A_n/I) = d(A_n/I).
+\]
+So we find that $d(A_n/I) \geq n$.
+
+In the case of $A = \mathcal{U}(\mathfrak{g})$, we can think of $\gr A$ as the coordinate algebra on $\mathfrak{g}^*$, the vector space dual of $\mathfrak{g}$. The Poisson bracket leads to a skew-symmetric form on tangent spaces at points of $\mathfrak{g}^*$. In this case, we don't necessarily get non-degeneracy. However, on $\mathfrak{g}$, we have the adjoint action of the corresponding Lie group $G$, and this induces a co-adjoint action on $\mathfrak{g}^*$. Thus $\mathfrak{g}^*$ is a disjoint union of orbits. If we consider the induced skew-symmetric form on tangent spaces of orbits (at non-singular points), then it is non-degenerate.
+
+\subsection{Injective modules and Goldie's theorem}
+The goal of this section is to prove Goldie's theorem.
+\begin{thm}[Goldie's theorem]
+ Let $A$ be a right Noetherian algebra with no non-zero ideals all of whose elements are nilpotent. Then $A$ embeds in a finite direct sum of matrix algebras over division algebras.
+\end{thm}
+The outline of the proof is as follows: given any $A$, we embed $A$ in an ``injective hull'' $E(A)$. We will then find that, similar to what we did in Artin--Wedderburn, we would like to decompose $\End(E(A))$ into a direct sum of matrix algebras over division algebras. But we cannot do this directly. We will have to first quotient $\End(E(A))$ by some ideal $I$.
+
+On the other hand, we do not actually have an embedding of $A \cong \End_A(A)$ into $\End(E(A))$. Instead, what we have is only a homomorphism $\End_A(A) \to \End(E(A))/I$, where we quotient out by the same ideal $I$. So actually the two of our problems happen to cancel each other out.
+
+We will then prove that the kernel of this map contains only nilpotent elements, and then our hypothesis implies this homomorphism is indeed an embedding.
+
+We begin by first constructing the injective hull. This is going to involve talking about injective modules, which are dual to the notion of projective modules.
+\begin{defi}[Injective module]\index{injective module}\index{module!injective}
+ An $A$-module $E$ is \emph{injective} if for every diagram of $A$-module maps
+ \[
+ \begin{tikzcd}
+ 0 \ar[r] & M \ar[r, "\theta", tail] \ar[d, "\phi"] & N \ar[ld, "\psi", dashed]\\
+ & E
+ \end{tikzcd},
+ \]
+ such that $\theta$ is injective, there exists a map $\psi$ that makes the diagram commute. Equivalently, $\Hom(\ph, E)$ is an exact functor.
+\end{defi}
+
+\begin{eg}
+ Take $A = k$. Then all $k$-vector spaces are injective $k$-modules.
+\end{eg}
+
+\begin{eg}
+ Take $A = k[X]$. Then $k(X)$ is an injective $k[X]$-module.
+\end{eg}
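+In the second example, one can use the fact that over a principal ideal domain, a module is injective if and only if it is divisible, and $k(X)$ is clearly divisible.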
+
+\begin{lemma}
+ Every direct summand of an injective module is injective, and a direct product of injective modules is injective.
+\end{lemma}
+
+\begin{proof}
+ Same as proof for projective modules.
+\end{proof}
+
+\begin{lemma}
+ Every $A$-module may be embedded in an injective module.
+\end{lemma}
+We say the category of $A$-modules has \term{enough injectives}. The dual result for projectives was immediate, as free modules are projective.
+
+\begin{proof}
+ Let $M$ be a right $A$-module. Then $\Hom_k(A, M)$ is a right $A$-module via
+ \[
+ (fa)(x) = f(ax).
+ \]
+ We claim that $\Hom_k(A, M)$ is an injective module. Suppose we have
+ \[
+ \begin{tikzcd}
+ 0 \ar[r] & M_1 \ar[r, "\theta"] \ar[d, "\phi"] & N_1\\
+ & \Hom_k(A, M)
+ \end{tikzcd}
+ \]
+ We consider the $k$-module diagram
+ \[
+ \begin{tikzcd}
+ 0 \ar[r] & M_1 \ar[r, "\theta"] \ar[d, "\alpha"] & N_1 \ar[ld, dashed, "\beta"]\\
+ & M
+ \end{tikzcd}
+ \]
+ where $\alpha(m_1) = \phi(m_1)(1)$. Since $M$ is injective as a $k$-module, we can find the $\beta$ such that $\alpha = \beta \theta$. We define $\psi: N_1 \to \Hom_k(A, M)$ by
+ \[
+ \psi(n_1)(x) = \beta(n_1 x).
+ \]
+ It is straightforward to check that this does the trick. Also, we have an embedding $M \hookrightarrow \Hom_k(A, M)$ by $m \mapsto (\phi_m: x \mapsto mx)$.
+\end{proof}
+The category theorist will write the proof in a line as
+\[
+ \Hom_A(\ph, \Hom_k(A, M)) \cong \Hom_k(\ph \otimes_A A , M) \cong \Hom_k(\ph, M),
+\]
+which is exact since $M$ is injective as a $k$-module.
+
+Note that neither the construction of $\Hom_k(A, M)$, nor the proof that it is injective, requires the right $A$-module structure of $M$. All we need is that $M$ is an injective $k$-module.
+
+\begin{lemma}
+ An $A$-module is injective iff it is a direct summand of every extension of itself.
+\end{lemma}
+
+\begin{proof}
+ Suppose $E$ is injective and $E'$ is an extension of $E$. Then we can form the diagram
+ \[
+ \begin{tikzcd}
+ 0 \ar[r] & E \ar[r] \ar[d, "\mathrm{id}"] & E' \ar[dl, dashed, "\psi"]\\
+ & E
+ \end{tikzcd},
+ \]
+ and then by injectivity, we can find $\psi$. So
+ \[
+ E' = E \oplus \ker \psi.
+ \]
+ Conversely, suppose $E$ is a direct summand of every extension. But by the previous lemma, we can embed $E$ in an injective $E'$. This implies that $E$ is a direct summand of $E'$, and hence injective.
+\end{proof}
+
+There is a ``smallest'' injective module into which any given module embeds, called the \emph{injective hull}, or \emph{injective envelope}. This is why our injectives are called $E$. The ``smallness'' will be captured by the fact that it is an essential extension.
+
+\begin{defi}[Essential submodule]\index{essential submodule}\index{submodule!essential}
+ An \emph{essential submodule} $M$ of an $A$-module $N$ is one where $M \cap V \not= \{0\}$ for every non-zero submodule $V$ of $N$. We say $N$ is an \term{essential extension}\index{extension!essential} of $M$.
+\end{defi}
+
+\begin{lemma}
+ An essential extension of an essential extension is essential.
+\end{lemma}
+
+\begin{proof}
+ Suppose $M < E < F$ are essential extensions. Then given a non-zero $N \leq F$, we know $N \cap E \not= \{0\}$, and this is a non-zero submodule of $E$. So $(N \cap E) \cap M = N \cap M \not= 0$. So $F$ is an essential extension of $M$.
+\end{proof}
+
+\begin{lemma}
+ A maximal essential extension is an injective module.
+\end{lemma}
+Such maximal things exist by Zorn's lemma.
+
+\begin{proof}
+ Let $E$ be a maximal essential extension of $M$, and consider any embedding $E \hookrightarrow F$. We shall show that $E$ is a direct summand of $F$. Let $S$ be the set of all submodules $V$ of $F$ with $V \cap E = \{0\}$. We apply Zorn's lemma to get a maximal such module, say $V_1$.
+
+ Then $E$ embeds into $F/V_1$ as an essential submodule, by the maximality of $V_1$. By transitivity of essential extensions, $F/V_1$ is an essential extension of $M$, but $E$ is maximal. So $E \cong F/V_1$. In other words,
+ \[
+ F = E \oplus V_1.\qedhere
+ \]
+\end{proof}
+We can now make the following definition:
+\begin{defi}[Injective hull]\index{injective hull}\index{injective envelope}
+ A maximal essential extension of $M$ is the \emph{injective hull} (or \emph{injective envelope}) of $M$, written $E(M)$.
+\end{defi}
+
+\begin{prop}
+ Let $M$ be an $A$-module, with an inclusion $M \hookrightarrow I$ into an injective module. Then this extends to an inclusion $E(M) \hookrightarrow I$.
+\end{prop}
+
+\begin{proof}
+ By injectivity, we can fill in the diagram
+ \[
+ \begin{tikzcd}
+ 0 \ar[r] & M \ar[r, hook] \ar[d, hook] & E(M) \ar[dl, "\psi", dashed]\\
+ & I
+ \end{tikzcd}.
+ \]
+ We know $\psi$ restricts to the identity on $M$. So $\ker \psi \cap M = \{0\}$. Since $M \leq E(M)$ is essential, we must have $\ker \psi = 0$. So $E(M)$ embeds into $I$.
+\end{proof}
+
+\begin{prop}
+ Suppose $E$ is an injective essential extension of $M$. Then $E \cong E(M)$. In particular, any two injective hulls are isomorphic.
+\end{prop}
+
+\begin{proof}
+ By the previous lemma, $E(M)$ embeds into $E$. But $E(M)$ is a maximal essential extension. So this forces $E = E(M)$.
+\end{proof}
+
+Using what we have got, it is not hard to see that
+\begin{prop}
+ \[
+ E(M_1 \oplus M_2) = E(M_1) \oplus E(M_2).
+ \]
+\end{prop}
+
+\begin{proof}
+ We know that $E(M_1) \oplus E(M_2)$ is also injective (since finite direct sums are the same as direct products), and also $M_1 \oplus M_2$ embeds in $E(M_1) \oplus E(M_2)$. So it suffices to prove this extension is essential.
+
+ Let $V \leq E(M_1) \oplus E(M_2)$ be non-zero, and write $V/E(M_i)$ for the image of $V$ in the quotient by $E(M_i)$. Then either $V/E(M_1) \not= 0$ or $V/E(M_2) \not= 0$, for otherwise $V = 0$.
+
+ We wlog it is the latter. Note that we can naturally view
+ \[
+ \frac{V}{E(M_2)} \leq \frac{E(M_1) \oplus E(M_2)}{E(M_2)} \cong E(M_1).
+ \]
+ Since $M_1 \subseteq E(M_1)$ is essential, we know
+ \[
+ M_1 \cap (V/E(M_2)) \not= 0.
+ \]
+ So there is some $m_1 + m_2 \in V$ with $m_1 \in M_1$ non-zero and $m_2 \in E(M_2)$. Now consider
+ \[
+ \{m \in E(M_2): am_1 + m \in V\text{ for some }a \in A\}.
+ \]
+ This is a submodule of $E(M_2)$ containing $m_2$. If it is zero, then $m_2 = 0$ and $m_1 \in V \cap M_1$, and we are done. Otherwise, since $M_2 \subseteq E(M_2)$ is essential, it contains a non-zero element $n$ of $M_2$, and then the corresponding $am_1 + n$ is a non-zero element of $V \cap (M_1 \oplus M_2)$, and we are done.
+\end{proof}
+
+The next two examples of injective hulls will be stated without proof: % insert proof?
+\begin{eg}
+ Take $A = k[X]$, and $M = k[X]$. Then $E(M) = k(X)$.
+\end{eg}
+
+\begin{eg}
+ Let $A = k[X]$ and $V = k$ be the trivial module, where $X$ acts by $0$. Then
+ \[
+ E(V) = \frac{k[X, X^{-1}]}{X k[X]},
+ \]
+ which is a quotient of $A$-modules. We note $V$ embeds in this as
+ \[
+ V \cong \frac{k[X]}{X k[X]} \hookrightarrow \frac{k[X, X^{-1}]}{X k[X]}.
+ \]
+\end{eg}
+
+%\begin{eg}
+% Let $A$ be a finite-dimensional $k$-algebra. If $P$ is a left $A$-module, then $P$ is indecomposable projective iff $P^*$ is indecomposable injective $A$-module.
+%
+% Indeed, if $P$ is projective, then it is a summand of $_AA$, and if $P^*$ is injective, then it is a summand of $(_AA)^*$
+%
+% If $A$ is Frobenius, then $(_AA)^* \cong A_A$. Then $P^*$ is a projective module. So in this case, injectives are the same as projectives. % what is Frobenius?
+%\end{eg}
+% fix this
+
+\begin{defi}[Uniform module]\index{uniform module}\index{module!uniform}
+ A non-zero module $V$ is \emph{uniform} if given non-zero submodules $V_1, V_2$, then $V_1 \cap V_2 \not= \{0\}$.
+\end{defi}
+
+\begin{lemma}
+ $V$ is uniform iff $E(V)$ is indecomposable.
+\end{lemma}
+
+\begin{proof}
+ Suppose $E(V) = A \oplus B$, with $A, B$ non-zero. Then $V \cap A \not= \{0\}$ and $V \cap B \not= \{0\}$ since the extension is essential. So we have two non-zero submodules of $V$ that intersect trivially.
+
+ Conversely, suppose $V$ is not uniform, and let $V_1, V_2$ be non-zero submodules that intersect trivially. By Zorn's lemma, we suppose these are maximal submodules that intersect trivially. We claim
+ \[
+ E(V_1) \oplus E(V_2) = E(V_1 \oplus V_2) = E(V).
+ \]
+ To prove this, it suffices to show that $V$ is an essential extension of $V_1 \oplus V_2$, so that $E(V)$ is an injective hull of $V_1 \oplus V_2$.
+
+ Let $W \leq V$ be non-zero. If $W \cap (V_1 \oplus V_2) = 0$, then $V_2 \oplus W$ still intersects $V_1$ trivially, contradicting the maximality of the pair $(V_1, V_2)$. So we are done.
+\end{proof}
+
+\begin{defi}[Domain]\index{domain}
+ An algebra is a \emph{domain} if $xy = 0$ implies $x = 0$ or $y = 0$.
+\end{defi}
+This is just the definition of an integral domain, but when we have non-commutative algebras, we usually leave out the word ``integral''.
+
+To show that the algebras we know and love are indeed domains, we again do some deformation.
+\begin{lemma}
+ Let $A$ be a filtered algebra, which is exhaustive and separated. Then if $\gr A$ is a domain, then so is $A$.
+\end{lemma}
+
+\begin{proof}
+ Let $x, y \in A$ be non-zero. Since the filtration is exhaustive and separated, we can find $i, j$ such that $x \in A_i \setminus A_{i - 1}$ and $y \in A_j \setminus A_{j - 1}$. Then we have
+ \begin{align*}
+ \bar{x} &= x + A_{i - 1} \not= 0 \in A_i/A_{i - 1}\\
+ \bar{y} &= y + A_{j - 1} \not= 0 \in A_j/A_{j - 1}.
+ \end{align*}
+ If $\gr A$ is a domain, then we deduce $\bar{x}\bar{y} \not= 0$. So we deduce that $xy \not \in A_{i + j - 1}$. In particular, $xy \not = 0$.
+\end{proof}
+
+\begin{cor}
+ $A_n(k)$ and $\mathcal{U}(\mathfrak{g})$ are domains.
+\end{cor}
+
+\begin{lemma}
+ Let $A$ be a right Noetherian domain. Then $A_A$ is uniform, i.e.\ $E(A_A)$ is indecomposable.
+\end{lemma}
+
+\begin{proof}
+ Suppose not, and so there are $xA$ and $yA$ non-zero such that $xA \cap yA = \{0\}$. So $xA \oplus yA$ is a direct sum.
+
+ But $A$ is a domain and so $yA \cong A$ as a right $A$-module. Thus $yxA \oplus y^2A$ is a direct sum living inside $yA$. Further decomposing $y^2 A$ in the same way, we find that
+ \[
+ xA \oplus yx A \oplus y^2 xA \oplus \cdots \oplus y^n xA
+ \]
+ is a direct sum of non-zero submodules for every $n$. But the partial sums form an infinite strictly ascending chain of right ideals as $n \to \infty$, which contradicts the Noetherian condition.
+\end{proof}
+
+Recall that when we proved Artin--Wedderburn, we needed to use Krull--Schmidt, which told us the decomposition is unique up to re-ordering. That relied on the endomorphism algebra being local. We need something similar here.
+
+\begin{lemma}
+ Let $E$ be an indecomposable injective right module. Then $\End_A(E)$ is a local algebra, with the unique maximal ideal given by
+ \[
+ I = \{f \in \End(E): \ker f\text{ is essential}\}.
+ \]
+\end{lemma}
+Note that since $E$ is indecomposable injective, given any non-zero $V \leq E$, we know $E(V)$ embeds into, and hence is a direct summand of $E$. Hence $E(V) = E$. So $\ker f$ being essential is the same as saying $\ker f$ being non-zero. However, this description of the ideal will be useful later on.
+
+\begin{proof}
+ Let $f: E \to E$ have $\ker f = \{0\}$. Then $f(E) \cong E$ is an injective module, and so is a direct summand of $E$. But $E$ is indecomposable. So $f$ is surjective. So it is an isomorphism, and hence invertible. So it remains to show that
+ \[
+ I = \{f \in \End(E): \ker f\text{ is essential}\}
+ \]
+ is an ideal.
+
+ If $\ker f$ and $\ker g$ are essential, then $\ker (f + g) \geq \ker f \cap \ker g$, and the intersection of essential submodules is essential. So $\ker (f + g)$ is also essential.
+
+ Also, if $\ker g$ is essential and $f$ is arbitrary, then $\ker (f \circ g) \geq \ker g$ is essential, and $\ker (g \circ f) = f^{-1}(\ker g)$ is also essential. So $I$ is a two-sided ideal, and since every element not in $I$ is invertible, it is the unique maximal ideal. So $\End_A(E)$ is local.
+\end{proof}
+
+The point of this lemma is to allow us to use Krull--Schmidt.
+
+\begin{lemma}
+ Let $M$ be a non-zero Noetherian module. Then $M$ is an essential extension of a direct sum of uniform submodules $N_1, \cdots, N_r$. Thus
+ \[
+ E(M) \cong E(N_1) \oplus \cdots \oplus E(N_r)
+ \]
+ is a direct sum of finitely many indecomposables.
+
+ This decomposition is unique up to re-ordering (and isomorphism).
+\end{lemma}
+
+\begin{proof}
+ We first show any non-zero Noetherian module contains a uniform submodule. Suppose not, and $M$ is in particular not uniform. So it contains non-zero $V_1, V_2'$ with $V_1 \cap V_2' = 0$. But $V_2'$ is not uniform by assumption. So it contains non-zero $V_2$ and $V_3'$ with zero intersection. We keep on repeating. Then the partial sums of
+ \[
+ V_1 \oplus V_2 \oplus \cdots \oplus V_n
+ \]
+ form a strictly ascending chain of submodules of $M$, which is a contradiction.
+
+ Now for non-zero Noetherian $M$, pick $N_1$ uniform in $M$. Either $N_1$ is essential in $M$, and we're done, or there is some $N_2'$ non-zero with $N_1 \cap N_2' = 0$. We pick $N_2$ uniform in $N_2'$. Then either $N_1 \oplus N_2$ is essential, or\ldots
+
+ And we are done since $M$ is Noetherian. Taking injective hulls, we get
+ \[
+ E(M) = E(N_1) \oplus \cdots \oplus E(N_r),
+ \]
+ and we are done by Krull--Schmidt and the previous lemma.
+\end{proof}
+
+This is the crucial lemma, which isn't really hard. This allows us to define yet another dimension for Noetherian algebras.
+\begin{defi}[Uniform dimension]\index{uniform dimension}\index{dimension!uniform}
+ The \emph{uniform dimension}, or \term{Goldie rank} of $M$ is the number of indecomposable direct summands of $E(M)$.
+\end{defi}
+This is analogous to vector space dimensions in some ways.
+
+\begin{eg}
+ The Goldie rank of a (right Noetherian) domain is $1$, as we showed $A_A$ is uniform. This is true for $A_n(k)$ and $\mathcal{U}(\mathfrak{g})$.
+\end{eg}
+
+\begin{lemma}
+ Let $E_1, \cdots, E_r$ be indecomposable injectives. Put $E = E_1 \oplus \cdots \oplus E_r$. Let $I = \{f \in \End_A(E): \ker f\text{ is essential}\}$. This is an ideal, and then
+ \[
+ \End_A(E)/I \cong M_{n_1}(D_1) \oplus \cdots \oplus M_{n_s}(D_s)
+ \]
+ for some division algebras $D_i$.
+\end{lemma}
+
+\begin{proof}
+ We write the decomposition instead as
+ \[
+ E = E_1^{n_1} \oplus \cdots \oplus E_s^{n_s},
+ \]
+ grouping isomorphic summands together, so that $E_i \not\cong E_j$ for $i \not= j$. Then as in basic linear algebra, we know elements of $\End(E)$ can be written as an $s \times s$ matrix whose $(i, j)$th entry is an element of $\Hom(E_i^{n_i}, E_j^{n_j})$.
+
+ Now note that if $E_i \not \cong E_j$, then the kernel of a map $E_i \to E_j$ is essential in $E_i$. So quotienting out by $I$ kills all of these ``off-diagonal'' entries.
+
+ Also $\Hom(E_i^{n_i}, E_i^{n_i}) = M_{n_i}(\End(E_i))$, and so quotienting out by $I$ gives $M_{n_i}(\End(E_i)/\{\text{maps with essential kernel}\}) \cong M_{n_i}(D_i)$, where
+ \[
+ D_i \cong \frac{\End(E_i)}{\{\text{maps with essential kernel}\}},
+ \]
+ which we know is a division algebra, since by the previous lemma $\End(E_i)$ is local with this as its unique maximal ideal.
+\end{proof}
+
+The final piece of the proof of Goldie's theorem is the following lemma:
+\begin{lemma}
+ If $A$ is a right Noetherian algebra, then any $f: A_A \to A_A$ with $\ker f$ essential in $A_A$ is nilpotent.
+\end{lemma}
+
+\begin{proof}
+ Consider
+ \[
+ 0 < \ker f \leq \ker f^2 \leq \cdots.
+ \]
+ Suppose $f$ is not nilpotent. We claim that this is a strictly increasing chain. Indeed, for all $n$, we have $f^n(A_A) \not= 0$. Since $\ker f$ is essential, we know
+ \[
+ f^n(A_A) \cap \ker f \not= \{0\}.
+ \]
+ This gives some $a$ with $f^n(a) \not= 0$ but $f^{n + 1}(a) = 0$, forcing $\ker f^{n + 1} > \ker f^n$ for every $n$, which contradicts the ascending chain condition.
+\end{proof}
+
+We can now prove Goldie's theorem.
+\begin{thm}[Goldie's theorem]\index{Goldie's theorem}
+ Let $A$ be a right Noetherian algebra with no non-zero ideals all of whose elements are nilpotent. Then $A$ embeds in a finite direct sum of matrix algebras over division algebras.
+\end{thm}
+
+\begin{proof}
+ As usual, we have a map
+ \[
+ \begin{tikzcd}[cdmap]
+ A \ar[r] & \End_A(A_A)\\
+ x \ar[r, maps to] & \text{left multiplication by $x$}
+ \end{tikzcd}
+ \]
+ For a map $A_A \to A_A$, it lifts to a map $E(A_A) \to E(A_A)$ by injectivity:
+ \[
+ \begin{tikzcd}
+ 0 \ar[r] & A_A \ar[d, "f"] \ar[r, "\theta"] & E(A_A) \ar[d, dashed, "f'"]\\
+ & A_A \ar[r, "\theta"] & E(A_A)
+ \end{tikzcd}
+ \]
+ We can complete the diagram to give a map $f': E(A_A) \to E(A_A)$, which restricts to $f$ on $A_A$. This is not necessarily unique. However, if we have two lifts $f'$ and $f''$, then the difference $f' - f''$ contains $A_A$ in its kernel, and hence has an essential kernel. So it lies in $I$. Thus we get a well-defined composite
+ \[
+ \begin{tikzcd}
+ A_A \ar[r] & \End_A(A_A) \ar[r] & \End(E(A_A))/I
+ \end{tikzcd}.
+ \]
+ The kernel of this composite consists of those $x \in A$ for which left multiplication by $x$ has an essential kernel. By the previous lemma, this is an ideal all of whose elements are nilpotent. By assumption, any such ideal vanishes. So we have an embedding of $A$ in $\End(E(A_A))/I$, which we know to be a direct sum of matrix algebras over division rings.
+\end{proof}
+Goldie didn't present it like this. This approach via injective modules is due to Matlis.
+
+We saw that (right Noetherian) domains have Goldie rank $1$. So we get that $\End(E(A))/I \cong D$ for some division algebra $D$. So by Goldie's theorem, a right Noetherian domain embeds in a division algebra. In particular, this is true for $A_n(k)$ and $\mathcal{U}(\mathfrak{g})$.
+
+\section{Hochschild homology and cohomology}
+\subsection{Introduction}
+We now move on to talk about Hochschild (co)homology. We will mostly talk about Hochschild cohomology, as that is the one that is interesting. Roughly speaking, given a $k$-algebra $A$ and an $A\mdash A$-bimodule $M$, Hochschild cohomology is an infinite sequence of $k$-vector spaces $HH^n(A, M)$ indexed by $n \in \N$ associated to the data. While there is in theory an infinite number of such vector spaces, we are mostly going to focus on the cases of $n = 0, 1, 2$, and we will see that these groups can be interpreted as things we are already familiar with.
+
+The construction of these Hochschild cohomology groups might seem a bit arbitrary. It is possible to justify these \emph{a priori} using the general theory of homological algebra and/or model categories. On the other hand, Hochschild cohomology is sometimes used as motivation for the general theory of homological algebra and/or model categories. Either way, we are not going to develop these general frameworks, but are going to justify Hochschild cohomology in terms of its practical utility.
+
+Unsurprisingly, Hochschild (co)homology was first developed by Hochschild in 1945, albeit only working with algebras of finite (vector space) dimension. It was introduced to give a cohomological interpretation and generalization of some results of Wedderburn. Later in 1962/1963, Gerstenhaber saw how Hochschild cohomology was relevant to the deformations of algebras. More recently, it has been realized that the Hochschild cochain complex has additional algebraic structure, which yields yet more information about deformations.
+
+As mentioned, we will work with $A\mdash A$-bimodules over an algebra $A$. If our algebra has an augmentation, i.e.\ a ring map to $k$, then we can have a decent theory that works with left or right modules. However, for the sake of simplicity, we shall just work with bimodules to make our lives easier.
+
+Recall that an $A\mdash A$-bimodule is a $k$-vector space with compatible left and right $A$-actions. For example, $A$ is an $A\mdash A$-bimodule, and we sometimes write it as $_AA_A$ to emphasize this. More generally, we can view $A^{\otimes (n + 2)} = A \otimes_k \cdots \otimes_k A$ as an $A\mdash A$-bimodule by
+\[
+ x(a_0 \otimes a_1 \otimes \cdots \otimes a_{n + 1})y = (x a_0) \otimes a_1 \otimes \cdots \otimes (a_{n + 1} y).
+\]
+The crucial property of this is that for any $n \geq 0$, the bimodule $A^{\otimes (n + 2)}$ is a free $A\mdash A$-bimodule. For example, $A \otimes_k A$ is free on a single generator $1 \otimes_k 1$, whereas if $\{x_i\}$ is a $k$-basis of $A$, then $A \otimes_k A \otimes_k A$ is free on $\{1 \otimes_k x_i \otimes_k 1\}$.
+
+The general theory of homological algebra says we should be interested in such free things.
+\begin{defi}[Free resolution]\index{free resolution}\index{resolution!free}
+ Let $A$ be an algebra and $M$ an $A\mdash A$-bimodule. A \emph{free resolution} of $M$ is an exact sequence of the form
+ \[
+ \begin{tikzcd}
+ \cdots \ar[r, "d_2"] & F_2 \ar[r, "d_1"] & F_1 \ar[r, "d_0"] & F_0 \ar[r] & M
+ \end{tikzcd},
+ \]
+ where each $F_n$ is a free $A\mdash A$-bimodule.
+\end{defi}
+More generally, we can consider a \term{projective resolution}\index{resolution!projective} instead, where we allow the bimodules to be projective. In this course, we are only interested in one particular free resolution:
+
+\begin{defi}[Hochschild chain complex]\index{Hochschild chain complex}\index{chain complex!Hochschild}
+ Let $A$ be a $k$-algebra with multiplication map $\mu: A \otimes A \to A$. The \emph{Hochschild chain complex} is
+ \[
+ \begin{tikzcd}
+ \cdots \ar[r, "d_1"] & A \otimes_k A \otimes_k A \ar[r, "d_0"] & A \otimes_k A \ar[r, "\mu"] & A \ar[r] & 0.
+ \end{tikzcd}
+ \]
+ We refer to $A^{\otimes_k(n + 2)}$ as the \term{degree $n$} term. The differential $d: A^{\otimes_k (n + 3)} \to A^{\otimes_k(n + 2)}$ is given by
+ \[
+  d(a_0 \otimes_k \cdots \otimes_k a_{n + 2}) = \sum_{i = 0}^{n + 1} (-1)^i a_0 \otimes_k \cdots \otimes_k a_i a_{i + 1} \otimes_k \cdots \otimes_k a_{n + 2}.
+ \]
+\end{defi}
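+To get a feel for the signs, here is the lowest differential $d_0: A \otimes_k A \otimes_k A \to A \otimes_k A$ spelled out:
+\[
+ d_0(a_0 \otimes a_1 \otimes a_2) = a_0 a_1 \otimes a_2 - a_0 \otimes a_1 a_2,
+\]
+and composing with $\mu$ gives
+\[
+ \mu(d_0(a_0 \otimes a_1 \otimes a_2)) = a_0 a_1 a_2 - a_0 a_1 a_2 = 0,
+\]
+as being a complex requires.
+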
+This is a free resolution of $_AA_A$ (the exactness is merely a computation, and we shall leave that as an exercise to the reader). In a nutshell, given an $A\mdash A$-bimodule $M$, its Hochschild homology and cohomology are obtained by applying $\ph\otimes_{A\mdash A} M$ and $\Hom_{A\mdash A}(\ph, M)$ to the Hochschild chain complex, and then taking the homology and cohomology of the resulting chain complex. We shall explore in more detail what this means.
+
+It is a general theorem that we could have applied the functors $\ph \otimes_{A \mdash A} M$ and $\Hom_{A \mdash A} (\ph, M)$ to \emph{any} projective resolution of $_AA_A$ and taken the (co)homology, and the resulting vector spaces would be the same. However, we will not prove that, and will just stick to this standard free resolution throughout.
+
+\subsection{Cohomology}
+As mentioned, the construction of Hochschild cohomology involves applying $\Hom_{A\mdash A}(\ph, M)$ to the Hochschild chain complex, and looking at the terms $\Hom_{A\mdash A}(A^{\otimes(n + 2)}, M)$. This is usually not very convenient to manipulate, as it involves talking about bimodule homomorphisms. However, we note that $A^{\otimes (n + 2)}$ is a free $A\mdash A$-bimodule generated by a basis of $A^{\otimes n}$. Thus, there is a canonical isomorphism
+\[
+ \Hom_{A \mdash A}(A^{\otimes (n + 2)}, M)\cong \Hom_k(A^{\otimes n}, M),
+\]
+and $k$-linear maps are much simpler to work with.
+
+\begin{defi}[Hochschild cochain complex]\index{Hochschild cochain complex}\index{cochain complex!Hochschild}
+ The \emph{Hochschild cochain complex} of an $A\mdash A$-bimodule $M$ is what we obtain by applying $\Hom_{A\mdash A}(\ph, M)$ to the Hochschild chain complex of $A$. Explicitly, we can write it as
+ \[
+ \begin{tikzcd}
+ \Hom_k(k, M) \ar[r, "\delta_0"] & \Hom_k(A, M) \ar[r, "\delta_1"] & \Hom_k(A \otimes A, M) \ar[r] & \cdots,
+ \end{tikzcd}
+ \]
+ where
+ \begin{align*}
+ (\delta_0 f)(a) &= a f(1) - f(1) a\\
+ (\delta_1 f)(a_1 \otimes a_2) &= a_1 f(a_2) - f(a_1 a_2) + f(a_1) a_2\\
+ (\delta_2 f)(a_1 \otimes a_2 \otimes a_3) &= a_1 f(a_2 \otimes a_3) - f(a_1a_2 \otimes a_3) \\
+ &\hphantom{={}}+ f(a_1 \otimes a_2 a_3) - f(a_1 \otimes a_2)a_3\\
+ (\delta_{n - 1} f)(a_1 \otimes \cdots \otimes a_n) &= a_1 f(a_2 \otimes \cdots \otimes a_n)\\
+ &\hphantom{={}}+ \sum_{i = 1}^{n - 1} (-1)^i f(a_1 \otimes \cdots \otimes a_i a_{i + 1} \otimes \cdots \otimes a_n) \\
+ &\hphantom{={}}+ (-1)^{n} f(a_1 \otimes \cdots \otimes a_{n - 1})a_n
+ \end{align*}
+ The reason the end ones look different is that we replaced $\Hom_{A\mdash A}(A^{\otimes (n + 2)}, M)$ with $\Hom_k(A^{\otimes n}, M)$.
+\end{defi}
+
+The crucial observation is that the exactness of the Hochschild chain complex, and in particular the fact that $d^2 = 0$, implies $\im \delta_{n - 1} \subseteq \ker \delta_n$.
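+For example, in the lowest degree, writing $m = f(1)$ for $f \in \Hom_k(k, M)$, we have
+\begin{align*}
+ (\delta_1 \delta_0 f)(a_1 \otimes a_2) &= a_1(a_2 m - m a_2) - (a_1 a_2 m - m a_1 a_2) + (a_1 m - m a_1) a_2\\
+ &= 0.
+\end{align*}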
+\begin{defi}[Cocycles]\index{cocycles}
+ The \emph{cocycles} in degree $n$ are the elements in $\ker \delta_n$.
+\end{defi}
+
+\begin{defi}[Coboundaries]\index{coboundaries}
+ The \emph{coboundaries} in degree $n$ are the elements in $\im \delta_{n - 1}$.
+\end{defi}
+These names came from algebraic topology.
+
+\begin{defi}[Hochschild cohomology groups]\index{Hochschild cohomology groups}\index{$\HH^n(A, M)$}
+ We define
+ \begin{align*}
+ \HH^0(A, M) &= \ker \delta_0\\
+ \HH^n(A, M) &= \frac{\ker \delta_n}{\im \delta_{n - 1}} \quad (n \geq 1).
+ \end{align*}
+ These are $k$-vector spaces.
+\end{defi}
+If we do not want to single out $\HH^0$, we can extend the Hochschild cochain complex to the left with $0$, setting $\delta_n = 0$ for $n < 0$ (or equivalently extend the Hochschild chain complex to the right similarly). Then
+\[
+ \HH^0(A, M) = \frac{\ker \delta_0}{\im \delta_{-1}} = \ker \delta_0.
+\]
+The first thing we should ask ourselves is when the cohomology groups vanish. There are two scenarios where we can immediately tell that the (higher) cohomology groups vanish.
+
+\begin{lemma}
+ Let $M$ be an injective bimodule. Then $\HH^n(A, M) = 0$ for all $n \geq 1$.
+\end{lemma}
+
+\begin{proof}
+ $\Hom_{A\mdash A}(\ph, M)$ is exact.
+\end{proof}
+
+\begin{lemma}
+ If $_AA_A$ is a projective bimodule, then $\HH^n(A, M) = 0$ for all $M$ and all $n \geq 1$.
+\end{lemma}
+If we believed the previous remark that we could compute Hochschild cohomology with any projective resolution, then this result is immediate --- indeed, we can use $\cdots \to 0 \to 0 \to A \to A \to 0$ as the projective resolution. However, since we don't want to prove such general results, we shall provide an explicit computation.
+
+\begin{proof}
+ If $_AA_A$ is projective, then all $A^{\otimes n}$ are projective. At each degree $n$, we can split up the Hochschild chain complex as the short exact sequence
+ \[
+ \begin{tikzcd}
+ 0 \ar[r] & \displaystyle\frac{A^{\otimes(n + 3)}}{\ker d_n} \ar[r, hook, "d_n"] & A^{\otimes(n + 2)} \ar[r, two heads, "d_{n - 1}"] & \im d_{n - 1} \ar[r] & 0
+ \end{tikzcd}
+ \]
+ By induction up the complex, each of these short exact sequences splits (the base case uses the assumption that $_AA_A$ itself is projective), so each $\im d_{n - 1}$ is a direct summand of $A^{\otimes (n + 1)}$, and is hence projective. So we have
+ \[
+ A^{\otimes(n + 2)} \cong \frac{A^{\otimes(n + 3)}}{\ker d_n}\oplus \im d_{n - 1},
+ \]
+ and we can write the Hochschild chain complex at $n$ as
+ \[
+ \begin{tikzcd}[row sep=tiny]
+ \displaystyle \ker d_n \oplus \frac{A^{\otimes(n + 3)}}{\ker d_n} \ar[r, "d_n"] & \displaystyle \frac{A^{\otimes(n + 3)}}{\ker d_n}\oplus \im d_{n - 1} \ar[r, "d_{n - 1}"] & \displaystyle \frac{A^{\otimes(n + 1)}}{\im d_{n - 1}} \oplus \im d_{n - 1}\\
+ (a, b) \ar[r, maps to] & (b, 0)\\
+ & (c, d) \ar[r, maps to] & (0, d)
+ \end{tikzcd}
+ \]
+ Now $\Hom(\ph, M)$ certainly preserves the exactness of this split complex, and so the Hochschild cochain complex is also exact. So we have $\HH^n(A, M) = 0$ for $n \geq 1$.
+\end{proof}
+This is a rather strong result. By knowing something about $A$ itself, we deduce that the Hochschild cohomology of \emph{any} bimodule whatsoever must vanish.
+
+Of course, it is not true that $\HH^n(A, M)$ vanishes in general for $n \geq 1$, or else we would have a rather boring theory. In general, we define
+\begin{defi}[Dimension]\index{dimension}
+ The \emph{dimension} of an algebra $A$ is
+ \[
+ \Dim A = \sup\{n: \HH^n(A, M) \not= 0\text{ for some $A\mdash A$-bimodule $M$}\}.
+ \]
+ This can be infinite if such a sup does not exist.
+\end{defi}
+Thus, we showed that if $_AA_A$ embeds as a direct summand of $A \otimes A$, then $\Dim A = 0$.
+
+\begin{defi}[$k$-separable]\index{separable algebra}\index{$k$-separable algebra}\index{algebra!$k$-separable}
+ An algebra $A$ is \emph{$k$-separable} if $_AA_A$ embeds as a direct summand of $A \otimes A$.
+\end{defi}
+Since $A \otimes A$ is a free $A\mdash A$-bimodule, this condition is equivalent to $A$ being projective. However, there is some point in writing the definition like this. Note that an $A\mdash A$-bimodule is equivalently a left $A \otimes A^{\op}$-module. Then $_AA_A$ is a direct summand of $A \otimes A$ if and only if there is a \term{separating idempotent} $e \in A \otimes A^\op$ so that $_AA_A$, viewed as a left $A \otimes A^{\op}$-module, is $(A \otimes A^\op)e$.
+
+This is technically convenient, because it is often easier to write down a separating idempotent than to prove directly that $A$ is projective.
+
+Note that whether we write $A \otimes A^\op$ or $A \otimes A$ is merely a matter of convention. They have the same underlying set. The notation $A \otimes A$ is more convenient when we take higher powers, but we can think of $A \otimes A^\op$ as taking $A$ as a left-$A$ right-$k$ module and $A^\op$ as a left-$k$ right-$A$ module, and tensoring them gives an $A\mdash A$-bimodule.
+
+We just proved that separable algebras have dimension $0$. Conversely, we have
+\begin{lemma}
+ If $\Dim A = 0$, then $A$ is separable.
+\end{lemma}
+
+\begin{proof}
+ Note that there is a short exact sequence
+ \[
+ \begin{tikzcd}
+ 0 \ar[r] & \ker \mu \ar[r] & A \otimes A \ar[r, "\mu"] & A \ar[r] & 0
+ \end{tikzcd}
+ \]
+ If we can show this splits, then $A$ is a direct summand of $A \otimes A$. To do so, we need to find a map $A \otimes A \to \ker \mu$ that restricts to the identity on $\ker \mu$.
+
+ To do so, we look at the first few terms of the Hochschild chain complex
+ \[
+ \begin{tikzcd}
+ \cdots \ar[r, "d"] & \im d \oplus \ker \mu \ar[r] & A \otimes A \ar[r, "\mu"] & A \ar[r] & 0
+ \end{tikzcd}.
+ \]
+ By assumption, for any $M$, applying $\Hom_{A\mdash A} (\ph, M)$ to the chain complex gives an exact sequence. Omitting the annoying $_{A \mdash A}$ subscript, this sequence looks like
+ \begin{multline*}
+ 0 \longrightarrow \Hom(A, M) \overset{\mu^*}{\longrightarrow} \Hom(A \otimes A, M)\\
+ \overset{(*)}{\longrightarrow} \Hom(\ker \mu, M) \oplus \Hom(\im d, M) \overset{d^*}{\longrightarrow} \cdots
+ \end{multline*}
+ Now $d^*$ sends $\Hom(\ker \mu, M)$ to zero. So $\Hom(\ker \mu, M)$ must be in the image of $(*)$. So the map
+ \[
+ \Hom(A \otimes A, M) \longrightarrow \Hom(\ker \mu, M)
+ \]
+ must be surjective. This is true for any $M$. In particular, we can pick $M = \ker \mu$. Then the identity map $\id_{\ker \mu}$ lifts to a map $A \otimes A \to \ker \mu$ whose restriction to $\ker \mu$ is the identity. So we are done.
+\end{proof}
+
+\begin{eg}
+ $M_n(k)$ is separable. It suffices to write down the separating idempotent. We let $E_{ij}$ be the elementary matrix with $1$ in the $(i,j)$th slot and $0$ otherwise. We fix $j$, and then
+ \[
+ \sum_i E_{ij} \otimes E_{ji} \in A\otimes A^\op
+ \]
+ is a separating idempotent.
+\end{eg}
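+One can check directly that this $e$ exhibits $_AA_A$ as a direct summand. Using $E_{k\ell} E_{ij} = \delta_{\ell i} E_{kj}$, we have
+\begin{align*}
+ \mu(e) &= \sum_i E_{ij} E_{ji} = \sum_i E_{ii} = 1,\\
+ E_{k\ell} \cdot e &= \sum_i E_{k\ell}E_{ij} \otimes E_{ji} = E_{kj} \otimes E_{j\ell} = \sum_i E_{ij} \otimes E_{ji} E_{k\ell} = e \cdot E_{k\ell},
+\end{align*}
+where $\cdot$ denotes the bimodule action on $A \otimes A$. So $a \mapsto a \cdot e$ is a bimodule map $A \to A \otimes A$ splitting $\mu$.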
+
+\begin{eg}
+ Let $A = kG$ with $\Char k \nmid |G|$. Then $A \otimes A^\op = kG \otimes (kG)^\op \cong kG \otimes kG$. But this is just isomorphic to $k(G \times G)$, which is again semi-simple.
+
+ Thus, as a bimodule, $A \otimes A$ is completely reducible. So its quotient $_AA_A$ is (isomorphic to) a direct summand of it as a bimodule. So we know that whenever $\Char k \nmid |G|$, then $kG$ is $k$-separable.
+\end{eg}
+
+The obvious question is --- is this notion actually a generalization of separable field extensions? This is indeed the case.
+\begin{fact} % insert a proof
+ Let $L/K$ be a finite field extension. Then $L$ is separable as a $K$-algebra iff $L/K$ is a separable field extension.
+\end{fact}
+However, $k$-separable algebras must be finite-dimensional as $k$-vector spaces. So this notion does not extend to infinite field extensions.
+
+In the remainder of the chapter, we will study what Hochschild cohomology in low degrees means. We start with $\HH^0$. The next two propositions follow from just unwrapping the definitions:
+\begin{prop}
+ We have
+ \[
+ \HH^0(A, M) = \{m \in M : am - ma = 0\text{ for all }a \in A\}.
+ \]
+ In particular, $\HH^0(A, A)$ is the center of $A$.
+\end{prop}
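+For example, for $A = M_n(k)$, a matrix commuting with all the matrix units $E_{ij}$ must be a scalar multiple of the identity, so
+\[
+ \HH^0(M_n(k), M_n(k)) = Z(M_n(k)) = k.
+\]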
+
+\begin{prop}
+ \[
+ \ker \delta_1 = \{f \in \Hom_k(A, M): f(a_1 a_2) = a_1 f(a_2) + f(a_1) a_2\}.
+ \]
+ These are the \term{derivations} from $A$ to $M$. We write this as \term{$\Der(A, M)$}.
+
+ On the other hand,
+ \[
+ \im \delta_0 = \{f \in \Hom_k(A, M): f(a) = am - ma \text{ for some }m \in M\}.
+ \]
+ These are called the \term{inner derivations} from $A$ to $M$. So
+ \[
+ \HH^1(A, M) = \frac{\Der(A, M)}{\mathrm{InnDer}(A, M)}.
+ \]
+ Setting $M = A$, we get the derivations and inner derivations of $A$.
+\end{prop}
+
+\begin{eg}
+ If $A = k[X]$, and $\Char k = 0$, then
+ \[
+ \Der A = \left\{p(X) \frac{\d}{\d X} : p(X) \in k[X]\right\},
+ \]
+ \]
+ and there are no (non-trivial) inner derivations because $A$ is commutative. So we find
+ \[
+ \HH^1(k[X], k[X]) \cong k[X].
+ \]
+\end{eg}
+In general, $\Der(A)$ forms a Lie algebra, since
+\[
+ D_1 D_2 - D_2 D_1 \in \End_k(A)
+\]
+is in fact a derivation if $D_1$ and $D_2$ are.
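+Indeed, the second-order cross terms cancel:
+\begin{align*}
+ D_1 D_2(ab) &= D_1(D_2(a)\, b + a\, D_2(b))\\
+ &= D_1 D_2(a)\, b + D_2(a)\, D_1(b) + D_1(a)\, D_2(b) + a\, D_1 D_2(b),
+\end{align*}
+and subtracting the same expression with $D_1$ and $D_2$ swapped leaves
+\[
+ (D_1 D_2 - D_2 D_1)(ab) = (D_1 D_2 - D_2 D_1)(a)\, b + a\, (D_1 D_2 - D_2 D_1)(b).
+\]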
+
+There is another way we can think about derivations, which is via semi-direct products.
+
+\begin{defi}[Semi-direct product]\index{semi-direct product}\index{$A \ltimes M$}
+ Let $M$ be an $A\mdash A$-bimodule. We can form the semi-direct product of $A$ and $M$, written $A \ltimes M$, which is an algebra with elements $(a, m) \in A \times M$, and multiplication given by
+ \[
+ (a_1, m_1) \cdot (a_2, m_2) = (a_1 a_2, a_1 m_2 + m_1 a_2).
+ \]
+ Addition is given componentwise.
+
+ Alternatively, we can write
+ \[
+ A \ltimes M \cong A + M\varepsilon,
+ \]
+ where $\varepsilon$ commutes with everything and $\varepsilon^2 = 0$. Then $M\varepsilon$ forms an ideal with $(M\varepsilon)^2 = 0$.
+\end{defi}
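+In the second description, the product is forced by $\varepsilon^2 = 0$ (equivalently, $(M\varepsilon)^2 = 0$):
+\[
+ (a_1 + m_1 \varepsilon)(a_2 + m_2 \varepsilon) = a_1 a_2 + (a_1 m_2 + m_1 a_2)\varepsilon,
+\]
+which recovers the multiplication of $A \ltimes M$ given above.
+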
+In particular, we can look at $A \ltimes A \cong A + A \varepsilon$. This is often written as $A[\varepsilon]$\index{$A[\varepsilon]$}.
+
+Previously, we saw first cohomology can be understood in terms of derivations. We can formulate derivations in terms of this semi-direct product.
+\begin{lemma}
+ We have
+ \[
+ \Der_k(A, M) \cong \{\text{algebra complements to $M$ in $A \ltimes M$ isomorphic to $A$}\}.
+ \]
+\end{lemma}
+
+\begin{proof}
+ A complement to $M$ is an embedded copy of $A$ in $A \ltimes M$,
+ \[
+ \begin{tikzcd}[cdmap]
+ A \ar[r] & A \ltimes M\\
+ a \ar[r, maps to] & (a, D_a)
+ \end{tikzcd}
+ \]
+ The function $A \to M$ given by $a \mapsto D_a$ is a derivation, since under the embedding, we have
+ \[
+ ab \mapsto (ab, a D_b + D_a b).
+ \]
+ Conversely, a derivation $f: A \to M$ gives an embedding of $A$ in $A \ltimes M$ given by $a \mapsto (a, f(a))$.
+\end{proof}
+
+We can further rewrite this characterization in terms of automorphisms of the semi-direct product. This allows us to identify inner derivations as well.
+
+\begin{lemma}
+ We have
+ \[
+ \Der(A, M) \cong \left\{\parbox{6cm}{\centering automorphisms of $A \ltimes M$ of the form $a \mapsto a + f(a) \varepsilon$, $m\varepsilon \mapsto m\varepsilon$}\right\},
+ \]
+ where we view $A \ltimes M \cong A + M \varepsilon$.
+
+ Moreover, the inner derivations correspond to automorphisms achieved by conjugation by $1 + m\varepsilon$, which is a unit with inverse $1 - m \varepsilon$.
+\end{lemma}
+The proof is a direct check.
+
+This applies in particular when we pick $M = {_AA_A}$, and the Lie algebra of derivations of $A$ may be thought of as the set of infinitesimal automorphisms.
+
+\separator
+
+Let's now consider $\HH^2(A, M)$. This is to be understood in terms of extensions, of which the ``trivial'' example is the semi-direct product.
+
+\begin{defi}[Extension]\index{extension}
+ Let $A$ be an algebra and $M$ an $A\mdash A$-bimodule. An \emph{extension} of $A$ by $M$ is a $k$-algebra $B$ containing a 2-sided ideal $I$ such that
+ \begin{itemize}
+ \item $I^2 = 0$;
+ \item $B/I \cong A$; and
+ \item $M \cong I$ as an $A\mdash A$-bimodule.
+ \end{itemize}
+ Note that since $I^2 = 0$, the left and right multiplication in $B$ induces an $A\mdash A$-bimodule structure on $I$, rather than just a $B\mdash B$-bimodule.
+\end{defi}
+
+We let $\pi: B \to A$ be the canonical quotient map. Then we have a short exact sequence
+\[
+ \begin{tikzcd}
+ 0 \ar[r] & I \ar[r] & B \ar[r] & A \ar[r] & 0
+ \end{tikzcd}.
+\]
+Then two extensions $B_1$ and $B_2$ are isomorphic if there is a $k$-algebra isomorphism $\theta: B_1 \to B_2$ such that the following diagram commutes:
+\[
+ \begin{tikzcd}[row sep=small]
+ & & B_1 \ar[rd] \ar[dd, dashed, "\theta"]\\
+ 0 \ar[r] & I \ar[ur] \ar[dr] & & A \ar[r] & 0\\
+ & & B_2 \ar[ur]
+ \end{tikzcd}.
+\]
+Note that the semi-direct product is such an extension, called the \term{split extension}.
+
+\begin{prop}
+ There is a bijection between $\HH^2(A, M)$ and the isomorphism classes of extensions of $A$ by $M$.
+\end{prop}
+This is something that should be familiar if we know about group cohomology.
+
+\begin{proof}
+ Let $B$ be an extension with, as usual, $\pi: B \to A$, $I = M = \ker \pi$, $I^2 = 0$. We now try to produce a cocycle from this.
+
+ Let $\rho$ be any $k$-linear map $A \to B$ such that $\pi(\rho(a)) = a$. This is possible since $\pi$ is surjective. Equivalently, $\rho(\pi(b)) \equiv b \pmod I$. We define a $k$-linear map
+ \[
+ f_\rho: A \otimes A \to I \cong M
+ \]
+ by
+ \[
+ a_1 \otimes a_2 \mapsto \rho(a_1) \rho(a_2) - \rho(a_1 a_2).
+ \]
+ Note that the image lies in $I$ since
+ \[
+ \rho(a_1) \rho(a_2) \equiv \rho(a_1 a_2) \pmod I.
+ \]
+ It is a routine check that $f_\rho$ is a $2$-cocycle, i.e.\ it lies in $\ker \delta_2$.
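+ Indeed, since $I^2 = 0$, the bimodule action of $A$ on $I$ is just multiplication by $\rho(\ph)$ in $B$ (and this does not depend on the choice of $\rho$). The four terms of $\delta_2 f_\rho$ then telescope:
+ \begin{align*}
+ (\delta_2 f_\rho)(a_1 \otimes a_2 \otimes a_3) ={}& \rho(a_1)\rho(a_2)\rho(a_3) - \rho(a_1)\rho(a_2 a_3)\\
+ &- \rho(a_1 a_2)\rho(a_3) + \rho(a_1 a_2 a_3)\\
+ &+ \rho(a_1)\rho(a_2 a_3) - \rho(a_1 a_2 a_3)\\
+ &- \rho(a_1)\rho(a_2)\rho(a_3) + \rho(a_1 a_2)\rho(a_3) = 0.
+ \end{align*}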
+
+ If we replace $\rho$ by any other $\rho'$, we get $f_{\rho'}$, and we have
+ \begin{align*}
+ &\hphantom{={}}f_\rho(a_1 \otimes a_2) - f_{\rho'} (a_1 \otimes a_2) \\
+ &= \rho(a_1) (\rho(a_2) - \rho'(a_2)) - (\rho(a_1 a_2) - \rho'(a_1 a_2)) + (\rho(a_1) - \rho'(a_1))\rho'(a_2)\\
+ &= a_1 \cdot (\rho(a_2) - \rho'(a_2)) - (\rho(a_1 a_2) - \rho'(a_1 a_2)) + (\rho(a_1) - \rho'(a_1)) \cdot a_2,
+ \end{align*}
+ where $\cdot$ denotes the $A\mdash A$-bimodule action on $I$. Thus, we find
+ \[
+ f_\rho - f_{\rho'} = \delta_1(\rho - \rho'),
+ \]
+ noting that $\rho - \rho'$ actually maps to $I$.
+
+ So we obtain a map from the isomorphism classes of extensions to the second cohomology group.
+
+ Conversely, given an $A\mdash A$-bimodule $M$ and a $2$-cocycle $f: A \otimes A \to M$, we let
+ \[
+ B_f = A \oplus M
+ \]
+ as $k$-vector spaces. We define the multiplication map
+ \[
+ (a_1, m_1)(a_2, m_2) = (a_1 a_2, a_1 m_2 + m_1 a_2 + f(a_1 \otimes a_2)).
+ \]
+ This is associative precisely because of the 2-cocycle condition. The map $(a, m) \mapsto a$ yields a homomorphism $\pi: B_f \to A$, with kernel $I$ being a two-sided ideal of $B_f$ which has $I^2 = 0$. Moreover, $I \cong M$ as an $A\mdash A$-bimodule. Taking $\rho: A \to B_f$ given by $\rho(a) = (a, 0)$ yields the $2$-cocycle we started with.
+
+ Finally, let $f'$ be another $2$-cocycle cohomologous to $f$. Then there is a linear map $\tau: A \to M$ with
+ \[
+ f - f' = \delta_1 \tau.
+ \]
+ That is,
+ \[
+ f(a_1 \otimes a_2) = f'(a_1 \otimes a_2) + a_1 \cdot \tau(a_2) - \tau(a_1 a_2) + \tau(a_1) \cdot a_2.
+ \]
+ Then consider the map $B_f \to B_{f'}$ given by
+ \[
+ (a, m) \mapsto (a, m + \tau(a)).
+ \]
+ One then checks this is an isomorphism of extensions. And then we are done.
+\end{proof}
+In the proof, we see $0$ corresponds to the semi-direct product.
+
+\begin{cor}
+ If $\HH^2(A, M) = 0$, then all extensions are split.
+\end{cor}
+
+We now prove some theorems that appear to be trivialities. However, they are trivial only because we now have the machinery of Hochschild cohomology. When they were first proved, such machinery did not exist, and they were written in a form that seemed less trivial.
+\begin{thm}[Wedderburn, Malcev]
+ Let $B$ be a $k$-algebra satisfying
+ \begin{itemize}
+ \item $\Dim (B/J(B)) \leq 1$.
+ \item $J(B)^2 = 0$.
+ \end{itemize}
+ Then there is a subalgebra $A \cong B/J(B)$ of $B$ such that
+ \[
+ B = A \ltimes J(B).
+ \]
+ Furthermore, if $\Dim (B/J(B)) = 0$, then any two such subalgebras $A, A'$ are conjugate, i.e.\ there is some $x \in J(B)$ such that
+ \[
+ A' = (1 + x) A (1 + x)^{-1}.
+ \]
+ Notice that $1 + x$ is a unit in $B$.
+\end{thm}
+In fact, the same result holds if we only require $J(B)$ to be nilpotent. This follows from an induction argument using this as a base case, which is messy and not really interesting.
+
+\begin{proof}
+ We have $J(B)^2 = 0$. Since we know $\Dim (B/J(B)) \leq 1$, we must have
+ \[
+ \HH^2(A, J(B)) = 0
+ \]
+ where
+ \[
+ A \cong \frac{B}{J(B)}.
+ \]
+ Note that we regard $J(B)$ as an $A\mdash A$-bimodule here. So we know that all extensions of $A$ by $J(B)$ are split, i.e.\ semi-direct products, as required.
+
+ Furthermore, if $\Dim (B/J(B)) = 0$, then we know $\HH^1(A, J(B)) = 0$. So by our earlier lemmas, we see that complements are all conjugate, as required.
+\end{proof}
+
+\begin{cor}
+ If $k$ is algebraically closed and $\dim_k B < \infty$, then there is a subalgebra $A$ of $B$ such that
+ \[
+ A \cong \frac{B}{J(B)},
+ \]
+ and
+ \[
+ B = A \ltimes J(B).
+ \]
+ Moreover, $A$ is unique up to conjugation by units of the form $1 + x$ with $x \in J(B)$.
+\end{cor}
+
+\begin{proof}
+ We need to show that $\Dim (A) = 0$. But we know $B/J(B)$ is a semi-simple $k$-algebra of finite dimension, and in particular is Artinian. So by Artin--Wedderburn, we know $B/J(B)$ is a direct sum of matrix algebras over $k$ (since $k$ is algebraically closed and $\dim_k (B/J(B)) < \infty$).
+
+ We have previously observed that $M_n(k)$ is $k$-separable. Since $k$-separability behaves well under direct sums, we know $B/J(B)$ is $k$-separable, hence has dimension zero.
+
+ Finally, it is a general fact that $J(B)$ is nilpotent (as $B$ is finite-dimensional, hence Artinian), so the extended form of the theorem mentioned above applies.
+\end{proof}
+
+\subsection{Star products}
+We are now going to study some deformation theory. Suppose $A$ is a $k$-algebra. We write $V$ for the underlying vector space of $A$. Then there is a natural algebra structure on $V \otimes_k k[[t]] = V[[t]]$, which we may write as $A[[t]]$. However, we might wish to consider more interesting algebra structures on this vector space. Of course, we don't want to completely forget the algebra structure on $A$. So we make the following definition:
+
+\begin{defi}[Star product]\index{star product}
+ Let $A$ be a $k$-algebra, and let $V$ be the underlying vector space. A \emph{star product} is an associative $k[[t]]$-bilinear product on $V[[t]]$ that reduces to the multiplication on $A$ when we set $t = 0$.
+\end{defi}
+
+Can we produce non-trivial star products? It seems difficult, because when we write down an attempt, we need to make sure it is in fact associative, and that might take quite a bit of work. One example we have already seen is the following:
+\begin{eg} % return to this
+ Given a filtered $k$-algebra $A'$, we formed the Rees algebra associated with the filtration, and it embeds as a vector space in $(\gr A')[[t]]$. Thus we get a product on $(\gr A')[[t]]$.
+
+ There are two cases we are most interested in --- when $A' = A_n(k)$ or $A' = \mathcal{U}(\mathfrak{g})$. We saw that $\gr A'$ was actually a (commutative) polynomial algebra. However, the product on the Rees algebra is non-commutative. So the $*$-product will be non-commutative.
+\end{eg}
+% We are in the world of quantization here. We started with a commutative algebra $\gr A'$, and ended up with a non-commutative star product. For algebraists, this is what ``quantization'' means. Often, if we want to stress the ``quantum'' point of view, we may use $q$ instead of $t$.
+
+In general, the availability of star products is largely controlled by the Hochschild cohomology of $A$. To understand this, let's see what we actually need to specify to get a star product. Since we required the product to be a $k[[t]]$-bilinear map
+\[
+ f: V[[t]] \times V[[t]] \to V[[t]],
+\]
+all we need to do is to specify where pairs of elements of $V = A$ are sent. Let $a, b \in V = A$. We write\index{$F_i$}
+\[
+ f(a, b) = ab + t F_1(a, b) + t^2 F_2(a, b) + \cdots.
+\]
+Because of bilinearity, we know $F_i$ are $k$-bilinear maps, and so correspond to $k$-linear maps $V \otimes V \to V$. For convenience, we will write
+\[
+ F_0(a, b) = ab.
+\]
+The only non-trivial requirement $f$ has to satisfy is associativity:
+\[
+ f(f(a, b), c) = f(a, f(b, c)).
+\]
+What condition does this force on our $F_i$? By looking at coefficients of $t$, this implies that for all $\lambda = 0, 1, 2, \cdots$, we have
+\[
+ \sum_{\substack{m + n = \lambda\\m, n \geq 0}} \Big(F_m(F_n(a, b), c) - F_m(a, F_n(b, c))\Big) = 0.\tag{$*$}
+\]
+For $\lambda = 0$, we are just getting the associativity of the original multiplication on $A$. When $\lambda = 1$, then this says
+\[
+ a F_1(b, c) - F_1(ab, c) + F_1(a, bc) - F_1(a, b)c = 0.
+\]
+All this says is that $F_1$ is a $2$-cocycle! This is not surprising. Indeed, we've just seen (a while ago) that working mod $t^2$, the extensions of $A$ by $_AA_A$ are governed by $\HH^2$. Thus, we will refer to $2$-cocycles as \term{infinitesimal deformations}\index{deformation!infinitesimal} in this context.
+
+Note that given an arbitrary $2$-cocycle $A \otimes A \to A$, it may not be possible to produce a star product with the given $2$-cocycle as $F_1$.
+\begin{defi}[Integrable $2$-cocycle]\index{integrable $2$-cocycle}\index{$2$-cocycle!integrable}\index{cocycle!integrable}
+ Let $f: A \otimes A \to A$ be a $2$-cocycle. Then it is \emph{integrable} if it is the $F_1$ of a star product.
+\end{defi}
+We would like to know when a $2$-cocycle is integrable. Let's rewrite $(*)$ as $(\dagger_\lambda)$:
+\[
+ \sum_{\substack{m + n = \lambda\\m, n > 0}}\Big(F_m(F_n(a, b), c) - F_m(a, F_n(b, c))\Big) = (\delta_2 F_\lambda)(a, b, c).\tag{$\dagger_\lambda$}
+\]
+Here we are identifying $F_\lambda$ with the corresponding $k$-linear map $A \otimes A \to A$.
+
+For $\lambda = 2$, this says
+\[
+ F_1(F_1(a, b), c) - F_1(a, F_1(b, c)) = (\delta_2 F_2)(a, b, c).
+\]
+If $F_1$ is a $2$-cocycle, then one can check that the LHS gives a $3$-cocycle. If $F_1$ is integrable, then the LHS has to be equal to the RHS, and so must be a coboundary, and thus has cohomology class zero in $\HH^3(A, A)$.
+
+In fact, if $F_1, \cdots, F_{\lambda - 1}$ satisfy $(\dagger_1), \cdots, (\dagger_{\lambda - 1})$, then the LHS of $(\dagger_\lambda)$ is also a $3$-cocycle. If it is a coboundary, and we have defined $F_1, \cdots, F_{\lambda - 1}$, then we can define $F_\lambda$ such that $(\dagger_\lambda)$ holds. However, if it is not a coboundary, then we get stuck, and we see that our choice of $F_1, \cdots, F_{\lambda - 1}$ does not lead to a $*$-product.
+
+The $3$-cocycle appearing on the LHS of $(\dagger_\lambda)$ is an \term{obstruction} to integrability.
+
+If, however, they are always coboundaries, then we can inductively define $F_1, F_2, \cdots$ to give a $*$-product. Thus we have proved
+\begin{thm}[Gerstenhaber]
+ If $\HH^3(A, A) = 0$, then all infinitesimal deformations are integrable.
+\end{thm}
+Of course, even if $\HH^3(A, A) \not= 0$, we can still get $*$-products, but we need to pick our $F_1$ more carefully.
+
+Now after producing star products, we want to know if they are equivalent.
+
+\begin{defi}[Equivalence of star products]\index{equivalence of star products}\index{star product!equivalence}
+ Two star products $f$ and $g$ are \emph{equivalent} on $V \otimes k[[t]]$ if there is a $k[[t]]$-linear automorphism $\Phi$ of $V[[t]]$ of the form
+ \[
+ \Phi(a) = a + t \phi_1(a) + t^2 \phi_2(a) + \cdots
+ \]
+ such that
+ \[
+ f(a, b) = \Phi^{-1} g(\Phi(a), \Phi(b)).
+ \]
+ Equivalently, the following diagram has to commute:
+ \[
+ \begin{tikzcd}
+ V[[t]] \otimes V[[t]] \ar[r, "f"] \ar[d, "\Phi \otimes \Phi"] & V[[t]] \ar[d, "\Phi"]\\
+ V[[t]] \otimes V[[t]] \ar[r, "g"] & V[[t]]
+ \end{tikzcd}
+ \]
+ Star products equivalent to the usual product on $A \otimes k[[t]]$ are called \emph{trivial}\index{trivial!star product}\index{star product!trivial}.
+\end{defi}
+
+\begin{thm}[Gerstenhaber]
+ Any non-trivial star product $f$ is equivalent to one of the form
+ \[
+ g(a, b) = ab + t^n G_n(a, b) + t^{n + 1} G_{n + 1}(a, b) + \cdots,
+ \]
+ where $G_n$ is a $2$-cocycle and not a coboundary. In particular, if $\HH^2(A, A) = 0$, then any star product is trivial.
+\end{thm}
+
+\begin{proof}
+ Suppose as usual
+ \[
+ f(a, b) = ab + t F_1(a, b) + t^2 F_2(a, b) + \cdots,
+ \]
+ and suppose $F_1 = \cdots = F_{n - 1} = 0$. Then it follows from $(\dagger_n)$ that
+ \[
+ \delta_2 F_n = 0.
+ \]
+ If $F_n$ is a coboundary, then we can write
+ \[
+ F_n = - \delta \phi_n
+ \]
+ for some $\phi_n: A \to A$. We set
+ \[
+ \Phi_n(a) = a + t^n \phi_n(a).
+ \]
+ Then we can compute that
+ \[
+ \Phi_n^{-1}(f (\Phi_n(a), \Phi_n(b)))
+ \]
+ is of the form
+ \[
+ ab + t^{n + 1}G_{n + 1}(a, b) + \cdots.
+ \]
+ So we have managed to get rid of a further term, and we can keep going until we get the first non-zero term not a coboundary.
+
+ Suppose this never stops. Then $f$ is trivial --- we are using that the composite $\cdots \circ \Phi_{n + 2} \circ \Phi_{n + 1} \circ \Phi_n$ converges ($t$-adically) in the automorphism group, since we are correcting by terms of higher and higher degree.
+\end{proof}
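+Let us unwind the order-$n$ step of this computation. Working modulo $t^{n + 1}$, we have $\Phi_n^{-1}(x) \equiv x - t^n \phi_n(x)$, and so
+\begin{align*}
+ \Phi_n^{-1}(f(\Phi_n(a), \Phi_n(b))) &\equiv \Phi_n^{-1}\big(ab + t^n(a \phi_n(b) + \phi_n(a)\, b + F_n(a, b))\big)\\
+ &\equiv ab + t^n\big(F_n(a, b) + a \phi_n(b) - \phi_n(ab) + \phi_n(a)\, b\big)\\
+ &= ab + t^n (F_n + \delta_1 \phi_n)(a, b),
+\end{align*}
+and the $t^n$ term vanishes precisely because $F_n = -\delta_1 \phi_n$.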
+
+We saw that derivations can be thought of as infinitesimal automorphisms. One can similarly consider $k[[t]]$-linear maps of the form
+\[
+ \Phi(a) = a + t \phi_1(a) + t^2 \phi_2(a) + \cdots
+\]
+and consider whether they define automorphisms of $A \otimes k[[t]]$. Working modulo $t^2$, we have already done this problem --- we are just considering automorphisms of $A[\varepsilon]$, and we saw that these automorphisms correspond to derivations.
+
+\begin{defi}[Integrable derivation]\index{derivation!integrable}\index{integrable!derivation}
+ We say a derivation is \emph{integrable} if there is an automorphism of $A \otimes k[[t]]$ that gives the derivation when we reduce mod $t^2$.
+\end{defi}
+In this case, the obstructions are $2$-cocycles which are not coboundaries.
+
+\begin{thm}[Gerstenhaber]
+ Suppose $\HH^2(A, A) = 0$. Then all derivations are integrable.
+\end{thm}
+The proof is an exercise on the third example sheet.
+
+We haven't had many examples so far, because Hochschild cohomology is difficult to compute. But we can indeed do some examples.
+\begin{eg}
+ Let $A = k[X]$. Since $A$ is commutative, we have
+ \[
+ \HH^0(A, A) = A.
+ \]
+ Since $A$ is commutative, $A$ has no inner derivations. So we have
+ \[
+ \HH^1(A, A) = \Der A = \left\{f(X) \frac{\d}{\d X}: f(X) \in k[X]\right\}.
+ \]
+ For any $i > 1$, we have
+ \[
+ \HH^i(A, A) = 0.
+ \]
+ So we have
+ \[
+ \Dim(A) = 1.
+ \]
+ We can do this by explicit calculation. If we look at our Hochschild chain complex, we had a short exact sequence
+ \[
+ \begin{tikzcd}
+ 0 \ar[r] & \ker \mu \ar[r] & A \otimes A \ar[r] & A \ar[r] & 0
+ \end{tikzcd}\tag{$*$}
+ \]
+ and thus we have a map
+ \[
+ \begin{tikzcd}
+ A \otimes A \otimes A \ar[r, "d"] & A \otimes A
+ \end{tikzcd}
+ \]
+ whose image is $\ker \mu$.
+
+ The point is that $\ker \mu$ is a projective $A\mdash A$-bimodule. This will mean that $\HH^i(A, M) = 0$ for $i \geq 2$, in the same way we showed that $\HH^i(A, M) = 0$ for $i \geq 1$ when $_AA_A$ is a projective $A\mdash A$-bimodule. In particular, $\HH^i(A, A) = 0$ for $i \geq 2$.
+
+ To show that $\ker \mu$ is projective, we notice that
+ \[
+ A \otimes A = k[X] \otimes_k k[X] \cong k[X, Y].
+ \]
+ So the short exact sequence $(*)$ becomes
+ \[
+ \begin{tikzcd}
+ 0 \ar[r] & (X - Y)k[X, Y] \ar[r] & k[X, Y] \ar[r] & k[X] \ar[r] & 0
+ \end{tikzcd}.
+ \]
+ So $(X - Y)k[X, Y]$ is a free $k[X, Y]$-module, and hence projective.
+
+ We can therefore use our theorems to see that any extension of $k[X]$ by $k[X]$ is split, and any $*$-product is trivial. We also get that any derivation is integrable.
+\end{eg}
+
+\begin{eg}
+ If we take $A = k[X_1, X_2]$, then again this is commutative, and
+ \begin{align*}
+ \HH^0(A, A) &= A\\
+ \HH^1(A, A) &= \Der A.
+ \end{align*}
+ We will talk about $\HH^2$ later, and similarly
+ \[
+ \HH^i(A, A) = 0
+ \]
+ for $i \geq 3$.
+\end{eg}
+From this, we see that we may have star products other than the trivial ones, and in fact we know we have, because we have one arising from the Rees algebra of $A_1(k)$. Moreover, since $\HH^3(A, A) = 0$, any infinitesimal deformation is integrable, and so yields a star product. So there are many more.
+
+\subsection{Gerstenhaber algebra}
+We now want to understand the equations $(\dagger)$ better. To do so, we consider the graded vector space
+\[
+ \HH^\Cdot (A, A) = \bigoplus_{n = 0}^\infty \HH^n(A, A),
+\]
+as a whole. It turns out this has the structure of a \emph{Gerstenhaber algebra}.
+
+The first structure to introduce is the cup product. It is a standard tool in cohomology theories. We will write
+\[
+ S^n(A, A) = \Hom_k(A^{\otimes n}, A) = \Hom_{A\mdash A} (A^{\otimes (n + 2)}, {_AA_A}).
+\]
+The Hochschild cochain complex is then the graded complex $S^{\Cdot}(A, A)$.
+
+\begin{defi}[Cup product]\index{cup product}
+ The \emph{cup product}
+ \[
+ \smile: S^m (A, A) \otimes S^n(A, A) \to S^{m + n}(A, A)
+ \]
+ is defined by
+ \[
+ (f \smile g)(a_1 \otimes \cdots \otimes a_m \otimes b_1 \otimes \cdots \otimes b_n) = f(a_1 \otimes \cdots \otimes a_m) \cdot g(b_1 \otimes \cdots \otimes b_n),
+ \]
+ where $a_i, b_j \in A$.
+\end{defi}
+Under this product, $S^\Cdot(A, A)$ becomes an associative graded algebra.
+
+Observe that for $f \in S^m(A, A)$ and $g \in S^n(A, A)$, we have
+\[
+ \delta(f \smile g) = \delta f \smile g + (-1)^{m} f \smile \delta g.
+\]
+So we say $\delta$ is a (left) \term{graded derivation}\index{derivation!graded} of the graded algebra $S^\Cdot(A, A)$. In homological (graded) algebra, we often use the same terminology but with suitable sign changes which depends on the degree.
+
+Note that the cocycles are closed under $\smile$. So cup product induces a product on $\HH^\Cdot(A, A)$. If $f \in S^m(A, A)$ and $g \in S^n(A, A)$, and both are cocycles, then
+\[
+ (-1)^m(g \smile f - (-1)^{mn} (f \smile g)) = \delta (f \circ g),
+\]
+where $f \circ g$ is defined as follows: we set\index{$f \circ_i g$}
+\begin{multline*}
+ f \circ_i g (a_1 \otimes \cdots \otimes a_{i - 1} \otimes b_1 \otimes \cdots \otimes b_n \otimes a_{i + 1} \otimes \cdots \otimes a_m)\\
+ = f(a_1 \otimes \cdots \otimes a_{i - 1} \otimes g(b_1 \otimes \cdots \otimes b_n) \otimes a_{i + 1} \otimes \cdots \otimes a_m).
+\end{multline*}
+Then we define\index{$f \circ g$}\index{circle product}
+\[
+ f \circ g = \sum_{i = 1}^m (-1)^{(n - 1)(i - 1)} f \circ_i g.
+\]
+This product $\circ$ is not an associative product, but gives a \term{pre-Lie structure} on $S^\Cdot (A, A)$.
+
+\begin{defi}[Gerstenhaber bracket]\index{Gerstenhaber bracket}
+ The \emph{Gerstenhaber bracket} is
+ \[
+ [f, g] = f \circ g - (-1)^{(n + 1)(m + 1)} g \circ f
+ \]
+ for $f \in S^m(A, A)$ and $g \in S^n(A, A)$.
+\end{defi}
+This defines a graded Lie algebra structure on the Hochschild cochain complex, but notice that we have a degree shift by $1$. It is a graded Lie algebra on $S^{\Cdot + 1}(A, A)$.
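+In the lowest degree the bracket is familiar: for $f, g \in S^1(A, A)$, we have $f \circ g = f \circ_1 g$, which is just composition of maps, and the sign $(-1)^{(n + 1)(m + 1)}$ is $+1$, so
+\[
+ [f, g](a) = f(g(a)) - g(f(a)).
+\]
+Restricted to the $1$-cocycles, i.e.\ to $\Der(A, A)$, this is the usual commutator, recovering the Lie algebra structure on derivations we saw earlier.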
+
+Of course, we really should define what a graded Lie algebra is.
+\begin{defi}[Graded Lie algebra]\index{graded Lie algebra}\index{Lie algebra!graded}
+ A \emph{graded Lie algebra} is a vector space
+ \[
+ L = \bigoplus L_i
+ \]
+ with a bilinear bracket $[\ph,\ph] : L \times L \to L$ such that
+ \begin{itemize}
+ \item $[L_i, L_j] \subseteq L_{i + j}$;
+ \item $[f, g] = -(-1)^{mn} [g, f]$; and
+ \item The \term{graded Jacobi identity}\index{Jacobi identity!graded} holds:
+ \[
+ (-1)^{mp} [[f, g], h] + (-1)^{mn} [[g, h], f] + (-1)^{np} [[h, f] ,g] = 0
+ \]
+ \end{itemize}
+ where $f \in L_m$, $g \in L_n$, $h \in L_p$.
+\end{defi}
+
+In fact, $S^{\Cdot + 1}(A, A)$ is a \term{differential graded Lie algebra} under the Gerstenhaber bracket.
+
+\begin{lemma}
+ The cup product on $\HH^\Cdot(A, A)$ is graded commutative, i.e.
+ \[
+ f \smile g = (-1)^{mn} (g \smile f).
+ \]
+ when $f \in \HH^m(A, A)$ and $g \in \HH^n(A, A)$.
+\end{lemma}
+
+\begin{proof}
+ We previously ``noticed'' that
+ \[
+ (-1)^m(g \smile f - (-1)^{mn} (f \smile g)) = \delta (f \circ g).
+ \]
+ So if $f$ and $g$ are cocycles, then $g \smile f - (-1)^{mn} (f \smile g)$ is a coboundary, and hence vanishes in cohomology.
+\end{proof}
+
+\begin{defi}[Gerstenhaber algebra]\index{Gerstenhaber algebra}
+ A \emph{Gerstenhaber algebra} is a graded vector space
+ \[
+ H = \bigoplus H^i
+ \]
+ with $H^{\Cdot + 1}$ a graded Lie algebra with respect to a bracket $[\ph, \ph]: H^m \times H^n \to H^{m + n - 1}$, and an associative product $\smile: H^m \times H^n \to H^{m + n}$ which is graded commutative, such that if $f \in H^m$, then $[f, \ph]$ acts as a degree $m - 1$ graded derivation of $\smile$:
+ \[
+ [f, g \smile h] = [f, g] \smile h + (-1)^{(m - 1)n} g \smile [f, h]
+ \]
+ if $g \in H^n$.
+\end{defi}
+This is analogous to the definition of a Poisson algebra. We've seen that $\HH^\Cdot(A, A)$ is an example of a Gerstenhaber algebra.
+
+We can look at what happens in low degrees. We know that $H^0$ is a commutative $k$-algebra, and $\smile: H^0 \times H^1 \to H^1$ is a module action.
+
+Also, $H^1$ is a Lie algebra, and $[\ph, \ph]: H^1 \times H^0 \to H^0$ is a Lie module action, i.e.\ $H^0$ gives us a Lie algebra representation of $H^1$. In other words, the corresponding map $[\ph, \ph]: H^1 \to \End_k(H^0)$ gives us a map of Lie algebras $H^1 \to \Der(H^0)$.
+
+The prototype Gerstenhaber algebra is the exterior algebra $\bigwedge \Der A$ for a commutative algebra $A$ (with $A$ in degree $0$).
+
+Explicitly, to define the exterior product over $A$, we first consider the tensor product over $A$ of two $A$-modules $V$ and $W$, defined by
+\[
+ V \otimes_A W = \frac{V \otimes_k W}{\bra av \otimes w - v \otimes aw\ket}
+\]
+The exterior product is then
+\[
+ V \wedge_A V = \frac{V \otimes_A V}{\bra v \otimes v: v \in V\ket}.
+\]
+The product is given by the wedge, and the \term{Schouten bracket} is given by
+\begin{multline*}
+ [\lambda_1 \wedge \cdots \wedge \lambda_m, \lambda_1' \wedge \cdots \wedge \lambda_n'] \\
+ = (-1)^{(m - 1)(n - 1)} \sum_{i, j} (-1)^{i + j} [\lambda_i, \lambda_j'] \wedge \underbrace{\lambda_1 \wedge \cdots \wedge \lambda_m}_{\text{$i$th missing}} \wedge \underbrace{\lambda_1' \wedge \cdots \wedge \lambda_n'}_{\text{$j$th missing}}.
+\end{multline*}
+For any Gerstenhaber algebra $H = \bigoplus H^i$, there is a canonical homomorphism of Gerstenhaber algebras
+\[
+ \bigwedge_{H^0}H^1 \to H.
+\]
+\begin{thm}[Hochschild--Kostant--Rosenberg (HKR) theorem]\index{Hochschild--Kostant--Rosenberg theorem}\index{HKR theorem}
+ If $A$ is a ``smooth'' commutative $k$-algebra, and $\Char k = 0$, then the canonical map
+ \[
+ \bigwedge_A (\Der A) \to \HH^*(A, A)
+ \]
+ is an isomorphism of Gerstenhaber algebras.
+\end{thm}
+We will not say what ``smooth'' means, but this applies if $A = k[X_1, \cdots, X_n]$, or if $k = \C$ or $\R$ and $A$ is an algebra of appropriate functions on a smooth manifold or algebraic variety.
+
+In the 1960s, this was stated just for the algebra structure; the Lie algebra structure was not considered.
+
+\begin{eg}
+ Let $A = k[X, Y]$, with $\Char k = 0$. Then $\HH^0(A, A) = A$ and
+ \[
+ \HH^1(A, A) = \Der A \cong \left\{p(X, Y) \frac{\partial}{\partial X} + q(X, Y) \frac{\partial}{\partial Y}: p, q \in A\right\}.
+ \]
+ So we have
+ \[
+ \HH^2(A, A) = \Der A \wedge_A \Der A,
+ \]
+ which is generated as an $A$-module by $\frac{\partial}{\partial X} \wedge \frac{\partial}{\partial Y}$. Then
+ \[
+ \HH^i(A, A) = 0\text{ for all }i \geq 3.
+ \]
+\end{eg}
+
+We can now go back to talk about star products. Recall that we considered possible star products on $V \otimes_k k[[t]]$, where $V$ is the underlying vector space of the algebra $A$. We found that associativity of the star product was encapsulated by the equations $(\dagger_\lambda)$. Collectively, these are equivalent to the following single statement:
+\begin{defi}[Maurer--Cartan equation]\index{Maurer--Cartan equation}
+ The \emph{Maurer--Cartan equation} is
+ \[
+ \delta f + \frac{1}{2} [f, f]_{\mathrm{Gerst}} = 0
+ \]
+ for the element
+ \[
+ f = \sum t^\lambda F_\lambda,
+ \]
+ where $F_0(a, b) = ab$.
+\end{defi}
+When we write $[\ph, \ph]_{\mathrm{Gerst}}$, we really mean the $k[[t]]$-linear extension of the Gerstenhaber bracket.
+
+If we want to think of things in cohomology instead, then we are looking at things modulo coboundaries. For the graded Lie algebra $\bigwedge^{\Cdot + 1}(\Der A)$, the Maurer--Cartan elements, i.e.\ solutions of the Maurer--Cartan equation, are the \term{formal Poisson structures}. They are formal power series of the form
+\[
+ \Pi = \sum t^i \pi_i
+\]
+for $\pi_i \in \Der A \wedge \Der A$, satisfying
+\[
+ [\Pi, \Pi] = 0.
+\]
+There is a deep theorem of Kontsevich from the early 2000's which implies
+
+\begin{thm}[Kontsevich]
+ There is a bijection
+ \[
+ \left\{\parbox{3cm}{\centering equivalence classes of star products}\right\} \longleftrightarrow \left\{\parbox{3cm}{\centering classes of formal Poisson structures}\right\}
+ \]
+ This applies for smooth algebras in $\Char 0$, and in particular for polynomial algebras $A = k[X_1, \cdots, X_n]$.
+\end{thm}
+This is a difficult theorem, and the first proof appeared in 2002.
+
+An unnamed lecturer once tried to give a Part III course with this theorem as the punchline, but the course ended up lasting 2 terms, and they never reached the punchline.
+
+\subsection{Hochschild homology}
+We don't really have much to say about Hochschild homology, but we are morally obliged to at least write down the definition.
+
+To do Hochschild homology, we apply $\ph \otimes_{A\mdash A} M$ for an $A\mdash A$-bimodule $M$ to the Hochschild chain complex.
+\[
+ \begin{tikzcd}
+ \cdots \ar[r, "d"] & A \otimes_k A \otimes_k A \ar[r, "d"] & A \otimes_k A \ar[r, "\mu"] & A \ar[r] & 0.
+ \end{tikzcd}
+\]
+We will ignore the $\to A \to 0$ bit. We need to consider what $\ph\otimes_{A\mdash A} \ph$ means. If we have bimodules $V$ and $W$, we can regard $V$ as a right $A \otimes A^{\op}$-module. We can also think of $W$ as a left $A \otimes A^\op$-module. We let
+\[
+ B = A \otimes A^{\op},
+\]
+and then we just consider
+\[
+ V \otimes_B W = \frac{V \otimes_k W}{\bra v x \otimes w - v \otimes xw: x \in B\ket} = \frac{V \otimes_k W}{\bra ava' \otimes w - v \otimes a' w a \ket}.
+\]
+Thus we have
+\[
+ \begin{tikzcd}
+ \cdots \ar[r, "b_1"] & (A \otimes_k A \otimes_k A) \otimes_{A\mdash A} M \ar[r, "b_0"] & (A \otimes_k A) \otimes_{A \mdash A}M \cong M
+ \end{tikzcd}.
+\]
+\begin{defi}[Hochschild homology]\index{Hochschild homology}
+ The \emph{Hochschild homology} groups are
+ \begin{align*}
+ \HH_0 (A, M) &= \frac{M}{\im b_0}\\
+ \HH_i(A, M) &= \frac{\ker b_{i - 1}}{\im b_i}
+ \end{align*}
+ for $i > 0$.
+\end{defi}
+
+A long time ago, we counted the number of simple $kG$-modules for $k$ algebraically closed of characteristic $p$ when $G$ is finite. In the proof, we used $\frac{A}{[A, A]}$, and we pointed out this is $\HH_0(A, A)$.
+
+\begin{lemma}
+ \[
+ \HH_0(A, M) = \frac{M}{\bra xm - mx: m \in M, x \in A\ket}.
+ \]
+ In particular,
+ \[
+ \HH_0(A, A) = \frac{A}{[A, A]}.
+ \]
+\end{lemma}
+
+\begin{proof}
+ Exercise.
+\end{proof}
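+Here is a sketch, using the conventions above. By the relation $ava' \otimes m = v \otimes a' m a$, every element of $(A \otimes A) \otimes_{A\mdash A} M$ can be written in the form $(1 \otimes 1) \otimes m$; this gives the identification with $M$, under which $(a \otimes b) \otimes m \mapsto bma$. Then
+\[
+ b_0((a_0 \otimes a_1 \otimes a_2) \otimes m) = (a_0 a_1 \otimes a_2) \otimes m - (a_0 \otimes a_1 a_2) \otimes m \mapsto a_2 m a_0 a_1 - a_1 a_2 m a_0,
+\]
+which is $m' a_1 - a_1 m'$ for $m' = a_2 m a_0$, and all such elements arise (take $a_0 = a_2 = 1$). So $\im b_0 = \bra xm - mx\ket$.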
+
+\section{Coalgebras, bialgebras and Hopf algebras}
+We are almost at the end of the course. So let's define an algebra.
+\begin{defi}[Algebra]\index{algebra}\index{$k$-algebra}
+ A \emph{$k$-algebra} is a $k$-vector space $A$ and $k$-linear maps
+ \begin{align*}
+ \mu: A \otimes A &\to A & u: k &\to A\\
+ x \otimes y &\mapsto xy & \lambda &\mapsto \lambda I
+ \end{align*}
+ called the \term{multiplication}/\term{product} and \term{unit} such that the following two diagrams commute:
+ \[
+ \begin{tikzcd}
+ A \otimes A \otimes A \ar[r, "\mu \otimes \id"] \ar[d, "\id \otimes \mu"] & A \otimes A \ar[d, "\mu"]\\
+ A \otimes A \ar[r, "\mu"] & A
+ \end{tikzcd}
+ \quad
+ \begin{tikzcd}
+ k \otimes A \ar[rd, "\cong"] \ar[r, "u \otimes \id"] & A \otimes A \ar[d, "\mu"] & A \otimes k \ar[l, "\id \otimes u"] \ar[ld, "\cong"]\\
+ & A
+ \end{tikzcd}
+ \]
+ These encode \term{associativity} and \term{identity} respectively.
+\end{defi}
+Of course, the point wasn't to actually define an algebra. The point is to define a \emph{co}algebra, whose definition is entirely dual.
+
+\begin{defi}[Coalgebra]\index{coalgebra}
+ A \emph{coalgebra} is a $k$-vector space $C$ and $k$-linear maps
+ \begin{align*}
+ \Delta: C &\to C \otimes C & \varepsilon: C &\to k
+ \end{align*}
+ called \term{comultiplication}/\term{coproduct} and \term{counit} respectively, such that the following diagrams commute:
+ \[
+ \begin{tikzcd}
+ C \otimes C \otimes C & C \otimes C \ar[l, "\id \otimes \Delta"]\\
+ C \otimes C \ar[u, "\Delta \otimes \id"] & C \ar[l, "\Delta"] \ar[u, "\Delta"]
+ \end{tikzcd}
+ \quad
+ \begin{tikzcd}
+ k \otimes C & C \otimes C \ar[l, "\varepsilon \otimes \id"']\ar[r, "\id \otimes \varepsilon"] & C \otimes k\\
+ & C \ar[u, "\Delta"] \ar[ru, "\cong"'] \ar[ul, "\cong"]
+ \end{tikzcd}
+ \]
+ These encode \term{coassociativity} and \term{coidentity} respectively.
+
+ A \term{morphism}\index{morphism!coalgebras} of coalgebras $f: C \to D$ is a $k$-linear map such that the following diagrams commute:
+ \[
+ \begin{tikzcd}
+ C \ar[d, "\Delta"] \ar[r, "f"] & D \ar[d, "\Delta"]\\
+ C \otimes C \ar[r, "f \otimes f"] & D \otimes D
+ \end{tikzcd}
+ \quad
+ \begin{tikzcd}
+ C \ar[d, "\varepsilon"] \ar[r, "f"] & D \ar[d, "\varepsilon"]\\
+ k \ar[r, equals] & k
+ \end{tikzcd}
+ \]
+ A subspace $I$ of $C$ is a \term{co-ideal} if $\Delta(I) \leq C \otimes I + I \otimes C$, and $\varepsilon(I) = 0$. In this case, $C/I$ inherits a coproduct and counit.
+
+ A \term{cocommutative coalgebra}\index{coalgebra!cocommutative} is one for which $\tau \circ \Delta = \Delta$, where $\tau: V \otimes W \to W \otimes V$, given by $v \otimes w \mapsto w \otimes v$, is the ``twist map''.
+\end{defi}
+It might be slightly difficult to get one's head around what a coalgebra actually is. It, of course, helps to look at some examples, and we will shortly do so. It also helps to know that for our purposes, we don't really care about coalgebras \emph{per se}, but things that are both algebras and coalgebras, in a compatible way.
+
+There is a very natural reason to be interested in such things. Recall that when doing representation theory of groups, we can take the tensor product of two representations and get a new representation. Similarly, we can take the dual of a representation and get a new representation.
+
+If we try to do this for representations (i.e.\ modules) of general algebras, we see that this is not possible. What is missing is that in fact, the algebras $kG$ and $\mathcal{U}(\mathfrak{g})$ also have the structure of coalgebras. In fact, they are \emph{Hopf algebras}, which we will define soon.
+
+We shall now write down some coalgebra structures on $kG$ and $\mathcal{U}(\mathfrak{g})$.
+\begin{eg}
+ If $G$ is a group, then $kG$ is a co-algebra, with
+ \begin{align*}
+ \Delta(g) &= g \otimes g\\
+ \varepsilon\left(\sum \lambda_g g\right) &= \sum \lambda_g.
+ \end{align*}
+ We should think of the specification $\Delta (g) = g \otimes g$ as saying that our groups act diagonally on the tensor products of representations. More precisely, if $V, W$ are representations and $v \in V, w \in W$, then $g$ acts on $v \otimes w$ by
+ \[
+ \Delta(g) \cdot (v \otimes w) = (g \otimes g) \cdot (v \otimes w) = (gv) \otimes (gw).
+ \]
+\end{eg}
+
+\begin{eg}
+ For a Lie algebra $\mathfrak{g}$ over $k$, the universal enveloping algebra $\mathcal{U}(\mathfrak{g})$ is a co-algebra with
+ \[
+ \Delta(x) = x \otimes 1 + 1 \otimes x
+ \]
+ for $x \in \mathfrak{g}$, and we extend this by making it an algebra homomorphism.
+
+ To define $\varepsilon$, we note that elements of $\mathcal{U}(\mathfrak{g})$ are uniquely of the form
+ \[
+ \lambda + \sum \lambda_{i_1, \ldots, i_n} x_1^{i_1} \cdots x_n^{i_n},
+ \]
+ where $\{x_i\}$ is a basis of $\mathfrak{g}$ (the PBW theorem). Then we define
+ \[
+ \varepsilon\left(\lambda + \sum \lambda_{i_1, \ldots, i_n} x_1^{i_1} \cdots x_n^{i_n}\right) = \lambda.
+ \]
+ This time, the specification of $\Delta$ is telling us that if $X \in \mathfrak{g}$ and $v, w$ are elements of a representation of $\mathfrak{g}$, then $X$ acts on the tensor product by
+ \[
+ \Delta(X) \cdot (v \otimes w) = Xv \otimes w + v \otimes Xw.
+ \]
+\end{eg}
+
+\begin{eg}
+ Consider
+ \[
+ \mathcal{O}(M_n(k)) = k[X_{ij}: 1 \leq i, j\leq n],
+ \]
+ the polynomial functions on $n \times n$ matrices, where $X_{ij}$ denotes the $ij$th entry. Then we define
+ \[
+ \Delta (X_{ij}) = \sum_{\ell = 1}^n X_{i\ell} \otimes X_{\ell j},
+ \]
+ and
+ \[
+ \varepsilon(X_{ij}) = \delta_{ij}.
+ \]
+ These are again algebra maps.
+
+ We can also talk about $\mathcal{O}(\GL_n(k))$ and $\mathcal{O}(\SL_n(k))$. The formula of the determinant gives an element $D \in \mathcal{O}(M_n(k))$. Then $\mathcal{O}(\GL_n(k))$ is given by adjoining a formal inverse to $D$ in $\mathcal{O}(M_n(k))$, and $\mathcal{O}(\SL_n(k))$ is obtained by quotienting $\mathcal{O}(\GL_n(k))$ by the bi-ideal $\bra D - 1\ket$.
+
+ From an algebraic geometry point of view, these are the coordinate algebra of the varieties $M_n(k)$, $\GL_n(k)$ and $\SL_n(k)$.
+\end{eg}
+The comultiplication here is dual to matrix multiplication.
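+Concretely, viewing the $X_{ij}$ as coordinate functions and evaluating at matrices $P, Q \in M_n(k)$, we get
+\[
+ \Delta(X_{ij})(P \otimes Q) = \sum_{\ell} X_{i\ell}(P)\, X_{\ell j}(Q) = \sum_{\ell} P_{i\ell} Q_{\ell j} = X_{ij}(PQ).
+\]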
+
+We have seen that we like things that are both algebras and coalgebras, compatibly. These are known as \emph{bialgebras}.
+\begin{defi}[Bialgebra]\index{bialgebra}
+ A \emph{bialgebra} is a $k$-vector space $B$ and maps $\mu, u, \Delta, \varepsilon$ such that
+ \begin{enumerate}
+ \item $(B, \mu, u)$ is an algebra.
+ \item $(B, \Delta, \varepsilon)$ is a coalgebra.
+ \item $\Delta$ and $\varepsilon$ are algebra morphisms.
+ \item $\mu$ and $u$ are coalgebra morphisms.
+ \end{enumerate}
+\end{defi}
+Being a bialgebra means we can take tensor products of modules and still get modules. If we want to take duals as well, then it turns out the right notion is that of a Hopf algebra:
+\begin{defi}[Hopf algebra]\index{Hopf algebra}
+ A bialgebra $(H, \mu, u, \Delta, \varepsilon)$ is a \emph{Hopf algebra} if there is an \term{antipode} $S: H \to H$ that is a $k$-linear map such that
+ \[
+ \mu \circ (S \otimes \id) \circ \Delta = \mu \circ (\id \otimes S) \circ \Delta = u \circ \varepsilon.
+ \]
+\end{defi}
+
+\begin{eg}
+ $kG$ is a Hopf algebra with $S(g) = g^{-1}$.
+\end{eg}
+
+\begin{eg}
+ $\mathcal{U}(\mathfrak{g})$ is a Hopf algebra with $S(x) = -x$ for $x \in \mathfrak{g}$.
+\end{eg}
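+In both examples, the antipode axiom can be checked directly on generators: for $g \in G$, we have $\Delta(g) = g \otimes g$ and $\varepsilon(g) = 1$, so
+\[
+ \mu((S \otimes \id)(\Delta(g))) = g^{-1} g = 1 = u(\varepsilon(g)),
+\]
+while for $x \in \mathfrak{g}$, we have $\Delta(x) = x \otimes 1 + 1 \otimes x$ and $\varepsilon(x) = 0$, so
+\[
+ \mu((S \otimes \id)(\Delta(x))) = -x + x = 0 = u(\varepsilon(x)).
+\]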
+
+%\begin{eg}
+% The coordinate algebra $\mathcal{O}(M_n(k))$ is a Hopf algebra in the following way --- we let $D$ be the determinant of a matrix, which is an element of $\mathcal{O}(M_n(k))$. Then $\varepsilon(D) = 1$ and $\Delta(D) = D \otimes D$. So $D - 1$ generates a bi-ideal. We define
+% \[
+% \mathcal{O}(\SL_n(k)) = \frac{\mathcal{O}(\GL_n(k))}{\bra D - 1\ket}.
+% \]
+% This is a Hopf algebra with $SX_{ij}$ being the $ij$th entry of $(X_{ij})^{-1}$ (modulo $D - 1$).
+%\end{eg}
+
+Note that our examples are all commutative or co-commutative. The term \term{quantum groups} usually refers to non-commutative non-co-commutative Hopf algebras. These are neither quantum nor groups.
+
+As usual, we write $V^*$ for $\Hom_k(V, k)$, and we note that if we have $\alpha: V \to W$, then this induces a dual map $\alpha^*: W^* \to V^*$.
+
+\begin{lemma}
+ If $C$ is a coalgebra, then $C^*$ is an algebra with multiplication $\Delta^*$ (that is, $\Delta^*|_{C^* \otimes C^*}$) and unit $\varepsilon^*$. If $C$ is co-commutative, then $C^*$ is commutative.
+\end{lemma}
+
+However, if an algebra $A$ is infinite dimensional as a $k$-vector space, then $A^*$ may not be a coalgebra. The problem is that $(A^* \otimes A^*)$ is a proper subspace of $(A \otimes A)^*$, and $\mu^*$ of an infinite dimensional $A$ need not take values in $A^* \otimes A^*$. However, all is fine for finite dimensional $A$, or if $A$ is graded with finite dimensional components, where we can form a graded dual.
+
+In general, for a Hopf algebra $H$, one can define the \term{Hopf dual},
+\[
+ H^0 = \{f \in H^*: \ker f \text{ contains an ideal of finite codimension}\}.
+\]
+\begin{eg}
+ Let $G$ be a finite group. Then $(kG)^*$ is a commutative non-co-commutative Hopf algebra if $G$ is non-abelian.
+
+ Let $\{g\}$ be the canonical basis for $kG$, and $\{\phi_g\}$ be the dual basis of $(kG)^*$. Then
+ \[
+ \Delta(\phi_g) = \sum_{h_1 h_2 = g} \phi_{h_1} \otimes \phi_{h_2}.
+ \]
+\end{eg}
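+
+Indeed, this $\Delta$ is just the dual of the multiplication of $kG$: evaluating on a basis element,
+\[
+ \Delta(\phi_g)(h_1 \otimes h_2) = \phi_g(h_1 h_2) =
+ \begin{cases}
+ 1 & h_1 h_2 = g,\\
+ 0 & \text{otherwise},
+ \end{cases}
+\]
+which agrees with $\sum_{h_1 h_2 = g} \phi_{h_1} \otimes \phi_{h_2}$ evaluated at $h_1 \otimes h_2$.
+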
+There is an easy way of producing non-commutative non-co-commutative Hopf algebras --- we take a non-commutative Hopf algebra and a non-co-commutative Hopf algebra, and take the tensor product of them, but this is silly.
+
+The easiest non-trivial example of a non-commutative non-co-commutative Hopf algebra is the \emph{Drinfeld double}, or \emph{quantum double}, which is a general construction from a finite-dimensional Hopf algebra.
+
+\begin{defi}[Drinfeld double]\index{Drinfeld double}\index{quantum double}
+ Let $G$ be a finite group. We define
+ \[
+ D(G) = (kG)^* \otimes_k kG
+ \]
+ as a vector space, and the algebra structure is given by the crossed product $(kG)^* \rtimes G$, where $G$ acts on $(kG)^*$ by
+ \[
+ f^g(x) = f(gxg^{-1}).
+ \]
+ Then the product is given by
+ \[
+ (f_1 \otimes g_1) (f_2 \otimes g_2) = f_1 f_2^{g_1^{-1}} \otimes g_1 g_2.
+ \]
+ The coalgebra structure is the tensor of the two coalgebras $(kG)^*$ and $kG$, with
+ \[
+ \Delta (\phi_g \otimes h) = \sum_{g_1 g_2 = g} \phi_{g_1} \otimes h \otimes \phi_{g_2} \otimes h.
+ \]
+ $D(G)$ is \term{quasitriangular}, i.e.\ there is an invertible element $R$ of $D(G) \otimes D(G)$ such that
+ \[
+ R \Delta (x) R^{-1} = \tau (\Delta(x)),
+ \]
+ where $\tau$ is the twist map. This is given by
+ \begin{align*}
+ R &= \sum_g (\phi_g \otimes 1) \otimes (1 \otimes g)\\
+ R^{-1} &= \sum_g (\phi_g \otimes 1) \otimes (1 \otimes g^{-1}).
+ \end{align*}
+ The equation $R \Delta R^{-1} = \tau \Delta$ results in an isomorphism between $U \otimes V$ and $V \otimes U$ for $D(G)$-modules $U$ and $V$, given by the flip followed by the action of $R$.
+\end{defi}
+If $G$ is non-abelian, then this is non-commutative and non-co-commutative. The point of defining this is that the representations of $D(G)$ correspond to the $G$-equivariant $k$-vector bundles on $G$.
+
+As we said, this is a general construction.
+\begin{thm}[Mastnak, Witherspoon (2008)]
+ The bialgebra cohomology $H^{\Cdot}_{bi}(H, H)$ of a finite-dimensional Hopf algebra $H$ is equal to $\HH^{\Cdot}(D(H), k)$, where $k$ is the trivial module, and $D(H)$ is the Drinfeld double.
+\end{thm}
+\separator
+
+In 1990, Gerstenhaber and Schack defined bialgebra cohomology, and proved results about deformations of bialgebras analogous to our results from the previous chapter for algebras. In particular, one can consider infinitesimal deformations, and up to equivalence, these correspond to elements of the $2$nd cohomology group.
+
+There is also the question as to whether an infinitesimal deformation is integrable to give a bialgebra structure on $V \otimes k[[t]]$, where $V$ is the underlying vector space of the bialgebra.
+
+\begin{thm}[Gerstenhaber--Schack]
+ Every deformation is equivalent to one where the unit and counit are unchanged. Also, deformation preserves the existence of an antipode, though the antipode itself may change.
+\end{thm}
+
+\begin{thm}[Gerstenhaber--Schack]
+ All deformations of $\mathcal{O}(M_n(k))$ or $\mathcal{O}(\SL_n(k))$ are equivalent to one in which the comultiplication is unchanged.
+\end{thm}
+
+We now try to deform $\mathcal{O}(M_2(k))$. By the previous theorems, we only have to change the multiplication. Consider $\mathcal{O}_q(M_2(k))$ defined by
+\begin{align*}
+ X_{12} X_{11} &= q X_{11} X_{12}\\
+ X_{22} X_{12} &= q X_{12} X_{22}\\
+ X_{21} X_{11} &= q X_{11} X_{21}\\
+ X_{22} X_{21} &= q X_{21} X_{22}\\
+ X_{21} X_{12} &= X_{12} X_{21}\\
+ X_{11} X_{22} - X_{22} X_{11} &= (q^{-1} - q) X_{12} X_{21}.
+\end{align*}
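+In line with the Gerstenhaber--Schack theorems above, the comultiplication and counit are unchanged from $\mathcal{O}(M_2(k))$:
+\[
+ \Delta(X_{ij}) = \sum_{\ell = 1}^2 X_{i\ell} \otimes X_{\ell j},\quad \varepsilon(X_{ij}) = \delta_{ij}.
+\]
+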
+We define the \emph{quantum determinant}
+\[
+ \det_q = X_{11} X_{22} - q^{-1} X_{12} X_{21} = X_{22} X_{11} - q X_{12} X_{21}.
+\]
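+The two expressions agree: substituting $X_{11} X_{22} = X_{22} X_{11} + (q^{-1} - q) X_{12} X_{21}$ from the last relation gives
+\[
+ X_{11} X_{22} - q^{-1} X_{12} X_{21} = X_{22} X_{11} + (q^{-1} - q) X_{12} X_{21} - q^{-1} X_{12} X_{21} = X_{22} X_{11} - q X_{12} X_{21}.
+\]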
+Then
+\[
+ \Delta(\det_q) = \det_q \otimes \det_q,\quad \varepsilon (\det_q) = 1.
+\]
+Then we define
+\[
+ \mathcal{O}_q(\SL_2(k)) = \frac{\mathcal{O}_q(M_2(k))}{(\det_q - 1)},
+\]
+where we are quotienting by the two-sided ideal generated by $\det_q - 1$. It is possible to define an antipode, given by
+\begin{align*}
+ S(X_{11}) &= X_{22}\\
+ S(X_{12}) &= -q X_{12}\\
+ S(X_{21}) &= -q^{-1} X_{21}\\
+ S(X_{22}) &= X_{11}.
+\end{align*}
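+As a quick consistency check of the antipode axiom on the generator $X_{11}$: since $\Delta(X_{11}) = X_{11} \otimes X_{11} + X_{12} \otimes X_{21}$, we have
+\[
+ m(S \otimes \mathrm{id}) \Delta(X_{11}) = X_{22} X_{11} - q X_{12} X_{21} = \det_q = 1 = \varepsilon(X_{11}) 1.
+\]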
+This gives a non-commutative and non-co-commutative Hopf algebra. It is an example that we pulled out of a hat, but there is a general construction due to Faddeev--Reshetikhin--Takhtajan (1988) via $R$-matrices, which are certain $k$-linear maps
+\[
+ V \otimes V \to V \otimes V,
+\]
+where $V$ is a finite-dimensional vector space.
+
+We take a basis $e_1, \cdots, e_n$ of $V$, and thus a basis $e_i \otimes e_j$ of $V \otimes V$. We write $R_{ij}^{\ell m}$ for the matrix of $R$, defined by
+\[
+ R(e_i \otimes e_j) = \sum_{\ell, m} R_{ij}^{\ell m} e_\ell \otimes e_m.
+\]
+The rows are indexed by pairs $(\ell, m)$, and the columns by pairs $(i, j)$, which are put in lexicographic order.
+
+The action of $R$ on $V \otimes V$ induces 3 different actions on $V \otimes V \otimes V$. For distinct $s, t \in \{1, 2, 3\}$, we let $R_{st}$ be the invertible map $V \otimes V \otimes V \to V \otimes V \otimes V$ which acts like $R$ on the $s$th and $t$th components, and as the identity on the other. So for example,
+\[
+ R_{12} (e_i \otimes e_j \otimes v) = \sum_{\ell, m} R_{ij}^{\ell m}\, e_\ell \otimes e_m \otimes v.
+\]
+\begin{defi}[Yang--Baxter equation]
+ $R$ satisfies the \term{quantum Yang--Baxter equation} (\term{QYBE}) if
+ \[
+ R_{12} R_{13} R_{23} = R_{23} R_{13} R_{12}
+ \]
+ and the braided form of QYBE (\term{braid equation}) if
+ \[
+ R_{12} R_{23} R_{12} = R_{23} R_{12} R_{23}.
+ \]
+\end{defi}
+Note that $R$ satisfies the QYBE if and only if $R\tau$ satisfies the braid equation. Solutions of either equation are called $R$-matrices.
+
+\begin{eg}
+ The identity map and the twist map $\tau$ satisfy both.
+
+ Take $V$ to be $2$-dimensional, and $R$ to be the map
+ \[
+ R_{ij}^{\ell m} =
+ \begin{pmatrix}
+ q & 0 & 0 & 0\\
+ 0 & 1 & 0 & 0\\
+ 0 & q - q^{-1} & 1 & 0\\
+ 0 & 0 & 0 & q
+ \end{pmatrix},
+ \]
+ where $q \in k$ is non-zero. Thus, we have
+ \begin{align*}
+ R(e_1 \otimes e_1) &= q e_1 \otimes e_1\\
+ R(e_2 \otimes e_1) &= e_2 \otimes e_1\\
+ R(e_1 \otimes e_2) &= e_1 \otimes e_2 + (q - q^{-1}) e_2 \otimes e_1\\
+ R(e_2 \otimes e_2) &= q e_2 \otimes e_2,
+ \end{align*}
+ and this satisfies QYBE. Similarly,
+ \[
+ (R \tau)^{\ell m}_{ij} =
+ \begin{pmatrix}
+ q & 0 & 0 & 0\\
+ 0 & 0 & 1 & 0\\
+ 0 & 1 & q - q^{-1} & 0\\
+ 0 & 0 & 0 & q
+ \end{pmatrix}
+ \]
+ satisfies the braid equation.
+\end{eg}
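+For instance, writing $B = R\tau$, so that $B(e_1 \otimes e_1) = q e_1 \otimes e_1$ and $B(e_1 \otimes e_2) = e_2 \otimes e_1$, one can verify the braid equation on the basis vector $e_1 \otimes e_1 \otimes e_2$: both sides give
+\begin{align*}
+ B_{12} B_{23} B_{12} (e_1 \otimes e_1 \otimes e_2) &= B_{12} B_{23} (q e_1 \otimes e_1 \otimes e_2) = B_{12} (q e_1 \otimes e_2 \otimes e_1) = q e_2 \otimes e_1 \otimes e_1,\\
+ B_{23} B_{12} B_{23} (e_1 \otimes e_1 \otimes e_2) &= B_{23} B_{12} (e_1 \otimes e_2 \otimes e_1) = B_{23} (e_2 \otimes e_1 \otimes e_1) = q e_2 \otimes e_1 \otimes e_1.
+\end{align*}
+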
+We now define the general construction.
+\begin{defi}[$R$-symmetric algebra]\index{$R$-symmetric algebra}
+ Given the tensor algebra
+ \[
+ T(V) = \bigoplus_{n = 0}^\infty V^{\otimes n},
+ \]
+ we form the $R$-symmetric algebra
+ \[
+ S_{R}(V) = \frac{T(V)}{\bra z - R(z): z \in V \otimes V\ket}.
+ \]
+\end{defi}
+\begin{eg}
+ If $R$ is the identity, then $S_R(V) = T(V)$.
+\end{eg}
+\begin{eg}
+ If $R = \tau$, then $S_R(V)$ is the usual symmetric algebra.
+\end{eg}
+
+\begin{eg}
+ The \term{quantum plane} $\mathcal{O}_q(k^2)$ can be written as $S_R(V)$ with
+ \begin{align*}
+ R(e_1 \otimes e_2) &= q e_2 \otimes e_1\\
+ R(e_1 \otimes e_1) &= e_1 \otimes e_1\\
+ R(e_2 \otimes e_1) &= q^{-1} e_1 \otimes e_2\\
+ R(e_2 \otimes e_2) &= e_2 \otimes e_2.
+ \end{align*}
+\end{eg}
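+Unwinding the definition, the relations $z - R(z)$ vanish for $z = e_1 \otimes e_1$ and $z = e_2 \otimes e_2$, while $z = e_1 \otimes e_2$ and $z = e_2 \otimes e_1$ both impose the same relation
+\[
+ e_1 e_2 = q e_2 e_1
+\]
+in the quotient, which is exactly the quantum plane relation.
+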
+Generally, given a $V$ which is finite-dimensional as a vector space, we can identify $(V \otimes V)^*$ with $V^* \otimes V^*$.
+
+We set $E = V \otimes V^* \cong \End_k(V) \cong M_n(k)$. We define $R_{13}, R_{24}^*: E \otimes E \to E \otimes E$, where $R_{13}$ acts like $R$ on terms $1$ and $3$ of $E \otimes E = V \otimes V^* \otimes V \otimes V^*$, and as the identity on the rest; $R_{24}^*$ acts like $R^*$ on terms $2$ and $4$.
+
+\begin{defi}[Coordinate algebra of quantum matrices]\index{coordinate algebra associated with quantum matrices}
+ The coordinate algebra of quantum matrices associated with $R$ is
+ \[
+ A_R(V) = \frac{T(E)}{ \bra R_{13}(z) - R_{24}^*(z): z \in E \otimes E\ket} = S_T(E),
+ \]
+ where
+ \[
+ T = R_{24}^* R_{13}^{-1};
+ \]
+ substituting $w = R_{13}(z)$ turns the relations $R_{13}(z) - R_{24}^*(z)$ into $w - T(w)$, which is why the quotient is $S_T(E)$.
+ The coalgebra structure remains the same as that of $\mathcal{O}(M_n(k))$. Writing $E_i$ for the image of $e_i$ in $S_R(V)$, and similarly $F_j$ for the image of $f_j$, we have coactions
+ \begin{align*}
+ E_i &\mapsto \sum_{j = 1}^n X_{ij} \otimes E_j\\
+ F_j &\mapsto \sum_{i = 1}^n F_i \otimes X_{ij}.
+ \end{align*}
+\end{defi}
+This is the general construction we were after.
+
+\begin{eg}
+ We have
+ \[
+ \mathcal{O}_q(M_2(k)) = A_{R\tau}(V)
+ \]
+ for
+ \[
+ R_{ij}^{\ell m} =
+ \begin{pmatrix}
+ q & 0 & 0 & 0\\
+ 0 & 1 & 0 & 0\\
+ 0 & q - q^{-1} & 1 & 0\\
+ 0 & 0 & 0 & q
+ \end{pmatrix}.
+ \]
+\end{eg}
+\printindex
+\end{document}