\chapter{Various topics}
This chapter is currently a repository for various topics that may or may not
reach a status worthy of their own chapters in the future, but in any event
should be included.
\section{Linear algebra over rings}
\subsection{The determinant trick}
We want to understand what $IM=M$ means.
Let $I\subset R$ be an ideal and ${}_R M$ a finitely generated module. Let
$E=\End_R (M)$, which is not commutative in general. We may view $M$ as an $E$-module
${}_E M$. Since the action of every element of $R$ on $M$ commutes with every element of $E$, $E$ is an $R$-algebra (i.e.\
there is a homomorphism $R\to E$ sending $R$ into the center of $E$).
\begin{lemma}[Determinant Trick]
\begin{enumerate}\item[]
\item Every $\phi\in E$ such that $\phi(M)\subset IM$ satisfies a monic equation
of the form $\phi^n+a_1\phi^{n-1} +\cdots + a_n=0$, where each $a_i\in I$, i.e.\
$\phi$ is ``integral over $I$''.
\item $IM=M$ if and only if $(1-a)M=0$ for some $a\in I$.
\end{enumerate}
\end{lemma}
\begin{proof}
(1) Fix a finite set of generators, $M=Rm_1+\cdots + Rm_n$. Then we have
$\phi(m_i)=\sum_j a_{ij} m_j$, with $a_{ij}\in I$ by assumption. Let $A=(a_{ij})$, and
write $I_n$ for the $n\times n$ identity matrix (not to be confused with the ideal $I$).
Then these equations tell us that $(\phi I_n-A)\vec{m}=0$, where $\phi I_n-A$ is a matrix
over the commutative ring $R[\phi]\subset E$. Multiplying by the adjoint of
the matrix $\phi I_n-A$, we get that $\det(\phi I_n-A)m_i=0$ for each $i$; since the $m_i$
generate $M$, it follows that
$\det(\phi I_n-A)=0\in E$. But $\det(\phi I_n-A)=\phi^n+a_1\phi^{n-1}+\cdots +a_n$ for some
$a_i\in I$.

(2) The ``if'' part is clear, since $(1-a)M=0$ gives $M=aM\subset IM$. For the ``only if''
part, apply (1) to $\phi=\id_M$ (note $\id_M(M)=M=IM$): we get $(1+a_1+\cdots+a_n)M=0$,
so $(1-a)M=0$ with $a=-(a_1+\cdots+a_n)\in I$.
\end{proof}
\begin{remark}
The determinant trick (part 2) actually includes Nakayama's Lemma: if $I\subset
\rad R$, then $1-a$ is a unit for every $a\in I$, so $(1-a)M=0$ forces $M=0$.
\end{remark}
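The finite generation hypothesis in the lemma is essential; the following standard example (one of many possible choices) illustrates what goes wrong without it.
\begin{example}
Let $R=\mathbb{Z}_{(2)}$, the localization of $\mathbb{Z}$ at the prime $(2)$, so that $\rad R=(2)$, and let $M=\mathbb{Q}$. Then $(2)M=M$ but $M\neq 0$; of course, $M$ is not finitely generated over $R$.
\end{example}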
\begin{corollary}
For a finitely generated ideal $I\subset R$, $I=I^2$ if and only if $I=eR$ for some
$e=e^2$.
\end{corollary}
\begin{proof}
($\Leftarrow$) Clear, since $(eR)^2=e^2R=eR$.

($\Rightarrow$) Apply the determinant trick (part 2) to the finitely generated module $M={}_R I$
(note that $IM=I^2=I=M$). We get
$(1-e)I=0$ for some $e\in I$, so $(1-e)a=0$ for each $a\in I$, i.e.\ $a=ea$; hence $I$ is
generated by $e$. Taking $a=e$, we see that $e$ is idempotent.
\end{proof}
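Here is a concrete instance of the corollary, which the reader can check directly.
\begin{example}
In $R=\mathbb{Z}/6\mathbb{Z}$, the ideal $I=(2)=\{0,2,4\}$ satisfies $I^2=(4)=I$, and indeed $I=eR$ for the idempotent $e=4$ (note $4^2=16=4$ in $R$). Here $(1-e)I=3\cdot(2)=0$, exactly as in the proof.
\end{example}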
\begin{corollary}[Vasconcelos-Strooker Theorem]
Let $M$ be a finitely generated module over \emph{any} commutative ring $R$. If $\phi\in
\End_R(M)$ is onto, then it is injective.
\end{corollary}
\begin{proof}
We can view $M$ as a module over $R[t]$, where $t$ acts by $\phi$; note that $M$ is still
finitely generated over $R[t]$. Apply the
determinant trick (part 2) to $I=t\cdot R[t]\subset R[t]$. We have $IM=M$
because $\phi$ is surjective: every $m\in M$ is of the form $m=\phi(m_0)=t\cdot m_0\in IM$. It follows that
there is some $th(t)\in I$ such that $(1-th(t))M=0$. In particular, if $m\in \ker \phi$, then
$h(t)t\cdot m=h(\phi)(\phi(m))=0$, so $0=(1-h(t)t)m=m$, and $\phi$ is injective.
\end{proof}
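The corollary cannot be reversed, as the following remark records.
\begin{remark}
The converse fails: an injective endomorphism of a finitely generated module need not be surjective. For instance, multiplication by $2$ on the $\mathbb{Z}$-module $\mathbb{Z}$ is injective but not onto.
\end{remark}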
\subsection{Determinantal ideals}
\begin{definition}
An ideal $I\subset R$ is called \emph{dense}\index{dense ideal} if $rI=0$ implies $r=0$.
This is denoted $I\subset_d R$. This is the same as saying that ${}_RI$ is a
faithful module over $R$.
\end{definition}
If $I$ is a principal ideal, say $Rb$, then $I$ is dense exactly when $b\in \mathcal{C}(R)$. The
easiest case is when $R$ is a domain, in which case an ideal is dense exactly when it is
non-zero.
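For a quick non-domain example: in $R=\mathbb{Z}/6\mathbb{Z}$, the principal ideal $(2)$ is not dense, since $3\cdot(2)=0$ while $3\neq 0$; equivalently, $2\notin\mathcal{C}(R)$.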
If $R$ is an integral domain, then by working over the quotient field, one can define
the rank of a matrix with entries in $R$. But if $R$ is not a domain, rank becomes
tricky. Let $A\in\mathbb{M}_{n,m}(R)$, and let $\mathcal{D}_i(A)$ be the $i$-th \emph{determinantal ideal} of $A$ in $R$, generated by all
the determinants of $i\times i$ minors of $A$. We define $\mathcal{D}_0(A)=R$. If $i>
\min\{n,m\}$, define $\mathcal{D}_i(A)=(0)$.
Note that $\mathcal{D}_{i+1}(A)\subset \mathcal{D}_i(A)$, because expanding an $(i+1)\times(i+1)$
determinant along a row writes it as an $R$-combination of $i\times i$ determinants, so we have a
chain
\[
R=\mathcal{D}_0(A)\supset \mathcal{D}_1(A)\supset \cdots \supset (0).
\]
\begin{definition}
Over a non-zero ring $R$, the \emph{McCoy rank} (or just \emph{rank}) of $A$ is defined to be
the largest $i$ such that $\mathcal{D}_i(A)$ is dense in $R$ (such an $i$ exists, since
$\mathcal{D}_0(A)=R$ is dense). The rank of $A$ is denoted
$rk(A)$.
\end{definition}
If $R$ is an integral domain, then $rk(A)$ is just the usual rank. Note that over any
ring, $rk(A)\le \min\{n,m\}$.
If $rk(A)=0$, then $\mathcal{D}_1(A)$ fails to be dense, so there is some non-zero element $r$
such that $rA=0$. That is, $r$ annihilates every entry of $A$.
If $A\in \mathbb{M}_{n,n}(R)$, then $A$ has rank $n$ (full rank) if and only if $\det A$ is a
regular element.
\begin{exercise}
Let $R=\mathbb{Z}/6\mathbb{Z} $, and let $A=diag(0,2,4)$, $diag(1,2,4)$, $diag(1,2,3)$, $diag(1,5,5)$
($3\times 3$ matrices). Compute the rank of $A$ in each case.
\end{exercise}
\begin{solution}\raisebox{-2\baselineskip}{
$\begin{array}{c|cccc}
A & \mathcal{D}_1(A) & \mathcal{D}_2(A) & \mathcal{D}_3(A) & \\ \hline
diag(0,2,4) & (2) & (2) & (0) & 3\cdot (2)=0\text{, so }rk=0 \\
diag(1,2,4) & R & (2) & (2) & 3\cdot (2)=0\text{, so }rk=1 \\
diag(1,2,3) & R & R & (0) & \mathcal{D}_3=(0)\text{, so }rk=2 \\
diag(1,5,5) & R & R & R & \text{so }rk=3
\end{array}$}
\end{solution}
\subsection{McCoy's theorem}
Let $A\in \mathbb{M}_{n,m}(R)$. If $R$ is a field, the rank of $A$ is the dimension of the
image of $A:R^m\to R^n$, and $m-rk(A)$ is the dimension of the null space. That
is, whenever $rk(A)< m$, there is a nontrivial solution to the system of linear equations
\begin{equation}
0 = A\cdot x \label{lec02ast}
\end{equation}
which says that the columns $\alpha_i\in R^n$ of $A$ satisfy the dependence $\sum
x_i\alpha_i=0$. The following theorem of McCoy generalizes this so that $R$ can be any
non-zero commutative ring.
\begin{theorem}[McCoy]\label{lec02T:McCoy3}
If $R$ is not the zero ring, the following are equivalent:
\begin{enumerate}
\item The columns $\alpha_1$, \dots, $\alpha_m$ are linearly dependent.
\item Equation \ref{lec02ast} has a nontrivial solution.
\item $rk(A)<m$.
\end{enumerate}
\end{theorem}
\begin{corollary}
If $R\ne 0$, the following hold
\begin{enumerate}
\item[(a)] If $n<m$ (i.e.\ if there are ``more variables than equations''), then
Equation \ref{lec02ast} has a nontrivial solution.
\item[(b)] $R$ has the ``strong rank property'':
$R^m\hookrightarrow R^n \Longrightarrow m\le n$.
\item[(c)] $R$ has the ``rank property'':
$R^n\twoheadrightarrow R^m \Longrightarrow m\le n$.
\item[(d)] $R$ has the ``invariant basis property'':
$R^m\cong R^n \Longrightarrow m=n$.
\end{enumerate}
\end{corollary}
\begin{proof}[Proof of Corollary]
$(a)$ If $n<m$, then $rk(A)\le \min\{n,m\} =n< m$, so by Theorem \ref{lec02T:McCoy3},
Equation \ref{lec02ast} has a non-trivial solution.
$(a\Rightarrow b)$ If $m>n$, then by $(a)$, any $R$-linear map $R^m\to R^n$
has a non-zero kernel. Thus, $R^m\hookrightarrow R^n$ implies $m\le n$.
$(b\Rightarrow c)$ If $R^n\twoheadrightarrow R^m$, then since $R^m$ is free,
there is a section $R^m\hookrightarrow R^n$ (which must be injective), so $m\le n$.
$(c\Rightarrow d)$ If $R^m\cong R^n$, then we have surjections both ways, so
$m\le n\le m$, so $m=n$.
\end{proof}
\begin{corollary}
Let $R\ne 0$, and let $A$ be an $n\times n$ matrix. Then the following are equivalent:
(1) $\det A\in \mathcal{C}(R)$; (2) the columns of $A$ are linearly independent; (3) the rows of
$A$ are linearly independent.
\end{corollary}
\begin{proof}
The columns are linearly independent if and only if Equation \ref{lec02ast} has no
non-trivial solutions, which occurs if and only if the rank of $A$ is equal to $n$,
which occurs if and only if $\det A$ is a non-zero-divisor.
The transpose argument shows that $\det A\in \mathcal{C}(R)$ if and only if the rows are
independent.
\end{proof}
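To connect this with the earlier exercise, here is a small example over $\mathbb{Z}/6\mathbb{Z}$ (easily checked by hand).
\begin{example}
Over $R=\mathbb{Z}/6\mathbb{Z}$, the matrix $diag(1,5,5)$ has determinant $25=1\in\mathcal{C}(R)$, so its rows and columns are linearly independent. On the other hand, $diag(1,2,3)$ has determinant $6=0$, and indeed its columns satisfy the non-trivial dependence $0\cdot(1,0,0)^t+3\cdot(0,2,0)^t+2\cdot(0,0,3)^t=0$.
\end{example}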
\begin{proof}[Proof of the Theorem]
A vector $x$ satisfies $0=Ax = \sum \alpha_i x_i$, so a non-trivial solution of Equation \ref{lec02ast}
is precisely a non-trivial dependence among the $\alpha_i$; thus $(1)$ and $(2)$ are equivalent.
$(2\Rightarrow 3)$ Let $x\in R^m$ be a non-zero solution to $A\cdot x=0$. If $n<m$,
then $rk(A)\le n <m$ and we're done. Otherwise, let $B$ be any $m\times m$ minor of
$A$ (so $B$ has as many columns as $A$, but perhaps is missing some rows). Then
$Bx=0$; multiplying by the adjoint of $B$, we get $(\det B)x=0$, so each $x_i$
annihilates $\det B$. Since $x\neq 0$, some $x_i$ is non-zero, and we have shown that
$x_i\cdot \mathcal{D}_m(A)=0$, so $rk(A)<m$.
$(3\Rightarrow 2)$ Assume $r=rk(A)<m$. We may assume $r< n$ (adding a row of
zeros to $A$ if needed). Since $\mathcal{D}_{r+1}(A)$ is not dense, we may fix a nonzero element $a$ such that $a\cdot \mathcal{D}_{r+1}(A)=0$.
If $r=0$, then take $x$ to be the vector with an $a$ in each place. Otherwise, since
$\mathcal{D}_r(A)$ is dense and $a\neq 0$, there
is some $r\times r$ minor whose determinant is not annihilated by $a$. We may assume it is the upper left
$r\times r$ minor. Let $B$ be the upper left $(r+1)\times (r+1)$ minor, and let $d_1$,
\dots, $d_{r+1}$ be the cofactors along the $(r+1)$-th row. We claim that the column
vector $x = (ad_1,\dots, ad_{r+1},0,\dots, 0)$ is a solution to Equation
\ref{lec02ast} (note that it is non-zero, because $d_{r+1}$ is the determinant of the upper left
$r\times r$ minor, so $ad_{r+1}\neq 0$ by the choice above). To
check this, consider the product of $x$ with the $i$-th row, $(a_{i1},\dots, a_{im})$.
This will be equal to $a$ times the determinant of $B'$, the matrix $B$ with the
$(r+1)$-th row replaced by the $i$-th row of $A$. If $i\le r$, the determinant of $B'$
is zero because it has two equal rows. If $i> r$, then $B'$ is an $(r+1)\times
(r+1)$ minor of $A$, so its determinant is annihilated by $a$.
\end{proof}
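To make the last step concrete, here is a small instance of the case $r=0$ of the construction (all computations are in $\mathbb{Z}/6\mathbb{Z}$).
\begin{example}
Take $R=\mathbb{Z}/6\mathbb{Z}$ and $A=\begin{pmatrix}2 & 4\end{pmatrix}\in\mathbb{M}_{1,2}(R)$. Then $\mathcal{D}_1(A)=(2)$ is not dense, as $3\cdot(2)=0$, so $rk(A)=0<2=m$. Taking $a=3$, the vector $x=(3,3)^t$ is a non-trivial solution: $2\cdot 3+4\cdot 3=18=0$ in $R$. Thus the columns of $A$ are linearly dependent even though neither column is zero.
\end{example}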
\begin{corollary}
Suppose a module ${}_RM$ over a non-zero ring $R$ is generated by $\beta_1,\dots,
\beta_n\in M$. If $M$ contains $n$ linearly independent vectors, $\gamma_1,\dots,
\gamma_n$, then the $\beta_i$ form a free basis.
\end{corollary}
\begin{proof}
Since the $\beta_i$ generate, we have $\gamma = \beta\cdot A$ for some $n\times n$
matrix $A$. If $Ax=0$ for some non-zero $x$, then $\gamma \cdot x = \beta Ax = 0$,
contradicting independence of the $\gamma_i$. By Theorem \ref{lec02T:McCoy3},
$rk(A)=n$, so $d=\det(A)$ is a regular element.
Over $R[d^{-1}]$, there is an inverse $B$ to $A$. If $\beta\cdot
y=0$ for some $y\in R^n$, then $\gamma By = \beta y=0$. But the $\gamma_i$ remain
independent over $R[d^{-1}]$ since we can clear the denominators of any linear
dependence to get a dependence over $R$ (this is where we use that $d\in \mathcal{C}(R)$), so
$By=0$. But then $y=(AB)y=A(By)=0$. Therefore, the $\beta_i$ are linearly independent,
so they are a free basis for $M$.
\end{proof}
\section{Finite presentation}
\label{noetheriandescent}
\subsection{Compact objects in a category}
Let $\mathcal{C}$ be a category.
In general, colimits tell one how to map \emph{out of} them, not into them,
and there is no a priori reason to expect that, for a functor $F: I \to \mathcal{C}$,
the natural map
\begin{equation} \label{filtcolimhom} \varinjlim_i \hom(X, Fi) \to \hom(X,
\varinjlim Fi) \end{equation}
is an isomorphism.
In practice, though, it often happens that when $I$ is
\emph{filtered}, the above map is an isomorphism. For simplicity, we shall
restrict to the case when $I$ is a \emph{directed} set
(which is naturally a category); in this case, we call the limits
\textbf{inductive}.
\begin{definition}
The object $X$ is called \textbf{compact} if \eqref{filtcolimhom} is an
isomorphism whenever $I$ is a directed set, i.e.\ for every inductive colimit.
\end{definition}
The following example motivates the term ``compact.''
\begin{example}
Let $\mathcal{C}$ be the category of Hausdorff topological spaces and
\emph{closed inclusions} (so that we do not obtain a full subcategory of the
category of topological spaces), and let $X$
be a compact space. Then $X$ is a compact object in $\mathcal{C}$.
Indeed, suppose $\left\{X_i\right\}_{i \in I}$ is an inductive system of
Hausdorff spaces and closed inclusions. Suppose given a map $f:X \to \varinjlim
X_i$. Then each $X_i$ is a closed subspace of the colimit, so we need to show that
$f(X)$ lands inside one of the $X_i$. This will easily imply compactness.
Suppose not. If $I$ had a largest element, the colimit would be the corresponding
$X_i$ and there would be nothing to prove, so assume it does not; then, $I$ being
directed, we may choose a countable chain $T = \{t_1 < t_2 < \cdots\} \subset I$.
For each $t \in T$, since $f(X) \not\subset X_t$, we get an element $x_t \in
f(X)$ that belongs to no $X_i$ for $i \le t$. Note that if $t' \in T$, then it
follows that $X_{t'} \cap \left\{x_t\right\}$ is finite.
In particular, if $F \subset \left\{x_t\right\}$ is \emph{any} subset, then
$X_{t'} \cap F$ is closed for each $t' \in T$.
Thus $\varinjlim_T X_{t'}$ contains the set $F$ as a closed
subset, and since this embeds as a closed subset of $\varinjlim X_i$, $F$ is
thus closed in there too.
The induced topology on $\left\{x_t\right\}$ is thus the discrete one.
We have thus seen that the set $\left\{x_t\right\}$ is an infinite, discrete
closed subset of $\varinjlim X_i$. However, it is a subset of $f(X)$ as well,
which is compact, so it is itself compact; this is a contradiction.
This example allows one to run the ``small object argument'' of Quillen for
the category of topological spaces, and in particular to construct the
\emph{Quillen model structure} on it. See \cite{Ho07}. As a simple example,
we may note that if we have a sequence of closed subspaces (such as the
skeleton filtration of a CW complex)
\[ X_1 \subset X_2 \subset \dots \]
it then follows easily from this that (where $[K, -]$ denotes homotopy
classes of maps)
\[ [K, \varinjlim X_i] = \varinjlim [K, X_i] \]
for any compact space $K$. Taking $K$ to be a sphere, one finds that the
homotopy group functors commute with inductive limits of closed inclusions.
\end{example}
This notion is closely related to that of ``smallness'' introduced in
\cref{smallness} to prove an object can be imbedded in an injective module.
For instance, smallness with respect to any limit ordinal and the class of all
maps is basically equivalent to compactness in this sense.
\add{this should be clarified. Can we replace any inductive limit by an
ordinal one, assuming there's no largest element?}
\subsection{Finitely presented modules}
Let us recall that a module $M$ over a ring $R$ is said to be \emph{finitely
presented} if there is an exact sequence
\[ R^m \to R^n \to M \to 0. \]
In particular, $M$ can be described by a ``finite amount of data:'' $M$ is
uniquely determined by the matrix describing the map $R^m \to R^n$.
Thus, to hom out of $M$ into an $R$-module $N$ is to specify the images of the $n$ generators
(the images of the standard basis elements of $R^n$), that is, to
pick $n$ elements of $N$, and these
images are required to satisfy $m$ relations (which come from the map $R^m \to
R^n$).
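As a simple special case of this principle, consider a cyclic finitely presented module.
\begin{example}
If $M=R/(a)$ for some $a\in R$, presented by $R\stackrel{a}{\to} R\to M\to 0$, then for any $R$-module $N$ we have
\[ \hom_R(R/(a), N)\cong \{n\in N: an=0\}: \]
a map out of $M$ is determined by the image $n$ of the generator $\overline{1}$, subject to the single relation $an=0$.
\end{example}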
Note that the theory of finitely presented modules is only special and new
when one works with non-noetherian rings; over a noetherian ring, every
finitely generated module is finitely presented. Nonetheless, the techniques
described here are useful even if one restricts one's attention to noetherian
rings.
\begin{exercise}
Show that a finitely generated \emph{projective} module is finitely presented.
\end{exercise}
\begin{proposition} \label{fpcompact}
In the category of $R$-modules, the compact objects are the finitely presented
ones.
\end{proposition}
\begin{proof}
First, let us show that a finitely presented module is in fact compact.
Suppose $M$ is finitely presented and $\left\{N_i, i \in I\right\}$ is an
inductive system of modules. Suppose given $M \to \varinjlim N_i$; we show
that it factors through one of the $N_i$.
There are finitely many generators $m_1, \dots,
m_n$, and in the colimit
\[ N = \varinjlim N_i , \]
they must all lie in the image of some $N_j, j \in I$. Thus we can choose
$r^{(j)}_1, \dots, r^{(j)}_n$ such that $r^{(j)}_k$ and $m_k$ both map to the
same thing in $\varinjlim N_i$.
This alone does not enable us to conclude that $M \to \varinjlim N_i$
factors through $N_j$, since the relations between the $m_1, \dots, m_n$ may not be
satisfied between the putative liftings $r^{(j)}_k$ to $N_j$.
However, we know that the relations \emph{are} satisfied when we push down to
the colimit. Since there are only finitely many relations that we need to
have satisfied, we can choose $j' > j$
such that the relations all do become satisfied by the images of the
$r^{(j)}_k$ in $N_{j'}$. We thus get a lifting $M \to N_{j'}$.
We see from this that the map
\[ \varinjlim \hom_R(M, N_i) \to \hom_R( M, \varinjlim N_i) \]
is in fact surjective. To see that it is injective, note that if two maps $f,g:M
\to N_j$ become the same map $M \to \varinjlim N_i$, then the images under $f$ and $g$ of the finite set of
generators $m_1, \dots, m_n$ must become equal in some
$N_{j'}, j' > j$; thus $f$ and $g$ already become equal in $N_{j'}$.
Now suppose $M$ is a compact object in the category of $R$-modules.
First, we claim that $M$ is finitely generated. Indeed, we know that $M$ is
the \emph{inductive} limit of its finitely generated submodules.
Thus we get a map
\[ M \to \varinjlim_{M_F \subset M, \text{f. gen}} M_F ,\]
and by hypothesis it factors as $M \to M_F$ for some $M_F$. This
implies that $M \to M_F \to M $ is the identity, and so $M = M_F$ and $M$ is
finitely generated.
Finally, we need to see that $M$ is finitely presented. Choose a surjection
\[ R^n \twoheadrightarrow M \]
and let the kernel be $K$. We would like to show that $K$ is finitely
generated. Now $M \simeq R^n/K$, and consequently $M$ is the inductive limit
$\varinjlim R^n/ K_F$ for $K_F$ ranging over the finitely generated submodules
of $K$. By compactness, the natural isomorphism $M \simeq \varinjlim R^n/K_F$
factors through some $R^n/K_F$; that is, there is a map $u: M \to R^n/K_F$ whose composite with
the projection $\pi: R^n/K_F \to R^n/K = M$ is the identity. Hence
$R^n/K_F \simeq M \oplus \ker \pi$, where $\ker \pi = K/K_F$; in particular $K/K_F$ is a
quotient of the finitely generated module $R^n/K_F$, hence finitely generated. Since $K_F$ is
also finitely generated, so is $K$, and $M$ is finitely presented.
\end{proof}
The above argument shows, incidentally, that if $M$ is finitely
\emph{generated}, then
$\varinjlim \hom_R(M, N_i) \to \hom_R( M, \varinjlim N_i) $ is
always \emph{injective.}
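The map can indeed fail to be surjective when $M$ is only finitely generated; one standard example (there are many) is the following.
\begin{example}
Let $R=k[x_1,x_2,\dots]$ be a polynomial ring in countably many variables over a field $k$, let $I=(x_1,x_2,\dots)$, and let $M=R/I\simeq k$, which is finitely generated but not finitely presented. Consider the inductive system $N_i=R/(x_1,\dots,x_i)$ with the quotient maps as transition maps, so that $\varinjlim N_i=M$. The identity $M\to\varinjlim N_i$ does not factor through any $N_i$: an $R$-module map $M\to N_i\simeq k[x_{i+1},x_{i+2},\dots]$ must send $\overline{1}$ to an element annihilated by $x_{i+1}$, hence to $0$. So $\varinjlim\hom_R(M,N_i)\to\hom_R(M,\varinjlim N_i)$ is not surjective, and $M$ is not compact.
\end{example}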
\add{any module is an inductive limit of finitely presented modules}
\add{Lazard's theorem on flat modules}
\subsection{Finitely presented algebras}
Let $R$ be a commutative ring.
\begin{definition}
An $R$-algebra $A$ is called \textbf{finitely presented} if $A$ is isomorphic
to an $R$-algebra of the form $R[x_1, \dots, x_n]/I$, where $I \subset R[x_1,
\dots, x_n]$ is a finitely generated ideal in the polynomial ring.
A morphism of rings $\phi: R \to R'$ is called \textbf{finitely presented} if
it makes $R'$ into a finitely presented $R$-algebra.
\end{definition}
For instance, a quotient of $R$ by a finitely generated ideal is a finitely
presented $R$-algebra. If $R$ is noetherian, then by the Hilbert basis
theorem, an $R$-algebra is finitely presented if and only if it is finitely
generated.
\begin{proposition}
The finitely presented $R$-algebras are the compact objects in the category of
$R$-algebras.
\end{proposition}
We leave the proof to the reader, as it is analogous to \cref{fpcompact}.
The notion of a finitely presented algebra is analogous to that of a finitely
presented module, insofar as a finitely presented algebra can be specified by a
finite amount of ``data.''
Namely, this data consists of the generators $x_1, \dots, x_n$ and the
finitely many relations that they are required to satisfy (these finitely
many relations can be taken to be generators of $I$).
Thus, to hom out of $A$ is ``easy:'' to map into an $R$-algebra $B$, we need
to specify $n$ elements of $B$, which have to satisfy the finitely many
relations that generate the ideal $I$.
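For instance, here are two familiar (easily verified) cases: giving an $R$-algebra map $R[x]/(x^2)\to B$ is the same as giving an element $b\in B$ with $b^2=0$, and giving an $R$-algebra map $R[x,y]/(xy-1)\to B$ is the same as giving a unit of $B$.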
Like most nice types of morphisms, finitely presented morphisms have a
``sorite.''
\begin{proposition}[Le sorite for finitely presented morphisms] \label{soritefp}
Finitely presented morphisms are preserved under composition and base-change.
That is, if $\phi: A \to B$ is a finitely presented morphism, then:
\begin{enumerate}
\item If $A'$ is any $A$-algebra, then $\phi \otimes A': A' \to B \otimes_A
A'$ is finitely presented.
\item If $\psi: B \to C$ is finitely presented, then $C$ is finitely
presented over $A$ (that is, $\psi \circ \phi$ is finitely presented).
\end{enumerate}
\end{proposition}
\begin{proof}
First, we show that finitely presented morphisms are preserved under base-change.
Suppose $B$ is finitely presented over $A$, thus isomorphic to a quotient $A[x_1, \dots,
x_n]/I$, where $I$ is a finitely generated ideal in the polynomial ring. Then
for any $A$-algebra $A'$, we have that
\[ B \otimes_A A' = A'[x_1, \dots, x_n]/ I' \]
where $I'$ is the ideal in $A'[x_1, \dots, x_n]$ generated by the image of $I$. (This
follows by right-exactness of the tensor product.) Thus $I'$ is finitely
generated and $B \otimes_A A'$ is finitely presented over $A'$.
Next, we show that finitely presented morphisms are closed under composition.
Suppose $A \to B$ and $B \to C$ are finitely presented morphisms. Then $B$ is isomorphic as an
$A$-algebra to $A[x_1, \dots, x_n]/I$ and $C$ is isomorphic as a $B$-algebra to
$B[y_1, \dots, y_m]/J$, where $I, J$ are finitely generated ideals.
Thus $C \simeq A[x_1, \dots, x_n, y_1, \dots, y_m]/(I+\widetilde{J})$, where $\widetilde{J}$ is generated by lifts
to $A[x_1, \dots, x_n, y_1, \dots, y_m]$ of the finitely many generators of $J$. This is clearly a
finitely generated ideal.
\end{proof}
Finitely presented morphisms have a curious cancellation property that we
tackle next. In algebraic geometry, one often finds properties $\mathcal{P}$ of morphisms
of schemes such that if a composite
\[ X \stackrel{f}{\to} Y \stackrel{g}{\to} Z \]
has $\mathcal{P}$, then so does $f$ (possibly with weak conditions on $g$).
One example of this (in any category) is the class of monomorphisms. A more
interesting example (for schemes) is the property of separatedness; the
interested reader
may consult \cite{EGA}.
In our case, we shall illustrate this cancellation phenomenon in the category
of commutative rings. Since arrows for schemes go in the opposite direction to
arrows of rings, this will look slightly different.
\begin{proposition}
Suppose we have a composite
\[ A \stackrel{f}{\to} B \stackrel{g}{\to} C \]
such that $g \circ f: A \to C$ is finitely presented, and $f$ is of finite
type (that is, $B$ is a finitely generated $A$-algebra). Then $g: B \to C$
is finitely presented.
\end{proposition}
\begin{proof}
We shall prove this using the fact that the \emph{codiagonal} map in the
category of commutative rings is finitely presented if the initial map is finitely generated:
\begin{lemma}
Let $S$ be a finitely generated $R$-algebra. Then the map $S \otimes_R S \to
S$ is finitely presented.
\end{lemma}
\begin{proof}
We shall show that the kernel $I$ of $S \otimes_R S \to S$ is a \emph{finitely generated} ideal. This will
clearly imply the claim, as $S \otimes_R S \to S$ is obviously a surjection.
To see this, let $\alpha_1, \dots, \alpha_n \in S$ be generators for $S$ as an
$R$-algebra. The claim is that the elements $1 \otimes \alpha_i - \alpha_i
\otimes 1$ generate $I$ as an $S \otimes_R S$-module.
Clearly these live in $I$. Conversely, it is clear that $I$ is generated by
elements of the form $ x \otimes 1 - 1 \otimes x$ (because if $z=\sum x_k
\otimes y_k \in I$, then $z = \sum (x_k \otimes 1) \left( 1 \otimes y_k - y_k
\otimes 1 \right) + \sum x_k y_k \otimes 1$ and the last term vanishes by
definition of $I$, since $\sum x_k y_k = 0$).
In other words, if we define $d(\alpha) = \alpha \otimes 1 - 1 \otimes
\alpha$ for $\alpha \in S$, then $I$ is generated by elements $d(\alpha)$.
Now $d$ is clearly $R$-linear, and we have the identity
\begin{align*} d(\alpha \beta) & = \alpha \beta \otimes 1 - 1 \otimes \alpha
\beta \\
& =
\alpha \beta \otimes 1 - \alpha \otimes \beta + \alpha \otimes \beta - 1 \otimes \alpha
\beta \\
& = (\alpha \otimes 1) d(\beta) + (1 \otimes \beta) d(\alpha).
\end{align*}
Thus $d(\alpha \beta)$ is in the $S \otimes_R S$-module spanned by $d(\alpha)$
and $d(\beta)$.
From this, it is clear that $d(\alpha_1), d(\alpha_2), \dots, d(\alpha_n)$
generate $I$ as a $S \otimes_R S $-module.
\end{proof}
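For instance, in the one-variable case $S=R[x]$, we have $S\otimes_R S\simeq R[x,y]$ (with $x\otimes 1\mapsto x$ and $1\otimes x\mapsto y$); the codiagonal becomes the map $R[x,y]\to R[x]$ sending both $x$ and $y$ to $x$, and its kernel is the principal (hence finitely generated) ideal $(x-y)$, generated by the image of $d(x)=x\otimes 1-1\otimes x$.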
From this lemma, we can deduce the proposition as follows.
We can write $g: B \to C$ as the composite
\[ B \to B \otimes_A C \to C \]
where the first map is the base-change of the finitely presented morphism $A
\to C$ along $A \to B$, and the second is the base-change of the finitely presented
morphism $B \otimes_A B \to B$ (finitely presented by the lemma, since $B$ is of finite type
over $A$) along $B \otimes_A B \to B \otimes_A C$. By \cref{soritefp}, the composite $B \to C$ is finitely
presented.
\end{proof}
\section{Inductive limits of rings}
We shall now find ourselves in the following situation. We shall have an
inductive system $\left\{A_\alpha\right\}_{\alpha \in I}$ of rings, indexed by a
directed set $I$. With $A = \varinjlim A_\alpha$, we will be interested in relating
categories of modules and algebras over $A$ to the categories over $A_\alpha$.
The basic idea will be as follows. Given an object (e.g.\ a module) $M$ of finite presentation over
$A$, we will be able to find an object $M_\alpha$ of finite presentation over some
$A_\alpha$ such that $M$ is obtained from $M_\alpha$ by base-change along $A_\alpha
\to A$.
Moreover, given a morphism $M \to N$ of objects over $A$, we will be able to
``descend'' this to a morphism $M_\alpha \to N_\alpha$ of objects of finite
presentation over some $A_\alpha$, which will induce $M \to N$ by base-change.
In other words, the \emph{category} of objects over $A$ of finite presentation
will be the inductive limit of the \emph{categories} of such objects over the
$A_\alpha$.
\subsection{Prologue: fixed points of polynomial involutions over $\mathbb{C}$}
Following \cite{Se09}, we give an application of these ideas to a simple
concrete problem. This will help illustrate some of them, even though we have
not formally developed the machinery yet.
If $k$ is an algebraically closed field, a map $k^n \to k^n$ is called \emph{polynomial} if each of
the components is a polynomial function in the input coordinates.
So if we identify $k^n $ with the closed points of $\spec
k[x_1, \dots, x_n]$, then a polynomial function is just the
restriction to the closed points of an endomorphism of $\spec
k[x_1, \dots, x_n]$ induced by an algebra endomorphism.
\begin{theorem}
Let $F: \mathbb{C}^n \to \mathbb{C}^n$ be a polynomial map with $F \circ F =
1_{\mathbb{C}^n}$. Then $F$ has a fixed point.
\end{theorem}
We can phrase this alternatively as follows. Let $\sigma: \mathbb{C}[x_1,
\dots, x_n] \to \mathbb{C}[x_1, \dots, x_n]$ be a $\mathbb{C}$-involution.
Then the map on the $\spec$'s has a fixed point (which is a closed
point\footnote{One can show that if there is a fixed point, there is a fixed
point that is a closed point.}).
\begin{proof}
It is clear that the presentation of $\sigma$ involves only a finite amount of
data, so, as in the descent results of this section, we can construct a finitely generated
$\mathbb{Z}$-algebra $R \subset \mathbb{C}$ and an involution
\[ \overline{\sigma}: R[x_1, \dots, x_n] \to R[x_1, \dots, x_n] \] such that $\sigma$ is obtained from
$\overline{\sigma}$ by base-changing $R \to \mathbb{C}$.
We can assume that $\frac{1}{2} \in R$ as well.
To see this explicitly, we need only add to $R$ the coefficients of the
polynomials $\sigma(x_1), \dots, \sigma(x_n)$, together with $\frac{1}{2}$, and
consider the $\mathbb{Z}$-algebra they generate.
Suppose now that the system of equations $\sigma(x_i) - x_i = 0$, $1 \leq i \leq n$,
has no solution in $\mathbb{C}^n$; that is, $F$ has no fixed point. By the Nullstellensatz, this is equivalent to stating that the
finite
system of polynomials $\sigma(x_i) - x_i$ generates the unit ideal in $\mathbb{C}[x_1, \dots,
x_n]$, so that there are polynomials $P_i \in \mathbb{C}[x_1, \dots, x_n]$
such that $\sum P_i \left( \sigma(x_i) - x_i \right) = 1$.
Let us now enlarge $R$ so that the coefficients of the $P_i$ lie in $R$.
Since the coefficients of the $\sigma(x_i)$ are already in $R$, we find
that the polynomials $\sigma(x_i) - x_i$ will generate the unit ideal in
$R[x_1, \dots, x_n]$.
If $R'$ is a homomorphic image of $R$, then this will be true in $R'[x_1,
\dots, x_n]$.
Choose a maximal ideal $\mathfrak{m} \subset R$. Then $R/\mathfrak{m}$ is a
finite field (as $R$ is a finitely generated $\mathbb{Z}$-algebra), and $\overline{\sigma}$ becomes an involution
\[ (R/\mathfrak{m})[x_1, \dots, x_n] \to (R/\mathfrak{m})[x_1, \dots, x_n]. \]
If we let $k$ be the algebraic closure of $R/\mathfrak{m}$, then we
have an involution
\[ \widetilde{\sigma}: k[x_1, \dots, x_n] \to k[x_1, \dots, x_n]. \]
But the map induced by $\widetilde{\sigma}$ on $k^n$ has \emph{no fixed points.} This follows because the
$\widetilde{\sigma}(x_i) - x_i$ generate the unit ideal in $k[x_1, \dots,
x_n]$ (because we can consider the images of the $P_i$ in $k[x_1, \dots, x_n]$).
Moreover, $\mathrm{char}\, k \neq 2$ as $\frac{1}{2} \in R$, so $2$ is
invertible in $k$ as well.
So from the assumed fixed-point-free involution $F$ (or $\sigma$), we have
produced a
polynomial involution $k^n \to k^n$ with no fixed points. This contradicts the following lemma, which therefore completes the proof.
\begin{lemma} \label{easycaseoffptheorem}
If $k$ is the algebraic closure of $\mathbb{F}_p$ for $p \neq 2$, then any
involution $F: k^n \to k^n$ which is a polynomial map has a fixed point.
\end{lemma}
\begin{proof}
This is very simple. There is a finite field $\mathbb{F}_q$ containing all the
coefficients of $F$; thus $F$ restricts to a map
\[ \mathbb{F}_q^n \to \mathbb{F}_q^n \]
which is necessarily an involution. But $q^n$ is odd (as $p \neq 2$), and an involution on a finite set of odd
cardinality necessarily has a fixed point (otherwise every orbit would have size two, and the cardinality would be even).
\end{proof}
\end{proof}
\begin{remark}
An alternative approach to the above proof is to use a little bit of model
theory. There is a general principle due to Abraham Robinson, that can be
stated roughly as follows. If a sentence $P$ in the first-order logic of fields
(that is, one is allowed to refer to the elements $0,1$ and to addition and
multiplication; in addition, one is allowed to make existential and universal
quantifications, negations, disjunctions, and conjunctions) has the property
that $P$ is true for an algebraically closed field of characteristic $p$ for
each $p \gg 0$, then $P$ holds in \emph{every} algebraically closed field of
characteristic zero.
This principle follows from a combination of the compactness theorem and the
fact that the theory of algebraically closed fields of a fixed characteristic
is \emph{complete}: any statement is true in all of them, or in none of them.
Consider the statement $S_{n,d}$ that for any polynomial map $F: k^n \to k^n$
consisting of polynomials of degree $\leq d$ such that $F \circ F = 1_{k^n}$, there is
$(x_1, \dots, x_n) \in k^n$ with $F(x_1, \dots, x_n) = (x_1, \dots, x_n)$.
Then $S_{n,d}$ is clearly a statement of first-order logic.
\cref{easycaseoffptheorem} shows that $S_{n,d}$ holds in
$\overline{\mathbb{F}_p}$ whenever $p > 2$. Thus, $S_{n,d}$ holds in
$\mathbb{C}$ by Robinson's principle.
These types of model-theoretic arguments can be used to prove the \textbf{Ax-Grothendieck
theorem}: an injective polynomial map $\mathbb{C}^n \to \mathbb{C}^n$ is
surjective. See \cite{Ma02}.
\end{remark}
\subsection{The inductive limit of categories}
\add{general formalism to clarify all this}
\subsection{The category of finitely presented modules}
Throughout, we let $\left\{A_{\alpha}\right\}_{\alpha \in I}$ be an inductive
system of rings, and $A = \varinjlim A_\alpha$.
We are going to relate the category of finitely presented modules over $A$ to
the categories of finitely presented modules over the $A_\alpha$.
We start by showing that any module over $A$ ``descends'' to one of the
$A_\alpha$.
\begin{proposition} \label{descentfpmodule}
Suppose $M$ is a finitely presented module over $A$. Then there is $\alpha \in
I$ and a finitely presented $A_\alpha$-module $M_\alpha$ such that $M \simeq
M_\alpha \otimes_{A_\alpha} A$.
\end{proposition}
\begin{proof}
Indeed, $M$ is the cokernel of a morphism
\[ f: A^m \to A^n \]
by definition. This morphism is described by an $m$-by-$n$ (or $n$-by-$m$,
depending on conventions) matrix with coefficients in $A$. Each of these
finitely many coefficients lies in the image of some $A_\alpha \to A$ (by
definition of the inductive limit), and since $I$ is directed we can choose a single $\alpha$ ``large'' enough that
every coefficient in the matrix is in the image of $A_\alpha \to A$.
Then we have a morphism
\[ f_\alpha: A_\alpha^m \to A_\alpha^n \]
that induces $f$ by base-change to $A$. Then we may let $M_\alpha$ be the
cokernel of $f_\alpha$ since the tensor product is right-exact.
\end{proof}
Now, we want to show that if two finitely presented modules
over $A_\alpha$ become isomorphic after base-change to $A$, then they already ``become isomorphic'' at some
$A_\beta $ (for $\beta > \alpha$).
We shall actually prove a more general result.
Namely, we shall see that
a morphism at the colimit ``descends'' to one of the steps.
\begin{proposition} \label{colimfpmodules} We keep the same notation as above.
Suppose $M_\alpha, N_\alpha$ are finitely presented modules over $A_\alpha$.
Write $M_\beta = M_\alpha \otimes_{A_\alpha} A_\beta, N_\beta = N_\alpha
\otimes_{A_\alpha} A_\beta$ for each $\beta > \alpha$, and $M, N$ for the
base-changes to $A$.
Suppose there is a morphism $f: M \to N$. Then there is $\beta \geq \alpha$ such
that $f$ is obtained by base-changing a morphism $f_\beta: M_\beta \to N_\beta$.
If $f_\beta, f_\gamma$ are any two morphisms that do this, then there is
$\delta \geq \beta, \gamma$ such that $f_\beta, f_\gamma$ become equal when
base-changed to $A_\delta$.
\end{proposition}
The conclusion of this result is then
\[ \hom_A(M, N) = \varinjlim_{\beta} \hom_{A_\beta}(M_\beta, N_\beta). \]
The last part is essentially the ``uniqueness'' that we were discussing previously.
\begin{proof} Suppose the transition maps $A_\alpha \to A_\beta$ are denoted
$\phi_{\alpha \beta}$, and the natural maps $A_\alpha \to A$ are denoted
$\phi_\alpha$.
We know that there are exact sequences
\[ A_\alpha^m \stackrel{\textbf{M}}{\to} A_\alpha^n \to M_\alpha \to 0, \]
and
\[ A_\alpha^p \to N_\alpha \to 0. \]
These are preserved by tensoring with $A$. Here $\textbf{M}$ is a suitable matrix.
So we get exact sequences
\begin{gather*}
A^m \stackrel{\phi_\alpha(\textbf{M})}{\to} A^n \to M \to 0 \\
A^p \to N \to 0
\end{gather*}
and the freeness of $A^n$ shows that the composite $A^n \to M \stackrel{f}{\to} N$ can be
lifted along the surjection $A^p \to N$ to a map $A^n \to A^p$, given by some matrix $\textbf{M}'$ with coefficients
in $A$. By construction, the composite $A^m \to A^n \stackrel{\textbf{M}'}{\to} A^p \to N$ is zero,
because it equals the composite $A^m \to A^n \to M \stackrel{f}{\to} N$, and $A^m \to A^n \to M$ is zero.
Now $\textbf{M}'$ can be written as $\phi_\beta(\textbf{M}'')$ for some matrix $\textbf{M}''$
with coefficients in some $A_\beta$, $\beta \geq \alpha$, or in other words a map $A_\beta^n \to
A_\beta^p$. We would like to use this to get a map $M_\beta \to A_\beta^p \to
N_\beta$, and for this we need the composite $A_\beta^m \to A_\beta^n \to A_\beta^p \to N_\beta$
to be zero, so that $A_\beta^n \to N_\beta$ factors through $M_\beta$. This composite is
determined by the images in $N_\beta$ of the $m$ standard basis vectors; these need not be zero,
but they do become zero in $N = \varinjlim_{\beta' \geq \beta} N_{\beta'}$ (since the corresponding
composite over $A$ is zero), hence already in $N_{\beta'}$ for some $\beta' \geq \beta$. Replacing
$\beta$ by $\beta'$, we get the desired map $M_\beta \to N_\beta$, which base-changes to $f$.
Finally, we need uniqueness. Suppose $f_\beta: M_\beta \to N_\beta$ and
$f_\gamma: M_\gamma \to N_\gamma$ both are such that the base-changes to $A$
are the same morphism $M \to N$. We need to find a $\delta$ as in the
proposition. By replacing $\beta, \gamma$ with a mutual upper bound, we may
suppose that $\beta = \gamma$; we shall write the two morphisms as $f_\beta,
g_\beta$ then.
Consider the pull-backs $A_\beta^n \stackrel{f_\beta, g_\beta}{\to } N_\beta$.
These uniquely determine $f_\beta, g_\beta$ (since the map $A_\beta^n \to
M_\beta$ is a surjection). These pull-backs are specified by $n$ elements of
$N_\beta$. If the base-changes of $f_\beta, g_\beta$ via $\phi_{\beta}:
A_\beta \to A$ are the same, then these $n$ elements of $N_\beta$ become the
same in $N = \varinjlim_{\beta' \geq \beta} N_\beta \otimes_{A_\beta} A_{\beta'}$; thus they
become equal at some finite stage, so there is $\beta' > \beta$ such that the
base-changes satisfy $f_{\beta'} = g_{\beta'}$.
\end{proof}
\begin{remark}
The idea of the above proof was to exploit the idea that the homomorphism
carries a finite amount of data, that is the images of the generators and the
condition that these images satisfy finitely many relations. In essence, it is
analogous to the argument that finitely presented modules over a \emph{fixed}
ring are compact objects in that category.
\end{remark}
\begin{remark}
In fact, we can give an alternative (and slightly simpler) argument for \cref{colimfpmodules}.
We know that
\[ \hom_{A_\beta}(M_\beta, N_\beta) = \hom_{A_\alpha}(M_\alpha, N_\beta) \]
by the adjoint property of the tensor product, and similarly
\[ \hom_{A}(M,N) = \hom_{A_\alpha}(M_\alpha, N). \]
So the assertion we are trying to prove is
\[ \hom_{A_\alpha}(M_\alpha, N) = \varinjlim_{\beta} \hom_{A_\alpha}(M_\alpha,
N_\beta) , \]
which follows from \cref{fpcompact}.
\end{remark}
\begin{exercise}
Give a proof of the following claim. If $M$ is a finitely generated module
over a noetherian ring $R$, $\mathfrak{p} \in \spec R$ is such that
$M_{\mathfrak{p}}$ is free over $R_{\mathfrak{p}}$, then there is $f \in R - \mathfrak{p}$ such that
$M_f$ is free over $R_f$.
\end{exercise}
\subsection{The category of finitely presented algebras}
We can treat the category of finitely presented algebras over such an
inductive limit in a similar manner.
As before, let $\left\{A_\alpha\right\}_{\alpha \in I}$ be an inductive system
of rings with $A = \varinjlim A_\alpha$.
For each $\alpha$, there is a functor from the category of finitely presented $A_\alpha$-algebras
to the category of finitely presented $A$-algebras sending $C \mapsto C
\otimes_{A_\alpha} A$.
(Note that morphisms of finite presentation are preserved under base-change by
\cref{soritefp}.)
\begin{proposition}
Suppose $B$ is a finitely presented $A$-algebra. Then there is $\alpha \in I$
and a finitely presented $A_\alpha$-algebra $B_\alpha$ such that $B \simeq
B_\alpha \otimes_{A_\alpha} A$.
\end{proposition}
\begin{proof}
This is analogous to the proof of \cref{descentfpmodule}.
\end{proof}
\add{analog of the next result}
\subsection{$\spec$ and inductive limits}
Suppose $\left\{A_\alpha\right\}_{\alpha\in I}$ is an inductive system of
commutative rings, as before; we let $A = \varinjlim A_\alpha$.
Since $\spec$ is a contravariant functor, we thus find that the $\spec A_\alpha$
form a \emph{projective} system of topological spaces.\footnote{Or schemes.}
We are now interested in relating $\spec A$ to the individual $\spec A_\alpha$.
\begin{proposition}
$\spec A$ is the projective limit $\varprojlim \spec A_\alpha$ in the category
of topological spaces.
\end{proposition}
Recall that if $\left\{X_\alpha\right\}$ is a projective system of topological
spaces with transition maps $\phi_{\beta \alpha}: X_\beta \to X_\alpha$ whenever $\alpha \leq
\beta$, then the projective limit $\varprojlim X_\alpha$ can be constructed as
follows. One considers the subset of $\prod X_\alpha$ consisting of sequences
$(x_\alpha)$ such that $\phi_{\beta \alpha}(x_\beta) = x_\alpha$ whenever
$\alpha \leq \beta$. One can easily check that this has the universal property
of the projective limit.
\begin{proof}
Let us first verify that the assertion is true as \emph{sets.} There are maps
\[ \spec A \to \spec A_\alpha \]
for each $\alpha \in I$, which are obviously compatible (since the
$\left\{A_\alpha\right\}$ form an inductive system) so that they lead to a
(continuous) map of topological spaces
\[ \spec A \to \varprojlim \spec A_\alpha. \]
We first verify injectivity. Suppose two primes $\mathfrak{p}, \mathfrak{p}'$ were sent to the same element
of $\varprojlim \spec A_\alpha$. This means that if $\phi_\alpha: A_\alpha \to
A$ is the natural morphism for each $\alpha$, we have
$\phi_\alpha^{-1}(\mathfrak{p}) = \phi_\alpha^{-1}(\mathfrak{p}')$ for all
$\alpha$. It follows that the intersections of $\mathfrak{p}, \mathfrak{p}'$
with the image of $A_\alpha$ are identical; since $A$ is the union of
$\phi_\alpha(A_\alpha)$ over all $\alpha$, this implies $\mathfrak{p} =
\mathfrak{p}'$.
Now let us verify surjectivity. Suppose given a sequence $\mathfrak{p}_\alpha$
of primes in $A_\alpha$, for each $\alpha$, such that $\mathfrak{p}_\alpha$ is
the pre-image of $\mathfrak{p}_\beta$ under $A_\alpha \to A_\beta$ whenever
$\alpha \leq \beta$. We want to form a prime ideal $\mathfrak{p} \in \spec A$
pulling back to all these. To do this, we decide that $x \in \mathfrak{p}$ if
and only if there exists $\alpha \in I$ such that $x \in
\phi_\alpha(\mathfrak{p}_\alpha)$ (recall that $\phi_\alpha: A_\alpha \to A$
is the natural map). This does not depend on the choice of $\alpha$, and one
verifies easily that this is a prime ideal with the appropriate properties.
We now have to show that the map $\spec A \to \varprojlim \spec A_\alpha$ is
in fact a homeomorphism. We have seen that it is continuous and bijective, so
we must prove that it is open. If $a \in A$, we will be done if we can show
that the image of the basic open set $D(a) \subset \spec A$ is open in
$\varprojlim \spec A_\alpha$.
Suppose $a = \phi_\beta(a_\beta)$ for some $a_\beta \in A_\beta$. Then the
claim is that the image of $D(a)$ is precisely the subset of $\varprojlim
\spec A_\alpha$ consisting of those points whose $\beta$th coordinate (which is in $\spec A_\beta$!)
lies in $D(a_\beta)$. This is clearly an open set, so if we prove this, then
we are done. Indeed, if $\mathfrak{p} \in D(a) \subset \spec A$,
then clearly the preimage in $A_\beta$ cannot contain $a_\beta$ (since
$a_\beta$ maps to $a$). Conversely, if we have a compatible sequence
$\left\{\mathfrak{p}_\alpha\right\}$ of primes such that $\mathfrak{p}_\beta
\in D(a_\beta)$, then the above construction of a prime $\mathfrak{p} \in
\spec A$ from this shows that $a \notin \mathfrak{p}$.
\end{proof}
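As an illustration, here is a familiar special case (the verification that the localizations below form an inductive system with the stated colimit is a standard exercise).
\begin{example}
Let $R$ be a ring and $\mathfrak{p}\in\spec R$. The elements $f\in R-\mathfrak{p}$, preordered by divisibility, form a directed set, and the localizations $\left\{R_f\right\}$ form an inductive system with $\varinjlim R_f=R_{\mathfrak{p}}$. The proposition identifies $\spec R_{\mathfrak{p}}$ with $\varprojlim\spec R_f$, that is, with the intersection of the basic open sets $D(f)\subset\spec R$ over all $f\notin\mathfrak{p}$; this recovers the familiar fact that $\spec R_{\mathfrak{p}}$ consists of the primes of $R$ contained in $\mathfrak{p}$.
\end{example}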