%!TEX program = xelatex
\documentclass[t,12pt,aspectratio=169]{beamer} % 16:9 宽屏比例，适合现代投影
%\usepackage{ctex} % 中文支持
\usepackage{amsmath, amsthm, amssymb, bm} % 数学公式与符号
\usepackage{graphicx}
\usepackage{hyperref}
\usepackage{color}

% 设置段落间距
\usepackage{setspace}
\onehalfspacing
\setlength{\parskip}{1em}  % 增加段落之间的间距为1em

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% 每页增加与上面标题行的距离
\addtobeamertemplate{frametitle}{}{\vspace*{0.7em}}

\usetheme{Madrid} % 主题设置（推荐简洁风格）
\usecolortheme{default} % 可选：seahorse, beaver, dolphin 等

%\linespread{1.3}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% 信息设置
\title{Chapter 19: Stability of Differential Equations}
\author{SCC ET AL}
%\institute[XX大学]{XX大学\quad 数学与统计学院\quad 数学与应用数学专业}
%\date{2025年6月}

\begin{document}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% 封面页
\begin{frame}
  \titlepage
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% 目录页
\begin{frame}{Contents}
  \tableofcontents
\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Section 0
%\section{INTRO.}
\begin{frame}{intro. }
    
In this chapter we investigate the global asymptotic stability of a system of ordinary differential equations on the plane. 

This is closely related to the Jacobian conjecture. 

{\color{red}Holonomic} modules will make a special appearance in \S2.

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Section 1
\section{Asymptotic Stability}
\begin{frame}[allowframebreaks]{A. }

We begin with some basic facts about the stability of singular points of systems of differential equations. 

Let
\[
G : \mathbb{R}^n \rightarrow \mathbb{R}^n
\]
be a function of class $C^r$ for some $r > 2$, and assume that $G(0) = 0$. 

Consider the differential equation
\begin{equation}
\dot{X} = G(X).
\label{tag-1-1}
\end{equation}

By the uniqueness theorem [Arnold 81, Ch. 2, \S8.3], the solution $\phi$ with initial condition $\phi(0) = 0$ is $\phi = 0$. 

We are interested in the behaviour of solutions with neighbouring initial conditions. 

The singular point $X = 0$ of equation (\ref{tag-1-1}) is {\color{red}asymptotically stable} if:

\begin{enumerate}
    \item Given $\epsilon > 0$, there exists $\delta > 0$ (depending only on $\epsilon$ and not on $t$) such that, for every $P_0$ with $|P_0| < \delta$, the solution $\phi$ of (\ref{tag-1-1}) with initial condition $\phi(0) = P_0$ can be extended to the whole half line $t > 0$ and satisfies $|\phi(t)| < \epsilon$ for every $t > 0$.
    \item There exists $\eta > 0$ such that $\lim_{t \to +\infty} \phi(t) = 0$ for all solutions $\phi$ of (\ref{tag-1-1}) which satisfy $|\phi(0)| < \eta$.
\end{enumerate}

Condition (1) above means that if the solution is initially within a ball of radius $\delta$ around the origin then it will never leave a ball of radius $\epsilon$. 

Asymptotic stability is easy to determine for linear systems.


\textbf{Theorem 1.2. }% 1.2. 
Let $A$ be an $n \times n$ matrix with entries in $\mathbb{R}$. 

The origin is an {\color{red}asymptotically stable} singular point of $\dot{X} = A \cdot X$ if and only if all the eigenvalues of $A$ have negative real part.


Lyapunov showed that this can be extended to give a criterion to determine whether $0$ is {\color{red}asymptotically stable} in terms of the linearized system $\dot{X} = JG(0) \cdot X$.

\textbf{Theorem 1.3. }% 1.3. 
 If the real part of every eigenvalue of $JG(0)$ is negative, then $0$ is an {\color{red}asymptotically stable} point of (\ref{tag-1-1}).


For a proof of this theorem see [Arnold 81, Ch. 3, Theorem 23.3]. 

We shall say that $0$ is {\color{red}globally asymptotically stable} if $\eta$ may be taken to be $\infty$ in (2) above. 

For a linear system, if $0$ is {\color{red}asymptotically stable}, then it is {\color{red}globally asymptotically stable}. 

Markus and Yamabe conjectured in [Markus and Yamabe 60] the following criterion for global stability.

\textbf{Conjecture 1.4. }% 1.4. 
The origin is {\color{red}globally asymptotically stable} for (\ref{tag-1-1}) if, for each $P \in \mathbb{R}^n$, the origin is an {\color{red}asymptotically stable} point of the system $\dot{X} = JG(P) \cdot X$.

It is shown in [Gutierrez 93] that the conjecture is true if $n = 2$. 

[Barabanov 88] gives a counter-example for $n \geq 4$. 

In this chapter we will study the case $n = 2$ when $G$ is polynomial, which was settled in [Meisters and Olech 88].

Let us return to the hypothesis in Conjecture 1.4. 

For systems on the plane, the Jacobian $JG(P)$ is a $2 \times 2$ matrix. 

By Theorem 1.2 the system $\dot{X} = JG(P) \cdot X$ has the origin as an {\color{red}asymptotically stable} point if and only if the eigenvalues of $JG(P)$ have negative real part. 

Note that since we are in the $2 \times 2$ case, this is equivalent to saying that the matrix $JG(P)$ has positive determinant and negative trace. 

This suggests a definition.

Let $\mathcal{F}$ be the class of $C^1$ maps $F : \mathbb{R}^2 \rightarrow \mathbb{R}^2$ which satisfy the following properties:

\begin{enumerate}
    \item $F(0) = 0$;
    \item $\text{tr} JF(P) < 0$ for all $P \in \mathbb{R}^2$;
    \item $\det JF(P) > 0$ for all $P \in \mathbb{R}^2$.
\end{enumerate}


For an example of a polynomial function in $\mathcal{F}$ which is not linear, see Exercise 4.5.3. 

We may now state the result of Meisters and Olech, [Meisters and Olech 88, Theorem 1].

\textbf{Theorem 1.5. } % 1.5
Let $F$ be a polynomial function in $\mathcal{F}$. Then the origin is a {\color{red}globally asymptotically stable} point of the system $\dot{X} = F(X)$.


One of the key lemmas in the proof of this result has a purely $D$-module theoretic proof due to van den Essen, which we discuss in \S2. 

Before we close this section, let us see how Theorem 1.5 can be applied to the Jacobian conjecture.

\textbf{Proposition 1.6. } %1.6
Suppose that the origin is a {\color{red}globally asymptotically stable} point of $\dot{X} = F(X)$ for every polynomial map $F \in \mathcal{F}$. 

Then the polynomial maps in $\mathcal{F}$ are injective.


Proof: Suppose, by contradiction, that $F$ is not injective. 

Then there exist points $P_1, P_2 \in \mathbb{R}^2$ such that $F(P_1) = F(P_2) = Q$. 

Consider the system $\dot{X} = H(X)$ where $H(X) = F(X + P_1) - Q$. 

Note that it has two distinct critical points, one at the origin and one at $P_2 - P_1 \neq 0$. 

Thus the origin cannot be {\color{red}globally asymptotically stable}. 

However, $J(H)(X) = J(F(X + P_1))$, and so $H$ is a polynomial map in $\mathcal{F}$, which contradicts the hypothesis.

Since, by Theorem 1.5, the hypothesis in Proposition 1.6 is always satisfied by maps in $\mathcal{F}$, we conclude that these maps are always invertible. 

This is especially interesting since S. Pinchuk has recently given an example of a polynomial map on the plane whose determinant is everywhere positive, but which does not have an inverse, see [Pinchuk 94].


\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Section 2
\section{Global Upper Bound}
\begin{frame}[allowframebreaks]{B. }

In this section we prove one of the key lemmas used to settle Theorem 1.5. 

The proof we give, due to van den Essen, is purely $D$-module theoretic and works over any field of characteristic zero. 

In this section we will make free use of the results of Ch. 4, \S4.

Let $F : K^n \rightarrow K^n$ be a polynomial map and denote by $F_1,\ldots,F_n$ its coordinate functions. 

Let $\Delta(x) = \det JF(x)$. 

Throughout this section we will assume that $\Delta(x) \neq 0$ for every $x \in K^n$. 

Note that since we are not assuming that $K$ is algebraically closed, this does not imply that $\Delta(x)$ is constant. 

Put $d = \deg F = \max\{\deg F_i : 1 \leq i \leq n\}$.

Let $g \in K[X,\Delta^{-1}]$ and consider the derivations $D_i$ of $K[X,\Delta^{-1}]$ defined for $i=1,\ldots,n$ by
\[
D_i(g) = \Delta^{-1} \det J(F_1,\ldots,F_{i-1},g,F_{i+1},\ldots,F_n)
\]
as in Ch. 4, \S4. 

Note that
\begin{equation}
D_i = \Delta^{-1} \sum_{k=1}^{n} a_{ik} \partial_k
\label{tag-2-1}
\end{equation}
where $a_{ik}$ is the $ik$ cofactor. 

Hence, $\deg(a_{ik}) \leq (n-1)d$. 

By Lemma 4.4.1, these derivations satisfy
\[
[D_i,F_j] = \delta_{ij} \quad \text{and} \quad [D_i,D_j] = 0
\]
for $1 \leq i,j \leq n$.

We shall use the $D_i$ to define an $A_n$-module structure on $K[X,\Delta^{-1}]$ as follows:
\begin{align*}
x_i \bullet q &= F_i \cdot q, \\
\partial_i \bullet q &= D_i(q),
\end{align*}
where $q \in K[X,\Delta^{-1}]$. 

A routine argument using Appendix 1 shows that $\bullet$ gives a well-defined $A_n$-module structure on $K[X,\Delta^{-1}]$. 

We denote this module by $M(F)$.

\textbf{Lemma 2.2.}% 2.2
As an $A_n$-module, $M(F)$ is {\color{red}holonomic} and its multiplicity cannot exceed $2^n(2nd+1)^n$.

Proof: The proof follows the argument of Theorem 10.3.2. 

For $v \in \mathbb{N}$, put
\[
\Gamma_v = \{g \cdot \Delta^{-2v} \in K[X,\Delta^{-1}] : \deg(g) \leq 2v(2nd+1)\}.
\]

We show that $\{\Gamma_v\}_{v \geq 0}$ is a filtration of $M(F)$.

Let us show first that $B_i \cdot \Gamma_v \subseteq \Gamma_{v+i}$, where $\{B_i\}$ denotes the Bernstein filtration of $A_n$. 

It is enough to prove this for $i=1$, because $B_i = B_1^i$. 

Let $q = g \Delta^{-2v} \in \Gamma_v$. 

Using the chain rule, we have that
\[
\partial_i \bullet q = D_i(g) \Delta^{-2v} + (-2v) g \Delta^{-(2v+1)} D_i(\Delta).
\]

Substituting for $D_i$ the formula in (\ref{tag-2-1}) we get
\[
\partial_i \bullet q = \Delta^{-2(v+1)} \left( \Delta \sum_{k=1}^{n} a_{ik} \partial_k(g) - 2vg \sum_{k=1}^{n} a_{ik} \partial_k(\Delta) \right).
\]

Since $\deg(\Delta) \leq nd$ and $\deg(a_{ik}) \leq (n-1)d$ we conclude that $\partial_i \bullet q \in \Gamma_{v+1}$. 

A similar argument shows that $x_i \bullet q \in \Gamma_{v+1}$.

Finally we show that $\bigcup \Gamma_v = M(F)$. 

If $q \in K[X,\Delta^{-1}]$ then $q = g \Delta^{-r}$ where $g \in K[X]$ has degree $s$ and $r \geq 0$. 

Put $v = \max\{r,s\}$. 

Thus $q = g (\Delta^{2v-r}) \Delta^{-2v}$ and since $v \geq s$,
\[
\deg(g \Delta^{2v-r}) \leq s + (2v-r)nd \leq s + 2vnd \leq 2v(2nd+1).
\]

Thus $g \Delta^{-r} \in \Gamma_v$, which proves that $\{\Gamma_v\}_{v \geq 0}$ is a filtration of $M(F)$.

On the other hand, $\Gamma_v$ is a $K$-vector space of dimension equal to that of the subspace of $K[X]$ of polynomials of degree $\leq 2v(2nd+1)$. 

Hence,
\[
\dim_K \Gamma_v \leq \frac{2^n(2nd+1)^n}{n!} v^n + \text{terms of smaller degree in } v.
\]

By Lemma 10.3.1, the module $M(F)$ is {\color{red}holonomic}.

We are now ready to prove the main theorem of this section. 

Its purpose is to give a global bound on the number of elements in the inverse image $F^{-1}(P)$ of a point $P \in K^n$. 

Let $P = (P_1,\ldots,P_n)$. 

It is easy to see that the number of elements in $F^{-1}(P)$ equals the number of solutions of the system $F_1 - P_1 = \cdots = F_n - P_n = 0$.

 
\textbf{Theorem 2.3. } %2.3
Let $F : K^n \rightarrow K^n$ be a polynomial map. 

If $\det J(F) \neq 0$ everywhere in $K^n$, then there exists a positive integer $b$ such that $F^{-1}(P)$ does not have more than $b^n$ points for every $P \in K^n$.


Proof: Let $P \in K^n$ and consider the polynomial map $F - P$. 

Since $J(F-P) = J(F)$, we have that $\Delta = \det J(F-P) = \det J(F) \neq 0$ everywhere in $K^n$. 

Put $M(P) = M(F-P)$. 

Note that $M(P) = K[X,\Delta^{-1}]$ for all $P \in K^n$, it is only the action of $A_n$ on $M(P)$ that depends on $P$.

By Lemma 2.2, $M(P)$ is {\color{red}holonomic} and its multiplicity is at most $2^n(2nd+1)^n = b$. 

By Theorem 18.1.4,
\[
M(P) / \sum_{i=1}^{n} (F_i - P_i) M(P) = M(P) / \left( \sum_{i=1}^{n} x_i \bullet M(P) \right)
\]
is a vector space over $K$ of dimension $\leq b$. 

In particular, the classes of $1, x_1, x_1^2, \ldots, x_1^b$ in $M(P)/\sum_{i=1}^{n} x_i \bullet M(P)$ must be linearly dependent. 

Thus there exists a nonzero polynomial $g(x_1) \in K[x_1]$ of degree $\leq b$ and a positive integer $r$ such that
\begin{equation}
\Delta^r \cdot g(x_1) \in \sum_{i=1}^{n} (F_i - P_i) K[X].
\label{tag-2-4}
\end{equation}

Finally, if $Q = (Q_1,\ldots,Q_n) \in K^n$ satisfies $F(Q) = P$, then by (\ref{tag-2-4}) we have that $\Delta(Q)^r \cdot g(Q_1) = 0$. 

Since $\Delta$ has no zeros on $K^n$, it follows that $g(Q_1) = 0$. 

Hence there are at most $b$ possibilities for the first coordinate of $Q$. 

Arguing similarly for the other coordinates we have that $F^{-1}(P)$ cannot have more than $b^n$ elements. 

Since $b$ is independent of $P$ the theorem follows.

Theorem 2.3 and its proof are due to A. van den Essen [van den Essen 91]. 

In the special case $K = \mathbb{R}$ this result follows from topological arguments; see [Bochnak, Coste and Roy 87, Theorem 11.5.2].



\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Section 3
\section{Global Stability on the Plane}
\begin{frame}[allowframebreaks]{C. }

We may now conclude the story that began in \S1, by proving Theorem 1.5 on the global asymptotic stability of polynomial systems on the plane. 

The main ingredients of the proof are Theorem 2.3 and the following result.

\textbf{Theorem 3.1. }% 3.1
Let $F \in \mathcal{F}$. If there exist positive constants $\rho$ and $r$ such that
\[
|F(X)| \geq \rho \quad \text{whenever} \quad |X| \geq r
\]
then the origin is a {\color{red}globally asymptotically stable} point of the system $\dot{X} = F(X)$.



Since the proof of this Theorem is purely analytic and not very straightforward, we will not give it here. 

It was first proved in [Olech 63] where it follows from an application of Green's theorem. 

See also [Gasull, Llibre and Sotomayor 91]. 

Let us show how Theorems 2.3 and 3.1 can be used to prove Theorem 1.5.

Proof of Theorem 1.5: Since $F \in \mathcal{F}$ it follows from Theorem 2.3 that
\[
\sup \{\# F^{-1}(Y) : Y \in \mathbb{R}^2\} = K < \infty.
\]

Let $P \in \mathbb{R}^2$ be a point at which the maximum is attained: that is $\# F^{-1}(P) = K$. 

Let $Q_1,\ldots,Q_K$ be the elements of $F^{-1}(P)$. 

By the inverse function theorem $F$ is invertible in the neighbourhood of every point of $\mathbb{R}^2$. 

Hence for $1 \leq i \leq K$, it is possible to choose $\rho > 0$ and a neighbourhood $V_i$ of $Q_i$ such that
\[
F : V_i \rightarrow B_\rho(P)
\]
is a diffeomorphism, where $B_\rho(P)$ is the open ball centred on $P$ of radius $\rho$. 

By decreasing the value of $\rho$, if necessary, we may also assume that $V_i \cap V_j = \emptyset$ if $i \neq j$. 

Let us prove that, under these hypotheses,
\begin{equation}
F^{-1}(B_\rho(P)) = V_1 \cup \cdots \cup V_K.
\label{tag-3-2}
\end{equation}

It is clear that the union of the $V_i$'s is contained in $F^{-1}(B_\rho(P))$. 

We prove the opposite inclusion. 

Suppose, by contradiction, that it does not hold. 

Thus there exists a point $W$ not in $V_1 \cup \cdots \cup V_K$ such that $F(W) \in B_\rho(P)$. 

Since $F(V_i) = B_\rho(P)$, there are points $Y_i \in V_i$ such that
\[
F(Y_i) = F(W)
\]
for $i=1,\ldots,K$. 

Note that if $i \neq j$ then $Y_i \neq Y_j$, because $V_i \cap V_j = \emptyset$. 

Furthermore $Y_i \neq W$ since $W$ does not belong to the union $V_1 \cup \cdots \cup V_K$. 

Hence,
\[
\{W,Y_1,\ldots,Y_K\} \subseteq F^{-1}(F(W)).
\]

Thus $F^{-1}(F(W))$ has at least $K+1$ elements, a contradiction. 

Thus (\ref{tag-3-2}) holds.

Now choose $r{\,}' > 0$ so large that $B_{r{\,}'}(0)$ contains $V_1 \cup \cdots \cup V_K$. 

For this $r{\,}'$ and the previously chosen $\rho$ we have that
\begin{equation}
|F(X) - P| \geq \rho \quad \text{if} \quad |X| \geq r{\,}'.
\label{tag-3-3}
\end{equation}

Consider the translated function $G(X) = F(X + Q_1) - P$. 

It follows from (\ref{tag-3-3}) that $G(X)$ satisfies the hypothesis of Theorem 3.1 for $r = r{\,}' + |Q_1|$. 

Hence the system $\dot{X} = G(X)$ has the origin as a {\color{red}globally asymptotically stable} point. 

Thus by Proposition 1.6 the map $G$ is injective. 

But for $i=1,\ldots,K$,
\[
G(Q_i - Q_1) = F(Q_i) - P = 0 = G(0).
\]

Since $G$ is injective, we must have that $K = 1$. 

This means that $F^{-1}(P)$ has at most one point for any $P \in \mathbb{R}^2$. 

Thus (\ref{tag-3-3}) is satisfied by $P = 0$. 

But this is the hypothesis of Theorem 3.1, from which Theorem 1.5 immediately follows.



\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Section 4
\section{Exercises}
\begin{frame}[allowframebreaks]{D. }

\textbf{Exercise 4.1.}
Let $A$ be a $2 \times 2$ matrix with real coefficients. 

Show that the origin is a {\color{red}globally asymptotically stable} point of the system $\dot{X} = A \cdot X$ if and only if the real parts of the eigenvalues of $A$ are negative.

\newpage

\textbf{Exercise 4.2.} 
Let $F \in \mathcal{F}$. 

Show that if $F$ is globally invertible in $\mathbb{R}^2$ then the origin is a {\color{red}globally asymptotically stable} point of the system $\dot{X} = F(X)$.

Hint: By the inverse function theorem there exist $\rho, r > 0$ such that $F(B_\rho(0))$ contains $B_r(0)$. 

Since $F$ is globally one-to-one, the points outside $B_\rho(0)$ must be sent outside $B_r(0)$: that is, $|F(X)| \geq r$ whenever $|X| \geq \rho$. But this is the hypothesis of Theorem 3.1.

\newpage

\textbf{Exercise 4.3.} 
Let $F : \mathbb{R}^2 \rightarrow \mathbb{R}^2$ be a polynomial map. 

Use Green's theorem to show that if $\det J(F) = 1$ everywhere on $\mathbb{R}^2$ then $F$ is a map of $\mathbb{R}^2$ that preserves area.

\end{frame}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\end{document}


