% LaTeX中文模板，编译需要XeLaTeX，相关软件可到 http://www.ctex.org 下载CTeX套装。

\documentclass{ctexart}

\usepackage{amsfonts}   % TeX fonts from the American Mathematical Society.
\usepackage{amsmath}    % AMS mathematical facilities for LaTeX.
\usepackage{amsthm}     % Provide proclamations environment.
\usepackage{amssymb}    % AMS symbols
\usepackage[pagebackref]{hyperref}   % Extensive support for hypertext in LaTeX; MUST be on the last \usepackage line in the preamble.


% ******************************* fullpage.sty ******************************************
% This is FULLPAGE.STY by H.Partl, Version 2 as of 15 Dec 1988.
% Document Style Option to fill the paper just like Plain TeX.
\typeout{Style Option FULLPAGE Version 2 as of 15 Dec 1988}

\topmargin 0pt \advance \topmargin by -\headheight \advance
\topmargin by -\headsep

\textheight 8.9in

\oddsidemargin 0pt \evensidemargin \oddsidemargin \marginparwidth
0.5in

\textwidth 6.5in
% For users of A4 paper: The above values are suited for American 8.5x11in
% paper. If your output driver performs a conversion for A4 paper, keep
% those values. If your output driver conforms to the TeX standard (1in/1in),
% then you should add the following commands to center the text on A4 paper:

% \advance\hoffset by -3mm  % A4 is narrower.
% \advance\voffset by  8mm  % A4 is taller.
% ***************************** end of fullpage.sty *************************************


% ************* Proclamations (theorem-like structures) ******************
% [section] option provides numbering within a section.
% Provided by amsthm; \theoremstyle{plain} = italic body, bold heading.
\theoremstyle{plain}
\newtheorem{theorem}{定理}[section]
\newtheorem{prop}{命题}[section]
\newtheorem{corollary}{推论}[section]
\newtheorem{lemma}{引理}[section]
\newtheorem{remark}{注}
% NOTE(review): remark has no [section] option, so it is numbered globally
% (注1, 注2, ...) rather than per section — confirm this is intentional.
% ************************************************************************


% ***************************** 重定义 ***********************************
% Redefinitions: localize amsthm's proof machinery to Chinese.
\renewcommand\proofname{证明}              % amsthm's proof environment prints \proofname as its heading; replace the default "Proof" with the Chinese term.

\newenvironment{solution}                  % Solutions use a modified proof environment
  {\begin{proof}[解答]}                    % proof's optional argument overrides the heading, so solutions are typeset like proofs (incl. QED tombstone)
  {\end{proof}}
% ************************************************************************


% ************* Frequently used commands as shorthand ********************
% Double-bar norm delimiter: two | glyphs pulled together by a negative
% thin space. Takes no argument, so usage is paired: \norm x \norm.
% NOTE(review): amsmath's \lVert ... \rVert is the standard alternative,
% but switching would require updating every call site — confirm first.
\newcommand{\norm}{|\!|}
% ************************************************************************



\begin{document}
\title{ {\Huge 吴崇试编著《数学物理方法（第二版）》} \\ {\Huge 习题解答} }
\author{曾焰}
\date{版本：0.1.4，最后修订于2019-08-31}

\maketitle

\begin{abstract}
吴崇试\cite{吴崇试2003}一书的习题解答\footnote{该书有相应的习题指导（不是习题解答）：《数学物理方法习题指导》（周治宁，吴崇试，钟毓澍编著）\cite{周治宁等2004}。}。 若发现任何错误或遗漏，请发送邮件至zypublic@hotmail.com。
\end{abstract}

\tableofcontents

\newpage

\section{复数和复变函数}

本章习题过于简单，略。

\section{解析函数}

\subsection{正文练习题}

\noindent  $\blacktriangleright$ 2.1. 证明：柯西-黎曼方程等价于
\[
i\frac{\partial f}{\partial x} = \frac{\partial f}{\partial y}.
\]
\begin{proof}
\[
i\frac{\partial f}{\partial x}=i\left(\frac{\partial u}{\partial x} + i\frac{\partial v}{\partial x}\right) = -\frac{\partial v}{\partial x} + i\frac{\partial u}{\partial x}, \; \frac{\partial f}{\partial y}=\frac{\partial u}{\partial y} + i\frac{\partial v}{\partial y}.
\]
所以$i\frac{\partial f}{\partial x}=\frac{\partial f}{\partial y}$当且仅当柯西-黎曼方程成立。
\end{proof}

\noindent $\blacktriangleright$ 2.2. 如果把复变函数$f=u+iv$看成是$(x,y)$的二元函数，即
\[
f(x,y) = u(x,y) + iv(x,y),
\]
再进一步看成是$z=x+iy$和$z^*=x-iy$的复合二元函数，证明柯西-黎曼方程等价于
\[
\frac{\partial f}{\partial z^*} = 0.
\]
\begin{proof}
因为$x=\frac{1}{2}(z+z^*)$, $y=-\frac{i}{2}(z-z^*)$, 故有$\frac{\partial x}{\partial z^*}=\frac{1}{2}$，$\frac{\partial y}{\partial z^*}=\frac{i}{2}$. 所以
\[
\frac{\partial f}{\partial z^*}=\left(\frac{1}{2}\frac{\partial u}{\partial x} + \frac{i}{2}\frac{\partial u}{\partial y}\right)+i\left(\frac{1}{2}\frac{\partial v}{\partial x}+\frac{i}{2}\frac{\partial v}{\partial y}\right)=\frac{1}{2}\left(\frac{\partial u}{\partial x}-\frac{\partial v}{\partial y}\right)+\frac{i}{2}\left(\frac{\partial u}{\partial y}+\frac{\partial v}{\partial x}\right).
\]
于是$\frac{\partial f}{\partial z^*}=0$当且仅当柯西-黎曼方程成立.
\end{proof}

\noindent $\blacktriangleright$ 2.3. 证明：
\begin{eqnarray*}
|f'(z)|^2
&=& \left( \frac{\partial u}{\partial x} \right)^2 + \left( \frac{\partial v}{\partial x} \right)^2 = \left( \frac{\partial u}{\partial y} \right)^2 + \left( \frac{\partial v}{\partial y} \right)^2 \\
&=& \left( \frac{\partial u}{\partial x} \right)^2 + \left( \frac{\partial u}{\partial y} \right)^2 = \left( \frac{\partial v}{\partial x} \right)^2 + \left( \frac{\partial v}{\partial y} \right)^2.
\end{eqnarray*}
\begin{proof}运用柯西-黎曼方程即可.\end{proof}

\noindent $\blacktriangleright$ 2.4. 证明：
\begin{eqnarray*}
\frac{d}{dz}\left[ f(z) + g(z) \right] = \frac{df(z)}{dz} + \frac{dg(z)}{dz}; & \frac{d}{dz}\left[f(z)g(z)\right] = \frac{df(z)}{dz}g(z) + f(z) \frac{dg(z)}{dz}; \\
\frac{d}{dz}\frac{f(z)}{g(z)} = \frac{f'(z)g(z)-f(z)g'(z)}{g^2(z)}, g(z) \ne 0; & \frac{d}{dz}f(g(z)) = f'(g(z))g'(z).
\end{eqnarray*}
\begin{proof}使用定义即可，证明与实变量函数的情形类似。证明略。\end{proof}

\noindent $\blacktriangleright$ 2.5. 举例说明中值定理不适用于解析函数：若函数$f(z)$在$G$中解析，$z_1$和$z_2$以及连接两点的线段均在$G$中，在此线段上不一定存在$z_0$点，使得
\[
\frac{f(z_1)-f(z_2)}{z_1-z_2}=f'(z_0).
\]
\begin{solution}
取复平面上任意两点$z_1$和$z_2$. 定义$f(z)=\exp\left(i\frac{2z-(z_1+z_2)}{z_2-z_1}\pi\right)$. 则$f(z)$解析并有$f(z_1)=f(z_2)=-1$. 但
$f'(z)=\frac{2i\pi}{z_2-z_1}\exp\left(i\frac{2z-(z_1+z_2)}{z_2-z_1}\pi\right)\ne
0$, $\forall z\in\mathbb C$。所以中值定理不适用于$f(z)$ (此例来自Qazi \cite{Qazi2006})。
\end{solution}

\noindent  $\blacktriangleright$ 2.6. 假设函数$f(z)$在区域$G$内的任何一点都满足$f'(z)=0$，证明$f(z)$在$G$内为常数。
\begin{proof}
设$f(z)=u(x,y)+iv(x,y)$。根据柯西-黎曼方程，$f'(z)$在$G$中为零意味着$\frac{\partial u}{\partial x}$，$\frac{\partial u}{\partial y}$，$\frac{\partial v}{\partial x}$，
$\frac{\partial v}{\partial y}$在$G$中都为零。根据对实变量函数的理论结果，可以推断$u$和$v$在$G$中为常数。于是$f$在$G$中也是一个常数。
\end{proof}

\noindent $\blacktriangleright$  2.7. 若函数$f(z)$在区域$G$内解析，且$\text{Im} f(z)=0$，证明$f(z)$在$G$内为常数。
\begin{proof} 这是柯西-黎曼方程的直接推论，证明略。\end{proof}

\noindent  $\blacktriangleright$ 2.8. 若函数$f(z)=u(x,y)+iv(x,y)$在区域内解析，且$au(x,y)+bv(x,y)=c$，$a$, $b$和$c$是不为0的实常数，证明$f(z)$必为常数。

如果$a$, $b$和$c$是不为0的复常数，这个结论还成立吗？
\begin{proof}从条件$au(x,y)+bv(x,y)=c$可推知
\[
\begin{cases}
a\frac{\partial u}{\partial x} + b\frac{\partial v}{\partial x}=0\\
a\frac{\partial u}{\partial y} + b\frac{\partial v}{\partial y}=0.
\end{cases}
\]
所以
\[
\frac{b^2}{a^2}\frac{\partial v}{\partial x}=\left(-\frac{b}{a}\right)\frac{\partial v}{\partial x}\left(-\frac{b}{a}\right)=\frac{\partial u}{\partial x}\left(-\frac{b}{a}\right)=\frac{\partial v}{\partial y}\left(-\frac{b}{a}\right)=\frac{\partial u}{\partial y}=-\frac{\partial v}{\partial x},
\]
也即$\left(\frac{b^2}{a^2}+1\right)\frac{\partial v}{\partial x}=0$。这意味着$\frac{\partial v}{\partial x}=0$. 类似地，
\[
\frac{b^2}{a^2}\frac{\partial v}{\partial y}=\left(-\frac{b}{a}\right)\frac{\partial v}{\partial y}\left(-\frac{b}{a}\right)=\frac{\partial u}{\partial y}\left(-\frac{b}{a}\right)=-\frac{\partial v}{\partial x}\left(-\frac{b}{a}\right)=-\frac{\partial u}{\partial x}=-\frac{\partial v}{\partial y},
\]
也即$\left(\frac{b^2}{a^2}+1\right)\frac{\partial v}{\partial y}=0$, 这意味着$\frac{\partial v}{\partial y}=0$. 综合起来，我们可以推知$v$在$G$内为常数。根据柯西-黎曼方程，我们也能推知$u$ 在$G$内为常数。所以$f(z)$在$G$内为常数。

如果$a$, $b$和$c$是不为0的复常数，这个结论仍然成立。注意我们对于实常数的情形，已经证明了当
$\frac{b^2}{a^2} + 1\ne 0$时, $f(z)$是$G$内的常数。现在假定$\frac{b^2}{a^2}+1=0$，则我们有两种情形：$b=ia$和$b=-ia$。在前一种情况中，
我们有$af(z)=au(x,y)+iav(x,y)=au(x,y)+bv(x,y)=c$。所以$f(z)=c/a$是一个常数。在后一种情况中，我们有$af^*(z)=au(x,y)-iav(x,y)=au(x,y)+bv(x,y)=c$。所以
$f(z)=(c/a)^*$也是一个常数。证毕。
\end{proof}

\noindent $\blacktriangleright$ 2.9. 如果$z$沿不同辐角方向趋于$\infty$点，试讨论函数$e^z$的变化趋势。

又设常数$\alpha\ne 0$，试设计一个无穷序列$\{z_n\}$，使$z$依此序列趋于无穷远点时，函数$e^z$趋于$\alpha$。
\begin{proof} 我们把$z$写成极坐标形式：$z=re^{i\theta}=r(\cos\theta+i\sin\theta)$。则$e^z=e^{r\cos\theta}e^{ir\sin\theta}$。所以当$z$按照固定的辐角趋于$\infty$时，$e^{z}$的模可以依$\cos\theta$的符号而趋于$\infty$，或趋于$0$，或保持常数。

将$\alpha$的主辐角记作$\theta$，则$z_n=\ln|\alpha|+(\theta+2\pi n)i$满足要求。
\end{proof}

\medskip

\noindent ****************************** UPDATE STOPPED HERE *************************************

\medskip
\noindent 2.10. \begin{proof}All the equalities can be proved via the equalities for trigonometric functions and the relation between hyperbolic functions and trigonometric functions. We only prove the two inequalities.

First, we observe
\begin{eqnarray*}
|\sinh y|&=&\frac{1}{2}|e^{y}-e^{-y}|
=\frac{1}{2}[e^{2y}+e^{-2y}-2]^{1/2}
\le \frac{1}{2}[e^{-2y}+e^{2y}-2+4\sin^2x]^{1/2}
=|\sin(x+iy)|\\
&=&\frac{1}{2}[e^{2y}+e^{-2y}+2-4\cos^2x]^{1/2}
\le \frac{1}{2}[e^{2y}+e^{-2y}+2]^{1/2}
=\cosh y.
\end{eqnarray*}
Similarly, we have
\begin{eqnarray*}
|\sinh y| &=& \frac{1}{2}|e^{-y}-e^y|=\frac{1}{2}[e^{2y}+e^{-2y}-2]^{1/2}\le \frac{1}{2}[e^{-2y}+e^{2y}+4\cos^2x-2]^{1/2}=|\cos(x+iy)|\\
&=&\frac{1}{2}[e^{2y}+e^{-2y}+2-4\sin^2x]^{1/2}
\le \frac{1}{2}[e^{2y}+e^{-2y}+2]^{1/2}
=\cosh y.
\end{eqnarray*}
\end{proof}

\noindent 2.11. \begin{proof}
We first assume $|f(z)|$ is a constant in $G$. If this constant is $0$, we have nothing to prove. So without loss of generality, we assume $|f(z)|$ is a non-zero constant in $G$. Suppose $f(z)=u(x,y)+iv(x,y)$, then $\frac{\partial}{\partial x}|f(z)|^2=\frac{\partial}{\partial y}|f(z)|^2=0$ gives
\begin{eqnarray*}
\begin{cases}
2u(x,y)\frac{\partial u(x,y)}{\partial x} + 2v(x,y)\frac{\partial v(x,y)}{\partial x} = 0 \\
2u(x,y)\frac{\partial u(x,y)}{\partial y} + 2v(x,y)\frac{\partial v(x,y)}{\partial y} = 0, \;\forall (x,y)\in G.
\end{cases}
\end{eqnarray*}
Using Cauchy-Riemann equations, we have
\begin{eqnarray*}
\begin{cases}
u\frac{\partial v}{\partial y} + v\frac{\partial v}{\partial x} = 0\\
v\frac{\partial v}{\partial y} - u\frac{\partial v}{\partial x} = 0.
\end{cases}
\end{eqnarray*}
Since $\left|\begin{matrix}u(x,y) & v(x,y) \\ v(x,y) & -u(x,y)\end{matrix}\right|=|f(z)|^2\ne 0$ in $G$, solving the above linear equations gives $\frac{\partial v(x,y)}{\partial y} = \frac{\partial v(x,y)}{\partial x} =0$ in $G$. That is, $v(x,y)$ is a constant in $G$. Cauchy-Riemann equations imply $u(x,y)$ is a constant in $G$ as well. So $f(z)$ is a constant in $G$.

We then assume $\theta=\theta(z) := \arg f(z)$ is a constant in $G$. Write $f$ in polar coordinates: $f(z)=r(x,y)e^{i\theta}$. Then Cauchy-Riemann equations become
\[
\begin{cases}
\frac{\partial}{\partial x}r(x,y)\cos\theta = \frac{\partial }{\partial y} r(x,y)\sin\theta\\
\frac{\partial}{\partial y}r(x,y)\cos\theta = -\frac{\partial}{\partial x}r(x,y)\sin\theta.
\end{cases}
\]
If $\sin\theta = 0$, we have $\cos\theta \ne 0$ and $\frac{\partial
}{\partial x}r=\frac{\partial}{\partial y}r = 0$. If $\cos\theta =
0$, we have $\sin\theta \ne 0$ and $\frac{\partial }{\partial
x}r=\frac{\partial}{\partial y}r =0$. If $\sin\theta\ne 0$ and
$\cos\theta\ne 0$, we have $\frac{\partial}{\partial
x}r=\frac{\partial }{\partial y}r\tan\theta=-\frac{\partial
r}{\partial x}\tan^2\theta$, which implies $\frac{\partial}{\partial
x}r=0$. Consequently, $\frac{\partial}{\partial y}r = 0$. In either
of the three cases, we always have $\frac{\partial }{\partial
x}r=\frac{\partial}{\partial y}r=0$ in $G$. So $r$ is a constant in
$G$ and $f(z)$ is a constant in $G$ by the result of first half.
\end{proof}

\noindent 2.12. \begin{proof}$d\xi = \frac{\partial \xi}{\partial x}dx + \frac{\partial \xi}{\partial y}dy$, $d\eta = \frac{\partial \eta}{\partial x}dx + \frac{\partial \eta}{\partial y}dy$. So by Cauchy-Riemann equations, we have
\[
d\xi d\eta = \frac{\partial \xi}{\partial x}\frac{\partial \eta}{\partial y}dxdy-\frac{\partial \xi}{\partial y}\frac{\partial \eta}{\partial x}dxdy=\left(\frac{\partial \eta}{\partial y}\right)^2dxdy+\left(\frac{\partial \xi}{\partial y}\right)^2dxdy =|f'(z)|^2dxdy.
\]
\end{proof}


\subsection{章末习题}

\noindent 1. \begin{proof}The basic method is to verify that $\mbox{Re}f$ and $\mbox{Im}f$ are differentiable as functions of real variables, and that they satisfy Cauchy-Riemann equations.\end{proof}

\noindent 2. \begin{proof}Since $\begin{cases}\begin{matrix}x=r\cos\theta\\ y=r\sin\theta\end{matrix}\end{cases}$, we have
\[
\left[\begin{matrix}\frac{\partial }{\partial r} \\ \frac{\partial}{\partial \theta}\end{matrix}\right]=\left[\begin{matrix}\cos\theta & \sin\theta \\ -r\sin\theta & r\cos\theta\end{matrix}\right]\left[\begin{matrix}\frac{\partial }{\partial x} \\ \frac{\partial}{\partial y}\end{matrix}\right]:= A(\theta, r)\left[\begin{matrix}\frac{\partial }{\partial x} \\ \frac{\partial}{\partial y}\end{matrix}\right].
\]
It's easy to see $A^{-1}(\theta,r)=\frac{1}{r}\left[\begin{matrix}r\cos\theta & -\sin\theta \\ r\sin\theta & \cos\theta\end{matrix}\right]$. Writing Cauchy-Riemann equations in matrix form, we get
\[
\left[\begin{matrix}\frac{\partial }{\partial x} \\ \frac{\partial }{\partial y} \end{matrix}\right]u=\left[\begin{matrix}0 & 1\\ -1 & 0\end{matrix}\right]\left[\begin{matrix}\frac{\partial }{\partial x} \\ \frac{\partial }{\partial y} \end{matrix}\right]v.
\]Therefore, under the polar coordinate, the Cauchy-Riemann equations become
\[
\left[\begin{matrix}\frac{\partial }{\partial r} \\ \frac{\partial}{\partial \theta}\end{matrix}\right]u=A(\theta, r)\left[\begin{matrix}\frac{\partial }{\partial x} \\ \frac{\partial}{\partial y}\end{matrix}\right]u=A(\theta, r)\left[\begin{matrix}0 & 1\\ -1 & 0\end{matrix}\right]\left[\begin{matrix}\frac{\partial }{\partial x} \\ \frac{\partial }{\partial y} \end{matrix}\right]v=A(\theta, r)\left[\begin{matrix}0 & 1\\ -1 & 0\end{matrix}\right]A^{-1}(\theta,r)\left[\begin{matrix}\frac{\partial }{\partial r} \\ \frac{\partial }{\partial \theta} \end{matrix}\right]v=\left[\begin{matrix}0 & \frac{1}{r} \\ -r & 0\end{matrix}\right]\left[\begin{matrix}\frac{\partial }{\partial r} \\ \frac{\partial }{\partial \theta} \end{matrix}\right]v.
\]
\end{proof}

\noindent 3. \begin{proof} Fix $z=re^{i\theta}$, then
\[
f'(z)=\lim_{\Delta r\to 0}\frac{u(r+\Delta r,\theta)+iv(r+\Delta r,\theta)-[u(r,\theta)+iv(r,\theta)]}{\Delta r\cdot e^{i\theta}}=\frac{\partial u}{\partial r}e^{-i\theta}+i\frac{\partial v}{\partial r}e^{-i\theta}=\frac{r}{z}\left[\frac{\partial u}{\partial r} + i\frac{\partial v }{\partial r}\right].
\]
By the result of previous problem, we have
\[
\frac{r}{z}\left[\frac{\partial u}{\partial r} + i\frac{\partial v }{\partial r}\right]=\frac{1}{z}\left[\frac{\partial v}{\partial \theta}-i\frac{\partial u}{\partial \theta}\right].
\]
\end{proof}

\noindent 4. We use the following result from the theory of ordinary
differential equations (see, for example, 丁同仁等\cite{丁同仁等2004},
Chapter 2, Theorem 1).

\begin{theorem}
Suppose function $P(x,y)$ and $Q(x,y)$ are continuous on $U=(\alpha,
\beta) \times (\gamma, \delta)$, and they have continuous partial
derivatives $\frac{\partial}{\partial y}P$ and
$\frac{\partial}{\partial x} Q$. Then the 1-form $\omega = P(x,y)dx
+ Q(x,y)dy$ is exact if and only if $\frac{\partial}{\partial
y}P=\frac{\partial }{\partial x}Q$ on $U$. Moreover, the 0-form
whose differential is $\omega$ can be represented as
\[
\int_{x_0}^xP(\xi,y)d\xi + \int_{y_0}^yQ(x_0,\eta)d\eta + C,
\]
where $C$ is a constant.
\end{theorem}

(1) \begin{proof}$P(x,y)=\frac{\partial v(x,y)}{\partial x} =
-\frac{\partial u(x,y)}{\partial y} = 2y$, $Q(x,y)=\frac{\partial
v(x,y)}{\partial y} = \frac{\partial u(x,y)}{\partial x} = 2x+1$. So
\[
v(x,y)=\int_0^xP(\xi,y)d\xi + \int_0^yQ(0,\eta)d\eta + C = 2xy + y +
C,
\]
where $C\in\mathbb R$ is a constant, and
\[
f(z)=u(x,y)+iv(x,y)=(x^2-y^2+x) + i(2xy+y+C) = (x+iy)^2 + (x+iy) +
iC = z^2 + z + iC.
\]
\end{proof}

(2) \begin{proof}$P(x,y)= \frac{\partial v(x,y)}{\partial x} =
-\frac{\partial u(x,y)}{\partial y} = \frac{2xy}{(x^2+y^2)^2}$,
$Q(x,y)=\frac{\partial v(x,y)}{\partial y} = \frac{\partial
u(x,y)}{\partial x} = \frac{y^2-x^2}{(x^2+y^2)^2}$. So from some
constant $C\in \mathbb R$,
\begin{eqnarray*}
v(x,y) &=& \int_1^x\frac{2\xi y}{(\xi^2+y^2)^2}d\xi + \int_0^y
\frac{\eta^2-1}{(1+\eta^2)^2}d\eta + C\\
&=& y\int_0^{x^2}\frac{du}{(u+y^2)^2}  +
\int_0^y\left[\frac{1}{1+\eta^2} - \frac{2}{(1+\eta^2)^2}\right]
d\eta
+ C \\
&=& y\left[-\frac{1}{x^2+y^2}+\frac{1}{1+y^2}\right] + \arctan y -
2\int_0^{\arctan y}\frac{d\tan\theta}{(1+\tan^2\theta)^2} + C \\
&=& y\left[-\frac{1}{x^2+y^2}+\frac{1}{1+y^2}\right] + \arctan y -
\int_0^{\arctan y} (\cos2\theta + 1)d\theta+
C \\
&=& y\left[-\frac{1}{x^2+y^2}+\frac{1}{1+y^2}\right] -
\frac{1}{2}\sin(2\arctan y) + C \\
&=& y\left[-\frac{1}{x^2+y^2}+\frac{1}{1+y^2}\right] -
\frac{\tan(\arctan y)}{1+\tan^2(\arctan y)} + C \\
&=& -\frac{y}{x^2+y^2} + C.
\end{eqnarray*}
Therefore $f(z)=u(x,y)+iv(x,y) = \frac{x-yi}{x^2+y^2} + iC =
\frac{1}{z} + iC$.
\end{proof}

(3) \begin{proof} $P(x,y)=\frac{\partial v(x,y)}{\partial x} =
-\frac{\partial u(x,y)}{\partial y} = -\frac{\partial (e^y\cos
x)}{\partial y} = -e^y\cos x$, $Q(x,y)=\frac{\partial
v(x,y)}{\partial y} = \frac{\partial u(x,y)}{\partial x} = -e^y\sin
x$. So $v(x,y)=\int_0^xP(\xi,y)d\xi + \int_0^yQ(0,\eta)d\eta =
-e^y\sin x + C$ where $C\in \mathbb R$ is a constant. So
$f(z)=e^y\cos x - ie^y\sin x + iC = e^{y-ix}+iC = e^{-iz} + iC$.
\end{proof}

(4)\begin{proof}$P(x,y)=\frac{\partial v(x,y)}{\partial x} =
-\frac{\partial u(x,y)}{\partial y} = -\cos x\sinh y$,
$Q(x,y)=\frac{\partial v(x,y)}{\partial y} = \frac{\partial
u(x,y)}{\partial x} = -\sin x \cosh y$. So $v(x,y) =
-\int_0^x\cos\xi\sinh y d\xi + C = -\sin x \sinh y + C$, where $C\in
\mathbb R$ is a constant. So $f(z) = \cos x \cosh y - i\sin x \sinh
y + iC = \cos x \cos(iy) -\sin x \sin(iy) + iC = \cos(x+iy)+ iC =
\cos z + iC$.
\end{proof}

\noindent 5. (1) \begin{proof} $f'(z) = \frac{\partial u}{\partial
x} + i\frac{\partial v}{\partial x} = \frac{\partial u}{\partial x}
- i\frac{\partial u}{\partial y} = 1-i$.
\end{proof}

(2)\begin{proof} $f'(z) = \frac{\partial u}{\partial
x}-i\frac{\partial u}{\partial y} = \cos x \cosh y - i\sin x \sinh y
= \cos x \cos(iy) - \sin \sin(iy) = \cos (x+iy) = \cos z$.
 \end{proof}

\noindent 6. \begin{proof} We note $\frac{\partial u}{\partial x} +
\frac{\partial u}{\partial y} = \frac{\partial u}{\partial x} -
\frac{\partial v}{\partial x} = (x^2+4xy+y^2) + (x-y)(2x+4y)$ and
$\frac{\partial u}{\partial y} - \frac{\partial u}{\partial x} =
\frac{\partial u}{\partial y} - \frac{\partial v}{\partial y} =
-(x^2+4xy+y^2)+(x-y)(4x+2y)$. Solving these two equations for
$\frac{\partial u}{\partial x}$ and $\frac{\partial u}{\partial y}$,
we get
\[
\frac{\partial u}{\partial x} = 6xy, \; \frac{\partial u}{\partial
y} = 3(x^2-y^2).
\]
Then it's easy to see $u(x,y)= 3x^2y-y^3+C$ for some constant $C\in
\mathbb R$ and consequently, $v=u-(x-y)(x^2+4xy+y^2)= -x^3+3xy^2+C$.
Therefore
\[
f(z) = (3x^2y-y^3+C)+i(-x^3+3xy^2+C) = (ix-y)^3+ C(1+i) =
-iz^3+C(1+i).
\]
\end{proof}

\noindent 7. (1) \begin{proof} We have $\frac{e^{iz}-e^{-iz}}{2i} =
\frac{3+i}{4}$, which is equivalent to $(e^{iz})^2 -
\frac{-1+3i}{2}e^{iz}-1=0$. Solving this quadratic equation gives us
\[
e^{iz} = \frac{\frac{-1+3i}{2} \pm \sqrt{\frac{(-1+3i)^2}{4}+4}}{2}
= \frac{(-1+3i)\pm \sqrt{8-6i}}{4} = \frac{(-1+3i)\pm (i-3)}{4}.
\]
So $e^{iz} = i-1=e^{\frac{\ln 2}{2} + (2n\pi+\frac{3}{4}\pi)i}$ or
$e^{iz} = \frac{i+1}{2} = e^{-\frac{\ln
2}{2}+(2n\pi+\frac{\pi}{4})i}$, $n\in\mathbb Z$. Therefore $z =
\frac{3}{4}\pi + 2n\pi -\frac{i}{2}\ln 2$ or
$\frac{\pi}{4}+2n\pi+\frac{i}{2}\ln 2$, $n\in \mathbb Z$.
\end{proof}

(2) \begin{proof} $\cos z = 4$ is equivalent to $(e^{iz})^2 -
8e^{iz} + 1 = 0$. So $e^{iz}=
4\pm\sqrt{15}=e^{\pm\ln(4+\sqrt{15})}$. Therefore $z=2n\pi \pm
i\ln(4+\sqrt{15})$ ($n\in \mathbb Z$). \end{proof}

(3) \begin{proof} $\tan z = i$ gives
$\frac{\frac{e^{iz}-e^{-iz}}{2i}}{\frac{e^{iz}+e^{-iz}}{2}} =
-i\frac{e^{2iz}-1}{e^{2iz}+1}=i$. So the equation becomes
$e^{2iz}-1=-e^{2iz}-1$, which has no solution.
\end{proof}

(4) \begin{proof} The equation can be written as $(2\cosh z -
1)(\cosh z - 1) = 0$. So $\cosh z = 1$ or $\frac{1}{2}$. Consider
the equation $\cosh z = \frac{e^z+e^{-z}}{2}=a$. We reduce it to the
quadratic equation of $e^z$: $e^{2z}-2ae^z+1=0$. So
$e^z=a\pm\sqrt{a^2-1}$. If $a=1$, we get $e^z=1$, which implies
$z=2n\pi i$ ($n\in \mathbb Z$). If $a=\frac{1}{2}$, we get
$e^z=\frac{1}{2}\pm \frac{\sqrt{3}}{2}i$, which implies $z=(2n \pm
\frac{1}{3})\pi i$ ($n\in\mathbb Z$).
\end{proof}

\noindent 15. \begin{proof}This is Exercise 2.11.\end{proof}

\section{复变积分}

\noindent 1. (1)\begin{proof} (i) $\int_0^{2+i}\mbox{Re}zdz = \int_0^2xdx+\int_2^{2+i}xdy = 2+2i$. (ii) $\int_0^{2+i}\mbox{Re}zdz = \int_0^12t(2dt+idt)=2+i$.
\end{proof}

(2)\begin{proof} (i) $\int_C\frac{dz}{\sqrt{z}} = 2\sqrt{z}|_{1}^{-1} = 2(e^{\frac{\pi}{2}i}-1)=2(i-1)$. (ii) $\int_C\frac{dz}{\sqrt{z}}=2\sqrt{z}|_{1}^{-1}=2(e^{-\frac{\pi}{2}i}-1)=-2(i+1)$.
\end{proof}

\noindent 2. (1) \begin{proof}By Cauchy integral formula, $\oint_{|z|=1}\frac{dz}{z}=2\pi i \cdot 1 = 2\pi i$.
\end{proof}

(2) \begin{proof} $\oint_{|z|=1}\frac{|dz|}{z}=\int_0^{2\pi}\frac{rd\theta}{re^{i\theta}}=0$. \end{proof}

(3) \begin{proof} $\oint_{|z|=1}\frac{dz}{|z|}=\int_0^{2\pi}\frac{re^{i\theta}d\theta}{r}=0$. \end{proof}

(4) \begin{proof} $\oint_{|z|=1}\left|\frac{dz}{z}\right|=\int_0^{2\pi}d\theta = 2\pi$. \end{proof}

\noindent 3. (1) \begin{proof} Denote $\frac{1}{z^2-1}\sin\frac{\pi z}{4}$ by $f(z)$.

(i) $f(z)$ is analytic on $\{z: |z|< \frac{1}{2} \}$ and continuous on $\{z:|z|=\frac{1}{2}\}$. So by Cauchy integral theorem, $\oint_{|z|=\frac{1}{2}}f(z)dz =0$.

(ii) $f(z)$ has one singular point $z_0=1$ in $\{z: |z-1|<1\}$. So by Cauchy integral formula, $\oint_{|z-1|=1}f(z)dz = \oint_{|z-1|=1}\frac{1}{z-1}\frac{\sin\frac{\pi z}{4}}{z+1}dz =2\pi i\frac{\sin\frac{\pi z}{4}}{z+1}|_{z=1}=\frac{\sqrt{2}}{2}\pi i$.

(iii) $f(z)$ has two singular points $\pm 1$ in $\{z:|z|<3\}$. So by Cauchy integral theorem for multiply connected region, $\oint_{|z|=3}f(z)dz = \oint_{|z-1|=\delta}f(z)dz + \oint_{|z+1|=\delta}f(z)dz$, where $\delta >0$ is sufficiently small so that $\{|z-1|\le \delta\}\cup \{|z+1|\le \delta\}\subset \{|z|<3\}$. By Cauchy integral formula, $\oint_{|z-1|=\delta}f(z)dz =2\pi i \frac{\sin\frac{\pi z}{4}}{z+1}|_{z=1}=\frac{\sqrt{2}}{2}\pi i$, and $\oint_{|z+1|=\delta}f(z)dz =2\pi i \frac{\sin\frac{\pi z}{4}}{z-1}|_{z=-1}=\frac{\sqrt{2}}{2}\pi i$. Combined, we conclude $\oint_{|z|=3}f(z)dz = \sqrt{2}\pi i$.

(iv) $\sqrt{2}\pi i$. The calculation is similar to that of (iii).
\end{proof}

(2) \begin{proof} Denote $\frac{1}{z^2+1}e^{iz}$ by $f(z)$.

(i) $f(z)$ has a singular point $z_0=i$ in $\{|z-i|<1\}$. So by Cauchy integral formula, $\oint_{|z-i|=1}f(z)dz = 2\pi i \frac{e^{iz}}{z+i}|_{z=i}=\frac{\pi}{e}$.

(ii) $f(z)$ has two singular points $\pm i$ in $\{|z|<2\}$. So by Cauchy integral theorem for multiply connected region and Cauchy integral formula, for $\delta>0$ sufficiently small,
\[
\oint_{|z|=2}f(z)dz = \oint_{|z+i|=\delta}f(z)dz + \oint_{|z-i|=\delta}f(z)dz=2\pi i\left[\frac{e^{iz}}{z-i}|_{z=-i} + \frac{e^{iz}}{z+i}|_{z=i}\right]=-2\pi\sinh1.
\]

(iii) $-2\pi\sinh 1$. The calculation is similar to that of (ii).

(iv) To have a closed curve, $\theta$ must take all the values between $0$ and $4\pi$ (see figure). This closed curve forms two contours, each of which contains the two singular points $\pm i$ of $f(z)$. So by a calculation similar to that of (ii) and (iii), $\oint_{\{z=re^{i\theta}:r=3-\sin^2\frac{\theta}{4}\}}f(z)dz=2\cdot (-2\pi\sinh1)=-4\pi\sinh1$.
%\begin{figure}[http]
%\centering
%\includegraphics[width=0.8\textwidth,clip]{wu_fig1.eps}
%\caption{$r=3-\sin^2\frac{\theta}{4}$, $\theta\in[0,4\pi]$}
%\end{figure}

 \end{proof}

\noindent 4. (1)\begin{proof}By Cauchy integral formula, $\oint_{|z|=2}\frac{\cos z}{z}dz=2\pi i\cos z|_{z=0}=2\pi i$. \end{proof}

(2)\begin{proof}$\frac{z^2-1}{z^2+1}$ has two singular points $\pm i$ in $\{|z|<2\}$. So by Cauchy integral theorem for multiply connected region and Cauchy integral formula, for $\delta>0$ sufficiently small,
\[
\oint_{|z|=2}\frac{z^2-1}{z^2+1}dz=\oint_{|z-i|=\delta}\frac{1}{z-i}\frac{z^2-1}{z+i}dz + \oint_{|z+i|=\delta}\frac{1}{z+i}\frac{z^2-1}{z-i}dz = 2\pi i\left[\frac{z^2-1}{z+i}|_{z=i}+\frac{z^2-1}{z-i}|_{z=-i}\right]=0.
\]
\end{proof}

(3) \begin{proof}By Cauchy integral formula, $\oint_{|z|=2}\frac{\sin e^z}{z}dz=2\pi i\sin(e^z)|_{z=0}=2\pi i\sin1$.\end{proof}

(4) \begin{proof}$\cosh z = 0$ if and only if $z=(\frac{\pi}{2}+n\pi)i$, $n\in \mathbb Z$. So $\{|z|<2\}$ contains two singular points $\pm \frac{\pi}{2}i$. By Cauchy integral theorem for multiply connected region, for $\delta >0$ sufficiently small, we have
\begin{eqnarray*}
\oint_{|z|=2}\frac{e^z}{\cosh z}dz
&=& \oint_{|z-\frac{\pi}{2}i|=\delta}\frac{e^z}{\cosh z}dz + \oint_{|z+\frac{\pi}{2}i|=\delta}\frac{e^z}{\cosh z}dz\\
&=& \oint_{|z|=\delta}\frac{e^{z+\frac{\pi}{2}i}}{\cosh(z+\frac{\pi}{2}i)}dz + \oint_{|z|=\delta}\frac{e^{z-\frac{\pi}{2}i}}{\cosh(z-\frac{\pi}{2}i)}dz\\
&=&2\oint_{|z|=\delta}\frac{e^z}{\sinh z}dz\\
&=&4\oint_{|z|=\delta}\frac{e^z}{e^z-e^{-z}}dz\\
&=&4\oint_{|z|=\delta}\frac{1}{e^{2z}-1}dz.
\end{eqnarray*}
Using power series expansion of $e^{2z}$, we can see $\lim_{z\to 0}\frac{2z}{e^{2z}-1}=1$ uniformly.\footnote{We note $\left|\frac{2z}{e^{2z}-1}-1\right|=\left|\frac{1}{1+\sum_{k=2}^{\infty}\frac{(2z)^{k-1}}{k!}}-1\right|\le\frac{\sum_{k=2}^{\infty}\frac{|2z|^{k-1}}{k!}}{1-\sum_{k=2}^{\infty}\frac{|2z|^{k-1}}{k!}}\to 0$ uniformly when $z\to 0$.} So by Lemma 3.1,
\[
\lim_{\delta \to 0} \oint_{|z|=\delta}\frac{1}{e^{2z}-1}dz=2\pi i \cdot\frac{1}{2}=\pi i.
\]
So $\oint_{|z|=2}\frac{e^z}{\cosh z}dz = \lim_{\delta\to 0}4\oint_{|z|=\delta}\frac{1}{e^{2z}-1}dz =4\pi i$.
\end{proof}

\noindent 5. (1) \begin{proof}
By Cauchy integral formula,
\[
\oint_{|z|=2}\frac{\sin z}{z^2}dz = 2\pi i \cdot \left(\sin z\right)'|_{z=0} = 2\pi i.
\]
\end{proof}

(2) \begin{proof}
By Cauchy integral formula,
\[
\oint_{|z|=2}\frac{|z|e^z}{z^2}dz=2\cdot 2\pi i\frac{1}{2\pi i}\oint_{|z|=2}\frac{e^z}{z^2}dz=4\pi i (e^z)'|_{z=0}=4\pi i.
\]
\end{proof}

(3) \begin{proof}
By Cauchy integral formula,
\[
\oint_{|z|=2}\frac{\sin z}{z^4}dz=\frac{2\pi i}{3!}\cdot \frac{3!}{2\pi i}\oint_{|z|=2}\frac{\sin z}{z^4}dz=\frac{\pi i}{3}(\sin z)^{(3)}|_{z=0}=-\frac{\pi}{3}i.
\]
\end{proof}

(4)\begin{proof}
By Cauchy integral formula,
\[
\oint_{|z|=2}\frac{dz}{z^2(z^2+16)}=2\pi i \left(\frac{1}{z^2+16}\right)'|_{z=0}=0.
\]
\end{proof}

\noindent 6. (1) \begin{proof}By Cauchy integral formula,
\[
\oint_{|z|=1}\frac{e^z}{z^3}dz=\frac{2\pi i}{2!}\cdot \frac{2!}{2\pi i}\oint_{|z|=1}\frac{e^z}{z^3}dz = \pi i\cdot (e^z)^{(2)}|_{z=0}=\pi i.
\]
\end{proof}

(2)\begin{proof}$F(z)$ is univalent if and only if for any closed curve $C$, $\oint_Ce^z\left(\frac{1}{z}+\frac{a}{z^3}\right)dz=0$. When the interior of $C$ does not contain $0$, this is true by Cauchy integral theorem. So we only need to consider the case where the interior of $C$ contains $0$. Without loss of generality, assume $C=\{|z|=1\}$. Then by Cauchy integral formula
\[
\int_Ce^z\left(\frac{1}{z}+\frac{a}{z^3}\right)dz=\frac{2\pi i}{2!}[(z^2+a)e^z]^{(2)}|_{z=0}=(a+2)\pi i.
\]
So when $a=-2$, $F(z)$ is univalent.
 \end{proof}

\section{无穷级数}

\subsection{正文练习题}

\noindent 4.1. \begin{proof}(1) $a_n=\frac{1}{n\ln n}$. (2) $a_{2n}=\frac{1}{n^2+1}$, $a_{2n+1}=\frac{1}{n^2}$. (3) $a_{2n}=\frac{1}{n^3}$, $a_{2n+1}=\frac{1}{n^2}$. (4) $a_n=0$ if $n$ is even, $a_n=1$ if $n$ is odd; $b_n=1-a_n$.\end{proof}

\noindent 4.2. (1) \begin{proof} If $x=0$, the series is clearly convergent. If $x\ne 0$, then by noting the series is a geometric series, we can calculate it converges to $1$. To see the convergence is not uniform, note $\forall n\ge 1$
\[
\left|1-\sum_{k=1}^n\frac{x^2}{(1+x^2)^k}\right|=\left|1-\frac{x^2}{1+x^2}\cdot \frac{1-\frac{1}{(1+x^2)^n}}{1-\frac{1}{1+x^2}}\right|=\frac{1}{(1+x^2)^n}.
\]
No matter how big $n$ is, we can always find an $x>0$ (dependent on $n$), so that $\frac{1}{(1+x^2)^n}>\frac{1}{2}$. This shows the convergence is not uniform.
\end{proof}

(2) \begin{proof}
We note
\[
\left|\frac{(-1)^n}{n+x^2}+\frac{(-1)^{n+1}}{n+1+x^2}\right| =\frac{1}{(n+1+x^2)(n+x^2)}\le\frac{1}{(n+1)n}.
\]
So $\left|\sum_{n=N}^{\infty}\frac{(-1)^n}{n+x^2}\right|\le \sum_{n=N}^{\infty}\frac{1}{n(n+1)}$, which implies $\sum_{n=1}^{\infty}\frac{(-1)^n}{n+x^2}$ is uniformly convergent. But clearly, $\sum_{n=1}^{\infty}\frac{1}{n+x^2}=\infty$.
\end{proof}

\noindent 4.3. (1)\begin{proof}$\min\{R_1,R_2\}$. From the special case $a_n\equiv 0$ or $b_n\equiv 0$, we can see this radius cannot be improved. \end{proof}

(2) \begin{proof}By Cauchy's criterion, $R_1=\varliminf_{n\to \infty}|\frac{1}{a_n}|^{1/n}$ and $R_2=\varliminf_{n\to\infty}|\frac{1}{b_n}|^{1/n}$. Since for any positive sequence $(x_n)_{n\ge 1}$ and $(y_n)_{n\ge 1}$, $\varliminf_{n\to\infty}x_n\varliminf_{n\to\infty}y_n\le \varliminf_{n\to\infty}(x_ny_n)$, the radius of convergence $R$ for $\sum_{n=1}^{\infty}a_nb_nz^n$ satisfies
\[
R=\varliminf_{n\to\infty}\left|\frac{1}{a_nb_n}\right|^{1/n}\ge \varliminf_{n\to\infty}\left|\frac{1}{a_n}\right|^{1/n}\left|\frac{1}{b_n}\right|^{1/n}=R_1R_2.
\]
The special case where $a_n=b_n\equiv 1$ shows the result $R\ge R_1R_2$ cannot be improved.
\end{proof}

(3) \begin{proof}When $\lim_{n\to\infty}\left|\frac{1}{a_n}\right|^{1/n}$ exists, the radius of convergence is $\frac{1}{R_1}$. Otherwise, the best result we can obtain is $R=\varliminf_{n\to\infty}|a_n|^{1/n}=\frac{1}{\varliminf_{n\to\infty}\left|\frac{1}{a_n}\right|^{1/n}}$.\end{proof}

(4) \begin{proof}
\[
\varliminf_{n\to\infty}\left|\frac{1}{\frac{b_n}{a_n}}\right|^{1/n} \ge \varliminf_{n\to\infty}\left|\frac{1}{b_n}\right|^{1/n}\varliminf_{n\to\infty}|a_n|^{1/n}\ge R_2\frac{1}{\varlimsup_{n\to\infty}\left|\frac{1}{a_n}\right|^{1/n}}.
\]
When $\lim_{n\to\infty}\left|\frac{1}{a_n}\right|^{1/n}$ exists, the right side of the above inequality becomes $\frac{R_2}{R_1}$.
\end{proof}

\subsection{章末习题}

\noindent 1. (1) \begin{proof} $\sum_{n=2}^{\infty}\left|\frac{i^n}{\ln n}\right|=\sum_{n=2}^{\infty}\frac{1}{\ln n} >\sum_{n=2}^{\infty}\frac{1}{n}=\infty$. So the series is not absolutely convergent. Meanwhile,
\begin{eqnarray*}
\sum_{n=2}^{\infty}\frac{i^n}{\ln n} &=& \sum_{k=0}^{\infty}-\left[\left(\frac{1}{\ln(4k+2)}-\frac{1}{\ln(4k+4)}\right)+i\left(\frac{1}{\ln(4k+3)}-\frac{1}{\ln(4k+5)}\right)\right].
\end{eqnarray*}
By Leibnitz's criterion for the convergence of alternating series,
\[
\sum_{k=0}^{\infty}-\left[\frac{1}{\ln(4k+2)}-\frac{1}{\ln(4k+4)}\right]=\sum_{m=1}^{\infty}\frac{(-1)^m}{\ln(2m)}
\]
is convergent. Similarly,
\[
\sum_{k=0}^{\infty}-i\left[\frac{1}{\ln(4k+3)}-\frac{1}{\ln(4k+5)}\right]=\sum_{m=1}^{\infty}i\frac{(-1)^m}{\ln(2m+1)}
\]
is convergent. Therefore $\sum_{n=2}^{\infty}\frac{i^n}{\ln n}$ is convergent.
\end{proof}

(2) \begin{proof}By argument similar to that of (1), $\sum_{n=1}^{\infty}\frac{i^n}{n}$ is convergent, but not absolutely convergent.\end{proof}

\noindent 2. \begin{proof}Suppose $|z|<1$. Then $\frac{z^{n-1}}{(1-z^n)(1-z^{n+1})}\sim z^{n-1}$. So $\sum_{n=1}^{\infty}\frac{z^{n-1}}{(1-z^n)(1-z^{n+1})}$ is absolutely convergent on $|z|<1$. To find the sum function in this case, note for $|z|<1$,
\begin{eqnarray*}
\sum_{n=1}^{\infty}\frac{z^{n-1}}{(1-z^n)(1-z^{n+1})}&=&\sum_{n=1}^{\infty}z^{n-1}\frac{1}{1-z}\left(\frac{1}{1-z^n}-\frac{z}{1-z^{n+1}}\right)\\
&=&\frac{1}{1-z}\left(\sum_{n=1}^{\infty}\frac{z^{n-1}}{1-z^n}-\sum_{n=1}^{\infty}\frac{z^n}{1-z^{n+1}}\right)\\
&=&\frac{1}{(1-z)^2}.
\end{eqnarray*}

Now suppose $|z|>1$. Then  $\frac{z^{n-1}}{(1-z^n)(1-z^{n+1})}=\frac{1}{z^{n+2}(1-z^{-n})(1-z^{-(n+1)})}\sim \frac{1}{z^{n+2}}$. So $\sum_{n=1}^{\infty}\frac{z^{n-1}}{(1-z^n)(1-z^{n+1})}$ is absolutely convergent on $|z|>1$. To find the sum function in this case, note for $|z|>1$,
\begin{eqnarray*}
\sum_{n=1}^{\infty}\frac{z^{n-1}}{(1-z^n)(1-z^{n+1})}&=& \sum_{n=1}^{\infty}\frac{1}{z^{n+2}(1-z^{-n})(1-z^{-(n+1)})} \\
&=& \sum_{n=1}^{\infty}\frac{1}{z^{n+2}}\frac{1}{z-1}\left(\frac{z}{1-z^{-n}}-\frac{1}{1-z^{-(n+1)}}\right) \\
&=& \frac{1}{z-1}\left(\sum_{n=1}^{\infty}\frac{z^{-(n+1)}}{1-z^{-n}} - \sum_{n=1}^{\infty}\frac{z^{-(n+2)}}{1-z^{-(n+1)}}\right)\\
&=& \frac{1}{z-1}\frac{z^{-2}}{1-z^{-1}}\\
&=& \frac{1}{z(z-1)^2}.
\end{eqnarray*}
\end{proof}

\noindent 3. (1)\begin{proof}
Since for $|z|>1$, $|z|^{n!}>|z|$ and $\sum_{n=1}^{\infty}|z|=\infty$, the series is divergent on $|z|>1$. Since for $|z|<1$, $|z|^{n!}<|z|^n$ and $\sum_{n=1}^{\infty}|z|^n<\infty$, the series is convergent on $|z|<1$.
\end{proof}

(2) \begin{proof}
The series converges over $\left|\frac{z}{1+z}\right|<1$. Solving the inequality $\left|\frac{x+yi}{1+x+yi}\right|<1$ gives $x>-\frac{1}{2}$.
\end{proof}

(3) \begin{proof} $|z^2+2z+2|<1$.\end{proof}

(4) \begin{proof}Suppose $z=x+yi$. Then
\begin{eqnarray*}
\sin z =\frac{e^{iz}-e^{-iz}}{2i}=\frac{1}{2i}(e^{-y+ix}-e^{y-ix})=\frac{1}{2i}\left[e^{ix}(e^{-y}-e^y)+e^y(e^{ix}-e^{-ix})\right]=\frac{1}{2i}e^{ix}(e^{-y}-e^y)+e^y\sin x.
\end{eqnarray*}
As $z\to 0$, $e^y\sin x\sim x$ and $\frac{1}{2i}e^{ix}(e^{-y}-e^y)\sim \frac{1}{2i}(-2y)=yi$. So $\sum_{n=1}^{\infty}2^n\left[\frac{1}{2i}e^{i\frac{x}{3^n}}(e^{-\frac{y}{3^n}}-e^{\frac{y}{3^n}})\right]$ is convergent if and only if $\sum_{n=1}^{\infty}2^n\frac{y}{3^n}i$ is convergent and $\sum_{n=1}^{\infty}2^ne^{\frac{y}{3^n}}\sin\frac{x}{3^n}$ is convergent if and only if $\sum_{n=1}^{\infty}2^n\frac{x}{3^n}$ is convergent. Since $\sum_{n=1}^{\infty}2^n\sin\frac{z}{3^n}=\sum_{n=1}^{\infty}2^n\left[\frac{1}{2i}e^{i\frac{x}{3^n}}(e^{-\frac{y}{3^n}}-e^{\frac{y}{3^n}})+e^{\frac{y}{3^n}}\sin\frac{x}{3^n}\right]$, and since for arbitrary $x,y\in\mathbb R$, $\sum_{n=1}^{\infty}\left(\frac{2}{3}\right)^nyi$ and $\sum_{n=1}^{\infty}\left(\frac{2}{3}\right)^nx$ both converge, we conclude $\forall z \in\mathbb C$, $\sum_{n=1}^{\infty}2^n\sin\frac{z}{3^n}$ is convergent.
\end{proof}

\noindent 4. \begin{proof}
At $z=1$, the sum is equal to $\sum_{n=0}^{\infty}\left(\frac{1}{n+1}-\frac{2}{2n+3}\right)=\sum_{n=0}^{\infty}\left(\frac{2}{2n+2}-\frac{2}{2n+3}\right)=2\left(\frac{1}{2}-\frac{1}{3}+\frac{1}{4}-\frac{1}{5}+\cdots\right)=2(1-\ln 2)$. For $|z|<1$, $\sum_{n=0}^{\infty}\frac{z^{n+1}}{n+1}=\sum_{n=0}^{\infty}\int_0^zw^ndw=\int_0^z(\sum_{n=0}^{\infty}w^n)dw=\int_0^z\frac{dw}{1-w}$. Similarly, $\sum_{n=0}^{\infty}\frac{2z^{2n+3}}{2n+3}=\sum_{n=0}^{\infty}\int_0^z2w^{2n+2}dw=\int_0^z2w^2\frac{dw}{1-w^2}=-2z+\int_0^z\frac{dw}{1-w}+\int_0^z\frac{dw}{1+w}$. So $\sum_{n=0}^{\infty}\left(\frac{z^{n+1}}{n+1}-\frac{2z^{2n+3}}{2n+3}\right) = 2z-\ln (1+z)$ for $|z|<1$. In summary,
\begin{eqnarray*}
S(z)=\sum_{n=0}^{\infty}\left(\frac{z^{n+1}}{n+1}-\frac{2z^{2n+3}}{2n+3}\right)=\begin{cases}
2z-\ln(1+z), & |z|<1\\ 2-2\ln 2, & z=1.
\end{cases}
\end{eqnarray*}
\end{proof}

\noindent 5. \begin{proof}
\[
\ln(1-z)=\int_0^z\frac{-dw}{1-w}=-\int_0^z\sum_{n=0}^{\infty}w^ndw=-\sum_{n=0}^{\infty}\frac{z^{n+1}}{n+1}.
\]
So for any $r\in (-1,1)$, by letting $z=-re^{i\theta}$, we get
\[
\ln(1+re^{i\theta})=-\sum_{n=1}^{\infty}\frac{(-re^{i\theta})^n}{n}=\sum_{n=1}^{\infty}\frac{(-1)^{n+1}}{n}r^ne^{in\theta}=\sum_{n=1}^{\infty}\frac{(-1)^{n+1}}{n}(r^n\cos n\theta + i r^n\sin n \theta).
\]
Suppose $\ln(1+re^{i\theta})=x+yi$. Then $e^x\cos y = 1+r\cos \theta$ and $e^x\sin y = r\sin\theta$. Solving for $x$ and $y$ gives $x=\frac{1}{2}\ln(1+2r\cos\theta+r^2)$, $y=\arctan \frac{r\sin\theta}{1+r\cos\theta}$. Matching the real part and imaginary part in the equality
\[
\frac{1}{2}\ln(1+2r\cos\theta+r^2)+i\arctan\frac{r\sin\theta}{1+r\cos\theta}=\sum_{n=1}^{\infty}\frac{(-1)^{n+1}}{n}(r^n\cos n\theta + i r^n\sin n \theta)
\]
gives the desired equalities.
\end{proof}

\noindent 6. (1) \begin{proof}In the first equality of Problem 5, replace $\theta$ with $\theta+\pi$ and let $r\to 1$, we get
\[
\cos\theta+\frac{\cos2\theta}{2}+\frac{\cos3\theta}{3}+\cdots = -\frac{1}{2}\ln(2-2\cos\theta)=-\ln(2\sin\frac{\theta}{2}).
\]
To justify the process of taking limit, according to Abel's second theorem, it suffices to show the series $\sum_{n=1}^{\infty}\frac{\cos n\theta}{n}$ is convergent. Since $(\frac{1}{n})_{n\ge 1}$ is monotone decreasing to $0$, by Dirichlet's criterion, it suffices to show $\sum_{k=1}^{n}\cos k\theta$ is bounded for any $n$. This can be seen by noting $\frac{1-e^{i(n+1)\theta}}{1-e^{i\theta}}=\sum_{k=0}^ne^{ik\theta}$. After writing both sides in terms of real and imaginary parts, we have by comparison
\[
\left|\sum_{k=1}^n\cos k\theta\right| =\left|\frac{\cos(n+2)\theta-\cos(n+1)\theta}{2(1-\cos\theta)}-\frac{1}{2}\right|\le \frac{1}{1-\cos\theta}+\frac{1}{2}.
\]

Similarly, in the second equality of Problem 5, replace $\theta$ with $\theta+\pi$ and let $r\to 1$, we get
\[
\sum_{n=1}^{\infty}\frac{\sin n\theta}{n}=\arctan\frac{\sin\theta}{1-\cos\theta}=\arctan \cot\frac{\theta}{2}=\frac{1}{2}(\pi-\theta).
\]
\end{proof}

(2) \begin{proof} Let $r\to 1$ in the first equality of Problem 5, we have $\sum_{n=1}^{\infty}(-1)^{n+1}\frac{\cos n \theta}{n}=\frac{1}{2}\ln(2+2\cos\theta)$. In (1), we have shown $\sum_{n=1}^{\infty}\frac{\cos n \theta}{n}=-\ln(2\sin\frac{\theta}{2})$. Add up these two equalities and divide both sides by $2$, we get (note $1+\cos\theta=2\cos^2\frac{\theta}{2}$)
\[
\cos\theta + \frac{\cos 3 \theta}{3}+\frac{\cos 5\theta}{5}+\frac{\cos 7\theta}{7}+\cdots = \frac{1}{2}\ln\cot\frac{\theta}{2}.
\]
Similar argument gives (note $\sin\theta=2\sin\frac{\theta}{2}\cos\frac{\theta}{2}$)
\begin{eqnarray*}
& & \sin\theta+\frac{\sin3\theta}{3}+\frac{\sin5\theta}{5}+\frac{\sin7\theta}{7}+\cdots\\
&=& \frac{1}{2}\left(\arctan\frac{\sin\theta}{1+\cos\theta}+\frac{1}{2}(\pi-\theta)\right)\\
&=&\frac{1}{2}\left[\arctan\left(\tan\frac{\theta}{2}\right)+\frac{1}{2}(\pi-\theta)\right]\\
&=&\frac{\pi}{4}.
\end{eqnarray*}
\end{proof}

(3) \begin{proof} Integrating from $\frac{\pi}{2}$ to $\theta$ on both sides of
\[
\sin\theta+\frac{\sin3\theta}{3}+\frac{\sin5\theta}{5}+\frac{\sin7\theta}{7}+\cdots = \frac{\pi}{4},
\]we get
\[
\cos\theta + \frac{\cos3\theta}{3^2}+\frac{\cos5\theta}{5^2}+\frac{\cos7\theta}{7^2}+\cdots = \frac{\pi}{4}\left(\frac{\pi}{2}-\theta\right).
\]
Replace $\theta$ with $\theta+\frac{\pi}{2}$, we get
\[
-\sin\theta + \frac{\sin3\theta}{3^2}-\frac{\sin5\theta}{5^2}+\frac{\sin7\theta}{7^2}-\cdots = \frac{\pi}{4}(-\theta).
\]
Therefore
\[
\sin\theta - \frac{\sin3\theta}{3^2}+\frac{\sin5\theta}{5^2}-\frac{\sin7\theta}{7^2}+\cdots = \frac{\pi}{4}\theta.
\]
 \end{proof}

(4) \begin{proof} From part (2), we know
\[
\cos\theta + \frac{\cos 3 \theta}{3}+\frac{\cos 5\theta}{5}+\frac{\cos 7\theta}{7}+\cdots = \frac{1}{2}\ln\cot\frac{\theta}{2}.
\]
Form part (3), we obtain by differentiation
\[
\cos\theta - \frac{\cos3\theta}{3}+\frac{\cos5\theta}{5}-\frac{\cos7\theta}{7}+\cdots = \frac{\pi}{4}.
\]
\end{proof}

\noindent 7. \begin{proof}
(1) $R=\varliminf_{n\to\infty}|n^n|^{1/n}=+\infty$.

(2) $R=\infty$.

(3) $R=\lim_{n\to\infty}\frac{n!/n^n}{(n+1)!/(n+1)^{n+1}}=\lim_{n\to\infty}\frac{1}{n+1}(1+\frac{1}{n})^n\cdot (n+1)=e$.

(4) $R=\infty$.

(5) $R=\lim_{n\to\infty}(n^{-\ln n})^{1/n} = \lim_{n\to\infty}(e^{\ln n\cdot \ln n^{-1}})^{1/n} = \lim_{n\to\infty}e^{-\frac{(\ln n)^2}{n}}=1$.

(6) $R=2$.

(7) $R=\lim_{n\to\infty}\frac{\ln n^n/n!}{\ln(n+1)^{n+1}/(n+1)!}=\lim_{n\to\infty}(n+1)\frac{n\ln n}{(n+1)\ln (n+1)}=\infty$.

(8) $R=\lim_{n\to\infty}|(1-\frac{1}{n})^{-n}|^{1/n}=\lim_{n\to\infty}(1-\frac{1}{n})^{-1}=1$.
\end{proof}


\section{解析函数的局域性展开}

\subsection{正文练习题}

\noindent 5.1. \begin{proof} If $g(z)\equiv 0$, the problem is
ill-posed; if $f(z)\equiv 0$, there is nothing to prove. So without
loss of generality, we can assume $z_0$ is a zero of $f(z)$ of order
$n$ and a zero of $g(z)$ of order $m$. Then $f(z)$ can be written as
$f(z)=(z-z_0)^n\phi(z)$ where $\phi(z)$ is analytic at $z_0$ and is
non-zero in a neighborhood of $z_0$. Similarly, $g(z)$ can be
written as $g(z)=(z-z_0)^m\psi(z)$ where $\psi(z)$ is analytic at
$z_0$ and is non-zero in a neighborhood of $z_0$. Then we have
\begin{eqnarray*}
\frac{f(z)}{g(z)} =
\begin{cases}
(z-z_0)^{n-m}\cdot\frac{\phi(z)}{\psi(z)} & \mbox{if $n>m$} \\
\frac{\phi(z)}{\psi(z)}                   & \mbox{if $n=m$} \\
\frac{1}{(z-z_0)^{m-n}}\cdot\frac{\phi(z)}{\psi(z)} & \mbox{if
$n<m$},
\end{cases}
\end{eqnarray*}
which implies
\begin{eqnarray*}
\lim_{z\to z_0}\frac{f(z)}{g(z)} =
\begin{cases}
0 & \mbox{if $n>m$} \\
\frac{\phi(z_0)}{\psi(z_0)} & \mbox{if $n=m$} \\
\infty & \mbox{if $n<m$}.
\end{cases}
\end{eqnarray*}
Similarly, we have
\begin{eqnarray*}
\frac{f'(z)}{g'(z)} =
\frac{n(z-z_0)^{n-1}\phi(z)+(z-z_0)^n\phi'(z)}{m(z-z_0)^{m-1}\psi(z)+(z-z_0)^m\psi'(z)}
=
\begin{cases}
\frac{n(z-z_0)^{n-m}\phi(z)+(z-z_0)^{n-m+1}\phi'(z)}{m\psi(z)+(z-z_0)\psi'(z)} & \mbox{if $n>m$} \\
\frac{n\phi(z)+(z-z_0)\phi'(z)}{n\psi(z)+(z-z_0)\psi'(z)}                   & \mbox{if $n=m$} \\
\frac{n\phi(z)+(z-z_0)\phi'(z)}{m(z-z_0)^{m-n}\psi(z)+(z-z_0)^{m-n+1}\psi'(z)}
& \mbox{if $n<m$},
\end{cases}
\end{eqnarray*}
which implies
\begin{eqnarray*}
\lim_{z\to z_0}\frac{f'(z)}{g'(z)} =
\begin{cases}
0 & \mbox{if $n>m$} \\
\frac{\phi(z_0)}{\psi(z_0)} & \mbox{if $n=m$} \\
\infty & \mbox{if $n<m$}.
\end{cases}
\end{eqnarray*}
Therefore we must have
\[
\lim_{z\to z_0}\frac{f(z)}{g(z)}=\lim_{z\to z_0}\frac{f'(z)}{g'(z)}.
\]

\end{proof}

\noindent 5.2. \begin{proof}The solution is similar to that of
problem 5.1 once we write $f(z)$ and $g(z)$ as
$\frac{\phi(z)}{(z-z_0)^n}$ and $\frac{\psi(z)}{(z-z_0)^m}$,
respectively.
\end{proof}

\noindent 5.3. \begin{proof} $z_n=\frac{1}{(2n+1)\pi i}$,
$z_n=\frac{1}{2(n+1)\pi i}$, $z_n=\frac{1}{(2n+\frac{1}{2})\pi i}$,
and $z_n=\frac{1}{(2n-\frac{1}{2})\pi i}$. \end{proof}

\noindent 5.4. \begin{proof} By considering the singularity of $0$
for $f(1/z)$, we can conclude the following results: if $\infty$ is
a removable singularity of $f(z)$, then $f(z)$ has the form of
$\sum_{n=0}^{\infty}\frac{a_n}{z^n}$ in a neighborhood of $\infty$;
if $\infty$ is a pole of $f(z)$, then $f(z)$ has the form of
$\sum_{n=0}^{\infty}\frac{a_n}{z^n}+\sum_{k=1}^mb_kz^k$ for some
positive integer $m$ in a neighborhood of $\infty$ and $b_m\ne 0$;
finally, if $\infty$ is an essential singularity of $f(z)$, then
$f(z)$ has the form of
$\sum_{n=0}^{\infty}\frac{a_n}{z^n}+\sum_{k=1}^{\infty}b_kz^k$ where
infinitely many $b_k$'s are non-zero.
\end{proof}

\noindent 5.5. \begin{proof} In formula (5.48), we already obtained
\[
\mbox{sech} \frac{z}{2} =
\sum_{n=0}^{\infty}\frac{E_n}{n!}\left(\frac{z}{2}\right)^n,\;
|z|<\pi.
\]
Replace $\frac{z}{2}$ by $iw$ with $|w|<\frac{\pi}{2}$.
\end{proof}

\subsection{章末习题}

\noindent 1. (1) \begin{proof}$1-z^2=1-[(z-1)+1]^2 =
-2(z-1)-(z-1)^2$. Radius of convergence $R= \infty$.
\end{proof}

(2) \begin{proof} $\sin z =
(-1)^n\sin(z-n\pi)=(-1)^n\sum_{k=0}^{\infty}\frac{(-1)^k}{(2k+1)!}(z-n\pi)^{2k+1}
= \sum_{k=0}^{\infty}\frac{(-1)^{n+k}}{(2k+1)!}(z-n\pi)^{2k+1}$.
Radius of convergence $R=\infty$.
\end{proof}

(3) \begin{proof}Solve the equation $1+z+z^2=0$ to get two roots:
$z_1=e^{\frac{2}{3}\pi i}=\frac{-1+\sqrt{3}i}{2}$ and
$z_2=e^{\frac{4}{3}\pi i}=\frac{-1-\sqrt{3}i}{2}$. Then
\begin{eqnarray*}
\frac{1}{1+z+z^2} &=& \frac{1}{z_1-z_2}\left ( \frac{1}{z-z_1} -
\frac{1}{z-z_2} \right) \\
&=&\frac{-1}{\sqrt{3}i}\left(\frac{1}{z_1}\frac{1}{1-\frac{z}{z_1}} - \frac{1}{z_2}\frac{1}{1-\frac{z}{z_2}}\right)\\
&=&
\frac{i}{\sqrt{3}}\left[\frac{1}{z_1}\sum_{n=0}^{\infty}\left(\frac{z}{z_1}\right)^n
- \frac{1}{z_2}\sum_{n=0}^{\infty}\left(\frac{z}{z_2}\right)^n
\right] \\
&=& \frac{i}{\sqrt{3}} \sum_{n=0}^{\infty}z^n\left(
e^{-\frac{2}{3}(n+1)\pi i} - e^{-\frac{4}{3}(n+1)\pi i}\right) \\
&=&
-\frac{2}{\sqrt{3}}\sum_{n=0}^{\infty}z^n\frac{e^{-\frac{2}{3}(n+1)\pi
i} - e^{\frac{2}{3}(n+1)\pi i}}{2i}\\
&=& \sum_{n=0}^{\infty}
\frac{\sin\frac{2}{3}(n+1)\pi}{\sin\frac{2}{3}\pi}z^n.
\end{eqnarray*}
The radius of convergence $R=1$.
\end{proof}

(4) \begin{proof}
\[
\frac{\sin z}{1-z}=\sum_{k=0}^{\infty}\frac{(-1)^k}{(2k+1)!}z^{2k+1}
\cdot \sum_{m=0}^{\infty}z^m =
\sum_{n=1}^{\infty}\left[\sum_{2k+1+m=n}\frac{(-1)^k}{(2k+1)!}\right]z^n
=
\sum_{n=0}^{\infty}\left[\sum_{k=0}^{[(n-1)/2]}\frac{(-1)^k}{(2k+1)!}\right]
z^n.
\]
The radius of convergence $R=1$. \end{proof}

(5) \begin{proof} It's clear that $1$ is a singularity. So the
radius of convergence $R=1$ and the function is analytic inside the
unit disc $D(0,1)$. Suppose
$e^{\frac{1}{1-z}}=\sum_{k=0}^{\infty}a_kz^k$. Differentiate both
sides and we get $\frac{e^{\frac{1}{1-z}}}{(1-z)^2} =
\sum_{k=1}^{\infty}ka_kz^{k-1}$. Therefore
\[
\sum_{k=0}^{\infty}a_kz^k = e^{\frac{1}{1-z}} =
(1-2z+z^2)\sum_{k=1}^{\infty}ka_k z^{k-1} = \sum_{n=0}^{\infty}
(n+1)a_{n+1}z^n - 2\sum_{n=1}^{\infty} na_nz^n +
\sum_{n=2}^{\infty}(n-1)a_{n-1}z^n.
\]
So $a_0=a_1$, $a_1=2a_2-2a_1$ and
$a_{n+1}=\frac{2n+1}{n+1}a_n-\frac{n-1}{n+1}a_{n-1}$ for $n\ge 2$.
By recursion, we get $a_0=e$, $a_1=e$, $a_2=\frac{3}{2}e$, $a_3=
\frac{13}{6}e$, and $a_4=\frac{73}{24}e$. So
$e^{\frac{1}{1-z}}=e\left(1+z+\frac{3}{2}z^2+\frac{13}{6}z^3+\frac{73}{24}z^4+O(z^5)\right)$.
\end{proof}

\noindent 2. (1) \begin{proof}Since $z=0$ is a singularity for $\ln
z$, the radius of convergence $R=1$. Using the power series of
$\ln(1-z)$, we have
\[
\ln z = \ln [i(1-i(z-i))] = \ln i
-\sum_{n=1}^{\infty}\frac{[i(z-i)]^n}{n} = \frac{\pi}{2}i
-\sum_{n=1}^{\infty}\frac{i^n}{n}(z-i)^n.
\]
\end{proof}

(2) \begin{proof} The solution is similar to that of problem (1),
only that $\ln i = -\frac{3}{2}\pi i$. \end{proof}

(3) \begin{proof} We note $\arctan z$ is an odd function, so its
series expansion must have the form
$\sum_{n=0}^{\infty}a_{2n+1}z^{2n+1}$. Differentiate both sides and
we get $\frac{1}{1+z^2}=\sum_{n=0}^{\infty}a_{2n+1}(2n+1)z^{2n}$. So
\[
1=(1+z^2)\sum_{n=0}^{\infty}a_{2n+1}(2n+1)z^{2n} =
\sum_{n=0}^{\infty}a_{2n+1}(2n+1)z^{2n} +
\sum_{n=1}^{\infty}a_{2n-1}(2n-1)z^{2n}.
\]
Therefore $a_1=1$ and $a_{2n+1}=-\frac{2n-1}{2n+1}a_{2n-1}$ for
$n\ge 1$. This implies $a_{2n+1}=\frac{(-1)^n}{2n+1}$. Since we have
used $\frac{1}{1+z^2}$ which has $\pm i$ as poles, the radius of
convergence $R=1$.
\end{proof}

(4) \begin{proof}
\begin{eqnarray*}
\ln\frac{1+z}{1-z} &=&
\ln(-1)+\ln\left(1+\frac{1}{z}\right)-\ln\left(1-\frac{1}{z}\right)\\
&=& (2k+1)\pi i + \sum_{n=1}^{\infty}\left[(-1)^{n+1}+1\right]\frac{z^{-n}}{n}\\
&=& (2k+1)\pi i + \sum_{n=0}^{\infty}\frac{2}{2n+1}z^{-(2n+1)}.
\end{eqnarray*}
We used the assumption $\frac{1}{|z|}<1$ in the above derivation, so
the domain of convergence is $|z|>1$.
\end{proof}

\noindent 3. (1) \begin{proof} Let
$f(z)=\sum_{n=0}^{\infty}\frac{z^{2n+1}}{2n+1}$. Then
\[
f'(z) =
\sum_{n=0}^{\infty}z^{2n}=\frac{1}{1-z^2}=\frac{1}{2}\left(\frac{1}{1-z}+\frac{1}{1+z}\right).
\]
Therefore $f(z)=\frac{1}{2}\ln\frac{1+z}{1-z}$ with $f(0)=0$.
\end{proof}

(2) \begin{proof}Inspired by definition of trigonometric functions
via exponential function, we have
\[
\sum_{n=0}^{\infty}\frac{z^{2n}}{(2n)!} =
\frac{1}{2}\left[\sum_{n=0}^{\infty}\frac{z^n}{n!}+\sum_{n=0}^{\infty}\frac{(-1)^nz^n}{n!}\right]=\frac{1}{2}(e^z+e^{-z})=\cosh
z.
\]
\end{proof}


\noindent 4. (1) \begin{proof} Assume
$\frac{1}{z^2(z-1)}=\sum_{n=-\infty}^{\infty}a_n(z-1)^n$. Then by
Theorem 5.4,
\[
a_n = \frac{1}{2\pi i}\int_{|z-1|=\rho}\frac{1}{z^2(z-1)}\cdot
\frac{dz}{(z-1)^{n+1}} = \frac{1}{2\pi
i}\int_{|z-1|=\rho}\frac{1}{z^2}\cdot\frac{dz}{(z-1)^{n+2}}.
\]
If $n\le -2$, by Cauchy's theorem, $a_n=0$. If $n=-1$, by Cauchy's
integral formula, $a_n=\frac{1}{z^2}|_{z=1}=1$. If $n \ge 0$, by
Cauchy's integral formula
\[
a_n =
\frac{1}{(n+1)!}\left.\frac{d^{n+1}}{dz^{n+1}}(z^{-2})\right|_{z=1}=\left.\frac{1}{(n+1)!}(-2)(-3)\cdots[-(n+2)]z^{-(n+3)}\right|_{z=1}
= (n+2)(-1)^{n+1}.
\]
So $\frac{1}{z^2(z-1)}=\sum_{n =
0}^{\infty}(n+2)(-1)^{n+1}(z-1)^n+(z-1)^{-1} = \sum_{n =
-1}^{\infty}(-1)^{n+1}(n+2)(z-1)^n$.
 \end{proof}

(2) \begin{proof}
\[
\frac{1}{z^2(z-1)} = \frac{1}{z^3}\cdot \frac{1}{1-\frac{1}{z}} =
\frac{1}{z^3}\sum_{n=0}^{\infty}\left(\frac{1}{z}\right)^n =
\sum_{n=3}^{\infty}z^{-n}.
\]
 \end{proof}

(3) \begin{proof}
\begin{eqnarray*}
\frac{1}{z^2-3z+2} &=& \frac{1}{z-2}-\frac{1}{z-1} = -\frac{1}{2(1-\frac{z}{2})} - \frac{1}{z(1-\frac{1}{z})}\\
 &=& -\frac{1}{2}\sum_{n=0}^{\infty}\left(\frac{z}{2}\right)^n - \sum_{n=0}^{\infty}\frac{1}{z^{n+1}}=-\sum_{n=-1}^{-\infty}z^n-\sum_{n=0}^{\infty}\frac{z^n}{2^{n+1}}.
\end{eqnarray*}
\end{proof}

(4) \begin{proof}
\[
\frac{1}{z^2-3z+2}=\frac{1}{z-2}-\frac{1}{z-1}=\frac{1}{z}\left(\frac{1}{1-\frac{2}{z}}
- \frac{1}{1-\frac{1}{z}}\right) =
\sum_{n=1}^{\infty}(2^{n-1}-1)z^{-n}.
\]
\end{proof}

(5) \begin{proof}
\begin{eqnarray*}
\frac{(z-1)(z-2)}{(z-3)(z-4)} &=& (z^2-3z+2)\left(-\frac{1}{4(1-\frac{z}{4})} -\frac{1}{z(1-\frac{3}{z})}\right) \\
&=& (z^2-3z+2)\left[-\frac{1}{4}\sum_{n=0}^{\infty}\left(\frac{z}{4}\right)^n-\frac{1}{z}\sum_{n=0}^{\infty}\left(\frac{3}{z}\right)^n\right]\\
&=& 1-\frac{3}{2}\sum_{n=0}^{\infty}\frac{z^n}{2^{2n}} -
2\sum_{n=-1}^{-\infty}\frac{z^n}{3^{n+1}}.
\end{eqnarray*}
\end{proof}

(6)\begin{proof}
\begin{eqnarray*}
\frac{(z-1)(z-2)}{(z-3)(z-4)}&=&(z^2-3z+2)\left(\frac{1}{z-4}-\frac{1}{z-3}\right)\\
 &=& \left(z-3+\frac{2}{z}\right)\left[\sum_{n=0}^{\infty}\left(\frac{4}{z}\right)^n - \sum_{n=0}^{\infty}\left(\frac{3}{z}\right)^n\right]\\
 &=& 1+\sum_{n=1}^{\infty}(3\cdot 2^{2n-1}-2\cdot 3^{n-1})z^{-n}.
\end{eqnarray*}
\end{proof}

\noindent 6. (1)
\begin{proof}$\frac{1}{z^2+a^2}=\frac{1}{2ai}\left[\frac{1}{z-ai}-\frac{1}{z+ai}\right]$.
So $\pm ai$ are poles of order 1. Since
$\lim_{z\to\infty}\frac{1}{z^2+a^2}=0$, $\infty$ is a removable
singularity.
 \end{proof}

 (2) \begin{proof}
 \[
 \frac{\cos az}{z^2}= \frac{\sum_{n=0}^{\infty}\frac{(-1)^na^{2n}}{(2n)!}z^{2n}}{z^2}=\sum_{n=0}^{\infty}\frac{(-1)^na^{2n}}{(2n)!}z^{2n-2}.
 \]
 So $0$ is a pole of order 2 and $\infty$ is an essential singularity.
 \end{proof}

(3) \begin{proof}
\[
\frac{\cos az-\cos bz}{z^2} =
\frac{1}{z^2}\sum_{n=0}^{\infty}\frac{(-1)^n}{(2n)!}\left[a^{2n}-b^{2n}\right]z^{2n}=\sum_{n=1}^{\infty}\frac{(-1)^n}{(2n)!}\left[a^{2n}-b^{2n}\right]z^{2n-2}.
\]
So $0$ is a removable singularity and $\infty$ is an essential
singularity.
\end{proof}

(4) \begin{proof}
\[
\frac{\sin z}{z^2}-\frac{1}{z}=\frac{\sin z -z}{z^2} =
\frac{\sum_{n=1}^{\infty}\frac{(-1)^n}{(2n+1)!}z^{2n+1}}{z^2}=\sum_{n=1}^{\infty}\frac{(-1)^n}{(2n+1)!}z^{2n-1}.
\]
So $0$ is a removable singularity and $\infty$ is an essential
singularity.
\end{proof}

(5)\begin{proof}
\[
\cos\frac{1}{\sqrt{z}}=\sum_{n=0}^{\infty}\frac{(-1)^n}{(2n)!}(\sqrt{z})^{2n}=\sum_{n=0}^{\infty}\frac{(-1)^n}{(2n)!}z^{n}.
\]
So $\infty$ is an essential singularity.
\end{proof}

(6)\begin{proof}
\[
\frac{\sqrt{z}}{\sin\sqrt{z}}=\frac{\sqrt{z}}{\sum_{n=0}^{\infty}\frac{(-1)^n}{(2n+1)!}z^n\cdot
\sqrt{z}}=\frac{1}{\sum_{n=0}^{\infty}\frac{(-1)^n}{(2n+1)!}z^n}.
\]
So $0$ is a removable singularity. Meanwhile, $(n\pi)^2$ ($n\in
\mathbb N$) are poles of order 1 and $\infty$ is a non-isolated
singularity.
 \end{proof}

(7) \begin{proof}If we stipulate $\ln z|_{z=1}\ne 0$, then $1$ is a
pole of order 1. If we stipulate $\ln z|_{z=1}=0$, then by
l'Hospital's rule for analytic functions (Exercise problem 5.1 and
5.2 in the text), we have
\[
\lim_{z\to 1}\frac{z-1}{\ln z -\ln 1} = \lim_{z\to 1} z = 1.
\]
So $\frac{1}{(z-1)\ln z}=\frac{1}{(z-1)^2}\cdot\frac{z-1}{\ln z}$
has $1$ as a pole of order 2. $\infty$ is a removable singularity.
 \end{proof}

(8) \begin{proof}
\[
\int_0^z\frac{\sinh\sqrt{\zeta}}{\sqrt{\zeta}}d\zeta = \int_0^z
\sum_{k=1}^{\infty}\frac{\zeta^{k-1}}{(2k-1)!} d\zeta
=\sum_{k=1}^{\infty}\frac{z^{k}}{k(2k-1)!}.
\]
So the function is an entire function with $\infty$ an essential
singularity.
 \end{proof}

\noindent 7. \begin{proof}$z^2$ has $\infty$ as a pole of order
2. $\frac{1}{z}$ has $\infty$ as a removable singularity.
$\frac{\cos z}{z}$ has $\infty$ as an essential singularity (see
problem 6(2)). $\frac{z}{\cos z}$ has $\infty$ as a non-isolated
singularity since $n\pi+\frac{\pi}{2}$ $(n\in \mathbb Z)$ are poles.
$\frac{z^2+1}{e^z}$ has $\infty$ as an essential singularity since
$\lim_{z\to\infty}\frac{z^2+1}{e^z}$ does not exist:
$\lim_{z\to\infty, z\in \mathbb R}\frac{z^2+1}{e^z}=0\ne \infty =
\lim_{z\to\infty, z\in i\mathbb R}\frac{z^2+1}{e^z}$.
$\exp\{-\frac{1}{z^2}\}$ has $\infty$ as a removable singularity
since $\lim_{z\to\infty}\exp\{-\frac{1}{z^2}\}=e^0=1$.
$\frac{1}{\cosh\sqrt{z}}$ has $-(n\pi+\frac{\pi}{2})^2$ as poles
$(n\in \mathbb Z)$, so it has $\infty$ as a non-isolated
singularity. The function
$\sqrt{\left(\frac{1}{z}-1\right)\left(\frac{1}{z}-2\right)}=\frac{\sqrt{(1-z)(1-2z)}}{z}$
has $0$ as a pole of order 1, so the function $\sqrt{(z-1)(z-2)}$
has $\infty$ as a pole of order 1.\end{proof}

\noindent 8. \begin{proof} The series $\sum_{n=0}^{\infty}(\alpha
z)^n$ is convergent in $U_1=\{z: |z|<\frac{1}{|\alpha|}\}$. The
series
$\frac{1}{1-z}\sum_{n=0}^{\infty}(-1)^{n}\frac{[(1-\alpha)z]^n}{(1-z)^n}$
is convergent in $U_2=\{z: |1-\alpha||z|<|1-z|\}$. On $U_3=U_1\cap
U_2$, both of the series represent the analytic function
$\frac{1}{1-\alpha z}$. So the functions represented by these two
series are analytic continuation of each other. \end{proof}

\noindent 9. \begin{proof} Denote by $f(z)$ the analytic function
represented by the series in $|z|<1$ and by $g(z)$ the analytic
function represented by the series in $|z|>1$. Since the roots of
$z^n=1$ $(n\in \mathbb N)$ are singularities of the series and
consist of a dense subset of $\partial D(0,1)$, $f(z)$ and $g(z)$
cannot be analytic continuation of each other. Of course, we need to
verify that the series indeed converges to analytic functions both
in $|z|<1$ and in $|z|>1$. When $|z|\le \rho < 1$, we note
\[
\left|\frac{1}{1-z^{n+1}}-\frac{1}{1-z^n}\right|=\left|\frac{z^n(z-1)}{(1-z^{n+1})(1-z^n)}\right|
\le \frac{\rho^n}{(1-\rho^n)(1-\rho^{n+1})} \le
\frac{\rho^n}{(1-\rho)(1-\rho)}.
\]
By Weierstrass criterion, the series is uniformly convergent to an
analytic function on $|z|\le \rho$. So $f(z)$ is well-defined in
$|z|<1$. When $|z|\ge R>1$, we note
\begin{eqnarray*}
\left|\frac{1}{1-z^{n+1}}-\frac{1}{1-z^n}\right| &=& \left|\frac{1}{z^{n+1}(\frac{1}{z^{n+1}}-1)}-\frac{1}{z^n(\frac{1}{z^n}-1)} \right| \\
&\le& \frac{1}{R^{n+1}}\frac{1}{1-\frac{1}{R^{n+1}}} + \frac{1}{R^{n}}\frac{1}{1-\frac{1}{R^{n}}} \\
&\le& \frac{1}{R^{n+1}}\frac{1}{1-\frac{1}{R}} +
\frac{1}{R^{n}}\frac{1}{1-\frac{1}{R}}.
\end{eqnarray*}
By Weierstrass criterion, the series is uniformly convergent to an
analytic function on $|z|\ge R$. So $g(z)$ is well-defined in
$|z|>1$.
\end{proof}

\noindent 10. \begin{proof} Clearly the series is convergent in
$D(0,1)$ and is divergent at $z=1$. So its radius of convergence
$R=1$. Then $f(z)$ is analytic in $D(0,1)$ and $z=1$ is a
singularity of $f(z)$. We note
$f(z)=z+\sum_{n=1}^{\infty}z^{2^n}=z+\sum_{n=1}^{\infty}z^{2\cdot
2^{n-1}}=z+\sum_{n=1}^{\infty}(z^2)^{2^{n-1}}=z+f(z^2)$. Therefore,
we have
\[
f(z)=z+f(z^2)=z+z^2+f(z^4)=z+z^2+z^4+f(z^8)=\cdots.
\]
So the roots of equations $z^2=1$, $z^4=1$, $z^8=1$,$\cdots$,
$z^{2^n}=1$, $\cdots$, etc. are all singularities of $f$. These
roots form a dense subset of $\partial D(0,1)$, so $f(z)$ can not be
analytically continued to the outside of its circle of convergence
$D(0,1)$.
\end{proof}

\section{二阶线性常微分方程的幂级数解法}

\subsection{正文练习题}

\noindent 6.1. \begin{proof} It suffices to note we have the
following linear equations
\[
\begin{cases}
p(z)w_1'(z) + q(z) w_1(z) = - w_1''(z) \\
p(z)w_2'(z) + q(z) w_2(z) = - w_2''(z).
\end{cases}
\]
Then we can apply results in linear algebra (i.e. Cramer's rule).
\end{proof}

\subsection{章末习题}

\noindent 1. (1) \begin{proof} Let $w(z)=c_0w_1(z)+c_1w_2(z)$. Then
$w'(z)=c_0+c_1e^z$ and $w''(z)=c_1e^z$. Then
$w(z)-zw'(z)=c_1e^z-c_1ze^z=(1-z)w''$. So the differential equation
satisfied by $w(z)$ is
\[
(z-1)w'' - zw' + w = 0.
\]
\end{proof}

(2) \begin{proof} Let $w(z)=c_0w_1(z)+c_1w_2(z)$. Then
\[
w'=-\frac{c_0}{z^2}w_1+\frac{2c_1}{z^2}w_2.
\]
So $z^2w'=-c_0e^{\frac{1}{z}}+2c_1e^{-\frac{2}{z}}$ (*).
Differentiating both sides and multiply both sides by $z^2$, we get
$2z^3w'+z^4w''=c_0e^{\frac{1}{z}} + 4c_1e^{-\frac{2}{z}}$ (**).
Combining (*) and (**) gives us
$6c_1e^{-\frac{2}{z}}=(z^2+2z^3)w'+z^4w''$ and
$3c_0e^{\frac{1}{z}}=(2z^3-2z^2)w'+z^4w''$. So
\[
w=\frac{1}{3}[(2z^3-2z^2)w'+z^4w'']+\frac{1}{6}[(z^2+2z^3)w'+z^4w'']
= \left (z^3-\frac{z^2}{2} \right)w'+\frac{z^4}{2}w''.
\]
This is equivalent to $z^4w''+(2z^3-z^2)w'-2w=0$.
 \end{proof}

(3) \begin{proof} Let $w=c_0w_1+c_1w_2$. Then we note
$w_1'=\frac{a}{z^2}w_2$ and $w_2'=-\frac{a}{z^2}w_1$. So
$w'=c_0\cdot \frac{a}{z^2}w_2-c_1\cdot \frac{a}{z^2}w_1$.
Multiplying both sides by $z^2$, we get $z^2w'=c_0aw_2-c_1aw_1$ (*).
Differentiating both sides again, we have
$2zw'+z^2w''=c_0a\left(-\frac{a}{z^2}\right)w_1-c_1a\left(\frac{a}{z^2}\right)w_2$,
i.e. $2z^3w'+z^4w''=-c_0a^2w_1-c_1a^2w_2$ (**). Combining (*) and
(**), we can get
\begin{eqnarray*}
\begin{cases}
ac_1z^2w'+c_0(2z^3w'+z^4w'')=-(c_1^2+c_0^2)a^2w_1 \\
ac_0z^2w'-c_1(2z^3w'+z^4w'')=(c_0^2+c_1^2)a^2w_2.
\end{cases}
\end{eqnarray*}
Using the fact $w=c_0w_1+c_1w_2$, we can easily get
$z^4w''+2z^3w'+a^2w=0$.
\end{proof}

(4) \begin{proof} Let $w=c_0w_1+c_1w_2$. Then $(z^2-1)w =
c_0z^2+c_1z$. Differentiating both sides of the equation gives
$2zw+(z^2-1)w'=2c_0z+c_1$. Differentiate again, we get
$2w+2zw'+2zw'+(z^2-1)w''=2c_0$. Solving for $c_0$, $c_1$, and plug
the expressions into the equation $w=c_0w_1+c_1w_2$, we get
\[
z^2(z^2-1)w''+2z(z^2+1)w'-2w=0.
\]
\end{proof}

\noindent 2. (1) \begin{proof}$p(z)=z^2$ and $q(z)=0$ are both
analytic. So the solution $w(z)$ is analytic and assumes the form
$w(z)=\sum_{k=0}^{\infty}a_kz^k$. Then
\[
w''-z^2w=\sum_{k=2}^{\infty}a_k\cdot k(k-1)z^{k-2} -
\sum_{k=0}^{\infty}a_kz^{k+2} =
2a_2+6a_3z+\sum_{k=2}^{\infty}[a_{k+2}\cdot(k+2)(k+1)-a_{k-2}]z^k=0.
\]
Clearly, $a_0=w(0)$ and $a_1=w'(0)$. By the uniqueness of power
series representation of an analytic function, we have $a_2=a_3=0$,
and $a_{k+2}=\frac{a_{k-2}}{(k+2)(k+1)}$. So
\[
a_{4k}=\frac{a_{4(k-1)}}{4k\cdot (4k-1)} = \frac{a_0}{4k\cdot
(4k-1)\cdot \cdots \cdot 4\cdot 3} = \frac{a_0}{4^{2k} \cdot k!
\cdot
(k-\frac{1}{4})\cdots(1-\frac{1}{4})}=\frac{a_0\Gamma(\frac{3}{4})}{2^{4k}\cdot
k!\cdot \Gamma(k+\frac{3}{4})}
\]
and
\[
a_{4k+1} = \frac{a_{4(k-1)+1}}{(4k+1)\cdot 4k} =
\frac{a_1}{(4k+1)\cdot 4k \cdot \cdots \cdot 5\cdot 4} =
\frac{a_1}{4^{2k}\cdot k! \cdot (k+\frac{1}{4})\cdot
[(k-1)+\frac{1}{4}]\cdots(1+\frac{1}{4})} =
\frac{a_1\Gamma(\frac{5}{4})}{2^{4k}\cdot k! \cdot
\Gamma(k+\frac{5}{4})}.
\]
Let $w_1(z)=\sum_{n=0}^{\infty}\frac{\Gamma(\frac{3}{4})}{n!\cdot
\Gamma(n+\frac{3}{4})}\left(\frac{z}{2}\right)^{4n}$ and
$w_2(z)=\sum_{n=0}^{\infty}\frac{2\Gamma(\frac{5}{4})}{n!\cdot
\Gamma(n+\frac{5}{4})}\left(\frac{z}{2}\right)^{4n+1}$. Then
$w(z)=a_0w_1(z)+a_1w_2(z)$.
\end{proof}

(2) \begin{proof}$p(z)=z$ and $q(z)=0$ are both analytic. So the
solution $w(z)$ is analytic and assumes the form
$w(z)=\sum_{k=0}^{\infty}a_kz^k$. Then
\begin{eqnarray*}
w''-zw &=& \sum_{k=2}^{\infty}a_k\cdot k(k-1)z^{k-2} -
\sum_{k=0}^{\infty}a_kz^{k+1} \\
&=& \sum_{k=0}^{\infty}a_{k+2}(k+2)(k+1)z^k -
\sum_{k=1}^{\infty}a_{k-1}z^k \\
&=& 2a_2+\sum_{k=1}^{\infty}[a_{k+2}(k+2)(k+1)-a_{k-1}]z^k \\
&=& 0.
\end{eqnarray*}
Clearly, $a_0=w(0)$ and $a_1=w'(0)$. By the uniqueness of power
series representation of an analytic function, we have $a_2=0$, and
$a_{k+2}=\frac{a_{k-1}}{(k+2)(k+1)}$. So
\[
a_{3k}=\frac{a_{3(k-1)}}{3k\cdot(3k-1)} = \cdots =
\frac{a_0}{3k\cdot (3k-1)\cdot \cdots \cdot 3\cdot 2} =
\frac{a_0}{3^{2k}\cdot k! \cdot
(k-\frac{1}{3})\cdots(1-\frac{1}{3})} =
\frac{a_0\Gamma(\frac{2}{3})}{3^{2k} \cdot k! \cdot
\Gamma(k+\frac{2}{3})}
\]
and
\[
a_{3k+1}=\frac{a_{3(k-1)+1}}{(3k+1)\cdot 3k} = \cdots =
\frac{a_1}{(3k+1)\cdot 3k \cdot \cdots \cdot 4\cdot 3} =
\frac{a_1}{3^{2k} \cdot k! \cdot
(k+\frac{1}{3})\cdots(1+\frac{1}{3})} =
\frac{a_1\Gamma(\frac{4}{3})}{3^{2k}\cdot k! \cdot
\Gamma(k+\frac{4}{3})}.
\]
Let $w_1(z)=\sum_{n=0}^{\infty}\frac{\Gamma(\frac{2}{3})}{n! \cdot
\Gamma(n+\frac{2}{3})}\frac{z^{3n}}{3^{2n}}$ and
$w_2(z)=\sum_{n=0}^{\infty}\frac{\Gamma(\frac{4}{3})}{n! \cdot
\Gamma(n+\frac{4}{3})}\frac{z^{3n+1}}{3^{2n}}$. Then
$w(z)=a_0w_1(z)+a_1w_2(z)$.
\end{proof}

(3) \begin{proof}
The equation can be written as $w''+p(z)w'+q(z)w=0$ with $p(z)=\frac{z}{z^2-1}$ and $q(z)=-\frac{1}{z^2-1}$. So $\pm 1$ are two singularities of the equation and the equation has an analytic solution in a neighborhood of $0$ (e.g. $D(0,1):=\{z:|z|<1\}$). Suppose $w=\sum_{k=0}^{\infty}a_kz^k$. Then
\begin{eqnarray*}
(z^2-1)w''+zw'-w = \sum_{k=2}^{\infty}[(k-1)(k+1)a_k - (k+1)(k+2)a_{k+2}]z^k-(a_0+2a_2)-6a_3z = 0.
\end{eqnarray*}
Therefore, $a_0=w(0)$, $a_1=w'(0)$, $a_2=-\frac{w(0)}{2}$, $a_3=0$, and for $k\ge 2$,
\[
a_{k+2}=\frac{(k-1)a_k}{k+2}.
\]
From $a_3=0$, we conclude $a_{2k+1}=0$ for $k\ge 1$. Moreover
\[
a_{2k}=\frac{2k-3}{2k}a_{2k-2}=\frac{2k-3}{2k}\cdot\frac{2k-5}{2k-2}a_{2k-4}=\cdots=\frac{2^k[(k-1)-\frac{1}{2}][(k-2)-\frac{1}{2}]\cdots(0-\frac{1}{2})}{2^k\cdot k!}a_0=\frac{\Gamma(k-\frac{1}{2})}{k!\cdot \Gamma(-\frac{1}{2})}a_0.
\]
Let $w_1(z)=\sum_{k=0}^{\infty}\frac{\Gamma(k-\frac{1}{2})}{k!\cdot \Gamma(-\frac{1}{2})}z^{2k}$ and $w_2(z)=z$, we get $w(z)=a_0w_1(z)+a_1w_2(z)$.

\begin{remark}
Note $w_1(z)$ is the Taylor series of $\sqrt{1-z^2}$.
\end{remark}
\end{proof}

(4) \begin{proof}
The equation can be written as $w''+p(z)w'+q(z)w=0$ with $p(z)=\frac{2(1+2z)}{1+z+z^2}$ and $q(z)=\frac{2}{1+z+z^2}$. So $\frac{-1\pm\sqrt{3}i}{2}$ are two singularities of the equation and the equation has an analytic solution in $D(0,1)$. Suppose $w=\sum_{k=0}^{\infty}a_kz^k$. Then
\[
(1+z+z^2)w''+2(1+2z)w'+2w = \sum_{k=2}^{\infty}(k+1)(k+2)(a_k+a_{k+1}+a_{k+2})z^k +2(a_0+a_1+a_2)+6(a_1+a_2+a_3)z = 0.
\]
Therefore, we have
\begin{eqnarray*}
\begin{cases}
a_0+a_1+a_2=0\\
a_1+a_2+a_3=0\\
a_k+a_{k+1}+a_{k+2}=0 \;(k\ge 2).
\end{cases}
\end{eqnarray*}
Solving it gives us $a_{3k}=a_0$, $a_{3k+1}=a_1$, and
$a_{3k+2}=-(a_0+a_1)$. Plugging in the values of $a_k$'s, we get
after simplification $w(z)=a_0w_1(z)+a_1w_2(z)$, where
$w_1(z)=\frac{1+z}{1+z+z^2}$ and $w_2(z)=\frac{z}{1+z+z^2}$.
Equivalently, we can choose another basis with two linearly
independent solutions: $\widetilde w_1=\frac{1}{1+z+z^2}$ and
$\widetilde w_2(z)=\frac{z}{1+z+z^2}$.

\begin{remark}
We can solve the problem more directly once we note
\[
\frac{d^2}{dz^2}[(1+z+z^2)w(z)] = (1+z+z^2)w''(z) + 2(1+2z)w'(z) +
2w(z).
\]
So $(1+z+z^2)w(z)=a_0+a_1z$, i.e.
\[
w(z)=\frac{a_0}{1+z+z^2}+\frac{a_1z}{1+z+z^2}.
\]
\end{remark}
\end{proof}

\noindent 3. (1) \begin{proof} The equation can be transformed to $w''+p(z)w'+q(z)w=0$, where $p(z)=\frac{1-3z}{z(1-z)}$ and $q(z)=-\frac{1+z}{z^2(1-z)}$. So $0$ is the singularity of the equation. Since $zp(z)=\frac{1-3z}{1-z}$ and $z^2q(z) =-\frac{1+z}{1-z}$ are analytic in $0<|z|<1$, the equation has two regular solutions in $0<|z|<1$:
\[
\begin{cases} w_1(z)=z^{\rho_1}\sum_{k=0}^{\infty}c_kz^k \;(c_0\ne 0) \\
 w_2(z)=gw_1(z)\ln z +z^{\rho_2}\sum_{k=0}^{\infty}d_kz^k\;(g\ne 0 \; \mbox{or} \; d_0\ne 0)
 \end{cases}
\]
for some constants $g$, $\rho_1$ and $\rho_2$. $\rho_1$ and $\rho_2$ satisfy the index equation $\rho(\rho-1)+a_0\rho+b_0=0$, where $a_0=\lim_{z\to 0}zp(z)=1$ and $b_0=\lim_{z\to 0}z^2q(z)=-1$. So the equation for $\rho$ becomes $\rho^2=1$, whose roots are $\rho_1=1$ and $\rho_2=-1$; we work with $\rho=\rho_1=1$. Suppose $w(z)$ has the form of $z^{\rho}\sum_{n=0}^{\infty}c_nz^n$, then we can get the following recursive relation
\[
[(n+\rho)(n+\rho-1)+a_0(n+\rho)+b_0]c_n+\sum_{l=0}^{n-1}[a_{n-l}(l+\rho) + b_{n-l}]c_l=0\; (n\ge 1),
\]
where $a_n$'s and $b_n$'s come from the Laurent series of $p(z)=\sum_{l=0}^{\infty}a_lz^{l-1}$ and $q(z)=\sum_{l=0}^{\infty}b_lz^{l-2}$, respectively.
We then find the Laurent series expansion of $p(z)$ and $q(z)$ as follows:
\[
p(z)= \frac{1}{z} - 2\sum_{l=0}^{\infty}z^l, \; q(z)=-\frac{1}{z^2}-2\sum_{l=1}^{\infty}z^{l-2}.
\]
Therefore the recursive relation can be simplified to
\[
[(n+1)n+(n+1)-1]c_n + \sum_{l=0}^{n-1}[-2(l+1)+(-2)]c_l = 0.
\]
Define $\xi_n = (n+2)c_n$ $(n\ge 0)$. Then this relation can be further simplified to
\[
\xi_n = 2\cdot \frac{\sum_{k=0}^{n-1}\xi_k}{n}.
\]
It's easy to see by induction that $\xi_n=(n+1)\xi_0=2(n+1)c_0$, i.e. $c_n=\frac{2(n+1)}{n+2}c_0$. Therefore
\begin{eqnarray*}
w(z) = z^{\rho}\sum_{k=0}^{\infty}c_kz^k = 2c_0 \sum_{k=0}^{\infty}\frac{k+1}{k+2}z^{k+1} = 2c_0\sum_{k=0}^{\infty}z^{k+1}-2c_0\sum_{k=0}^{\infty}\frac{z^{k+1}}{k+2} = \frac{2c_0z}{1-z}-\frac{2c_0}{z}[-\ln(1-z)-z]=2c_0w_1(z),
\end{eqnarray*}
where $w_1(z) = \frac{1}{1-z} + \frac{\ln(1-z)}{z}$. From formula (6.27b), we can conjecture $w_2(z)=\frac{1}{z}$. Then it's easy to verify this conjecture is indeed true.
\end{proof}

(2) \begin{proof}
The equation can be transformed to $w''+p(z)w'+q(z)w = 0$, where $p(z)= -\frac{5}{3z}$ and $q(z)=\frac{7}{9z^2}+4z^2$. So $0$ is the singularity of the equation. Since $zp(z)=-\frac{5}{3}$ and $z^2q(z)=\frac{7}{9}+4z^4$ are analytic in $0<|z|$, the equation has two regular solutions in $0<|z|$:
\[
\begin{cases}
w_1(z)=z^{\rho_1}\sum_{k=0}^{\infty}c_kz^k, \; (c_0\ne 0)\\
w_2(z)=gw_1(z)\ln z + z^{\rho_2}\sum_{k=0}^{\infty}d_kz^k, \; (g\ne 0\;\mbox{or} \; d_0\ne 0)
\end{cases}
\]
for some constants $g$, $\rho_1$ and $\rho_2$. $\rho_1$ and $\rho_2$ satisfy the index equation
\[
\rho(\rho-1)+a_0\rho+b_0=0,
\]
where $a_0=\lim_{z\to 0}zp(z)=-\frac{5}{3}$ and $b_0=\lim_{z\to 0}z^2q(z)=\frac{7}{9}$. Solving the above equation gives $\rho_1=\frac{7}{3}$ and $\rho_2=\frac{1}{3}$. We note the Laurent series expansion of $p(z)$ and $q(z)$ are, respectively, $-\frac{5}{3z}$ and $\frac{7}{9z^2}+4z^2$. So $a_0=-\frac{5}{3}$ and $a_n=0$ for $n\ge 1$; $b_0=\frac{7}{9}$, $b_4=4$, and $b_n=0$ for $n\ge 1$ and $n\ne 4$. Therefore, the recursion equation
\[
[(n+\rho)(n+\rho-1)+a_0(n+\rho)+b_0]c_n+\sum_{l=0}^{n-1}[a_{n-l}(l+\rho) + b_{n-l}]c_l=0\; (n\ge 1)
\]
is simplified to
\[
\left[(n+\rho)^2-\frac{8}{3}(n+\rho)+\frac{7}{9}\right]c_n + 1_{\{n\ge 4\}}4c_{n-4} = 0\; (n\ge 1)
\]
and we can conclude
\begin{eqnarray*}
c_n = \begin{cases}
0 & \mbox{if $n=1, 2, 3$} \\
-\frac{4c_{n-4}}{(n+\rho)(n+\rho-\frac{8}{3})+\frac{7}{9}} & \mbox{if $n\ge 4$}.
\end{cases}
\end{eqnarray*}

Now let  $\rho = \rho_1 = \frac{7}{3}$. Then for $n\ge 4$,
\[
c_n = -\frac{4c_{n-4}}{n(n+2)}
\]
Define $\xi_k=c_{4k}$ $(k\ge 0)$. Then for $k\ge 1$,
\[
\xi_k=-\frac{4\xi_{k-1}}{4k\cdot (4k+2)} = -\frac{\xi_{k-1}}{(2k)\cdot (2k+1)}.
\]
By induction, it's easy to see $\xi_k=\frac{(-1)^k}{(2k+1)!}\xi_0 = \frac{(-1)^k}{(2k+1)!}c_0$. Therefore
\[
w(z)=  c_0z^{\frac{7}{3}}\sum_{k=0}^{\infty}\frac{(-1)^k}{(2k+1)!} z^{4k} = c_0z^{\frac{1}{3}}\sum_{k=0}^{\infty}\frac{(-1)^k}{(2k+1)!} z^{4k+2} = c_0w_1(z),
\]
where $w_1(z)=z^{\frac{1}{3}}\sin(z^2)$. Similarly, by choosing $\rho=\rho_2=\frac{1}{3}$, the recursion equation can be written as $(n^2-2n)c_n+4c_{n-4}=0$, which leads to the solution $c_{4k}=\frac{(-1)^k}{(2k)!}c_0$. Therefore, the other solution is
\[
w_2(z)=z^{\frac{1}{3}}\sum_{k=0}^{\infty}\frac{(-1)^k}{(2k)!}z^{4k} = z^{\frac{1}{3}}\cos(z^2).
\]
\end{proof}

(3) \begin{proof} The equation can be transformed to $w''(z)-w'(z) + \frac{1}{z}w(z)=0$. Let $p(z)=-1$ and $q(z)=\frac{1}{z}$. Then $zp(z) = -z$ and $z^2q(z) = z$ are both analytic in $|z|>0$. By Theorem 6.3, the equation has two regular solutions in $|z|>0$:
\begin{eqnarray*}
\begin{cases}
w_1(z) = z^{\rho_1}\sum_{k=0}^{\infty}c_kz^k & (c_0\ne 0) \\
w_2(z) = gw_1(z)\ln z + z^{\rho_2}\sum_{k=0}^{\infty}d_kz^k & (g\ne 0 \;\mbox{or}\; d_0\ne 0)
\end{cases}
\end{eqnarray*}
for some constants $g$, $\rho_1$ and $\rho_2$. $\rho_1$ and $\rho_2$ satisfy the index equation
\[
\rho(\rho-1)+a_0\rho+b_0=0,
\]
where $a_0=\lim_{z\to 0}zp(z) = 0$ and $b_0= \lim_{z\to 0}z^2q(z) =0$. So $\rho_1=1$ and $\rho_2 = 0$.

Suppose $w(z)$ has the form $z^{\rho}\sum_{n=0}^{\infty}c_nz^n$. We can get the following recursion equation
\[
[(n+\rho)(n+\rho-1)+a_0(n+\rho)+b_0]c_n + \sum_{l=0}^{n-1}[a_{n-l}(l+\rho)+b_{n-l}]c_l = 0\; (n\ge 1).
\]
Since $a_0=b_0=0$, $a_1=-1$, $a_n=0$ $(n\ge 2)$, $b_1=1$, $b_n=0$ $(n\ge 2)$, the above equation can be further simplified: if $n=1$, the equation becomes
\[
(\rho+1)\rho\cdot c_1 + (1-\rho)c_0 = 0;
\]
if $n\ge 2$, the equation becomes
\[
(n+\rho)(n+\rho-1)c_n + (-n + 2 -\rho)c_{n-1} = 0.
\]

We first let $\rho=\rho_1 =1$. Then $c_1=0$ and $(n+1)nc_n = (n-1)c_{n-1}$ for $n\ge 2$. Therefore, $c_n=0$ for $n\ge 1$, and one solution of the equation is
\[
w(z)=z^{\rho_1}\sum_{n=0}^{\infty}c_nz^n = c_0z.
\]
So we can let $w_1(z)=z$. We then let $\rho = \rho_2 = 0$. Then the $n=1$ equation forces $c_0=0$, while $c_1$ can be any number and $n(n-1)c_n=(n-2)c_{n-1}$ for $n\ge 2$. This implies $c_n=0$ for $n\ge 2$. The corresponding solution is therefore
\[
z^{\rho_2}\sum_{n=0}^{\infty}c_nz^n = c_1 z.
\]
So we get back to the same solution $w(z)=z$. This means we have to try the other form of the solution
\[
w_2(z)=gw_1(z)\ln z + z^{\rho_2}\sum_{n=0}^{\infty} d_n z^n = gz\ln z + \sum_{n=0}^{\infty}d_n z^n,
\]
where $g$ is a constant. Note
\[
w_2'(z) = g\ln z + g + \sum_{n=1}^{\infty}d_nnz^{n-1}\;\mbox{and} \; w_2''(z) = \frac{g}{z} + \sum_{n=2}^{\infty}d_nn(n-1)z^{n-2}.
\]
So
\begin{eqnarray*}
& & w_2''(z) + p(z) w_2'(z) + q(z) w_2(z) \\
&=& \left[\frac{g}{z} + \sum_{n=2}^{\infty}d_nn(n-1)z^{n-2}\right] - \left[g\ln z + g + \sum_{n=1}^{\infty}d_nnz^{n-1}\right] + \left[g\ln z + \sum_{n=0}^{\infty}d_nz^{n-1}\right] \\
&=& \frac{g+d_0}{z}+(2d_2-g)+\sum_{n=1}^{\infty}[d_{n+2}(n+2)(n+1)-d_{n+1}n]z^n.
\end{eqnarray*}
Therefore the necessary and sufficient condition for $w_2''(z) + p(z) w_2'(z) + q(z) w_2(z)=0$ to hold is
\begin{eqnarray*}
\begin{cases}
g+d_0=0\\
g=2d_2\\
d_{n+2}(n+2)(n+1)=d_{n+1}n\; (n\ge 1).
\end{cases}
\end{eqnarray*}
We have $g=-d_0$ and $d_2=-\frac{1}{2}d_0$. By defining $\xi_n=(n-1)d_n$ $(n\ge 2)$ and working by induction, it's easy to deduce $d_n = -\frac{d_0}{(n-1)\cdot n!}$ for $n\ge 2$. So we have
\[
w_2(z)=-d_0z\ln z + d_0 + d_1z - \sum_{n=2}^{\infty}\frac{d_0}{(n-1)\cdot n!}z^n.
\]
Note $w(z)=z$ is already a solution, so we can let $w_2(z) = z\ln z - 1 + \sum_{n=2}^{\infty}\frac{z^n}{n!(n-1)}$.
\end{proof}

(4) \begin{proof} The equation can be transformed to $w''(z)+p(z)w'(z)+q(z)w(z)=0$, where $p(z)=1-\frac{1}{z}$ and $q(z) = \frac{1}{z}$. Since $zp(z) = z-1$ and $z^2q(z) = z$ are both analytic in $|z|>0$, the equation has two regular solutions in $|z|>0$:
\begin{eqnarray*}
\begin{cases}
w_1(z) = z^{\rho_1}\sum_{k=0}^{\infty}c_kz^k & (c_0\ne 0) \\
w_2(z) = gw_1(z)\ln z + z^{\rho_2}\sum_{k=0}^{\infty}d_kz^k & (g\ne 0 \;\mbox{or}\; d_0\ne 0)
\end{cases}
\end{eqnarray*}
for some constants $g$, $\rho_1$ and $\rho_2$. $\rho_1$ and $\rho_2$ satisfy the index equation
\[
\rho(\rho-1)+a_0\rho+b_0=0,
\]
where $a_0=\lim_{z\to 0}zp(z) = -1$ and $b_0= \lim_{z\to 0}z^2q(z) =0$. So $\rho_1=2$ and $\rho_2 = 0$.

Suppose $w(z)$ has the form $z^{\rho}\sum_{n=0}^{\infty}c_nz^n$. We can get the following recursion equation
\[
[(n+\rho)(n+\rho-1)+a_0(n+\rho)+b_0]c_n + \sum_{l=0}^{n-1}[a_{n-l}(l+\rho)+b_{n-l}]c_l = 0\; (n\ge 1).
\]
Since $a_0 = -1$, $a_1 = 1$, and $a_n=0$ $(n\ge 2)$; $b_0=0$, $b_1 = 1$, and $b_n=0$ $(n\ge 2)$, the above recursion equation can be further simplified to $(n+\rho-2)c_n + c_{n-1} = 0$.

We first let $\rho = \rho_1 = 2$. Then it's easy to see $c_n = \frac{(-1)^n}{n!}c_0$. So we can let $w_1(z)=z^2\sum_{n=0}^{\infty}\frac{(-1)^n}{n!} z^{n}=z^2e^{-z}$. We then let $\rho = \rho_2 = 0$, and the recursion equation becomes $(n-2)c_n+c_{n-1}=0$ ($n\ge 1$). It's easy to see $c_0=c_1=0$ and $c_n = \frac{(-1)^n}{(n-2)!}c_2$ for $n\ge 2$. Plugging these values into the formula $w(z)=z^{\rho_2}\sum_{n=0}^{\infty}c_nz^n$, we get $w(z)=c_2z^2e^{-z}$. This is the same as the first solution, so we have to try the other form of the solution
\[
w_2(z)=gw_1(z)\ln z + z^{\rho_2}\sum_{n=0}^{\infty} d_n z^n = gz^2e^{-z}\ln z + \sum_{n=0}^{\infty}d_n z^n,
\]
where $g$ is a constant. Note
\[
w_2'(z) = ge^{-z}(2z\ln z - z^2\ln z + z) + \sum_{n=1}^{\infty}d_nnz^{n-1}
\]
and
\[
w_2''(z) = ge^{-z}[3-2z+(2-4z+z^2)\ln z] + \sum_{n=2}^{\infty}(n-1)nd_nz^{n-2}.
\]
So
\[
w''(z)+p(z)w'(z)+q(z)w(z) = -ge^{-z}(-2+z) + \frac{d_0-d_1}{z} + \sum_{n=0}^{\infty}(n+2)(nd_{n+2}+d_{n+1})z^n.
\]
Set $w''(z)+p(z)w'(z)+q(z)w(z)=0$. Then it's easy to see $g=0$ and by induction we must have $d_0=d_1=0$, $d_{n+1}=\frac{(-1)^{n-1}}{(n-1)!}d_2$ for $n\ge 2$. This leads us back to the first solution.

Therefore, we apply Liouville's formula, formula (6.30), to get the second solution: (note $p(z)=1-\frac{1}{z}$)
\begin{eqnarray*}
w_2(z) &=& w_1(z)\int^z\left\{\frac{1}{[w_1(\eta)]^2}\exp\left[-\int^{\eta}p(\xi)d\xi\right]\right\}d\eta \\
&=& w_1(z)\int^z\left\{\frac{1}{[w_1(\eta)]^2}\exp\left[-\eta +\ln\eta\right]\right\}d\eta \\
&=& z^2e^{-z} \int^z \frac{\eta e^{-\eta}}{\eta^4e^{-2\eta}}d\eta \\
&=& z^2e^{-z} \int^z \frac{e^{\eta}}{\eta^3}d\eta.
\end{eqnarray*}
Denote by $Ei(z)$ the exponential integral function $Ei(z)=\int_{-\infty}^z\frac{e^{\xi}}{\xi}d\xi$. Then by integration-by-part formula, we have
\[
w_2(z) = z^2e^{-z} \cdot \frac{-e^z(1+z)+z^2Ei(z)}{2z^2} = \frac{1}{2}[-(1+z)+z^2e^{-z}Ei(z)].
\]

\begin{remark}
Verify the $w_2(z)$ represented in this form is the same as the
$w_2(z)$ given by the textbook's solution.
\end{remark}
\end{proof}

\noindent 4. \begin{proof}In the given equation, we have $p(z)=\frac{2}{z}$ and $q(z)=m^2$. Since $zp(z)=2$ and $z^2q(z)=m^2z^2$ are both analytic in $|z|>0$, the equation has two regular solutions in $|z|>0$:
\begin{eqnarray*}
\begin{cases}
w_1(z) = z^{\rho_1}\sum_{k=0}^{\infty}c_kz^k & (c_0\ne 0) \\
w_2(z) = gw_1(z)\ln z + z^{\rho_2}\sum_{k=0}^{\infty}d_kz^k & (g\ne 0 \;\mbox{or}\; d_0\ne 0)
\end{cases}
\end{eqnarray*}
for some constants $g$, $\rho_1$ and $\rho_2$. $\rho_1$ and $\rho_2$ satisfy the index equation
\[
\rho(\rho-1)+a_0\rho+b_0=0,
\]
where $a_0=\lim_{z\to 0}zp(z) = 2$ and $b_0= \lim_{z\to 0}z^2q(z) =0$. So $\rho_1=0$ and $\rho_2 = -1$.

Let $w_1(z) = z^{\rho_1}\sum_{n=0}^{\infty}c_nz^n=\sum_{n=0}^{\infty}c_nz^n$. Then
\[
w_1'(z) = \sum_{n=0}^{\infty}c_{n+1}(n+1)z^n\;\;\mbox{and}\;\; w_1''(z) = \sum_{n=0}^{\infty}c_{n+2}(n+2)(n+1)z^n.
\]
Therefore
\[
w''(z)+p(z)w'(z)+q(z)w(z) = \sum_{n=0}^{\infty}[c_{n+2}(n+2)(n+3)+m^2c_n]z^n + \frac{2c_1}{z}=0.
\]
This implies
\[
\begin{cases}
c_1 = 0 \\
c_{n+2}(n+2)(n+3)+m^2c_n=0, & n\ge 0.
\end{cases}
\]
If $m=0$, the equation has only a constant solution $w(z)=c_0$. If $m\ne 0$, we must have
\[
c_{n+2}=\frac{-m^2}{(n+2)(n+3)}c_n,\; n\ge 0.
\]
Define $\xi_k=c_{2k}$ $(k\ge 0)$. Then the above relation can be written as
\begin{eqnarray*}
\xi_{k+1}&=&c_{2k+2} = \frac{-m^2}{(2k+3)(2k+2)}c_{2k} = \frac{-m^2}{(2k+3)(2k+2)}\xi_{k} \\
&=& \cdots \\
&=& \frac{-m^2}{(2k+3)(2k+2)}\cdot \frac{-m^2}{(2k+1)\cdot 2k}\cdots \frac{-m^2}{3\cdot 2}c_0 \\
&=& \frac{(-1)^{k+1}m^{2(k+1)}}{(2(k+1)+1)!}c_0.
\end{eqnarray*}
Therefore
\[
w_1(z)=\sum_{n=0}^{\infty}c_nz^n = c_0 + c_0\sum_{k=1}^{\infty}\frac{(-1)^km^{2k}}{(2k+1)!}z^{2k} = \frac{c_0}{mz}\left[mz + \sum_{k=1}^{\infty}\frac{(-1)^km^{2k+1}}{(2k+1)!}z^{2k+1}\right] = c_0\frac{\sin mz}{mz}.
\]

Let $w_2(z)=z^{\rho_2}\sum_{n=0}^{\infty}d_nz^n = \frac{d_0}{z} + \sum_{n=0}^{\infty}d_{n+1}z^n$. So $w'_2(z)=-\frac{d_0}{z^2}+\sum_{n=1}^{\infty}d_{n+1}nz^{n-1}$ and $w_2''(z) = \frac{2d_0}{z^3} + \sum_{n=2}^{\infty}d_{n+1}n(n-1)z^{n-2}$. Therefore
\begin{eqnarray*}
w_2''(z)+p(z)w_2'(z) + q(z) w_2(z)  = \frac{2d_2+m^2d_0}{z} + \sum_{n=0}^{\infty}[(n+2)(n+3)d_{n+3} + m^2d_{n+1}]z^n.
\end{eqnarray*}
Then we have the equations for $d_n$'s:
\[
\begin{cases}
2d_2+m^2d_0=0 \\
(n+2)(n+3)d_{n+3}+m^2d_{n+1}=0, \; n\ge 0.
\end{cases}
\]
Following a procedure similar to that of the first solution, we can easily find $d_n$'s and prove the second solution is $w_2(z) = \frac{\cos mz}{mz}$.
\end{proof}

\noindent 5. \begin{proof}
In the given equation, we have $p(z)=\frac{1}{z}$ and $q(z)=-m^2$. Since $zp(z)=1$ and $z^2q(z)=-m^2z^2$ are both analytic in $|z|>0$, the equation has two regular solutions in $|z|>0$:
\begin{eqnarray*}
\begin{cases}
w_1(z) = z^{\rho_1}\sum_{k=0}^{\infty}c_kz^k & (c_0\ne 0) \\
w_2(z) = gw_1(z)\ln z + z^{\rho_2}\sum_{k=0}^{\infty}d_kz^k & (g\ne 0 \;\mbox{or}\; d_0\ne 0)
\end{cases}
\end{eqnarray*}
for some constants $g$, $\rho_1$ and $\rho_2$. $\rho_1$ and $\rho_2$ satisfy the index equation
\[
\rho(\rho-1)+a_0\rho+b_0=0,
\]
where $a_0=\lim_{z\to 0}zp(z) = 1$ and $b_0= \lim_{z\to 0}z^2q(z) =0$. So $\rho_1=\rho_2 = 0$.

 Let $w(z)=z^{\rho}\sum_{n=0}^{\infty}c_nz^n = \sum_{n=0}^{\infty} c_n z^n$. Then
 \[
 w''(z)+p(z)w'(z)+q(z)w(z) = \frac{c_1}{z} + \sum_{n=0}^{\infty} [c_{n+2}(n+2)^2-m^2c_n]z^n.
 \]
 So we have the equations for $c_n$'s:
 \begin{eqnarray*}
 \begin{cases}
 c_1 = 0 \\
 c_{n+2} = \frac{m^2}{(n+2)^2}c_n, \; n\ge 0.
 \end{cases}
 \end{eqnarray*}
 Working by induction, it's easy to see $(k\ge 0)$
 \begin{eqnarray*}
 c_n =
 \begin{cases}
 0, & n = 2k+1\\
 \frac{\left(\frac{m}{2}\right)^{2k}}{(k!)^2}c_0, & n = 2k.
 \end{cases}
 \end{eqnarray*}
 Therefore, one solution of the ordinary differential equation is
 \[
 w_1(z) = \sum_{k=0}^{\infty}\frac{1}{(k!)^2}\left(\frac{mz}{2}\right)^{2k}= I_0(mz),
 \]
 where $I_{\alpha}(z)$ is the modified Bessel function
 \[
 I_{\alpha}(z) = \sum_{k=0}^{\infty} \frac{1}{k! \Gamma(k+\alpha+1)}\left(\frac{z}{2}\right)^{2k+\alpha}.
 \]Note the series representation for Bessel function $J_n$ is
 \[
 J_n(z) = \sum_{k=0}^{\infty}\frac{(-1)^k\left(\frac{z}{2}\right)^{2k+n}}{k!(k+n)!},
 \]
 $w_1(z)$ can also be written as $J_0(imz)$.

 To get the other solution, we let $w(z) = gI_0(mz)\ln z + \sum_{n=0}^{\infty}d_nz^n$, where $g$ is a constant. Plug this representation into the equation $w''(z) + p(z) w'(z) + q(z) w(z) = 0$, we can get equations for $g$ and $d_n$'s.

 Now the computation becomes really messy, so we omit the details for this version. Mathematica command {\bf{DSolve[w''[z]+w'[z]/z - m\^\;2w[z] == 0, w[z], z]}} gives the two solutions as $J_0(-imz)=J_0(imz)$ and $Y_0(-imz)$. Here $Y_{\alpha}(z)$ is Bessel function of the second kind and is defined as
 \[
 Y_{\alpha}(z) = \frac{J_{\alpha}(z)\cos(\alpha \pi) - J_{-\alpha}(z)}{\sin(\alpha \pi)}.
 \]

 \begin{remark}
 Verify $w_2(z)$ represented in this form is the same as the $w_2(z)$ given by the textbook's solution.
 \end{remark}
 \end{proof}



\section{留数定理及其应用}

\subsection{Exercises in the text}

\noindent 7.1. \begin{proof}
Suppose $f(z)$ has Laurent series $\sum_{n=-\infty}^{\infty}a_nz^n$ in a neighborhood of $0$. Then
\[
f(z)=\frac{1}{2}[f(z)+f(-z)] = \frac{1}{2}\sum_{n=-\infty}^{\infty}a_nz^n[1+(-1)^n] = \sum_{k=-\infty}^{\infty}a_{2k}z^{2k}.
\]
So for $\rho$ sufficiently small, $\mbox{Res}(f,0)=\sum_{k=-\infty}^{\infty}\frac{1}{2\pi i}\int_{|z|=\rho}z^{2k}dz=0$, where the last equality is due to Cauchy's integral formula.
\end{proof}

\noindent 7.2.
$f(z)$ can be written as $z^ng(z)$ in a neighborhood of $0$, where $g(z)$ is analytic near 0 and $g(0)\ne 0$. Without loss of generality, we assume $n\ge 1$. Otherwise, all the residues below are equal to 0.

(1)\begin{proof}
\[
\frac{f'(z)}{f(z)}=\frac{nz^{n-1}g(z)+z^ng'(z)}{z^ng(z)} = \frac{n}{z} + \frac{g'(z)}{g(z)}.
\]
So $\mbox{Res}(f'/f,0)=n$.\end{proof}

(2)\begin{proof} If $n=1$,
\[
\frac{f''(z)}{f(z)}=\frac{[g(z)+zg'(z)]'}{f(z)}=\frac{2g'(z)+zg''(z)}{zg(z)} = \frac{2}{z}\cdot \frac{g'(z)}{g(z)} + \frac{g''(z)}{g(z)}.
\]
So $\mbox{Res}(f''/f, 0)=2g'(0)/g(0)$. If $n\ge 2$,
\[
\frac{f''(z)}{f(z)}=\frac{n(n-1)z^{n-2}g(z)+2nz^{n-1}g'(z)+z^ng''(z)}{f(z)} = \frac{n(n-1)}{z^2} + \frac{2n}{z} \cdot \frac{g'(z)}{g(z)} + \frac{g''(z)}{g(z)}.
\]
So $\mbox{Res}(f''/f,0)=2ng'(0)/g(0)$. Combined, we conclude $\mbox{Res}(f''/f,0)=2ng'(0)/g(0)$.\end{proof}

(3)
\begin{proof}
If $n=1$,
\[
\frac{f''(z)}{f'(z)}=\frac{2g'(z)+zg''(z)}{g(z)+zg'(z)}
\]
is analytic near 0. So its residue is equal to 0. If $n\ge 2$,
\[
\frac{f''(z)}{f'(z)}=\frac{n(n-1)z^{n-2}g(z)+2nz^{n-1}g'(z)+z^ng''(z)}{nz^{n-1}g(z)+z^ng'(z)}=\frac{1}{z}\cdot \frac{n(n-1)g(z)+2nzg'(z)+z^2g''(z)}{ng(z)+zg'(z)}.
\]
Note the second term in the product is analytic near 0, so $\mbox{Res}(f''/f',0)= \left.\frac{n(n-1)g(z)+2nzg'(z)+z^2g''(z)}{ng(z)+zg'(z)}\right|_{z=0}=n-1$. Combined, we conclude the residue is equal to $n-1$.
\end{proof}

(4)
\begin{proof}
If $n=1$,
\[
\frac{(n-1)f'(z)-zf''(z)}{f(z)} = -z\frac{f''(z)}{f(z)} = -z\cdot \frac{2g'(z)+zg''(z)}{g(z)+zg'(z)}
\]
is analytic near 0, so its residue at 0 is equal to 0. If $n\ge 2$,
\[
\frac{(n-1)f'(z)-zf''(z)}{f(z)}=(n-1)\frac{f'(z)}{f(z)}-z\frac{f''(z)}{f(z)}= n(n-1)-n(n-1)=0,
\]
where the second to last equality is due to part (1) and (2).
\end{proof}

\noindent 7.3. $f(z)$ can be written as $z^{-n}g(z)$, where $g$ is analytic near 0 and $g(0)\ne 0$. Without loss of generality, we assume $n\ge 1$. Otherwise,  all the residues below are equal to 0.

(1) \begin{proof}
\[
\frac{f'(z)}{f(z)}=\frac{-nz^{-n-1}g(z)+z^{-n}g'(z)}{z^{-n}g(z)} = \frac{-ng(z)+zg'(z)}{zg(z)}=-\frac{n}{z}+\frac{g'(z)}{g(z)}.
\]
So $\mbox{Res}(f'/f,0)=-n$.
\end{proof}

(2) \begin{proof}
\[
\frac{f''(z)}{f(z)}=\frac{n(n+1)z^{-n-2}g(z)-2nz^{-n-1}g'(z)+z^{-n}g''(z)}{z^{-n}g(z)} = \frac{n(n+1)}{z^2} - \frac{2n}{z}\frac{g'(z)}{g(z)} + \frac{g''(z)}{g(z)}.
\]
So the residue equals to $-2n\frac{g'(0)}{g(0)}$.
\end{proof}

(3) \begin{proof}
\[
\frac{f''(z)}{f'(z)}=\frac{n(n+1)z^{-n-2}g(z)-2nz^{-n-1}g'(z)+z^{-n}g''(z)}{-nz^{-n-1}g(z)+z^{-n}g'(z)} = \frac{1}{z} \cdot \frac{n(n+1)g(z)-2nzg'(z)+z^2g''(z)}{-ng(z)+zg'(z)}.
\]
Note the second term in the above product is analytic near 0, so the residue is equal to \[\left.\frac{n(n+1)g(z)-2nzg'(z)+z^2g''(z)}{-ng(z)+zg'(z)}\right.|_{z=0}=-(n+1).\]
\end{proof}

(4) \begin{proof} $(n+1)f'(z)+zf''(z)=-(n+1)nz^{-n-1}g(z)+(n+1)z^{-n}g'(z)+n(n+1)z^{-n-1}g(z)-2nz^{-n}g'(z)+z^{-n+1}g''(z)=-(n-1)z^{-n}g'(z)+z^{-n+1}g''(z)$. So
\[
\frac{(n+1)f'(z)+zf''(z)}{f(z)} = \frac{-(n-1)z^{-n}g'(z)+z^{-n+1}g''(z)}{z^{-n}g(z)} = -(n-1)\frac{g'(z)}{g(z)}+\frac{zg''(z)}{g(z)}.
\]
So the residue is equal to 0.
\end{proof}


\noindent 7.4. \begin{proof}
\begin{center}
\begin{tabular}{|c|c|c|c|}
\hline
function & given conditions & type of singularity & residue \\
\hline
$\frac{g(z)}{f(z)}$ & $z_0$ is a zero of $g(z)$ and a zero of $f(z)$ of the same order & removable & 0 \\
\hline
$\frac{g(z)}{f(z)}$ & $g(z_0)\ne 0$, $f(z_0)=0$, $f'(z_0)\ne 0$ & pole of order 1 & $\frac{g(z_0)}{f'(z_0)}$\\
\hline
$\frac{g(z)}{f(z)}$ & $z_0$ is zero of $g(z)$ of order m and zero of $f(z)$ of order m+1 & pole of order 1 & $\frac{(m+1)g^{(m)}(z_0)}{f^{(m+1)}(z_0)}$ \\
\hline
$\frac{g(z)}{f(z)}$ & $g(z_0)\ne 0$, $f(z_0)=f'(z_0)=0$, $f''(z_0)\ne 0$& pole of order 2 & \\
\hline
$\frac{g(z)}{(z-z_0)^2}$ & $g(z_0)\ne 0$ & pole of order 2 & $g'(z_0)$\\
\hline
$\frac{g(z)}{f(z)}$ & $z_0$ is zero of $f(z)$ of order m and $g(z_0)\ne 0$& pole of order m & \\
\hline
$\frac{g(z)}{f(z)}$ & $z_0$ is zero of $g(z)$ of order m and zero of $f(z)$ of order m+n & pole of order n & \\
\hline
\end{tabular}
\end{center}
\end{proof}

\noindent 7.5. \begin{proof} $f^2(z)=c_0^2 + \frac{2c_0c_1}{z} + \frac{1}{z^2}P(z)$, where $P(z)$ is a power series of $\frac{1}{z}$. By formula (7.12), $\mbox{Res}(f^2,\infty)=-2c_0c_1$. \end{proof}

\noindent 7.6. \begin{proof} Suppose the singularities of $f$ are $a_1$, $\cdots$, $a_n$. Let $R$ be large enough so that all the finite singularities of $f$ fall in the disc $|z|<R$. Then Cauchy's theorem implies
\[
\frac{1}{2\pi i}\int_{|z|=R}f(z)dz - \sum_{i=1, |a_i|<\infty}^n\mbox{Res}(f,a_i)=0,
\]
i.e. $\sum_{i=1}^n\mbox{Res}(f,a_i)+\mbox{Res}(f,\infty)=0$. So the sum of residues of $f$ on $\bar{\mathbb{C}}$ is 0.
\end{proof}

\noindent 7.7. \begin{proof} Let $\theta_k  = \arg\beta_k$ $(k=1,2,\cdots,m)$ and $\rho$ be a positive number that is sufficiently small. Define $C_{\rho}=\{z: |z|=1, |\arg z-\theta_k|\ge\rho, 1\le k \le m\}$. Define $\gamma_k$ as the arc that starts from $e^{i(\theta_k-\rho)}$, ends at $e^{i(\theta_k+\rho)}$, has $\beta_k=e^{i\theta_k}$ as the center, and dents toward origin. Then by Residue Theorem, for $\rho > 0$ sufficiently small, we have
\[
\int_{C_{\rho}\cup \gamma_1 \cup \cdots \cup \gamma_m}\frac{f(z)}{iz}dz = 2\pi \sum_{|z|<1} res\left\{\frac{f(z)}{z}\right\}.
\]
Then for each $k \in \{1, 2, \cdots, m\}$, we have
\[
\int_{-\gamma_k}\frac{f(z)}{z}dz = \int_{\phi_1(\rho)}^{\phi_2(\rho)}\frac{f(\beta_k + \rho e^{i\alpha})}{\beta_k + \rho e^{i\alpha}} \rho e^{i\alpha}id\alpha,
\]
where $\phi_2(\rho)-\phi_1(\rho)\to \pi$ as $\rho\to 0$. Since $\beta_k$ is a pole of order 1, in a neighborhood of $\beta_k$, we can write $f(z)$ as $\frac{g(z)}{z-\beta_k}$ where $g(z)$ is analytic near $\beta_k$. Then (note $res\left\{\frac{f(z)}{z},\beta_k\right\} = \frac{g(\beta_k)}{\beta_k}$)
\[
\int_{-\gamma_k}\frac{f(z)}{z}dz = \int_{\phi_1(\rho)}^{\phi_2(\rho)}\frac{g(\beta_k + \rho e^{i\alpha})}{(\beta_k + \rho e^{i\alpha})\rho e^{i\alpha}} \rho e^{i\alpha}id\alpha \to \pi i\frac{g(\beta_k)}{\beta_k}= i\pi res\left\{\frac{f(z)}{z},\beta_k\right\},\;\mbox{as $\rho\to 0.$}
\]
Therefore
\begin{eqnarray*}
\int_0^{2\pi}R(\sin\theta,\cos\theta)d\theta &=& \lim_{\rho\to 0}\int_{C_{\rho}}\frac{f(z)}{iz}dz = 2\pi \sum_{|z|<1} res\left\{\frac{f(z)}{z}\right\} + \lim_{\rho\to 0}\sum_{k=1}^m\int_{-\gamma_k}\frac{f(z)}{iz}dz \\
&=& 2\pi \sum_{|z|<1} res\left\{\frac{f(z)}{z}\right\} + \pi \sum_{k=1}^m res\left\{\frac{f(z)}{z},\beta_k\right\}.
\end{eqnarray*}

\begin{remark}
The above result and the trick of {\it indenting the contour} can be found in Whittaker and Watson \cite{WW1927}, \S 6.23, page 117.
\end{remark}

 \end{proof}

\noindent 7.8. \begin{proof} We compute a more general integral $\int_0^{\infty}\frac{dx}{1+x^p}$ where $p \in (1,\infty)$. Choose two positive numbers $r$ and $R$
such that $0<r<1<R$. Let $\gamma_1=\{z: r\le |z| \le R, \arg z =
0\}$, $\gamma_2=\{z: r\le |z| \le R, \arg z = 2\pi\}$,
$\gamma_R=\{z: |z|=R, 0<\arg z < 2\pi\}$ and $\gamma_r=\{z: |z|=r,
0<\arg z <2\pi\}$. Define $f(z)=\frac{z^{1/p}}{p(z+1)z}$ where
$z^{1/p} = e^{\frac{\log z}{p}}$ is defined on $\mathbb C\setminus
[0,\infty)$. Note by substituting  $y^{\frac{1}{p}}$ for $x$, we get
\[
\int_0^{\infty}\frac{dx}{1+x^p} = \int_0^{\infty}
\frac{y^{\frac{1}{p}}dy}{p(y+1)y}.
\]
By Residue Theorem,
\[
\int_{\gamma_1 + \gamma_R - \gamma_2 - \gamma_r}f(z)dz = 2\pi i\,
\mbox{Res}(f,-1) = 2\pi i\cdot \frac{(-1)^{\frac{1}{p}}}{-p} =
-\frac{2\pi i}{p}e^{\frac{\log e^{\pi i}}{p}} = -2 i \alpha
e^{\alpha i},
\]where $\alpha = \frac{\pi}{p}$. We have the estimates
\[
\left| \int_{\gamma_R}f(z)dz \right| = \left|
\int_0^{2\pi}\frac{e^{\frac{1}{p}\log(Re^{i\theta})}}{p(Re^{i\theta}+1)Re^{i\theta}}
Re^{i\theta} \cdot id\theta\right| \le \frac{2\pi
R^{\frac{1}{p}}}{p(R-1)} \to 0
\]
as $R\to\infty$,
\[
\left| \int_{\gamma_r}f(z)dz \right| = \left|
\int_0^{2\pi}\frac{e^{\frac{1}{p}\log(re^{i\theta})}}{p(re^{i\theta}+1)re^{i\theta}}
re^{i\theta} \cdot id\theta\right| \le \frac{2\pi
r^{\frac{1}{p}}}{p(1-r)} \to 0
\]
as $r\to 0$, and
\[
\int_{\gamma_2}f(z)dz = - \int_r^R\frac{(xe^{2\pi
i})^{\frac{1}{p}}dx}{p(x+1)x} =
-\int_r^R\frac{x^{\frac{1}{p}}dx}{p(x+1)x}\cdot e^{2\alpha i}.
\]
Therefore by letting $r\to 0$ and $R \to \infty$, we have
\[
\int_0^{\infty}\frac{dx}{1+x^p} =
\int_0^{\infty}\frac{x^{\frac{1}{p}}dx}{p(x+1)x} = \frac{-2i\alpha
e^{\alpha i}}{1-e^{2\alpha i}} = \frac{-2i\alpha e^{\alpha
i}}{-2\sin\alpha e^{(\frac{\pi}{2}+\alpha)i}} =
\frac{\alpha}{\sin\alpha} =
\frac{\pi}{p}\csc\left(\frac{\pi}{p}\right).
\]\end{proof}

\noindent 7.9. \begin{proof} This is a special case of exercise 7.10. Answer: $\frac{\pi^4}{90}$ (verified via Mathematica: {\bf Sum[1/n\^\;4, \{n, 1, Infinity\}]}). \end{proof}

\noindent 7.10. \begin{proof} Let $C_N$ be the contour used in Lemma 7.2: $C_N=[N+\frac{1}{2}, (N+\frac{1}{2})i, -(N+\frac{1}{2}), -(N+\frac{1}{2})i, N+\frac{1}{2}]$. Then by Residue Theorem,
\[
\oint_{C_N}\frac{\pi\cot\pi z}{z^{2k}}dz = 2\pi i\sum_{n=-N}^{N}Res\left(\frac{\pi\cot\pi z}{z^{2k}}, n\right).
\]
$z=0$ is a pole of order 1 for $\cot z$. So we can assume the Laurent series of $\cot z$ near 0 is $\sum_{n=0}^{\infty}b_{2n-1}z^{2n-1}$. Then
\[
\frac{\pi\cot \pi z}{z^{2k}} = \sum_{n=0}^{\infty}\frac{\pi\cdot b_{2n-1}(\pi z)^{2n-1}}{z^{2k}} =  \sum_{n=0}^{\infty}\frac{b_{2n-1}\pi^{2n}}{z^{2(n-k)+1}}
\]
Therefore, for $\rho>0$ sufficiently small,
\[
Res\left(\frac{\pi\cot\pi z}{z^{2k}}, 0\right) = \frac{1}{2\pi i}\int_{|z|=\rho}\frac{\pi\cot\pi z}{z^{2k}}dz = b_{2k-1}\pi^{2k}.
\]
For $n\ne 0$, take $\rho>0$ sufficiently small, we have
\[
Res\left(\frac{\pi\cot\pi z}{z^{2k}},n\right) = \frac{1}{2\pi i}\int_{|z-n|=\rho}\frac{(-1)^n\cos\pi z\cdot \pi (z-n)}{z^{2k}\sin\pi(z-n)}\cdot\frac{1}{z-n}dz = \frac{1}{n^{2k}}.
\]
Combined, we can conclude
\[
\oint_{C_N}\frac{\pi\cot\pi z}{z^{2k}}dz = 2\pi i\left(b_{2k-1}\pi^{2k}+2\sum_{n=1}^N\frac{1}{n^{2k}}\right).
\]
By Lemma 7.2, $\lim_{N\to \infty}\oint_{C_N}\frac{\pi\cot\pi z}{z^{2k}}dz = 0$. So $\sum_{n=1}^{\infty}\frac{1}{n^{2k}}=-\frac{b_{2k-1}}{2}\pi^{2k}$. From formula (5.25), we have $b_{-1}=1$, $b_1=-\frac{1}{3}$, $b_3=-\frac{1}{45}$, $b_5=-\frac{2}{945}$,etc. In particular, for $k=2$, $b_{2k-1}=b_3=-\frac{1}{45}$. So $\sum_{n=1}^{\infty}\frac{1}{n^4}=\frac{\pi^4}{90}$, which gives the answer to exercise 7.9.
\end{proof}

\noindent 7.11. \begin{proof} We first deduce the Laurent series of $\frac{1}{\sin z}$ near $0$. $0$ is a pole of order 1 for $\frac{1}{\sin z}$. Since $\sin z$ is an odd function, $\frac{1}{\sin z}$ is also odd, so we can assume its Laurent series near 0 is $\sum_{l=0}^{\infty}b_{2l-1}z^{2l-1}$. Then
\[
1 = \sin z \cdot \sum_{n=0}^{\infty}b_{2n-1}z^{2n-1} = \sum_{k=0}^{\infty}\frac{(-1)^k}{(2k+1)!}z^{2k+1} \cdot \sum_{l=0}^{\infty}b_{2l-1}z^{2l-1} = \sum_{n=0}^{\infty}\left[\sum_{l=0}^nb_{2l-1}\frac{(-1)^{(n-l)}}{(2(n-l)+1)!}\right] z^{2n}.
\]
So $b_{-1}=1$ and $\sum_{l=0}^nb_{2l-1}\frac{(-1)^{(n-l)}}{(2(n-l)+1)!}=0$ for $n\ge 1$. Using this, we can easily get the Laurent series of $\frac{1}{\sin z}$ (verified by Mathematica: {\bf{Series[z/Sin[z], \{z, 0, 10\}]}})
\[
\frac{1}{\sin z} = \frac{1}{z} + \frac{z}{6}+\frac{7z^3}{360} + \frac{31z^5}{15120} + \frac{127z^7}{604800} + \frac{73 z^9}{3421440} + o(z^{10}).
\]

Let $C_N$ be the contour used in Lemma 7.2: $C_N=[N+\frac{1}{2}, (N+\frac{1}{2})i, -(N+\frac{1}{2}), -(N+\frac{1}{2})i, N+\frac{1}{2}]$. Then by Residue Theorem,
\[
\oint_{C_N}\frac{\pi}{z^2\sin\pi z}dz = 2\pi i \sum_{n=-N}^{N}Res\left(\frac{\pi}{z^2\sin\pi z},n\right).
\]
From the Laurent series of $\frac{1}{\sin\pi z}$, we can deduce
\[
Res\left(\frac{\pi}{z^2\sin\pi z},0\right) = \frac{\pi^2}{6}.
\]
For $n\ne 0$, we can find $\rho >0$ sufficiently small, so that
\[
Res\left(\frac{\pi}{z^2\sin\pi z},n\right) = \frac{1}{2\pi i}\int_{|z-n|=\rho} \frac{(-1)^n\pi(z-n)}{z^2\sin\pi(z-n)}\cdot \frac{1}{z-n}dz = \frac{(-1)^{n}}{n^2}.
\]
Therefore $\oint_{C_N}\frac{\pi}{z^2\sin\pi z}dz = 2\pi i \left[\frac{\pi^2}{6}-2\sum_{n=1}^N\frac{(-1)^{n-1}}{n^2}\right]$. Suppose $\lim_{N\to \infty}\oint_{C_N}\frac{\pi}{z^2\sin\pi z}dz = 0$, then we can conclude $\sum_{n=1}^{\infty}\frac{(-1)^{n-1}}{n^2} = \frac{\pi^2}{12}$ (verified by Mathematica: {\bf{Sum[(-1)\^\;(n - 1)/n\^\;2, \{n, 1, Infinity\}]}}).
\end{proof}


\subsection{章末习题}

\noindent 1. (1) \begin{proof} $Res\left(\frac{e^{z^2}}{z-1},1\right)=e^{z^2}|_{z=1}=e$.\end{proof}

(2) \begin{proof} $Res\left(\frac{e^{z^2}}{(z-1)^2},1\right)=\frac{d}{dz}e^{z^2}|_{z=1}=2e$. \end{proof}

(3) \begin{proof} $1-\cos z = \sum_{n=1}^{\infty}\frac{(-1)^{n+1}}{(2n)!}z^{2n}$. So $0$ is a pole of order 2 for $\left(\frac{z}{1-\cos z}\right)^2$. For sufficiently small $\rho >0$, we have
\[
Res\left(\left(\frac{z}{1-\cos z}\right)^2,0\right) = \frac{1}{2\pi i}\int_{|z|=\rho}\frac{z^4}{(1-\cos z)^2}\cdot \frac{dz}{z^2} = \frac{d}{dz}\left.\left[\frac{z^4}{(1-\cos z)^2}\right]\right|_{z=0}
\]
By repeatedly applying l'Hospital's rule for analytic functions (see exercise 5.1, 5.2 in the text, page 62) and the fact $\lim_{z\to 0}\frac{\sin z}{z}=1$, we can conclude the residue is equal to 0.
\end{proof}

(4) \begin{proof}By repeatedly using l'Hospital's rule for analytic functions and the fact $\lim_{z\to 0}\frac{\sin z}{z}=1$, we have
\[
Res\left(\frac{1}{z^2\sin z},0\right) = Res \left(\frac{1}{z^3}\cdot\frac{z}{\sin z},0\right) = \frac{1}{2!}\left.\frac{d^2}{dz^2}\left(\frac{z}{\sin z}\right)\right|_{z=0} = \frac{1}{6}.
\]
 \end{proof}

(5)\begin{proof}
\[
Res\left(\frac{e^z}{(z^2-1)^2},1\right)=Res\left(\frac{e^z}{(z-1)^2(z+1)^2},1\right) = \frac{d}{dz}\left.\frac{e^z}{(z+1)^2}\right|_{z=1} = 0.
\]
\end{proof}

(6) \begin{proof}
Let $z_n=-\left(\frac{2n+1}{2}\pi\right)^2$. If we take the convention that $\sqrt{-1}=i$, we have $\sqrt{z_n}=(n\pi+\frac{\pi}{2})i$. It's easy to see $\cosh\sqrt{z_n}=\cosh[(n\pi+\frac{\pi}{2})i]=\cos(n\pi+\frac{\pi}{2})=0$, and
\[
\lim_{z\to z_n}(\cosh\sqrt{z})' = \lim_{z\to z_n}\frac{\sinh\sqrt{z}}{2\sqrt{z}} = \frac{\sin(n\pi+\frac{\pi}{2})}{(2n+1)\pi} = \frac{(-1)^n}{(2n+1)\pi}.
\]
So $z_n$ is a pole of order 1 for $\frac{1}{\cosh\sqrt{z}}$ and
\[
Res\left(\frac{1}{\cosh\sqrt{z}},z_n\right) =Res\left(\frac{z-z_n}{\cosh\sqrt{z}}\cdot \frac{1}{z-z_n}, z_n\right) = \lim_{z\to z_n}\frac{z-z_n}{\cosh\sqrt{z}}=\frac{1}{(\cosh\sqrt{z})'|_{z=z_n}} = (-1)^n(2n+1)\pi.
\]
\end{proof}

\noindent 2. (1) \begin{proof} Let $f(z)=\frac{1}{z^3(1-z)(1+z)}$. Then
\[
\mbox{Res}(f,0)=\frac{1}{2!}\left.\frac{d^2}{dz^2}\frac{1}{1-z^2}\right|_{z=0}=1,
\]
$\mbox{Res}(f,1)=-\frac{1}{2}$, and $\mbox{Res}(f,-1)=-\frac{1}{2}$. $\infty$ is a removable singularity.
\end{proof}

(2) \begin{proof} $\infty$ is a removable singularity. Let $f(z)=\frac{1}{(z^2+1)^{m+1}}=\frac{1}{(z+i)^{m+1}}\cdot \frac{1}{(z-i)^{m+1}}$. Then
\[
\mbox{Res}(f,i)=\frac{1}{m!}\frac{d^m}{dz^m}\left.\left[\frac{1}{(z+i)^{m+1}}\right]\right|_{z=i} = \left.\frac{1}{m!}[-(m+1)][-(m+2)]\cdots(-2m)(z+i)^{-2m-1}\right|_{z=i} = \frac{-i}{2^{2m+1}}\frac{(2m)!}{(m!)^2}.
\]
Similarly, we have $\mbox{Res}(f,-i)=\frac{i}{2^{2m+1}}\frac{(2m)!}{(m!)^2}$.
\end{proof}

(3) \begin{proof} $1-\cos z = 2\sin^2\frac{z}{2}$. So $z_n=2n\pi$
$(n\in \mathbb Z)$ is a pole of order 2 for the function
$f(z)=\frac{z}{1-\cos z}$. Note for each $n$,
\[
f(z) = \frac{z}{2\sin^2\frac{z}{2}} =
\frac{z}{2\left(\frac{z-2n\pi}{2}\right)^2} \cdot
\frac{\left(\frac{z-2n\pi}{2}\right)^2}{\sin^2\frac{z-2n\pi}{2}}
=\frac{1}{(z-2n\pi)^2}\cdot
\frac{2z\left(\frac{z-2n\pi}{2}\right)^2}{\sin^2\frac{z-2n\pi}{2}}.
\]
Define $h(w)=\frac{w}{\sin w}$. Since the Laurent series of
$\frac{1}{\sin w}$ near 0 is \[ \frac{1}{\sin z} = \frac{1}{z} +
\frac{z}{6}+\frac{7z^3}{360} + \frac{31z^5}{15120} +
\frac{127z^7}{604800} + \frac{73 z^9}{3421440} + o(z^{10}),
\]we have the Laurent series of $h(w)$ near $0$:
\[
h(w)=\frac{w}{\sin w} = 1 + \frac{w^2}{6}+\frac{7w^4}{360} +
\frac{31w^6}{15120} + \frac{127w^8}{604800} + \frac{73
w^{10}}{3421440} + o(w^{11}).
\]
Therefore, $h(0)=1$ and $h'(0)=0$. Moreover,
\begin{eqnarray*}
\mbox{Res}(f,2n\pi) &=& Res\left(\frac{1}{(z-2n\pi)^2}\cdot
2zh^2\left(\frac{z-2n\pi}{2}\right), 2n\pi\right) \\
&=& \lim_{z\to
2n\pi}\frac{d}{dz}\left[2zh^2\left(\frac{z}{2}-n\pi\right)\right] \\
&=& \lim_{z\to 2n\pi} \left[2h^2\left(\frac{z}{2}-n\pi\right) + 2z
\cdot
2h\left(\frac{z}{2}-n\pi\right)h'\left(\frac{z}{2}-n\pi\right)\cdot\frac{1}{2}\right]
\\
&=& 2.
\end{eqnarray*}
\end{proof}

(4) \begin{proof} Let $f(z) = \frac{\sqrt{z}}{\sinh \sqrt{z}}$. Then
$\sinh\sqrt{z}=0$ if and only if $z=-(n\pi)^2$ $(n\in \mathbb Z)$.
Let $z_n=-(n\pi)^2$. Then $(z_n)_{n\in \mathbb Z}$ are singularities
of $f(z)$. For $n=0$, $z_0=0$ is a removable singularity since
\[
\lim_{z\to 0}\frac{\sqrt{z}}{\sinh \sqrt{z}} = \frac{1}{\cosh 0} =
1.
\]
Therefore $\mbox{Res}(f,0)=0$. For $n\ne 0$, note $\cosh\sqrt{z_n} = \cosh(n\pi i) = \cos(n\pi) = (-1)^n$. So we have (suppose $\rho>0$ is
sufficiently small)
\[
\mbox{Res}(f,z_n) = \frac{1}{2\pi i}\int_{|z-z_n|=\rho}\frac{1}{z-z_n}\cdot
\frac{\sqrt{z}(\sqrt{z}-\sqrt{z_n})(\sqrt{z}+\sqrt{z_n})}{\sinh\sqrt{z}-\sinh\sqrt{z_n}}
dz = \frac{\sqrt{z_n}(\sqrt{z_n}+\sqrt{z_n})}{\cosh\sqrt{z_n}} =
\frac{2z_n}{(-1)^n} = (-1)^{n+1}\cdot 2(n\pi)^2.
\]
\begin{remark}
This solution has a different result from that of the textbook's
solution. Check.
\end{remark}
\end{proof}


(5) \begin{proof} Both $0$ and $\infty$ are essential singularities
of $f(z) = \exp\left[\frac{1}{2}\left(z-\frac{1}{z}\right)\right]$,
as can be seen by letting $z\to \infty$ and $z\to 0$ along positive
and negative real axis.

To find the residue of the function at $0$, we note
\[
\exp\left[\frac{1}{2}\left(z-\frac{1}{z}\right)\right] =
\sum_{n=0}^{\infty} \frac{1}{2^nn!} \left(z-\frac{1}{z}\right)^n.
\]
For each $n$,
\[
\left(z-\frac{1}{z}\right)^n = \sum_{k=0}^n\binom{n}{k}z^k(-z^{-1})^{n-k}.
\]
So the expansion of $\left(z-\frac{1}{z}\right)^n$ contains $z^{-1}$
term if and only if $n$ is an odd number, and in this case, the
coefficient of $z^{-1}$ is $(-1)^{\frac{n+1}{2}}\binom{n}{\frac{n-1}{2}}$.
So in the expansion of
$\exp\left[\frac{1}{2}\left(z-\frac{1}{z}\right)\right]$, the coefficient of $z^{-1}$ is
\[
\sum_{m=0}^{\infty} (-1)^{m+1} \frac{1}{2^{2m+1}(2m+1)!}\binom{2m+1}{m} =
\sum_{m=0}^{\infty}\frac{(-1)^{m+1}\left(\frac{1}{2}\right)^{2m+1}}{m!(m+1)!}
= -J_1(1),
\]
where $J_{\alpha}(z)$ is Bessel function of the first kind:
\[J_{\alpha}(z) =
\sum_{m=0}^{\infty}\frac{(-1)^m}{m!\Gamma(m+\alpha+1)}\left(\frac{z}{2}\right)^{2m+\alpha}.\]
So $\mbox{Res}(f,0)=-J_1(1)$. Since the above expansion is valid in
$\mathbb C\setminus \{0\}$, by remark on page 87 (formula (7.12)),
we conclude $\mbox{Res}(f,\infty)=-\mbox{Res}(f,0) = J_1(1)$.

\begin{remark}
The results in the above solution have signs opposite to those of
the textbook's solution. Check.
\end{remark}
\end{proof}

(6) \begin{proof} $\infty$ is a removable singularity and $0$ is a
pole. Using the Taylor series of cosine function near $0$, we
have
\[
\cos\frac{1}{\sqrt{z}} =
\sum_{n=0}^{\infty}\frac{(-1)^n}{(2n)!}\frac{1}{z^n}.
\]
So the residue is equal to the coefficient of $\frac{1}{z}$, which
is $-\frac{1}{2}$.
\end{proof}

(7) \begin{proof} Let $f(z) = \frac{1}{(z-1)\ln z}$. Then $1$ is a
pole of $f(z)$ and $\infty$ is a removable singularity. When $\ln
1=2n\pi i$ $(n\in \mathbb Z \setminus \{0\})$, for $\rho>0$
sufficiently small,
\[
\mbox{Res}(f, 1) = \frac{1}{2\pi i} \int_{|z-1|=\rho}f(z) dz = \frac{1}{\ln
1} = \frac{1}{2n\pi i}.
\]
When $\ln 1 = 0$, $\frac{z-1}{\ln z}$ is analytic near $1$. So for
$\rho
>0$ sufficiently small, by applying l'Hospital's rule, we have
\[
\mbox{Res}(f, 1) = \frac{1}{2\pi i}\int_{|z-1|=\rho}\frac{1}{(z-1)^2}\cdot
\frac{z-1}{\ln z} dz = \left.\frac{d}{dz} \left[\frac{z-1}{\ln
z}\right]\right|_{z=1} = \frac{1}{2}.
\]
\end{proof}

(8) \begin{proof} Let $f(z) = \frac{1}{z} \left[1+\frac{1}{z+1} +
\frac{1}{(z+1)^2}+\cdots+\frac{1}{(z+1)^n}\right]$. Then $0$ and
$-1$ are poles of $f$, while $\infty$ is a removable singularity of
$f$. For $\rho>0$ sufficiently small,
\begin{eqnarray*}
\mbox{Res}(f, 0) &=& \frac{1}{2\pi
i}\int_{|z|=\rho}\frac{1}{z}\left[1+\frac{1}{z+1} +
\frac{1}{(z+1)^2}+\cdots+\frac{1}{(z+1)^n}\right] dz \\
&=& \left. \left[1+\frac{1}{z+1} +
\frac{1}{(z+1)^2}+\cdots+\frac{1}{(z+1)^n}\right]\right|_{z=0}\\
&=& n+1.
\end{eqnarray*}
Since $f(z)$ can be written as
\[
\frac{1}{z} \frac{1-\frac{1}{(z+1)^{n+1}}}{1-\frac{1}{z+1}} =
\frac{1}{z^2}\left[(z+1)-\frac{1}{(z+1)^n}\right],
\]
we have
\[
\mbox{Res}(f,-1) = \frac{1}{2\pi i}\int_{|z+1|=\rho}\frac{-1}{z^2(z+1)^n}dz
=
-\left.\frac{1}{(n-1)!}\frac{d^{n-1}}{dz^{n-1}}z^{-2}\right|_{z=-1}
= -\left.\frac{(-2)\cdots(-n)z^{-(n+1)}}{(n-1)!}\right|_{z=-1} = -n.
\]
\end{proof}

\noindent 3. (1) \begin{proof} Note $\frac{1}{z}$ is analytic on $\mathbb C\setminus\{0\}$, so $\mbox{Res}(\frac{1}{z},\infty)=-\mbox{Res}(\frac{1}{z},0)=-1$. \end{proof}

(2) \begin{proof} Note $\frac{\cos z}{z}$ is analytic on $\mathbb C\setminus \{0\}$, so $\mbox{Res}(\frac{\cos z}{z},\infty)=-\mbox{Res}(\frac{\cos z}{z},0) = -1$. \end{proof}

(3)\begin{proof} Since $\cos(2n\pi + \frac{\pi}{2}) = 0$ ($n\in \mathbb Z$), $\infty$ is not an isolated singularity. \end{proof}

(4) \begin{proof} Since $(z^2+1)e^z$ is analytic on $\mathbb C$, $\mbox{Res}((z^2+1)e^z,\infty)=0$. \end{proof}

(5) \begin{proof} $e^{-\frac{1}{z^2}}=\sum_{n=0}^{\infty}\frac{(-1)^n}{z^{2n}n!}$, which has no $z^{-1}$ term. So $\mbox{Res}(e^{-\frac{1}{z^2}},\infty)=0$.\end{proof}

(6) \begin{proof} Recall $\mbox{Res}(\sqrt{(z-1)(z-2)},\infty)$ is equal to the coefficient of the term $z$ in the power series expansion of $-\sqrt{\left(\frac{1}{z}-1\right)\left(\frac{1}{z}-2\right)}$ near 0. By generalized Newton's formula, we have
\[
\sqrt{(1-z)(1-2z)}=1-\frac{3z}{2}-\frac{z^2}{8}-\frac{3z^3}{16}-\cdots
\]
Depending on the branch we choose, we have $\sqrt{z^2}=\pm z$. So the power series expansion of $-\sqrt{\left(\frac{1}{z}-1\right)\left(\frac{1}{z}-2\right)}$ near 0 is
\[
\mp \left(\frac{1}{z}-\frac{3}{2}-\frac{z}{8}-\frac{3z^2}{16}-\cdots\right).
\]
Therefore, $\mbox{Res}(\sqrt{(z-1)(z-2)},\infty)=\pm \frac{1}{8}$.
\end{proof}

\noindent 4. (1) \begin{proof} The equation $z^4+1=0$ has four roots: $z_1=e^{\frac{\pi}{4}i}$, $z_2 = e^{-\frac{\pi}{4}i}$, $z_3=e^{\frac{3\pi}{4}i}$, and $z_4=e^{\frac{5\pi}{4}i}$. The intersection points of $|z-1|=1$ and $|z|=1$ are $e^{\pm\frac{\pi}{3}i}$. So only $z_1$ and $z_2$ fall within the disc $|z-1|<1$. By Residue Theorem, we have
\begin{eqnarray*}
\oint_{|z-1|=1}\frac{dz}{1+z^4} &=& 2\pi i \left[\mbox{Res}\left(\frac{1}{1+z^4},e^{\frac{\pi}{4}i}\right) + \mbox{Res}\left(\frac{1}{1+z^4}, e^{-\frac{\pi}{4}i}\right)\right] \\
&=& 2\pi i \left(\lim_{z\to z_1}\frac{z-z_1}{z^4+1} + \lim_{z\to z_2}\frac{z-z_2}{z^4+1}\right) \\
&=& 2\pi i \cdot \left(\frac{1}{4z_1^3}+\frac{1}{4z_2^3}\right) \\
&=& 2\pi i \cdot \frac{z_1+z_2}{-4} \\
&=& - \frac{\pi i}{\sqrt{2}}.
\end{eqnarray*}
\end{proof}

(2) \begin{proof} Using the same notation as in part (1), we first show all the roots of $z^4+1=0$ fall within the circle $|z-1|=2$. Indeed, for any $\theta\in [0,2\pi)$, $|e^{i\theta}-1|=2|\sin\frac{\theta}{2}| \le 2$, where the equality holds if and only if $\theta = \pi$. So $z_i$'s $(i=1,2,3,4)$ all fall within the circle $|z-1|=2$. Then by Residue Theorem and an argument similar to part (1), we have
\[
\oint_{|z-1|=2}\frac{dz}{1+z^4} = 2\pi i\cdot \frac{z_1+z_2+z_3+z_4}{-4} = 0.
\]  \end{proof}

(3) \begin{proof} By Residue Theorem,
\[
\oint_{|z-1|=1}\frac{1}{z^2-1}\sin\frac{\pi z}{4}dz = 2\pi i \cdot \frac{\sin\frac{\pi}{4}}{1+1} = \frac{\sqrt{2}\pi i}{2}.
\]
\end{proof}

(4) \begin{proof}
By Residue Theorem,
\[
\oint_{|z|=3}\frac{1}{z^2-1}\sin\frac{\pi z}{4} dz = 2\pi i \left(\frac{\sin\frac{\pi}{4}}{1+1}+\frac{\sin\left(-\frac{\pi}{4}\right)}{-1-1}\right) = \sqrt{2}\pi i.
\]
\end{proof}

(5) \begin{proof} The singularities that fall within the circle $|z|=n$ are $k+\frac{1}{2}$ with $k=-n, -n+1,\cdots, n-1$. By Residue Theorem, we have
\[
\oint_{|z|=n}\tan\pi z dz = 2\pi i \sum_{k=-n}^{n-1}\lim_{z\to k+\frac{1}{2}}\frac{[z-(k+\frac{1}{2})]\sin(\pi z)}{\cos(\pi z)} = 2\pi i\sum_{k=-n}^{n-1}\frac{\sin\left(k\pi+\frac{1}{2}\pi\right)}{-\pi \sin\left(k\pi+\frac{1}{2}\pi\right)}=-4ni.
\]
\end{proof}

(6) \begin{proof} Let $z_n$ $(1\le n \le 10)$ be the $n$-th root of the equation $z^{10}=2$. For example, we can let $z_n = 2^{\frac{1}{10}}e^{\frac{2n\pi i}{10}}$. Then similar to our solution of part (1), Residue Theorem gives
\[
\oint_{|z|=2}\frac{dz}{z^3(z^{10}-2)} = 2\pi i \left[\frac{1}{2!}\left.\frac{d^2}{dz^2}(z^{10}-2)^{-1}\right|_{z=0} + \sum_{n=1}^{10}\frac{1}{z_n^3}\frac{1}{10z_n^9}\right] =\frac{\pi i}{10}\sum_{n=1}^{10}\frac{1}{z_n^2} = \frac{\pi i}{10 \cdot 2^{\frac{1}{5}}}\sum_{n=1}^{10}e^{-\frac{2n\pi i}{5}}.
\]
Note $\sum_{n=1}^{10}e^{-\frac{2n\pi i}{5}}= e^{-\frac{2\pi i}{5}}\frac{1-(e^{-\frac{2\pi i}{5}})^{10}}{1-e^{-\frac{2\pi i}{5}}}=0$. So the integral is evaluated to $0$.
\end{proof}

(7) \begin{proof}By Residue Theorem,
\[
\oint_{|z|=1}\frac{e^z}{z^3}dz = 2\pi i \cdot \frac{1}{2!} \left.\frac{d^2}{dz^2}e^z\right|_{z=0} = \pi i.
\]
\end{proof}

(8) \begin{proof} We note $e^{2\pi i z^3}-1=0$ if and only if for some $k\in \mathbb Z$, $z^3=k$. Since $n<R^3<n+1$, a number $z_*$ is a root of $e^{2\pi i z^3}-1=0$ within the circle $|z|=R$ if and only if $z_*^3=k$ for some $k\in [-n, n]$. Suppose those roots are $z_j$. Then by Residue Theorem, we have (assume $\rho>0$ is sufficiently small)
\[
\oint_{|z|=R}\frac{z^2}{e^{2\pi iz^3}-1}dz = 2\pi i\sum_{z_j\ne 0}\frac{z_j^2}{2\pi i \cdot 3 z_j^2e^{2\pi i z_j^3}} + \oint_{|z|=\rho} \frac{z^2}{2\pi i z^3 + \frac{1}{2!}(2\pi i z^3)^2+ \cdots}dz = \frac{1}{3} \sum_{j\ne 0, |j|\le n}1 + 1 = \frac{2n}{3}+1.
\]
\begin{remark}
Note our result is different from the textbook's solution.
\end{remark}
\end{proof}

\noindent 5. (1) \begin{proof}
\begin{eqnarray*}
\int_0^{2\pi}\cos^{2n}\theta d\theta &=&
\int_{|z|=1}\left(\frac{z+z^{-1}}{2}\right)^{2n}\frac{dz}{iz} =
\frac{2\pi}{2^{2n}(2n)!}\frac{(2n)!}{2\pi
i}\int_{|z|=1}\frac{(z^2+1)^{2n}}{z^{2n+1}}dz \\
&=& \frac{2\pi}{2^{2n}(2n)!}
\left.\frac{d^{2n}}{dz^{2n}}(z^2+1)^{2n}\right|_{z=0} =
\frac{2\pi}{2^{2n}(2n)!}
\left.\frac{d^{2n}}{dz^{2n}}\binom{2n}{n}z^{2n}\right|_{z=0} \\
&=& \frac{\pi}{2^{2n-1}}\frac{(2n)!}{(n!)^2}.
\end{eqnarray*}
\end{proof}

(2) \begin{proof}
\begin{eqnarray*}
\int_0^{2\pi}\frac{dx}{(a+b\cos x)^2} &=&
\int_{|z|=1}\frac{1}{\left(a+b\frac{z+z^{-1}}{2}\right)^2}\frac{dz}{iz}
= \int_{|z|=1}\frac{4z}{(bz^2+2az+b)^2}\frac{dz}{i}.
\end{eqnarray*}
The equation $bz^2+2az+b=0$ has two solutions:
$z_1=\frac{-a+\sqrt{a^2-b^2}}{b}$ and
$z_2=\frac{-a-\sqrt{a^2-b^2}}{b}$. Clearly $|z_2|>1$ and $|z_1|<1$.
So by Residue Theorem, we have ($f(z) := \frac{4z}{(bz^2+2az+b)^2i}$
and $\rho>0$ is sufficiently small)
\begin{eqnarray*}
\int_0^{2\pi}\frac{dx}{(a+b\cos x)^2} &=& 2\pi i \mbox{Res}(f, z_1) \\
&=&
\int_{|z-z_1|=\rho}\frac{4z}{b^2(z-z_1)^2(z-z_2)^2}\frac{dz}{i}\\
&=& \frac{4}{b^2i}\cdot 2\pi i
\left.\frac{d}{dz}\frac{z}{(z-z_2)^2}\right|_{z=z_1} \\
&=& \frac{8\pi}{b^2}\frac{z_1+z_2}{(z_2-z_1)^3} \\
&=& \frac{2a\pi}{(a^2-b^2)^{3/2}}.
\end{eqnarray*}
\end{proof}

(3) \begin{proof} We note $\sin^2\theta = \frac{1}{2}[1 -
\cos(2\theta)]$. Using the substitution rule $\theta =
\frac{1}{2}\alpha$, we have
\begin{eqnarray*}
\int_0^{\pi}\frac{d\theta}{1+\sin^2\theta} =
\int_0^{2\pi}\frac{d\alpha}{3-\cos\alpha} =
2i\int_{|z|=1}\frac{dz}{z^2-6z+1}.
\end{eqnarray*}
The equation $z^2-6z+1=0$ has two roots: $z_1=3+2\sqrt{2}$ and
$z_2=3-2\sqrt{2}$. It's clear that $|z_1|>1$ and $|z_2|<1$. By
Residue Theorem, we have
\[
\int_0^{\pi}\frac{d\theta}{1+\sin^2\theta} = 2i \cdot \frac{2\pi
i}{2\pi i}\int_{|z|=1}\frac{dz}{(z-z_1)(z-z_2)} = -4\pi \cdot
\frac{1}{z_2-z_1} = \frac{\pi}{\sqrt{2}}.
\]
\end{proof}

(4) \begin{proof}Similar to problem (3), we have
 the following argument:
\begin{eqnarray*}
\int_0^{\pi}\frac{d\theta}{(1+\sin^2\theta)^2} &=&
2\int_0^{2\pi}\frac{d\alpha}{(3-\cos\alpha)^2} = \frac{8}{i}
\int_{|z|=1}\frac{zdz}{(z^2-6z+1)^2} = 16\pi \cdot \frac{1}{2\pi
i}\int_{|z|=1}\frac{zdz}{(z-z_1)^2(z-z_2)^2} \\
&=& 16\pi \cdot
\left.\frac{d}{dz}\left[\frac{z}{(z-z_1)^2}\right]\right|_{z=z_2} =
16\pi \frac{z_1+z_2}{(z_1-z_2)^3} = \frac{3\pi}{4\sqrt{2}}.
\end{eqnarray*}
\end{proof}

\noindent 6. (1) \begin{proof} This is a special case of (3), with
$n=2$ and $m=1$. See the solution there. \end{proof}

(2) \begin{proof} Let $f(z)=\frac{1}{(1+z^2)^{n+1}}$. Then for
$C_R=\{z: 0\le \arg z \le \pi, |z|=R\}$ $(R>0)$. When $R>1$, we have
by Residue Theorem and Problem 2 (2) of this chapter
\[
\int_{-R}^Rf(z)dz+\int_{C_R}f(z)dz  = 2\pi i \mbox{Res}(f,i) = 2\pi i \cdot
\frac{-i}{2^{2n+1}}\frac{(2n)!}{(n!)^2} =
\frac{\pi}{2^{2n}}\frac{(2n)!}{(n!)^2}.
\]
Furthermore, we note
\[
\left|\int_{C_R}f(z)dz \right| =
\left|\int_0^{\pi}\frac{Re^{i\theta}i
d\theta}{(1+R^2e^{2i\theta})^{n+1}}\right| \le
\int_0^{\pi}\frac{Rd\theta}{(R^2-1)^{n+1}} = \frac{\pi
R}{(R^2-1)^{n+1}} \to 0
\]
as $R\to\infty$. So
$\int_{-\infty}^{\infty}\frac{1}{(1+x^2)^{n+1}}dx =
\frac{\pi}{2^{2n}}\frac{(2n)!}{(n!)^2}$.
\end{proof}

(3) \begin{proof} We note
\[
\int_{-\infty}^{\infty}\frac{x^{2m}}{1+x^{2n}}dx =
2\int_0^{\infty}\frac{x^{2m}}{1+x^{2n}}dx =
\frac{1}{n}\int_0^{\infty}\frac{y^{\frac{2m+1}{2n}}}{(1+y)y}dy
\]
by the substitution rule $x^{2n}=y$. Define $p=\frac{2n}{2m+1}$,
then $p>1$. In our solution of exercise 7.8 in the text, we already
showed
\[
\int_0^{\infty}\frac{y^{\frac{1}{p}}}{(1+y)y}dy = \pi
\csc\left(\frac{\pi}{p}\right).
\]
So $\int_{-\infty}^{\infty}\frac{x^{2m}}{1+x^{2n}}dx =
\frac{\pi}{n}\csc\left(\frac{2m+1}{2n}\pi\right)$.
\end{proof}

(4) \begin{proof}
We note $\cosh(\frac{\pi}{2}z)=0$ if and only if $z=(2n+1)i$ $(n\in \mathbb Z)$. Define $f(z) = \frac{1}{(z^2+1)\cosh(\frac{\pi}{2}z)}$. Then for $n\ne 0, -1$, $z_n$ is a pole of order 1 for $f(z)$ and we have (assume $\rho>0$ is sufficiently small)
\[
\mbox{Res}(f,z_n)=\frac{1}{2\pi i}\int_{|z-z_n|=\rho}\frac{1}{z-z_n}\cdot \frac{\frac{\pi}{2}z - \frac{\pi}{2}z_n}{\cosh(\frac{\pi}{2}z)-\cosh(\frac{\pi}{2}z_n)} \frac{dz}{\frac{\pi}{2}(z^2+1)} = \frac{1}{\sinh(\frac{\pi}{2}z_n)} \cdot \frac{1}{\frac{\pi}{2}(z_n^2+1)} = \frac{i}{2\pi}\frac{(-1)^{n}}{n(n+1)}.
\]

To find the residue of $f(z)$ at $i$, define $h(z) = \frac{\pi}{2}\frac{z-i}{\cosh(\frac{\pi}{2}z)-\cosh(\frac{\pi}{2}i)}$. Then $h(i) = \frac{1}{\sinh(\frac{\pi}{2}i)} = -i$. Applying l'Hospital's rule, we have
\begin{eqnarray*}
h'(i) &=& \lim_{z\to i}\frac{\frac{\pi}{2}\cosh(\frac{\pi}{2}z)-\frac{\pi}{2}(z-i)\frac{\pi}{2}\sinh(\frac{\pi}{2}z)}{[\cosh(\frac{\pi}{2}z)]^2}
= \lim_{z\to i} \frac{\frac{\pi^2}{4}\sinh(\frac{\pi}{2}z)-\frac{\pi^2}{4}[\sinh(\frac{\pi}{2}z)+(z-i)\frac{\pi}{2}\cosh(\frac{\pi}{2}z)]}{2\cosh(\frac{\pi}{2}z)\frac{\pi}{2}\sinh(\frac{\pi}{2}z)}\\
&=& \lim_{z\to i}\frac{-\frac{\pi}{2}(z-i)}{\pi \sinh(\frac{\pi}{2}z)}\cdot \frac{\pi^2}{4} = 0.
\end{eqnarray*}
Therefore
\[
\mbox{Res}(f,i) = \left.\frac{d}{dz}\left[\frac{2h(z)}{\pi(z+i)}\right]\right|_{z=i} = \left.\frac{2[h'(z)(z+i)-h(z)]}{\pi(z+i)^2}\right|_{z=i} = \frac{1}{2\pi i}.
\]
So
\[
\sum_{n=0}^{\infty}\mbox{Res}(f,z_n) = \frac{1}{2\pi i} + \sum_{n=1}^{\infty}\frac{i}{2\pi}\frac{(-1)^{n}}{n(n+1)} = \frac{1}{2\pi i} + \frac{1}{2\pi i}(-1+2\ln 2) = \frac{2\ln 2}{2\pi i}.
\]

Now we consider the path $C_N=[-N, N, N+4Ni, -N+4Ni, -N]$. Then
\[
\oint_{C_N}f(z)dz = I+I\!I + I\!I\!I + I\!V,
\]
where
\[
I = \int_{-N}^Nf(x)dx, \; I\!I = \int_0^{4N}f(N+iy)idy,\; I\!I\!I = \int_N^{-N}f(x+4Ni)dx, \; I\!V = \int_{4N}^{0}f(-N+iy)idy.
\]
We note
\[
|I\!I| \le \int_0^{4N}\frac{dy}{(|N+iy|^2-1)\cdot \frac{e^{\pi N/2}-e^{-\pi N/2}}{2}} \le \frac{2}{e^{\frac{\pi N}{2}}-e^{-\frac{\pi N}{2}}} \cdot \frac{4N}{N^2-1} \to 0
\]
as $N\to \infty$. Similarly, we can show $|I\!V|\to 0$ as $N\to \infty$. Meanwhile, we have
\[
|I\!I\!I| \le \int_{-N}^N\frac{dx}{|x+4Ni|^2-1} \le \frac{2N}{16N^2-1} \to 0
\]
as $N\to \infty$. Therefore, $\int_{-\infty}^{\infty}f(x)dx = \lim_{N\to\infty}\oint_{C_N}f(z)dz$ and by Residue Theorem, we have
\begin{eqnarray*}
\int_{-\infty}^{\infty}f(x)dx &=& \lim_{N\to\infty}\oint_{C_N}f(z)dz = \lim_{N\to\infty}2\pi i \sum_{n=0}^{2N-1}\mbox{Res}(f,z_n) = 2\pi i \sum_{n=0}^{\infty}\mbox{Res}(f,z_n) = 2\pi i \cdot \frac{2\ln 2}{2\pi i} = 2\ln 2.
\end{eqnarray*}

\begin{remark}
In the proof, we used the following facts from calculus (see, for example, 沈燮昌\cite{沈燮昌1986}, page 221):
\[
\sum_{n=1}^{\infty}\frac{(-1)^{n-1}}{n} = \ln 2
\] and
\begin{eqnarray*}
\sum_{n=1}^{\infty}\frac{(-1)^n}{n(n+1)} &=& \sum_{n=1}^{\infty}(-1)^n\left(\frac{1}{n}-\frac{1}{n+1}\right) \\
&=& \left(\frac{1}{2}-1\right)+\left(\frac{1}{2}-\frac{1}{3}\right)+\left(\frac{1}{4}-\frac{1}{3}\right)+\cdots \\
&=& 1- 2\left(1-\frac{1}{2}+\frac{1}{3}-\frac{1}{4}+\cdots\right) \\
&=& 1-2\ln 2.
\end{eqnarray*}
The calculus proof of $\sum_{n=1}^{\infty}\frac{(-1)^{n-1}}{n} = \ln 2$ needs a little bit trick. However, if we use theory of analytic functions, then the proof becomes straightforward. Indeed, we note in the unit disc,
\[
\ln(1+z) = \sum_{n=1}^{\infty}\frac{(-1)^{n-1}z^n}{n}.
\]
The series $\sum_{n=1}^{\infty}\frac{(-1)^{n-1}z^n}{n}$ clearly converges at $z=1$. So by Abel's Second Theorem (see, for example, Fang \cite{方企勤1996}, page 121), we must have
\[
\ln2 = \lim_{z\in\mathbb R, z\to 1}\ln(1+z)=\lim_{z\in \mathbb R, z\to 1}\sum_{n=1}^{\infty}\frac{(-1)^{n-1}z^n}{n} = \sum_{n=1}^{\infty}\frac{(-1)^{n-1}}{n}.
\]
\end{remark}
\end{proof}

\noindent 7. (1) \begin{proof}
Let $f(z) = \frac{e^{iz}}{1+z^4}$ and $C_R=\{z:|z|=R, 0\le \arg z \le \pi\}$. The equation $1+z^4=0$ has four roots: $z_1=e^{\frac{\pi}{4}i}$, $z_2=e^{\frac{3\pi}{4}i}$, $z_3=e^{-\frac{\pi}{4}i}$, and $z_4=e^{-\frac{3\pi}{4}i}$, where $z_1$ and $z_2$ fall in the upper half plane. For $R$ large enough, we have by Residue Theorem
\[
\int_{-R}^Rf(z)dz + \int_{C_R}f(z)dz = 2\pi i (\mbox{Res}(f,z_1)+\mbox{Res}(f,z_2)).
\]
It's easy to see $\mbox{Res}(f,z_1) = \lim_{z\to z_1}\frac{z-z_1}{1+z^4}e^{iz} = \frac{e^{iz_1}}{4z_1^3}= -\frac{1}{4}e^{iz_1}z_1 = -\frac{1+i}{4\sqrt{2}}e^{\frac{-1+i}{\sqrt{2}}}$ and $\mbox{Res}(f,z_2) = -\frac{-1+i}{4\sqrt{2}}e^{\frac{-1-i}{\sqrt{2}}}$. So
\[
2\pi i (\mbox{Res}(f,z_1)+\mbox{Res}(f,z_2)) = 2\pi i \cdot  \frac{-1}{4\sqrt{2}}e^{-\frac{1}{\sqrt{2}}}\cdot 2i \left(\cos\frac{1}{\sqrt{2}}+\sin \frac{1}{\sqrt{2}}\right) = \frac{\pi}{\sqrt{2}}e^{-\frac{1}{\sqrt{2}}}\left(\cos\frac{1}{\sqrt{2}}+\sin \frac{1}{\sqrt{2}}\right).
\]
Meanwhile, for $z\in C_R$, $\left|\frac{1}{1+z^4}\right| \le \frac{1}{R^4-1} \to 0$ as $R\to \infty$. By Jordan's lemma (Lemma 7.1), \[\lim_{R\to\infty}\int_{C_R}f(z)dz = 0.\]
Combined, we conclude
\[
\int_0^{\infty}\frac{\cos x}{1+x^4}dx = \frac{1}{2}\int_{-\infty}^{\infty}\frac{\cos x}{1+x^4}dx = \frac{1}{2} \cdot  2\pi i (\mbox{Res}(f,z_1)+\mbox{Res}(f,z_2)) = \frac{\pi}{2\sqrt{2}}e^{-\frac{1}{\sqrt{2}}}\left(\cos\frac{1}{\sqrt{2}}+\sin \frac{1}{\sqrt{2}}\right).
\]
\end{proof}

(2) \begin{proof}Let $f(z) = \frac{e^{iz}}{(1+z^2)^3}$. Then similar to our solution for part (1), we have
\begin{eqnarray*}
\int_0^{\infty}\frac{\cos x}{(1+x^2)^3}dx &=& \frac{1}{2}\int_{-\infty}^{\infty}\frac{\cos x}{(1+x^2)^3}dx
= \pi i \mbox{Res}(f,i) = \pi i \cdot \frac{1}{2!}\left.\frac{d^2}{dz^2}[e^{iz}(z+i)^{-3}]\right|_{z=i} = \frac{7\pi}{16e}.\end{eqnarray*}
\end{proof}

(3) \begin{proof} Let $f(z) = \frac{ze^{iz}}{z^2-2z+2}$. Define $C_R=\{z:|z|=R, 0\le \arg z \le \pi\}$. Then for $z\in C_R$, $\left|\frac{z}{z^2-2z+2}\right| \le \frac{R}{R^2-2R-2}\to 0$ as $R\to \infty$. So by Jordan's lemma (Lemma 7.1) and Residue Theorem
\[
\int_{-\infty}^{\infty}f(z)dz = 2\pi i \mbox{Res}(f, 1+i) = 2\pi i \cdot \left.\frac{ze^{iz}}{[z-(1-i)]}\right|_{z=1+i} = \frac{\pi}{e}[(\cos 1- \sin 1)+i(\cos 1+\sin 1)].
\]
Comparing the real and imaginary parts of both sides of the equality, we have $\int_{-\infty}^{\infty}\frac{x\sin x}{x^2-2x+2}dx = \frac{\pi}{e}(\cos 1 + \sin 1)$.
\end{proof}

\noindent 8. (1) \begin{proof}
\begin{eqnarray*}
& & \mbox{v.p.}\int_{-\infty}^{\infty} \frac{dx}{x(x-1)(x-2)}\\
&=& \lim_{\delta\to 0}\left[\int_{-\infty}^{-\delta}\frac{dx}{x(x-1)(x-2)} + \int_{\delta}^{1-\delta}\frac{dx}{x(x-1)(x-2)} + \int_{1+\delta}^{2-\delta}\frac{dx}{x(x-1)(x-2)} + \int^{\infty}_{2+\delta}\frac{dx}{x(x-1)(x-2)}\right].
\end{eqnarray*}
We note $\frac{1}{x(x-1)(x-2)} = \frac{1}{2(x-2)} -\frac{1}{x-1}+\frac{1}{2x}$. So
\begin{eqnarray*}
\int_{-\infty}^{-\delta}\frac{dx}{x(x-1)(x-2)} &=& \lim_{N\to \infty}\int_{-N}^{-\delta}\left[\frac{1}{2(x-2)} -\frac{1}{x-1}+\frac{1}{2x}\right]dx \\
&=& \lim_{N\to \infty} \frac{1}{2}\ln \left[\frac{(N+1)^2}{N(N+2)}\right] + \frac{1}{2}\ln\frac{\delta(\delta+2)}{(\delta+1)^2}\\
&=&\frac{1}{2}\ln\frac{\delta(\delta+2)}{(\delta+1)^2},
\end{eqnarray*}
\begin{eqnarray*}
\int_{\delta}^{1-\delta}\frac{dx}{x(x-1)(x-2)}=\int_{\delta}^{1-\delta}\left[\frac{1}{2(x-2)} -\frac{1}{x-1}+\frac{1}{2x}\right]dx = \frac{1}{2}\ln\left[\frac{(1-\delta)^3(1+\delta)}{(2-\delta)\delta^3}\right],
\end{eqnarray*}
\[
\int_{1+\delta}^{2-\delta}\frac{dx}{x(x-1)(x-2)}=\int_{1+\delta}^{2-\delta}\left[\frac{1}{2(x-2)} -\frac{1}{x-1}+\frac{1}{2x}\right]dx = \frac{1}{2}\ln\left[\frac{\delta}{1-\delta}\frac{\delta^2}{(1-\delta)^2}\frac{2-\delta}{1+\delta}\right],
\]
and
\begin{eqnarray*}
\int_{2+\delta}^{\infty}\frac{dx}{x(x-1)(x-2)} &=& \lim_{N\to \infty}\int^{N}_{2+\delta}\left[\frac{1}{2(x-2)}  -\frac{1}{x-1}+\frac{1}{2x}\right]dx \\
&=& \lim_{N\to\infty}\frac{1}{2}\ln\left[\frac{N-2}{\delta}\frac{(1+\delta)^2}{(N-1)^2}\frac{N}{2+\delta}\right] \\
&=& \frac{1}{2}\ln\frac{(1+\delta)^2}{\delta(2+\delta)}.
\end{eqnarray*}
Therefore
\[\mbox{v.p.}\int_{-\infty}^{\infty} \frac{dx}{x(x-1)(x-2)} = \lim_{\delta\to 0}\frac{1}{2}\ln\left[\frac{\delta(\delta+2)}{(\delta+1)^2}\frac{(1-\delta)^3(1+\delta)}{(2-\delta)\delta^3}\frac{\delta^3(2-\delta)}{(1-\delta)^3(1+\delta)}\frac{(1+\delta)^2}{\delta(2+\delta)}\right]
=0.\]
\end{proof}

(2) \begin{proof} Note $\sin(x+a)\sin(x-a)= -\frac{1}{2}[\cos(2x)-\cos(2a)]$. So
\[
\int_0^{\infty}\frac{\sin(x+a)\sin(x-a)}{x^2-a^2}dx = -\frac{1}{2}\int_0^{\infty}\frac{\cos(2x)-\cos(2a)}{x^2-a^2}dx = -\frac{1}{4} \int_{-\infty}^{\infty}\frac{\cos(2x)-\cos(2a)}{x^2-a^2}dx.
\]
Define $f(z) = \frac{e^{2zi}-e^{2ai}}{z^2-a^2}$. Let $C_R=\{z:|z|=R, 0\le \arg z\le \pi\}$ $(R>0)$, $c_r(a) = \{z: |z-a|=r, 0\le \arg (z-a) \le \pi\}$, $c_r(-a)=\{z:|z+a|=r, 0\le \arg (z+a) \le \pi\}$. Then by Residue Theorem,
\[
\int_{(-R,-a-r)\cup c_r(-a) \cup (-a+r, a-r) \cup c_r(a) \cup (a+r,R) \cup C_R}f(z)dz = 0.
\]
Since $a$ is a pole of order 1 for $f(z)$, $f(z)$ can be written as $\frac{g(z)}{z-a}$ near $a$ where $g(z)$ is analytic near $a$ and $g(a)\ne 0$. So
\[
\int_{c_r(a)}f(z)dz = \int_{c_r(a)}\frac{g(z)}{z-a}dz = \int_{\pi}^0\frac{g(a+re^{i\alpha})}{re^{i\alpha}}re^{i\alpha}\cdot id\alpha = -i\int_0^{\pi}g(a+re^{i\alpha})d\alpha \to -i\pi g(a) = -i\pi \mbox{Res}(f, a)
\]
as $r\to 0$. Similarly, $\int_{c_r(-a)}f(z)dz = -i\pi \mbox{Res}(f,-a)$. It's easy to see $\lim_{R\to\infty}\int_{C_R}f(z)dz =0$. So by letting $r\to 0$ and $R \to \infty$, we have
\[
\int_{-\infty}^{\infty}f(z)dz = i \pi [\mbox{Res}(f,a)+\mbox{Res}(f,-a)] = i\pi\left.\frac{e^{2zi}-e^{2ai}}{z+a}\right|_{z=a} + i\pi \left.\frac{e^{2zi}-e^{2ai}}{z-a}\right|_{z=-a} = -\frac{\pi}{a}\sin(2a).
\]
So $\int_0^{\infty}\frac{\sin(x+a)\sin(x-a)}{x^2-a^2}dx = \frac{\pi}{4a}\sin(2a)$.
\end{proof}

(3) \begin{proof}Define $f(z) = \frac{iz-e^{iz}}{z^3(1+z^2)}$. Let $R>r>0$, $C_R=\{z:|z|=R, 0\le \arg z \le\pi\}$, and $C_r=\{z:|z|=r, 0\le \arg z \le \pi\}$. Then by Residue Theorem
\[
\int_{-R}^{-r}f(z)dz + \int_{C_r}f(z)dz + \int_r^R f(z)dz + \int_{C_R}f(z)dz = 2\pi i [\mbox{Res}(f,i)+\mbox{Res}(f,-i)].
\]
Note $\mbox{Res}(f,i) = \left.\frac{iz-e^{iz}}{z^3(z+i)}\right|_{z=i} = -\frac{1+e^{-1}}{2}$ and $\mbox{Res}(f,-i)=\left.\frac{iz-e^{iz}}{z^3(z-i)}\right|_{z=-i}=\frac{1-e}{2}$. So $\mbox{Res}(f,i)+\mbox{Res}(f,-i)=-\frac{e+e^{-1}}{2} = -\cosh 1$. Also, we note
\[
\int_{C_r}f(z)dz = -i\int_0^{\pi}\frac{ire^{i\alpha}-e^{ire^{i\alpha}}}{r^2e^{2\alpha i}(1+r^2e^{2\alpha i})}d\alpha.
\]
By repeatedly using l'Hospital's rule, we have
\[
\lim_{r\to 0} \frac{ire^{i\alpha}-e^{ire^{i\alpha}}}{r^2e^{2\alpha i}(1+r^2e^{2\alpha i})} = \lim_{r \to 0} \frac{ie^{i\alpha} - ie^{i\alpha}e^{ire^{i\alpha}}}{2re^{2\alpha i}} = \frac{1}{2}.
\]
So $\lim_{r\to 0}\int_{C_r}f(z)dz = -\frac{\pi}{2}i$. It's easy to see $\lim_{R\to\infty}\int_{C_R}f(z)dz = 0$ by Jordan's lemma. Therefore, by letting $R\to\infty$ and $r\to 0$, we have
\[
\int_{-\infty}^{\infty}f(z)dz = 2\pi i \cdot (-\cosh 1) + \frac{\pi}{2} i.
\]
By comparing the real and imaginary parts of both sides of the equality, we obtain
\[
\int_0^{\infty}\frac{x-\sin x}{x^3(1+x^2)}dx = \frac{1}{2}\int_{-\infty}^{\infty}\frac{x-\sin x}{x^3(1+x^2)}dx = \frac{\pi}{2}\left(\frac{1}{2}-e-\frac{1}{e}\right).
\]

\begin{remark}
The above result is different from the textbook's solution. I think I made a calculational mistake somewhere. Check.
\end{remark}
 \end{proof}

(4) \begin{proof}We shall use the following result: if $\alpha\ne 0$
and $(\beta/\alpha)\ne \pm 1, \pm 2, \cdots$, then
\[
\frac{\pi}{\alpha}\cot\frac{\pi\beta}{\alpha} = \sum_{n=0}^{\infty}
\left\{\frac{1}{n\alpha+\beta} - \frac{1}{n\alpha +
(\alpha-\beta)}\right\}.
\]
For a proof, see Conway \cite{Conway78}, Chapter V, Exercise 2.8
(page 122), or my solution manual for Gong \cite{Gong2007}, Chapter 3,
Exercise 11 (iii) (page 119).

We have
\begin{eqnarray*}
\int_{-\infty}^{\infty}\frac{e^{px}-e^{qx}}{1-e^x}dx &=& \int_0^{\infty}\frac{e^{-(1-p)x}-e^{-(1-q)x}}{e^{-x}-1}dx + \int_0^{\infty} \frac{e^{-py}-e^{-qy}}{1-e^{-y}}dy \\
&=& \int_0^{\infty} \frac{e^{-px}-e^{-(1-p)x}}{1-e^{-x}}dx - \int_0^{\infty} \frac{e^{-qx}-e^{-(1-q)x}}{1-e^{-x}}dx \\
&=& \int_0^{\infty}[e^{-px}-e^{-(1-p)x}]\sum_{n=0}^{\infty} e^{-nx}dx - \int_0^{\infty} [e^{-qx}-e^{-(1-q)x}]\sum_{n=0}^{\infty} e^{-nx}dx \\
&=& \sum_{n=0}^{\infty} \int_0^{\infty}[e^{-(n+p)x}-e^{-(n+1-p)x}]dx - \sum_{n=0}^{\infty}\int_0^{\infty}[e^{-(n+q)x} - e^{-(n+1-q)x}]dx \\
&=& \sum_{n=0}^{\infty}\left(\frac{1}{n+p}-\frac{1}{n+1-p}\right) - \sum_{n=0}^{\infty}\left(\frac{1}{n+q}-\frac{1}{n+1-q}\right) \\
&=& \pi \cot(p\pi) - \pi \cot(q\pi).
\end{eqnarray*}

\end{proof}

\noindent 9. A class of integration problems can be solved by the
following general result (Whittaker and Watson \cite{WW1927}, \S 6.24,
{\it Evaluation of integrals of the form
$\int_0^{\infty}x^{\alpha-1}Q(x)dx$}).

\begin{theorem}
Let $Q(x)$ be a rational function of $x$ such that it has no poles on the positive part of the real axis and $x^{a}Q(x)\to 0$ both when $x\to 0$ and when $x\to \infty$. If $\sum r$ denote the sum of the residues of $(-z)^{a-1}Q(z)$ at all its poles, then
\[
\int_0^{\infty}x^{a-1}Q(x)dx = \pi \csc(a \pi)\sum r.
\]
\end{theorem}

\begin{corollary}
If $Q(x)$ has a number of simple poles on the positive part of the real axis, it may be shown by indenting the contour that
\[
\mbox{v.p.}\int_0^{\infty}x^{a-1}Q(x)dx = \pi \csc(a\pi) \sum r - \pi \cot(a\pi) \sum r',
\]
where $\sum r'$ is the sum of the residues of $z^{a-1}Q(z)$ at these poles.
\end{corollary}

(1) \begin{proof} By the above corollary (the pole $1$ lies on the positive real axis, so $\sum r = 0$ and only the $-\pi\cot$ term survives), we have
\[
\mbox{v.p.}\int_0^{\infty}\frac{x^{s-1}}{1-x}dx = - \pi \cot (s\pi) \mbox{Res}\left(\frac{x^{s-1}}{1-x}, 1\right) = \pi \cot(s\pi).
\]
\end{proof}

(2) \begin{proof} If $s=1$, then
\[
\int_0^{\infty}\frac{xdx}{(1+x^2)^2} = \frac{1}{2}\int_0^{\infty}\frac{dy}{(1+y)^2} = \left.-\frac{1}{2(1+y)}\right|_0^{\infty} = \frac{1}{2}.
\]

To calculate the case where $s\ne 1$, we choose $r$ and $R$ such
that $0<r<R$. Let $\gamma_1=\{z:r\le |z|\le R, \arg z = 0\}$,
$\gamma_2=\{z: r\le |z|\le R, \arg z = \pi \}$, $\gamma_R=\{z:|z|=R,
0<\arg z <\pi\}$, and $\gamma_r=\{z:|z|=r, 0<\arg z <\pi\}$. Define
$f(z) = \frac{z^s}{(1+z^2)^2}$. Suppose $r$ is sufficiently small
and $R$ is sufficiently large so that all the poles of $f(z)$ are
contained in the contour formed by $\gamma_1$, $\gamma_2$,
$\gamma_r$, and $\gamma_R$. Then
\[
\int_{\gamma_1+\gamma_R+\gamma_2-\gamma_r}f(z)dz = 2\pi i \mbox{Res}(f,i) = 2\pi i \cdot \left.\frac{d}{dz}\frac{z^s}{(z+i)^2}\right|_{z=i}
= 2\pi i \cdot \frac{s-1}{-4}e^{\frac{\pi}{2}(s-1)i} = -\frac{\pi}{2}(s-1)e^{\frac{\pi}{2}si}.
\]
We have (note $-1<s<3$)
\[
\left|\int_{\gamma_R}f(z)dz\right| = \left|\int_0^{\pi}\frac{(Re^{i\theta})^s}{(1+R^2e^{2i\theta})^2}Re^{i\theta}\cdot id\theta\right| \le \frac{\pi R^{s+1}}{(R^2-1)^2} \to 0
\]
as $R\to \infty$,
\[
\left|\int_{\gamma_r}f(z)dz\right| = \left|\int_0^{\pi}\frac{(re^{i\theta})^s}{(1+r^2e^{2i\theta})^2}re^{i\theta}\cdot id\theta\right| \le \frac{\pi r^{s+1}}{(1-r^2)^2} \to 0
\]
as $r\to 0$, and
\[
\int_{\gamma_2}f(z)dz = \int_0^{\infty}\frac{(xe^{\pi i})^s}{(1+x^2)^2}dx = e^{s\pi i}\int_0^{\infty}\frac{x^s}{(1+x^2)^2}dx.
\]
Since $s\ne 1$, $e^{s\pi i}\ne -1$. Therefore
\[
\int_0^{\infty}\frac{x^s}{(1+x^2)^2}dx = \frac{2\pi i \mbox{Res}(f,i)}{1+ e^{s\pi i}} = \frac{\pi}{4}\frac{1-s}{\cos\left(\frac{\pi}{2}s\right)}.
\]
Combining all the cases and regarding the cases where $s\in \mathbb Z$ as limit case of the formula $\frac{\pi}{4}\frac{1-s}{\cos\left(\frac{\pi}{2}s\right)}$, we conclude the integral is evaluated to $\frac{\pi}{4}\frac{1-s}{\cos\left(\frac{\pi}{2}s\right)}$.

\begin{remark}
We could have used the general theorem, but we still go to the
specific solution so that some insight can be shed on how the
general theorem is proved.
\end{remark}
\end{proof}

(3) \begin{proof}We choose $r$ and $R$ such that $0<r<R$. Let $\gamma_1=\{z:r\le |z|\le R, \arg z = 0\}$, $\gamma_2=\{z: r\le |z|\le R, \arg z = 2\pi \}$, $\gamma_R=\{z:|z|=R, 0<\arg z < 2\pi\}$, and $\gamma_r=\{z:|z|=r, 0<\arg z < 2\pi\}$. Define $f(z) = \frac{z^{\alpha-1}\ln z}{1+z}$. Suppose $r$ is sufficiently small and $R$ is sufficiently large so that all the poles of $f(z)$ are contained in the contour formed by $\gamma_1$, $\gamma_2$, $\gamma_r$, and $\gamma_R$. Then by Residue Theorem
\[
\int_{\gamma_1+\gamma_R-\gamma_2-\gamma_r}f(z)dz = 2\pi i \mbox{Res}(f,-1) = 2\pi i (-1)^{\alpha-1}\ln(-1) = -2\pi^2 e^{(\alpha-1)\pi i}.
\]
We note
\[
\left|\int_{\gamma_R}f(z)dz \right| \le \left|\int_0^{2\pi}\frac{(Re^{i\theta})^{\alpha-1}\ln(Re^{i\theta})}{1+Re^{i\theta}}Re^{i\theta}\cdot id\theta\right| \le \frac{2\pi R^{\alpha}(\ln R + 2\pi)}{R-1} \to 0
\]
as $R\to\infty$,
\[
\left|\int_{\gamma_r}f(z)dz \right| \le \left|\int_0^{2\pi}\frac{(re^{i\theta})^{\alpha-1}\ln(re^{i\theta})}{1+re^{i\theta}}re^{i\theta}\cdot id\theta\right| \le \frac{2\pi r^{\alpha}(|\ln r| + 2\pi)}{1-r} \to 0
\]
as $r\to 0$, and
\begin{eqnarray*}
\int_{\gamma_2}f(z)dz &=& \int_r^R \frac{(xe^{2\pi i})^{\alpha-1}\ln(xe^{2\pi i})}{1+x}dx \\
&=& \int_r^R \frac{x^{\alpha-1}e^{2(\alpha-1)\pi i}(\ln x +2\pi i)}{1+x}dx \\
&=& e^{2(\alpha-1)\pi i}\int_r^R\frac{x^{\alpha-1}\ln x}{1+x}dx + e^{2(\alpha-1)\pi i}2\pi i \int_r^R\frac{x^{\alpha-1}}{1+x}dx.
\end{eqnarray*}
It's not hard to show $\int_0^{\infty}\frac{x^{\alpha-1}}{1+x}dx = \pi \csc (\alpha\pi)$. So by letting $r\to 0$ and $R\to\infty$, we have
\begin{eqnarray*}
 \int_0^{\infty} \frac{x^{\alpha-1}\ln x}{1+x}dx &=& \frac{-2\pi^2e^{(\alpha-1)\pi i} +  e^{2(\alpha-1)\pi i}2\pi i \cdot \pi \csc(\alpha \pi)}{1-e^{2(\alpha-1)\pi i}} \\
 &=& \frac{2\pi^2 e^{\alpha\pi i} + 2\pi^2\csc(\alpha \pi)e^{2\alpha \pi i}i}{1-e^{2\alpha \pi i}} \\
 &=& 2\pi^2\frac{1+\csc(\alpha\pi)i[\cos(\alpha\pi)+i\sin(\alpha\pi)]}{e^{-\alpha\pi i}-e^{\alpha\pi i}} \\
 &=& -\pi^2\frac{\cos(\alpha\pi)}{\sin^2(\alpha \pi)}.
\end{eqnarray*}

 \end{proof}


\section{$\Gamma$函数}

\noindent 1. (1) \begin{proof}$(2n)!!=(2n)\cdot (2n-2)\cdots 2 = 2^n\cdot n!=2^n\Gamma(n+1)$. \end{proof}

(2) \begin{proof} $(2n-1)!!=\frac{2n\cdot (2n-1)\cdot (2n-2) \cdots 3\cdot 2\cdot 1}{2n\cdot (2n-2)\cdots 2}=\frac{(2n)!}{2^n\cdot n!}=\frac{\Gamma(2n+1)}{2^n\Gamma(n+1)}$. \end{proof}

(3) \begin{proof}
$\Gamma(n+v+1)=(n+v)\Gamma(n+v)=\cdots = (n+v)(n-1+v)\cdots(1+v)\Gamma(1+v)$. So $(1+v)(2+v)\cdots (n+v)=\frac{\Gamma(n+v+1)}{\Gamma(v+1)}$.
\end{proof}

(4) \begin{proof}
\begin{eqnarray*}
\prod_{l=0}^n[l(l+1)-v(v+1)] &=& \prod_{l=0}^n[(l-v)(l+v+1)] \\
&=& \prod_{l=0}^n(l-v) \cdot \prod_{l=0}^n(l+v+1) \\
&=& \frac{\Gamma(n-v+1)}{\Gamma(-v)} \cdot \frac{\Gamma(n+v+2)}{\Gamma(v+1)} \\
&=& \frac{\Gamma(n+v+2)\Gamma(n-v+1)}{\frac{\pi}{\sin\pi(v+1)}} \\
&=& -\frac{\sin\pi v}{\pi}\Gamma(n+v+2)\Gamma(n-v+1).
\end{eqnarray*}
\end{proof}

\noindent 2. (1) \begin{proof} We first assume $\alpha\in (0,1)$.
Let $C_R=\{z: |z|=R, 0\le \arg z \le \frac{\pi}{2}\}$, $C_r=\{z:
|z|=r, 0\le \arg z \le \frac{\pi}{2} \}$, and assume $R>r$. Then by
Residue Theorem
\[
\int_r^R\frac{e^{iz}}{z^{\alpha}}dz +
\int_{C_R}\frac{e^{iz}}{z^{\alpha}}dz + \int_{iR}^{ir}
\frac{e^{iz}}{z^{\alpha}}dz + \int_{C_r}\frac{e^{iz}}{z^{\alpha}}dz
= 0.
\]Then it's easy to see
\[
\left| \int_{C_r}\frac{e^{iz}}{z^{\alpha}}dz\right| \le \left|
\int_{\frac{\pi}{2}}^0\frac{e^{ire^{i\theta}}}{(re^{i\theta})^{\alpha}}
re^{i\theta}\cdot id\theta \right| \le
\int_0^{\frac{\pi}{2}}r^{1-\alpha}e^{-r\sin\theta}d\theta \le
\frac{\pi}{2}r^{1-\alpha} \to 0
\]
as $r\to 0$, and
\[
\left|\int_{C_R}\frac{e^{iz}}{z^{\alpha}}dz \right| \le
\left|\int_0^{\frac{\pi}{2}}
\frac{e^{iRe^{i\theta}}}{(Re^{i\theta})^{\alpha}}Re^{i\theta} \cdot
id\theta \right| \le
\int_0^{\frac{\pi}{2}}R^{1-\alpha}e^{-R\sin\theta}d\theta \le
\int_0^{\frac{\pi}{2}}R^{1-\alpha}e^{-R\frac{2\theta}{\pi}}d\theta =
\frac{\pi}{2R^{\alpha}}(1-e^{-R}) \to 0
\]
as $R\to \infty$. So by letting $r\to 0$ and $R\to\infty$, we have
\[
\int_0^{\infty}\frac{e^{iz}}{z^{\alpha}}dz =
\int_0^{\infty}\frac{e^{i(ix)}}{(ix)^{\alpha}}d(ix) = i\cdot
(-i)^{\alpha}\int_0^{\infty}\frac{e^{-x}}{x^{\alpha}}dx = i
e^{-\frac{\pi}{2}\alpha i}\Gamma(1-\alpha).
\]
This implies
\[
\int_0^{\infty}x^{-\alpha}\cos x dx +
i\int_0^{\infty}x^{-\alpha}\sin xdx = \left(\sin\frac{\alpha}{2}\pi
+ i \cos \frac{\alpha}{2}\pi \right)\Gamma(1-\alpha).
\]
Comparing the real and imaginary parts of the two sides, we
get $\int_0^{\infty}x^{-\alpha}\sin x dx =
\Gamma(1-\alpha)\cos\frac{\alpha}{2}\pi$ and
$\int_0^{\infty}x^{-\alpha}\cos x dx =
\Gamma(1-\alpha)\sin\frac{\alpha}{2}\pi$. For $\alpha \in (1, 2)$,
we note
\[
\int_0^{\infty}x^{-(\alpha-1)}\cos xdx = \left.x^{-(\alpha-1)}\sin
x\right.|_0^{\infty} +(\alpha-1) \int_0^{\infty}x^{-\alpha}\sin x dx
= (\alpha-1)\int_0^{\infty}x^{-\alpha}\sin x dx.
\]
So for $\alpha \in (1, 2)$, $\int_0^{\infty}x^{-\alpha}\sin x dx =
\frac{1}{\alpha-1}\Gamma(1-(\alpha-1))\sin\frac{\alpha-1}{2}\pi =
\Gamma(1-\alpha)\cos\frac{\alpha}{2}\pi$. That is, the formula for
$\int_0^{\infty}x^{-\alpha}\sin x dx$ is the same when $\alpha\in
(0,1)$ and $\alpha\in (1,2)$. When $\alpha =1$, Example 7.9 in the
textbook shows $\int_0^{\infty}\frac{\sin x}{x}dx=\frac{\pi}{2}$, which cannot
be obtained by plugging $\alpha = 1$ into
$\Gamma(1-\alpha)\cos\frac{\alpha}{2}\pi$.
\end{proof}

(2) \begin{proof}Let $C_R=\{z:|z|=R, 0\le \arg z\le \theta\}$,
$C_r=\{z: |z|=r, 0\le \arg z \le \theta\}$ and assume $R>r$. Then by
Residue Theorem
\[
\int_r^R z^{\alpha-1}e^{-z}dz + \int_{C_R}z^{\alpha-1}e^{-z}dz +
\int_R^{r}(xe^{i\theta})^{\alpha-1}e^{-xe^{i\theta}}d(xe^{i\theta})
+ \int_{C_r}z^{\alpha-1}e^{-z}dz = 0.
\]
Note $\cos$ function is positive on $(-\frac{\pi}{2},
\frac{\pi}{2})$, we have
\begin{eqnarray*}
\left|\int_{C_R}z^{\alpha-1}e^{-z}dz \right| = \left|\int_0^{\theta}
(Re^{i\xi})^{\alpha-1}e^{-Re^{i\xi}}d(Re^{i\xi})\right| \le
\int_0^{\theta} R^{\alpha-1}e^{-R\cos\xi}\cdot Rd\xi =
\int_0^{\theta}R^{\alpha} e^{-R\cos\xi}d\xi.
\end{eqnarray*}
On the interval $[0,\frac{\pi}{2}]$,
$\cos\xi=\sin(\frac{\pi}{2}-\xi)\ge
\frac{2}{\pi}(\frac{\pi}{2}-\xi)$, and $\cos\xi$ is an even function. So
\[
\left|\int_{C_R}z^{\alpha-1}e^{-z}dz \right| \le
\int_0^{|\theta|}R^{\alpha}e^{-\frac{2R}{\pi}(\frac{\pi}{2}-\xi)} d\xi
=  \frac{\pi
R^{\alpha-1}}{2}\left[e^{-\frac{2R}{\pi}(\frac{\pi}{2}-|\theta|)}-e^{-R}\right].
\]
Since $\theta \in (-\frac{\pi}{2},\frac{\pi}{2})$,
$\frac{\pi}{2}-|\theta| > 0$. So $\lim_{R\to\infty} \frac{\pi
R^{\alpha-1}}{2}\left[e^{-\frac{2R}{\pi}(\frac{\pi}{2}-|\theta|)}-e^{-R}\right] =
0$. Also, we note
\[
\left|\int_{C_r}z^{\alpha-1}e^{-z}dz\right| \le \int_0^{|\theta|}
r^{\alpha}e^{-r\cos\xi}d\xi \le |\theta| r^{\alpha} \to 0
\]
as $r\to 0$. So by letting $r\to 0$ and $R\to \infty$, we have
\begin{eqnarray*}
\int_0^{\infty}x^{\alpha-1}e^{-x}dx &=&
\int_0^{\infty}x^{\alpha-1}e^{i\theta(\alpha-1)}e^{-xe^{i\theta}}e^{i\theta}dx
= \int_0^{\infty}x^{\alpha-1}e^{i\theta\alpha}e^{-xe^{i\theta}}dx
\\
&=& e^{i\theta\alpha}
\int_0^{\infty}x^{\alpha-1}e^{-x\cos\theta}[\cos(x\sin\theta)-i\sin(x\sin\theta)]dx.
\end{eqnarray*}
Let
$I=\int_0^{\infty}x^{\alpha-1}e^{-x\cos\theta}\cos(x\sin\theta)dx$
and
$I\!I=\int_0^{\infty}x^{\alpha-1}e^{-x\cos\theta}\sin(x\sin\theta)dx$.
Then we have
\[\Gamma(\alpha)=(\cos\theta\alpha+i\sin\theta\alpha)(I-iI\!I) =
(I\cos\theta\alpha + I\!I\sin\theta\alpha) +
i(I\sin\theta\alpha-I\!I\cos\theta\alpha).
\]
Equating the real and imaginary parts of the terms on both sides of
the equation, we can get two equations of $I$ and $I\!I$. Solving
these two equations gives us
\[
I = \Gamma(\alpha)\cos\alpha\theta,\; I\!I =
\Gamma(\alpha)\sin\alpha\theta.
\]
\end{proof}

\noindent 3. (1) \begin{proof} Since $\Gamma(z+1)=z\Gamma(z)$, we
have $\Gamma'(z+1)=\Gamma(z)+z\Gamma'(z)$. So
\[
\Psi(z+1)=\frac{\Gamma'(z+1)}{\Gamma(z+1)} =
\frac{\Gamma(z)+z\Gamma'(z)}{z\Gamma(z)} = \frac{1}{z}+\Psi(z).
\]
\end{proof}

 (2)\begin{proof}
$\Psi(z+n)=\frac{1}{z+n-1}+\Psi(z+n-1)=\frac{1}{z+n-1}+\frac{1}{z+n-2}+\Psi(z+n-2)=\cdots=\frac{1}{z+n-1}+\frac{1}{z+n-2}+\cdots
+ \frac{1}{z}+\Psi(z)$.\end{proof}

(3) \begin{proof}By $\Gamma(z)\Gamma(1-z)=\frac{\pi}{\sin\pi z}$, we
have $\ln\Gamma(z)+\ln\Gamma(1-z)=\ln\pi-\ln(\sin\pi z)$.
Differentiating both sides, we have
$\Psi(z)-\Psi(1-z)=-\frac{\cos\pi z}{\sin\pi z} \cdot \pi$. So
$\Psi(1-z)-\Psi(z)=\pi\cot\pi z$.
\end{proof}

(4) \begin{proof} By the formula
$\Gamma(2z)=2^{2z-1}\pi^{-\frac{1}{2}}\Gamma(z)\Gamma(z+\frac{1}{2})$,
we have
\[
\ln\Gamma(2z)=(2z-1)\ln2-\frac{1}{2}\ln\pi+\ln\Gamma(z)+\ln\Gamma(z+\frac{1}{2}).
\]
Differentiating both sides, we get $2\Psi(2z) = 2\ln 2 +
\Psi(z)+\Psi(z+\frac{1}{2})$.
\end{proof}

\noindent 4. (1) \begin{proof} Use the substitution $x=2y-1$, we
get
\[
\int_{-1}^1(1-x)^p(1+x)^qdx=\int_0^1(2-2y)^p(2y)^q\cdot 2dy =
2^{p+q+1}\int_0^1(1-y)^py^qdy = 2^{p+q+1}\mbox{B}(p+1,q+1).
\]
\end{proof}

(2) \begin{proof} Let $p=\frac{1+\alpha}{2}$ and
$q=\frac{1-\alpha}{2}$. Then $p, q>0$, $p+q=1$, and
\[
\int_0^{\frac{\pi}{2}}\tan^{\alpha}\theta d\theta =
\int_0^{\frac{\pi}{2}} \sin^{\alpha}\theta \cos^{-\alpha}\theta
d\theta = \int_0^{\frac{\pi}{2}} \sin^{2p-1}\theta\cos^{2q-1}\theta
d\theta = \frac{1}{2}\mbox{B}(p,q)= \frac{\pi}{2\sin\pi q} =
\frac{\pi}{2\cos\frac{\alpha\pi}{2}}.
\]
\end{proof}

\noindent 5. (1) \begin{proof}We note
\begin{eqnarray*}
\sum_{n=1}^{\infty}\frac{1}{n(4n^2-1)} &=&
\sum_{n=1}^{\infty}\frac{1}{2}\left(\frac{1}{2n-1}-\frac{1}{2n+1}\right)
= \sum_{n=1}^{\infty}\left(\frac{1}{2n-1}-\frac{1}{2n}-\frac{1}{2n}
+\frac{1}{2n+1}\right)\\
 &=&
\sum_{n=1}^{\infty}\frac{1}{2}\left(\frac{1}{n-\frac{1}{2}} -
\frac{2}{n}  + \frac{1}{n+\frac{1}{2}}\right).
\end{eqnarray*}
By formula (8.27), we conclude
$\sum_{n=1}^{\infty}\frac{1}{n(4n^2-1)} =
-\frac{1}{2}\left[\Psi(-\frac{1}{2})-2\Psi(0)+\Psi(\frac{1}{2})\right]$.
By the formula
$\Psi(2z)=\frac{1}{2}\Psi(z)+\frac{1}{2}\Psi(z+\frac{1}{2})+\ln 2$,
we get $\Psi(0)=\Psi(\frac{1}{2})+2\ln 2 = -\gamma$, where $\gamma$
is the Euler constant.  This implies
\[
\sum_{n=1}^{\infty}\frac{1}{n(4n^2-1)} =
-\frac{1}{2}\left[(-\gamma-2\ln2+2)-2(-\gamma)+(-\gamma-2\ln2)\right]
= 2\ln 2- 1.
\]
\end{proof}


(2) \begin{proof} Using the Mathematica command \texttt{Apart[1/(z\^{}2 -
a\^{}2)\^{}2]}, we have
\begin{eqnarray*}
\sum_{n=-\infty}^{\infty}\frac{1}{(n^2+1)^2} &=& 2\sum_{n=0}^{\infty}\frac{1}{(n^2+1)^2}-1 = 2\sum_{n=0}^{\infty}\left[- \frac{1}{4(n-i)^2} - \frac{i}{4(n-i)}-\frac{1}{4(n+i)^2}+\frac{i}{4(n+i)}\right]-1  \\
&=& -\frac{1}{2}\sum_{n=0}^{\infty}\left[\frac{i}{n-i} + \frac{1}{(n-i)^2} - \frac{i}{n+i} + \frac{1}{(n+i)^2}\right]-1.
\end{eqnarray*}
By formula (8.29c),
\[
\sum_{n=-\infty}^{\infty}\frac{1}{(n^2+1)^2} =-\frac{1}{2}\cdot (-1) \cdot [i\Psi(-i) - \Psi'(-i) - i \Psi(i) - \Psi'(i)] - 1.
\]
By the formula $\Psi(z)-\Psi(-z)=-\frac{1}{z}-\pi\cot\pi z$, we have $\Psi(-i)-\Psi(i)=-\frac{1}{-i}-\pi \cot(-\pi i) = i(-1-\pi\coth\pi)$. And by $\Psi'(z)+\Psi'(-z)=\frac{1}{z^2}-\pi^2\csc^2(\pi z)$, we have $\Psi'(i)+\Psi'(-i)=-1-\pi^2\csc^2(i\pi)=-1-\frac{\pi^2}{\sinh^2\pi}$. Therefore
\[
\sum_{n=-\infty}^{\infty}\frac{1}{(n^2+1)^2} = \frac{1}{2}\left(1+\pi\coth\pi + 1 + \frac{\pi^2}{\sinh^2\pi}\right) - 1 = \frac{\pi}{2}\coth\pi + \frac{\pi^2}{2\sinh^2\pi}.
\]
\end{proof}

\section{拉普拉斯变换}

\subsection{正文练习题}

\noindent 9.1. \begin{proof}
\[
{\cal L}\{f(t-\tau)\} = \int_0^{\infty}e^{-pt}f(t-\tau)\eta(t-\tau)dt = e^{-p\tau}\int_0^{\infty}e^{-p(t-\tau)}f(t-\tau)\eta(t-\tau)dt = e^{-p\tau}F(p).
\]
\[
{\cal L}\{f(at)\} = \int_0^{\infty}e^{-pt}f(at)dt = \frac{1}{a}\int_0^{\infty}e^{-\frac{p}{a}\xi}f(\xi)d\xi = \frac{1}{a}F(\frac{p}{a}).
\]
\[
{\cal L}\{e^{p_0t}f(t)\} = \int_0^{\infty}e^{-(p-p_0)t}f(t)dt = F(p-p_0).
\]
\end{proof}

\noindent 9.2. \begin{proof}
\[
{\cal L}\left\{\int_0^{\infty}f(t,\tau)d\tau\right\} = \int_0^{\infty}e^{-pt}\int_0^{\infty}f(t,\tau)d\tau dt = \int_0^{\infty}d\tau \int_0^{\infty}e^{-pt}f(t,\tau)dt=\int_0^{\infty}F(p,\tau)d\tau.
\]
Define $g(t)=\int_t^{\infty}\frac{f(\tau)}{\tau}d\tau$, then $g'(t) = -\frac{f(t)}{t}$. By Property 4, ${\cal L}\{g'(t)\} = p{\cal L}\{g(t)\} - g(0)$.
So $-\int_0^{\infty}e^{-pt}\frac{f(t)}{t}dt = p{\cal L}\{g(t)\}-\int_0^{\infty}\frac{f(\tau)}{\tau}d\tau$, which implies
\[
{\cal L}\{g(t)\} = \frac{1}{p}\int_0^{\infty}\frac{1-e^{-pt}}{t}f(t)dt = \frac{1}{p}\int_0^{\infty}\int_0^pe^{-qt}dq f(t)dt = \frac{1}{p}\int_0^p\int_0^{\infty}e^{-qt}f(t)dt dq = \frac{1}{p}\int_0^pF(q)dq.
\]
 \end{proof}

\subsection{章末习题}

\noindent 1. (1) \begin{proof}$F_n(p)={\cal L}\{t^n\}=\frac{n!}{p^{n+1}}$ for $p\in \mathbb C$ with $\mbox{Re} p>0$. To prove this, we work by induction. When $n=0$, this is just Example 9.1. Assume the formula is true for $k=0, 1, \cdots, n$. Then
\[
F_{n+1}(p)={\cal L}\{t^{n+1}\} = \int_0^{\infty}e^{-pt}t^{n+1}dt = -\frac{1}{p}\left(e^{-pt}t^{n+1}|_{0}^{\infty} - \int_0^{\infty}e^{-pt}(n+1)t^ndt\right) = \frac{n+1}{p}F_n(p)= \frac{(n+1)!}{p^{n+2}}.
\]
Here we have used $\mbox{Re} p >0$ to conclude $e^{-pt}t^{n+1}|_{0}^{\infty}=0$. By induction, we proved our claim.
 \end{proof}

(2) \begin{proof}
$F(p)=\frac{\Gamma(\alpha+1)}{p^{\alpha+1}}$ for $p\in \mathbb C$ with $\mbox{Re} p > 0$. Indeed,
\[
{\cal L}\{t^{\alpha}\} = \int_0^{\infty} e^{-pt}t^{\alpha}dt = \frac{1}{p^{\alpha+1}}\int_0^{\infty} e^{-pt}(pt)^{\alpha}d(pt) = \frac{1}{p^{\alpha+1}}\int_L e^{-t}t^{(\alpha+1)-1}dt,
\]
where $L$ is the radial straight line that goes from $0$ to $\infty$, with angle $\arg p$. By the extended definition of $\Gamma$ function (8.3), we have $F(p)=\frac{\Gamma(\alpha+1)}{p^{\alpha+1}}$ $(\mbox{Re}p>0)$.
\end{proof}

(3) \begin{proof}The problem and its solution in the textbook do not match. So we calculate the Laplace transform both for $e^{\lambda t}\sin\omega t$ and $e^{-\lambda t}\sin\omega t$.
\[
{\cal L}\{e^{-\lambda t}\sin\omega t\} = \int_0^{\infty}e^{-pt}e^{-\lambda t}\sin\omega tdt = \int_0^{\infty}e^{-(p+\lambda)t}\sin\omega t dt = \frac{\omega}{(p+\lambda)^2+\omega^2}
\]
where we require $\mbox{Re}p>-\lambda$, and
\[
{\cal L}\{e^{\lambda t}\sin\omega t\} = \int_0^{\infty}e^{-pt}e^{\lambda t}\sin\omega tdt = \int_0^{\infty}e^{-(p-\lambda)t}\sin\omega t dt = \frac{\omega}{(p-\lambda)^2+\omega^2}
\]
where we require $\mbox{Re}p>\lambda$.
 \end{proof}

(4) \begin{proof}By the formula ${\cal L}\left\{\frac{f(t)}{t}\right\} = \int_p^{\infty}F(q)dq$, we have
\[
{\cal L}\left\{\frac{\sin\omega t}{t}\right\} = \int_p^{\infty}{\cal L}\{\sin\omega t\}dq = \int_p^{\infty}\frac{\omega}{q^2+\omega^2}dq = \left.\arctan \frac{q}{\omega}\right|_{q=p}^{\infty} = \frac{\pi}{2} - \arctan \frac{p}{\omega} = \arctan \frac{\omega}{p},
\]
where we require $\mbox{Re}p>0$.

\begin{remark}
The above result differs from the textbook's solution, but matches with the result of {\bf Mathematica}.
\end{remark}
\end{proof}

(5) \begin{proof} By applying the formula ${\cal L}\left\{\frac{f(t)}{t}\right\} = \int_p^{\infty}F(q)dq$ twice, we have
\begin{eqnarray*}
{\cal L}\left\{\frac{1-\cos\omega t}{t^2}\right\}
&=& \int_p^{\infty}{\cal L}\left\{\frac{1-\cos\omega t}{t}\right\}dq = \int_p^{\infty}\int_q^{\infty}{\cal L}\{1-\cos\omega t\}drdq \\
&=& \int_p^{\infty}\int_q^{\infty}\left(\frac{1}{r}-\frac{r}{r^2+\omega^2}\right)drdq \\
&=& \int_p^{\infty}\lim_{N\to\infty}\left[\ln N - \ln q -\frac{1}{2}\ln(N^2+\omega^2)+\frac{1}{2}\ln(q^2+\omega^2)\right]dq \\
&=& \lim_{N\to\infty}\int_p^N\left[\frac{1}{2}\ln(q^2+\omega^2)-\ln q\right]dq\\
&=& \lim_{N\to\infty} N\ln\frac{\sqrt{N^2+\omega^2}}{N} + \int_p^{\infty}\frac{\omega^2}{q^2+\omega^2}dq + p\ln p -\frac{1}{2}p\ln(p^2+\omega^2) \\
&=& \omega\arctan\frac{\omega}{p} - \frac{p}{2}\ln\frac{p^2+\omega^2}{p^2}.
\end{eqnarray*}
\end{proof}

(6) \begin{proof} We require $p$ satisfy $\mbox{Re}p>0$. Then
\begin{eqnarray*}
{\cal L} \left\{\int_t^{\infty}\frac{\cos\tau}{\tau}d\tau\right\} &=& \lim_{\delta\to 0}\int^{\infty}_{\delta}e^{-pt}\left(\int_t^{\infty}\frac{\cos\tau}{\tau}d\tau\right)dt.
\end{eqnarray*}
By integration-by-parts formula, we have
\begin{eqnarray*}
\int^{\infty}_{\delta}e^{-pt}\left(\int_t^{\infty}\frac{\cos\tau}{\tau}d\tau\right)dt &=& -\frac{1}{p}\left[e^{-pt}\left.\int_t^{\infty}\frac{\cos\tau}{\tau}d\tau\right|_{\delta}^{\infty} + \int_{\delta}^{\infty}e^{-pt}\frac{\cos t}{t}dt\right] \\
&=&\frac{1}{p}\int_{\delta}^{\infty}(e^{-p\delta}-e^{-p\tau})\frac{\cos\tau}{\tau}d\tau.
\end{eqnarray*}
So by the formula $\int_0^{\infty}F(p)dp = \int_0^{\infty}\frac{f(t)}{t}dt$, we have
\begin{eqnarray*}
{\cal L} \left\{\int_t^{\infty}\frac{\cos\tau}{\tau}d\tau\right\} &=& \frac{1}{p}\int_0^{\infty}(1-e^{-pt})\frac{\cos t}{t}dt = \frac{1}{p}\int_0^{\infty}{\cal L}\{(1-e^{-pt})\cos t\}dq \\
&=& \frac{1}{p}\int_0^{\infty}\left[\frac{q}{q^2+1}-{\cal L}\{e^{-pt}\cos t\}\right]dq \\
&=& \frac{1}{p}\int_0^{\infty}\left[\frac{q}{q^2+1}-\frac{p+q}{(p+q)^2+1}\right]dq \\
&=&\lim_{N\to\infty}\frac{1}{p}\int_0^N\left[\frac{1}{2}d\ln(q^2+1)-\frac{1}{2}d\ln((p+q)^2+1)\right]\\
&=&\left.\frac{1}{2p}\ln\frac{q^2+1}{(q+p)^2+1}\right|_{q=0}^{\infty}\\
&=&\frac{1}{2p}\ln(p^2+1).
\end{eqnarray*}

\begin{remark}
If we apply the result of Exercise 9.2, $\int_t^{\infty} \frac{f(\tau)}{\tau} d\tau \downarrow \frac{1}{p}\int_0^p F(q) dq$, the calculation is only one step. The function $-\int_t^{\infty}\frac{\cos\tau}{\tau}d\tau$ is called cosine integral function.
\end{remark}
\end{proof}

\noindent 2. \begin{proof}
\begin{eqnarray*}
F(p)&=&\int_0^{\infty}e^{-pt}f(t)dt = \sum_{n=0}^{\infty}\int_{n\alpha}^{(n+1)\alpha}e^{-pt}f(t)dt = \sum_{n=0}^{\infty}\int_0^{\alpha}e^{-p(t+n\alpha)}f(t+n\alpha)dt \\
&=& \sum_{n=0}^{\infty}\int_0^{\alpha} e^{-pt}f(t)dt \cdot e^{-\alpha p n} = \int_0^{\alpha} e^{-pt}f(t)dt\sum_{n=0}^{\infty}\left(e^{-\alpha p }\right)^n  = \frac{1}{1-e^{-\alpha p}}\int_0^{\alpha} e^{-pt}f(t)dt.
\end{eqnarray*}
\end{proof}

\noindent 3. (1) \begin{proof} $|\sin\omega t|$ has period $\frac{\pi}{\omega}$. Using result of Problem 2, we have
\[
{\cal L}\{|\sin\omega t|\} = \frac{1}{1-e^{-\frac{\pi}{\omega}p}}\int_0^{\frac{\pi}{\omega}}e^{-pt}\sin\omega t dt.
\]
By applying integration-by-parts formula twice, we can easily verify
\[
\int_0^{\frac{\pi}{\omega}}e^{-pt}\sin\omega t dt = \frac{\omega(1+e^{-\frac{\pi}{\omega}p})}{p^2+\omega^2}.
\]
So
\[
{\cal L}\{|\sin\omega t|\} = \frac{(1+e^{-\frac{\pi}{\omega}p})}{1-e^{-\frac{\pi}{\omega}p}}\frac{\omega}{p^2+\omega^2}=\frac{\omega}{p^2+\omega^2}\coth\frac{p\pi}{2\omega}.
\]
\end{proof}

(2) \begin{proof} $f(t)=t-a\left[\frac{t}{a}\right]$ has period $a$. So by Problem 2, we have
\[
{\cal L}\left\{t-a\left[\frac{t}{a}\right]\right\} = \frac{1}{1-e^{-ap}}\int_0^ae^{-pt}\left(t-a\left[\frac{t}{a}\right]\right)dt=\frac{1}{1-e^{-ap}}\int_0^ae^{-pt}tdt = \frac{1}{p^2}-\frac{a}{p}\frac{e^{-ap}}{1-e^{-ap}}.
\]
\end{proof}

\noindent 4. (1) \begin{proof}
By the formula ${\cal L}\{(-t)^nf(t)\} = [{\cal L}\{f(t)\}]^{(n)}$, we have
\begin{eqnarray*}
{\cal L}^{-1}\left\{\frac{a^3}{p(p+a)^3}\right\}&=&{\cal L}^{-1}\left\{ \frac{1}{p} - \frac{a^2}{(a+p)^3} - \frac{a}{(a+p)^2} - \frac{1}{a+p}\right\} \\
&=& \eta(t) - \frac{a^2}{2}{\cal L}^{-1}\left\{\left(\frac{1}{p+a}\right)^{(2)}\right\} + a{\cal L}^{-1}\left\{\left(\frac{1}{p+a}\right)'\right\}-e^{-at}\eta(t)\\
&=& \eta(t) - \frac{a^2}{2}(-t)^2e^{-at}\eta(t)+a(-t)e^{-at}\eta(t)-e^{-at}\eta(t)\\
&=& \left[1-e^{-at}\left(1+at+\frac{a^2t^2}{2}\right)\right]\eta(t).
\end{eqnarray*}
\end{proof}

(2) \begin{proof} We note $\frac{\omega}{p(p^2+\omega^2)}=\frac{1}{\omega p}-\frac{p}{\omega(\omega^2+p^2)}$. So ${\cal L}^{-1}\left\{\frac{\omega}{p(p^2+\omega^2)}\right\} = \frac{1}{\omega}(1-\cos\omega t)\eta(t)$.\end{proof}

(3) \begin{proof} We note $\frac{4p-1}{(p^2+p)(4p^2-1)}=\frac{1}{p}+\frac{5}{3}\frac{1}{p+1}+\frac{1}{3}\frac{1}{p-\frac{1}{2}}-\frac{3}{p+\frac{1}{2}}$. Therefore
\[
{\cal L}^{-1}\left\{\frac{4p-1}{(p^2+p)(4p^2-1)}\right\} = \left(1+\frac{5}{3}e^{-t} + \frac{1}{3}e^{t/2} - 3e^{-t/2}\right)\eta(t).
\]
\end{proof}

(4)\begin{proof} We note
\[
\frac{p^2+\omega^2}{(p^2-\omega^2)^2} = \frac{1}{2(p-\omega)^2} + \frac{1}{2(p+\omega)^2}=-\frac{1}{2}[{\cal L}\{e^{\omega t}\}]' - \frac{1}{2}[{\cal L}\{e^{-\omega t}\}]' = ({\cal L}\{-\cosh\omega t\})'.
\]
So by the formula ${\cal L}\{(-t)^nf(t)\} = [{\cal L}\{f(t)\}]^{(n)}$, we have
\[
{\cal L}^{-1}\left\{\frac{p^2+\omega^2}{(p^2-\omega^2)^2}  \right\} = (-t)(-\cosh\omega t)\eta(t)=t\cosh(\omega t)\eta(t).
\]
\end{proof}

(5) \begin{proof}
We note ${\cal L}\{1_{t\ge \tau}\}=\frac{e^{-p\tau}}{p}$. So by the formula ${\cal L}\{\int_0^tf(s)ds\} = \frac{F(p)}{p}$, we have
\[
{\cal L}^{-1}\{\frac{e^{-p\tau}}{p^2}\} = \int_0^t1_{\{s\ge \tau\}}ds = (t-\tau)\eta(t-\tau).
\]
\end{proof}

(6) \begin{proof}We have shown in Problem 3(2) that
\[
{\cal L}\left\{t-\alpha\left[\frac{t}{\alpha}\right]\right\} = \frac{1}{p^2}-\frac{\alpha}{p}\frac{e^{-\alpha p}}{1-e^{-\alpha p}}.
\]
By the formula ${\cal L}\{(-t)^nf(t)\} = [{\cal L}\{f(t)\}]^{(n)}$, we have ${\cal L}\{-t\} = [{\cal L}\{1\}]'=-\frac{1}{p^2}$. So
\[
{\cal L}\{t\} - {\cal L}\left\{t-\alpha\left[\frac{t}{\alpha}\right]\right\} = \frac{\alpha}{p}\frac{e^{-\alpha p}}{1-e^{-\alpha p}},
\]
and
\[
{\cal L}^{-1}\left\{\frac{1}{p}\frac{e^{-\alpha p}}{1-e^{-\alpha p}}\right\} = \left[\frac{t}{\alpha}\right]\eta(t).
\]
\end{proof}

\noindent 5. (3) \begin{proof} Denote by $F(p)$ the Laplace transform of $y(t)$. By the Convolution Theorem (Theorem 9.1), $F(p)=\frac{a}{p^2+1}-2F(p)\frac{p}{p^2+1}$. So $F(p)=\frac{a}{(p+1)^2}$ and
\[
y(t) = {\cal L}^{-1}\left\{\frac{a}{(p+1)^2}\right\} = -a{\cal L}^{-1}\left\{\left(\frac{1}{p+1}\right)'\right\}=ate^{-t}.
\]
\end{proof}

(4) \begin{proof} Denote by $F(p)$ the Laplace transform of $f(t)$. Then $F(p)+2F(p)\frac{p}{p^2+1}=\frac{9}{p-2}$. Therefore
\[
F(p) = \frac{9(p^2+1)}{(p-2)(p+1)^2}=-\frac{6}{(p+1)^2} + \frac{5}{p-2} + \frac{4}{p+1}.
\]
Hence
\[
f(t) = 6{\cal L}^{-1}\left\{\left(\frac{1}{p+1}\right)'\right\} + 5e^{2t} + 4e^{-t} = 5e^{2t}+4e^{-t} -6te^{-t}.
\]
\end{proof}

\noindent 6. (1) \begin{proof}
By the formula $\int_0^{\infty}F(p)dp = \int_0^{\infty}\frac{f(t)}{t}dt$, we have
\begin{eqnarray*}
\int_0^{\infty}\frac{e^{-ax}-e^{-bx}}{x}\cos(cx)dx &=& \int_0^{\infty}{\cal L}\{(e^{-ax}-e^{-bx})\cos(cx)\}dp \\
&=& \int_0^p\left[\frac{a+p}{(a+p)^2+c^2} - \frac{b+p}{(b+p)^2+c^2}\right]dp\\
&=& \frac{1}{2}\ln\frac{b^2+c^2}{a^2+c^2}.
\end{eqnarray*}
Here the Laplace transform inside the integral is obtained by applying integration-by-parts formula twice to integrals of the form $\int_0^{\infty}e^{-\omega x}\cos(cx)dx$.
\end{proof}

(2) \begin{proof} By the formula $\int_0^{\infty}F(p)dp = \int_0^{\infty}\frac{f(t)}{t}dt$ and the formula ${\cal L}\left\{\frac{f(t)}{t}\right\}=\int_p^{\infty}F(q)dq$, we have
\[
\int_0^{\infty}\frac{1-\cos bx}{x^2}dx = \int_0^{\infty}{\cal L}\left\{\frac{1-\cos bx}{x}\right\}dp = \int_0^{\infty}\int_0^{\infty}{\cal L}\{1-\cos bx\}dqdp = \int_0^{\infty}\int_0^{\infty}\left(\frac{1}{q}-\frac{q}{q^2+b^2}\right)dqdp.
\]
Note
\[
\int_p^{\infty}\left(\frac{1}{q}-\frac{q}{q^2+b^2}\right)dq = \lim_{N\to\infty}\left(\ln\frac{N}{p}-\frac{1}{2}\ln\frac{N^2+b^2}{p^2+b^2}\right)=\frac{1}{2}\ln\frac{p^2+b^2}{p^2},
\]
and
\begin{eqnarray*}
\int_0^{\infty}\frac{1}{2}\ln\frac{p^2+b^2}{p^2}dp &=& \frac{1}{2}\left(\left. p\ln\frac{p^2+b^2}{p^2}\right|_{p=0}^{\infty} - \int_0^{\infty}p\left(\frac{2p}{p^2+b^2}-\frac{2p}{p^2}\right)dp\right) \\
&=& \int_0^{\infty}\frac{b^2}{p^2+b^2}dp = b\left.\arctan\frac{p}{b}\right|_{p=0}^{\infty} = \frac{\pi}{2}b.
\end{eqnarray*}
\end{proof}

\noindent 8. (1) \begin{proof}
\begin{eqnarray*}
\sum_{n=0}^{\infty} \frac{(-1)^n}{3n+1} &=& \sum_{n=0}^{\infty} (-1)^n\int_0^{\infty} e^{-t}e^{-3nt}dt =\int_0^{\infty} e^{-t}\sum_{n=0}^{\infty}(-e^{-3t})^n dt = \int_0^{\infty}\frac{e^{-t}}{1+e^{-3t}}dt.
\end{eqnarray*}
Substituting $e^{-t}$ for $y$, we have
\begin{eqnarray*}
\sum_{n=0}^{\infty} \frac{(-1)^n}{3n+1} &=& \int_0^1\frac{dy}{1+y^3} = \int_0^1\left[\frac{1}{3(y+1)}-\frac{1}{3}\frac{y-2}{y^2-y+1}\right]dy \\
&=& \frac{1}{3} \int_0^1\left[\frac{1}{y+1} -\frac{y-\frac{1}{2}}{(y-\frac{1}{2})^2+\frac{3}{4}} + \frac{3}{2}\frac{1}{(y-\frac{1}{2})^2+\frac{3}{4}} \right]dy \\
&=& \frac{1}{3} \left.\left\{\ln(y+1) -\frac{1}{2}\ln\left[\left(y-\frac{1}{2}\right)^2+\frac{3}{4}\right] + \sqrt{3}\arctan\left(\frac{y-\frac{1}{2}}{\frac{\sqrt{3}}{2}}\right) \right\}\right|_{y=0}^1 \\
&=& \frac{1}{3} \left(\ln 2+ 2\sqrt{3}\arctan\frac{1}{\sqrt{3}}\right) \\
&=& \frac{1}{3} \left(\ln 2 + \frac{\pi}{\sqrt{3}}\right).
\end{eqnarray*}
\end{proof}

(2) \begin{proof}
\begin{eqnarray*}
\sum_{n=0}^{\infty}\frac{(-1)^n}{4n+1} = \sum_{n=0}^{\infty}(-1)^n\int_0^{\infty}e^{-(4n+1)t}dt = \int_0^{\infty}e^{-t}\sum_{n=0}^{\infty} (-e^{-4t})^n dt = \int_0^{\infty}\frac{e^{-t}}{1+e^{-4t}}dt.
\end{eqnarray*}
Substituting $e^{-t}$ for $y$, we have
\begin{eqnarray*}
& & \sum_{n=0}^{\infty}\frac{(-1)^n}{4n+1} = \int_0^1\frac{dy}{1+y^4} = -\frac{1}{2\sqrt{2}}\int_0^1\left[\frac{y-\sqrt{2}}{y^2-\sqrt{2}y+1}-\frac{y+\sqrt{2}}{y^2+\sqrt{2}y+1}\right]dy \\
&=& -\frac{1}{2\sqrt{2}}\left.\left\{\frac{1}{2}\ln[(\sqrt{2}y-1)^2+1] - \arctan(\sqrt{2}y-1) -\frac{1}{2}\ln[(\sqrt{2}y+1)^2+1] - \arctan(\sqrt{2}y+1)\right\}\right|_0^1 \\
&=& -\frac{1}{2\sqrt{2}}\left\{\frac{1}{2}\ln\frac{2-\sqrt{2}}{2+\sqrt{2}} - \arctan(\sqrt{2}-1)-\arctan(\sqrt{2}+1)\right\}\\
&=& -\frac{1}{2\sqrt{2}}\left[\frac{1}{2}\ln(3-2\sqrt{2})-\frac{\pi}{2}\right] \\
&=& \frac{1}{4\sqrt{2}}[2\ln(\sqrt{2}+1)+\pi].
\end{eqnarray*}
\end{proof}

(3) \begin{proof}
Suppose $p,q\in \mathbb N$ and $q\le p$, we have (substitute $e^{-\frac{t}{p}}$ for $y$)
\[
\sum_{n=0}^{\infty}\frac{(-1)^n}{n+\frac{q}{p}} = \sum_{n=0}^{\infty}(-1)^n\int_0^{\infty}e^{-(n+q/p)t}dt = \int_0^{\infty}e^{-\frac{q}{p}t}\sum_{n=0}^{\infty}(-1)^ne^{-nt}dt = \int_0^{\infty}\frac{e^{-\frac{q}{p}t}}{1+e^{-t}}dt= \int_0^1\frac{py^{q-1}}{1+y^p}dy.
\]
It's easy to verify (we have proved $\int_0^1\frac{dy}{1+y^3}=\frac{1}{3} \left(\ln 2 + \frac{\pi}{\sqrt{3}}\right)$ in part (1) and note $\frac{y}{1+y^3}= -\frac{1}{3(y+1)}+\frac{y+1}{3(y^2-y+1)}$)
\begin{eqnarray*}
\int_0^1\frac{py^{q-1}}{1+y^p}dy =
\begin{cases}
\ln 2 & p=q=1; \\
\ln 2 + \frac{\pi}{\sqrt{3}} & p=3, \; q =1;\\
-\ln 2 + \frac{\pi}{\sqrt{3}}& p=3, \; q =2.
\end{cases}
\end{eqnarray*}
Therefore
\begin{eqnarray*}
\sum_{n=0}^{\infty} \frac{(-1)^n}{(3n+1)(3n+2)(3n+3)} &=& \sum_{n=0}^{\infty}(-1)^n\left[\frac{1}{6(n+1)}+\frac{1}{2(1+3n)}-\frac{1}{2+3n}\right]\\
&=&\frac{1}{6}\sum_{n=0}^{\infty}\frac{(-1)^n}{n+1} + \frac{1}{6} \sum_{n=0}^{\infty} \frac{(-1)^n}{n+\frac{1}{3}} - \frac{1}{3} \sum_{n=0}^{\infty} \frac{(-1)^n}{n+\frac{2}{3}} \\
&=& \frac{1}{6}\ln 2 + \frac{1}{6} \left(\ln 2+ \frac{\pi}{\sqrt{3}}\right) - \frac{1}{3}\left(-\ln 2+\frac{\pi}{\sqrt{3}}\right) \\
&=& \frac{2}{3}\ln 2 -\frac{\pi}{6\sqrt{3}}.
\end{eqnarray*}
\end{proof}


\section{$\delta$函数}

\noindent 1.  Let $\varphi(x)$ be any test function
 that satisfies certain regularity conditions.

(1)\begin{proof} $\int_{-\infty}^{\infty} \varphi(x) \delta(-x)dx =
\int_{-\infty}^{\infty} \varphi(-x) \delta(x) dx =
\varphi(0)=\int_{-\infty}^{\infty} \varphi(x)\delta(x)dx$. So
$\delta(-x)=\delta(x)$.\end{proof}


(2)\begin{proof} $\int_{-\infty}^{\infty} \varphi(x) \cdot x\delta(x) dx =
\int_{-\infty}^{\infty}(x\varphi(x))\delta(x)dx = 0\cdot
\varphi(0)=0$. So $x\delta(x)=0$.
\end{proof}

(3)\begin{proof} $ \int_{-\infty}^{\infty}\varphi(x)\cdot f(x) \delta(x)dx =
\varphi(0)f(0)=\int_{-\infty}^{\infty}\varphi(x) \cdot
f(0)\delta(x)dx$. So $f(x)\delta(x)=f(0)\delta(x)$.
\end{proof}

(4)\begin{proof} \begin{eqnarray*}\int_{-\infty}^{\infty} \varphi(x) \cdot x\delta'(x)dx &=&
\int_{-\infty}^{\infty}(x\varphi(x))\delta'(x)dx
=\left.x\varphi(x)\delta(x)\right|_{-\infty}^{\infty} -
\int_{-\infty}^{\infty}[x\varphi(x)]'\delta(x)dx \\
&=&
-\varphi(0)=-\int_{-\infty}^{\infty}\varphi(x)\delta(x)dx.
\end{eqnarray*}
 So
$x\delta'(x)=-\delta(x)$.
\end{proof}

(5)\begin{proof} Assume first $a>0$. Then $\int_{-\infty}^{\infty}\varphi(x)\delta(ax)dx =
\int_{-\infty}^{\infty}\varphi(y/a)\delta(y)\frac{dy}{a} =
\frac{1}{a}\varphi(0)$. So $\delta(ax)=\frac{1}{a}\delta(x)$. For $a<0$ the
substitution $y=ax$ reverses the limits of integration, and the same
computation gives $\delta(ax)=\frac{1}{|a|}\delta(x)$ in general.
\end{proof}

(6)\begin{proof} \begin{eqnarray*} \int_{-\infty}^{\infty}f(x)\delta(x^2-a^2)dx &=& \int_{-\infty}^0f(x)\delta(x^2-a^2)dx + \int_0^{\infty}f(x)\delta(x^2-a^2)dx \\
&=& \int^{-a^2}_{\infty} f(-\sqrt{y+a^2})\delta(y)d(-\sqrt{y+a^2})
+ \int_{-a^2}^{\infty}f(\sqrt{y+a^2})\delta(y)d\sqrt{y+a^2} \\
&=& \int_{-a^2}^{\infty}\frac{f(-\sqrt{y+a^2})}{2\sqrt{y+a^2}}\delta(y)dy + \int_{-a^2}^{\infty}\frac{f(\sqrt{y+a^2})}{2\sqrt{y+a^2}}\delta(y)dy\\
&=& \frac{1}{2a}[f(-a)+f(a)],
\end{eqnarray*}
where the last equality is due to the fact $0\in (-a^2,\infty)$. Therefore, $\delta(x^2-a^2)=\frac{1}{2a}[\delta(x-a)+\delta(x+a)]$.
\end{proof}

\begin{remark}More generally, we have the following useful result
\begin{prop}
Suppose $\varphi(x)$ is a continuously differentiable function and the equation $\varphi(x)=0$ has finitely many roots $(x_k)_{k=1}^N$ with $\varphi'(x_k)\ne 0$. Then
\[
\delta[\varphi(x)]=\sum_{k=1}^N\frac{\delta(x-x_k)}{|\varphi'(x_k)|}.
\]
\end{prop}
\begin{proof}For each $k\in \{1, \cdots, N\}$, we prove $\delta(\varphi(x))$ is $C_k\delta(x-x_k)$ for some constant $C_k$ in a neighborhood of $x_k$. Indeed, we can find $\varepsilon >0$ such that $x_1, \cdots, x_{k-1}, x_{k+1},\cdots,x_N \not \in [x_k-\varepsilon, x_k+\varepsilon]$. Clearly $\delta(\varphi(x_k))=\infty$. Furthermore, we have by the change-of-variable formula ($y:=\varphi(x)$)
\begin{eqnarray*}
 \int_{x_k-\varepsilon}^{x_k+\varepsilon}\delta(\varphi(x))dx
&=& \int_{[\varphi(x_k-\varepsilon), \varphi(x_k+\varepsilon)]}\frac{\delta(y)dy}{|\varphi'(\varphi^{-1}(y))|} = \frac{1}{|\varphi'(\varphi^{-1}(\varphi(x_k)))|} = \frac{1}{|\varphi'(x_k)|}.
\end{eqnarray*}
So in a sufficiently small neighborhood of $x_k$, $\delta(\varphi(x))=\frac{\delta(x-x_k)}{|\varphi'(x_k)|}$.
\end{proof}
\end{remark}

\noindent 2. (1) \begin{proof}The general solution of the homogeneous equation $\left[\frac{d^2}{dx^2}-k^2\right]g(x;t)=0$ $(x>t)$ is $c_1(t)e^{kx}+c_2(t)e^{-kx}$. By the continuity property of $g(x;t)$ at $x=t$ (formula (10.36a) and formula (10.36b)), we have
\[
\begin{cases}
c_1(t)e^{kt}+c_2(t)e^{-kt} = 0 \\
kc_1(t)e^{kt}-kc_2(t)e^{-kt} = 1.
\end{cases}
\]
Solving this equation, we get $c_1(t) = \frac{e^{-kt}}{2k}$ and $c_2(t) = -\frac{e^{kt}}{2k}$. Therefore, combining with formula (10.39), we have
\[
g(x;t) = [c_1(t)e^{kx} + c_2(t)e^{-kx}]\eta(x-t) = \frac{1}{k}\sinh k(x-t)\eta(x-t).
\]

\begin{remark}
The above result differs from the answer of the textbook. But
according to the textbook's answer to Exercise Problem 3(2) of this
chapter, we see the correct answer is indeed $\frac{1}{k}\sinh
k(x-t)\eta(x-t)$.
\end{remark}
\end{proof}

(2) \begin{proof} By Exercise Problem 2(1) of Chapter 6, the general solution of the homogeneous equation $\left[\frac{d^2}{dx^2}-x^2\right]g(x;t)=0$ $(x>t)$ is $c_1(t)w_1(x)+c_2(t)w_2(x)$ where
\[
w_1(x) = \sum_{n=0}^{\infty}\frac{\Gamma(3/4)}{n!\Gamma(n+3/4)}\left(\frac{x}{2}\right)^{4n}, \; w_2(x)=\sum_{n=0}^{\infty}\frac{\Gamma(5/4)}{n!\Gamma(n+5/4)}\left(\frac{x}{2}\right)^{4n+1}.
\]
By the continuity property of $g(x;t)$ at $x=t$ (formula (10.36a) and formula (10.36b)), we have
\[
\begin{cases}
c_1(t)w_1(t) + c_2(t)w_2(t) = 0 \\
c_1(t)w_1'(t) + c_2(t) w_2'(t) = 1.
\end{cases}
\]
Using the hint that $\left|\begin{matrix}w_1(x) & w_2(x) \\ w_1'(x) & w_2'(x)\end{matrix}\right|=\frac{1}{2}$, we can solve the above equations to get $c_1(t)=-2w_2(t)$ and $c_2(t) = 2w_1(t)$. Therefore, combining with formula (10.39), we have
\[
g(x;t) = 2[w_2(x)w_1(t)-w_1(x)w_2(t)]\eta(x-t).
\]

\begin{remark}
To see why the hint is true, we note $[w_1(x)w_2'(x)-w_1'(x)w_2(x)]' = w_1(x)w_2''(x)-w_1''(x)w_2(x) = w_1(x)\cdot x^2w_2(x)-x^2w_1(x)w_2(x)=0$. Therefore, $w_1(x)w_2'(x)-w_1'(x)w_2(x)=const = w_1(0)w_2'(0)-w_1'(0)w_2(0)=1/2$.
\end{remark}
\end{proof}

(3) \begin{proof} By Exercise Problem 2(4) of Chapter 6, the
homogeneous equation
\[
[(1+x+x^2)\frac{d^2}{dx^2}+2(1+2x)\frac{d}{dx}+2]g(x;t)=0
\]
has solution $c_1(t)w_1(x)+c_2(t)w_2(x)$, where
\[
w_1(x)=\frac{1}{1+x+x^2},\; w_2(x)=\frac{x}{1+x+x^2}.
\]
To use the continuity conditions of the Green's function at $x=t$,
we note
\[
\left[(1+x+x^2)\frac{d^2}{dx^2}+2(1+2x)\frac{d}{dx}+2\right]g(x;t) =
\frac{d^2}{dx^2}[(1+x+x^2)g(x;t)].
\]
So by the condition $\left.\frac{d g(x;t)}{dx}\right|_{x<t}=0$ and
integrating both sides of the equation, we have
\[
\frac{d}{dx}[(1+x+x^2)g(x;t)]|_{t-0}^{t+0}=1,
\]
i.e.
$(1+2t)[c_1(t)w_1(t)+c_2(t)w_2(t)]+(1+t+t^2)[c_1(t)w_1'(t)+c_2(t)w_2'(t)]=1$.
And by the continuity of $g(x;t)$ at $x=t$ and $g(x;t)|_{x<t}=0$, we
have $c_1(t)w_1(t)+c_2(t)w_2(t)=0$. Combined, we have the system of
equations
\[
\begin{cases}
c_1(t)w_1(t)+c_2(t)w_2(t)=0 \\
c_1(t)w_1'(t)+c_2(t)w_2'(t)=\frac{1}{1+t+t^2}.
\end{cases}
\]
Let $D(t) = \left|\begin{matrix} w_1(t) & w_2(t) \\ w_1'(t) &
w_2'(t)
\end{matrix}\right|$. Then it's easy to see $D(t) =
\frac{1}{(1+t+t^2)^2}$. Then
\begin{eqnarray*}
\left[\begin{matrix}c_1(t) \\ c_2(t) \end{matrix}\right] =
\left[\begin{matrix}w_1(t) & w_2(t) \\ w_1'(t) & w_2'(t) \end{matrix}\right]^{-1}\left[\begin{matrix} 0 \\
\frac{1}{1+t+t^2}\end{matrix}\right] = D(t)^{-1}\left[\begin{matrix}w_2'(t) & -w_2(t) \\ -w_1'(t) & w_1(t) \end{matrix}\right]\left[\begin{matrix} 0 \\
\frac{1}{1+t+t^2}\end{matrix}\right] = \left[\begin{matrix} -t \\
1\end{matrix}\right].
\end{eqnarray*}
So $g(x;t)=\frac{x-t}{1+x+x^2}\eta(x-t)$.
\end{proof}

\noindent 3. (1) \begin{proof} By Example 10.5, the solution to the
equation
\[
\begin{cases}
\frac{d^2g(x;t)}{dx^2} + k^2g(x;t) = \delta(x-t),\; x,t>0 \\
g(0;t)=0,\; \left.\frac{dg(x;t)}{dx}\right|_{x=0}=0.
\end{cases}
\]
is $g(x;t)=\frac{1}{k}\sin k(x-t)\eta(x-t)$. By formula (10.60),
\begin{eqnarray*}
y(x) &=& \int_0^xg(x;t) f(t)dt -
\left[A\frac{dg(x;t)}{dt}-Bg(x;t)\right]_{t=0} \\
&=& \int_0^x\frac{1}{k}\sin k(x-t)f(t)dt - [-A\cos
k(x-t)-\frac{B}{k}\sin k(x-t)]_{t=0} \\
&=& \frac{1}{k}\int_0^x\sin k(x-t)f(t)dt + A\cos kx +\frac{B}{k}\sin
kx.
\end{eqnarray*}
\end{proof}

(2) \begin{proof}By Exercise Problem 2(1) of this chapter, the
equation
\[
\begin{cases}
\frac{d^2g(x;t)}{dx^2} - k^2g(x;t) = \delta(x-t),\; x,t>0 \\
g(0;t)=0,\; \left.\frac{dg(x;t)}{dx}\right|_{x=0}=0.
\end{cases}
\]
has solution $g(x;t)=\frac{1}{k}\sinh k(x-t)\eta(x-t)$. By formula
(10.60), we have
\[
y(x)=\int_0^xg(x;t)f(t)dt -
\left[A\frac{dg(x;t)}{dt}-Bg(x;t)\right]_{t=0} = A\cosh kx +
\frac{B}{k}\sinh kx + \frac{1}{k}\int_0^x\sinh k(x-t)f(t)dt.
\]
\end{proof}

(3) \begin{proof} By Exercise Problem 2(2) of this chapter, the
Green's function is
\[
g(x;t)=2[w_2(x)w_1(t)-w_1(x)w_2(t)]\eta(x-t),
\]
where
$w_1(x)=\sum_{n=0}^{\infty}\frac{\Gamma(3/4)}{n!\Gamma(n+3/4)}\left(\frac{x}{2}\right)^{4n}$
and
$w_2(x)=\sum_{n=0}^{\infty}\frac{\Gamma(5/4)}{n!\Gamma(n+5/4)}\left(\frac{x}{2}\right)^{4n+1}$.
Define
\[
D_1(t,x)=\left|\begin{matrix}w_1(t) & w_2(t) \\
w_1(x) & w_2(x) \end{matrix}\right|, \;
D_2(t,x)=\left|\begin{matrix}w_1(t) & w_2(t) \\
w_1'(x) & w_2'(x) \end{matrix}\right|.
\]
Then $g(x;t)=2D_1(t,x)\eta(x-t)$ and by formula (10.60)
\begin{eqnarray*}
y(x) &=& \int_0^x g(x;t) f(t) dt - \left[A\frac{dg(x;t)}{dt} - B
g(x;t)\right]_{t=0} \\
&=& 2\int_0^xD_1(t,x)f(t)dt -A\cdot 2[w_2(x)w_1'(0)-w_1(x)w_2'(0)] +
B \cdot 2[w_2(x)w_1(0)-w_1(x)w_2(0)] \\
&=& 2\int_0^xD_1(t,x)f(t)dt + 2A D_2(x,0) + 2BD_1(0,x).
\end{eqnarray*}

\begin{remark}
This result differs from the answer of the textbook. Check.
\end{remark}
\end{proof}

\noindent 4. (1) \begin{proof}
By Exercise Problem 2(1) of this chapter, the general solution to the equation
\[
\left[\frac{d^2}{dx^2}-k^2\right]g(x;t) = \delta(x-t)
\]
is $\frac{1}{k}\sinh k(x-t)\eta(x-t) + C(t) e^{kx} + D(t) e^{-kx}$. Plugging this formula into the boundary conditions, we get
\[
\begin{cases}
C(t)+D(t)=0 \\
\frac{1}{k}\sinh k(1-t) + C(t)e^k + D(t)e^{-k} = 0.
\end{cases}
\]
Solving it gives us $D(t) = \frac{1}{2k} \frac{\sinh k(1-t)}{\sinh k}$ and $C(t)=-\frac{1}{2k}\frac{\sinh k(1-t)}{\sinh k}$. Therefore
\[
g(x;t) = \frac{1}{k}\sinh k(x-t) \eta(x-t) - \frac{\sinh k(1-t)}{k\sinh k}\sinh kx.
\]
\end{proof}

(2) \begin{proof}
By Exercise Problem 2(2) of this chapter, the general solution to the equation
\[
\left[\frac{d^2}{dx^2}-x^2\right]g(x;t)=\delta(x-t)
\]
is $2[w_2(x)w_1(t)-w_1(x)w_2(t)]\eta(x-t)+C(t)w_1(x)+D(t)w_2(x)$, where
\[
w_1(x) = \sum_{n=0}^{\infty}\frac{\Gamma(3/4)}{n! \Gamma(n+3/4)}\left(\frac{x}{2}\right)^{4n}, \; w_2(x) = \sum_{n=0}^{\infty} \frac{\Gamma(5/4)}{n!\Gamma(n+5/4)}\left(\frac{x}{2}\right)^{4n+1}.
\]
Plugging this formula into the boundary conditions, we get (note $w_1(0)=1$ and $w_2(0)=0$)
\[
\begin{cases}
C(t) = 0 \\
2[w_2(1)w_1(t)-w_1(1)w_2(t)] + C(t)w_1(1) + D(t) w_2(1) = 0.
\end{cases}
\]
Solving it gives us $C(t)=0$ and $D(t)=-\frac{2D_1(t,1)}{w_2(1)}$. So
\[
g(x;t) = -\frac{2w_2(x)}{w_2(1)}D_1(t,1) + 2D_1(t,x)\eta(x-t),
\]
where $D_1(t,x) = \left|\begin{matrix}w_1(t) & w_2(t) \\ w_1(x) & w_2(x) \end{matrix}\right|$.

\begin{remark}
The above result differs from the answer in the textbook. But according to the textbook's answer to Exercise Problem 5(3), the above result is the correct one.
\end{remark}
\end{proof}

(3) \begin{proof}
By Exercise Problem 2(3) of this chapter, the general solution to the equation
\[
\left[(1+x+x^2)\frac{d^2}{dx^2} + 2(1+2x)\frac{d}{dx} + 2\right]g(x;t) = \delta(x-t)
\]
is $\frac{x-t}{1+x+x^2}\eta(x-t) + \frac{C(t)}{1+x+x^2} + \frac{D(t)}{1+x+x^2}x$. Plugging this formula into the boundary conditions, we get
$C(t)=0$ and $D(t) = -(l-t)/l$. So
\[
g(x;t) = \frac{x-t}{1+x+x^2}\eta(x-t) - \frac{(l-t)x}{l(1+x+x^2)}.
\]

\begin{remark}
The textbook's answer is wrong, as seen easily by checking the boundary condition at $x=l$.
\end{remark}
\end{proof}

\noindent 5. (1) \begin{proof}
By Example 10.7, the solution to the equation
\[
\begin{cases}
\left[\frac{d^2}{dx^2}+k^2\right]g(x;t)=\delta(x-t) & (0<x,t<1)\\
g(0;t)=0,\; g(1;t) = 0 &
\end{cases}
\]
is $g(x;t)=\frac{1}{k}\sin k(x-t)\eta(x-t) - \frac{1}{k}\frac{\sin k(1-t)}{\sin k}\sin kx$. By formula (10.67), we have
\begin{eqnarray*}
y(x) &=& \int_0^1g(x;t)f(t)dt + B\left. \frac{\cos k(1-t)}{\sin k}\sin kx \right|_{t=1} - A\left[-\cos k(x-t)+\frac{\cos k(1-t)}{\sin k}\sin kx\right]_{t=0} \\
&=& \frac{1}{k}\int_0^x\sin k(x-t)f(t)dt - \frac{\sin kx}{k\sin k} \int_0^1\sin k(1-t)f(t)dt + B\frac{\sin kx}{\sin k} + A\frac{\sin k(1-x)}{\sin k}.
\end{eqnarray*}
\end{proof}

(2) \begin{proof}By Exercise Problem 4(1) of this chapter, the solution to the equation
\[
\begin{cases}
\left[\frac{d^2}{dx^2}-k^2\right]g(x;t) = \delta(x-t) & (0<x,t<1, k>0) \\
g(0;t) = 0,\; g(1;t)=0.
\end{cases}
\]
is $g(x;t) = \frac{1}{k}\sinh k(x-t) \eta(x-t) -\frac{\sinh k(1-t)}{k\sinh k}\sinh kx$. Using formula (10.67), we have
\begin{eqnarray*}
y(x) &=& \frac{1}{k}\int_0^x\sinh k(x-t)f(t)dt - \frac{\sinh kx}{k\sinh k}\int_0^1\sinh k(1-t)f(t)dt + \left.B\frac{\cosh k(1-t)}{\sinh k}\sinh kx\right|_{t=1} \\
& &  - A\left[\cosh k(x-t) +\frac{\cosh k(1-t)}{\sinh k}\sinh kx\right]_{t=0} \\
&=& \frac{1}{k}\int_0^x\sinh k(x-t)f(t)dt - \frac{\sinh kx}{k\sinh k}\int_0^1\sinh k(1-t)f(t)dt + B\frac{\sinh kx}{\sinh k} - A\frac{\sinh k(x+1)}{\sinh k}.
\end{eqnarray*}

\begin{remark}
The above result differs from the textbook's answer. Check.
\end{remark}
 \end{proof}

(3) \begin{proof} The solution to the equation
\[
\begin{cases}
\left[\frac{d^2}{dx^2}-x^2\right]g(x;t) = \delta(x-t) & (0<x,t<1) \\
g(0;t) = 0,\; g(1;t) = 0
\end{cases}
\]
is $g(x;t) = -\frac{2w_2(x)}{w_2(1)}D_1(t,1)+2D_1(t,x)\eta(x-t)$. By formula (10.67)
\begin{eqnarray*}
y(x) &=& \int_0^1g(x;t)f(t)dt + B\left.\frac{dg(x;t)}{dt}\right|_{t=1} - A\left.\frac{dg(x;t)}{dt}\right|_{t=0}\\
&=& 2\int_0^xD_1(t,x)f(t)dt - \frac{2w_2(x)}{w_2(1)}\int_0^1D_1(t,1)f(t)dt - B\frac{2w_2(x)}{w_2(1)}[w_1'(1)w_2(1)-w_2'(1)w_1(1)] \\
& & -A\left\{-\frac{2w_2(x)}{w_2(1)}[w_1'(0)w_2(1)-w_2'(0)w_1(1)] + 2[w_1'(0)w_2(x)-w_2'(0)w_1(x)]\right\}\\
&=& 2\int_0^xD_1(t,x)f(t)dt - \frac{2w_2(x)}{w_2(1)}\int_0^1D_1(t,1)f(t)dt + B\frac{2w_2(x)}{w_2(1)}D_2(1,1) \\
& & -A\left[\frac{w_1(1)}{w_2(1)}w_2(x)-w_1(x)\right].
\end{eqnarray*}

\begin{remark}
The above result differs from the textbook's answer. Check.
\end{remark}
\end{proof}

\section{Mathematica中的复变函数}

本章无习题。

\section{数学物理方程和定解条件}

\noindent 1. \begin{proof} For the points inside the bar, we can apply the partial differential equation (12.10). For the boundary conditions, the end where $x=0$ is fixed, so $u(0,t)=0$; the end where $x=l$ has no external stress, so by Hooke's law (formula (12.9)) $\left.\frac{\partial u(x,t)}{\partial x}\right|_{x=l}=0$ (see formula (12.36)). For the initial conditions, the initial velocity of every point on the bar is 0, so $\left.\frac{\partial u(x,t)}{\partial t}\right|_{t=0}=0$; at time 0, Hooke's law implies $E\frac{u(x,0)}{x} = P =\frac{F}{S}$, so $u|_{t=0} = \frac{F}{ES}x$. Combined, we have
\begin{eqnarray*}
\begin{cases}
\frac{\partial^2 u}{\partial t^2} - a^2\frac{\partial^2 u}{\partial x^2} = 0, \\
u|_{x=0}=0, & \left.\frac{\partial u}{\partial x}\right|_{x=l}=0, \\
u|_{t=0}=\frac{F}{ES}x, & \left.\frac{\partial u}{\partial t}\right|_{t=0} = 0.
\end{cases}
\end{eqnarray*}
\end{proof}

\noindent 2. \begin{proof}Let $D$ be the rate of diffusion. Then from formula (12.20), we conclude
\[
\frac{\partial u}{\partial t} = D \nabla^2 u + \alpha u.
\] \end{proof}

\noindent 3. \begin{proof} By Fourier's law (formula (12.15)), we have
\[
\left.\frac{\partial u}{\partial x}\right|_{x=0}=-\frac{q_1}{k}, \; \left.\frac{\partial u}{\partial x}\right|_{x=l} = \frac{q_2}{k}.
\]
\end{proof}

\noindent 4. \begin{proof}
We choose spherical coordinates and place the origin of the coordinates at the center of the ball, with the polar axis pointing toward the sun. Then by formula (12.41), the boundary conditions are
\begin{eqnarray*}
\left[\frac{\partial u}{\partial r} + \frac{H}{k}u\right]_{r=a} = \frac{H}{k}u_0 = \begin{cases}
\frac{M}{k}\cos\theta, & 0 \le \theta \le \frac{\pi}{2}, \\
0, & \frac{\pi}{2}<\theta\le \pi,
\end{cases}
\end{eqnarray*}
where $H$ is the proportional constant in Newton's law of cooling.
\end{proof}

\section{线性偏微分方程的通解}

\subsection{正文练习题}

\noindent 13.1. \begin{proof}
Suppose the solution has the form $u(x,y)=g(y)\phi(y+\alpha x)$. Then
\[
(D_x - \alpha D_y - \beta) u = g(y) (D_x-\alpha D_y)\phi(y+\alpha x) + \phi(y+\alpha x)(-\alpha D_y - \beta)g(y).
\]
Since $(D_x - \alpha D_y) \phi(y+\alpha x) = 0$, we obtain the ODE for $g(y)$: $(\alpha D_y + \beta) g(y) = 0$, which has a solution $g(y)=e^{-\frac{\beta}{\alpha}y}$. So $u(x,y)=e^{-\frac{\beta}{\alpha}y}\phi(y+\alpha x)$.
 \end{proof}

\subsection{章末习题}

\noindent 1. (1) \begin{proof}
The auxiliary equation is $\alpha^2-2\alpha -3=0$, which has roots $3$ and $-1$. So the general solution has the form of $f(3x+y)+g(x-y)$, where $f$ and $g$ are two independent $C^2$ (twice differentiable) functions.
\end{proof}

(2) \begin{proof}The auxiliary equation is $\alpha^2-2\alpha+2=0$, which has roots $1\pm i$. So the general solution has the form of $f(x+y+ix)+g(x+y-ix)$, where $f$ and $g$ are two independent $C^2$ functions. \end{proof}

(3) \begin{proof}
The auxiliary equation is $\alpha^2-\alpha = 0$. So the general solution has the form of $f(y)+g(y+x)$, where $f$ and $g$ are two independent $C^2$ functions.
\end{proof}

(4) \begin{proof}
We consider the PDE for $ru(t,r)$. The original PDE gives us $D_t^2 u = \frac{c^2}{r^2}(2rD_ru+r^2D_r^2u)$, which is equivalent to
\[
[D_t^2-c^2D_r^2](ru)=rD_t^2u - 2c^2D_ru - c^2rD_r^2u = 0.
\]
So the auxiliary equation for $ru(t,r)$ is $\alpha^2-c^2=0$, which has roots $\pm c$. So $ru(t,r)$ has the general form of $f(r+ct)+g(r-ct)$. Therefore $u(t,r)$ has the general form of $\frac{1}{r}[f(r+ct)+g(r-ct)]$.
\end{proof}

(5) \begin{proof}The auxiliary equation is $(a^2-b^2)\alpha^2 + 2a\alpha + 1 = 0$, which has roots $-\frac{1}{a+b}$ and $-\frac{1}{a-b}$. So the general solution has the form of $f\left(t-\frac{1}{a-b}x\right) + g\left(t-\frac{1}{a+b}x\right)$, or equivalently, $f(x-(a+b)t)+g(x-(a-b)t)$, where $f$ and $g$ are two independent $C^2$ functions. \end{proof}

(6) \begin{proof} The auxiliary equation is $\alpha^4-1=0$, which has roots $\pm i$, $\pm 1$. So the general solution has the form of $\phi_1(y+x)+\phi_2(y-x)+\phi_3(y+ix)+\phi_4(y-ix)$. \end{proof}

\noindent 2. (1) \begin{proof}
The general solution to the homogeneous equation
\[
\frac{\partial^2u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2} = 0
\]
has the form of $f(x+iy)+g(x-iy)$ where $f$ and $g$ are linearly independent (formula (13.11)). To find a special solution, we note
\[
\frac{1}{D_x^2+D_y^2}(x^2+xy) = \frac{1}{D_x^2}\left[\sum_{n=0}^{\infty}(-1)^n\left(\frac{D_y^2}{D_x^2}\right)^n\right](x^2+xy) = \frac{1}{D_x^2}(x^2+xy) = \frac{x^4}{12} + \frac{x^3y}{6}.
\]
So the general solution to the original equation has the form of
\[
f(x+iy)+g(x-iy)+\frac{x^4}{12}+\frac{x^3y}{6}.
\]
\end{proof}

(2) \begin{proof}
The general solution to the homogeneous equation
\[
\frac{\partial^2u}{\partial x^2} - \frac{\partial^2 u}{\partial y^2} = 0
\]
has the form of $f(x+y)+g(x-y)$ where $f$ and $g$ are linearly independent (formula (13.11)). To find a special solution, we note
\[
\frac{1}{D_x^2-D_y^2}(xy-x) = \frac{1}{D_x^2}\left[\sum_{n=0}^{\infty}\left(\frac{D_y^2}{D_x^2}\right)^n\right](xy-x)=\frac{1}{D_x^2}(xy-x) = \frac{x^3y}{6}-\frac{x^3}{6}.
\]
So the general solution to the original equation has the form of
\[
f(x+y)+g(x-y)+\frac{1}{6}x^3(y-1).
\]

\begin{remark}
The textbook's answer is $f(x+y)+g(x-y)+\frac{1}{6}x^3(y+1)$, which can be easily verified as wrong.
\end{remark}
\end{proof}

(3) \begin{proof}
The auxiliary equation of the homogeneous equation
\[
\frac{\partial^2u}{\partial x^2} - 2\frac{\partial^2u}{\partial x\partial y} + \frac{\partial^2u}{\partial y^2} = 0
\]
is $\alpha^2-2\alpha + 1 = 0$. So the general solution to the homogeneous equation has the form of $x\phi(x+y)+\psi(x+y)$, where $\phi$ and $\psi$ are linearly independent functions. To find a special solution, note
\begin{eqnarray*}
\frac{1}{(D_x - D_y)^2}(x^2+y) &=& \frac{1}{D_x^2}\left[\sum_{n=0}^{\infty}\left(\frac{D_y}{D_x}\right)^n\right]^2(x^2+y) = \frac{1}{D_x^2}\left(1+\frac{D_y}{D_x}+\cdots\right)^2(x^2+y) \\
&=& \frac{1}{D_x^2}\left(1+\frac{2D_y}{D_x}\right)(x^2+y) = \frac{1}{D_x^2}(x^2+y+\frac{2}{D_x}1)= \frac{x^4}{12}+\frac{x^2y}{2}+\frac{x^3}{3}.
\end{eqnarray*}
So the general solution to the original equation has the form of
\[
x\phi(x+y)+\psi(x+y) + \frac{x^4}{12} + \frac{x^3}{3} + \frac{x^2y}{2}.
\]

\begin{remark}
The above result is different from the textbook's answer. Check.
\end{remark}
\end{proof}

\noindent 3. (1) \begin{proof}
Using the transformation $x=e^t$ and $y=e^s$, we have
\begin{eqnarray*}
& & x^2\frac{\partial^2u}{\partial x^2}-2xy\frac{\partial^2u}{\partial x\partial y} + y^2\frac{\partial^2 u}{\partial y^2} + x\frac{\partial u}{\partial x}+y\frac{\partial u}{\partial y} \\
&=& \left[D_t(D_t-1) - 2D_tD_s + D_s(D_s-1) + D_t + D_s\right]u \\
&=& (D_t-D_s)^2u.
\end{eqnarray*}
So the general solution has the form
\[
u(x,y)=t\phi(t+s)+\psi(t+s) = \ln x \phi(\ln x + \ln y ) + \psi(\ln x + \ln y) = \ln x \cdot f(xy) + g(xy),
\]
where $\phi$ and $\psi$ (or equivalently, $f$ and $g$) are linearly independent functions.
\end{proof}

(2) \begin{proof}It's easy to see $\sin(xy)$ is a special solution to the inhomogeneous equation and $f(x+y)+g(x-y)$ is the general solution to the corresponding homogeneous equation, where $f$ and $g$ are linearly independent functions. So the general solution to the inhomogeneous equation is $f(x+y)+g(x-y)+\sin(xy)$. \end{proof}

\noindent 4. \begin{proof} The key is to note that
\begin{eqnarray*}
& & \frac{\partial }{\partial x}\left[\left(1-\frac{x}{l}\right)^2\frac{\partial u}{\partial x}\right] - \frac{1}{a^2}\left(1-\frac{x}{l}\right)^2\frac{\partial^2 u}{\partial t^2}\\
&=& \frac{1}{l^2}\left[-2(l-x)\frac{\partial u}{\partial x} + (l-x)^2\frac{\partial^2 u}{\partial x^2} - \frac{1}{a^2}(l-x)^2\frac{\partial^2 u}{\partial t^2}\right]\\
&=& \frac{l-x}{l^2}\left\{\frac{\partial^2}{\partial x^2}[(l-x)u]-\frac{1}{a^2}\frac{\partial^2}{\partial t^2}[(l-x)u]\right\}
\end{eqnarray*}
Defining $v(x,t)=(l-x)u(x,t)$, we get a new system of equations
\[
\begin{cases}
\frac{\partial^2}{\partial x^2}v(x,t)-\frac{1}{a^2}\frac{\partial^2}{\partial t^2}v(x,t) = 0\\
v|_{t=0}=(l-x)\phi(x),\; \left.\frac{\partial v}{\partial t}\right|_{t=0} = (l-x)\psi(x).
\end{cases}
\]
By Example 13.9, we conclude
\[
v(x,t) = \frac{1}{2}[(l-x+at)\phi(x-at)+(l-x-at)\phi(x+at)] + \frac{1}{2a}\int_{x-at}^{x+at}(l-\xi)\psi(\xi)d\xi.
\]
So
\[
u(x,t) = \frac{1}{2(l-x)}[(l-x+at)\phi(x-at)+(l-x-at)\phi(x+at)] + \frac{1}{2a(l-x)}\int_{x-at}^{x+at}(l-\xi)\psi(\xi)d\xi.
\]

\begin{remark}
The above result is different from the textbook's answer. Check.
\end{remark}
\end{proof}


\section{变量分离法}

\subsection{正文练习题}

\noindent 14.1. \begin{proof} If we solve for $T(t)$ directly, we can conclude the general solution to the equation $T''(t)+\lambda a^2T(t)=0$ is $T(t)=A\sin(\sqrt{\lambda}at) + B \cos(\sqrt{\lambda}at)$. The initial conditions $T(0)=T'(0)=0$ give $A=B=0$. So $T(t)\equiv 0$.

If we apply theory of ordinary differential equations, we note by Theorem 6.1,
\[
\begin{cases}
T''(t)+\lambda a^2T(t) = 0\\
T(0)=0,\; T'(0)=0
\end{cases}
\]
has a unique solution in $(0,\infty)$, which has to be $0$.
\end{proof}

\noindent 14.2. \begin{proof} Assume $u$ is $C^2$, then
\[
\begin{cases}
\frac{\partial^2u}{\partial t^2} - a^2\frac{\partial^2 u}{\partial x^2} = 0, & 0<x<l, t>0, \\
\left.\frac{\partial u(x,t)}{\partial x}\right|_{x=0}=0, \; \left.\frac{\partial u(x,t)}{\partial x}\right|_{x=l}=0, & t \ge 0, \\
u|_{t=0}=\phi(x), \; \left.\frac{\partial u}{\partial t}\right|_{t=0}=\psi(x), & 0 \le x \le l
\end{cases}
\]
implies
\[
\phi'(0)=\lim_{x\to 0}\frac{\partial}{\partial x}(u|_{t=0}) = \lim_{t \to 0}\left.\frac{\partial u(x,t)}{\partial x}\right|_{x=0}= 0, \; \phi'(l) = \lim_{x\to l} \frac{\partial }{\partial x}(u|_{t=0}) = \lim_{t\to 0}\left.\frac{\partial u(x,t)}{\partial x}\right|_{x=l} = 0,
\]
and
\[
\psi'(0) = \lim_{x\to 0} \frac{\partial }{\partial x}\left(\left.\frac{\partial u}{\partial t}\right|_{t=0}\right) =\lim_{t\to 0} \frac{\partial }{\partial t}\left(\left.\frac{\partial u}{\partial x}\right|_{x=0}\right) = 0,\; \psi'(l)=\lim_{x\to l}\frac{\partial}{\partial x}\left(\left.\frac{\partial u}{\partial t}\right|_{t=0}\right) =\lim_{t\to 0}\frac{\partial}{\partial t}\left(\left.\frac{\partial u}{\partial x}\right|_{x=l}\right) = 0.
\]
We should extend $\phi(x)$ and $\psi(x)$ in such a way that the extended functions are at least $C^1$. So
\[
\Phi(x)=\begin{cases}\phi(-x), & -l \le x \le 0,\\ \phi(x), & 0 \le x \le l,\end{cases} \;\; \Psi(x)=\begin{cases}\psi(-x), & -l \le x \le 0,\\ \psi(x), & 0 \le x \le l,\end{cases}
\]
and then extend $\Phi(x)$ and $\Psi(x)$ to $(-\infty, \infty)$ as periodic functions with period $2l$.
\end{proof}

\noindent 14.3. \begin{proof} Assume $u$ is $C^2$, then similar to previous exercise problem,
\[
\begin{cases}
\frac{\partial^2u}{\partial t^2} - a^2\frac{\partial^2 u}{\partial x^2} = 0, & 0<x<l, t>0, \\
u(x,t)|_{x=0}=0, \; \left.\frac{\partial u(x,t)}{\partial x}\right|_{x=l}=0, & t \ge 0, \\
u|_{t=0}=\phi(x), \; \left.\frac{\partial u}{\partial t}\right|_{t=0}=\psi(x), & 0 \le x \le l
\end{cases}
\]
implies
\[
\phi(0)=0, \; \phi'(l)=0, \; \psi(0)=0,\; \psi'(l)=0.
\]
We should extend $\phi(x)$ and $\psi(x)$ in such a way that the extended functions are at least $C^1$. Therefore, we should first extend $\phi(x)$ as
\[
\Phi(x) = \begin{cases}
\phi(2l-x), & l\le x \le 2l,\\
\phi(x), & 0 \le x \le l;
\end{cases}
\]
then we extend $\Phi(x)$ to $[-2l, 2l]$ so that it becomes an odd function; finally, we extend $\Phi(x)$ to $(-\infty, \infty)$ as a periodic function with period $4l$. $\psi(x)$ should be extended similarly.
\end{proof}

\noindent 14.4. \begin{proof}If we take choice (1), then the general solution to equation (14.21) will have the form
\[
u(x,y) = \sum_{n=0}^{\infty}\left[A_n\exp\left\{\frac{2n+1}{2a}\pi y\right\} + B_n \exp\left\{-\frac{2n+1}{2a}\pi y\right\}\right]\sin\frac{2n+1}{2a}\pi x.
\]
Plug this formula into (14.21c), we get
\[
\begin{cases}
\sum_{n=0}^{\infty}(A_n+B_n)\sin\frac{2n+1}{2a}\pi x = f(x) \\
\sum_{n=0}^{\infty}\frac{2n+1}{2a}\pi\left[A_n \exp\left\{\frac{2n+1}{2a}\pi b\right\} - B_n\exp\left\{-\frac{2n+1}{2a}\pi b\right\}\right]\sin\frac{2n+1}{2a}\pi x = 0.
\end{cases}
\]
Using the orthogonality of the system $\left(\sin\frac{2n+1}{2a}\pi x\right)_{n=0}^{\infty}$ over $[0,a]$, we have
\[
\begin{cases}
A_n+B_n = \frac{2}{a}\int_0^a f(x)\sin\frac{2n+1}{2a}\pi x dx \\
A_n \exp\left\{\frac{2n+1}{2a}\pi b\right\} - B_n \exp\left\{-\frac{2n+1}{2a}\pi b\right\} = 0.
\end{cases}
\]
Solving the equations gives us
\[
B_n = \frac{\frac{2}{a}\int_0^af(x)\sin\frac{2n+1}{2a}\pi x dx}{1+\exp\left\{-\frac{2n+1}{a}\pi b\right\}}, \; A_n = \frac{\exp\left\{-\frac{2n+1}{a}\pi b\right\}\frac{2}{a}\int_0^af(x)\sin\frac{2n+1}{2a}\pi x dx}{1+\exp\left\{-\frac{2n+1}{a}\pi b\right\}}.
\]
The result is the same as choosing the following form for $Y_n(y)$:
\[
Y_n(y)=C_n \sinh \frac{2n+1}{2a}\pi y + D_n \cosh \frac{2n+1}{2a} \pi y.
\]
But clearly choice (1) makes the result look messier.

If we take choice (2), the general solution to equation (14.21) will have the form
\[
u(x,y) = \sum_{n=0}^{\infty}\left[A_n\sinh\frac{2n+1}{2a}\pi y + B_n \cosh\frac{2n+1}{2a}\pi(b-y)\right]\sin\frac{2n+1}{2a}\pi x.
\]
Plug this formula into (14.21c), we get
\[
\begin{cases}
\sum_{n=0}^{\infty}B_n\cosh\frac{2n+1}{2a}\pi b \sin\frac{2n+1}{2a}\pi x = f(x) \\
\sum_{n=0}^{\infty}A_n\frac{2n+1}{2a}\pi\cosh\frac{2n+1}{2a}\pi b \sin\frac{2n+1}{2a}\pi x = 0.
\end{cases}
\]
So $A_n=0$ and $B_n = \frac{\frac{2}{a}\int_0^af(x)\sin\frac{2n+1}{2a}\pi xdx}{\cosh\frac{2n+1}{2a}\pi b}$. Thus, choice (2) makes it easier to solve for $A_n$ and $B_n$.
\end{proof}

\noindent 14.5. \begin{proof} Note in equation (14.21), the roles of $x$ and $y$ are symmetric, so the problem can be reduced to solving the following two problems:
\[
\begin{cases}
\frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2} = 0, & 0<x<a, 0<y<b, \\
u|_{x=0}=0,\; \left.\frac{\partial u}{\partial x}\right|_{x=a} = 0, & 0 \le y \le b, \\
u|_{y=0}=\phi(x),\; \left.\frac{\partial u}{\partial y}\right|_{y=b} = \psi(x), & 0 \le x \le a.
\end{cases}
\]
and
\[
\begin{cases}
\frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2} = 0, & 0<x<a, 0<y<b, \\
u|_{x=0}=f(y),\; \left.\frac{\partial u}{\partial x}\right|_{x=a} = g(y), & 0 \le y \le b, \\
u|_{y=0}=0,\; \left.\frac{\partial u}{\partial y}\right|_{y=b} = 0, & 0 \le x \le a.
\end{cases}
\]
These two problems can be solved via separation of variables. Then the solution to the original problem is the sum of the respective solutions to the two new problems.
\end{proof}

\noindent 14.6. \begin{proof}
Suppose $v(x,t)$ is a special solution to the problem
\[
\begin{cases}
\frac{\partial^2 u}{\partial t^2} - a^2\frac{\partial^2 u}{\partial x^2} = f(x,t), & 0<x<l, t>0\\
u|_{x=0}=0,\; u|_{x=l}=0, & t\ge 0.
\end{cases}
\]
Let $w(x,t)=X(x)T(t)$ be a solution to the homogeneous problem
\[
\begin{cases}
\frac{\partial^2 u}{\partial t^2} - a^2\frac{\partial^2 u}{\partial x^2} = 0, & 0<x<l, t>0\\
u|_{x=0}=0,\; u|_{x=l}=0, & t\ge 0.
\end{cases}
\]
Then $w(x,t)$ must have the form of $\sum_{n=1}^{\infty}\left(C_n\sin\frac{n\pi}{l}at+D_n\cos\frac{n\pi}{l}at\right)\sin\frac{n\pi}{l}x$. Let $u(x,t)=v(x,t)+w(x,t)$, then the initial value condition becomes
\[
\begin{cases}
\sum_{n=1}^{\infty}D_n\sin\frac{n\pi}{l}x = -v(x,0)+\phi(x)\\
\sum_{n=1}^{\infty}C_n\frac{n\pi a}{l}\sin\frac{n\pi}{l}x=\left.-\frac{\partial v(x,t)}{\partial t}\right|_{t=0}+\psi(x).
\end{cases}
\]
Using the orthogonality of the eigenfunctions, we have
\[
C_n =\frac{2}{n\pi a}\int_0^l\left(\left.-\frac{\partial v(x,t)}{\partial t}\right|_{t=0}+\psi(x)\right)\sin\frac{n\pi}{l}xdx,\;
D_n = \frac{2}{l}\int_0^l[-v(x,0)+\phi(x)]\sin\frac{n\pi}{l}xdx.
\]
\end{proof}

\noindent 14.7. \begin{proof}
Yes, we can. See \S 14.6 for detailed discussion. Once we obtain a solution $u_1(x,t)$ to equation (14.81) and a solution $u_2(x,t)$ to equation (14.49), $u(x,t) = u_1(x,t)+u_2(x,t)$ will be a solution to the problem under consideration.
\end{proof}

\noindent 14.8. \begin{proof}
No, because we want $\{X_n(x)\}$ to be complete. The boundary condition (14.49b) or (14.74) guarantees the self-adjointness of the differential operator associated with the eigenvalue problem satisfied by $X_n(x)$, which implies the completeness of $\{X_n(x)\}$. See Chapter 18, the discussion after Example 18.6 as well as Proposition 4.
\end{proof}

\noindent 14.9. \begin{proof} $v(x,t)= -\frac{(l-x)^2}{2l}\mu(t) + \frac{x^2}{2l}\nu(t)$. \end{proof}

\noindent 14.10. \begin{proof} $v(x,t) = \frac{l-x}{l}\mu(t) + \frac{x^2}{2l}\nu(t)$. \end{proof}

\noindent 14.11. \begin{proof}

\noindent {\it Step 1}. Find a function $v(x,t)$ satisfying the boundary value condition
\[
v(x,t)|_{x=0} = \mu(t), \; v(x,t)|_{x=l} =\nu(t).
\]

\noindent {\it Step 2}. Find a solution $w(x,t)$ to the problem
\[
\begin{cases}
\frac{\partial^2w}{\partial t^2} - a^2 \frac{\partial^2 w}{\partial x^2} = f(x,t) - \left[\frac{\partial^2v}{\partial t^2} - a^2\frac{\partial^2 v}{\partial x^2}\right], & 0<x<l, t>0 \\
w|_{x=0}=0, \; w|_{x=l} = 0, & t\ge 0 \\
w|_{t=0} = \phi(x)-v(x,0), \; \left.\frac{\partial w}{\partial t}\right|_{t=0} = \psi(x)-\left.\frac{\partial v(x,t)}{\partial t}\right|_{t=0}, & 0\le x \le l.
\end{cases}
\]
This step can be further divided into the following sub-steps.

{\it Step 2.1}. Solve the following eigenvalue problem
\[
\begin{cases}
X''(x)+\lambda X(x) = 0 \\
X(0)=0, \; X(l)=0,
\end{cases}
\]
and obtain a system of orthogonal eigenfunctions $\{X_n(x)\}_{n=1}^{\infty}=\{\sin\frac{n\pi}{l}x\}_{n=1}^{\infty}$.

{\it Step 2.2}. Expand $w(x,t)$ and $f(x,t) - \left[\frac{\partial^2v}{\partial t^2} - a^2\frac{\partial^2 v}{\partial x^2}\right]$ according to eigenfunctions $\{X_n(x)\}$:
\[
\begin{cases}
w(x,t)=\sum_{n=1}^{\infty}T_n(t)X_n(x)\\
f(x,t) - \left[\frac{\partial^2v}{\partial t^2} - a^2\frac{\partial^2 v}{\partial x^2}\right] = \sum_{n=1}^{\infty}g_n(t)X_n(x),
\end{cases}
\]
where $g_n(t) = \frac{2}{l}\int_0^l\left\{f(x,t) - \left[\frac{\partial^2v}{\partial t^2} - a^2\frac{\partial^2 v}{\partial x^2}\right]\right\}\sin\frac{n\pi}{l}xdx $. Plug these formulas back into the partial differential equation for $w(x,t)$:
\[
\sum_{n=1}^{\infty}[T_n''(t)+\lambda_n a^2 T_n(t)]X_n(x) = \sum_{n=1}^{\infty}g_n(t)X_n(x).
\]
Using orthogonality of $\{X_n(x)\}_{n=1}^{\infty}$, we conclude $T_n''(t)+\lambda_na^2T_n(t)=g_n(t)$. Note $w(x,t)$ automatically satisfies the boundary conditions as $X_n(0)=X_n(l)=0$. To make it further satisfy the initial value condition, we need to expand $\phi(x)-v(x,0)$ and $\psi(x)-\left.\frac{\partial v(x,t)}{\partial t}\right|_{t=0}$ according to eigenfunctions $\{X_n(x)\}$:
\[
\begin{cases}
\phi(x)-v(x,0) = \sum_{n=1}^{\infty}a_nX_n(x)\\
\psi(x)-\left.\frac{\partial v(x,t)}{\partial t}\right|_{t=0} = \sum_{n=1}^{\infty}b_nX_n(x),
\end{cases}
\]
where $a_n=\frac{2}{l}\int_0^l[\phi(x)-v(x,0)]\sin\frac{n\pi}{l}xdx$ and $b_n=\frac{2}{l}\int_0^{l}\left[\psi(x)-\left.\frac{\partial v(x,t)}{\partial t}\right|_{t=0}\right]\sin\frac{n\pi}{l}xdx$.
Combined, we can have an equation for $T_n(t)$:
\[
\begin{cases}
T_n''(t)+\lambda_na^2T_n(t)=g_n(t)\\
T_n(0)=a_n, \; T_n'(0)=b_n.
\end{cases}
\]
Once we find $T_n(t)$, the solution to the original PDE can be written as $u(x,t)= v(x,t) + \sum_{n=1}^{\infty}T_n(t)\sin\frac{n\pi}{l}x$.

{\it Step 2.3}. There are many methods to solve the equation
\[
\begin{cases}
T_n''(t)+\lambda_na^2T_n(t)=g_n(t)\\
T_n(0)=a_n, \; T_n'(0)=b_n.
\end{cases}
\]
We apply method of Green's function to refresh our memory of Chapter 10. The Green's function corresponding to the above initial value problem is
\[
\begin{cases}
\frac{d^2}{dt^2}g(t;s)+\lambda_na^2g(t;s)=\delta(t-s), \; t,s > 0\\
g(t;s)|_{t<s} = 0, \; \left.\frac{dg(t;s)}{dt}\right|_{t<s} = 0.
\end{cases}
\]
So $g(t;s)$ has the general form $[A(s)\sin\sqrt{\lambda_n}at + B(s)\cos\sqrt{\lambda_n}at]\eta(t-s)$ where $\eta(\xi)=\begin{cases} 1 & \xi\ge 0 \\ 0 & \xi<0\end{cases}$. By the continuity of $g(t;s)$ at $t=s$ and $\left.\frac{dg(t;s)}{dt}\right|_{s-}^{s+}=1$, we have
\[
\begin{cases}
A(s)\sin\sqrt{\lambda_n}as + B(s)\cos\sqrt{\lambda_n}as = 0 \\
\sqrt{\lambda_n}a[A(s)\cos\sqrt{\lambda_n}as - B(s)\sin\sqrt{\lambda_n}as] = 1,
\end{cases}
\]
which implies
\[
A(s)=\frac{\cos\sqrt{\lambda_n}as}{\sqrt{\lambda_n}a}, \; B(s) = -\frac{\sin\sqrt{\lambda_n}as}{\sqrt{\lambda_n}a}.
\]
So
\begin{eqnarray*}
g(t;s) &=& \frac{1}{\sqrt{\lambda_n}a}[\cos\sqrt{\lambda_n}as\sin\sqrt{\lambda_n}at - \sin\sqrt{\lambda_n}as\cos\sqrt{\lambda_n}at]\eta(t-s) \\
&=& \frac{1}{\sqrt{\lambda_n}a}\sin\sqrt{\lambda_n}a(t-s)\eta(t-s).
\end{eqnarray*}
Therefore by formula (10.60) (recall $\lambda_n=\left(\frac{n\pi}{l}\right)^2$)
\begin{eqnarray*}
T_n(t) &=& \int_0^tg(t;s)g_n(s)ds - \left.\left[a_n\frac{dg(t;s)}{ds}-b_ng(t;s)\right]\right|_{s=0} \\
&=& \frac{l}{n\pi a}\int_0^t\sin\frac{n\pi}{l}a(t-s)g_n(s)ds + a_n\cos\frac{n\pi}{l}at + \frac{b_nl}{n\pi a}\sin\frac{n\pi}{l}at.
\end{eqnarray*}


\noindent {\it Step 3}. $u(x,t)=v(x,t)+\sum_{n=1}^{\infty}T_n(t)\sin\frac{n\pi}{l}x$ is the solution to the original problem, where
\[
T_n(t) = \frac{l}{n\pi a}\int_0^t\sin\frac{n\pi}{l}a(t-s)g_n(s)ds + a_n\cos\frac{n\pi}{l}at + \frac{b_nl}{n\pi a}\sin\frac{n\pi}{l}at
\]
with $a_n=\frac{2}{l}\int_0^l[\phi(x)-v(x,0)]\sin\frac{n\pi}{l}xdx$, $b_n=\frac{2}{l}\int_0^{l}\left[\psi(x)-\left.\frac{\partial v(x,t)}{\partial t}\right|_{t=0}\right]\sin\frac{n\pi}{l}xdx$, and
\[
g_n(t) = \frac{2}{l}\int_0^l\left\{f(x,t) - \left[\frac{\partial^2v}{\partial t^2} - a^2\frac{\partial^2 v}{\partial x^2}\right]\right\}\sin\frac{n\pi}{l}xdx .
\]
\end{proof}



\subsection{章末习题}


\noindent 1. \begin{proof}
The partial differential equation under consideration is
\[
\begin{cases}
\frac{\partial^2 u}{\partial t^2} - a^2 \frac{\partial^2 u}{\partial x^2} = 0, & \\
u|_{x=0}= 0, & \left.\frac{\partial u}{\partial x}\right|_{x=l}=0, \\
u|_{t=0}=\frac{F}{ES}x, & \left.\frac{\partial u}{\partial t}\right|_{t=0}=0.
\end{cases}
\]
The corresponding eigenvalue problem is
\[
\begin{cases}
X''(x)+\lambda X(x) = 0 \\
X(0) = 0, \; X'(l)=0.
\end{cases}
\]
The solution is $X_n(x)=\sin\frac{2n+1}{2l}\pi x$ ($n\ge 0$) with eigenvalue $\lambda_n=\left(\frac{2n+1}{2l}\pi\right)^2$. Expanding $\frac{F}{ES}x$ according to $\{X_n(x)\}_{n=0}^{\infty}$, we have the coefficient
\[
a_n=\frac{2}{l}\int_0^l\frac{F}{ES}x\sin\frac{2n+1}{2l}\pi x dx = \frac{8Fl}{ES\pi^2}\frac{(-1)^{n}}{(2n+1)^2}.
\]
So $T_n(t)$ satisfies the ODE
\[
\begin{cases}
T_n''(t)+\lambda_na^2T_n(t)=0\\
T_n(0)=a_n, \; T_n'(0)=0.
\end{cases}
\]
Therefore $T_n(t)=a_n\cos\sqrt{\lambda_n}at$ and
\[
u(x,t)=\sum_{n=0}^{\infty}T_n(t)X_n(x) = \frac{8Fl}{ES\pi^2}\sum_{n=0}^{\infty}\frac{(-1)^n}{(2n+1)^2}\sin\frac{2n+1}{2l}\pi x \cos\frac{2n+1}{2l}\pi at.
\]
\end{proof}

\noindent 2. \begin{proof} The partial differential equation to be solved is
\begin{eqnarray*}
\begin{cases}
\frac{\partial^2 u}{\partial t^2} - a^2 \frac{\partial^2 u}{\partial x^2} = 0, & \mbox{$0<x<l, t>0$}\\
u|_{x=0}=u|_{x=l}=0, & \mbox{$t\ge 0$}, \\
u|_{t=0} = \frac{h}{c}x1_{\{0\le x \le c\}} + \frac{h}{l-c}(l-x)1_{\{c\le x \le l\}},\; \left.\frac{\partial u}{\partial t}\right|_{t=0} = 0, & \mbox{$0 \le x \le l$}. \\
\end{cases}
\end{eqnarray*}
This is a special case of Exercise Problem 14.11, with $f(x,t)=0$, $\mu(t)=\nu(t)=0$, $\psi(x)=0$, and $\phi(x) = \begin{cases}\frac{h}{c}x & 0\le x \le c \\ \frac{h}{l-c}(l-x) & c \le x \le l \end{cases}$. Applying the formula we obtained for Exercise Problem 14.11, we have $u(x,t)=\sum_{n=1}^{\infty} a_n \cos\frac{n\pi}{l}at \sin\frac{n\pi}{l}x$, where
\begin{eqnarray*}
a_n = \frac{2}{l}\int_0^l\phi(x)\sin\frac{n\pi}{l}xdx = \frac{2}{l}\int_0^c \frac{h}{c}x\sin\frac{n\pi}{l}xdx + \frac{2}{l} \int_c^l\frac{h}{l-c}(l-x)\sin\frac{n\pi}{l}xdx = \frac{2hl^2}{c(l-c)(n\pi)^2}\sin\frac{c}{l}n\pi.
\end{eqnarray*}
Therefore $u(x,t) = \frac{2hl^2}{c(l-c)\pi^2} \sum_{n=1}^{\infty}\frac{1}{n^2}\sin\frac{n\pi}{l}c \cos\frac{n\pi}{l}at \sin\frac{n\pi}{l}x$.
 \end{proof}

\noindent 3. \begin{proof}
The partial differential equation under consideration is
\[
\begin{cases}
\frac{\partial u}{\partial t}-\kappa\frac{\partial^2 u}{\partial x^2}=0\\
u|_{x=0}=u|_{x=l}=0\\
u|_{t=0}=b\frac{x(l-x)}{l^2}.
\end{cases}
\]
Plugging $T(t)X(x)$ into the differential equation, we get $T'(t)X(x)-\kappa T(t)X''(x)=0$. So the eigenvalue problem associated with this PDE is
\[
\begin{cases}
X''(x)+\lambda X(x)=0\\
X(0)=X(l)=0.
\end{cases}
\]
If $\lambda \le 0$, only the trivial solution $X\equiv 0$ exists. If $\lambda > 0$, the nontrivial solutions are $X_n(x)=\sin\sqrt{\lambda_n}x$ with $\lambda_n=\left(\frac{n\pi}{l}\right)^2$ $(n\in \mathbb N)$. Suppose $u(x,t)=\sum_{n=1}^{\infty}T_n(t)X_n(x)$. Then
\[
\begin{cases}
\sum_{n=1}^{\infty}[T_n'(t)+\lambda_n\kappa T_n(t)]X_n(x)=0\\
\sum_{n=1}^{\infty}T_n(0)X_n(x)=b\frac{x(l-x)}{l^2}.
\end{cases}
\]
Using orthogonality of $\{X_n(x)\}_{n=1}^{\infty}$, we conclude
\[
\begin{cases}
T_n'(t)+\lambda_n\kappa T_n(t)=0\\
T_n(0) = a_n,
\end{cases}
\]
where
\begin{eqnarray*}
a_n = \frac{\frac{b}{l^2}\int_0^lx(l-x)X_n(x)dx}{\int_0^lX_n^2(x)dx} = \frac{4b}{n^3\pi^3}[1+(-1)^{n+1}]=\begin{cases}0& \mbox{if $n$ is even}\\ \frac{8b}{n^3\pi^3} & \mbox{if $n$ is odd}.\end{cases}
\end{eqnarray*}
Therefore,
\[
T_n(t)=\begin{cases}
0 & \mbox{if $n$ is even}\\
\frac{8b}{n^3\pi^3}e^{-\lambda_n\kappa t} & \mbox{if $n$ is odd}.
\end{cases}
\]
Finally, we can conclude
\[
u(x,t)=\frac{8b}{\pi^3}\sum_{n=0}^{\infty}\frac{1}{(2n+1)^3}e^{-\left(\frac{2n+1}{l}\pi\right)^2\kappa t} \sin\frac{2n+1}{l}\pi x.
\]
\end{proof}

\noindent 5. \begin{proof}Plug $u(x,y)=X(x)Y(y)$ into the differential equation, we get $\frac{X''(x)}{X(x)}=-\frac{Y''(y)}{Y(y)}$. So the associated eigenvalue problem for $Y(y)$ is
\begin{eqnarray*}
Y''(y)+\lambda Y(y) = 0 \\
Y'(0)=Y'(b)=0.
\end{eqnarray*}
Therefore the nontrivial solutions are $Y_n(y)=\cos\sqrt{\lambda_n}y$ with $\lambda_n=\left(\frac{n\pi}{b}\right)^2$ $(n\in \mathbb N)$. Supposing $u(x,y)=\sum_{n=1}^{\infty}X_n(x)Y_n(y)$, we have
\[
\begin{cases}
\sum_{n=1}^{\infty}[X_n''(x)-\lambda_nX_n(x)]Y_n(y)=0 \\
\sum_{n=1}^{\infty}X_n(0)Y_n(y)= u_0, \; \sum_{n=1}^{\infty}X_n(a)Y_n(y)=u_0\left[3\left(\frac{y}{b}\right)^2-2\left(\frac{y}{b}\right)^3\right].
\end{cases}
\]
Using the orthogonality of $\{Y_n(y)\}_{n=1}^{\infty}$, we have
\[
\begin{cases}
X_n''(x)-\lambda_n X_n(x) = 0 & \\
X_n(0) = a_n, & X_n(a)=b_n,
\end{cases}
\]
where
\[
a_n = \frac{\int_0^bu_0Y_n(y)dy}{\int_0^bY_n^2(y)dy}=0, \; b_n =\frac{\int_0^bu_0\left[3\left(\frac{y}{b}\right)^2-2\left(\frac{y}{b}\right)^3\right]Y_n(y)dy}{\int_0^bY_n^2(y)dy}=\begin{cases}0 & \mbox{if $n$ is even}\\ -\frac{48u_0}{\lambda_n^2b^4} & \mbox{if $n$ is odd}.\end{cases}
\]
Solving the ODE for $X_n(x)$, we get
\[
X_n(x)=\begin{cases}
0 & \mbox{if $n$ is even}\\
-\frac{48u_0}{(n\pi)^4\sinh\frac{n\pi}{b}a} \sinh\frac{n\pi}{b}x& \mbox{if $n$ is odd}.
\end{cases}
\]
Combined, we conclude
\[
u(x,y) = -\frac{48u_0}{\pi^4} \sum_{n=0}^{\infty}\frac{1}{(2n+1)^4}\frac{\sinh\frac{2n+1}{b}\pi x}{\sinh\frac{2n+1}{b}\pi a}\cos\frac{2n+1}{b}\pi y.
\]
\end{proof}

\noindent 6. \begin{proof}We apply the formula developed in Exercise Problem 14.11. In this problem's context, we have $\phi(x)=\psi(x)=0$, $v(x,t)=0$, and $f(x,t)=bx(l-x)$. So
\begin{eqnarray*}
g_n(t) &=& \frac{2}{l}\int_0^lbx(l-x)\sin\frac{n\pi}{l}xdx = \frac{4bl^2}{(n\pi)^3}[(-1)^{n+1}+1]=\begin{cases}
0 & \mbox{if $n$ is even}\\
\frac{8bl^2}{(n\pi)^3} & \mbox{if $n$ is odd}.
\end{cases}
\end{eqnarray*}
This implies
\[
T_n(t)=\frac{l}{n\pi a}\int_0^t\sin\frac{n\pi}{l}a(t-s)ds \cdot \frac{4bl^2}{(n\pi)^3}[(-1)^{n+1}+1] = \frac{4bl^4}{(n\pi)^5a^2}[(-1)^{n+1}+1]\left(1-\cos\frac{n\pi}{l}at\right).
\]
Therefore
\[
u(x,t)=\sum_{n=1}^{\infty}T_n(t)\sin\frac{n\pi}{l}x = \frac{8bl^4}{\pi^5a^2}\sum_{n=0}^{\infty}\frac{1}{(2n+1)^5}\sin\frac{2n+1}{l}\pi x\left(1-\cos\frac{2n+1}{l}\pi at\right).
\]

\begin{remark}
The textbook's answer has a $\pi$ missing in $\sin\frac{2n+1}{l}\pi x$.
\end{remark}
\end{proof}

\noindent 7. (1) \begin{proof} Let $u(x,y)=X(x)Y(y)$. Then the original equation becomes
\[
\begin{cases}
X''(x)Y(y) + X(x)Y''(y) = -2\\
X(0)Y(y)=X(a)Y(y)=0 \\
X(x)Y(\frac{b}{2})=X(x)Y(-\frac{b}{2})=0.
\end{cases}
\]
So we can consider the eigenvalue problem
\[
\begin{cases}
X''(x)+\lambda X(x) = 0\\
X(0) = X(a) = 0,
\end{cases}
\]
which has the solution $X_n(x)=\sin\sqrt{\lambda_n}x$ with $\lambda_n= \left(\frac{n\pi}{a}\right)^2$ ($n\in \mathbb N$). Suppose $u(x,y)=\sum_{n=1}^{\infty}X_n(x)Y_n(y)$. Then by the orthogonality of $\{X_n(x)\}_{n=1}^{\infty}$, we have
\[
\begin{cases}
Y_n''(y)-\lambda_nY_n(y) = \frac{4}{n\pi}[(-1)^n-1]\\
Y_n(-\frac{b}{2})=Y_n(\frac{b}{2})= 0.
\end{cases}
\]
To find the expression for $Y_n(y)$, we note the Green's function associated with $Y_n(y)$ satisfies
\[
\begin{cases}
\frac{d^2}{dy^2}g(y;t)-\lambda_n g(y;t) = \delta(y-t)\\
g(-\frac{b}{2}; t)=g(\frac{b}{2}; t) = 0.
\end{cases}
\]
Therefore
\[
g(y;t) =
\begin{cases}
 A(t) \sinh\sqrt{\lambda_n}(y+\frac{b}{2}), & -\frac{b}{2} < y < t\\
 B(t) \sinh\sqrt{\lambda_n}(y-\frac{b}{2}), & t < y < \frac{b}{2},
\end{cases}
\]
where the coefficient function $A(t)$ and $B(t)$ are determined by the continuity property of $g(y;t)$ at $y=t$:
\[
\begin{cases}
A(t)\sinh\sqrt{\lambda_n}(t+\frac{b}{2})=B(t)\sinh\sqrt{\lambda_n}(t-\frac{b}{2})\\
\sqrt{\lambda_n}B(t)\cosh\sqrt{\lambda_n}(t-\frac{b}{2}) - \sqrt{\lambda_n}A(t)\cosh\sqrt{\lambda_n}(t+\frac{b}{2})=1.
\end{cases}
\]
Solving this equation gives
\[
\begin{cases}
A(t) = \frac{1}{\sqrt{\lambda_n}\sinh\sqrt{\lambda_n}b}\sinh\sqrt{\lambda_n}(t-\frac{b}{2})\\
B(t) = \frac{1}{\sqrt{\lambda_n}\sinh\sqrt{\lambda_n}b}\sinh\sqrt{\lambda_n}(t+\frac{b}{2}).
\end{cases}
\]
Therefore
\[
g(y;t) = \frac{\sinh\sqrt{\lambda_n}(t-\frac{b}{2})\sinh\sqrt{\lambda_n}(y+\frac{b}{2})1_{\{-\frac{b}{2}<y<t\}} + \sinh\sqrt{\lambda_n}(t+\frac{b}{2})\sinh\sqrt{\lambda_n}(y-\frac{b}{2})1_{\{t<y<\frac{b}{2}\}}}{\sqrt{\lambda_n}\sinh\sqrt{\lambda_n}b}
\]
and
\begin{eqnarray*}
\int_{-\infty}^{\infty}g(y;t)dt &=& \int_{-\frac{b}{2}}^y\frac{\sinh\sqrt{\lambda_n}(y-\frac{b}{2})}{\sqrt{\lambda_n}\sinh\sqrt{\lambda_n}b}\sinh\sqrt{\lambda_n}(t+\frac{b}{2})dt
+ \int_y^{\frac{b}{2}}\frac{\sinh\sqrt{\lambda_n}(y+\frac{b}{2})}{\sqrt{\lambda_n}\sinh\sqrt{\lambda_n}b}\sinh\sqrt{\lambda_n}(t-\frac{b}{2})dt \\
&=& \frac{\sinh\sqrt{\lambda_n}(y-\frac{b}{2})}{\lambda_n\sinh\sqrt{\lambda_n}b}\left[\cosh\sqrt{\lambda_n}(y+\frac{b}{2})-1\right]
+ \frac{\sinh\sqrt{\lambda_n}(y+\frac{b}{2})}{\lambda_n\sinh\sqrt{\lambda_n}b}\left[1-\cosh\sqrt{\lambda_n}(y-\frac{b}{2})\right] \\
&=&\frac{-\sinh\sqrt{\lambda_n}b - \sinh\sqrt{\lambda_n}(y-\frac{b}{2}) + \sinh\sqrt{\lambda_n}(y+\frac{b}{2})}{\lambda_n\sinh\sqrt{\lambda_n}b} \\
&=& \frac{-\sinh\sqrt{\lambda_n}b + 2\sinh\frac{\sqrt{\lambda_n}b}{2}\cosh\sqrt{\lambda_n}y }{\lambda_n\sinh\sqrt{\lambda_n}b} \\
&=& -\frac{1}{\lambda_n} + \frac{\cosh\sqrt{\lambda_n}y}{\lambda_n \cosh\frac{\sqrt{\lambda_n}b}{2}}.
\end{eqnarray*}
Hence
\begin{eqnarray*}
Y_n(y)&=&\frac{4}{n\pi}[(-1)^{n}-1]\int_{-\infty}^{\infty}g(y;t)dt = \frac{4}{n\pi}[(-1)^{n+1}+1]\left[\frac{a^2}{(n\pi)^2} - \frac{a^2\cosh\frac{n\pi y}{a}}{(n\pi)^2\cosh\frac{n\pi b}{2a}}\right]\\
&=&
\begin{cases}
\frac{8a^2}{(n\pi)^3} \left[1-\frac{\cosh\frac{n\pi y}{a}}{\cosh\frac{n\pi b}{2a}}\right]& \mbox{if $n$ is odd,} \\
0 & \mbox{if $n$ is even.}
\end{cases}
\end{eqnarray*}
Combined, we have
\[
u(x,y)=\sum_{n=1}^{\infty}X_n(x)Y_n(y) = \frac{8a^2}{\pi^3}\sum_{n=0}^{\infty}\frac{1}{(2n+1)^3}\left[1-\frac{\cosh\frac{2n+1}{a}\pi y}{\cosh\frac{2n+1}{2a}\pi b}\right]\sin\frac{2n+1}{a}\pi x.
\]
\end{proof}

(2) \begin{proof}
Similar to part (1), we find the solution to the eigenvalue problem
\[
\begin{cases}
X''(x)+\lambda X(x)= 0 \\
X(0)=X(a)=0
\end{cases}
\]
as $X_n(x)=\sin\sqrt{\lambda_n}x$ with $\lambda_n=\left(\frac{n\pi}{a}\right)^2$ $(n\in\mathbb N)$. Expanding $u(x,y)$ according to $\{X_n(x)\}_{n=1}^{\infty}$, we have
\begin{eqnarray*}
u(x,y)&=& \sum_{n=1}^{\infty} Y_n(y)X_n(x),
\end{eqnarray*}
where
\begin{eqnarray*}
Y_n(y) = \frac{\int_0^ax^2yX_n(x)dx}{\int_0^aX_n^2(x)dx} = \frac{2}{a}\int_0^ax^2yX_n(x)dx = 2y\left\{\frac{a^2(-1)^{n+1}}{n\pi} + \frac{2a^2}{(n\pi)^3}[(-1)^n-1]\right\}.
\end{eqnarray*}
So the boundary value problem associated with $Y_n(y)$ becomes
\[
\begin{cases}
Y_n''(y)-\lambda_n Y_n(y) = 2y\left\{\frac{a^2(-1)^{n+1}}{n\pi} + \frac{2a^2}{(n\pi)^3}[(-1)^n-1]\right\} \\
Y_n(-\frac{b}{2}) = Y_n(\frac{b}{2}) = 0.
\end{cases}
\]
The Green's function associated with $Y_n(y)$ is the same as that of part (1):
\[
g(y;t) = \frac{\sinh\sqrt{\lambda_n}(t-\frac{b}{2})\sinh\sqrt{\lambda_n}(y+\frac{b}{2})1_{\{-\frac{b}{2}<y<t\}} + \sinh\sqrt{\lambda_n}(t+\frac{b}{2})\sinh\sqrt{\lambda_n}(y-\frac{b}{2})1_{\{t<y<\frac{b}{2}\}}}{\sqrt{\lambda_n}\sinh\sqrt{\lambda_n}b}.
\]
Therefore
\begin{eqnarray*}
Y_n(y) &=& \int_{-\infty}^{\infty}2t\left\{\frac{a^2(-1)^{n+1}}{n\pi} + \frac{2a^2}{(n\pi)^3}[(-1)^n-1]\right\} g(y;t)dt \\
&=& \frac{2a^2}{n\pi}\left\{(-1)^{n+1}+\frac{2}{(n\pi)^2}[(-1)^n-1]\right\}\int_{-\infty}^{\infty}tg(y;t)dt.
\end{eqnarray*}
Note
\begin{eqnarray*}
\int_{-\infty}^{\infty}tg(y;t)dt &=& \frac{\sinh\sqrt{\lambda_n}(y+\frac{b}{2})}{\sqrt{\lambda_n}\sinh\sqrt{\lambda_n}b} \int_y^{\frac{b}{2}} t\sinh\sqrt{\lambda_n}(t-\frac{b}{2})dt + \frac{\sinh\sqrt{\lambda_n}(y-\frac{b}{2})}{\sqrt{\lambda_n}\sinh\sqrt{\lambda_n}b} \int_{-\frac{b}{2}}^y t\sinh\sqrt{\lambda_n}(t+\frac{b}{2})dt \\
&=& \frac{\sinh\sqrt{\lambda_n}(y+\frac{b}{2})}{\sqrt{\lambda_n}\sinh\sqrt{\lambda_n}b} \left[\frac{b}{2\sqrt{\lambda_n}} - \frac{y}{\sqrt{\lambda_n}} \cosh\sqrt{\lambda_n}(y-\frac{b}{2}) + \frac{1}{\lambda_n}\sinh\sqrt{\lambda_n}(y-\frac{b}{2})\right] + \\
& & \frac{\sinh\sqrt{\lambda_n}(y-\frac{b}{2})}{\sqrt{\lambda_n}\sinh\sqrt{\lambda_n}b} \left[\frac{b}{2\sqrt{\lambda_n}} + \frac{y\cosh\sqrt{\lambda_n}(y+\frac{b}{2})}{\sqrt{\lambda_n}} - \frac{1}{\lambda_n}\sinh\sqrt{\lambda_n}(y+\frac{b}{2})\right] \\
&=& \frac{b[\sinh\sqrt{\lambda_n}(y+\frac{b}{2})+\sinh\sqrt{\lambda_n}(y-\frac{b}{2})]}{2\lambda_n\sinh\sqrt{\lambda_n}b} - \frac{y\sinh\sqrt{\lambda_n}b}{\lambda_n\sinh\sqrt{\lambda_n}b} \\
&=& \frac{b}{2\lambda_n} \frac{\sinh\sqrt{\lambda_n}y}{\sinh\frac{\sqrt{\lambda_n}b}{2}} - \frac{y}{\lambda_n}.
\end{eqnarray*}
Therefore
\begin{eqnarray*}
Y_n(y) &=& \frac{2a^2}{n\pi}\left\{(-1)^{n+1}+\frac{2}{(n\pi)^2}[(-1)^n-1]\right\}\left(\frac{b}{2\lambda_n} \frac{\sinh\sqrt{\lambda_n}y}{\sinh\frac{\sqrt{\lambda_n}b}{2}} - \frac{y}{\lambda_n}\right) \\
&=& \begin{cases}
-\frac{2a^2}{n\pi \lambda_n} \left(y - \frac{b}{2} \frac{\sinh\sqrt{\lambda_n}y}{\sinh\frac{\sqrt{\lambda_n}b}{2}}\right) + \frac{4}{(n\pi)^2} \frac{2a^2}{n\pi \lambda_n} \left(y - \frac{b}{2} \frac{\sinh\sqrt{\lambda_n}y}{\sinh\frac{\sqrt{\lambda_n}b}{2}}\right)& \mbox{if $n$ is odd,} \\
\frac{2a^2}{n\pi \lambda_n} \left(y - \frac{b}{2} \frac{\sinh\sqrt{\lambda_n}y}{\sinh\frac{\sqrt{\lambda_n}b}{2}}\right)& \mbox{if $n$ is even.}
\end{cases}
\end{eqnarray*}
Hence
\begin{eqnarray*}
& & u(x,y) \\&=& \sum_{n=1}^{\infty}Y_n(y) X_n(x)\\
&=& \frac{2a^4}{\pi^3}\sum_{n=1}^{\infty}\frac{(-1)^n}{n^3} \left(y-\frac{b}{2}\frac{\sinh\frac{n\pi}{a}y}{\sinh\frac{n\pi}{2a}b}\right)\sin\frac{n\pi}{a} x
+ \frac{8a^4}{\pi^5}\sum_{n=0}^{\infty}\frac{1}{(2n+1)^5}\left(y-\frac{b}{2}\frac{\sinh\frac{2n+1}{a}\pi y}{\sinh\frac{2n+1}{2a}\pi b}\right)\sin\frac{2n+1}{a}\pi x.
\end{eqnarray*}

\begin{remark}
The above solution differs from the textbook's answer by a sign. Check.
\end{remark}
\end{proof}

\noindent 9. \begin{proof}
We choose $v(x,t)=\cos\frac{\pi}{l}x\cos\frac{\pi}{l}at$ and suppose $u(x,t)=v(x,t)+w(x,t)$. Then $w(x,t)$ satisfies the equation
\[
\begin{cases}
\frac{\partial^2w}{\partial t^2} - a^2\frac{\partial^2 w}{\partial x^2} = 0  & \\
w|_{x=0}=0, & \left.\frac{\partial w}{\partial x}\right|_{x=l} = 0 \\
w(x,0)=0, & \left. \frac{\partial w}{\partial t} \right|_{t=0} = \sin\frac{\pi}{2l}x.
\end{cases}
\]
Solving the associated eigenvalue problem
\[
\begin{cases}
X''(x)+\lambda X(x) = 0 \\
X(0)=0, X'(l) = 0
\end{cases}
\]
gives the eigenfunctions $\{X_n(x)\}=\{\sin\sqrt{\lambda_n}x\}$, with $\lambda_n=\left(\frac{\pi+2n\pi}{2l}\right)^2$. Suppose $w(x,t)=\sum_{n=0}^{\infty}T_n(t)X_n(x)$, then we must have
\[
\begin{cases}
\sum_{n=0}^{\infty}[T_n''(t)+\lambda_n a^2T_n(t)]X_n(x) = 0 \\
\sum_{n=0}^{\infty}T_n(0)X_n(x)=0, \; \sum_{n=0}^{\infty}T_n'(0)X_n(x)=X_0(x).
\end{cases}
\]
Therefore $T_n(t)\equiv 0$ for $n\ge 1$ and $T_0(t)=\frac{2l}{a\pi}\sin\frac{\pi}{2l}at$. Combined, we conclude
$u(x,t)=\cos\frac{\pi}{l}x\cos\frac{\pi}{l}at + \frac{2l}{\pi a}\sin\frac{\pi}{2l}x\sin\frac{\pi}{2l}at$.
\end{proof}

\noindent 10. \begin{proof}
We choose $v(x,t)=\frac{l-x}{l}Ae^{-\alpha^2\kappa t} + \frac{x}{l}Be^{-\beta^2\kappa t}$ and suppose $u(x,t)=v(x,t)+w(x,t)$. Note $v(0,t)=Ae^{-\alpha^2\kappa t}$ and $v(l,t)=Be^{-\beta^2\kappa t}$, we have the following equation for $w(x,t)$:
\[
\begin{cases}
\frac{\partial w}{\partial t} - \kappa \frac{\partial^2 w}{\partial x^2} = -\left[\frac{\partial v}{\partial t} - \kappa \frac{\partial^2 v}{\partial x^2}\right] = \frac{l-x}{l}A\alpha^2\kappa e^{-\alpha^2\kappa t} + \frac{x}{l}B\beta^2\kappa e^{-\beta^2\kappa t}=f(x,t)\\
w|_{x=0}=w|_{x=l}= 0 \\
w(x,0)=-v(x,0)=-\frac{l-x}{l}A -\frac{x}{l}B=\phi(x).
\end{cases}
\]
Solving the eigenvalue problem
\[
\begin{cases}
X''(x)+\lambda X(x) = 0 \\
X(0)=X(l)=0
\end{cases}
\]
gives the eigenfunctions $\{X_n(x)\}_{n=1}^{\infty}$, where $X_n(x)=\sin\sqrt{\lambda_n}x$ with $\lambda_n=\left(\frac{n\pi}{l}\right)^2$. Expand $f(x,t)$ according to $\{X_n(x)\}$, we have $f(x,t)=\sum_{n=1}^{\infty}g_n(t)X_n(x)$ with
\begin{eqnarray*}
g_n(t) &=& \frac{2}{l} \int_0^l\left[\frac{l-x}{l}A\alpha^2\kappa e^{-\alpha^2\kappa t} + \frac{x}{l} B\beta^2\kappa e^{-\beta^2\kappa t}\right]X_n(x)dx.
\end{eqnarray*}
Note $\int_0^lX_n(x)dx = \frac{1}{-\sqrt{\lambda_n}}\cos\sqrt{\lambda_n}x|_{x=0}^l = \frac{(-1)^n-1}{-\frac{n\pi}{l}}= \frac{l[(-1)^{n+1}+1]}{n\pi}$ and
\[
\int_0^lxX_n(x)dx = \frac{1}{-\sqrt{\lambda_n}}\int_0^lx d\cos\sqrt{\lambda_n}x=\frac{l(-1)^n}{-\frac{n\pi}{l}}=\frac{l^2}{n\pi}(-1)^{n+1}.
\]
Therefore
\begin{eqnarray*}
g_n(t) &=& \frac{2}{l} \int_0^l\left[A\alpha^2\kappa e^{-\alpha^2\kappa t}X_n(x) + \frac{B\beta^2\kappa e^{-\beta^2\kappa t}-A\alpha^2\kappa e^{-\alpha^2\kappa t}}{l}xX_n(x)\right]dx \\
&=& \frac{2\kappa}{n\pi}A\alpha^2e^{-\alpha^2\kappa t} + \frac{2\kappa}{n\pi}B\beta^2e^{-\beta^2\kappa t}(-1)^{n+1}.
\end{eqnarray*}
Expand $\phi(x)$ according to $\{X_n(x)\}_{n=1}^{\infty}$, we have $\phi(x)=\sum_{n=1}^{\infty}a_n X_n(x)$ where
\begin{eqnarray*}
a_n=\frac{2}{l}\int_0^l\left[-A+\frac{A-B}{l}x\right]X_n(x)dx = -\frac{2A}{n\pi} - \frac{2B}{n\pi}(-1)^{n+1}.
\end{eqnarray*}
If we let $w(x,t)=\sum_{n=1}^{\infty}T_n(t)X_n(x)$, then
\[
\begin{cases}
\sum_{n=1}^{\infty}[T_n'(t)+\kappa\lambda_nT_n(t)]X_n(x)=\sum_{n=1}^{\infty}g_n(t)X_n(x)\\
\sum_{n=1}^{\infty}T_n(0)X_n(x)=\sum_{n=1}^{\infty}a_nX_n(x).
\end{cases}
\]
By orthogonality of $\{X_n(x)\}$, we have
\[
\begin{cases}
T_n'(t)+\kappa \lambda_n T_n(t) = g_n(t) \\
T_n(0)= a_n,
\end{cases}
\]
which implies
\begin{eqnarray*}
T_n(t) &=& a_ne^{-\kappa \lambda_n t} + e^{-\kappa \lambda_n t}\int_0^tg_n(s)e^{\kappa \lambda_n s}ds \\
&=& \frac{2\kappa}{n\pi}\left[A\alpha^2\frac{e^{-\alpha^2\kappa t}-e^{-\kappa\lambda_n t}}{-\alpha^2\kappa + \kappa\lambda_n} + B\beta^2(-1)^{n+1}\frac{e^{-\beta^2\kappa t}-e^{-\kappa\lambda_n t}}{-\beta^2\kappa +\kappa\lambda_n}\right]+a_ne^{-\kappa \lambda_n t}.
\end{eqnarray*}
Combined, we can write the solution $u(x,t)$ to the original problem as
\[
\left[\frac{l-x}{l}Ae^{-\alpha^2\kappa t} + \frac{x}{l}Be^{-\beta^2\kappa t}\right] + \sum_{n=1}^{\infty}\left\{\frac{2\kappa}{n\pi}\left[A\alpha^2\frac{e^{-\alpha^2\kappa t}-e^{-\kappa\left(\frac{n\pi}{l}\right)^2 t}}{-\alpha^2\kappa + \kappa\left(\frac{n\pi}{l}\right)^2} + B\beta^2(-1)^{n+1}\frac{e^{-\beta^2\kappa t}-e^{-\kappa\left(\frac{n\pi}{l}\right)^2 t}}{-\beta^2\kappa +\kappa\left(\frac{n\pi}{l}\right)^2}\right]+a_ne^{-\kappa\left(\frac{n\pi}{l}\right)^2 t}\right\}\sin\frac{n\pi}{l}x.
\]

\begin{remark}
If we choose $v(x,t)=A\frac{\sin\alpha(l-x)}{\sin\alpha l}e^{-\alpha^2\kappa t} + B \frac{\sin\beta x}{\sin\beta l}e^{-\beta^2\kappa t}$, then it's easy to verify $\frac{\partial v}{\partial t}-\kappa \frac{\partial^2 v}{\partial x^2}=0$. So this choice of $v(x,t)$ simultaneously homogenizes boundary condition and the differential equation, which makes the solution much easier.
\end{remark}
\end{proof}

\section{正交曲面坐标系}

\subsection{正文练习题}

\noindent 15.1. \begin{proof}
Suppose $\lambda_m$ and $\lambda_n$ are two distinct eigenvalues of the eigenvalue problem (15.41). Let $\Phi_m$ be an eigenfunction associated with
$\lambda_m$ and $\Phi_n$ an eigenfunction associated with $\lambda_n$. Then
\[
\Phi_m''(\phi)+\lambda_m\Phi_m(\phi)=0, \; \Phi_n''(\phi)+\lambda_n\Phi_n(\phi)=0.
\]
Therefore
\begin{eqnarray*}
0 &=& \left[\int_0^{2\pi}\Phi_m''(\phi)\Phi_n(\phi)d\phi + \lambda_m \int_0^{2\pi}\Phi_m(\phi)\Phi_n(\phi)d\phi \right]
- \left[\int_0^{2\pi}\Phi_n''(\phi)\Phi_m(\phi)d\phi + \lambda_n \int_0^{2\pi}\Phi_n(\phi)\Phi_m(\phi)d\phi \right] \\
&=& \left.\left[\Phi_n(\phi)\Phi'_m(\phi)-\Phi_m(\phi)\Phi'_n(\phi)\right]\right|_{0}^{2\pi} + (\lambda_m -\lambda_n)\int_0^{2\pi}\Phi_m(\phi)\Phi_n(\phi)d\phi \\
&=&  (\lambda_m -\lambda_n)\int_0^{2\pi}\Phi_m(\phi)\Phi_n(\phi)d\phi.
\end{eqnarray*}
This implies $\Phi_m$ and $\Phi_n$ are orthogonal to each other.
\end{proof}

\noindent 15.2. \begin{proof}
\[
\int_0^{2\pi}\sin m\phi \cos m \phi d\phi = \frac{1}{2}\int_0^{2\pi}\sin(2m\phi)d\phi =-\frac{1}{4m}\cos(2m\phi)|_{0}^{2\pi} = 0.
\]
\end{proof}

\noindent 15.3. \begin{proof}
\[
\int_0^{2\pi}e^{im\phi}(e^{-im\phi})^*d\phi = \int_0^{2\pi} e^{2im\phi}d\phi = \frac{e^{2im\cdot 2\pi}- e^{2im\cdot 0}}{2im} = 0.
\]
\end{proof}

\subsection{章末习题}

\noindent 3. \begin{proof}
By formula (15.48), the general solution to the equation is
\[
u(r,\phi)=\alpha_0+\beta_0\ln r + \sum_{m=1}^{\infty}(C_{m1}r^m + D_{m1}r^{-m})\sin m\phi + \sum_{m=1}^{\infty}(C_{m2}r^m+D_{m2}r^{-m})\cos m\phi.
\]
Suppose the expansion of $f(\phi)$ and $g(\phi)$ for the given eigenfunctions $\{1, \sin m\phi, \cos m\phi\}_{m=1}^{\infty}$ are, respectively,
\[
f(\phi) = A_0+\sum_{m=1}^{\infty}(A_m\cos m\phi + B_m\sin m\phi),\; g(\phi) = C_0 + \sum_{m=1}^{\infty}(C_m\cos m\phi + D_m \sin m\phi).
\]
The boundary conditions $u(a,\phi)=f(\phi)$ and $u(b,\phi)=g(\phi)$ gives the equations
\[
\begin{cases}
\alpha_0 + \beta_0\ln a = A_0 \\
\alpha_0 + \beta_0\ln b = C_0,
\end{cases}
\;
\begin{cases}
C_{m1}a^m + D_{m1}a^{-m} = B_m \\
C_{m1}b^m + D_{m1}b^{-m} = D_m,
\end{cases}
\;
\begin{cases}
C_{m2}a^m + D_{m2}a^{-m} = A_m \\
C_{m2}b^m + D_{m2}b^{-m} = C_m.
\end{cases}
\]
Solving these equations gives us the expression of $u(r,\phi)$ as
\begin{eqnarray*}
u(r,\phi) &=& A_0\frac{\ln b -\ln r}{\ln b-\ln a} + \sum_{m=1}^{\infty}\frac{\left(\frac{r}{b}\right)^m - \left(\frac{b}{r}\right)^m}{\left(\frac{a}{b}\right)^m - \left(\frac{b}{a}\right)^m}(A_m\cos m\phi + B_m \sin m \phi) \\
& & -C_0\frac{\ln a - \ln r}{\ln b - \ln a} - \sum_{m=1}^{\infty}\frac{\left(\frac{r}{a}\right)^m - \left(\frac{a}{r}\right)^m}{\left(\frac{a}{b}\right)^m - \left(\frac{b}{a}\right)^m}(C_m\cos m\phi + D_m \sin m\phi).
\end{eqnarray*}
\end{proof}

\noindent 4. \begin{proof} All the subproblems of this exercise share the same feature. So we first deal with a general problem:
\[
\begin{cases}
\frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2} = f(x,y), & x^2+y^2 < a^2,\\
u|_{x^2+y^2=a^2} = 0.
\end{cases}
\]
Similar to the calculations carried out in \S 15.4, we can use polar coordinate to transform the above problem into the following one:
\[
\begin{cases}
\frac{1}{r}\frac{\partial}{\partial r}\left(r\frac{\partial u}{\partial r}\right) + \frac{1}{r^2}\frac{\partial^2 u}{\partial \phi^2} = f(r, \phi), & 0<r<a, \; 0<\phi<2\pi,\\
u(r,\phi)|_{\phi=0}=u(r,\phi)|_{\phi=2\pi}, & 0<r<a,\\
\left.\frac{\partial u(r,\phi)}{\partial \phi}\right|_{\phi=0}=\left.\frac{\partial u(r,\phi)}{\partial \phi}\right|_{\phi=2\pi}, & 0<r<a, \\
u|_{r=a} = 0, & 0<\phi<2\pi, \\
u(r,\phi)|_{r=0}\; \mbox{is bounded}, & 0<\phi<2\pi.
\end{cases}
\]
For the eigenvalue problem
\[
\begin{cases}
\frac{d^2\Phi}{d\phi^2}+\lambda\Phi=0\\
\Phi(0)=\Phi(2\pi)\\
\Phi'(0)=\Phi'(2\pi),
\end{cases}
\]
we have eigenfunctions $\{1, \sin m\phi, \cos m\phi\}_{m=1}^{\infty}$ with eigenvalues $\{m^2\}_{m=0}^{\infty}$. Expand $u(r,\phi)$ according to this system of eigenfunctions, we obtain
\[
u(r,\phi)=A(r)+\sum_{m=1}^{\infty}B_m(r)\sin m \phi + \sum_{m=1}^{\infty} C_m(r) \cos m \phi.
\]
Expand also $f(r,\phi)$ according to the above system of eigenfunctions, we have
\[
f(r,\phi) = a(r) + \sum_{m=1}^{\infty}b_m(r)\sin m\phi + \sum_{m=1}^{\infty}c_m(r)\cos m \phi.
\]
Then the equation $\frac{1}{r}\frac{\partial}{\partial r}\left(r\frac{\partial u}{\partial r}\right) + \frac{1}{r^2}\frac{\partial^2 u}{\partial \phi^2} = f(r, \phi)$ becomes
\begin{eqnarray*}
& & \frac{1}{r}\frac{\partial}{\partial r}\left(r\frac{dA(r)}{dr}\right) + \sum_{m=1}^{\infty} \left[\frac{1}{r}\frac{\partial}{\partial r}\left(r\frac{dB_m(r)}{dr}\right) - \frac{m^2}{r^2} B_m(r) \right]\sin m\phi + \sum_{m=1}^{\infty} \left[\frac{1}{r}\frac{\partial}{\partial r}\left(r\frac{dC_m(r)}{dr}\right) - \frac{m^2}{r^2} C_m(r) \right]\cos m\phi\\
&=& a(r) + \sum_{m=1}^{\infty}b_m(r)\sin m\phi + \sum_{m=1}^{\infty}c_m(r)\cos m \phi.
\end{eqnarray*}
Therefore, we have the following equations
\[
\begin{cases}
\frac{1}{r}A'(r) + A''(r) = a(r), & \\
A(a)=0,
\end{cases}\; \mbox{or equivalently} \;
\begin{cases}
[rA'(r)]' = ra(r), & \\
A(a)=0,
\end{cases}
\]
\[
\begin{cases}
\frac{1}{r}B_m'(r) + B_m''(r) - \frac{m^2}{r^2}B_m(r) = b_m(r), & m\ge 1\\
B_m(a) = 0,
\end{cases}
\]
\[
\begin{cases}
\frac{1}{r}C_m'(r) + C_m''(r) - \frac{m^2}{r^2}C_m(r) = c_m(r), & m\ge 1\\
C_m(a) = 0.
\end{cases}
\]

\medskip

(1) $f(r,\phi)=-4$. In this case, $a(r) = -4$ and $b_m(r)=c_m(r)=0$ ($m\in \mathbb N$). It's easy to see $A(r)=-r^2+a^2$. By Theorem 6.3 and the boundedness of $u(r,\phi)$ at $r=0$, we conclude $B_m(r)$ and $C_m(r)$ are analytic in $\{x^2+y^2<a^2\}$ ($m\in \mathbb N$). Since $b_m(r)=c_m(r)=0$, we can deduce $B_m(r)=C_m(r)=0$. Combined, we conclude $u(r,\phi)=a^2-r^2$.

\medskip

(2) $f(r,\phi)=-4r\sin\phi$. So $a(r)=0$, $b_1(r)=-4r$, $b_n(r)=0$ $(n\ge 2)$, and $c_m(r)=0$ ($m\in \mathbb N$). Then it's easy to see $A(r)=B_n(r)=C_m(r)=0$ ($m\in \mathbb N, \; n\ge 2$) by an argument similar to  that of part (1). To find $B_1(r)$, we consider a general power series $\varphi(r)=\sum_{n=0}^{\infty}\alpha_nr^n$. Then
\[
\frac{1}{r}\varphi'(r) + \varphi''(r) - \frac{m^2}{r^2}\varphi(r) = -\frac{\alpha_0m^2}{r^2} + \frac{\alpha_1(1-m^2)}{r} + \sum_{n=0}^{\infty}\alpha_{n+2}[(n+2)^2-m^2]r^n.
\]
By letting $m=1$ and the above formula equal to $-4r$, we conclude $\alpha_0=0$, $\alpha_1$ is arbitrary, $\alpha_2=0$, $\alpha_3=-\frac{1}{2}$ and $\alpha_n=0$ for $n\ge 4$. So $B_1(r) = \alpha_1r-\frac{1}{2}r^3$. By the boundary condition $B_1(a)=0$, we have $\alpha_1=\frac{1}{2}a^2$. So $B_1(r)=\frac{1}{2}(a^2-r^2)r$ and $u(r,\phi) = \frac{1}{2}(a^2-r^2)r\sin\phi$.

\medskip

(3) Similar to the argument in part (1) and (2), we have $u(r,\phi)=\frac{1}{6}(a^2-r^2)r^2\sin2\phi$.

\medskip

(4) Similar to the argument in part (1) and (2), we have $u(r,\phi)=\frac{1}{2}(a^2-r^2)r(\sin\phi+\cos\phi)$.
\end{proof}

\section{球函数}

\subsection{正文练习题}

\noindent 16.1. \begin{proof}
Multiply both sides of (16.12a) by $y^*(x)$ and integrate from $-1$ to $1$, we have by integration-by-parts formula
\[
0 = \nu(\nu+1)\int_{-1}^1|y(x)|^2dx + \int_{-1}^1 \frac{d}{dx}\left[(1-x^2)\frac{dy}{dx}\right]y^*(x)dx = \nu(\nu+1)\int_{-1}^1|y(x)|^2dx - \int_{-1}^1(1-x^2)\left|\frac{dy}{dx}\right|^2dx.
\]
When $y(x)\not\equiv 0$, $\int_{-1}^1(1-x^2)\left|\frac{dy}{dx}\right|^2dx > 0$ and $\int_{-1}^1|y(x)|^2dx > 0$. So
\[
\nu(\nu+1)=\frac{\int_{-1}^1(1-x^2)\left|\frac{dy}{dx}\right|^2dx}{\int_{-1}^1|y(x)|^2dx}> 0.
\]
\end{proof}

\noindent 16.2. \begin{proof} We note
\[
P'_l(x) = \sum_{n=1}^l\frac{1}{(n!)^2}\frac{(l+n)!}{(l-n)!}n\left(\frac{x-1}{2}\right)^{n-1}, \;
P''_l(x) = \sum_{n=2}^l\frac{1}{(n!)^2}\frac{(l+n)!}{(l-n)!}n(n-1)\left(\frac{x-1}{2}\right)^{n-2}.
\]
So $P_l'(1)=\frac{(l+1)!}{(l-1)!}=(l+1)l$ and $P_l''(1) = \frac{1}{(2!)^2}\frac{(l+2)!}{(l-2)!}\cdot 2 = \frac{1}{2}(l+2)(l+1)l(l-1)$.
\end{proof}

\noindent 16.3. \begin{proof} By formula (16.19),
$P'_l(-x)(-1)=(-1)^lP_l'(x)$ and $P_l''(-x) = (-1)^l P_l''(x)$. So
\[P_l'(-1)=(-1)^{l+1}P_l'(1) = (-1)^{l+1}l(l+1)\] and
\[P_l''(-1)=(-1)^lP_l''(1) = \frac{(-1)^l}{2}(l+2)(l+1)l(l-1).\]
\end{proof}

\noindent 16.4. \begin{proof}
$P_{2l}(x)=\sum_{r=0}^l(-1)^r\frac{(4l-2r)!}{2^{2l}r!(2l-r)!(2l-2r)!}x^{2l-2r}$,
so $P_{2l}'(0)=0$.
\[P_{2l+1}(x)=\sum_{r=0}^l(-1)^r\frac{(4l+2-2r)!}{2^{2l+1}r!(2l+1-r)!(2l+1-2r)!}x^{2l+1-2r},\]
so $P_{2l+1}'(0)=(-1)^l\frac{(2l+2)!}{2^{2l+1}l!(l+1)!}$.
\end{proof}

\noindent 16.5. \begin{proof} This is straightforward from (16.24).
\end{proof}

\noindent 16.6. \begin{proof}
By separation of variables, we have the following three eigenvalue problems:
\[
\begin{cases}
\frac{d}{dr}\left[r^2\frac{dR(r)}{dr}\right] - \lambda R(r)=0\\
\lim_{r\to\infty}R(r)=0,
\end{cases}
\;
\begin{cases}
\frac{1}{\sin\theta}\frac{d}{d\theta}\left[\sin\theta\frac{d\Theta(\theta)}{d\theta}\right] + \left[\lambda-\frac{\mu}{\sin^2\theta}\right]\Theta(\theta)=0\\
\Theta(0), \Theta(\pi) \; \mbox{are bounded},
\end{cases}
\;
\begin{cases}
\Phi''+\mu\Phi = 0 \\
\Phi(0)=\Phi(2\pi),\; \Phi'(0)=\Phi'(2\pi).
\end{cases}
\]
For the third eigenvalue problem, by the calculations in \S 15.4, page 216, the eigenvalues are $m^2$ $(m\in \mathbb N)$ with corresponding eigenfunctions $\sin m\phi$ and $\cos m \phi$. For the second eigenvalue problem, by \S 16.8, the eigenvalues are $\lambda_l = l(l+1)$, $l=m, m+1, m+2,\cdots$ with corresponding eigenfunctions $P^m_l(\cos\theta)$, where $P^m_l$ are the associated Legendre's polynomials. Finally, for each given $\lambda_l$, the calculation on page 236 shows the first eigenvalue problem has eigenfunction $r^{-l-1}$. Combined, we conclude the general solution has the form of
\[
u(r,\theta,\phi)=\sum_{l=0}^{\infty}\sum_{m=0}^lr^{-l-1}P_l^m(\cos\theta)[A_{lm}\cos m\phi + B_{lm}\sin m\phi].
\]
To determine $A_{lm}$ and $B_{lm}$, we expand $f(\theta,\phi)$ according to $\{P_{l}^m(\cos\theta)e^{i m\phi}\}$, then use the fact $u(a,\theta,\phi)=f(\theta,\phi)$ and the orthogonality of $\{P_{l}^m(\cos\theta)e^{i m\phi}\}$.
\end{proof}

\noindent 16.7. \begin{proof}
Similar to the calculations in \S 16.7 and Exercise Problem 16.6, we can conclude the general solution has the form of
\[
u(r,\theta,\phi)=\sum_{l=0}^{\infty}\sum_{m=0}^lP_l^m(\cos\theta)\{r^l[A_{lm}\cos m\phi + B_{lm}\sin m\phi] + r^{-l-1}[C_{lm}\cos m\phi + D_{lm}\sin m\phi]\}.
\]
Then we expand $f(\theta,\phi)$ and $g(\theta,\phi)$ according to $\{P_{l}^m(\cos\theta)e^{i m\phi}\}$ to determine $A_{lm}$, $B_{lm}$, $C_{lm}$, and $D_{lm}$.
\end{proof}

\subsection{章末习题}


\section{柱函数}

\subsection{正文练习题}

\noindent 17.1. \begin{proof}
\begin{eqnarray*}
\cos(\nu \pi)N_{\nu}(z)+\sin(\nu \pi)J_{\nu}(z) &=& \frac{\cos^2(\nu \pi)J_{\nu}(z)-\cos(\nu\pi)J_{-\nu}(z)+\sin^2(\nu\pi)J_{\nu}(z)}{\sin(\nu \pi)} \\
&=&
\frac{J_{\nu}(z)-\cos(\nu\pi)J_{-\nu}(z)}{\sin(\nu\pi)} = N_{-\nu}(z).
\end{eqnarray*}
By noting $J_{\pm \nu}(ze^{m\pi i}) = \sum_{k=0}^{\infty}\frac{(-1)^k}{k!\Gamma(k\pm \nu+1)}\left(\frac{z}{2}\right)^{2k\pm \nu} e^{\pm \nu m\pi i} = J_{\pm\nu}(z)e^{\pm \nu m\pi i}$, we have
\begin{eqnarray*}
N_{\nu}(ze^{m\pi i}) &=& \frac{\cos(\nu\pi)J_{\nu}(ze^{m\pi i}) - J_{-\nu}(ze^{m\pi i})}{\sin(\nu\pi)} \\
&=& \frac{\cos(\nu\pi)J_{\nu}(z)e^{\nu m\pi i} -\cos(\nu\pi)J_{\nu}(z)e^{-\nu m\pi i} + \cos(\nu\pi)J_{\nu}(z)e^{-\nu m\pi i}- J_{-\nu}(z)e^{-\nu m\pi i}}{\sin(\nu\pi)} \\
&=& 2i\sin(m\nu \pi)\cot(\nu\pi)J_{\nu}(z) + e^{-m\nu\pi i}N_{\nu}(z).
\end{eqnarray*}
The equality $N_{-\nu}(ze^{m\pi i}) = e^{-m\nu \pi i}N_{-\nu}(z) + 2i\sin(m \nu \pi)\csc(\nu \pi)J_{\nu}(z)$ can be proved similarly.
\end{proof}

\noindent 17.2. \begin{proof}
If $N_{\nu}(x)$ and $J_{\nu}(x)$ have a common zero, then their Wronskian  $\left|\begin{matrix}J_{\nu}(x) & N_{\nu}(x) \\ J_{\nu}'(x) & N_{\nu}'(x)\end{matrix}\right|$ will vanish at that zero, which contradicts with the fact that $J_{\nu}$ and $N_{\nu}$ are linearly independent (see \S 6.4, page 79).
\end{proof}

\noindent 17.3. \begin{proof}We note
\begin{eqnarray*}
\frac{d}{dx}[x^{\nu}N_{\nu}(x)] &=& \frac{d}{dx} \left[\frac{\cos(\nu\pi)x^{\nu}J_{\nu}(x)-x^{\nu}J_{-\nu}(x)}{\sin(\nu\pi)}\right] = \frac{\cos(\nu\pi)x^{\nu}J_{\nu-1}(x) -\frac{d}{dx}[x^{-(-\nu)}J_{(-\nu)}(x)]}{\sin(\nu\pi)} \\
&=& \frac{\cos(\nu\pi)x^{\nu}J_{\nu-1}(x)+x^{\nu}J_{-\nu+1}(x)}{\sin(\nu\pi)} = x^{\nu}\frac{\cos(\nu-1)\pi J_{\nu-1}(x) - J_{-(\nu-1)}(x)}{\sin(\nu-1)\pi} \\
&=& x^{\nu}N_{\nu-1}(x),
\end{eqnarray*}
and
\begin{eqnarray*}
\frac{d}{dx}[x^{-\nu}N_{\nu}(x)] &=& \frac{d}{dx}\left[\frac{\cos(\nu\pi)x^{-\nu}J_{\nu}(x)-x^{-\nu}J_{-\nu}(x)}{\sin(\nu\pi)}\right] = \frac{\cos(\nu\pi)(-x^{-\nu})J_{\nu+1}(x)-x^{-\nu}J_{-\nu-1}(x)}{\sin(\nu\pi)} \\
&=& \frac{-\cos(\nu+1)\pi x^{-\nu}J_{\nu+1}(x)+x^{-\nu}J_{-\nu-1}(x)}{\sin(\nu+1)\pi} = -x^{-\nu}N_{\nu+1}(x).
\end{eqnarray*}
\end{proof}


\subsection{章末习题}

\section{变量分离法总结}

\subsection{正文练习题}

\noindent 18.1. \begin{proof} For $\alpha \in \mathbb C$, we have
\[
0\le |\!|f-\alpha g |\!|^2 = |\!|f |\!|^2 - 2 \mbox{Re}(f, \alpha g) + |\alpha|^2 |\!|g |\!|^2.
\]
If $g\ne 0$, we pick $\alpha = \frac{(f,g)}{|\!| g|\!|^2}$, from which the Schwarz inequality is immediate.
\end{proof}

\noindent 18.2. \begin{proof} Divide both sides of the equation by $a(x)$, we get $y''+\frac{b(x)}{a(x)}y'+\frac{c(x)-\lambda d(x)}{a(x)}y=0$. Multiply both sides by $e^{\int\frac{b(x)}{a(x)}dx}$, we get
\[
\frac{d}{dx}\left[e^{\int\frac{b(x)}{a(x)}dx}\frac{dy}{dx}\right] + \left\{\lambda\left[-\frac{d(x)}{a(x)}e^{\int\frac{b(x)}{a(x)}dx}\right] - \left[-\frac{c(x)}{a(x)}e^{\int\frac{b(x)}{a(x)}dx}\right]\right\}y = 0.
\]
Therefore, we can set
\[
p(x) = e^{\int\frac{b(x)}{a(x)}dx}, \; \rho(x) = -\frac{d(x)}{a(x)}e^{\int\frac{b(x)}{a(x)}dx}=-\frac{d(x)}{a(x)}p(x), \; q(x) = -\frac{c(x)}{a(x)}e^{\int\frac{b(x)}{a(x)}dx}=-\frac{c(x)}{a(x)}p(x).
\]
\end{proof}

\subsection{章末习题}

\noindent 1. We apply the result obtained in Exercise Problem 18.2.

(1) \begin{proof} Multiplying both sides of $x\frac{d^2y}{dx^2}+2\frac{dy}{dx}+(x+\lambda )y=0$ by $x$, we get $\frac{d}{dx}\left[x^2\frac{dy}{dx}\right]+(\lambda x + x^2)y = 0$. \end{proof}

(2) \begin{proof} We have $p(x) = e^{\int\frac{a-bx}{x(1-x)}dx} = x^a(1-x)^{b-a}$. So $q(x)=0$ and $\rho(x)=-x^{a-1}(1-x)^{b-a-1}$.
\end{proof}

(3) \begin{proof}
Multiplying both sides of the equation by $e^{-x}$, we get $\frac{d}{dx}\left[xe^{-x}\frac{dy}{dx}\right]+\lambda e^{-x}y=0$.
\end{proof}

(4) \begin{proof}
Multiplying both sides of the equation by $e^{-x^2}$, we get $\frac{d}{dx}\left[e^{-x^2}\frac{dy}{dx}\right] + 2\lambda e^{-x^2}y = 0$.
\end{proof}

\noindent 2. \begin{proof} The key idea is to make a change of variable $x=x(r)$ so that $r\frac{dR}{dr}=\frac{dR}{dx}$. This implies $dx=\frac{dr}{r}$, so $x(r)=\ln r$. Plugging $r=e^x$ into the original equation, we have
\begin{eqnarray*}
\frac{1}{r}\frac{d}{dr}\left(r\frac{dR}{dr}\right) + \frac{\lambda}{r^2}R &=& \frac{1}{e^x}\frac{d}{e^xdx}\frac{dR}{dx} + \lambda e^{-2x}R=0,
\end{eqnarray*}
which is simply the equation $\frac{d^2R(x)}{dx^2}+\lambda R(x)=0$. This reminds us of the eigenvalue problem (14.3) (\S 14.1, page 186), only that $a$ is not zero. So we make a further change of variable: $x=z+\ln a$, then we have the following eigenvalue problem
\[
\begin{cases}
\frac{d^2R(z)}{dz^2}+\lambda R(z) = 0 \\
R(0)= 0,\; R(\ln b -\ln a) = 0.
\end{cases}
\]
The above new eigenvalue problem is shown in \S 14.1 to have the solution $\lambda_n = \left(\frac{n\pi}{\ln b- \ln a}\right)^2$, $R_n(z) = \sin\left(\frac{n\pi}{\ln b -\ln a}z\right)$, $n=1, 2, 3, \cdots$. Changing back to the original variable $r$, we have $R_n(r) = \sin \left(\frac{\ln r -\ln a}{\ln b -\ln a}n\pi\right)$.

\end{proof}

\noindent 3. \begin{proof} Suppose we have two distinct eigenvalues $\lambda_1$ and $\lambda_2$, with their corresponding eigenfunctions $y_1(x)$ and $y_2(x)$, respectively. Then from the equation $\frac{d}{dx}\left[p(x)\frac{dy_1(x)}{dx}\right]+[\lambda_1\rho(x)-q(x)]y_1(x)=0$, we have
\begin{eqnarray*}
0 &=& \int_a^b\left\{y_2(x)\frac{d}{dx}\left[p(x)\frac{dy_1(x)}{dx}\right] + [\lambda_1\rho(x)-q(x)]y_1(x)y_2(x)\right\}dx \\
&=& y_2(x)p(x)y_1'(x)|_a^b + \int_a^b[\lambda_1\rho(x)-q(x)]y_1(x)y_2(x)dx.
\end{eqnarray*}
By symmetry, we have
\[
y_1(x)p(x)y_2'(x)|_a^b + \int_a^b[\lambda_2\rho(x)-q(x)]y_1(x)y_2(x)dx = 0.
\]
Taking the difference of these two equations and using the condition $p(a)=p(b)$, we have $(p_0:=p(a)=p(b))$
\begin{eqnarray*}
0 &=& p_0[y_2(x)y_1'(x)-y_2'(x)y_1(x)]|_a^b + \int_a^b(\lambda_1-\lambda_2)\rho(x)y_1(x)y_2(x)dx \\
&=& p_0\left(\left|\begin{matrix}\alpha_{11} & \alpha_{12} \\ \alpha_{21} & \alpha_{22}\end{matrix}\right|-1\right)y_1'(a)y_2(a) - p_0 \left(\left|\begin{matrix}\alpha_{11} & \alpha_{12} \\ \alpha_{21} & \alpha_{22}\end{matrix}\right|-1\right)y_1(a)y_2'(a) + (\lambda_1-\lambda_2)\int_a^b\rho(x)y_1(x)y_2(x)dx.
\end{eqnarray*}
So if $\left|\begin{matrix}\alpha_{11} & \alpha_{12} \\ \alpha_{21} & \alpha_{22}\end{matrix}\right|=1$, we must have $\int_a^b\rho(x)y_1(x)y_2(x)dx=0$ since $\lambda_1-\lambda_2\ne 0$.
\end{proof}

\noindent 4. \begin{proof}
Suppose $u = \sum_{k}\alpha_k\Phi_k$, then
\[
\nabla^2u = \sum_{k}\alpha_k\nabla^2\Phi_k = \sum_k\alpha_k(-\lambda_k)\Phi_k = -f = -\sum_{k}A_k\Phi_k.
\]
Comparing the coefficients, we conclude $\alpha_k = \frac{A_k}{\lambda_k}$. So $u=\sum_k\frac{A_k}{\lambda_k}\Phi_k$.
\end{proof}

\noindent 5. \begin{proof} We first solve the eigenvalue problem
\[
\begin{cases}
\nabla^2\Phi(x,y)+\lambda\Phi(x,y)=0\\
\Phi(0,y)=\Phi(a,y)=\Phi(x,0)=\Phi(x,b) = 0.
\end{cases}
\]
Suppose $\Phi(x,y)=X(x)Y(y)$, then we have $\frac{X''(x)}{X(x)}+\frac{Y''(y)}{Y(y)}+ \lambda  = 0$. Solving two separate eigenvalue problems
\[
\begin{cases}
X''(x)+\alpha X(x) = 0\\
X(0)=X(a)=0,
\end{cases}
\;
\begin{cases}
Y''(y)+\beta Y(y) = 0 \\
Y(0)=Y(b)=0,
\end{cases}
\]
we have $\alpha_m=\left(\frac{m\pi}{a}\right)^2$ with $X_m(x)=\sin\frac{m\pi x}{a}$ and $\beta_n=\left(\frac{n\pi}{b}\right)^2$ with $Y_n(y)=\sin\frac{n\pi y}{b}$ $(m, n \in \mathbb N)$. So the eigenvalue $\lambda_{mn}=\alpha_m+\beta_n= \left(\frac{m\pi}{a}\right)^2+\left(\frac{n\pi}{b}\right)^2$ and by the result of Problem 4, we have
\[
u(x,y)=\sum_{m,n=1}^{\infty}\frac{A_{mn}}{\left(\frac{m\pi}{a}\right)^2+\left(\frac{n\pi}{b}\right)^2}\sin\frac{m\pi x}{a}\sin\frac{n\pi y}{b},
\]
where $A_{mn}=\frac{4}{ab}\int_0^a\int_0^bf(x,y)\sin\frac{m\pi x}{a}\sin\frac{n\pi y}{b}dxdy$.
\end{proof}

\section{积分变换的应用}

\noindent 1. \begin{proof}
Let $U(x,p)=\int_0^{\infty}u(x,t)e^{-pt}dt$. Then
\begin{eqnarray*}
0 &=& \int_0^{\infty}\frac{\partial u(x,t)}{\partial t}e^{-pt}dt - \kappa \int_0^{\infty}\frac{\partial^2u(x,t)}{\partial x^2}e^{-pt}dt \\
&=& u(x,t)e^{-pt}|_{0}^{\infty}+p\int_0^{\infty}e^{-pt}u(x,t)dt - \kappa \frac{\partial^2U(x,p)}{\partial x^2} \\
&=& pU(x,p)-\kappa\frac{\partial^2U(x,p)}{\partial x^2}.
\end{eqnarray*}
So the general solution for $U(x,p)$ is $C_1(p)e^{\sqrt{\frac{p}{\kappa}}x} + C_2(p)e^{-\sqrt{\frac{p}{\kappa}}x}$. By the boundedness of $u|_{x\to\infty}$, we have the boundedness of $U(x,p)|_{x\to\infty}$. So we must have $C_1(p)\equiv 0$. Since $U(0,p)=u_0\int_0^{\infty}e^{-pt}dt = \frac{u_0}{p}$, we have $U(x,p)=\frac{u_0}{p}e^{-\sqrt{\frac{p}{\kappa}}x}$. By Example 9.8, $u(x,t)=u_0\mbox{erfc}\frac{x}{2\sqrt{\kappa t}}$, which satisfies $u|_{t=0}=0$.
\end{proof}

\noindent 2. \begin{proof} The partial differential equation for the problem is
\begin{eqnarray*}
& & \frac{\partial u}{\partial t} - \kappa \frac{\partial^2 u}{\partial x^2} = 0, \; x > 0, \; t> 0 \\
& & u|_{t=0}= 0, \; x < 0 \\
& & u|_{t=0}= u_0, \; x > 0.
\end{eqnarray*}
Let $U(x,p)=\int_0^{\infty}u(x,t)e^{-pt}dt$. Then (intuitively, $u(x,t)$ should approach to $\frac{u_0}{2}$ as $t\to\infty$ and is hence bounded)
\begin{eqnarray*}
0 &=& \int_0^{\infty}\frac{\partial u(x,t)}{\partial t}e^{-pt}dt - \kappa \int_0^{\infty}\frac{\partial^2u(x,t)}{\partial x^2}e^{-pt}dt \\
&=& u(x,t)e^{-pt}|_{0}^{\infty}+p\int_0^{\infty}e^{-pt}u(x,t)dt - \kappa \frac{\partial^2U(x,p)}{\partial x^2} \\
&=& -u(x,0)+pU(x,p)-\kappa\frac{\partial^2U(x,p)}{\partial x^2}.
\end{eqnarray*}
So we have the ODE for $U(x,p)$: $\frac{\partial^2U(x,p)}{\partial x^2} - \frac{p}{\kappa}U(x,p) = - \frac{u_01_{\{x>0\}}}{\kappa}$.
Using the method of Green's function, we can obtain (see Example 10.8)
\[
U(x,p) = \frac{1}{2\sqrt{\frac{p}{\kappa}}}\int_{-\infty}^{\infty}e^{-\sqrt{\frac{p}{\kappa}}|x-t|}\frac{u_01_{\{t>0\}}}{\kappa}dt = \frac{u_0}{2\sqrt{p\kappa}}\int_0^{\infty}e^{-\sqrt{\frac{p}{\kappa}}|x-t|}dt =
\begin{cases}
\frac{u_0}{2p}\exp\{\sqrt{\frac{p}{\kappa}}x\} & x<0, \\
\frac{u_0}{p}-\frac{u_0}{2p}\exp\{-\sqrt{\frac{p}{\kappa}}x\} & x>0. \\
\end{cases}
\]
By Example 9.8, we can obtain the formula for $u(x,t)$ as
\[
u(x,t)=\begin{cases}
\frac{u_0}{2}\mbox{erfc}\left(-\frac{x}{2\sqrt{\kappa t}}\right) & x < 0, \\
u_0-\frac{u_0}{2}\mbox{erfc}\left(\frac{x}{2\sqrt{\kappa t}}\right) & x > 0.
\end{cases}
\]
\end{proof}

\noindent 3. \begin{proof}
Let $U(x,p)=\int_0^{\infty}u(x,t)e^{-pt}dt$. Then the equation $\frac{\partial u}{\partial t}-\kappa\frac{\partial^2u}{\partial x^2} = 0$ gives us $\frac{\partial^2 U(x,p)}{\partial x^2}-\frac{p}{\kappa}U(x,p)=0$, whose general solution is $C_1(p)e^{\sqrt{\frac{p}{\kappa}}x} + C_2(p)e^{-\sqrt{\frac{p}{\kappa}}x}$. For convenience of applying the boundary conditions, we can write $U(x,p)$ as $C_1(p)\sinh\sqrt{\frac{p}{\kappa}}x + C_2(p)\sinh\sqrt{\frac{p}{\kappa}}(l-x)$. Then
\[
U(0,p) = C_2(p)\sinh\sqrt{\frac{p}{\kappa}}l = \int_0^{\infty}Ae^{-\kappa\alpha^2t}e^{-pt}dt= \frac{A}{p+\kappa\alpha^2}
\]
and
\[
U(l,p)=C_1(p)\sinh\sqrt{\frac{p}{\kappa}}l=\int_0^{\infty}Be^{-\kappa\beta^2t}e^{-pt}dt = \frac{B}{p+\kappa\beta^2}.
\]
Therefore, $U(x,p)=\frac{A}{p+\alpha^2\kappa}\frac{\sinh\sqrt{p/\kappa}(l-x)}{\sinh\sqrt{p/\kappa}l} + \frac{B}{p+\beta^2\kappa}\frac{\sinh\sqrt{p/\kappa}x}{\sinh\sqrt{p/\kappa}l}$.
\end{proof}

\noindent 4. \begin{proof}
Define $U(k,t) =\frac{1}{\sqrt{2\pi}}\int_{-\infty}^{\infty}u(x,t) e^{-ikx}dx$ and $F(k,t) = \frac{1}{\sqrt{2\pi}}\int_{-\infty}^{\infty}f(x,t) e^{-ikx}dx$. Then we have
\begin{eqnarray*}
\begin{cases}
\frac{\partial^2U(k,t)}{\partial t^2}  + a^2k^2 U(k,t) = F(k,t) \\
U(k,0)= \frac{1}{\sqrt{2\pi}}\int_{-\infty}^{\infty}\phi(x)e^{-ikx}dx := \Phi(k) \\
\left.\frac{\partial U(k,t)}{\partial t}\right|_{t=0} = \frac{1}{\sqrt{2\pi}}\int_{-\infty}^{\infty}\psi(x)e^{-ikx}dx := \Psi(k).
\end{cases}
\end{eqnarray*}
The solution of the above problem can be obtained by superposition of solutions to the following two problems:
\[
\begin{cases}
\frac{\partial^2U(k,t)}{\partial t^2} + a^2k^2U(k,t) = 0 \\
U(k,0) = \Phi(k), \; \left.\frac{\partial U(k,t)}{\partial t}\right|_{t=0} = \Psi(k),
\end{cases}
\]
and
\[
\begin{cases}
\frac{\partial^2U(k,t)}{\partial t^2} + a^2k^2U(k,t) = F(k,t) \\
U(k,0) = 0, \; \left.\frac{\partial U(k,t)}{\partial t}\right|_{t=0} = 0.
\end{cases}
\]
To solve the first problem, we note the general solution to the homogeneous differential equation is $U(k,t)=C_1(k)\sin(akt)+C_2(k)\cos(akt)$. The initial conditions dictate $C_2(k)=\Phi(k)$ and $C_1(k)=\frac{\Psi(k)}{ak}$. So the solution to the first problem is
\[
U(k,t) = \frac{\Psi(k)}{ak}\sin(akt)+\Phi(k)\cos(akt).
\]
To solve the second problem, we apply the method of Green's function and obtain (see formula (10.34))
\[
U(k,t) = \frac{1}{ak} \int_0^t F(k,\xi) \sin(ak(t-\xi))d\xi.
\]
Combined, we conclude the solution to the original non-homogenous second order ODE with non-homogeneous initial conditions has the form of
\[
U(k,t) = \Phi(k)\cos(akt) + \frac{\Psi(k)}{ak}\sin(akt) + \frac{1}{ak} \int_0^tF(k,\xi)\sin(ak(t-\xi))d\xi.
\]

To find the inverse Fourier transform of $\Phi(k)\cos(akt)$, we note
\begin{eqnarray*}
\Phi(k)\cos(akt) &=&  \frac{e^{iakt}+e^{-iakt}}{2}\frac{1}{\sqrt{2\pi}}\int_{-\infty}^{\infty}\phi(x)e^{-ikx}dx \\
&=& \frac{1}{2}\left[\frac{1}{\sqrt{2\pi}}\int_{-\infty}^{\infty}\phi(x)e^{-ik(x-at)}dx +\frac{1}{\sqrt{2\pi}} \int_{-\infty}^{\infty}\phi(x)e^{-ik(x+at)}dx \right] \\
&=& \frac{1}{2}\left[\frac{1}{\sqrt{2\pi}} \int_{-\infty}^{\infty}\phi(y+at)e^{-iky}dy + \frac{1}{\sqrt{2\pi}} \int_{-\infty}^{\infty}\phi(y-at)e^{-iky}dy \right].
\end{eqnarray*}
So ${\cal F}^{-1}[\Phi(k)\cos(akt)] = \frac{1}{2}[\phi(x+at)+\phi(x-at)]$.

To find the inverse Fourier transform of $\frac{\Psi(k)}{ak}\sin(akt)$, we assume ${\cal F}^{-1}\left[\frac{\Psi(k)}{ak}\sin(akt)\right]=h(x,t)$. Then
\[
\Psi(k)\cos(akt) = \frac{d}{dt}\left[\frac{\Psi(k)}{ak}\sin(akt)\right] = \frac{d}{dt}{\cal F}(h(x,t)) = {\cal F}\left(\frac{\partial h(x,t)}{\partial t}\right).
\]
Using the result for $\Phi(k)\cos(akt)$, we have
\[
\frac{\partial h(x,t)}{\partial t} = \frac{\psi(x+at)+\psi(x-at)}{2}.
\]
So there exists some function $l(x)$ such that
\[
h(x,t) = \frac{\int_{x-at}^{x+at}\psi(\xi)d\xi}{2a} + l(x).
\]
Once we ``guessed'' the form of $h(x,t)$, we can verify easily that ${\cal F}\left[\frac{\int_{x-at}^{x+at}\psi(\xi)d\xi}{2a}\right]=\frac{\Psi(k)}{ak}\sin(akt)$, so $l(x)\equiv 0$ and ${\cal F}^{-1}\left(\frac{\Psi(k)}{ak}\sin(akt)\right)=\frac{1}{2a}\int_{x-at}^{x+at}\psi(\xi)d\xi$.

To find the inverse Fourier transform of $\frac{1}{ak} \int_0^tF(k,\xi)\sin(ak(t-\xi))d\xi$, we suppose
\[
{\cal F}^{-1}\left[\frac{1}{ak} \int_0^tF(k,\xi)\sin(ak(t-\xi))d\xi\right]=H(x,t).
\]
Then
\begin{eqnarray*}
{\cal F}\left(\frac{\partial H(x,t)}{\partial t}\right) &=& \frac{\partial}{\partial t}{\cal F}(H(x,t)) = \int_0^tF(k,\tau)\cos(ak(t-\tau))d\tau \\
&=& \int_0^tF(k,\tau)\frac{e^{iak(t-\tau)}+e^{-iak(t-\tau)}}{2}d\tau.
\end{eqnarray*}
By the convolution theorem of Fourier transform, we have
\begin{eqnarray*}
F(k,\tau)e^{iak(t-\tau)} &=& {\cal F}(f(x,\tau)){\cal F}(\delta(x+a(t-\tau))) = {\cal F}\left(\int_{-\infty}^{\infty}f(x-\xi,\tau)\delta(\xi+a(t-\tau))d\xi\right) \\
&=& {\cal F}(f(x+a(t-\tau),\tau)),
\end{eqnarray*}
\begin{eqnarray*}
F(k,\tau)e^{-iak(t-\tau)} &=& {\cal F}(f(x,\tau)){\cal F}(\delta(x-a(t-\tau))) = {\cal F}\left(\int_{-\infty}^{\infty}f(x-\xi,\tau)\delta(\xi-a(t-\tau))d\xi\right) \\
&=& {\cal F}(f(x-a(t-\tau),\tau)).
\end{eqnarray*}
So we have
\[
{\cal F}\left(\frac{\partial H(x,t)}{\partial t}\right) = \int_0^t \frac{{\cal F}(f(x+a(t-\tau),\tau))+{\cal F}(f(x-a(t-\tau),\tau))}{2}d\tau,
\]
which implies
\[
\frac{\partial H(x,t)}{\partial t} = \int_0^t \frac{f(x+a(t-\tau),\tau)+f(x-a(t-\tau),\tau)}{2}d\tau.
\]
Therefore, there exists some function $l(x)$ such that
\[
H(x,t) = l(x) + \frac{1}{2a}\int_0^t\int_{x-a(t-\tau)}^{x+a(t-\tau)} f(\xi,\tau)d\xi d\tau.
\]
Now let us hand-wave and assume $l(x)\equiv 0$; we then have the solution to the original problem
\[
u(x,t) = \frac{1}{2}[\phi(x+at)+\phi(x-at)] + \frac{1}{2a}\int_{x-at}^{x+at}\psi(\xi)d\xi + \frac{1}{2a}\int_0^t\int_{x-a(t-\tau)}^{x+a(t-\tau)} f(\xi,\tau)d\xi d\tau.
\]
\begin{remark}
It'll be nice if we can find an easy way to show $l(x)\equiv 0$ in the above calculation. We leave this to the next version of the solution manual.
\end{remark}
\end{proof}

\noindent 5. \begin{proof}
We define
\[
U(k,m, t) = \frac{1}{2\pi}\int_{\mathbb R^2}u(x,y,t)e^{-ikx-imy}dxdy,
\]
\[
\Phi(k,m)=\frac{1}{2\pi}\int_{\mathbb R^2}\phi(x,y)e^{-ikx-imy}dxdy,
\]
and
\[
\Psi(k,m)=\frac{1}{2\pi}\int_{\mathbb R^2}\psi(x,y)e^{-ikx-imy}dxdy.
\]
Then the original problem is converted after Fourier transform into the following problem
\[
\begin{cases}
\frac{\partial^2U(k,m,t)}{\partial t^2} + a^2[k^2+m^2]U(k,m,t) = 0\\
U(k,m,0)=\Phi(k,m),\; \left.\frac{\partial U}{\partial t}\right|_{t=0}=\Psi(k,m).
\end{cases}
\]
Then it's easy to deduce that
\[
U(k,m,t) = \Phi(k,m)\cos(\sqrt{k^2+m^2}at)+ \frac{\Psi(k,m)}{a\sqrt{k^2+m^2}}\sin(\sqrt{k^2+m^2}at).
\]
Suppose ${\cal F}^{-1}[\frac{\sin(\sqrt{k^2+m^2}at)}{a\sqrt{k^2+m^2}}]=h(x,y,t)$. Then we have by convolution theorem of Fourier transform
\begin{eqnarray*}
U(k,m,t)&=&{\cal F}[\phi(x,y)]\frac{\partial}{\partial t}{\cal F}[h(x,y,t)] + {\cal F}[\psi(x,y)]{\cal F}[h(x,y,t)]\\
&=& {\cal F}\left[\phi * \frac{\partial}{\partial t}h(\cdot,\cdot,t)+\psi*h(\cdot,\cdot,t)\right].
\end{eqnarray*}
Hence, $u(x,y,t)$ can be written as
\[
u(x,y,t)=\int_{\mathbb R^2}\psi(x',y')h(x-x',y-y',t)dx'dy' + \frac{\partial}{\partial t}\int_{\mathbb R^2}\phi(x',y')h(x-x',y-y',t)dx'dy'.
\]
To find $h(x,y,t)$, ... (to be continued)
\end{proof}

\section{格林函数法}

\noindent 1. \begin{proof} \end{proof}

\section{变分法初步}

\subsection{正文练习题}

\noindent 21.1. \begin{proof} We follow the line of reasoning in Gelfand and Fomin \cite{GF00}, \S 35. Assume the integration region $R$ stays fixed while the function $u(x_1,\cdots,x_n)$ goes into
\[
u^*(x_1,\cdots,x_n) = u(x_1,\cdots, x_n) + \varepsilon \phi(x_1,\cdots, x_n) + \cdots,
\]
where the dots denote terms of order higher than 1 relative to $\varepsilon$. By the variation $\delta J$ of the functional $J[u]=\int\cdots\int F(x_1,\cdots, x_n, u, u_{x_1}, \cdots, u_{x_n})dx_1\cdots dx_n$, corresponding to the transformation $u\to u^*$, we mean the principal linear part (in $\varepsilon$) of the difference $J[u^*]-J[u]$. For simplicity, we write $u(x)$, $\phi(x)$ instead of $u(x_1,\cdots, x_n)$, $\phi(x_1,\cdots,x_n)$, $dx$ instead of $dx_1\cdots dx_n$, etc. Then, using Taylor's theorem, we find that
\begin{eqnarray*}
& & J[u^*]-J[u] \\
&=& \int_R\{F[x,u(x)+\varepsilon\phi(x),u_{x_1}(x)+\varepsilon \phi_{x_1}(x),\cdots, u_{x_n}(x)+\varepsilon \phi_{x_n}(x)]-F[x,u(x),u_{x_1}(x),\cdots, u_{x_n}(x)]\}dx \\
 &=& \varepsilon \int_R \left(F_u\phi+\sum_{i=1}^nF_{u_{x_i}}\phi_{x_i}\right)dx + \cdots,
\end{eqnarray*}
where the dots again denote terms of order higher than 1 relative to $\varepsilon$. It follows that \[ \delta J = \varepsilon \int_R \left(F_u\phi+\sum_{i=1}^nF_{u_{x_i}}\phi_{x_i}\right) dx \]
is the variation of the functional $J[u]$.

Next, we try to represent the variation of $J[u]$ as an integral of an expression of the form
\[
G(x)\phi(x) + \mbox{div}(\cdots),
\]
i.e., we try to transform $\delta J$ in such a way that the derivatives $\phi_{x_i}$ only appear in a combination of terms which can be written as divergence. To achieve this, we replace $F_{u_{x_i}}\phi_{x_i}(x)$ by $\frac{\partial}{\partial x_i}[F_{u_{x_i}}\phi(x)]- \frac{\partial F_{u_{x_i}}}{\partial x_i}\phi(x)$ and obtain
\[
\delta J = \varepsilon \int_R \left(F_u - \sum_{i=1}^n\frac{\partial}{\partial x_i}F_{u_{x_i}}\right)\phi(x)dx + \varepsilon \int_R \sum_{i=1}^n\frac{\partial}{\partial x_i}[F_{u_{x_i}}\phi(x)]dx.
\]
This expression for $\delta J$ has the important feature that its second term is the integral of a divergence, and hence can be reduced to an integral over the boundary $\Gamma$ of the integration region. In fact, let $d\sigma$ be the area of a variable element of $\Gamma$, regarded as an $(n-1)$-dimensional surface. Then the $n$-dimensional version of Green's theorem states that
\[
\int_R\sum_{i=1}^n\frac{\partial }{\partial x_i}[F_{u_{x_i}}\phi(x)]dx = \int_{\Gamma} \phi(x)(G, \nu)d\sigma,
\]
where $G=(F_{u_{x_1}},\cdots, F_{u_{x_n}})$ is the $n$-dimensional vector whose components are the derivatives $F_{u_{x_i}}$, $\nu=(\nu_1,\cdots,\nu_n)$ is the unit outward normal to $\Gamma$, and $(G,\nu)$ denotes the scalar product of $G$ and $\nu$. Therefore
\[
\delta J = \varepsilon \int_R\left(F_{u}-\sum_{i=1}^n\frac{\partial}{\partial x_i}F_{u_{x_i}}\right)\phi(x)dx + \varepsilon \int_{\Gamma}\phi(x)(G,\nu)d\sigma.
\]

In order for the functional $J[u]$ to have an extremum, we must require that $\delta J=0$ for all admissible $\phi(x)$, in particular, that $\delta J = 0$ for all admissible $\phi(x)$ which vanish on the boundary $\Gamma$. For such functions, $\delta J$ reduces to
\[
\delta J = \int_R\left(F_u-\sum_{i=1}^n\frac{\partial}{\partial x_i}F_{u_{x_i}}\right)\phi(x)dx,
\]
and then, because of the arbitrariness of $\phi(x)$ inside $R$, $\delta J = 0$ implies that
\[
F_u-\sum_{i=1}^n\frac{\partial}{\partial x_i}F_{u_{x_i}} = 0
\]
for all $x\in R$.
\end{proof}

\noindent 21.2. \begin{proof} We follow the line of reasoning in Gelfand and Fomin \cite{GF00}, \S 6. The problem can be formulated as follows: {\it Among all curves whose end points lie on two given vertical lines $x=a$ and $x=b$, find the curve for which the functional
\[
J[y]=\int_a^bF(x,y,y')dx
\]
has an extremum.}

We begin by calculating the variation $\delta J$ of $J[y]$. As before, $\delta J$ means the principal linear part of the increment
\[
\Delta J = J[y+h]-J[y] = \int_a^b [F(x,y+h,y'+h')-F(x,y,y')]dx.
\]
Using Taylor's theorem to expand the integrand, we obtain
\[
\Delta J = \int_a^b (F_yh + F_{y'}h')dx + \cdots,
\]
where the dots denote terms of order higher than 1 relative to $h$ and $h'$, and hence
\[
\delta J = \int_a^b(F_yh+F_{y'}h')dx.
\]
Here, unlike the fixed end point problem, $h(x)$ need no longer vanish at the points $a$ and $b$, so that integration by parts now gives
\begin{eqnarray*}
\delta J &=& \int_a^b \left(F_y-\frac{d}{dx}F_{y'}\right) h(x)dx + F_{y'}h(x)|_{x=a}^{x=b}\\
&=& \int_a^b \left(F_y - \frac{d}{dx}F_{y'}\right)h(x)dx + F_{y'}|_{x=b}h(b)-F_{y'}|_{x=a}h(a).
\end{eqnarray*}

We first consider function $h(x)$ such that $h(a)=h(b)=0$. The rationale is that if $y^*$ is an extremal among all admissible function, then $y^*$ must be an extremal among the smaller class of functions whose values at end points agree with those of $y^*$. Then as in the simplest variational problem, the condition $\delta J =0$ implies that
\[
F_y - \frac{d}{dx}F_{y'} = 0.
\]
Therefore, in order for the curve $y=y(x)$ to be a solution of the variable end point problem, $y$ must be an extremal, i.e. a solution of Euler's equation. But if $y$ is an extremal, the integral in the above expression for $\delta J$ vanishes, and then the condition $\delta J = 0$ takes the form
\[
F_{y'}|_{x=b}h(b) - F_{y'}|_{x=a}h(a) = 0,
\]
from which it follows that
\[
F_{y'}|_{x=a} = 0, F_{y'}|_{x=b} = 0,
\]
since $h(x)$ is arbitrary. Thus, to solve the variable end point problem, we must first find a general integral of Euler's equation, and then use the {\it natural boundary conditions} $F_{y'}|_{x=a} = F_{y'}|_{x=b} = 0$ to determine the values of the arbitrary constants.
\end{proof}

\subsection{The Rayleigh--Ritz method and its application to the Sturm--Liouville problem}

The textbook's explanation of the Rayleigh--Ritz method is a bit ambiguous. We therefore give a summary of this method, as presented in Gelfand and Fomin \cite{GF00}, Chapter 8.

The idea of the Rayleigh-Ritz method consists of two parts. First, convert a differential equation to a variational problem, in that the solution of the differential equation is the extremal of a variational problem. Second, in a certain function space, use a set of complete functions to approximate the extremal, so that each approximation is reduced to a finite-dimensional optimization problem.

As an example, consider the Sturm-Liouville problem: Let $P=P(x)>0$ and $Q=Q(x)$ be two given functions, where $Q$ is continuous and $P$ is continuously differentiable, and consider the Sturm-Liouville equation
\[
-(Py')'+Qy=\lambda y,
\]
subject to the boundary conditions $y(a)=y(b)=0$. It's required to find the eigenfunctions and eigenvalues of the above boundary value problem.

The following result converts the above problem of solving a differential equation into a problem of finding variational extremal (Gelfand and Fomin \cite{GF00}, \S 12, Theorem 1; also see \S 41.1):
\begin{theorem}\label{therem_reduction}
Given the functional $J[y]=\int_a^bF(x,y,y')dx$, let the admissible curves satisfy the conditions
\[
y(a)=A, \; y(b)=B, \; K[y] = \int_a^bG(x,y,y')dx = l,
\]
where $K[y]$ is another functional, and let $J[y]$ have an extremum for $y=y(x)$. Then, if $y=y(x)$ is not an extremal of $K[y]$, there exists a constant $\lambda$ such that $y=y(x)$ is an extremal of the functional $\int_a^b(F+\lambda G)dx$, i.e. $y=y(x)$ satisfies the differential equation
\[
F_y-\frac{d}{dx}F_{y'} + \lambda \left(G_y - \frac{d}{dx}G_{y'}\right) = 0
\]
\end{theorem}
With the above result, the Sturm-Liouville problem is reduced to finding an extremum of the quadratic functional
\[
J[y] = \int_a^b(Py'^2+Qy^2)dx,
\]
subject to the boundary conditions $y(a)=y(b)=0$ and the subsidiary
condition $\int_a^by^2dx = 1$.\footnote{Use Theorem
\ref{therem_reduction}, changing $\lambda$ to $-\lambda$.}

Then we can apply the Rayleigh--Ritz method as follows. Suppose we are looking for the minimum of a functional $J[y]$ defined on some space ${\cal M}$ of admissible functions, which for simplicity we take to be a normed linear space. Let $\varphi_1$, $\varphi_2$, $\cdots$ be an infinite sequence of functions in ${\cal M}$, and let ${\cal M}_n$ be the $n$-dimensional linear subspace of ${\cal M}$ spanned by the first $n$ of the functions $(\varphi_i)_{i=1}^{\infty}$. Then on each subspace ${\cal M}_n$, the functional $J[y]$ leads to a function $J[\alpha_1\varphi_1 + \cdots + \alpha_n\varphi_n]$ of the $n$ variables $\alpha_1$, $\cdots$, $\alpha_n$.

Next, we choose $\alpha_1$, $\cdots$, $\alpha_n$ in such a way as to minimize $J[\alpha_1\varphi_1 + \cdots + \alpha_n\varphi_n]$, denoting the minimum by $\mu_n$ and the element of ${\cal M}_n$ which yields the minimum by $y_n$. We say the sequence $(\varphi_i)_{i=1}^{\infty}$ is {\it complete (in ${\cal M}$)} if given any $y\in {\cal M}$ and any $\varepsilon > 0$, there is a finite linear combination $\eta_n$ of the form $\eta_n =  \sum_{i=1}^n\alpha_i\varphi_i$ such that $|\!|\eta_n - y|\!| < \varepsilon$ (where $n$ depends on $\varepsilon$). Then we have the following theorem
\begin{theorem} \label{theorem_conv}
If the functional $J[y]$ is continuous in the norm of ${\cal M}$, and if the sequence $(\varphi_i)_{i=1}^{\infty}$ is complete, then
\[
\lim_{n\to\infty}\mu_n = \mu,
\]
where $\mu = \inf_{y} J[y]$.
\end{theorem}
In the particular case of the Sturm--Liouville problem, we can choose $\varphi_k(x) = \sin (kx)$. Then $(\mu_n)_{n=1}^{\infty}$ converges to the smallest eigenvalue of the Sturm--Liouville equation and $(y_n)_{n=1}^{\infty}$ converges to the corresponding eigenfunction. We can continue this procedure to find the rest of the eigenvalues and eigenfunctions (see Gelfand and Fomin \cite{GF00}, \S 41.4). This is summarized in the following result
\begin{theorem}
The Sturm-Liouville problem has an infinite sequence of eigenvalues $\lambda^{(1)}$, $\lambda^{(2)}$, $\cdots$, and to each eigenvalue $\lambda^{(n)}$ there corresponds an eigenfunction $y^{(n)}$ which is unique to within a constant factor.
\end{theorem}

\subsection{Solutions of the exercise problems from Gelfand and Fomin
\cite{GF00}, Chapter 8}

\noindent 2. \begin{proof}We first calculate the exact solution. Let
$F(x,y,y')=y'^2-y^2-2xy$. Then Euler's equation becomes
\[
F_y-\frac{d}{dx}F_{y'} = -2y -2x - \frac{d}{dx}(2y') = -2(y+x+y'').
\]
To solve the second order linear ODE
\[
\begin{cases}
y'' + y = -x \\
y(0)=y(1)=0,
\end{cases}
\]
we can employ at least four methods: the Green's function for
boundary value problems, Fourier expansion over the interval
$(0,1)$, operator calculus (including Laplace transform, see 丁同仁等\cite{丁同仁等2004}), and reduction to a system of first order ODEs
(also see 丁同仁等\cite{丁同仁等2004}). However, it's very easy to see
directly the general solution of $y''+y=-x$ is $-x + C_1\cos x + C_2
\sin x$. Using boundary condition, we conclude $y(x)= -x + \csc 1
\sin x$. In this case, it's easy to calculate $J[y] = \frac{1}{3}$.

We then use the Ritz method to find an approximate solution. For
this purpose, we need to show the completeness of
$\{\varphi_n\}_{n=1}^{\infty}$ in the space ${\cal M} = \{f\in {\cal
D}_1(0,1): f(0)=f(1)=0\}$, where $\varphi_n=x^n(1-x)$ and the norm
is that of ${\cal D}_1(0,1)$ (i.e. $|\!|f|\!|=\max_{0\le x\le
1}|f(x)|+\max_{0\le x\le 1}|f'(x)|$).\footnote{For definition of the
normed space ${\cal D}_n(a,b)$, see Gelfand and Fomin \cite{GF00},
page 7.} We have the following lemmas.

\begin{lemma}\label{lemma_poly_dense} The set of polynomials is dense in ${\cal D}_1(0,1)$.
\end{lemma}
\begin{proof}
By the Weierstrass approximation theorem, for any element $f\in
{\cal D}_1(0,1)$, we can find a sequence $(P_n)_{n=1}^{\infty}$ of
polynomials, such that
\[
\max_{0\le x \le 1}|f'(x)-P_n(x)|\le \frac{1}{n}.
\]
Let $Q_n(x)=\int_0^xP_n(\xi)d\xi + f(0)$. Then $Q_n$ is still a
polynomial and
\begin{eqnarray*}
|\!|Q_n-f|\!| &=& \max_{0\le x \le 1}\left|\int_0^xP_n(\xi)d\xi +
f(0) - \left[\int_0^xf'(\xi)d\xi+f(0)\right]\right| + \max_{0\le x
\le
1}|P_n(x)-f'(x)| \\
&\le&\max_{0\le x \le 1} \int_0^x |P_n(\xi)-f'(\xi)|d\xi
+\frac{1}{n} \\
&\le & \frac{2}{n}.
\end{eqnarray*}
This proves the lemma.
\end{proof}

\begin{lemma}\label{lemma_density}
The linear space spanned by $\{\varphi_n\}_{n=1}^{\infty}$ is dense
in ${\cal M}$ under the norm of ${\cal D}_1(0,1)$.
\end{lemma}
\begin{proof}
It's clear each $\varphi_n\in {\cal M}$. Hence any finite linear
combination of $\varphi_n$'s belongs to ${\cal M}$. For any $f\in
{\cal M}$, by Lemma \ref{lemma_poly_dense}, we can find
$(\varepsilon_n)_{n=1}^{\infty}$ with $\varepsilon_n\downarrow 0$
and a sequence $(P_n)_{n=1}^{\infty}$ of polynomials such that
\[
|\!|P_n - f |\!| \le \varepsilon_n.
\]
Each $P_n$ can be written in the form of $Q_n(x) + \alpha_n +
\beta_n(1-x)$, where $Q_n(x)$ is a finite linear combination of
$\varphi_i$'s. Since $|\!|P_n-f|\!|\to 0$ as $n\to \infty$, we have
$\alpha_n = P_n(1)\to f(1)=0$ and $\alpha_n+\beta_n = P_n(0)\to
f(0)=0$ as $n\to \infty$. Therefore, we have by triangle inequality
\[
|\!|Q_n-f|\!| \le |\!|\alpha_n + \beta_n(1-x)|\!| + |\!|P_n - f|\!|
\le \varepsilon_n + |\alpha_n| + 2|\beta_n| \to 0
\]
as $n\to \infty$. This shows the linear space spanned by
$\{\varphi_n\}_{n=1}^{\infty}$ is dense in ${\cal M}$.
\end{proof}

Combining Lemma \ref{lemma_poly_dense} and Lemma
\ref{lemma_density}, we can by Theorem \ref{theorem_conv} find the
approximate solution of the original variational problem. Indeed,
consider the $n$-th degree approximate solution $y_n = \sum_{k=1}^n
a_{nk}\varphi_k(x)$. For simplicity of notation, we write $a_k$ for
each $a_{nk}$. Then finding the extremum of $J[y_n] =
\int_0^1(y_n'^2-y_n^2-2xy_n)dx$ becomes the minimization of a
function of $n$ variables $a_1$, $a_2$, $\cdots$, $a_n$:
\[
{\arg\min}_{(a_1,\cdots, a_n)\in\mathbb R^n}J[y_n].
\]
Note for $i=1, \cdots, n$, we have
\begin{eqnarray*}
\frac{\partial}{\partial a_i} J[y_n] &=&
\int_0^1\frac{\partial}{\partial a_i}(y_n'^2 - y_n^2 - 2xy_n)dx
\\
&=& 2\int_0^1\left(y_n'\frac{\partial}{\partial
a_i}y_n'-y_n\frac{\partial}{\partial a_i}y_n -
x\frac{\partial}{\partial a_i}y_n\right)dx \\
&=& 2\int_0^1\left(y_n'\varphi_i'(x)-y_n\varphi_i(x) -
x\varphi_i(x)\right)dx \\
&=& 2\left[\sum_{k=1}^na_k\left(\frac{ki}{k+i-1} -
\frac{2ki+i+k}{i+k} + \frac{ik+i+k}{k+i+1} + \frac{2}{k+i+2} -
\frac{1}{k+i+3} \right)-\frac{1}{(i+2)(i+3)}\right].
\end{eqnarray*}
So the extremal $y_n=\sum_{k=1}^na_k\varphi_k(x)$ are given by
solving the linear equation
\[
A \left[\begin{matrix}a_1 \\ a_2 \\ \vdots \\ a_n
\end{matrix}\right] = \left[\begin{matrix}\frac{1}{3\cdot 4} \\ \frac{1}{4\cdot 5} \\ \vdots \\
\frac{1}{(n+2)(n+3)}\end{matrix}\right],
\]
where $A_{ik} = \frac{ki}{k+i-1} - \frac{2ki+i+k}{i+k} +
\frac{ik+i+k}{k+i+1} + \frac{2}{k+i+2} - \frac{1}{k+i+3}$.

Therefore, to find the $n$-th degree approximation of the extremal
$y$ as well as the corresponding extreme $J[y]$, we first solve the
above system of $n$ linear equations to get $y_n$, and then apply
either the quadrature method or an explicit formula to evaluate
$J[y_n]$.

\begin{remark}
Note the matrix $A$ is ill-conditioned as $n$ grows. So we postpone
a numerical illustration of the above approximation to next version
of the solution manual.
\end{remark}
\end{proof}

\noindent 8. \begin{proof}Since on a finite interval,
$L^2$-convergence implies $L^1$-convergence (by the Cauchy-Schwarz
inequality), we can without loss of generality assume the meaning of
``in the mean'' is in $L^2$-sense. By extending $h''(x)$ to an odd
function on $[-\pi, \pi]$ (not necessarily continuous at 0), we can
write $h''(x)$ on $[0,\pi]$ as
\[
h''(x)=\sum_{r=1}^{\infty}A_r\sin(rx),
\]
where $A_r = \frac{2}{\pi} \int_0^{\pi} h''(x)\sin(rx)dx$. Define
\[
h_n(x) = - \sum_{r=1}^n \frac{A_r}{r^2} \sin(rx).
\]
Then $h_n''(x)\to h''(x)$ in $L^2(0,\pi)$ as $n\to \infty$.

Meanwhile, we have (note $h'(0)=0$)
\begin{eqnarray*}
|h_n'(x) - h'(x)| &=& \left|\int_0^xh_n''(\xi)d\xi + h_n'(0) -
\int_0^x h''(\xi)d\xi - h'(0)\right| \\
&\le& \int_0^x|h_n''(\xi)-h''(\xi)|d\xi + |h_n'(0)| \\
&\le& |\!|h_n''-h''|\!|_{L^2(0,\pi)}\sqrt{\pi} + |h_n'(0)|.
\end{eqnarray*}
So $|\!|h_n'-h'|\!|_{L^2(0,\pi)} \le
|\!|h_n''-h''|\!|_{L^2(0,\pi)}\pi + |h_n'(0)|\sqrt{\pi}$. We further
note
\[
h_n'(0)= -\sum_{r=1}^n\frac{A_r}{r} =
-\sum_{r=1}^n\frac{2}{r\pi}\int_0^{\pi}h''(x)\sin(rx)dx =
\sum_{r=1}^n\frac{2}{\pi}\int_0^{\pi}h'(x)\cos(rx)dx.
\]
By extending $h'(x)$ to a continuous even function on $[-\pi, \pi]$,
we can conclude that the Fourier cosine expansion of $h'(x)$ (note
$h(0)=h(\pi)=0$)
\[
\sum_{r=0}^{n}\frac{2}{\pi}\int_0^{\pi}h'(x)\cos(rx)dx \cdot
\cos(rx) = \sum_{r=1}^{n}\frac{2}{\pi}\int_0^{\pi}h'(x)\cos(rx)dx
\cdot \cos(rx)
\]
converges to $h'(x)$ pointwise as $n\to\infty$. In particular,
$h_n'(0) = \sum_{r=1}^{n}\frac{2}{\pi}\int_0^{\pi}h'(x)\cos(rx)dx
\cdot \cos(0\cdot x) \to h'(0)=0$ as $n\to \infty$. Combining with
the inequality  $|\!|h_n'-h'|\!|_{L^2(0,\pi)} \le
|\!|h_n''-h''|\!|_{L^2(0,\pi)}\pi + |h_n'(0)|\sqrt{\pi}$, we
conclude $h_n'(x)\to h'(x)$ in $L^2(0,\pi)$ as $n\to \infty$.

Finally, we have
\[
|h_n(x)-h(x)| =
\left|\int_0^xh_n'(\xi)d\xi+h_n(0)-\int_0^xh'(\xi)d\xi - h(0)\right|
\le \int_0^x|h_n'(\xi)-h'(\xi)|d\xi \le
|\!|h_n'-h'|\!|_{L^2(0,\pi)}\sqrt{\pi}.
\]
So $|\!|h_n-h|\!|_{L^2(0,\pi)} \le  |\!|h_n'-h'|\!|_{L^2(0,\pi)}\pi
\to 0$ as $n\to\infty$.

In summary, we conclude as $n\to\infty$, $h_n\to h$, $h_n'\to h'$,
and $h_n''\to h''$ all in $L^2(0,\pi)$. And $C_r^n =
-\frac{A_r}{r^2} = -\frac{2}{r^2\pi}\int_0^{\pi}h''(x)\sin(rx)dx$ is
clearly independent of $n$.
\end{proof}

\noindent 9. \begin{proof} \begin{eqnarray*}
\left|\int_a^bf_n(x)g_n(x)dx - \int_a^b f(x)g(x)dx \right| & \le & \int_a^b|f_n(x)||g_n(x)-g(x)|dx + \int_a^b |g(x)||f_n(x)-f(x)|dx \\
&\le& \max_{a\le x \le b}|g_n(x)-g(x)| |\!|f_n|\!|_{L^2(a,b)}\sqrt{b-a} + |\!|f_n-f|\!|_{L^2(a,b)}|\!|g|\!|_{L^2(a,b)}.
\end{eqnarray*}
Since $f_n\to f$ in mean, $(|\!|f_n|\!|_{L^2(a,b)})_{n=1}^{\infty}$
is bounded. As $n\to \infty$, the RHS of the above inequality goes
to $0$, so we can conclude $\int_a^bf_n(x)g_n(x)dx \to \int_a^b
f(x)g(x)dx $.
\end{proof}

\subsection{章末习题}

\noindent 1. (1) \begin{proof} $F(x,y,y')=\sqrt{1+y^2y'^2}$. So the
Euler-Lagrange equation becomes
\begin{eqnarray*}
\frac{\partial F}{\partial y} - \frac{d}{dx}\frac{\partial
F}{\partial y'} &=& \frac{yy'^2}{\sqrt{1+y^2y'^2}} -
\frac{d}{dx}\frac{y'y^2}{\sqrt{1+y^2y'^2}} \\
&=& \frac{yy'^2}{\sqrt{1+y^2y'^2}} -
\frac{(y''y^2+2yy'^2)\sqrt{1+y^2y'^2}-y'y^2\frac{2yy'^3+2y^2y'y''}{2\sqrt{1+y^2y'^2}}}{1+y^2y'^2}\\
&=&0.
\end{eqnarray*}
After simplification, we have
$0=y''y+y'^2=(y'y)'=\left(\frac{1}{2}y^2\right)''$. Therefore, the
general solution is $y^2=ax+b$.
\end{proof}

(2) \begin{proof} $F(x,y,y')=y^2+y'^2$, so the Euler-Lagrange
equation becomes
\[
\frac{\partial F}{\partial y} - \frac{d}{dx} \frac{\partial
F}{\partial y'} = 2y - \frac{d}{dx}(2y') = 2y-2y'' = 0.
\]
Therefore $y=ae^x + be^{-x}$.
\end{proof}

(3) \begin{proof} $F(x,y,y') = \frac{x}{x+y'}$. So the
Euler-Lagrange equation becomes
\[
\frac{\partial F}{\partial y} - \frac{d}{dx}\frac{\partial
F}{\partial y'} = \frac{d}{dx}\frac{x}{(x+y')^2} = 0.
\]
So there exists some constant $C$ such that $\frac{x}{(x+y')^2}=C$,
which gives $y' = -x\pm \sqrt{\frac{x}{C}}$. Relabeling the
constant, we have $y' = \frac{3a}{2} x^{1/2}-x$. Hence $y
= ax^{3/2} - \frac{x^2}{2} + b$.
\end{proof}

(4) \begin{proof} $F(x,y,y') = \sqrt{1+x}\sqrt{1+y'^2}$. So the
Euler-Lagrange equation becomes
\[
0-\frac{d}{dx}\left[\sqrt{1+x}\frac{2y'}{2\sqrt{1+y'^2}}\right] = 0.
\]
There must exist a constant $a$ such that
$\sqrt{1+x}\frac{y'}{\sqrt{1+y'^2}}=a$. Solving this equation gives
$y'=\frac{a}{\sqrt{1+x-a^2}}$. Therefore $y = 2a\sqrt{1+x-a^2} + b$.
 \end{proof}

 \noindent 2. \begin{proof}The point on the cone $x^2+y^2=z^2$ can be described by the parametric coordinate $(z\cos\theta, z\sin\theta, z)$ $(0\le \theta
 < 2\pi)$. For any given curve $\gamma$ on the cone, assuming $\gamma$ is parametrized by $\theta$, the length of $\gamma$ is
 \[
\int_{(x_0,y_0)}^{(x_1,y_1)}\sqrt{\left(\frac{dx}{d\theta}\right)^2 +
\left(\frac{dy}{d\theta}\right)^2 +
\left(\frac{dz}{d\theta}\right)^2}d\theta =
\int_{(x_0,y_0)}^{(x_1,y_1)}\sqrt{z^2+2(z')^2}d\theta = \int_{(x_0,y_0)}^{(x_1,y_1)}F(\theta, z,
z')d\theta,
 \]
 where $F(\theta, z, z') = \sqrt{z^2 + 2(z')^2}$. So the
 Euler-Lagrange equation becomes
 \begin{eqnarray*}
 \frac{\partial F}{\partial z} - \frac{d}{d\theta}\frac{\partial F}{\partial
 z'} &=& \frac{z}{\sqrt{z^2+2(z')^2}} - \frac{d}{d\theta}
 \frac{2z'}{\sqrt{z^2+2(z')^2}} \\
 &=& \frac{z}{\sqrt{z^2+2(z')^2}} - \frac{2z''\sqrt{z^2+2(z')^2} -
 2z'\frac{zz'+2z'z''}{\sqrt{z^2+2(z')^2}}}{z^2+2(z')^2} \\
 &=& \frac{z}{\sqrt{z^2+2(z')^2}} - \frac{2z''[z^2+2(z')^2]-2(z')^2(z+2z'')}{[z^2+2(z')^2]\sqrt{z^2+2(z')^2}} \\
 &=& 0.
 \end{eqnarray*}
 Simplifying the above equation, we have
 \[
z[z^2+2(z')^2]-2z''z^2-4(z')^2z'' + 2(z')^2z + 4(z')^2z'' =
z[z^2+4(z')^2-2zz''] = 0.
 \]
 So a non-trivial solution must satisfy the equation $z'' -
 \frac{2}{z}(z')^2-\frac{z}{2} = 0$ or equivalently
 $\frac{z''}{z}-\frac{2(z')^2}{z^2}=\frac{1}{2}$. Note
 $\left(\frac{z'}{z}\right)' = \frac{z''z - (z')^2}{z^2} = \frac{z''}{z} -
 \left(\frac{z'}{z}\right)^2$. Using the substitution $\frac{z''}{z}
 = \left(\frac{z'}{z}\right)' + \left(\frac{z'}{z}\right)^2$, we
 transform the original equation to
 \[
 \left(\frac{z'}{z}\right)' - \left(\frac{z'}{z}\right)^2 =
 \frac{1}{2}.
 \]
 Define $h(\theta) = \frac{z'(\theta)}{z(\theta)} =
 \frac{z'}{z}$, we get a Riccati equation of $h$:
 \[
h' = \frac{1}{2} + h^2.
 \]
 It is well-known that a general Riccati equation can always be reduced to a second order linear ordinary differential equation (see Remark \ref{remark_riccati} below). For our particular case, we use the substitution $h = -\frac{w'}{w}$, then
\[
\frac{1}{2} = h'-h^2 = -\frac{w''}{w}+ \frac{(w')^2}{w^2} - \left(\frac{w'}{w}\right)^2 = - \frac{w''}{w}.
\]
So $w$ satisfies the equation $w''+\frac{1}{2}w = 0$, which has a general solution $w= C_1\cos\frac{\theta}{\sqrt{2}} + C_2\sin\frac{\theta}{\sqrt{2}}$. By using trigonometric identities, we can write $w$ as $\frac{1}{a}\cos\frac{\theta+b}{\sqrt{2}}$. Since $-\frac{w'}{w}=h=\frac{z'}{z}$, $z$ is a constant multiple of $\frac{1}{w}$, and we conclude (absorbing the multiplicative constant into $a$) that $z(\theta) = a\sec\frac{\theta+b}{\sqrt{2}}$.

\begin{remark}\label{remark_riccati}
In mathematics, a Riccati equation is any ordinary differential equation that has the form $y'=q_0(x)+q_1(x)y+q_2(x)y^2$ where $q_0(x)\ne 0$ and $q_2(x)\ne 0$. It can be reduced to a second order linear equation by the following procedure (see, for example, wikipedia). First, use the substitution $v= q_2(x)y$. Then the original equation becomes
\[
v' = v^2 + R(x)v + S(x),
\]
where $S(x) = q_0(x)q_2(x)$ and $R(x) = q_1(x) + q'_2(x)/q_2(x)$. Then substitute $v= -u'/u$ and it follows that $u$ satisfies the linear second order ODE
\[
u''-R(x)u'+S(x)u = 0.
\]
A solution of this equation will lead to a solution $y=-u'/(uq_2(x))$ of the original Riccati equation.
\end{remark}
\end{proof}

\noindent 3. \begin{proof} Points on the cylindrical surface are described by the parametric coordinates $(r\cos\theta, r\sin\theta, z)$ where $r>0$ is a constant and $\theta \in [0, 2\pi)$. For any curve $\gamma$ on the cylindrical surface, assuming $\gamma$ is parametrized by $\theta$ and joins the points with angular coordinates $\theta_0$ and $\theta_1$, then the length of $\gamma$ is
\[
\int_{\theta_0}^{\theta_1}\sqrt{r^2+(z')^2}d\theta = \int_{\theta_0}^{\theta_1}F(\theta, z, z')d\theta,
\]
where $F(\theta, z, z') = \sqrt{r^2 + (z')^2}$ and the Euler-Lagrange equation becomes
\[
\frac{\partial F}{\partial z} - \frac{d}{d\theta}\frac{\partial F}{\partial z'} = - \frac{d}{d\theta}\frac{z'}{\sqrt{r^2+(z')^2}} = - \frac{z''\sqrt{r^2+(z')^2} - z'\frac{z'z''}{\sqrt{r^2+(z')^2}}}{r^2+(z')^2}.
\]
After simplification, we get $z''r^2=0$ and hence $z'' = 0$. This implies $z(\theta) = a+b\theta$ for some constants $a$ and $b$.
\end{proof}

\section{数学物理方程综述}

\subsection{Summary on the classification of second order linear PDE}
Suppose we have a general second order linear partial differential equation
\[
\sum_{i=1}^n\sum_{j=1}^n A_{ij} \frac{\partial^2u}{\partial x_i\partial x_j} + \sum_{i=1}^n B_i \frac{\partial u}{\partial x_i} + C u = F.
\]
If we choose a non-singular change of variable
\[
\begin{cases}
x_1 = x_1(y_1,\cdots, y_n) \\
x_2 = x_2(y_1,\cdots, y_n) \\
\cdots \\
x_n = x_n(y_1,\cdots, y_n),
\end{cases}
\]
then the original equation becomes
\[
\sum_{k=1}^n\sum_{l=1}^n \bar A_{kl} \frac{\partial^2u}{\partial y_k\partial y_l} + \sum_{l=1}^n \bar B_l \frac{\partial u}{\partial y_l} + C u = F,
\]
with
\[
\bar A_{kl} = \sum_{i=1}^n\sum_{j=1}^n A_{ij}\frac{\partial y_k}{\partial x_i}\frac{\partial y_l}{\partial x_j}, \;
\bar B_l = \sum_{i=1}^n\sum_{j=1}^n A_{ij} \frac{\partial^2 y_l}{\partial x_i\partial x_j} + \sum_{i=1}^n B_i \frac{\partial y_l}{\partial x_i}.
\]
In matrix form, we have
\[
\bar A = (\bar A_{kl}) = JAJ^T,
\]
where
\[
J = \frac{\partial(y_1,\cdots,y_n)}{\partial(x_1,\cdots,x_n)}=\left[
\begin{matrix}
\frac{\partial y_1}{\partial x_1} & \frac{\partial y_1}{\partial x_2} & \cdots & \frac{\partial y_1}{\partial x_n} \\
\frac{\partial y_2}{\partial x_1} & \frac{\partial y_2}{\partial x_2} & \cdots & \frac{\partial y_2}{\partial x_n} \\
\cdots & \cdots & \cdots & \cdots \\
\frac{\partial y_n}{\partial x_1} & \frac{\partial y_n}{\partial x_2} & \cdots & \frac{\partial y_n}{\partial x_n} \\
\end{matrix}
\right].
\]
Since $A$ is a real symmetric matrix, we can find an orthogonal matrix $U(=U(x_1,x_2,\cdots,x_n))$ such that $UAU^T$ is a diagonal matrix. Then we obtain $n^2$ equations $\frac{\partial y_i}{\partial x_j} = u_{ij}(x_1,\cdots,x_n)$ $(i, j = 1,\cdots, n)$.

In the special case of $n=2$, we suppose the second order differential operator in the original equation is
\[
Lu \equiv A\frac{\partial^2 u}{\partial x^2} + 2B\frac{\partial^2 u}{\partial x\partial y} + C \frac{\partial^2 u}{\partial y^2}.
\]
Then with the change of variable $\xi = \xi(x,y)$, $\eta=\eta(x,y)$, the operator $Lu$ takes the form
\[
Lu = A_1 \frac{\partial^2 u}{\partial \xi^2} + 2B_1 \frac{\partial^2 u}{\partial\xi\partial\eta} + C_1 \frac{\partial^2u}{\partial \eta^2} + \frac{\partial u}{\partial \xi}L\xi + \frac{\partial u}{\partial \eta}L\eta,
\]
where
\[
A_1 = A\left(\frac{\partial \xi}{\partial x}\right)^2 + 2B\frac{\partial \xi}{\partial x}\frac{\partial \xi}{\partial y} + C\left(\frac{\partial \xi}{\partial y}\right)^2,
\]
\[
B_1 = A\frac{\partial \xi}{\partial x}\frac{\partial \eta}{\partial x} + B \left(\frac{\partial \xi}{\partial x}\frac{\partial \eta}{\partial y}+\frac{\partial \eta}{\partial x}\frac{\partial \xi}{\partial y}\right) + C\frac{\partial \xi}{\partial y}\frac{\partial \eta}{\partial y},
\]
\[
C_1 = A\left(\frac{\partial \eta}{\partial x}\right)^2 + 2B\frac{\partial \eta}{\partial x}\frac{\partial \eta}{\partial y} + C\left(\frac{\partial \eta}{\partial y}\right)^2.
\]
As a consequence, we have $B_1^2-A_1C_1 = \left|\frac{\partial(\xi,\eta)}{\partial(x,y)}\right|^2(B^2-AC)$. Besides Theorem 22.1 and formulas (22.7)-(22.13) in the textbook, we also have the following more explicit results.

1) $B^2 = AC$. In this case, put $k=\frac{B}{A}=\frac{C}{B}$ and we have
\[
A_1 = A\left(\frac{\partial \xi}{\partial x} + k \frac{\partial \xi}{\partial y}\right)^2, \; B_1 = A\left(\frac{\partial \xi}{\partial x} + k\frac{\partial \xi}{\partial y}\right)\left(\frac{\partial \eta}{\partial x}+k\frac{\partial \eta}{\partial y}\right), \; C_1 = A \left(\frac{\partial \eta}{\partial x}+k\frac{\partial \eta}{\partial y}\right)^2.
\]
In order that both $B_1$ and $C_1$ vanish it is sufficient to put
\[
\frac{\partial \eta}{\partial x} + k \frac{\partial \eta}{\partial y} = 0.
\]
For the solution of first order linear PDE, see 丁同仁等\cite{丁同仁等2004}. $\xi$ can be chosen arbitrarily in this case, as far as the change of variable $(x,y)\mapsto(\xi,\eta)$ is non-singular.


2) $B^2 > AC$. First assume $A$ does not vanish (the case $C\ne 0$, $A=0$ can be treated in a similar way; the case $A=C=0$ we shall deal with separately). We put $\xi = x$, $\eta=\varphi(x,y)$. Then the condition that $B_1$ should vanish becomes
\[
A\frac{\partial \eta}{\partial x}+B\frac{\partial \eta}{\partial y}=0.
\]
Solving for $\eta$, we can obtain the canonical form of a hyperbolic second order linear PDE. If $A=C=0$, then the original equation will have a principal term of the form $\frac{\partial^2 u}{\partial x\partial y}$. Using the substitution $\xi=x+y$, $\eta=x-y$, the equation takes the canonical form
\[
\frac{\partial^2 u}{\partial \xi^2}-\frac{\partial^2 u}{\partial \eta^2}+\cdots.
\]

\subsection{Exercise at the end of chapter}

\noindent 1. (1) \begin{proof} Using the notation of Theorem 22.1, we have $a=1$, $b=0$, $c=y$. If $b^2-ac>0$, i.e. $y<0$, the ODE for characteristics is
\[
\frac{dy}{dx}=\pm\sqrt{-y}.
\]
So the two integral curves are $\sqrt{-y}-\frac{x}{2}=C_1$ and $\sqrt{-y}+\frac{x}{2}=C_2$; taking the difference and the sum of these invariants, we may equivalently use $x=C_2-C_1$ and $2\sqrt{-y}=C_1+C_2$ as new variables. Under the change of variable $\xi = x$, $\eta=2\sqrt{-y}$, the equation is simplified to $\frac{\partial^2u}{\partial\xi^2}-\frac{\partial^2u}{\partial \eta^2}=0$. If $b^2-ac<0$, i.e. $y>0$, the ODE for characteristics is
\[
\frac{dy}{dx}=\pm i\sqrt{y}.
\]
By similar argument, we should choose the change of variable $\xi=x$, $\eta=2\sqrt{y}$. Then the original equation is reduced to $\frac{\partial^2 u}{\partial \xi^2}+\frac{\partial^2 u}{\partial \eta^2}=0$.
\end{proof}

(2) \begin{proof} Using the notation of Theorem 22.1, we have $a=1+x^2$, $b=0$, and $c=1+y^2$. So the ODE for characteristics is
\[
\frac{dy}{dx}=\pm i \sqrt{(1+x^2)(1+y^2)}.
\]
Writing it in the form of separated variables, we have
\[
\frac{dy}{\sqrt{1+y^2}} = \pm i \frac{dx}{\sqrt{1+x^2}}.
\]
We can solve these equations to obtain the change of variable
\[
\begin{cases}
\xi=\operatorname{arcsinh} x\\
\eta=\operatorname{arcsinh} y,
\end{cases}
\]
under which the original equation is reduced to $\frac{\partial^2 u}{\partial \xi^2}+\frac{\partial^2 u}{\partial \eta^2}=0$.
\end{proof}

(3) \begin{proof} Using the notation of Theorem 22.1, we have $a=\tan^2x$, $b=-y\tan x$, and $c=y^2$. Then $\Delta = b^2 - ac = 0$. So the ODE for characteristics is
\[
\frac{dy}{dx} = -\frac{y}{\tan x},
\]
which has the general solution $y\sin x = C$. Let $\xi = y\sin x$ and $\eta = y\cos x$. The original equation is simplified to
\[
(\xi^2 + \eta^2) \frac{\partial^2 u}{\partial \eta^2} - \xi \frac{\partial u}{\partial \xi} + \eta \frac{\partial u}{\partial \eta} = 0.
\]

\end{proof}

(4) \begin{proof} Using the notation of Theorem 22.1, we have $a=1$, $b=-\sin x$, $c=-\cos^2x$. So the ODE for characteristic becomes
\[
\frac{dy}{dx} = -\sin x \pm 1,
\]
which has the general solutions $y = \cos x \pm x + C$. Calculation shows the following change of variable simplifies the original equation most
\[
\begin{cases}
\xi = x+y-\cos x \\
\eta = x-y+\cos x,
\end{cases}
\]
under which the original equation becomes $\frac{\partial^2 u}{\partial \xi \partial \eta}=0$.
\end{proof}

\noindent 2. (1) \begin{proof}
\[
\frac{\partial u}{\partial x} = e^{-(ax+by)}\left[-av(x,y)+\frac{\partial v}{\partial x}\right], \;
\frac{\partial^2 u}{\partial x^2} = e^{-(ax+by)}\left\{-a\left[-av+\frac{\partial v}{\partial x}\right] - a\frac{\partial v}{\partial x} + \frac{\partial^2 v}{\partial x^2}\right\}.
\]
By symmetry, we have
\[
\frac{\partial u}{\partial y} = e^{-(ax+by)}\left[-bv(x,y)+\frac{\partial v}{\partial y}\right], \;
\frac{\partial^2 u}{\partial y^2} = e^{-(ax+by)}\left\{-b\left[-bv+\frac{\partial v}{\partial y}\right] - b\frac{\partial v}{\partial y} + \frac{\partial^2 v}{\partial y^2}\right\}.
\]
Therefore
\begin{eqnarray*}
& & \bigtriangledown^2 u + 2a\frac{\partial u}{\partial x} + 2b\frac{\partial u}{\partial y} \\
&=& e^{-(ax+by)}\left[(a^2+b^2)v -2a\frac{\partial v}{\partial x} -2b\frac{\partial v}{\partial y} + \bigtriangledown^2 v -2(a^2+b^2)v + 2a\frac{\partial v}{\partial x} + 2b\frac{\partial v}{\partial y}\right] \\
&=& e^{-(ax+by)}\left[ \bigtriangledown^2 v -(a^2+b^2)v\right].
\end{eqnarray*}
So the original equation is transformed into $\bigtriangledown^2 v -(a^2+b^2)v=0$.
\end{proof}

(2) \begin{proof} Follow the hint and use the transformation $u(x,y)=e^{-ax+by}v(x,y)$.\end{proof}

(3) \begin{proof} Let $u(x,y,t)=v(x,y,t)h(\alpha,\beta,\gamma,x,y,t)$, where $h(\alpha,\beta,\gamma,x,y,t)=e^{\alpha x + \beta y + \gamma t}$.
Then it's easy to see
\[
\frac{\partial u}{\partial x} = h\left(\alpha v+\frac{\partial v}{\partial x}\right), \; \frac{\partial u}{\partial y} = h\left(\beta v+\frac{\partial v}{\partial y}\right), \; \frac{\partial u}{\partial t} = h\left(\gamma v + \frac{\partial v}{\partial t}\right),
\]
and
\[
\frac{\partial^2 u}{\partial x^2} = h\left[\alpha\left(\alpha v + \frac{\partial v}{\partial x}\right) + \alpha\frac{\partial v}{\partial x} + \frac{\partial^2 v}{\partial x^2}\right],\; \frac{\partial^2 u}{\partial y^2} = h\left[\beta\left(\beta v + \frac{\partial v}{\partial y}\right) + \beta\frac{\partial v}{\partial y} + \frac{\partial^2 v}{\partial y^2}\right],
\]
\[
\frac{\partial^2 u}{\partial x\partial y} = h\left[\beta\left(\alpha v + \frac{\partial v}{\partial x}\right)+\alpha\frac{\partial v}{\partial y} + \frac{\partial^2 v}{\partial x\partial y}\right].
\]
Therefore
\begin{eqnarray*}
& & h^{-1}\left[a\frac{\partial^2 u}{\partial x^2} + 2b \frac{\partial^2 u}{\partial x\partial y} + c\frac{\partial^2 u}{\partial y^2} + d\frac{\partial u}{\partial x} + e\frac{\partial u}{\partial y} + fu - \frac{\partial u}{\partial t} \right]\\
&=& (a\alpha^2 + c\beta^2 + 2b\alpha\beta + d\alpha + e\beta )v + (2a\alpha + 2b\beta + d)\frac{\partial v}{\partial x} + (2c\beta + 2b\alpha + e)\frac{\partial v}{\partial y} + (f-\gamma) v \\
& & + a\frac{\partial^2 v}{\partial x^2} + 2b \frac{\partial^2 v}{\partial x\partial y} + c\frac{\partial^2 v}{\partial y^2} - \frac{\partial v}{\partial t}.
\end{eqnarray*}
Solving the equation
\[
2
\left[
\begin{matrix}
a  & b \\
b & c
\end{matrix}
\right]
\left[
\begin{matrix}
\alpha \\
\beta
\end{matrix}
\right] = - \left[
\begin{matrix}
d \\
e
\end{matrix}
\right],
\]
we have $\alpha = \frac{be-cd}{2(ac-b^2)}$, $\beta = \frac{bd-ae}{2(ac-b^2)}$ (assuming $ac-b^2\ne 0$). Note
\[
a\alpha^2 + c\beta^2 + 2b\alpha\beta + d\alpha + e\beta = [\alpha, \beta] \left[\begin{matrix}a & b \\ b & c\end{matrix}\right]\left[\begin{matrix}\alpha \\ \beta \end{matrix} \right] + [\alpha, \beta]\left[\begin{matrix}d \\ e \end{matrix} \right]
= -\frac{1}{2}[\alpha, \beta]\left[\begin{matrix}d \\ e \end{matrix} \right] + [\alpha, \beta]\left[\begin{matrix}d \\ e \end{matrix} \right] = \frac{1}{2}(d\alpha + e\beta).
\]
So by choosing the above $\alpha$ and $\beta$, and by setting $\gamma = f + \frac{1}{2}(d\alpha + e\beta)$, we can transform the original equation into the form
\[
a\frac{\partial^2 v}{\partial x^2} + 2b\frac{\partial^2 v}{\partial x\partial y} + c\frac{\partial^2 v}{\partial y^2} = \frac{\partial v}{\partial t}.
\]

\begin{remark}
The coefficient of $v$ does not vanish for the above $\alpha$ and $\beta$ in general: it equals $\frac{1}{2}(d\alpha+e\beta)$. This is why $\gamma$ must be chosen as $f+\frac{1}{2}(d\alpha+e\beta)$ rather than $f$.
\end{remark}
\end{proof}

\noindent 3. \begin{proof}
By Example 13.1 (p172), the general solution of \[\frac{\partial^2 u}{\partial t^2} - a^2 \frac{\partial^2 u}{\partial x^2}=0\] can be written as $f(x+at)+g(x-at)$. Using the boundary condition, it's easy to see $f(x) = \phi\left(\frac{x}{2}\right) - g(0)$ and $g(x) = \psi\left(\frac{x}{2}\right) - f(0)$. So the solution must be
\[
\phi\left(\frac{x+at}{2}\right) + \psi\left(\frac{x-at}{2}\right) - \phi(0).
\]
\end{proof}


\noindent 4. (1) \begin{proof}
\[
\frac{\partial u}{\partial x} = -e^{-x}\sin y,\; \frac{\partial^2 u}{\partial x^2} = e^{-x} \sin y, \; \frac{\partial u}{\partial y} = e^{-x}\cos y, \; \frac{\partial^2 u}{\partial y^2} = -e^{-x}\sin y.
\]
So $\bigtriangledown^2 u = 0$. $u|_{y=0}=x$ is obvious.
\end{proof}

(2) \begin{proof}Let $(x,y)$ approach $(0,1)$ along the $y$-axis, we have
\[
u(x,y) = u(0,y) = \frac{1}{1-y^2} \to \infty.
\]
So as long as $u(x,y)$ has a definite value at $(0,1)$, it cannot be continuous at $(0,1)$. The case of $(0,-1)$ can be proved similarly.
\end{proof}

\begin{appendix}
\section{The Black-Scholes partial differential equation}

In mathematical finance, the following PDE appears in the derivation
of the Black-Scholes call option pricing formula ($K>0$):
\[
\begin{cases}
c_t(t,x) + rxc_x(t,x)+\frac{1}{2}\sigma^2 x^2c_{xx}(t,x)=rc(t,x),\;
t\in [0, T), \; x\ge 0 \\
c(T,x)=(x-K)^+,\\
c(t,0)=0,  \;t\in [0, T], \\
\lim_{x\to \infty}[c(t,x)-(x-e^{-r(T-t)}K)] = 0, \; t\in [0, T].
\end{cases}
\]
Many more PDEs important in mathematical finance can be found in
Kohn \cite{Kohn03}. For now, we focus on the explicit solution of
the Black-Scholes PDE using the techniques from the current
textbook. ``{\it How do you derive and solve the Black-Scholes PDE}"
is a frequently asked question in Wall Street job interviews. The
presentation below is essentially that of Wilmott et al.
\cite{WHD95}, \S 5.4.

\subsection{Derivation of the Black-Scholes PDE and its boundary
conditions}

For the derivation of the main PDE, we refer to Shreve
\cite{Shreve04b}, \S 4.5.3 (see also Remark \ref{remark_BS_PDE});
for the derivation of the boundary conditions, we refer to Shreve
\cite{Shreve04b}, \S 4.5.4.

\subsection{Simplification of the Black-Scholes PDE via change-of-variable}

The first step of simplification is to convert
$x\frac{\partial}{\partial x}$ and $x^2\frac{\partial^2}{\partial
x^2}$ to $\frac{\partial}{\partial \eta}$ and
$\frac{\partial^2}{\partial \eta^2}$, respectively, via some change
of variable $x= f(\eta)$. This is the trick presented in \S 13.4 of
the textbook. With  the change-of-variable $x = e^{\eta}$ and $w(t,\eta)=c(t,e^{\eta})$, we have
\[
w_{\eta}(t,\eta)=x c_x(t,x),\;
w_{\eta\eta}(t,\eta)=xc_x(t,x)+x^2c_{xx}(t,x).
\]
So the original PDE becomes
\[
w_t + \left(r-\frac{1}{2}\sigma^2\right)w_{\eta} +
\frac{1}{2}\sigma^2 w_{\eta\eta} = rw, \; -\infty < \eta < \infty,
\; t\in [0, T).
\]Or equivalently,
\[
w_{\eta\eta} + (k-1)w_{\eta} + \frac{1}{\frac{1}{2}\sigma^2}w_t =
kw,
\]
where $k=r/\frac{1}{2}\sigma^2$.

The second simplification is to convert the equation into a form as
close as possible to the standard heat equation
\[
\frac{\partial v}{\partial \tau} - \alpha \Delta v = [...],
\]
which means we need to normalize the coefficient of $w_t$ to $-1$.
Define $v(\tau,\eta) = v(\tau(t),\eta)=w(t,\eta)$. Then
\begin{eqnarray*}
& &
w_{\eta\eta}(t,\eta)+(k-1)w_{\eta}(t,\eta)+\frac{1}{\frac{1}{2}\sigma^2}w_t(t,\eta)
- kw(t,\eta)\\
&=& v_{\eta\eta}(\tau, \eta) + (k-1) v_{\eta}(\tau,\eta) +
\frac{1}{\frac{1}{2}\sigma^2}\frac{d\tau}{dt}v_{\tau}(\tau,\eta)-kv(\tau,\eta).
\end{eqnarray*}
So we want to set $\tau = \frac{1}{2}\sigma^2 (T-t)$. Then
$v(\tau,\eta)$ satisfies the following PDE:
\[
v_{\tau} = v_{\eta\eta} + (k-1) v_{\eta} - k v,\;
-\infty<\eta<\infty,\; 0< \tau \le \frac{1}{2}\sigma^2 T.
\]

The third step of simplification is to remove the first order
differential operator $\frac{\partial }{\partial \eta}$ so that we
have the standard form of heat equation. To do so, we use the trick
introduced in Exercise Problem 2 of Chapter 22 of the textbook. More
precisely, rewriting the PDE for $v$ in the following form
\[
v_{\tau} + kv = v_{\eta\eta} + (k-1)v_{\eta},
\]
and motivated by the exponential integrating factor employed in
solving first order ODE, we try the function $u(\tau,\eta) =
e^{\alpha \eta+ \beta \tau}v(\tau,\eta)$. Then
\[
u_{\eta} = e^{\alpha\eta+\beta \tau}(\alpha v + v_{\eta}), \;
u_{\eta\eta} = e^{\alpha\eta+\beta \tau}(\alpha^2v + \alpha v_{\eta}
+ \alpha v_{\eta} + v_{\eta\eta}), \; u_{\tau} = e^{\alpha\eta +
\beta \tau}(\beta v + v_{\tau}).
\]
So we have
\[
u_{\tau} - u_{\eta\eta} = e^{\alpha \eta + \beta t}[v_{\tau} +
(\beta -\alpha^2) v - 2\alpha v_{\eta} - v_{\eta\eta}].
\]
Comparing with the PDE for $v$:
$v_{\tau}+kv-(k-1)v_{\eta}-v_{\eta\eta}=0$, we want to set $\alpha =
\frac{k-1}{2}$ and $\beta = \alpha^2 + k =\frac{1}{4}(k+1)^2$.

In summary,  $u$ satisfies the PDE \[
\begin{cases}
u_{\tau}(\tau,\eta) = u_{\eta\eta}(\tau,\eta), \;-\infty < \eta < \infty,
\; 0<\tau \le \frac{1}{2}\sigma^2 T, \\
u(0,\eta)=\left(e^{\frac{k+1}{2}\eta}-Ke^{\frac{k-1}{2}\eta}\right)^+,\\
\lim_{\eta\to-\infty}u(\tau,\eta) = 0, \; \tau \in [0,
\frac{1}{2}\sigma^2 T],\\
\lim_{\eta\to\infty}\left[e^{-\frac{k-1}{2}\eta -
\frac{(k+1)^2}{4}\tau}u(\tau,\eta)-(e^{\eta}-Ke^{-k\tau})\right]
= 0, \; \tau \in [0, \frac{1}{2}\sigma^2 T].
\end{cases}
\]
and $u$ is related to $c(t,x)$ in the following way
\[
u(\tau,\eta) = e^{\frac{k-1}{2}\eta + \frac{(k+1)^2}{4}\tau}
v(\tau,\eta) = e^{\frac{k-1}{2}\eta + \frac{(k+1)^2}{4}\tau}
w\left(T-\frac{\tau}{\frac{1}{2}\sigma^2},\eta\right)=e^{\frac{k-1}{2}\eta + \frac{(k+1)^2}{4}\tau}
c\left(T-\frac{\tau}{\frac{1}{2}\sigma^2},e^{\eta}\right),
\]or equivalently
\[
c(t,x) = u\left(\frac{1}{2}\sigma^2(T-t),\ln x\right)e^{-\frac{k-1}{2}\ln x - \frac{(k+1)^2}{4}\frac{1}{2}\sigma^2(T-t)}.
\]


\begin{remark}\label{remark_BS_PDE}
We have used change-of-variable throughout to simplify the
Black-Scholes PDE. There is an observation that can simplify the PDE
to begin with. Recall during the
derivation of the Black-Scholes PDE, we used the fact that under the
risk-neutral measure, the discounted call option price
$e^{-rt}c(S_t, t)$ should be a martingale, where the underlying
asset price process $S_t$ satisfies the SDE $dS_t = rS_tdt + \sigma
S_t d\widetilde W_t$ ($\widetilde W_t$ is a Brownian motion under risk-neutral measure). Note $X_t = e^{-rt}S_t$ is a martingale
satisfying the SDE $dX_t = \sigma X_t d\widetilde W_t$, so instead of writing $c$ as a function of $S_t$ and $t$, we suppose $c$ is a function
of $X_t$ and $t$. Then
\[
d[e^{-rt}c(X_t,t)] =
e^{-rt}\left[-rc+c_t+\frac{1}{2}\sigma^2x^2c_{xx}\right]dt +
\mbox{martingale part}.
\]
So the PDE has a simpler form: $c_t + \frac{1}{2}\sigma^2 x^2c_{xx}
= rc$. To remove $x^2$, we still need to set $x=e^{\eta}$ and
$x^2\frac{\partial^2}{\partial x^2}$ becomes
$\frac{\partial^2}{\partial \eta^2} - \frac{\partial}{\partial
\eta}$. The resulting expression is not really as simple as we
hoped, since first order differential operator still persists. But
the coefficients become simpler. The rest of the simplification
should proceed as before.
\end{remark}

\subsection{Solution of the simplified PDE and the Black-Scholes call option pricing formula}

There are many methods to solve the initial value problem of heat equation over an infinite line. For example, we could use Fourier's
transform. However, due to the messy form of the boundary value function, which is probably
not easy for inverse Fourier transform, we employ the method of
Green's function. Recall the fundamental solution of the equation $u_t = u_{\eta\eta}$ is
\[
\frac{1}{2\sqrt{\pi \tau}}e^{-\frac{x^2}{4\tau}}.
\]
So $u(\tau,\eta)$ can be obtained through the convolution formula:
\begin{eqnarray*}
u(\tau,\eta) &=& \frac{1}{2\sqrt{\pi \tau}}\int_{-\infty}^{\infty}u(0,x)e^{-\frac{(\eta-x)^2}{4\tau}}dx \\
&=&  \frac{1}{2\sqrt{\pi \tau}}\int_{-\infty}^{\infty}\left(e^{\frac{k+1}{2}x}-Ke^{\frac{k-1}{2}x}\right)^+e^{-\frac{(\eta-x)^2}{4\tau}}dx.
\end{eqnarray*}
After some tedious calculation, we can get the Black-Scholes call option pricing formula
\[
c(S_t, t) = S_t N(d_1) - K e^{-r(T-t)}N(d_2),
\]
where
\[
d_1 = \frac{\log\frac{S_t}{K} + \left(r+\frac{1}{2}\sigma^2\right)(T-t)}{\sigma\sqrt{T-t}}, \;
d_2 = \frac{\log\frac{S_t}{K} + \left(r-\frac{1}{2}\sigma^2\right)(T-t)}{\sigma\sqrt{T-t}}.
\]
\begin{remark}
An alternative, and much easier, method, is via the Feynman-Kac
formula, see {\O}ksendal \cite{Oksendal95} for details. It corresponds directly to the so-called risk-neutral pricing methodology.
\end{remark}

\section{漫谈数学物理方法和特殊函数}

我在准备这本习题解答的过程中，查阅了一些相关的资料，自然而然地产生了以“博览”的形式，介绍相关著述的想法。与此同时，也想介绍一些有用的数值分析参考书，使得数学物理方法的训练，一开始就建立在理论和实践的有机结合之上。

用“博览”的方式先大致了解一个领域，然后再逐步深入的读书法，有两个现成的例子可供参考。一个是毛泽东同志：“于是决定为学之道，先博而后约，先中而后西，先普通而后专门。质之吾兄，以为何如？前者已矣，今日为始。昔吾好独立蹊径，今乃知其非。学校分数奖励之虚荣，尤所鄙弃。今乃知其不是。尝见曾文正公家书有云：‘吾阅性理书时，又好作文章；作文章时，又参以他务，以致百不一成。’此言岂非金玉！吾今日舍治科学，求分数，尚有何事？别人或谓退化，吾自谓进化也。”——《毛泽东早期文稿（1912.6-1920.11）》·1915年6月25日致湘生信\cite{中共中央文献研究室2008}

另一个例子是数学家丘成桐先生：“我如饥似渴地从他们处学习不同的科目。从早上八时到下午五时我都在上课﹙有时在班上吃午饭﹚。这些学科包括拓朴、几何、微分方程、李群、数论、组合学、概率及动力系统。我并非科科都精通，但对某几门学问格外留神。学拓朴时，发现跟以前学的完全不同，班上五十人，每个人看来都醒目在行，比我好多了。他们表现出色，说话条理分明。于是我埋首做好功课，不久之后，我发现自己毕竟也不赖。关键是做好所有棘手的题目，并把这些题目想通想透。”——《丘成桐：我的数学之路》\cite{丘成桐}

至于数值方法的用途和意义，则可以参考以下例子：
\newline $\bullet$ 诺贝尔物理学奖得主Gerard't Hooft 的评论：Even the pure sang theorist may be interested in some aspects of computational physics \cite{Hooft}.
\newline $\bullet$ Kenneth Judd所著{\it Numerical Methods in Economics} \cite{Judd1998}一书的第1章。
\newline $\bullet$ 欧盟用数值模拟来研究宏观经济政策影响的一个项目：The Eurace Project \cite{DVD2008}。

\bigskip

下面按照吴崇试\cite{吴崇试2003}一书的章节顺序，逐一介绍相关参考资料。

\medskip

{\bf 第一章：复数和复变函数}。这一章是介绍复数概念，没有什么难的地方，习题也平淡无奇，所以习题解答直接省略了这章习题。但需要说明一下对复数的两种不同看法。一种是纯粹数学家的公理化观点：复数是数学家创造的一个“概念”；用公理化的观点看，就是给定了一个集合，然后在这个集合上赋予一套自洽的运算，随后就可以进行逻辑自洽的推导了。另外一种看法就是建模的观点：复数是一个“模型”，从物理和工程的角度讲，它被发明出来是为了便于描述现实世界的某些现象（比如交流电的变化），它的运算也对应于一定的物理运动。

第一种观点可以在方企勤编著的《复变函数教程》\cite{方企勤1996}的开篇找到痕迹：复数是二元数组的集合以及在这个集合上定义的一组运算。第二种观点则在几乎所有的物理工程类教材中都可以找到。

这两种看法反映了同一事物的不同侧面。我们需要注意的是，当我们论证复变函数的性质时，是在严格遵循“公理$\rightarrow$定义$\rightarrow$定理”的路径；但当我们使用复变函数来描述自然现象的时候，又是把抽象的观念和具体的物理现象做了一个一一对应。所以“物理直观不等同于数学上的严格证明，无论它多么显然”。我们在这两个世界之间切换的时候，不要自己把自己搞迷糊了。

\medskip

{\bf 第二章：解析函数}。这一章介绍解析函数的定义与基本性质，是经典的题目、经典的讲法。章末练习的第8至第14题略过了，因为都是很简单的题目，而且课本后面已给出答案了。

在此做几点补充说明。第一，这一章讲解了一下多值函数。多值函数的用途在后面利用柯西积分算各种特殊定积分的时候就会显现。简而言之，我们用黎曼面定义了一个抽象的拓扑空间，来作为多值函数的定义域空间，从而使得多值函数再次成为“普通”的单值函数。教科书上用了一些图片来把这些抽象的拓扑空间从视觉上直观化，但严格讲来，它们还需要数学上的严格定义。这是“黎曼曲面”这个课程通常研究的对象。作为物理工程类的学生来说，只要记住“黎曼面是一个抽象的拓扑空间，以便我们把多值函数变成单值函数，方便我们做计算”就行了。此处可以看到前面所提两种不同世界观的差异：对于物理学家和工程师来说，工作已经结束了；对于数学家来说，工作才刚刚开始。关于黎曼面的一个粗浅介绍，可参考Gong \cite{Gong2007} Chapter 4 Appendix以及美国一所大学的本科复分析课程的补充笔记: Mathematics 418 Functions of One Complex Variable (Spring 2003), Notes on Riemann Surfaces, \href{https://www.quantsummaries.com/Notes_on_Riemann_surfaces_1.pdf}{part 1} and \href{https://www.quantsummaries.com/Notes_on_Riemann_surfaces_2.pdf}{part 2}。

第二，读书可以求全责备，希望一本书尽善尽美，以后需要学习或者查阅什么内容，只看这一本书就行了。读书也可以以一本较好的教材或者专著为主，以其他书籍、文章、笔记为辅，尤其注重用一个个独立的模块为自己的知识体系打补丁升级。这是不同哲学观在学习方法上的反映。后一种学习方式有一个不错的网站Connexions可供借鉴：\href{http://cnx.org/}{http://cnx.org/}。 这个网站的宗旨是
Connexions is:
a place to view and share educational material made of small knowledge chunks called modules that can be organized as courses, books, reports,etc.

对于把“知识分割成小模块逐步吸收”这种学习方式还有疑虑的读者，可以参阅纽约大学柯郎研究所的退休教授Peter Lax的著作Functional Analysis \cite{Lax2002}。Lax 教授是世界著名的数学家，沃尔夫终身成就奖获得者（陈省身先生也曾获此殊荣）。他的这本泛函分析教材很有特色，每一章节通常不超过6 页，因为他认为这大约是一次阅读而不使大脑疲劳的最大限度。

第三，读书通常有“见树”与“见森林”的说法。直观上讲，“见树”无非就是“格物致知”，具体到数学的学习，就是每个证明或者计算都搞清楚，离开书本也能自行推导。而“见森林”无非就是看大脉络：“从哪里来，来解决什么问题，又到哪里去”。以前面黎曼面的概念为例子。“见树”就是要理解黎曼面的构建和性质。“见森林”则是理解发明它的动机，它能够解决什么问题，以及它和数学其它分支的联系。“见树”是“见森林”的基础，使得我们可以只看大脉络而不看细节。“见森林”又是“见树”的动力和指导原则，使得我们在格物致知的时候，知道什么是重要的，什么是可以轻轻放过、以后再细究的。华罗庚先生将之总结为“把厚书读薄”：“应该怎样学会读书呢？在对书中每一个问题都经过细嚼慢咽，真正懂得之后，就需要进一步把全书各部分内容串连起来理解，加以融会贯通，从而弄清楚什么是书中的主要问题，以及各个问题之间的关联。这样我们就能抓住统帅全书的基本线索，贯串全书的精神实质。我常常把这种读书过程，叫做‘从厚到薄’的过程。愈是懂得透彻，就愈有薄的感觉。这是每个科学家都要经历的过程。”——华罗庚：《学·思·锲而不舍》


\medskip

{\bf 第三章：复变积分}。这一章的核心内容是柯西积分定理和柯西积分公式。后者其实是前者的推论，而前者则来自于解析函数的可微性对实部和虚部同时提出了要求，使得可以用格林定理确立一些很强的性质，这个思路是这一章的主线。

一个有意思的问题如下：微积分里面有中值定理 $f(a) - f(b) = f'(c)(a - b)$。对于复可微的函数，这一条还对吗？如果不对，我们在实际工作中往往需要估计 $|f(a) - f(b)|$的大小，该如何估计？

Qazi \cite{Qazi2006}对第一个问题给出否定的回答，并给出了反例。对于第二个问题，使用柯西积分公式就可以达到类似效果。所以解析函数一定是（局部）李普西茨的。

\medskip

{\bf 第四章：无穷级数}。这一章是为解析函数的一个等价定义做准备，与第五章密不可分。有兴趣的读者可以查阅历史上维尔斯特拉斯使用这种等价定义的来龙去脉。

这一章结尾处讲了一点发散级数和渐进分析。个人感觉没有讲透，故列举一些书中提过的和没提过的参考资料供读者参考：
\newline $\bullet$ G. H. Hardy. {\it Divergent Series} \cite{Hardy2000}.
\newline $\bullet$ R. Wong. {\it Asymptotic Approximations of Integrals} \cite{Wong14}（美国工业与应用协会“经典应用数学丛书”中的一本）。
\newline $\bullet$ N. G. de Bruijn. {\it Asymptotic Methods in Analysis} \cite{deBruijn2010}.
\newline $\bullet$ A. Erdelyi. {\it Asymptotic Expansions} \cite{Erdelyi2010}.
\newline $\bullet$ R. B. Dingle. {\it Asymptotic Expansions: Their Derivation and Interpretation} \cite{Dingle1973}.

\medskip

{\bf 第五章：解析函数的局域性展开}。在第四章完成相关的准备工作之后，这一章开始系统讲解析函数的一个等价定义：级数展开。这个等价定义的好处是便于我们研究解析函数的奇点和零点——例如，如果不用级数展开这种“语言”，则这一章的很多结果不但不好证明，就是直观上也不那么显然（例如解析函数的零点孤立定理和唯一性定理）。从哲学的观点看，这是“形式决定内容”的一个具体例子。在语言学里，则对应于“你的词汇表圈定了你所能表达的思想”。

在数学史上，这样的例子层出不穷：一个数学对象，在大家研究它很久之后，突然发现其实还有另外一种观点来看待它，从而引发了一系列新的进展。复分析的发展过程中，就曾经有过三种等价的看法：从分析的角度（可微性的定义），从级数展开的角度（这一章的局域性展开），以及从几何的角度（黎曼面）。

这章末尾的解析延拓个人觉得讲得还不够透彻。还有就是省略了维尔斯特拉斯分解定理和米塔格-列夫勒定理。这两个定理的威力在于为函数的各种展开形式提供了一个统一的方法，从而把欧拉在 {\it Introduction to Analysis of the Infinite} \cite{Euler1988}一书里让人眼花缭乱的变换，用一个统一的观点统摄了起来，达到了化繁为简的目的。这两个定理的讲解，可参见龚昇先生的著作\cite{Gong2007}。

\medskip

{\bf 第六章：二阶线性常微分方程的幂级数解法}。这章是幂级数的应用，背景是物理学中出现的各种常微分方程，也是引出各种特殊函数的源泉。习题比较繁琐，计算量较大。读者如果之前学习过理论上更漂亮的数学，可能会觉得这些习题意义不大。不过笔者认为这是理工科学习的一个必经训练，其实是对意志品质的一个磨练。

其次，用幂级数方法解常微分方程，不可避免地涉及到解递归方程（recurrence equation）。这本教材里的递归方程大多是齐次的，有很简单的解法。但对于非齐次的就麻烦一点。我个人偏好用生成函数的办法对齐次和非齐次的情形产生一个统一的解法。普林斯顿大学计算机系的一门离散数学课程有一个笔记对此作了系统总结：
\href{https://www.quantsummaries.com/Solving_recurrence_relations.pdf}{Solving recurrence relations}.

最后，在对线性方程求解的时候，教材里面使用了一点线性空间的基的概念。这个概念在线性微分方程解空间的应用可参阅丁同仁、李承治合著的《常微分方程教程》一书\cite{丁同仁等2004}。

\medskip

{\bf 第七章：留数定理及其应用}。这一章的内容大概是数学物理方法里面关于复分析的高潮了。高潮在于我们真正看到了复分析解决问题的威力：以前对各种稀奇古怪的定积分的求解，在数学分析课程里面需要依赖于含参变量的积分，对技巧性要求较高。现在则是有了一个统一的、甚至有些机械的方法：留数定理。同时我们也看到了多值函数的定义的确是有必要的：一些计算因此有了意义。

关于参考书的几点说明。首先，Lax和Zalcman合著的{\it Complex Proofs of Real Theorems} \cite{LZ2011}一书展示了复分析在解决一些经典问题上的威力。其次，Whittaker和Watson合著的{\it A Course of Modern Analysis} \cite{WW1927} 一书写作于数学公理化运动（尤其是布尔巴基学派）彻底改变现代数学的表述形式之前，从而较好地展现了数学家、尤其是英国分析学派是如何分析思考问题的（与此类似的是法国数学家古尔沙（Goursat）的《数学分析教程》\cite{Goursat2013}）。

由此引出数学该如何讲授的问题。不少成名的数学家都对现代数学一本正经、上来就是讲大套理论的做法不以为然。例如证明对数索伯列夫不等式并曾担任《泛函分析杂志》主编的数学家Leonard Gross教授，他讲授的泛函分析就很有特色，不是一上来就讲授一个成熟完美的数学理论，而是以一种探索的态度摸索数学：“假定我不懂什么抽象理论，我作为一个研究数学物理的人，对薛定谔微分算子很有兴趣。今天我们就来看看能不能七拼八凑地把这个算子折腾明白”。

这和国内教材和教授讲课的情形形成鲜明的对比。我认为这除了个人风格以外，一个重要区别就是讲课人是否真地吃透了讲授内容，是否真地在活生生地使用这些内容，是否真地对讲授内容的来龙去脉有自己的理解。

\medskip

{\bf 第八章：伽玛函数}。对于伽玛函数的很多实用公式，可参阅\cite{WW1927}一书。

\medskip

{\bf 第九章：拉普拉斯变换}。丁同仁、李承治合著的《常微分教程》一书有两个版本。其中拉普拉斯变换在第一版\cite{丁同仁等1990}第六章第4节有讲述，但不知为何在第二版\cite{丁同仁等2004}中删去了。

\medskip

{\bf 第十章：德尔塔函数}。这一章的核心，是理解德尔塔函数所引出的格林函数法是如何有效地解决实际问题。

\medskip

{\bf 第十一章：Mathematica中的复变函数}。这一章没有习题，主要是介绍Mathematica这个软件的使用。这个软件的好处是能够做符号计算，也即可以帮我们推公式。它背后的支撑是各种各样的数学用表—— 以前我们需要手动查公式的地方，现在可以用Mathematica自动化操作了。一方面，这让数学家们的重要性下降了；另一方面，又把数学家们解放了出来去做更重要的工作。

\medskip

作为复分析最后的补充，笔者提一下Driscoll和Trefethen合著的{\it Schwarz-Christoffel Mapping}一书\cite{DT2002}。笔者不是相关领域的专家，但依稀记得共形映射（conformal mapping）在流体力学和电磁力学中很有用，因为它可以把不规则区域映射为性质很好的规则区域，从而把不规则区域上的偏微分方程变换为定义在规则区域上的偏微分方程，以便于求解。这种变换的一个系统方法就是Schwarz-Christoffel mapping。 该书作者之一的Lloyd Trefethen是牛津大学著名的数值分析学家。他在该书中为各种区域之间的共形变换提供了具体的公式和数值方法，非常实用。希望这本短小精悍的专著能够对中国的工程技术人员有所帮助。

\medskip

现在谈谈《数学物理方法》这本教材的第二部分：数学物理方程。这一部分用了近两百页的篇幅诠释了四个字：{\bf “变换”、“逼近”}。说数学物理方法只有这两条当然是不对的。但从这两个观点看过去，确实可以统摄一批工具和技巧。

\medskip

{\bf 第十二章：数学物理方程和定解条件}。这一章是搭建表演的舞台，没有什么可以多说的。

\medskip

{\bf 第十三章：线性偏微分方程的通解}。这一章的方法有个明确的名目，“operational calculus”。它最早来源于一些数学家和工程师从形式上解微分方程的努力（例如广为人知的Heaviside就是其中一位）。具体的做法就是把微分方程的求解通过形式化的微分算子，转化成代数方程求解。

例如求解一个二阶常微分方程$y''(t) + a y'(t) + by(t) = 0$。我们通常被告知：“先求解特征方程$x^2 + ax + b = 0$，然后方程解的一般形式就是 $c_1 \exp(x_1t) + c_2 \exp(x_2t)$了”。验证一下这确实是对的，但怎么把这个技巧看得比较“自然”呢？

办法就是把原方程看作$(D^2 + a D + b) y = (D - x_1)(D - x_2) y = 0$。这里 $D$ 是微分算子。然后问题就简化为解一阶线性常微分方程 $(D - x_1)y = 0$ 和 $(D - x_2) y = 0$。而这是可以通过积分因子法轻易求解的（参见丁同仁和李承治合著的《常微分方程教程》一书\cite{丁同仁等2004}）。最后利用解的线性叠加性，把方程的通解表示为两个基解的线性组合就行了。这一思考过程很好地诠释了数学家们口耳相传的常识：“一开始，我们只是发现了一个技巧；然后技巧演化成了一个方法；最终方法变成了一个理论”。

所以在很多情况下，用这种形式化的算子法来解微分方程，当其适用的时候，往往是最简单的。丁同仁和李承治合著的《常微分方程教程》第一版\cite{丁同仁等1990}有一章专门讲述这个方法。但第二版\cite{丁同仁等2004}却把相关内容删掉了。

这种形式化的operational calculus能够解线性常微分方程，也能够解线性偏微分方程。这是第十三章的一条主线。

\medskip

{\bf 第十四章：分离变量法}。变量分离法粗看之下，假设过强：“我们怎么知道解可以写成变量分离的形式？”其实这无非就是逼近罢了。以求解一个二元偏微分方程为例。基本想法就是把未知函数 $f(x,y)$ 用形如 $g(x)h(y)$ 这样的函数的线性组合来逼近。而每一个$g(x)$ 和$h(y)$ 又各自可以用一组基来表示。类似的自然想法包括用多元多项式去逼近多元函数，用傅立叶级数去构造一个函数，等等。最近的文献可参阅\cite{DK2010}。

所谓的“本征函数”，无非就是满足一定限制条件的逼近函数而已。为了能够达到逼近的目的，我们还需要确认它们构成了函数空间的一组基。而为了让逼近方式尽量简洁，我们还希望取正交基，等等。这大致就是后面第十八章所解释的“高观点”。

\medskip

{\bf 第十五章：正交曲面坐标系、第十六章：球函数、第十七章：柱函数}。这几章并没有什么新的东西。无非就是如第十五章开篇所言，由于微分方程的作用区域不同而需要引入新的坐标，从而导致产生新的方程、新的函数（作为方程的解）。

\medskip

{\bf 第十八章：分离变量法总结}。这一章的内容和动机已在第十四章解释过了。

\medskip

{\bf 第十九章：积分变换的应用}。提供几本关于积分变换的参考资料如下：
\newline $\bullet$ U. Cherubini, G. D. Lunga, S. Mulinacci, and P. Rossi. {\it Fourier Transform Methods in Finance}. \cite{CLMR2010}
\newline $\bullet$ B. Davies. {\it Integral Transforms and Their Applications}, 3rd edition. \cite{Davies2010}
\newline $\bullet$ L. Debnath and D. Bhatta. {\it Integral Transforms and Their Applications}, 2nd edition. \cite{DB2014}
\newline $\bullet$ D. Duffy. {\it Transform Methods for Solving Partial Differential Equations}, 2nd edition. \cite{Duffy2004}
\newline $\bullet$ A. Erdelyi. {\it Tables of Integral Transforms, I, II}. \cite{Erdelyi1954}
\newline $\bullet$ A.Poularikas. {\it Transforms and Applications Handbook}, 3rd edition. \cite{Poularikas2010}
\newline $\bullet$ J. Schiff. {\it The Laplace Transform: Theory and Applications}. \cite{Schiff1999}

几点个人看法如下。

第一、在没有Mathematica的年代，物理学家和工程师往往需要查手册表格来推演公式。即使现在有了Mathematica，有这些资料在案头做参考也是好的，而且Mathematica也有力所不及的地方。

第二、无论理论上多么漂亮，实践中我们都需要用计算机来进行计算。所以我们不能只满足于推导一些closed-form的公式。要确实解决问题，数值方法就必不可少，并且需要给出误差估计。

{\it Fourier Transform Methods in Finance} 和 {\it Transform Methods for Solving Partial Differential Equations}这两本书为上述看法提供了实例。前者浅显易懂，直接应用到了金融建模中。后者的作者 Dean Duffy博士毕业于麻省理工学院，曾长期为美国军方效力（美国海军学院、美国军事学院、美国空军），现在是美国航空航天局的工程师。他这本关于积分变换的著作和另外一本关于格林函数的著作\cite{Duffy2015}，实用性和针对性非常强，解决的是他自己和他的同事朋友们在工作中遇到的实际问题。他写书的另一个特色就是提供“一条龙”的解决方案：不但有理论公式推导，更重要的是有数值计算的解决方案。

第三、抗美援朝的时候，毛主席曾有“美国人钢多气少，中国人钢少气多”的评论。在学术领域，美国的“钢”就体现在前人积累的、各有特色、各有侧重的学术著作上。时代发展到今天，由于互联网和开放课程开源运动的兴起，使得把以学术著作为代表的美国的“钢”源源本本地传播到中国成为可能。这也是夹在时代的裂缝之间的过渡性人物的历史责任。

\medskip

{\bf 第二十章：格林函数}。个人感觉教材里的讲法过于强调技巧，有些见“树”而不见“森林”的感觉。个人心仪的讲法是把格林函数作为微分算子的逆算子来看。然后从这个“高观点”出发，对各种寻找格林函数的技巧做一个统一处理。这种讲法的好处是可以把有限维线性空间、积分方程、泛函分析作为一个有机的整体，按照华罗庚先生“一条龙”的方式一气呵成地讲出来（北大版的这本教材没有专门讲积分方程，但胡嗣柱和倪光炯合著的《数学物理方法》一书\cite{胡嗣柱等2002}则有讲述）\footnote{华罗庚的《高等数学引论》\cite{华罗庚2009}“共分四册，包含了微积分、高等代数、常微分方程、复变函数论等内容，全书反映了作者的‘数学是一门有紧密内在联系的学问，应将大学数学系的基础课放在一起来讲’的教学思想，还包括了作者的‘要埋有伏笔’、‘生书熟讲，熟书生温’等教学技巧，书中还介绍了数学理论的不少应用，这使得本套书不同于许多现行的教科书，是一套有特色、高水平的高等数学教材。”}。

这种讲法的路线图是先从Roach的{\it Green’s Functions} \cite{Roach1982} 一书开始，从线性代数自然地过渡到积分方程，引出高观点。然后介绍上文提到过的{\it Green’s Functions with Applications} \cite{Duffy2015}一书，尤其强调具体的使用和数值方法。最后再介绍Dieudonne的{\it History of Functional Analysis} \cite{Dieudonne}一书，为以后泛函分析的学习打下坚实的基础（例如前面提过的Lax的 {\it Functional Analysis} \cite{Lax2002}，或者是Lebedev和Vorovich合著的{\it Functional Analysis in Mechanics} \cite{LVC2012} 一书）。

\medskip

{\bf 第二十一章：变分法初步}。这一章的内容比较庞杂，理论分析、数值解法都有一些。一本重要的参考书是Gelfand和Fomin合著的{\it Calculus of Variations} \cite{GF00}一书\footnote{{\it Calculus of Variations}这本书的作者之一Gelfand （中译名盖尔方德）是前苏联著名数学家，苏联数学学派的领袖人物，列宁奖和沃尔夫奖获得者，皇家学会会员，美国科学院外籍院士，“二十世纪最伟大的数学家之一”（纽约时报）。}。这大概是学术界公认的最好的变分法教材。它比较突出的特点是叙述非常详细，读来有娓娓道来的感觉；同时覆盖面很广，短短200多页的篇幅，把变分法的来龙去脉解释得一清二楚。其中场论的章节对于分析力学（汉密尔顿力学和拉格朗日力学）的学习帮助很大。该书对于控制论的学习也不无裨益（Fleming和Rishel合著的{\it Deterministic and Stochastic Optimal Control} \cite{FR1982} 在控制论领域很有名，但笔者读后感觉叙述不清之处甚多。个人推荐雍炯敏和楼红卫合著的《最优控制理论简明教程》一书\cite{雍炯敏等2006}）。

回到吴崇试教授的《数学物理方法》一书。课本上最后一节讲了一点瑞利-里兹方法。个人觉得篇幅太短，讲得不够透彻。所以从Gelfand and Fomin \cite{GF00}摘录了部分内容，做了一个小结。这是习题解答里的第21.2 小节。同时也摘录了他们书上的3道习题，做了解答。

有意思的是，在笔者对其中一道习题给出解式，并试图用Matlab做一个数值试验的时候，Matlab报错了。原因是计算涉及的矩阵性质不太好，造成了算法的不稳定性。这个例子也提醒了我们，写出公式只是第一步，后面还有大量的工程工作需要做。做理论的科研工作者要和一线的工程技术人员一起摸爬滚打，才能真正地把问题吃透，提供完备的解决方案。

\medskip

{\bf 第二十二章：数学物理方程综述}。这部分内容可参阅丁同仁和李承治合著的《常微分方程教程》一书\cite{丁同仁等2004}最后两章的内容（首次积分、一阶偏微分方程）。文末有一个附录，对数学金融里的Black-Scholes方程的推导及解答做了一个示范。

\medskip

综上所述，吴崇试教授的这本教材和国内外的同类教材相比较并不逊色。但由于篇幅所限，该书作为本科生的入门教材无法讲太多的东西，故此在此附录中补充了一些相关资料。笔者由于个人学术经历所限，偏好用统一的“道”去统摄各种具体的“术”，这带有法国结构主义学派的风格（布尔巴基学派）。但这种后知后觉的“整理”，虽然漂亮严谨，却与现实中科研工作的曲折反复不相符合。推而广之，用“高观点”整理过的东西，往往容易让人误以为历史的发展是线性的，这是违背历史的本来面目的。从一线科研工作者的角度解释治学的方法论，丘成桐先生的治学经验\cite{丘成桐}非常值得一读。

\section{更多的参考资料}

变分法
\newline $\bullet$ W. G. Smiley and G. C. Evans. ``The First Variation of A Functional''. Bull. Amer. Math. Soc. Volume 36, Number 6 (1930), 427-433. \cite{SE1930}

\medskip

数学物理方法
\newline $\bullet$ R. Courant and D. Hilbert. Methods of Mathematical Physics I, II.
\newline $\bullet$ P. Morse and H. Feshbach. Methods of Theoretical Physics, I, II.
\newline $\bullet$ C. Harper. Analytic Methods in Physics.
\newline $\bullet$ C. Pope. Methods of Theoretical Physics.
\newline $\bullet$ M. Masujima. Applied Mathematics in Theoretical Physics. （主要关于积分方程和变分法）
\newline $\bullet$ L. I. Sedov. Similarity and Dimensional Methods in Mechanics.（维度分析，对物理专业有用）
\newline $\bullet$ H. W. Wyld. {\it Mathematical Methods for Physics} \cite{Wyld1999}.
\newline $\bullet$ 《数学物理方法》（复旦大学出版社，胡嗣柱、倪光炯编著）
\newline $\bullet$ 《数学物理方法解题指导》（高等教育出版社，胡嗣柱、徐建军著）
\newline $\bullet$ 《数学物理方法教程》（南开大学出版社，潘忠诚编）
\newline $\bullet$ 《数学物理方法》（科学出版社，汪德新编著）
\newline $\bullet$ 《广义函数与数学物理方程》（高等教育出版社，第二版，齐民友、吴方同编）
\newline $\bullet$ 《数学物理方法》（李政道）

\medskip

偏微分方程解析解
\newline $\bullet$ H. Bateman. Partial Differential Equations of Mathematical Physics.
\newline $\bullet$ G. Evans, J. Blackledge and P. Yardley. Analytic Methods for Partial Differential Equations.
\newline $\bullet$ R. Iorio and V. Iorio. Fourier Analysis and Partial Differential Equations.
\newline $\bullet$ S. V. Meleshko. Methods for Constructing Exact Solutions of Partial Differential Equations.

\medskip

特殊函数
\newline $\bullet$ W. Press, S. Teukolsky, W. Vetterling and B. Flannery. Numerical Recipes in C, 2nd edition.
\newline $\bullet$ M. Abramowitz and I. Stegun. Handbook of Mathematical Functions with Formulas, Graphs, and Mathematical Tables.
\newline $\bullet$ I. S. Gradshteyn and I. M. Ryzhik. Table of Integrals, Series, and Products, 7th edition.
\newline $\bullet$ G. Andrews, R. Askey and R. Roy. Special Functions.
\newline $\bullet$ R. Bellman. A Brief Introduction to Theta Functions.
\newline $\bullet$ A. Cayley. An Elementary Treatise on Elliptic Functions.
\newline $\bullet$ A. Erdelyi. Higher Transcendental Functions, Volume 1, 2, 3.
\newline $\bullet$ A. Levelt. Hypergeometric Functions.
\newline $\bullet$ A. Nikiforov and V. Uvarov. Special Functions of Mathematical Physics.
\newline $\bullet$ G. Szego. Orthogonal Polynomials.
\newline $\bullet$ E. C. Titchmarsh. The Theory of the Riemann Zeta Function.
\newline $\bullet$ Wang and Guo. Special Functions.
\newline $\bullet$ 王竹溪，郭敦仁：特殊函数论
\newline $\bullet$ G. Watson. A Treatise on the Theory of Bessel Functions.
\newline $\bullet$ A. Zygmund. Trigonometric Series, Volume I, II.
\newline $\bullet$ 刘式适，刘式达：特殊函数

\end{appendix}

\begin{thebibliography}{99}

\bibitem{CLMR2010} U. Cherubini, G. D. Lunga, S. Mulinacci, and P. Rossi. {\it Fourier Transform Methods in Finance}. Wiley (January 26, 2010).

\bibitem{Conway78} John B. Conway. {\it Functions of one complex
variable}, 2nd Edition. Springer, 1978.

\bibitem{DK2010} V. A. Daugavet and M. V. Kireeva. ``Approximation of a function of two variables by a product of functions of one variable on a given domain''. Vestnik St. Petersburg University. Mathematics, September 2010, Vol. 43, Issue 3, pp. 131-138.

\bibitem{Davies2010} B. Davies. {\it Integral Transforms and Their Applications}, 3rd edition. Springer, 3rd edition (December 6, 2010).

\bibitem{DB2014} L. Debnath and D. Bhatta. {\it Integral Transforms and Their Applications}, Chapman and Hall/CRC, 3rd edition. (November 7, 2014)

\bibitem{deBruijn2010} N. G. de Bruijn. {\it Asymptotic Methods in Analysis}. Dover Publications (October 18, 2010).

\bibitem{DVD2008} Christophe Deissenberg, Sander van der Hoog, Herbert Dawid. ``EURACE: A Massively Parallel Agent-Based Model of the European Economy". 2008. halshs-00339756 \href{https://halshs.archives-ouvertes.fr/halshs-00339756/document}{https://halshs.archives-ouvertes.fr/halshs-00339756/document}

\bibitem{Dieudonne} J. Dieudonne. {\it History of Functional Analysis}. North Holland; 1st edition (January 15, 1983).

\bibitem{丁同仁等1990}丁同仁，李承治：《常微分方程教程》。北京：高等教育出版社，1991。

\bibitem{丁同仁等2004}丁同仁，李承治：《常微分方程教程（第二版）》。北京：高等教育出版社，2004。

\bibitem{Dingle1973} R. B. Dingle. {\it Asymptotic Expansions: Their Derivation and Interpretation}. Academic Press (1973).

\bibitem{DT2002} Tobin A. Driscoll and Lloyd N. Trefethen. {\it Schwarz-Christoffel Mapping}.  Cambridge University Press; 1 edition (June 24, 2002).

\bibitem{Duffy2004} D. Duffy. {\it Transform Methods for Solving Partial Differential Equations}, 2nd edition. Chapman and Hall/CRC (June 26, 2004).

\bibitem{Duffy2015} D. Duffy. {\it Green's Functions with Applications}. Chapman and Hall/CRC; 2nd edition (March 10, 2015).

\bibitem{Erdelyi1954} A. Erdelyi. {\it Tables of Integral Transforms, I, II}. McGraw Hill (January 1954).

\bibitem{Erdelyi2010} A. Erdelyi. {\it Asymptotic Expansions}. Dover Publications (November 18, 2010).

\bibitem{Euler1988} Leonhard Euler. {\it Introduction to Analysis of the Infinite: Book I and II}. Springer (1988, 1989).

\bibitem{方企勤1996}方企勤：《复变函数教程》，北京：北京大学出版社，1996.12。

\bibitem{FR1982} Wendell H. Fleming and Raymond W. Rishel. {\it Deterministic and Stochastic Optimal Control}. Springer; 1 edition (October 18, 1982).

\bibitem{GF00} I. M. Gelfand and S. V. Fomin (translated and edited by Richard A. Silverman). {\it Calculus of Variations}, Dover Publications, 2000.

\bibitem{Gong2007} Gong Sheng and Gong You-Hong. {\it Concise Complex Analysis}, 2nd Edition. World Scientific, 2007.

\bibitem{Goursat2013} Edouard Goursat. {\it A Course in Mathematical Analysis}. Dover Publications (April 4, 2013).

\bibitem{Hardy2000} G. H. Hardy. {\it Divergent Series}. American Mathematical Society; 2nd edition (April 5, 2000).

\bibitem{Hooft} Gerard 't Hooft. ``How to become a GOOD theoretical physicist''. \url{http://www.staff.science.uu.nl/~gadda001/goodtheorist/compphys.html}.

\bibitem{胡嗣柱等2002}胡嗣柱、倪光炯编著:《数学物理方法（第二版）》，高等教育出版社，2002。

\bibitem{华罗庚2009}华罗庚：《高等数学引论（1-4册）》，高等教育出版社，2009。

\bibitem{Judd1998} Kenneth L. Judd. {\it Numerical Methods in Economics}. The MIT Press (October 27, 1998).

\bibitem{Kohn03} Robert V. Kohn. {\it PDE for Finance}, NYU Master of Science Program: Mathematics in
Finance. Spring 2003.
\url{http://www.math.nyu.edu/faculty/kohn/pde_finance.html}

\bibitem{Lax2002} Peter D. Lax. {\it Functional Analysis}. Wiley-Interscience; 1st edition (April 4, 2002).

\bibitem{LZ2011} Peter D. Lax and Lawrence Zalcman. {\it Complex Proofs of Real Theorems}, American Mathematical Society (December 21, 2011).

\bibitem{LVC2012} Leonid P. Lebedev, Iosif I. Vorovich, and Michael J. Cloud. {\it Functional Analysis in Mechanics}. Springer; 2nd ed. 2013 edition (October 23, 2012).

\bibitem{Oksendal95} B. {\O}ksendal. {\it Stochastic differential equations: An introduction with applications}. 6th edition. Springer, Berlin, 2003.

\bibitem{Poularikas2010} A. Poularikas. {\it Transforms and Applications Handbook}, 3rd edition. CRC Press (January 19, 2010).

\bibitem{Roach1982} G. F. Roach. {\it Green's Functions}, Cambridge University Press; 2nd edition (June 30, 1982).

\bibitem{Qazi2006}M.A. Qazi. \href{https://www.quantsummaries.com/Qazi_The_mean_value_theorem_and_analytic_functions.pdf}{``The mean value theorem and analytic functions of a complex variable''}. {\it J. Math. Anal. Appl.} 324 (2006) 30-38.

\bibitem{丘成桐}丘成桐：“我的数学之路”。\href{http://www.kepu.net.cn/gb/basic/szsx/3/3_37/3_37_1.htm}{http://www.kepu.net.cn/gb/basic/szsx/3/3\_37/3\_37\_1.htm}。

\bibitem{Schiff1999} J. Schiff. {\it The Laplace Transform: Theory and Applications}. Springer (October 14, 1999).

\bibitem{沈燮昌1986}沈燮昌：《数学分析（第二册）》，北京：高等教育出版社，1986.4。

\bibitem{Shreve04b} S. Shreve. {\it Stochastic calculus for finance II. Continuous-time models}. Springer-Verlag, New York, 2004.

\bibitem{SE1930} W. G. Smiley and G. C. Evans. ``The First Variation of A Functional''. Bull. Amer. Math. Soc. Volume 36, Number 6 (1930), 427-433.

\bibitem{WHD95} P. Wilmott, S. Howison, and J. Dewynne, {\it The mathematics of financial derivatives: A student introduction},
Cambridge University Press, Cambridge, UK, 1995.

\bibitem{Wong14} R. Wong. {\it Asymptotic Approximations of Integrals}. Academic Press (May 10, 2014).

\bibitem{Wyld1999} H. W. Wyld. {\it Mathematical Methods for Physics}. Perseus Books Publishing, 1999.

\bibitem{吴崇试2003}吴崇试：《数学物理方法（第二版）》。北京：北京大学出版社，2003.12。

\bibitem{WW1927} E. T. Whittaker and G. N. Watson. {\it A Course of Modern Analysis}. Cambridge University Press, 4th edition (January 2, 1927).

\bibitem{雍炯敏等2006}雍炯敏、楼红卫：《最优控制理论简明教程》。高等教育出版社，2006。

\bibitem{中共中央文献研究室2008}中共中央文献研究室，中共湖南省委《毛泽东早期文稿》编辑组：《毛泽东早期文稿（1912.6-1920.11）》。湖南人民出版社，2008年11月。

\bibitem{周治宁等2004}周治宁、吴崇试、钟毓澍：《数学物理方法习题指导》。北京：北京大学出版社，2004.9。

\end{thebibliography}
\end{document}

% 版本0.1.4，2019-08-31：加入附录“漫谈数学物理方法和特殊函数”。
% 版本0.1.3，2013-05-02：章节名改为中文。
% 版本0.1.0, 2010-01: 初稿。
