\documentclass[10pt]{amsart}
\usepackage{amsmath,amsthm,amssymb}
\usepackage{graphicx}
\usepackage[margin=1in]{geometry}
\usepackage{flafter}
\usepackage{float}
\usepackage[usenames]{color} %used for font color
\usepackage{amssymb} %maths
\usepackage{amsmath} %maths
\usepackage[utf8]{inputenc} %useful to type directly diacritic characters
\usepackage{subfigure}


\input xy 
\xyoption{all}

\floatstyle{boxed} 
\restylefloat{figure}
\begin{document}

\theoremstyle{plain}
\newtheorem{thm}{Theorem}
\newtheorem{prop}[thm]{Proposition}
\newtheorem{cor}[thm]{Corollary}
\newtheorem{lem}[thm]{Lemma}
\newtheorem{conj}[thm]{Conjecture}   

\theoremstyle{definition}
\newtheorem*{defn}{Definition}
\newtheorem*{exmp}{Example}

\theoremstyle{remark}
\newtheorem*{rem}{Remark}
\newtheorem*{hnote}{Historical Note}
\newtheorem*{nota}{Notation}
\newtheorem*{ack}{Acknowledgments}
\numberwithin{equation}{section}

\newcommand{\C}{\mathbb{C}}
\newcommand{\Ch}{\widehat{\mathbb{C}}}
\newcommand{\N}{\mathbb{N}}
\newcommand{\R}{\mathbb{R}}
\newcommand{\Q}{\mathbb{Q}}
\newcommand{\U}{\mathcal{U}}
\newcommand{\Z}{\mathbb{Z}}
\newcommand{\W}{\psi(\vec{x},t)}
\newcommand{\Wc}{\psi^*(\vec{x},t)}
\newcommand{\Sch}{i\hbar\frac{\partial\psi(\vec{x},t)}{\partial t}= \frac{-\hbar^2}{2m}\nabla^2\psi(\vec{x},t)+V(\vec{x})\psi(\vec{x},t)}
\newcommand{\SchTwo}{i\hbar\frac{\partial\psi(\vec{x_{(1)}},\vec{x_{(2)}},t)}{\partial t}= \frac{-\hbar^2}{2m}\left(\nabla^2_{(1)}\psi(\vec{x_{(1)}},\vec{x_{(2)}},t)+\nabla^2_{(2)}\psi(\vec{x_{(1)}},\vec{x_{(2)}},t)\right)+V(\vec{x_{(1)}},\vec{x_{(2)}})\psi(\vec{x_{(1)}},\vec{x_{(2)}},t)}
\newcommand{\Newton}{\vec{F}=m\cfrac{\partial^2 \mathbf{x} }{\partial t^2}=-\vec{\nabla}\nu}
\newcommand{\normalize}{\int\limits_{-\infty}^{\infty} |\psi(x,t)|^2\,dx=1}
\title {Quantum Mechanics Lecture 15}
\author{Alexander Berenbeim}
\maketitle
\section{Recall Our Approach to Stationary States}
What we'll be saying applies to bounded states for a simple harmonic oscillator. Remember that the first nontrivial term for small displacements from local minima of potential function is  $V\sim cx^2$. \\
In order to find stationary states, we can rely on the \emph{Hamiltonian operator} 
\begin{equation}
H\psi=E\psi\colon H=\cfrac{p^2}{2m}+V(x)=-\cfrac{\hbar^2}{2m}\nabla^2+V(x)
\end{equation}
However, there is another \emph{algebraic} approach to the description of the Hamiltonian operator.
\begin{equation}
a_{\pm}=\cfrac{1}{\sqrt{2\hbar m\omega}}\left(\mp i p+m\omega x\right)
\end{equation}
where the Hamiltonian can be alternately be expressed  as
\begin{equation}
H=\hbar \omega(a_-a_+-\frac{1}{2})
\end{equation}
\begin{equation}
H=\hbar \omega(a_+a_-+\frac{1}{2})
\end{equation}
and where the \emph{commutator} $[a_-,a_+]\equiv a_-a_+-a_+a_-=1$. We refer to $a_+$ as the raising operator and $a_-$ as the lowering operator, both of which act on our wave function $\psi$. To find the corresponding energies, we assume that we have one solution to the Schr\"odinger equation, $H\psi=E\psi$, and then:
\begin{eqnarray*}
\left(a_+\right)^n\psi&\colon& H\left[\left(a_+\right)^n\psi\right]=\left(E+n\hbar\omega\right)\left(a_+^n\psi\right)\\
\left(a_-\right)^n\psi&\colon& H\left[\left(a_-\right)^n\psi\right]=\left(E-n\hbar\omega\right)\left(a_-^n\psi\right)
\end{eqnarray*}
This gives us the nice table:
\center\begin{tabular}{|c|c|} \hline$ (a_+)^n \psi$ & $E+n\hbar\omega$\\\hline $\vdots$ &$\vdots$ \\ \hline$a_+\psi$ &$E+\hbar \omega$\\\hline$\psi$& E\\\hline $a_-\psi$ & $E-\hbar \omega$ \\\hline$ \vdots $&$\vdots$\\\hline 
\end{tabular}\\
\raggedright
Of course, we should recall that the energies of our stationary states must be $\ge V_{\min}$. This implies that $E\ge 0$ for any stationary state. It must follow that for any $\psi$, there are only finitely many new states of the form $(a_-)^n\psi$, and for the same $n$, $(a_-)^n\psi\equiv 0 \Rightarrow (a_-)^m \psi \equiv 0\ (\forall m \ge n)$. The ``energy ladder'' terminates in a downward direction, at which point we have our final, non-trivial rung of the ``ladder'' where $a_-\psi_0\equiv 0$.\\
We find that when we solve this simple differential equation, \[\psi_0=\left(\cfrac{m\omega}{\hbar\pi}\right)^{\frac{1}{4}}e^{-m\omega\frac{x^2}{2\hbar}}\]
which gives us $E_0=\hbar\frac{\omega}{2}$, and thus the table:
\center\begin{tabular}{|c|c|c|} \hline$\psi_n$ & $E_n$&$\hbar\frac{(2n+1)\omega}{2}$\\\hline $\vdots$ &$\vdots$&$\vdots$\\\hline $\psi_1(=a_+\psi_0)$ &$E_1$& $\hbar \frac{3\omega}{2}$ \\\hline$\psi_0$&$ E_0 $&$\hbar\frac{\omega}{2}$\\\hline 
\end{tabular}\\
\raggedright
where the normalized states are given by $\psi_n=\cfrac{1}{\sqrt{n!}}(a_+)^n\psi_0$.\\
This gives us the most general solution to the Time Independent Schr\"odinger Equation
\[\Psi(x,0)=\sum c_n\psi_n(x)\]
where 
\[c_m=\int\psi_m^*(x)\Psi(x,0)\,dx\]
which we calculate from the solution to the Time Dependent  Schr\"odinger equation: 
\[\Psi(x,t)=\sum c_n\psi_n(x)e^{-iE_n\frac{t}{\hbar}}\]
\section{Quantum Mechanics 2.0}
Now we're going to restart this course as Linear Algebra II. We do this because it gives us an axiomatic framework for QM, where we will find \\
\center\begin{tabular}{|c|c|} \hline QM =Dictionary= & Linear Algebra\\
\hline $\psi$ & vectors in $L_2(a,b)$ (Hilbert space)\\
\hline x,p & linear transformations of vectors\\
\hline $\psi(x), \varphi(p)$ & change of bases \\
\hline uncertainty principle & Schwarz Inequality \\\hline 
\end{tabular}
\raggedright
\subsection{Background in Linear Algebra}
\emph{Vector Spaces} are collections of objects, vectors, $\{v_1,v_2,...\} \in V$ with the following algebraic properties 
\begin{enumerate}
\item
$V$ is \underline{closed} under addition (e.g $(v_1 \in V )\wedge (v_2 \in V) \Rightarrow (v_1+v_2 \in V)) $
\item
objects in $V$ commute under addition (e.g.$v_1+v_2=v_2+v_1$)
\item
$V$ is \underline{closed} under scalar multiplication (e.g.\ for $\lambda\in\C$, $v_1\to \lambda v_1$, $\lambda v_1 \in V$)
\item
Scalar multiplication distributes over vector addition (e.g. $\lambda(v_1+v_2)=\lambda v_1 +\lambda v_2$)
\end{enumerate}
\begin{defn}
A \textbf{basis} $\mathcal{B}$ is a set of linearly independent vectors spanning $V$: $\mathcal{B} = \{e_1, e_2,\hdots\}$, such that $\sum_i\lambda_i e_i=0 \Rightarrow (\forall i, \lambda_i=0)$. We study the basis as any $v \in V$ can be written 
\[v=\sum\lambda_ie_i\]
for some choices $\lambda_i$ where all $e_i \in \mathcal{B}$.
\end{defn}
\begin{defn}
The \textbf{Inner Product} is a map  $I:V \times V \to \C$, which takes the cartesian product of the vector space to the complex plane by the following action $(v_1,v_2)\mapsto v_1 \cdot v_2$. Here are some of the following important things to note about the inner product 
\begin{itemize}
\item $(v_1, v_2) \in V \times V$
\item $I(v_1, v_2) \in \C$
\item $I(v_1, v_2) = I (v_2, v_1)^*$
\item $I(v_1, v_1) \ge 0$ [$= 0$ iff $v_1=0$]
\item$I(v_1, av_2+bv_3)=aI(v_1, v_2) +bI(v_1,v_3)$ This is linearity
\item $I(av_1+bv_2, v_3)=a^*I(v_1,v_3)+b^*I(v_2,v_3)$ This is skewed linearity
\end{itemize}
If you're having trouble with this, consider $\R^2$, where $I(v_1, v_2)=v_1\cdot v_2$, and $v_1=\lambda_1e_1+\lambda_2e_2$, $v_2=\xi_1 e_1 +\xi_2 e_2$, and $v_1\cdot v_2 = \lambda_1\xi_1 +\lambda_2\xi_2$.
\end{defn}
\begin{defn}
The \textbf{Norm} $\|v_1\|$ can be thought of as the "length" of the vector, as $\|v_1\|=\sqrt{I(v_1,v_1)}$. If you're having trouble visualizing this, think of this as the Euclidean distance function. 
\end{defn}
\subsection{Dirac Notation}
When Quantum mechanics was first being axiomatized, it was John von Neumann who realized that a quantum system could be considered as a point in Hilbert space. Although Heisenberg already gave a matrix mechanical formulation, and Schr\"odinger had conceived of the wave mechanical formulation of quantum mechanics, it was this realization that would provide a single, satisfactory theoretical formulation. As we talked about in class before, this was analogous to a 6N dimensional phase space in classical mechanics, where N is the number of particles, with 3 general coordinates and canonical momentum for each, only there are infinitely many dimensions instead. With this approach, position and momentum could be represented as particular linear operators, which could reduce the physics to the mathematics of linear Hermitian operators on Hilbert space.\\
However, physicists did not like von Neumann's notation, preferring Dirac's "bra-ket" notation instead. The best way to first approach this is the map $I(v_1,v_2)\to \langle v_1 | v_2 \rangle\in \C$, where:
\begin{defn}
A \textbf{"Bra"} is denoted $\langle x|$, and can be thought of as a linear transformation.
\end{defn}
\begin{defn}
\textbf{"Kets"} are denoted $| x \rangle$, and can be thought of as the vector.
\end{defn}
For a basis $(e_1,..., e_n)$, the corresponding kets are $|e_1\rangle, \hdots, | e_n \rangle$. More generally, for  vectors $v\in V$,
\[|v\rangle = \sum\limits_{i=1}^{n} a_i | e_i\rangle = a_i |e_i\rangle\]
Dirac's innovative idea was that we drop the summation sigma, $\sum$, since he realized that any repeated index implies that the index is being summed over. To be clear
\[a_i|e_i\rangle  = \sum\limits_{i=1}^{n} a_i | e_i\rangle  \]
Now when we think of our orthonormal basis, instead of sums, we can write $\langle e_j | e_i \rangle= \delta_{ij}$.
\subsection{Some Other Neat Observations}
Given 
\[|v_1\rangle=a_i|e_i\rangle\]
and
\[|v_2\rangle = b_j | e_j\rangle \]
\[\langle v_2 |v_1 \rangle = b_j^*\langle e_j | e_i \rangle a_i = b_i^*a_i \left(=\sum\limits_ib_i^*a_i\right)\]  
If you're having trouble following this, we note that $\langle e_j | e_i \rangle =\delta_{ij}$.
\subsection{Change of Bases}
Suppose we have a new basis for $V$: $\{|f_k\rangle\}$, where $|e_i\rangle = A_{ik} |f_k\rangle [=\sum\limits_kA_{ik}|f_k\rangle]$, where $A_{ik}$ gives the choice of coefficients and labels the vectors on the LHS.\\
Assuming that our bases are orthonormal, then $\langle f_j | e_k \rangle = \langle f_j |A_{kl}|f_l\rangle$, where we sum over $A_{kl}| f_l\rangle$. We note that
\[\langle f_j|e_k\rangle=A_{kl}\langle f_j|f_l\rangle=A_{kl}\delta_{jl}=A_{kj}\]
Hence, $\langle f_j | e_k \rangle = A_{kj}$.\\
And now, for some abstract vector v
\begin{itemize}
\item
\begin{eqnarray*}
v&=& v_i | e_i\rangle \mbox{ ( where $v_i$ is the coefficient of the representation of $v$ in basis $\{|e_i\rangle\}$)}\\
&=&v_i\left[\sum\limits_k|f_k\rangle \langle f_k|\right] |e_i\rangle\\
&=& v_i \left(\langle f_k | e_i \rangle\right)|f_k \rangle\\
&=& v_k^{\prime} | f_k \rangle\\
&=& \mbox{the second rep. of $v$ with respect to basis } \{f_k\}
\end{eqnarray*}
\item
\begin{eqnarray*}
v& =& v_i |e_i\rangle\\
&=& v_k^{\prime} | f_k \rangle\\
v^{\prime}_k &=& v_i \langle f_k | e_i \rangle
\end{eqnarray*}
This is the change of basis formula.
\item
\begin{eqnarray*}
|e_i\rangle&=& \langle f_k | e_i \rangle | f_k\rangle\\
&=& A_{ik}| f_k\rangle\\
\mathbb{1} &=& \sum\limits_k |f_k\rangle \langle f_k|\\
|e_i\rangle &=& \mathbb{1}|e_i\rangle = \sum\limits_k |f_k\rangle \langle f_k | e_i \rangle
\end{eqnarray*}
\end{itemize}
\end{document}