\documentclass{article}

\usepackage[novarsymbols,english]{opdracht}
\usepackage{slashed}
\DeclareMathOperator\tr{tr}

\begin{document}

\inleveropgave{9 -- Quantum Field Theory}

\opgave{1 --- More $\gamma$-gymnastics}

\deelopgave
Such a matrix $U$ undoubtedly exists, although it seems easier to me to just
define
\[ \gamma^0 = \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix},
   \gamma^i = \begin{pmatrix} 0 & -\sigma^i \\ \sigma^i & 0 \end{pmatrix},
\]
check that they satisfy $\{ \gamma^\mu, \gamma^\nu \} = 2 \eta^{\mu\nu}$ and
calculate
\[ \gamma^5 = \begin{pmatrix} -1 & 0 \\ 0 & 1 \end{pmatrix}. \]

Otherwise my approach would probably be to define
\[ U = \begin{pmatrix} A & B \\ C & D \end{pmatrix},
   U^\dagger = \begin{pmatrix} A^\dagger & C^\dagger \\ B^\dagger & D^\dagger \end{pmatrix},
\]
and by writing out $U U^\dagger = 1$ and $U^\dagger \gamma^\mu U$ derive the equations
to solve for $A, B, C$ and $D$.

\deelopgave Define the projection operators $P_\mathrm{L} = \frac12(1 - \gamma^5)$
and $P_\mathrm{R} = \frac12(1 + \gamma^5)$. We introduce the short-hand notation
$P = \frac12(1 \pm \gamma^5)$ for either of them, then it is easy to show that
\[ P^2 = \frac14(1 \pm \gamma^5)(1 \pm \gamma^5) = \frac14(1 \pm 2 \gamma^5 + (\gamma^5)^2)
       = \frac14(1 \pm 2 \gamma^5 + 1) = \frac12(1 \pm \gamma^5) = P,
\]
because $(\gamma^5)^2 = 1$, as shown in the exercise class (problem 2b). Hence
they are indeed both projection operators. Moreover, they project on disjoint
subspaces, as we can see by calculating both $P_\mathrm{L} P_\mathrm{R}$ and
$P_\mathrm{R} P_\mathrm{L}$:
\[ \frac12(1 \pm \gamma^5) \frac12(1 \mp \gamma^5) = \frac14(1 - (\gamma^5)^2)
   = \frac14(1 - 1) = 0.
\]
Since $P_\mathrm{L} + P_\mathrm{R} = \frac12(1 - \gamma^5 + 1 + \gamma^5) = 1$,
we can write
\[ \psi = (P_\mathrm{L} + P_\mathrm{R})\psi = P_\mathrm{L} \psi + P_\mathrm{R} \psi. \]
From basic functional analysis we know that this decomposition is unique (Wikipedia
$\rightarrow$ Projection operators $\rightarrow$ Classification).

\deelopgave Consider the Lagrangian density for Dirac spinors,
\[ \mathcal L = \overline\psi(\imi \gamma^\mu \partial_\mu - m) \psi, \]
where $\overline\psi = \psi^\dagger \gamma^0$. By Hermitian conjugating $\psi_\mathrm{L}$,
\[ \psi_\mathrm{L}^\dagger = ( P_\mathrm{L} \psi )^\dagger
                = \psi^\dagger P_\mathrm{L}^\dagger
\]
but $P_\mathrm{L}^\dagger = \frac12(1 - \gamma^5)^\dagger = P_\mathrm{L}$, because
$(\gamma^5)^\dagger = \gamma^5$. The anti-commutation relations calculated earlier,
$\{ \gamma^5, \gamma^\mu \} = 0$ ($\mu = 0, 1, 2, 3$), imply that
\[ P_\mathrm{R,L} \gamma^\mu = \frac12(1 \pm \gamma^5) \gamma^\mu = \frac12 \gamma^\mu (1 \mp \gamma^5) = \gamma^\mu P_\mathrm{L,R}. \]
Therefore,
\[ \overline{\psi}_\mathrm{L} = \psi_\mathrm{L}^\dagger \gamma^0
                              = \frac12 \psi^\dagger (1 - \gamma^5) \gamma^0
                              = \frac12 \psi^\dagger \gamma^0 (1 + \gamma^5)
                              = \overline\psi P_\mathrm{R}.
\]
Similarly, $\overline\psi_\mathrm{R} = \overline\psi P_\mathrm{L}$. By conjugating
$\psi$, we also see that
\[ \psi^\dagger \gamma^0 = \left[ (P_\mathrm{L} + P_\mathrm{R}) \psi \right]^\dagger \gamma^0
                         = \psi^\dagger (P_\mathrm{L} + P_\mathrm{R}) \gamma^0
                         = \psi^\dagger \gamma^0 (P_\mathrm{R} + P_\mathrm{L})
                         = \overline\psi_\mathrm{L} + \overline\psi_\mathrm{R}.
\]
The Lagrangian is thence
\[ \begin{split} \mathcal L
     & {} = \overline\psi (\imi \slashed\partial - m) \psi \\
     & {} = \overline\psi (P_\mathrm{R} + P_\mathrm{L})(\imi \slashed\partial - m)(P_\mathrm{R} + P_\mathrm{L}) \psi \\
     & {} = \imi \overline\psi P_\mathrm{R} \slashed\partial P_\mathrm{L} \psi + \imi \overline\psi P_\mathrm{L} \slashed\partial P_\mathrm{R} \psi
            - m \overline\psi P_\mathrm{L} P_\mathrm{L} \psi - m \overline\psi P_\mathrm{R} P_\mathrm{R} \psi \\
     & {} = \imi \overline{P_\mathrm{L}\psi} \slashed\partial (P_\mathrm{L}\psi) + \imi \overline{P_\mathrm{R}\psi} \slashed\partial (P_\mathrm{R}\psi)
            - m \overline{P_\mathrm{R}\psi} (P_\mathrm{L}\psi) - m \overline{P_\mathrm{L}\psi} (P_\mathrm{R}\psi) \\
     & {} = \imi \overline{\psi_\mathrm{L}} \slashed\partial \psi_\mathrm{L} + \imi \overline{\psi_\mathrm{R}} \slashed\partial \psi_\mathrm{R}
            - m \overline{\psi_\mathrm{L}} \psi_\mathrm{R} - m \overline{\psi_\mathrm{R}} \psi_\mathrm{L}.
   \end{split}
\]
Note that for any ``slashed'' quantity, only terms with different projection
operators survive, because the projection operator changes when pulled through
the $\gamma^\mu$ in the contraction. For example:
\[ P_\mathrm{L} \slashed{A} P_\mathrm{L} = \slashed{A} P_\mathrm{R} P_\mathrm{L} = 0,
   \qquad
   P_\mathrm{L} \slashed{A} P_\mathrm{R} = \slashed{A} P_\mathrm{R} P_\mathrm{R} \neq 0.
\]
On the other hand, if $A$ is an operator with which the $\gamma$-matrices commute
(say, a multiple $m$ of the identity) then the operators are not changed and
\[ P_\mathrm{L} A P_\mathrm{L} = A P_\mathrm{L} P_\mathrm{L} \neq 0,
   \qquad
   P_\mathrm{L} A P_\mathrm{R} = A P_\mathrm{L} P_\mathrm{R} = 0.
\]

The vector and axial current may also be expressed in this way:
\[ j^\mu_V = \overline\psi \gamma^\mu \psi
         = \overline\psi (P_\mathrm{R} + P_\mathrm{L}) \gamma^\mu (P_\mathrm{R} + P_\mathrm{L}) \psi
         = \overline\psi P_\mathrm{R} \gamma^\mu P_\mathrm{L} \psi + \overline\psi P_\mathrm{L} \gamma^\mu P_\mathrm{R} \psi
         = \overline\psi_\mathrm{L} \gamma^\mu \psi_\mathrm{L} + \overline\psi_\mathrm{R} \gamma^\mu \psi_\mathrm{R}
\]
and, because $P_\mathrm{L}\gamma^5 = \frac12(1 - \gamma^5) \gamma^5 = \frac12(\gamma^5 - 1) = -P_\mathrm{L}$
while $P_\mathrm{R}\gamma^5 = \frac12(1 + \gamma^5)\gamma^5 = \frac12(\gamma^5 + 1) = P_\mathrm{R}$,
\[ j^\mu_A = \overline\psi \gamma^\mu \gamma^5 \psi
         = \overline\psi (P_\mathrm{R} + P_\mathrm{L}) \gamma^\mu \gamma^5 (P_\mathrm{R} + P_\mathrm{L}) \psi
         = \overline\psi (P_\mathrm{R} + P_\mathrm{L}) \gamma^\mu (P_\mathrm{R} - P_\mathrm{L}) \psi
         = \overline\psi_\mathrm{R} \gamma^\mu \psi_\mathrm{R} - \overline\psi_\mathrm{L} \gamma^\mu \psi_\mathrm{L}.
\]

\deelopgave For fields $\psi$ satisfying the Dirac equation we have
$\slashed\partial \psi = -\imi m \psi$, and by Hermitian conjugation (using
$(\gamma^\mu)^\dagger = \gamma^0 \gamma^\mu \gamma^0$) also
$\partial_\mu \overline\psi \gamma^\mu = \imi m \overline\psi$. Since $\gamma^\mu$ is
independent of spacetime,
\[ \partial_\mu j^\mu_V = \partial_\mu(\overline\psi \gamma^\mu \psi ) =
  \partial_\mu \overline\psi \gamma^\mu \psi + \overline\psi \gamma^\mu \partial_\mu \psi =
  \imi m \overline\psi \psi - \imi m \overline\psi \psi = 0,
\]
so the vector current is conserved. For the axial current, using
$\gamma^\mu \gamma^5 = - \gamma^5 \gamma^\mu$,
\[ \partial_\mu j^\mu_A = \partial_\mu(\overline\psi \gamma^\mu \gamma^5 \psi ) =
  \partial_\mu \overline\psi \gamma^\mu \gamma^5 \psi - \overline\psi \gamma^5 \slashed\partial \psi =
  \imi m \overline\psi \gamma^5 \psi + \imi m \overline\psi \gamma^5 \psi =
  2 \imi m \overline\psi \gamma^5 \psi,
\]
which vanishes only in the massless limit $m = 0$.

Under $\psi \mapsto e^{\imi\alpha}\psi$,
\[ \overline\psi \mapsto (e^{\imi\alpha}\psi)^\dagger \gamma^0 = e^{-\imi\alpha} \psi^\dagger \gamma^0 = e^{-\imi\alpha} \overline\psi. \]
Since $e^{\pm\imi\alpha}$ is just a (complex) scalar, we can pull it through any
matrix or derivative, so
\[ \mathcal L \mapsto (e^{-\imi\alpha} \overline\psi) (\imi\slashed\partial - m) (e^{\imi\alpha}\psi)
                   = (e^{-\imi\alpha} e^{\imi\alpha}) (\overline\psi (\imi\slashed\partial - m) \psi) = \mathcal L.
\]
The corresponding current is
\[ j^\mu_V = \der p\mathcal L/{\partial_\mu\psi} \delta\psi + \cancel{\der p\mathcal L/{\partial_\mu\overline\psi}} \delta\overline\psi
           = (\overline\psi \imi \gamma^\mu)(\imi\alpha\psi)
           = - (\overline\psi \gamma^\mu \psi) \alpha,
\]
where $\delta\psi = \imi\alpha\psi$, such that $\psi \mapsto \psi + \delta\psi$
to first order. Usually we don't write the generic parameter $\alpha$, and the
minus sign is of course immaterial (if $j^\mu$ is conserved, so is $z j^\mu$ for
any $z \in \CC$).
Similarly,
\[ j^\mu_A = \der p\mathcal L/{\partial_\mu\psi} \delta\psi + \cancel{\der p\mathcal L/{\partial_\mu\overline\psi}} \delta\overline\psi
           = (\overline\psi \imi \gamma^\mu)(\imi\alpha\gamma^5\psi)
           = - (\overline\psi \gamma^\mu \gamma^5 \psi) \alpha.
\]

\opgave{2 --- Complete basis of Dirac matrices}
Label the 16 Dirac matrices according to
\[ \{ \Gamma^A \} = \{ 1, \gamma^\mu, \gamma^{\mu\nu}, \gamma^\mu \gamma^5, \gamma^5 \} \]
and define
\[ \{ \Gamma_A \} = \{ 1, \gamma_\mu, -\gamma_{\mu\nu}, -\gamma_\mu \gamma^5, \gamma^5 \}. \]

\deelopgave We wish to show that $\tr(\Gamma_A \Gamma^B) = 4 \delta_A^B$.
We recall that the trace is linear and has the property that the trace is invariant
under cyclic permutation of the matrices in its argument (that is,
$\tr(M_1 \cdots M_n) = \tr(M_2 \cdots M_n M_1) = \cdots = \tr(M_n M_1 \cdots M_{n - 1})$
for any positive integer $n$ and square matrices $M_1, \cdots, M_n$). In general, we can
prove that the trace of a product $\Gamma$ of an odd number of $\gamma$-matrices is zero.
Remember that $(\gamma^5)^2 = 1$ and $\gamma^5$ anti-commutes with all the terms in the
product, so $\gamma^5 \Gamma = -\Gamma \gamma^5$, so $\tr\Gamma = \tr(\gamma^5 \gamma^5 \Gamma) = -\tr(\gamma^5 \Gamma \gamma^5)$.
On the other hand, by the cyclicity property of the trace,
$\tr \Gamma = \tr(\gamma^5 \gamma^5 \Gamma) = \tr(\gamma^5 \Gamma \gamma^5)$, therefore
the trace must be zero.

First let us show that if the multi-indices $A$ and $B$ are not of the same length,
then the trace vanishes. By the cyclicity, we only have to consider cases where
the length $|A|$ of $A$ is strictly smaller than that of $B$. First consider the case
$|A| = 0$, i.e.\@ $\Gamma^A = 1$. Then we have $\tr(\Gamma^A \Gamma^B) = \tr(\Gamma^B)$.
If $\Gamma^B = \gamma^\mu$ or $\Gamma^B = \gamma^\mu \gamma^5$
the trace vanishes because $\gamma^\mu$ is a product of an odd number of $\gamma$-matrices.
If $\Gamma^B = \gamma^{\mu\nu}$ or $\gamma^5$ then the trace vanishes because the
given matrices are by definition completely anti-symmetric hence have vanishing
diagonal entries.

If $|A| = 1$ then we can have $|B| = 2$ which is either a product of an odd number
of distinct matrices, or can (possibly after interchanging at the expense of a minus
sign) be written as a single matrix, for example
\[ \gamma^2 \gamma^{12} = \frac12( \gamma^2 \gamma^1 \gamma^2 - \gamma^2 \gamma^2 \gamma^1 )
                        = \frac12( - (\gamma^2)^2 \gamma^1 - (\gamma^2)^2 \gamma^1)
                        = - \gamma^1,
\]
whose trace vanishes by the earlier case. If $|B| = 3$ then we want to evaluate
\[ \begin{split} \tr(\gamma^\mu \gamma^\nu \gamma^5)
           & {} = \tr(\gamma^\alpha \gamma^\alpha \gamma^\mu \gamma^\nu \gamma^5)         \qquad\qquad\text{ with $\alpha \neq \mu, \alpha \neq \nu$} \\
           & {} = (-1)^3 \tr(\gamma^\alpha \gamma^\mu \gamma^\nu \gamma^5 \gamma^\alpha)  \qquad\text{ by anti-commutation in the first line} \\
           & {} = \tr(\gamma^\alpha  \gamma^\mu \gamma^\nu \gamma^5 \gamma^\alpha)        \qquad\qquad\text{ by cyclic permutation in the first line}
   \end{split}
\]
so this vanishes. If $|B| = 4$ then $\gamma^\mu \gamma^5$ will contain 3 distinct
matrices (after commuting the factor of $\gamma^\mu$ inside $\gamma^5$ to the front)
and the trace will be zero.

If $|A| = 2$, we can have $|B| = 3$: $\Gamma^A = \gamma^{\mu\nu}$, $\Gamma^B = \gamma^\lambda \gamma^5$.
Either $\mu$, $\nu$, $\lambda$ are different and -- after commutations -- only
one matrix remains (the term $\gamma^\kappa$ in $\gamma^5$ for which $\kappa$
is not $\mu$, $\nu$ and $\lambda$), or $\mu = \lambda$ and after commuting the
term $\gamma^\nu$ in $\gamma^5$ to the front, three of the four matrices in $\gamma^5$
remain which is again an odd number. By anti-symmetry, the cases $\nu = \lambda$
and $\mu = \nu$ are identical and trivial, respectively.

Now let us consider the cases where $n \perdef |A| = |B|$. If $n = 0$ then we must
have $\Gamma^A = \Gamma^B = 1$, so we get $\tr(1) = 4$. If $n = 1$ then $\Gamma^A = \gamma^\mu$
and $\Gamma^B = \gamma^\nu$, so either $A = B$ ($\mu = \nu$) and we again get
$\tr(\gamma^\mu \gamma^\nu) = \eta^{\mu\nu} \tr(1) = 4 \eta^{\mu\nu}$, or
\[ \tr(\gamma^\mu \gamma^\nu) = \tr(\gamma^\nu \gamma^\mu) = -\tr(\gamma^\nu \gamma^\mu) = 0 \]
by cyclicity and anti-commutation, respectively.
For $n = 2$, there are several possibilities. Suppose that $A = (\mu, \nu)$ and
$B = (\rho, \sigma)$. If all the indices are different, then
$\Gamma^A \Gamma^B \propto \gamma^0 \gamma^1 \gamma^2 \gamma^3$ so
\[ \tr(\Gamma^A \Gamma^B) \propto \tr(\gamma^5) = 0 \]
where the last identity can be proven by replacing $\gamma^5 = \gamma^0 \gamma^0 \gamma^5$
and respectively anti-commuting $\gamma^0 \leftrightarrow \gamma^5$ and cyclicly permuting.
If two indices are equal, but the other two are not, by anti-symmetry we can assume
without loss of generality that $\mu = \rho$ for the non-trivial case. Then
\[ \tr(\gamma^{\mu\nu}\gamma^{\mu\sigma})
   = \frac14 \tr( (\gamma^\mu \gamma^\nu - \gamma^\nu \gamma^\mu)(\gamma^\mu \gamma^\sigma - \gamma^\sigma \gamma^\mu) )
   = \frac14 \tr( - 4 s\, \gamma^\nu \gamma^\sigma )
   = - s \tr( \gamma^\nu \gamma^\sigma ),
\]
where $s = (\gamma^\mu)^2$ ($s = 1$ for $\mu = 0$ and $s = -1$ for $\mu = 1, 2, 3$;
no summation over $\mu$ is implied). Each of the four terms in the product reduces
to $- s \gamma^\nu \gamma^\sigma$ after anti-commuting the matrices where needed to
produce the square $(\gamma^\mu)^2$, at the expense of a minus sign for each
interchange. Finally, $\tr(\gamma^\nu \gamma^\sigma) = 4 \eta^{\nu\sigma} = 0$
because $\nu \neq \sigma$. Hence we get $0$.
If the indices are the same (assume $\mu = \rho$, $\nu = \sigma$) then
\[ \tr(\gamma^{\mu\nu} \gamma^{\mu\nu}) = \frac14 \tr( \gamma^\mu \gamma^\nu \gamma^\mu \gamma^\nu - \gamma^\mu \gamma^\nu \gamma^\nu \gamma^\mu - \gamma^\nu \gamma^\mu \gamma^\mu \gamma^\nu + \gamma^\nu \gamma^\mu \gamma^\nu \gamma^\mu ). \]
The outermost and innermost terms are equal, by cyclicity of the trace:
\[ \tr(\gamma^{\mu\nu} \gamma^{\mu\nu}) = \frac12 \tr( \gamma^\mu \gamma^\nu \gamma^\mu \gamma^\nu - (\gamma^\mu)^2 (\gamma^\nu)^2 ). \]
If $\mu = \nu$ then we have calculated $\tr(0) = 0$, so let us assume that $\mu \neq \nu$.
After anti-commutation,
\[ \tr(\gamma^{\mu\nu} \gamma^{\mu\nu}) = - \tr( (\gamma^\mu)^2 (\gamma^\nu)^2 ) = - \eta^{\mu\mu} \eta^{\nu\nu} \tr(1) = - 4 \eta^{\mu\mu} \eta^{\nu\nu}. \]
For $\mu = 0 \neq \nu$ this gives $4$, for $\mu \neq \nu$ both non-zero it gives $-4$.
When $n = 3$, we get
\[ \tr(\gamma^\mu \gamma^5 \gamma^\nu \gamma^5) = - \tr(\gamma^\mu \gamma^\nu \gamma^5 \gamma^5)
                                                = - \tr(\gamma^\mu \gamma^\nu)
                                                = - 4 \eta^{\mu\nu},
\]
as calculated before.
For $n = 4$, there only is one possible value for $A$ and $B$:
$\tr(\gamma^5 \gamma^5) = \tr(1) = 4$.
When we lower the index $A$ we see that all the minus signs are precisely absorbed
and we can write $\tr(\Gamma_A \Gamma^B) = 4\delta^B_A$.

\deelopgave
Using the result of \zie{1} we can show that all the $\Gamma^A$ are linearly
independent. Suppose that $c_A$ is some set of coefficients such that $c_A \Gamma^A = 0$
(summation over $A$ is implied). Let $B$ be an arbitrary index, then clearly
$c_A \Gamma^A \Gamma_B = 0$, therefore also
\[ 0 = \tr(c_A \Gamma^A \Gamma_B) = c_A \tr(\Gamma_B \Gamma^A) = 4 c_A \delta^A_B = 4 c_B
   \qquad\implies\qquad c_B  = 0.
\]
Since $B$ is arbitrary, all the coefficients must be zero.

\deelopgave Let $M$ be any $4 \times 4$ matrix. Clearly, $M$ has $16$ independent
entries. As we have precisely $16$ independent matrices $\Gamma^A$, we can express
$M$ as $M_A \Gamma^A$, with coefficients $M_A$.
%If we take $M = \Gamma_A$, then
%\[ \frac14 \Gamma^A M

\opgave{3 --- The Dirac equation}
Recall the Dirac equation
\[ (\imi \slashed\partial - m)\psi = 0. \]
Let us Fourier-transform
\[ \psi(x) = \int \frac{\dd^4k}{(2\pi)^4} \psi(k) e^{-\imi k x}, \]
where $\psi(k)$ are the Fourier components (I am dropping the tilde, where confusion
is possible I will write the argument $x$ or $k$) and $k x = k^\mu x_\mu$. The
Dirac equation then reads
\[ 0 = (\imi \slashed\partial - m) \left( \int \frac{\dd^4k}{(2\pi)^4} \psi(k) e^{-\imi k x} \right)
     = \int \frac{\dd^4k}{(2\pi)^4} \left( \imi \gamma^\mu \partial_\mu - m \right) \psi(k) e^{-\imi k x}
     = \int \frac{\dd^4k}{(2\pi)^4} \left( \imi \gamma^\mu (-\imi k_\mu) - m \right) \psi(k) e^{-\imi k x}.
\]
As the exponentials form a complete (but not overcomplete) basis of functions,
this can only vanish if
\[  \left( - \imi^2 \gamma^\mu k_\mu - m \right) \psi(k) = (\slashed k - m)\psi = 0. \]
Choose the coordinate system such that $\vec k$ points along the three-axis,
$k^0 = E, k^1 = k^2 = 0, k^3 = k$ where the energy is $E^2 = k^2 + m^2$.
Then we can write out the Dirac equation:
\[ (\gamma^0 E - \gamma^3 k - m)\psi(k) = 0. \]
In the Dirac representation of the $\gamma$ matrices, the bracketed operator becomes
\[ \begin{pmatrix}
    E & 0 & 0 & 0 \\
    0 & E & 0 & 0 \\
    0 & 0 & -E & 0 \\
    0 & 0 & 0 & -E
   \end{pmatrix}
   -
   \begin{pmatrix}
    0 & 0 & k & 0 \\
    0 & 0 & 0 & -k \\
    -k & 0 & 0 & 0 \\
    0 & k & 0 & 0
   \end{pmatrix}
   -
   \begin{pmatrix}
    m & 0 & 0 & 0 \\
    0 & m & 0 & 0 \\
    0 & 0 & m & 0 \\
    0 & 0 & 0 & m
   \end{pmatrix}
   =
   \begin{pmatrix}
     E-m & 0 & -k & 0 \\
     0 & E-m & 0 & k  \\
     k & 0 & -E-m & 0 \\
     0 & -k & 0 & -E-m
   \end{pmatrix}
\]
so the Dirac equation reads
\[
   \begin{pmatrix}
     E-m & 0 & -k & 0 \\
     0 & E-m & 0 & k  \\
     k & 0 & -E-m & 0 \\
     0 & -k & 0 & -E-m
   \end{pmatrix}
   \begin{pmatrix}
     \psi_1 \\ \psi_2 \\ \psi_3 \\ \psi_4
   \end{pmatrix}
   =
   \begin{pmatrix}
   (E-m) \psi_1  - k \psi_3 \\
   (E-m) \psi_2  + k \psi_4 \\
   (-E-m) \psi_3 + k \psi_1 \\
   (-E-m) \psi_4 - k \psi_2
   \end{pmatrix}
   =
   \begin{pmatrix}
     0 \\ 0 \\ 0 \\ 0
   \end{pmatrix}.
\]
Solving the first equation, $\frac{(E - m)}{k} \psi_1 = \psi_3$, the third equation
\[
   0 \stackrel{?}{=} - \frac{(E - m)(E + m)}{k} \psi_1 + k \psi_1
              = \frac{-E^2 + m^2 + k^2}{k} \psi_1
\]
is trivially satisfied, because $E^2 = m^2 + k^2$. Similarly, the fourth equation
is automatically taken care of when we solve the second one, which yields
$\psi_4 = -\frac{(E - m)}{k} \psi_2$ (note the minus sign coming from the
$+ k \psi_4$ term in the second row). The solution then reads
\[ \psi(k) = \begin{pmatrix}
     \psi_1 \\
     \psi_2 \\
     (E - m) \psi_1 / k \\
     - (E - m) \psi_2 / k
  \end{pmatrix}.
\]
When the electron is slowly moving ($k \ll m$, such that $k / m \ll 1$) we can expand
\[ E = \sqrt{m^2 + k^2} = m \sqrt{1 + (k / m)^2} \simeq m \left( 1 + \frac12 \frac{k^2}{m^2} + \mathcal O(k/m)^4 \right)
     = m + \frac{k^2}{2m} + \mathcal O(k/m)^4,
\]
so that $\frac{E - m}{k} \simeq \frac{k}{2m}$ and the solution can be written as
\[ \psi(k) = \begin{pmatrix}
     \psi_1 \\
     \psi_2 \\
     \frac{k}{2m} \psi_1 \\
     - \frac{k}{2m} \psi_2
  \end{pmatrix}.
\]
The lower two components are suppressed by the small factor $\frac{k}{2m} \ll 1$,
so in the non-relativistic limit the solution is dominated by the upper two
(``large'') components, as expected for a slowly moving electron.

\end{document}
