\addtocontents{toc}{\protect\setChapterprefix{Appendix }}
\chapter{}
\begin{proof}{Proposition~\ref{bsbetter}: (Biased Beliefs is preferred to Unbiased Beliefs)\\}
For $U=\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s}+\eta\{\underset{s\in\mathbf{\mathcal{S}}}{\sum}p_{s}\mu(u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})\}$

For $k=1,\dots,S$,
\begin{eqnarray*}
\dfrac{\partial U}{\partial q_{k}} & = & u_{k}\{1-\eta\underset{s\in\mathbf{\mathcal{S}}}{\sum}p_{s}\mu'(u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})\}\\
 & = & u_{k}\{1-\eta\underset{s\in A}{\sum}p_{s}-\eta\lambda\underset{s\in\overline{A}}{\sum}p_{s}\}\\
 & = & u_{k}\{1-\eta P_{+}-\eta\lambda(1-P_{+})\}
\end{eqnarray*}
where $P_{+}=\underset{s\in A}{\sum}p_{s},\: A=\{s\in\mathbf{\mathcal{S}}:\; u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s}\geq0\}$.
\\
For the cases $\dfrac{\partial U}{\partial q_{k}}|_{p_{1},\dots,p_{S}}\neq0$
(for at least one $k$), the conclusion is obvious. 

Consider the case that $\dfrac{\partial U}{\partial q_{k}}|_{p_{1},\dots,p_{S}}=0$,
$\forall k\in\mathbf{\mathcal{S}}$. \\

$\dfrac{\partial U}{\partial q_{k}}|_{p_{1},\dots,p_{S}}=0$ for all
$k$, iff $P_{+}=\underset{s\in A^{0}}{\sum}p_{s}=\dfrac{\eta\lambda-1}{\eta(\lambda-1)},\: A^{0}=\{s\in\mathbf{\mathcal{S}}:\; u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}p_{s}u_{s}\geq0\}$. 

Then we have
\begin{eqnarray*}
U_{BS} & = & \underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s}+\eta\{\underset{s\in\mathbf{\mathcal{S}}}{\sum}p_{s}\mu(u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})\}\\
 & = & \underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s}+\eta\{\underset{s\in A}{\sum}p_{s}u_{s}-(\underset{s\in A}{\sum}p_{s})\cdot\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s}+\lambda\cdot\underset{s\in\overline{A}}{\sum}p_{s}u_{s}-\lambda\cdot(\underset{s\in\overline{A}}{\sum}p_{s})\cdot\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s}\}\\
 & = & \underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s}+\eta\{\underset{s\in A}{\sum}p_{s}u_{s}-\dfrac{\eta\lambda-1}{\eta(\lambda-1)}\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s}+\lambda\cdot\underset{s\in\overline{A}}{\sum}p_{s}u_{s}-\lambda\cdot(1-\dfrac{\eta\lambda-1}{\eta(\lambda-1)})\cdot\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s}\}\\
 & = & \eta\{\underset{s\in A}{\sum}p_{s}u_{s}+\lambda\cdot\underset{s\in\overline{A}}{\sum}p_{s}u_{s}\}
\end{eqnarray*}


The total utility is independent of $q_{s}$ and $U_{BS}\equiv U_{RE}$.
$Q.E.D.$ \\
\end{proof}

\begin{proof}{Proposition~\ref{tradeoff}: (Trade-off on Beliefs among Different States)}
From $\dfrac{\partial U}{\partial q_{k}}=u_{k}\{1-\eta P_{+}-\eta\lambda(1-P_{+})\}$,
we have, 

for $u_{k}>0,$ $U$ is increasing in $q_{k}$ iff $P_{+}>P^{*}$,
decreasing in $q_{k}$ iff $P_{+}<P^{*}$, where $P^{*}=\dfrac{\eta\lambda-1}{\eta(\lambda-1)}$. 

Now consider the constraint $\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}=1$.
This constraint requires that any increase in $q_{k}$ must come together
with decrease(s) in subjective probability(ies) of other states. For
simplification, consider the case that $q_{k}$ and $q_{l}$, $(k>l)$
change together. Suppose $P_{+}>P^{*}$ and all the other subjective
beliefs are given, as $u_{k}>u_{l}$ for all $k>l$, $\dfrac{\partial U}{\partial q_{k}}>\dfrac{\partial U}{\partial q_{l}}$.
A small increase in $q_{k}$ with a small decrease in $q_{l}$ will
increase the total utility. Similar analysis for the case $P_{+}<P^{*}$.
$Q.E.D.$ \\
\end{proof}

\begin{proof}{Proposition~\ref{op}: (Over-optimistic versus Over-pessimistic)}
From the proof of Proposition~\ref{tradeoff}, we know that for $P_{+}\neq P^{*}$,
there always exists room for further biases. Therefore, at optimal
beliefs, $P_{+}=P^{*}=\dfrac{\eta\lambda-1}{\eta(\lambda-1)}$. As
the optimal $P_{+}$ is uniquely determined by $\eta$ and $\lambda$,
and the objective probabilities are exogenous, the sets of $u_{s}$
above the expectation are also uniquely determined. However, even
though the value of $\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s}$
is given, there still exist multiple combinations of $\{q_{s}\}$,
and as long as they could generate the required value of $\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s}$,
they all achieve the same value of total utility.

Furthermore, directly from the proof of Proposition~\ref{tradeoff}, we see that if
$P_{+}^{0}>(<)P^{*}$, DM will be up-biased(down-biased) in the upper
rank outcomes and down-biased(up-biased) in the lower rank outcomes.
Therefore, $\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s}>(<)\underset{s\in\mathbf{\mathcal{S}}}{\sum}p_{s}u_{s}$
iff $P_{+}^{0}>(<)P^{*}$. $Q.E.D.$ \\
\end{proof}

\begin{proof}{Proposition~\ref{time}: (Information Timing Preference)}
Suppose an agent holds optimal subjective beliefs $\{q_{s}\}_{s\in\mathbf{\mathcal{S}}}$. 

If $i=k$, the agent knows that $Z_{k}$ will happen at T=2, then, 

at T=1, $U_{k}^{A}=u_{k}+\eta\mu(u_{k}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})$; 

at T=2, $U_{k}^{R}=1\times\eta\mu(u_{k}-u_{k})+0\times\underset{s\in\mathbf{\mathcal{S}}\setminus\{k\}}{\sum}\eta\mu(u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})=0$;

The total utility from observing $i=k$ is, 

$U_{k}=U_{k}^{A}+U_{k}^{R}=u_{k}+\eta\mu(u_{k}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})$, 

An agent holding optimal beliefs $\{q_{s}\}_{s\in\mathbf{\mathcal{S}}}$
will believe that with probability $q_{k}$, she is going to observe
$i=k$, $k=1,...,S$. Therefore, the agent's utility from
getting information in advance is, 
\begin{eqnarray*}
U_{early} & = & \underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}\cdot U_{s}\\
 & = & \underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}\cdot u_{s}+\eta\cdot\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}\mu(u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})
\end{eqnarray*}
Utility without early information is, 
\begin{eqnarray*}
U_{wait} & = & \underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s}+\eta\{\underset{s\in\mathbf{\mathcal{S}}}{\sum}p_{s}\mu(u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})\}
\end{eqnarray*}
Early information is preferred iff $U_{early}>U_{wait}$ holds.
\begin{eqnarray*}
U_{early}-U_{wait} & = & \eta\cdot\underset{s\in\mathbf{\mathcal{S}}}{\sum}(q_{s}-p_{s})\mu(u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})\\
 & = & \eta\cdot\{\underset{s\in A}{\sum}(q_{s}-p_{s})\cdot(u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})+\underset{s\in\overline{A}}{\sum}(q_{s}-p_{s})\cdot\lambda(u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})\}
\end{eqnarray*}
$U_{early}-U_{wait}>0$ iff
\begin{equation*}
\underset{s\in A}{\sum}(q_{s}-p_{s})\cdot(u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s}) > \lambda \underset{s\in\overline{A}}{\sum}(p_{s}-q_{s})\cdot(u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})
\tag{$*$}
\end{equation*}


For $s\in A,$ where $A=\{s\in\mathbf{\mathcal{S}}:\; u(Z_{s})-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u(Z_{s})\geq0\}$,
$(u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})\geq0$
by definition; and for $s\in\overline{A},$ $(u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})<0$. 

If $P_{+}^{0}>(<)P^{*}$, by Proposition~\ref{op}, $q_{s}-p_{s}\geq(\leq)0$
if $s\in A$ while $p_{s}-q_{s}\geq(\leq)0$ if $s\in\overline{A}$,
strict inequality holds for at least one s in each subset. Therefore,
only one of $\underset{s\in A}{\sum}(q_{s}-p_{s})\cdot(u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})>0$
and $\underset{s\in\overline{A}}{\sum}(p_{s}-q_{s})\cdot(u_{s}-\underset{s\in\mathbf{\mathcal{S}}}{\sum}q_{s}u_{s})>0$
holds. 

When $P_{+}^{0}>P^{*}$, LHS of $(*)$ is greater than 0, and $U_{early}>U_{wait}$;
when $P_{+}^{0}<P^{*}$, RHS of $(*)$ is greater than 0, and $U_{early}<U_{wait}$.
\end{proof}

\begin{proof}{Propositions~\ref{twoindep}, \ref{twoindepgen}: (Risk Attitudes: A Naive Agent)}
For (ii), I prove the case $P^{0}_{+A}>P^*$, $P^{0}_{+B}<P^*$. 
From Proposition~\ref{op}, if $P^{0}_{+A}>P^*$ and $P^{0}_{+B}<P^*$, the investor will be on average over-optimistic about the payoff of lottery $A$, and over-pessimistic about the payoff of lottery $B$. Therefore, we have,
\[
E_{g_{B(\cdot)}^{*}}(Z_{B})<E_{f_{B(\cdot)}}(Z_{B})=E_{f_{A(\cdot)}}(Z_{A})<E_{g_{A(\cdot)}^{*}}(Z_{A})
\]
Therefore, for a naive agent, lottery A is strictly preferred to lottery B.
Now consider case(i), in which either both $P^{0}_{+A}$ and $P^{0}_{+B}$ are greater than $P^*$ or both smaller than $P^*$.
Note that the optimal belief $g_i^*(\cdot)$ ensures that $\int_{a}^{+\infty}f_{A}(Z)\,\mathrm{d}Z=P^{*}$ and $\int_{b}^{+\infty}f_{B}(Z)\,\mathrm{d}Z=P^{*}$, where $a=\min\{Z_{A}:\; Z_{A}-\int_{-\infty}^{+\infty}g_{A}^{*}(Z_{A})Z_{A}\,\mathrm{d}Z_{A}\geq0\}=E_{g_{A}^{*}}(Z_{A})$, and $b=\min\{Z_{B}:\; Z_{B}-\int_{-\infty}^{+\infty}g_{B}^{*}(Z_{B})Z_{B}\,\mathrm{d}Z_{B}\geq0\}=E_{g_{B}^{*}}(Z_{B})$. \\
Therefore, if $a>b$, $E_{g_{A}^{*}}(Z_{A})>E_{g_{B}^{*}}(Z_{B})$ and $\int_{a}^{+\infty}[f_{A}(Z)-f_{B}(Z)]\,\mathrm{d}Z>0$.\\
A similar proof applies for the case $a<b$.
Proposition~\ref{twoindep} is easy to derive from here.\quad \quad \quad Q.E.D.
\end{proof}

\begin{proof}{Proposition~\ref{lotterys}: (Choice between two lotteries: Sophisticated case)}
The objective function $E_{g}(Z)+\eta E_{f}\mu[Z-E_{g}(Z)]$ can be rewritten as,
\[
E_{g}(Z)+\eta\Bigl\{\int_{E_{g}(Z)}^{+\infty}f(Z)\bigl(Z-E_{g}(Z)\bigr)\mathrm{d}Z+\lambda\int_{-\infty}^{E_{g}(Z)}f(Z)\bigl(Z-E_{g}(Z)\bigr)\mathrm{d}Z\Bigr\}
=\eta E_{f}(Z)+(1-\eta)E_{g}(Z)-(\lambda-1)\eta E_{g}(Z)\int_{\mathrm{loss}}f(Z)\,\mathrm{d}Z+(\lambda-1)\eta\int_{\mathrm{loss}}f(Z)Z\,\mathrm{d}Z.
\]
At optimal beliefs, we have $\int_{\mathrm{loss}}f(Z)\,\mathrm{d}Z=1-P^*=\dfrac{1-\eta}{\eta(\lambda-1)}$. Substituting back into the rewritten objective function, we have,
\[
\eta E_{f}(Z)+(\lambda-1)\eta\int_{\mathrm{loss}}f(Z)Z\,\mathrm{d}Z.
\]
As $\ensuremath{E_{f_{A}}}(Z_{A})=\ensuremath{E_{f_{B}}}(Z_{B})$, the conclusion is proved. 
\end{proof}

\begin{proof}{Proposition~\ref{riskfreenaive}: (Risk taking due to optimism and pessimism: Naive Case)}
I continue to use the discrete state case to make the proofs easy to read. The proof can be easily extended to the continuous case. 

The agent wants to maximize $\int u(R_{f}+\alpha R)\,\mathrm{d}F(R)$. The objective function is concave in $\alpha$ as $\int u''(R_{f}+\alpha R)R^{2}\,\mathrm{d}F(R)\leq0$. For $0\leq \alpha \leq 1$, if $\alpha$ is optimal, it must satisfy the Kuhn-Tucker first-order condition: 
\[
\phi(\alpha)=\int u'(R_{f}+\alpha R)R\,\mathrm{d}F(R)
\begin{cases}
\leq0 & \text{if }\alpha<1\\
\geq0 & \text{if }\alpha>0
\end{cases}.
\]
Note that $\int R\,\mathrm{d}F(R)>0$ implies $\phi(0)>0$. Hence, $\alpha=0$ cannot satisfy the first-order condition. We conclude that the optimal portfolio has $\alpha>0$. A similar argument applies to the case $E(R)<0$. 

The problem of a rational agent to choose $\alpha^{RE}$  for given $\{p_{s}\}_{s\in\mathcal{S}}$ is 

$\underset{\alpha}{\max}\,\underset{s\in\mathcal{S}}{\sum}p_{s}u(R_{f}+\alpha R_{s})$ , 

where $s$ is the state.

The F.O.C of this problem is 
\begin{equation}\label{refoc}
\underset{s\in\mathcal{S}}{\sum}p_{s}u'(R_{f}+\alpha^{RE}R_{s})R_{s}=0,
\end{equation}
where $\alpha^{RE}$ is the optimal allocation of wealth to the risky asset under rational beliefs.
Similarly, $\alpha^{BS}$ denotes the optimal proportion of endowment chosen by a biased agent. 

Similarly, the F.O.C. of the problem of a biased agent to choose $\alpha^{BS}$ for given subjective beliefs $\{q_{s}\}_{s\in\mathcal{S}}$ is 
\begin{equation*}
\underset{s\in\mathcal{S}}{\sum}q_{s}u'(R_{f}+\alpha^{BS}R_{s})R_{s}=0.
\end{equation*}

Furthermore, we examine the agent's F.O.C. for the optimal $\alpha^*$. Consider moving $\mathrm{d}\hat{\pi}$ from state $s'$ to state $s''$ with $R_{s''}>R_{s'}$; we have:
\begin{equation}
(u'(R_f+\alpha^*R_{s''})R_{s''}-u'(R_f+\alpha^*R_{s'})R_{s'})\mathrm{d}\hat{\pi}+\sum_{s\in{\mathcal{S}}}\hat{q}_{s}u''(R_f+\alpha^*R_s)R_s^2\mathrm{d}\alpha^*=0
\end{equation}
\begin{equation}
\dfrac{\mathrm{d}\alpha^*}{\mathrm{d}\hat{\pi}}=\dfrac{u'(\cdot)R_{s'}-u'(\cdot)R_{s''}}{\sum_{s\in{\mathcal{S}}}q_{s}u''(R_f+\alpha^*R_s)R_s^2}>0. 
\end{equation}
Therefore, we proved that the optimal $\alpha^{*}$ is increasing in the subjective probability put on upper ranking outcomes. We proved before that an optimistic agent is biased upward because she overestimates the probability of good outcomes and underestimates the probability of bad outcomes. For $\alpha^{BS}>0$, better outcomes come from higher returns, while for $\alpha^{BS}<0$, better outcomes are those states that give lower returns. 
For $\alpha^{RE}>0$ and $\alpha^{BS}>0$, an optimistic agent is one who over-estimates the probability on high returns. To bring back the biased beliefs from optimistic to rational, $\alpha$ must decrease. Therefore, we have $\alpha^{OP}>\alpha^{RE}>0$. Instead, for a pessimistic agent who over-estimates the probabilities on the low returns need to increase $\alpha$ to get back to rational level. Therefore, we have $0<\alpha^{PE}<\alpha^{RE}$. 
For $\alpha^{RE}>0$ and $\alpha^{BS}<0$, an optimistic agent will over-estimate the probability on low returns and to get back to rational level, $\alpha$ must increase. Therefore, we have $\alpha^{OP}<0<\alpha^{RE}$. However, for pessimistic agent with $\alpha^{PE}<0$, we must have $\alpha^{RE}<\alpha^{PE}<0$ and lead to a contradiction to the assumption $\alpha^{RE}>0$. We proved the case for $\alpha^{RE}>0$.
Similar analogue for $\alpha^{RE}<0$.    $\mathcal{Q.E.D.}$
\end{proof}

\begin{proof}{Proposition~\ref{riskfreeso}: (Risk taking due to optimism and pessimism: Sophisticated Case)}

The problem of choosing the optimal $\alpha^{BS}$ for given optimal beliefs $\{q^{*}_{s}\}_{s\in\mathcal{S}}$ is, 
$$
\underset{\alpha}{Max}V=\underset{s\in\mathcal{S}}{\sum}q_{s}^{*}u(R_{f}+\alpha R_{s})+\eta\underset{s\in\mathcal{S}}{\sum}p_{s}\mu[u(R_{f}+\alpha R_{s})-\underset{s\in\mathcal{S}}{\sum}q_{s}^{*}u(R_{f}+\alpha R_{s})] 
$$
The first order condition with respect to $\alpha$ at optimal $\alpha^{BS}$ is
\begin{equation*}
\begin{split}
&\quad \dfrac{\partial V}{\partial\alpha}\mid_{\alpha=\alpha^{BS},q_{s}=q_{s}^{*}}\\
%&=\underset{s\in\mathcal{S}}{\sum}q_{s}^{*}u'(R_{f}+\alpha^{BS}R_{s})R_{s}\\
%&\quad +\eta\underset{+BS}{\sum}p_{s}[u'(R_{f}+\alpha^{BS}R_{s})R_{s}-\underset{s\in\mathcal{S}}{\sum}q_{s}^{*}u'(R_{f}+\alpha^{BS}R_{s})R_{s}]\\
%&\quad+\eta\lambda\underset{-BS}{\sum}p_{s}[u'(R_{f}+\alpha^{BS}R_{s})R_{s}-\underset{s\in\mathcal{S}}{\sum}q_{s}^{*}u'(R_{f}+\alpha^{BS}R_{s})R_{s}]\\
%&=(1-\eta)\underset{s\in\mathcal{S}}{\sum}q_{s}^{*}u'(\cdot)R_{s}+\eta\underset{s\in\mathcal{S}}{\sum}p_{s}u'(\cdot)R_{s}\\
%&\quad +\eta(\lambda-1)\underset{-BS}{\sum}p_{s}u'(\cdot)R_{s}-\eta(\lambda-1)\underset{-BS}{(\sum}p_{s})\underset{s\in\mathcal{S}}{(\sum}q_{s}^{*}u'(\cdot)R_{s})\\
%&=(1-\eta)\underset{s\in\mathcal{S}}{\sum}q_{s}^{*}u'(\cdot)R_{s}-\eta(\lambda-1)(1-P^{*})(\underset{s\in\mathcal{S}}{\sum}q_{s}^{*}u'(\cdot)R_{s})\\
%&\quad +\eta\underset{s\in\mathcal{S}}{\sum}p_{s}u'(\cdot)R_{s}+\eta(\lambda-1)\underset{-BS}{\sum}p_{s}u'(\cdot)R_{s}\\
%&=(1-\eta)\underset{s\in\mathcal{S}}{\sum}q_{s}^{*}u'(\cdot)R_{s}-\eta(\lambda-1)(1-\dfrac{\eta\lambda-1}{\eta(\lambda-1)})(\underset{s\in\mathcal{S}}{\sum}q_{s}^{*}u'(\cdot)R_{s})\\
%&\quad +\eta\underset{s\in\mathcal{S}}{\sum}p_{s}u'(\cdot)R_{s}+\eta(\lambda-1)\underset{-BS}{\sum}p_{s}u'(\cdot)R_{s}\\
&=0\times\underset{s\in\mathcal{S}}{\sum}q_{s}^{*}u'(\cdot)R_{s}+\eta\underset{s\in\mathcal{S}}{\sum}p_{s}u'(\cdot)R_{s}+\eta(\lambda-1)\underset{-BS}{\sum}p_{s}u'(\cdot)R_{s}\\
&=0 \\
\end{split}
\end{equation*}
Now assume at optimal beliefs, the subjective expectation is $E_{q_s^*}u(R_f+\alpha R_s)$. Consider moving $\mathrm{d}\hat{\omega}>0$ from state $s'$ to $s''$ with $R_{s''}>R_{s'}$. We suppose that without any change in $\alpha$, the subjective expectation changes from $E_{q_s^*}u(R_f+\alpha R_s)$ to $E_{q_s^*}u(R_f+\alpha R_s)+\Delta$, where $\Delta=\mathrm{d}\hat{\omega}\,(u(R_f+\alpha R_{s''})-u(R_f+\alpha R_{s'}))$. Suppose $\Delta$ is small enough; therefore, there exists only one state $\tilde{s}$ that moves from gain to loss due to the increase in expectation. This assumption is always satisfied under the continuous assumption. 
We examine the first order condition for optimal $\alpha$: 
\begin{equation*}
\eta(\lambda-1)p_{\tilde{s}}u'(R_{f}+\alpha R_{\tilde{s}})R_{\tilde{s}}+\{\eta\underset{s\in\mathcal{S}}{\Sigma}p_{s}u''(R_{f}+\alpha R_{s})R_{s}^{2}+\eta(\lambda-1)\underset{-BS}{\Sigma}p_{s}u''(R_{f}+\alpha R_{s})R_{s}^{2}\}\mathrm{d}\alpha=0 
\end{equation*}

Since $\eta\underset{s\in\mathcal{S}}{\Sigma}p_{s}u''(R_{f}+\alpha R_{s})R_{s}^{2}+\eta(\lambda-1)\underset{-BS}{\Sigma}p_{s}u''(R_{f}+\alpha R_{s})R_{s}^{2}<0$, for $\mathrm{d}\alpha>0$, we must have $\eta(\lambda-1)p_{\tilde{s}}u'(R_{f}+\alpha R_{\tilde{s}})R_{\tilde{s}}>0$; therefore, $R_{\tilde{s}}>0$. 

Instead, if $R_{\tilde{s}}<0$, we need $-\mathrm{d}\hat{\omega}$, which is down-bias, so that $\eta(\lambda-1)p_{\tilde{s}}u'(R_{f}+\alpha R_{\tilde{s}})R_{\tilde{s}}$ will be removed from loss region.  
Notice that the state that moves into or out of the loss region is the one on the margin of the gain and loss regions. Therefore, we must have
$$
u(R_f+\alpha R_{\tilde{s}})=E_{q_s^*}u(R_f+\alpha R_s).
$$
Changing the notation slightly, we have $R_{\tilde{s}}=R_{CE}$ in the proposition. 

Therefore, we proved that if $R_{CE}>0$, $\alpha$ is increasing in $\mathrm{d}\hat{\omega}$; if  $R_{CE}<0$, $\alpha$ is decreasing in $\mathrm{d}\hat{\omega}$. 

When $R_{\tilde{s}}>0$, to bring down the beliefs back to rational level, an optimistic investor will adjust $\alpha$ to a lower level, therefore, $\alpha^{RE}<\alpha^{OP}$. 

Instead, from pessimistic to rational, the investor need to move up her subjective beliefs. If $R_{\tilde{s}}>0$, then she has $\alpha^{RE}>\alpha^{PE}$. 
Same analogue for $R_{\tilde{s}}<0$.  $\mathcal{Q.E.D.}$
\end{proof}
