\documentclass[11pt]{article} 
\usepackage{amsmath,amsthm,amstext,amssymb,amsfonts,amscd,graphicx,bbm,algorithmic} 

\voffset -0.5in
\addtolength{\textheight}{1in} 

% new theorems
\newtheorem{lemma}{Lemma}
\newtheorem{thm}{Theorem} 
\newtheorem{ass}{Assumption} 
\newtheorem{cor}{Corollary} 
\newtheorem{clm}{Claim}
\newtheorem{prop}{Proposition} 
\newtheorem{con}{Conjecture} 
\newtheorem{wff}{Wff} 
\newtheorem{defn}{Definition} 
\newtheorem{axiom}{Axiom} 
\newcounter{rulenum} 
\newcounter{sent} 
\newcounter{tempcnt} 

% macros
\newcommand*{\add}{\mbox{\bf add}} 
\newcommand*{\open}{\mbox{\bf open}} 
\newcommand*{\close}{\mbox{\bf close}} 
\newcommand*{\ex}{\mbox{\rm E}} 
\newcommand*{\pr}{\mbox{\rm Pr}} 
\newcommand*{\range}{\mbox{\rm range}} 
\newcommand*{\rank}{\mbox{\rm rank}} 
\newcommand*{\sgn}{\mbox{\rm sign}} 
\newcommand*{\var}{\mbox{\rm Var}} 
\newcommand*{\diag}{\mbox{\rm diag}} 
\newcommand*{\epi}{\epsilon} 
\newcommand*{\QED}{\ \hfill\rule[-2pt]{6pt}{12pt} \medskip}
\newcommand*{\supp}{\mbox{\rm supp}} 

\newcommand*{\grad}{\nabla}
\newcommand*{\half}{\frac{1}{2}}
\newcommand*{\inv}{^{-1}}
\newcommand*{\0}{\mathbf{0}}
\newcommand*{\1}{\mathbf{1}}
\newcommand*{\E}{\ensuremath{\operatorname{E}}}
\newcommand*{\maximize}{\text{maximize}}
\newcommand*{\minimize}{\text{minimize}}
\newcommand*{\st}{\text{subject to}}
\newcommand*{\R}{\mathbbm{R}}
\newcommand*{\matlab}{{\sc Matlab}}

\newcommand{\abs}[1]{\left\vert #1 \right\vert}
\newcommand{\bigo}[1]{\mathcal{O} \left( #1 \right)}
\newcommand{\cov}[2]{\ensuremath{\operatorname{Cov}\left( #1, #2\right)}}
\newcommand{\Ex}[2][]{\ensuremath{\E_{#1} \left[ #2 \right]}}
\newcommand{\norm}[1]{\left\lVert\,#1\,\right\rVert}
\newcommand{\bmat}[1]{\begin{bmatrix}#1\end{bmatrix}}
\newcommand{\pmat}[1]{\begin{pmatrix}#1\end{pmatrix}}
\newcommand{\smallmat}[1]{\left (\begin{smallmatrix}#1\end{smallmatrix} \right)}
\newcommand{\vb}[1]{\mathbf{#1}}

\renewcommand{\P}{\ensuremath{\operatorname{P}}}
\renewcommand{\Pr}[2][]{\ensuremath{\P_{#1} \left \{ #2 \right \}}}


\title{Existence of Positive Equilibria for Inhomogeneous Mass Conserving and Mass-Action
	Chemical Reaction Networks with a 
	Single Terminal-Linkage-Class%
   \thanks{{\tt InhomScale.tex}, \today.}}
   \author{Scribed by Santiago}
\date{}


\begin{document}
\maketitle

\section{Introduction}

We report results on mass-conserving chemical reaction networks formed by a
single terminal linkage class.  We describe a {\em necessary and
sufficient} condition on the species exchange rates $b$ that guarantees the
existence of positive concentrations $c>0$ for which $YA_k\psi(c)=\alpha b$,
where $\alpha$ is some positive scalar.

We also show that every single terminal-linkage-class network satisfies a
second condition, on the matrix $Y$, which allows us to find positive
concentrations that achieve $YA_k\psi(c) = b$.

\section{Solution of scaled inhomogeneous systems}

Assume that the system is mass conserving and is formed by a single
terminal-linkage-class. 

We will show that there exists a concentration vector $c>0$ and a scalar $\alpha>0$ such that
$YA_k\psi(c) = \alpha b$ if and only if the inhomogeneous term $b$ is in the
range of the matrix $YA_k$ and can be written as  
\begin{align}
  b=Y(A^T\eta^+-D\eta^-) \label{suf-cond}
  \\ \text{ with } \eta^+,\eta^- > 0.  \notag
\end{align}
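
As a purely numerical illustration (not part of the argument that follows),
condition \eqref{suf-cond} can be probed for a given $b$ by a linear
feasibility program, with the strict positivity of $\eta^+,\eta^-$
approximated by a lower bound $\epsilon>0$.  The Python sketch below assumes
$A_k = A^T - D$ with $D=\diag(A\1)$, consistent with the identities used
later, and uses a small hypothetical three-complex cycle network.
\begin{verbatim}
import numpy as np
from scipy.optimize import linprog

# Hypothetical network: complexes 2X1, X1+X2, 2X2 on the cycle 1->2->3->1.
Y = np.array([[2., 1., 0.],
              [0., 1., 2.]])
A = np.array([[0., 1., 0.],      # A[i, j] = rate constant of complex i -> j
              [0., 0., 2.],
              [3., 0., 0.]])
D = np.diag(A.sum(axis=1))
Ak = A.T - D                     # assumed kinetics (Laplacian) matrix

b = Y @ Ak @ np.ones(3)          # an inhomogeneous term known to qualify

# Is b in the range of Y Ak?
M = Y @ Ak
in_range = (np.linalg.matrix_rank(np.column_stack([M, b]))
            == np.linalg.matrix_rank(M))

# Feasibility LP for b = Y(A^T eta+ - D eta-), eta+, eta- >= eps.
eps, n = 1e-6, A.shape[0]
res = linprog(c=np.zeros(2 * n),
              A_eq=np.hstack([Y @ A.T, -(Y @ D)]), b_eq=b,
              bounds=[(eps, None)] * (2 * n))
print(in_range, res.status == 0)   # expect: True True
\end{verbatim}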
 

Firstly, observe that if there exists a pair $(c,\alpha)>0$ such that
$YA_k\psi(c) = \alpha b$, then $b$ is in the range of $YA_k$. Furthermore, if
we let $\eta = \frac{1}{\alpha}\psi(c)$, then $\eta>0$ and $b=YA_k\eta =
Y(A^T\eta - D\eta)$, so \eqref{suf-cond} holds with $\eta^+=\eta^-=\eta$.

The proof of the converse follows the argument of the previous paper and draws
on the lemmas proved there.  First we establish some preliminary results; then
we define an optimization problem and an induced mapping, and we establish the
existence of a fixed point of this mapping. Using the single
terminal-linkage-class hypothesis, we show that the fixed points are positive.
Together, these facts establish the existence of positive concentrations which
yield a scaled version of the inhomogeneous term. Finally, we show that single
terminal-linkage-class networks can satisfy the equations exactly.

\subsection{Existence of fixed points}

In this section we define the optimization problem and the associated mapping,
and we establish the existence of fixed points of this mapping. We begin with
a preliminary result.

\begin{clm} If $b$ is as in \eqref{suf-cond}, then the vectors $\eta^+$ and $\eta^-$ satisfy \[e^TYD \eta^- = e^TYA^T\eta^+.\]
  \label{claim:eq}
\end{clm}
\begin{proof} Since $b$ is in the range of $YA_k$, we can write $b=YA_kx$ for some $x$. Since the system is mass
  conserving, $e^TYA_k=0$, and hence $0=e^TYA_kx=e^Tb=e^TY(A^T\eta^+-D\eta^-)$, which implies that $e^TYA^T\eta^+=e^TYD\eta^-$.
\end{proof}

We will now define the optimization problem and the induced mapping. Denote by
$(v^\star,v_0^\star)$ the minimizer of the parametric optimization problem
\begin{align}
  \underset{(v,v_0)\in\R^{n+1}}{\minimize} &\quad v^TD(\log(v)-\1) + v_0(\log(v_0) -1)   \notag
\\                     \st &\quad YD v + YA^T\eta^+ v_0 = YA^Tr + YD\eta^- r_0 &:\ y \label{convex-fix}
\\                         &\quad (v,v_0) \ge 0.                     \notag
\end{align} This problem is parametrized by the vector
$(r,r_0)\in \R^{n+1}$. Since the objective is strictly convex, for any $(r,r_0)$ for which
\eqref{convex-fix} is feasible, there is a unique minimizer
$(v^\star,v_0^\star)$. The object of study will be the mapping
$(r,r_0)\rightarrow (v^\star,v_0^\star)$.


Let $\gamma>0$ be a fixed scalar and define the compact and convex set 
\[\Omega_\gamma = \left\{ (v,v_0)\in \R^{n+1} : (v,v_0)\geq 0,\ e^TYDv +
e^TYA^T\eta^+v_0 = \gamma   \right\}.\] 
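
To make the mapping concrete, the following sketch (reusing the hypothetical
cycle network from the earlier sketch, and assuming the packages numpy and
cvxpy with the exponential-cone solver SCS) evaluates
$(r,r_0)\rightarrow(v^\star,v_0^\star)$ once and checks numerically that the
image lies in $\Omega_\gamma$, anticipating the claims below.
\begin{verbatim}
import numpy as np
import cvxpy as cp

# Same hypothetical cycle network as in the earlier sketch.
Y = np.array([[2., 1., 0.], [0., 1., 2.]])
A = np.array([[0., 1., 0.], [0., 0., 2.], [3., 0., 0.]])
d = A.sum(axis=1); D = np.diag(d)
eta_p = np.ones(3); eta_m = np.ones(3)   # a positive decomposition of b
gamma, e = 1.0, np.ones(2)

def solve_map(r, r0):
    """One evaluation of (r, r0) -> (v*, v0*)."""
    v, v0 = cp.Variable(3, nonneg=True), cp.Variable(nonneg=True)
    # d_i v_i (log v_i - 1) = d_i (-entr(v_i) - v_i), likewise for v0
    obj = cp.sum(cp.multiply(d, -cp.entr(v) - v)) - cp.entr(v0) - v0
    con = [Y @ D @ v + v0 * (Y @ A.T @ eta_p)
           == Y @ A.T @ r + r0 * (Y @ D @ eta_m)]
    cp.Problem(cp.Minimize(obj), con).solve(solver=cp.SCS)
    return v.value, v0.value

# Start from a point of Omega_gamma and check the image remains in it.
r, r0 = np.zeros(3), gamma / (e @ Y @ A.T @ eta_p)
v, v0 = solve_map(r, r0)
print(e @ Y @ D @ v + (e @ Y @ A.T @ eta_p) * v0)   # expect about gamma
\end{verbatim}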

\begin{clm} For any $(r,r_0)$ in the set $\Omega_\gamma$,
problem \eqref{convex-fix} is feasible and $(v^\star,v_0^\star)$ is
well defined.
\end{clm}
\begin{proof}
  For the case $r_0=0$, select the point $(v,v_0) = (D^{-1}A^Tr,0)$. 
  This point satisfies $(v,v_0)\geq0$ and 
  \[YDv + YA^T\eta^+v_0=YA^Tr=YA^Tr+YD\eta^-r_0,\] 
  and is thus feasible. For the case $r_0>0$, let $(v,v_0) = (D^{-1}A^T r +
  \eta^-r_0,0)$. This point satisfies $(v,v_0) \geq 0$ and \[ YD v + YA^T\eta^+
  v_0 = Y(A^T r + D\eta^-r_0) = YA^Tr + YD\eta^-r_0, \] and is therefore
  feasible. 
 

  Since problem \eqref{convex-fix} is feasible for every element of
  $\Omega_\gamma$, and since the minimizer is unique, the mapping
  $\Omega_\gamma \ni (r,r_0)\rightarrow (v^\star,v_0^\star)$ is well defined.
\end{proof}
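
The explicit feasible point constructed in this proof can also be verified
numerically; continuing the sketch above:
\begin{verbatim}
# Continuing: the explicit feasible point (D^{-1}A^T r + eta^- r0, 0).
r, r0 = np.array([0.2, 0.1, 0.3]), 0.05
x = np.linalg.solve(D, A.T @ r) + eta_m * r0
lhs = Y @ D @ x                       # x0 = 0 removes the second term
rhs = Y @ A.T @ r + r0 * (Y @ D @ eta_m)
print(np.allclose(lhs, rhs))          # expect: True
\end{verbatim}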

\begin{clm}
  For all $(r,r_0)\in\Omega_\gamma$, the minimizer satisfies
  $(v^\star,v_0^\star) \in \Omega_\gamma$; that is, the mapping sends
  $\Omega_\gamma$ into itself.
\end{clm}

\begin{proof} Observe that by the nonnegativity constraints in problem \eqref{convex-fix}, the vector 
  $(v^\star,v_0^\star)\geq0$.  Also, multiplying the equality constraint by
  $e^T$ and using mass conservation ($e^TYA^T=e^TYD$, since $e^TYA_k=0$)
  together with Claim \ref{claim:eq} and $(r,r_0)\in\Omega_\gamma$,
  \begin{align}
	e^T YD v^\star + e^T YA^T \eta^+ v_0^\star &= e^T YA^T r + e^T YD \eta^- r_0 \notag 
	\\ & = e^T YD r + e^T YA^T \eta^+r_0=\gamma, \notag \end{align} thus
	$(v^\star,v_0^\star)\in \Omega_\gamma.$ 
  \end{proof}

\begin{clm}
  The mapping $\Omega_\gamma \ni (r,r_0)\rightarrow (v^\star,v_0^\star)\in \Omega_\gamma$ has a fixed point.
\end{clm}
\begin{proof}
	Since the mapping is continuous and $\Omega_\gamma$ is compact and convex, by Brouwer's fixed
	point theorem there exists a fixed point. 
 \end{proof}
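
Brouwer's theorem is non-constructive, and nothing guarantees that plain
iteration of the mapping converges; still, as a heuristic numerical probe one
can iterate the sketch above and inspect whether it settles.
\begin{verbatim}
# Continuing: naive fixed-point iteration of the mapping (Brouwer gives
# existence only; convergence of this iteration is not guaranteed).
r, r0 = np.zeros(3), gamma / (e @ Y @ A.T @ eta_p)
for _ in range(200):
    v, v0 = solve_map(r, r0)
    if np.linalg.norm(v - r) + abs(v0 - r0) < 1e-7:
        break
    r, r0 = v, v0
print(v, v0)    # candidate fixed point (hat v, hat v0)
\end{verbatim}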

 We will now record some properties of the fixed point. Denote
 by $(\hat{v},\hat v_0)$ a fixed point of the mapping. The first observation is
 that if the fixed point is positive, then it yields a solution of the
 equations $YA_k\psi(c) = \alpha b$.

 Observe that at the fixed point the equality constraint reads 
\[YD\hat v + YA^T\eta^+\hat v_0=YA^T\hat v + YD \eta^- \hat v_0,\] 
which implies that 
\[YA^T \hat v - YD\hat v  =  YA^T\eta^+\hat v_0 - YD \eta^- \hat v_0 = \hat v_0 b,\]
where the last equality uses \eqref{suf-cond}. Therefore, at the fixed point, $YA_k\hat v = \hat v_0 b$.

Since $(\hat v,\hat v_0)$ minimizes problem \eqref{convex-fix}, if it is assumed positive then the gradient of the objective is defined and 
the KKT conditions hold. Therefore there exists a Lagrange multiplier $y$ such that 
\begin{align}
  \bmat{DY^T\\(YA^T\eta^+)^T}y = \bmat{ D\log(\hat v) \\ \log(\hat v_0)}. \notag
\end{align} From the first block of equations we conclude that $Y^Ty = \log \hat v$, which implies that $\psi(\exp y) = \hat v$.

To conclude: if the fixed point is positive, the pair $(\exp y,\hat v_0)$ satisfies $YA_k\psi(\exp y) = \hat v_0 b$; that is, $c = \exp y$ and $\alpha = \hat v_0$ solve the scaled problem.
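
Continuing the sketches above, the recovery of concentrations from a
(numerically) positive candidate fixed point can be checked as follows. The
least-squares solve stands in for solving $Y^Ty=\log\hat v$, which is exactly
solvable at a true positive fixed point; for the iterate it is only
approximate.
\begin{verbatim}
# Continuing: recover c = exp(y) from a positive candidate fixed point.
Ak = A.T - D
b = Y @ Ak @ np.ones(3)
y = np.linalg.lstsq(Y.T, np.log(v), rcond=None)[0]  # Y^T y = log(hat v)
c = np.exp(y)
psi = np.exp(Y.T @ np.log(c))                       # psi(c) = exp(Y^T log c)
print(Y @ Ak @ psi, v0 * b)                         # should agree approximately
\end{verbatim}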

\subsection{Existence of positive fixed points for scalings of inhomogeneous problems on single 
terminal-linkage-class networks}

We will now argue that if the network is formed by a single terminal linkage
class (a single, strongly connected component), then all fixed points are
positive. We will show that the case $\hat v=0$ and $\hat v_0 = 0$, the case
$\hat v_0>0$ with $\hat v$ having some zero entry, and the case $\hat v>0$
with $\hat v_0 = 0$ are all impossible. Therefore the only possibility is
$(\hat v,\hat v_0)>0$.

To discard the first case, observe that the origin is not contained in
$\Omega_\gamma$; since the fixed point lies in this set, it is not zero. To
discard the remaining two cases we need the following lemma, proved in the
previous paper.

\begin{lemma}If the minimizer of problem
\eqref{convex-fix} has support $K$, then any feasible point has support
contained in $K$. \label{lem:supp}
\end{lemma}

For the second case, assume that the fixed point satisfies $\hat v_0 >0$. Then
the point $(x,x_0)=(D^{-1}A^T\hat v + \eta^-\hat v_0,\,0)$ is feasible and
satisfies $x>0$, since $D^{-1}A^T\hat v\geq0$ and $\eta^-\hat v_0>0$. Since
the support of $(x,x_0)$ contains all the entries of $x$, by Lemma
\ref{lem:supp} the support of $(\hat v,\hat v_0)$ contains all the entries of
$\hat v$, and thus $\hat v>0$. For the third case, assume that $\hat v > 0$.
Then $\rho := D^{-1}A^T \hat v + \eta ^- \hat v_0 > 0$, so there exists a
positive $x_0$ small enough that $x:= \rho - D^{-1}A^T\eta^+ x_0 > 0$. The
point $(x,x_0)>0$ is feasible, since \[YD(\rho -D^{-1}A^T\eta^+
x_0) + YA^T \eta^+x_0= YD\rho=YA^T \hat v + YD\eta^- \hat v_0.\] By Lemma
\ref{lem:supp} this implies that the fixed point has full support, so $\hat
v_0 > 0$, which discards the third case. We conclude that
$(\hat v, \hat v_0) >0$.

\section{Exact solution of inhomogeneous problems}

This section shows that if the network is formed by a single terminal linkage
class, then for any $b$ which satisfies \eqref{suf-cond} there exists a
positive concentration $c>0$ such that $YA_k\psi(c) = b$. 

Assume that there exist $c>0$ and $\alpha>0$ such that $YA_k\psi(c) = \alpha b$.
If we can construct $\tilde c >0$ such that $\psi(\tilde c) = \frac{1}{\alpha}\psi(c)$, 
then \[YA_k\psi(\tilde c) = \frac{1}{\alpha}YA_k\psi(c) = b.\]

Let $\1$ denote the vector of all ones and observe that $\log\left(
\frac{1}{\alpha}\psi(c) \right) = \log(\psi(c))-\1\log(\alpha) = Y^T\log(c) -
\1\log(\alpha)$. Assume that $\1$ is in the range of $Y^T$, so that it can be
expressed as $Y^T\delta = \1$.  Then $\log(\alpha)\1 = Y^T(\log(\alpha)\delta)$;
thus, if we let $\log(\tilde c) = \log(c) - \log(\alpha)\delta$ (here $\log(c)$
and $\log(\tilde c)$ are entry-wise logarithms of the corresponding vectors and
$\log(\alpha)$ is the scalar logarithm), then \[Y^T\log(\tilde c) = Y^T\log(c) -
\log(\alpha)\1,\] which implies that \[\psi(\tilde c) =
\frac{1}{\alpha}\psi(c),\] and thus the inhomogeneous system can be solved.
Entrywise, the rescaling reads $\tilde c_i = c_i\,\alpha^{-\delta_i}$.

Finally, we must argue that the vector $\1$ is in the range of $Y^T$ for
single terminal-linkage-class networks. Observe that mass
conservation implies $e^TY A_k = 0$, i.e., $Y^Te\in {\mathcal N}(A_k^T)$.
Since the matrix $A_k^T$ is the graph Laplacian of a strongly
connected graph, its null space contains only the constant vectors; thus
$Y^Te = \beta\1$ for some scalar $\beta$. Provided $\beta\neq0$ (which holds
whenever no complex is the zero complex, since then every column of the
nonnegative matrix $Y$ has a positive entry), $\delta = e/\beta$ satisfies
$Y^T\delta=\1$, as required.
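
Continuing the sketches above: for the hypothetical cycle network every column
of $Y$ sums to $\beta=2$, so $\delta = e/\beta$, and the rescaling
$\tilde c_i = c_i\,\alpha^{-\delta_i}$ can be checked end to end (assuming the
earlier iteration produced a positive $(\hat v,\hat v_0)$).
\begin{verbatim}
# Continuing: exact solution by rescaling, using delta with Y^T delta = 1.
beta = (Y.T @ e)[0]                  # Y^T e = beta * 1; here beta = 2
delta = e / beta
alpha = v0                           # assumes v0 > 0 from the iteration
c_tilde = np.exp(np.log(c) - np.log(alpha) * delta)
psi_t = np.exp(Y.T @ np.log(c_tilde))
print(Y @ Ak @ psi_t, b)             # should now match b itself
\end{verbatim}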

\end{document}
