\documentclass[11pt]{book}
\usepackage{stata}
\usepackage{ifthen}  % for thanks and bibliography
\usepackage[dvips]{graphicx}
\usepackage{threeparttable}
\usepackage{amsmath, amssymb}
\linespread{1.6} %1.6 for double spacing.  not useful when setspace is used.
\usepackage{geometry}
%\geometry{letterpaper,left=1.2in,right=1.2in,top=4.04cm,bottom=1.04cm}
\usepackage{mycomds, color}
\usepackage{bm}
\usepackage{shortcut}
\renewcommand{\textfraction}{0.15}
\renewcommand{\topfraction}{0.85}
\renewcommand{\bottomfraction}{0.65}
\renewcommand{\floatpagefraction}{0.60}
\makeindex

\setcounter{secnumdepth}{4}
\setcounter{tocdepth}{4}

\renewcommand{\index}[1]{#1}

\newcommand{\bx}{\bm{x}}
\newcommand{\bbeta}{\bm{\beta}}
\newcommand{\bz}{\bm{z}}
\newcommand{\bdelta}{\bm{\delta}}
\newcommand{\T}{\mbox{T}}






%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%! ------  Begin Customization --------------

 \MyTitle{{\huge Manual of Hung-Jen Wang's Stata Codes}}\label{ch:single}



\MyDate{August 2012}

\MyAuthor{Hung-Jen Wang\\ wangh@ntu.edu.tw\\
http://homepage.ntu.edu.tw/\textasciitilde wangh}


\MyTitleFNT{}

\WithTitlePage{no} % whether to use Article class's default "maketitle". if yes, it becomes the title page
\abstractagain{no} % whether the abstract should be on my own generated title page

\MyThanks{Copyright \copyright\ 2012 by Hung-Jen Wang}


\MyAbstract{}


%!------------ end of customization----------
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

          \title{\givetitle\titlenote}
          \author{\giveauthor}
          \date{\givedate}


%          \title{\givetitle}
%          \author{\giveauthor}
%          \date{\givedate}
%---------------------------------------------------
\begin{document}
          \whethertitle  % leave it here; it'll decide whether to make a title page
          \mytitle   % must have here; it makes a title in the page with text
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\addtocounter{chapter}{1} % so that this will be shown as chapter 2

%! -----------------------------------------------

\noindent This manual provides explanations of the various
Stata commands that perform estimations proposed in the following
papers. You should also see the Stata \hjf{do} files in the demo
folders for how to use them. If the papers/models you are interested in
are not listed below, please check my web page and see if the program
is available for separate download.


\begin{itemize}

\item[] Chen, Y.-T. and Wang, H.-J.\@ (2012) ``Centered-Residuals-Based
Moment Tests for Stochastic Frontier Models,'' \textit{Econometric
Reviews}, 31(6), 625--53.

\item[] Wang, H.-J. and Ho, C.-W.\@ (2010) ``Estimating Fixed-Effect
Panel Stochastic Frontier Models by Model Transformation,''
\textit{Journal of Econometrics,} 157 (2), 289--96.

\item[] Wang, H.-J.\@ (2003) ``A Stochastic Frontier Analysis of
Financing Constraints on Investment: The Case of Financial
Liberalization in Taiwan,'' \textit{Journal of Business and Economic
Statistics,} 21(3), pp.~406--419.

\item[] Wang, H.-J.\@ (2002) ``Heteroscedasticity and Non-Monotonic
Efficiency Effects of a Stochastic Frontier Model,'' \textit{Journal of
Productivity Analysis,} 18, pp.~241--253.

\item[] Wang, H.-J. and Schmidt, P.\@ (2002) ``One-Step and Two-Step
Estimation of the Effects of Exogenous Variables on Technical
Efficiency Levels,'' \textit{Journal of Productivity Analysis,} 18,
pp.~129--44.
\end{itemize}

Except for \hjf{ml max} which is a native Stata command, I write all
other commands including \hjf{sfmodel}, \hjf{sf\_init}, \hjf{sf\_srch},
\hjf{sf\_predict}, \hjf{sf\_fixeff}, \hjf{sfmtest}, and
others.


\tableofcontents

\clearpage

% There are several ways to obtain the programs.




\section[Cross-Sectional Models]{Cross-Sectional Models}\label{sec:truncated}

As an overview, the main command of the model estimation is
\hjf{sfmodel}, which specifies the distribution assumption, and the
dependent and independent variables to be included in the model. After
that, the optional commands \hjf{sf\_init} and \hjf{sf\_srch} can be
used to give initial values of the parameters (the former) and to
search for better initial values given the initial values (the latter).
If initial values were not given by \hjf{sf\_init} (or the native,
low-level Stata command \hjf{ml~init}), an arbitrary set of initial
values will be selected by Stata in order to jump start the numerical
optimization process. The model is estimated by numerically maximizing
the log-likelihood function of the model; this is accomplished by
\hjf{ml~max}. After the model parameters are estimated, users can then
use the command \hjf{sf\_predict} to obtain observation-specific
inefficiency index, confidence intervals, and marginal effects of
inefficiency determinants (if applicable).



To illustrate, consider a production frontier model with a flexible
specification.
\begin{eqnarray}
 \ln y_i & = & \ln y_i^* - u_i,\label{mle:t:main}\\
 \ln y_i^* & = & \xib + v_i,\label{mle:t:front}\\
 u_i & \sim & N^+(\mu,\,\sigma_u^2),\label{mle:t:udist}\\
 v_i & \sim & N(0,\, \sigma_v^2).\label{mle:t:vdist}
\end{eqnarray}
The following parameterizations are used.
\begin{align}\label{mle:t:mu1}
 \mu & = \bm{z}_i'\bm{\delta},\\
 \sigma_u^2 & = \exp({\bm{z}_{i}}' \bm{w}),\label{mle:t:wui}\\
 \sigma_v^2 & = \exp(c_0),
\end{align}
where $c_0$ is a constant, $\bm{z}_i$ is the vector of exogenous
variables of observation $i$, and $\bm{\delta}$ and $\bm{w}$ are the
corresponding coefficient vectors.


The model of Battese and Coelli~(1995) parameterizes $\mu$ by a vector
of variables and keeps $\sigma^2_u$ constant ($\bm{z}$ contains only
a constant of 1). The model of \index{Wang}~(2002, 2003) calls for
parameterizing $\mu$ and $\sigma_u^2$ by the same vector of exogenous
variables. The double parameterization is not only less ad hoc, but it
also accommodates non-monotonic relationships between the inefficiency
and its determinants.


The scaling property model of Wang and Schmidt (2002) does not nest
within the above framework. For illustration purposes, we list it here.
\begin{eqnarray}
 \ln y_i & = & \ln y_i^* - u_i,\label{mle:s:main}\\
 \ln y_i^* & = & \xb + v_i,\label{mle:s:front}\\
 u_i & \sim & h(\bm{z}_i,\,\bm{\delta})\cdot N^+(\tau,\,\sigma^2)\nonumber\\
     & \equiv & \exp(\bm{z}_i'\bm{\delta})\cdot N^+(\tau,\,\exp(c_u))\label{mle:s:udist}\\
 v_i & \sim & N(0, \sigma_v^2) \nonumber\\
     & \equiv & N(0, \exp(c_0)),\label{mle:s:vdist}
\end{eqnarray}
\noindent where $\tau$, $c_u$, and $c_0$ are unconstrained constant
parameters, and $\bm{z}_i$ is a variable vector which does
\textit{not} contain a constant. In this setup, the distribution of
$u_i$ is based on the basic distribution $N^+(\tau,\,\sigma^2)$ and
the scale is stretched by the non-stochastic and non-negative
scaling function $\exp(\bm{z}_i'\bm{\delta})$.





\subsection[sfmodel]{\textbf{\LARGE sfmodel}}


\noindent\begin{tabular}{rl}
 \hjf{sfmodel \fit{$depvar$} [if] [in],} & \hjf{\ue{d}istribution(\ue{h}alfnormal | \ue{t}runcated | \ue{e}xponential)} \\
 &  \hjf{\ue{prod}uction | cost}  \\
 &  \hjf{frontier($varlist_f$[, \ue{nocons}tant])} \\
 &  \hjf{mu([$varlist_m$[, \ue{nocons}tant]])} \hjf{etas([$varlist_e$[, \ue{nocons}tant]])}\\
 &  \hjf{usigmas([$varlist_u$[, \ue{nocons}tant]])}\\
 &  \hjf{vsigmas([$varlist_v$[, \ue{nocons}tant]])}\\
 &  \hjf{\ue{scal}ing} \hjf{hscale($varlist_h$)}  \hjf{tau cu} \hjf{\ue{rob}ust} \hjf{cluster($variable$)}\\
 &  \hjf{show} \hjf{[\ue{tech}nique(\fit{NR}|\fit{DFP}|\fit{BFGS}|\fit{BHHH})]}
\end{tabular}
\vspace{+0.5cm}


\noindent\textbf{\large Description}

\noindent The command \hjf{sfmodel} selects the distribution
assumption of $u$ for the model, specifies the dependent and
independent variables of the frontier function, and optionally specifies
the exogenous determinants of inefficiency.
In effect, this command sets up the
appropriate likelihood function, but it does \textit{not} carry out
the estimation. The estimation is put into action by \hjf{ml~max}.

\vspace{+0.3cm}
\noindent\textbf{\large Options}

\begin{description}
\item[\ue{d}istribution($\ue{h}alfnormal | \ue{t}runcated |
\ue{e}xponential)$] indicates the distribution assumption of $u_i$.
The possible choices are \hjf{\ue{h}alfnormal} for the model of
half-normal distribution,
\hjf{\ue{t}runcated} for the truncated-normal distribution model
and the scaling-property
model, and
\hjf{\ue{e}xponential} for exponential distribution model.
 \item[\ue{prod}uction$\mid$cost] indicates whether the
model is a production-type model (production) or a cost-type model
(cost).

 \item[{frontier($varlist_f$[, $\ue{nocons}tant$])}]\label{frontierpage} specifies variables to
be included in the deterministic part of the frontier function;
i.e., the $\bm{x}$ variables in the discussions of the previous
sections.

Note that for the variables specified in the various functions,
including \hjf{frontier()}, \hjf{usigmas()}, \hjf{vsigmas()}, and
many others to be discussed later, only \textit{non-constant}
variables should be specified. It is because, by default, a constant
of 1 will be appended to the specified variable list by Stata,
therefore if the $varlist_f$ (i.e., variable list) includes a
constant, there will be a collinearity problem.

There are three different possibilities in specifying the
$varlist_f$. (1) Leave it blank (ex., \hjf{frontier()}). Then the
function will contain only a constant automatically appended by
Stata. (2) Specify a list of existing variables (ex.,
\hjf{frontier(x1 x2)}). Then the function will contain those
specified plus a constant. (3) Specify a list of existing variables
and ask Stata not to automatically include the constant (ex.,
\hjf{frontier(x1 x2, noconstant)}). Then the function will contain
only the specified variables, and no constant will be appended to
the list by Stata.


\item[{mu([$varlist_m$[, $\ue{nocons}tant$]])}] is used only when
$u_i$ is assumed to have a truncated-normal distribution. It
specifies variables used to parameterize the pre-truncation mean of
the distribution of $u_i$, i.e., the $\bm{z}_i$ in places such
as~(\ref{mle:t:mu1}). If \hjf{mu()} is specified without arguments,
the pre-truncation mean is a constant. If variables are specified in
the argument, such as \hjf{mu(z1 z2)}, then the pre-truncation mean
is a function of these variables.

\item[{etas([$varlist_e$[, $\ue{nocons}tant$]])}] is used only when
$u_i$ is assumed to have an exponential distribution. Note that it
parameterizes the $\eta^2$ (which is the variance of $u$), not
$\eta$ (which is the standard deviation). This is emphasized by the
last letter ``\textit{s}'' (for \textit{square}) of this syntax. If
\hjf{etas()} is specified without arguments, then $\eta^2$ is assumed
to be a constant.



\item[{hscale($varlist_h$)}] is used only for the scaling property
model of Wang and Schmidt~(2002). It specifies the variables in the
scaling function; i.e., the $\bm{z}_i$ variables
in~(\ref{mle:s:udist}). Since the scaling function
of~(\ref{mle:s:udist}) needs to be specified as a function of
variables, an empty argument (\hjf{hscale()}) is not allowed. Also,
because the function does not have a constant by construction, the
\hjf{noconstant} option is not allowed.

\item[tau] is used only for the scaling property model of Wang and
Schmidt~(2002). It indicates the $\tau$ parameter in~(\ref{mle:s:udist}).

\item[cu] is used only for the scaling property model of Wang and
Schmidt~(2002). It indicates the $c_u$ parameter in~(\ref{mle:s:udist}).

\item[{usigmas([$varlist_u$ [, $\ue{nocons}tant$]] )}] specifies the
variables used to parameterize the pre-truncation variance of the
inefficiency $u_i$. Note that it parameterizes the $\sigma^2$
(variance), not $\sigma$ (standard deviations). This is emphasized
by the last letter ``\textit{s}'' (for \textit{square}) of this
syntax. If \hjf{usigmas()} is specified without arguments, a
constant variance of $u_i$ is assumed.

\item[{vsigmas([$varlist_v$ [, $\ue{nocons}tant$]])}] specifies the
variables used to parameterize the variance of the random error
$v_i$. Note that it parameterizes the $\sigma^2_v$, not $\sigma_v$.
This is emphasized by the last letter ``\textit{s}'' (for
\textit{square}) of this syntax. If \hjf{vsigmas()} is specified
without arguments, a constant variance of $v_i$ is assumed.


\item[show] prints the likelihood function set up by \hjf{sfmodel}
in Stata's \hjf{ml~model} syntax. It is mainly for debugging
purposes. It might also be useful if, for example, users want to
supply initial values using Stata's \hjf{ml~init} in lieu of
\hjf{sf\_init} and need to know the order of equations and variables
in the likelihood function.

\end{description}



\noindent \textbf{A Note:}  Users are reminded that to estimate the model
of \index{Wang}~(2002), variable list specified in \hjf{mu()} and
\hjf{usigmas()} should be identical. They should be identical in two
aspects: all the specified variables should be the same, \textit{and}
that the order of their appearances in \hjf{mu()} and \hjf{usigmas()}
should also be the same. The requirement that the same variables be
included in both places \textit{is} the basic idea of the model. The
requirement of the same ordering, on the other hand, is a purely
technical one. If same variables are specified but they appear in
different orders, the model can still be estimated, but the marginal
effects cannot be computed by the \hjf{sf\_predict} command to be
discussed later.


\subsection[sf\_init]{\textbf{\LARGE \index{sf\_init}}}\label{page:sfinit}

\begin{tabular}{rl}
 \hjf{sf\_init}, &  \hjf{frontier($numlist_f$)}  [\ \hjf{mu($numlist_m$)} \hjf{eta($numlist_e$)} \hjf{usigmas($numlist_u$)} \\
 & \hjf{vsigmas($numlist_v$)}  \hjf{hscale($number_h$)} \hjf{zvar($number_z$)} \\
 & \hjf{tau($number_t$)} \hjf{cu($numlist_c$)} \hjf{show} \ ]
\end{tabular}
\vspace{+0.5cm}


\noindent\textbf{\large Description}

The \hjf{sf\_init} command is used following \hjf{sfmodel} or
\hjf{sf\_fixeff}. It allows users to supply initial values for the
model parameters, which are later used by the numerical maximization
routines to maximize the likelihood function. This is an optional
command. Stata will pick an arbitrary set of initial values to begin
the maximization process if no initial value is given by the user.
However, since stochastic frontier models are numerically difficult,
particularly for the more elaborated models, \textit{good} initial
values have better chances of successful convergence and would
certainly speed up convergence.

The \hjf{sf\_init} is essentially a wrapper of the Stata command
\hjf{ml~init}. Unlike \hjf{ml~init}, users using \hjf{sf\_init} do not
need to worry about orders of the equations (\hjf{frontier}, \hjf{mu},
etc.) used in the model. For example, users can put
\hjf{vsigmas($numlist_v$)} before \hjf{frontier($numlist_f$)} or vice
versa. Experienced users may specify \hjf{show} option in \hjf{sfmodel}
to know the order of equations/variables, and use \hjf{ml~init} to
provide initial values directly. See [R] \hjf{ml} for information on
\hjf{ml~init}.

It is important to note that if the user supplies initial values using
\hjf{sf\_init} (or for this matter, \hjf{ml~init} as well), he needs to
give a \textit{complete} set of initial values for the model. That
means initial values for each variable (including the constant) in each
of the equations. Users cannot specify only a subset of initial values.


\vspace{+0.3cm}
\noindent\textbf{\large Options}


The options, \hjf{frontier($numlist_f$)}, \hjf{mu($numlist_m$)}, etc.,
correspond to those in \hjf{sfmodel} and \hjf{sf\_fixeff} and are used
in a similar way: each specifies a list of numbers or a $1 \times k$ matrix of
numerical values. The \hjf{show} option, which is mainly for debugging
purposes, prints the initial value vector set up by \hjf{sf\_init} in
Stata's \hjf{ml~init} syntax.

There is a difference in specifying the number of initial values for
\hjf{sfmodel} and \hjf{sf\_fixeff}. The explanations are as
follows.


Consider a model estimated by \hjf{sfmodel}. Suppose in the
\hjf{sfmodel} command line you specify \hjf{frontier(x1 x2)}. This
implies that the deterministic part of the frontier equation contains
two variables, $x1$ and $x2$, and a constant. The constant is
automatically appended to the equation by Stata unless \hjf{noconstant}
is also specified. The corresponding initial values in \hjf{sf\_init}
would be specified as \hjf{frontier(0.1 0.2 0.5)}. In this example, 0.1
and 0.2 are the initial values of $x1$ and $x2$, respectively, and the
value 0.5 (the \textit{last} value in the list) is the initial value of
the constant of the deterministic frontier function. The above rule
applies to almost all the equations except for the \hjf{hscale()}
function which does not have a constant by construction.

Now consider a model estimated by \hjf{sf\_fixeff}. Suppose in the
\hjf{sf\_fixeff} command line you specify \hjf{frontier(x1 x2)}. Unlike
in the previous case, Stata will not automatically append a constant in
this case. That is, \hjf{frontier()} in \hjf{sf\_fixeff} does not (and
cannot) have a constant by construction. Therefore, the corresponding
initial values in \hjf{sf\_init} would be specified as
\hjf{frontier(0.1 0.2)}. The same is also true for \hjf{zvar()}; that
is, no initial value for the \textit{additional constant} since no
constant is added to the equation.


%%%%%%%%%%%%%%%%%%

\subsection[sf\_srch]{\textbf{\LARGE \index{sf\_srch}}}\label{page:sfsrch}


\begin{tabular}{rl}
 \hjf{sf\_srch}, &  [ \hjf{n($number$)} \hjf{frontier([$varlist_f$])}  \hjf{mu([$varlist_m$])} \hjf{eta([$varlist_e$])} \\
 & \hjf{usigmas([$varlist_u$])} \hjf{vsigmas([$varlist_v$])}  \hjf{hscale([$varlist_h$])} \hjf{zvar([$varlist_z$])} \\
 & \hjf{nograph} \hjf{fast}]
\end{tabular}
\vspace{+0.5cm}


\noindent\textbf{\large Description}

The \hjf{sf\_srch} searches for better initial values for the
variables in the specified functions. The \hjf{sf\_srch} will do the
search for all the constants in the model by default. This is an
optional command, and it can be used regardless of whether
\hjf{sf\_init} has been previously issued. If \hjf{sf\_init} is used
to provide initial values before issuing \hjf{sf\_srch},
\hjf{sf\_srch} will perform the search using those initial values as
starting points. Otherwise, the search starts at initial values
chosen by the internal algorithm of Stata.


Unlike \hjf{sf\_init}, users do not need to specify a complete set
of variables with \hjf{sf\_srch}. That is, users can choose to
perform the search on only a subset of variables from all or part of
the equations.

The \hjf{sf\_srch} is essentially a wrapper of Stata's \hjf{ml plot}
command, which graphs the likelihood profile for a specified
parameter and then replaces the value of the parameter according to
the maximum value of the likelihood function. The command is useful
for fine-tuning the initial value of the specified parameter,
holding other parameter values unchanged.

\vspace{+0.3cm}
\noindent\textbf{\large Options}

\begin{description}
\item[n($number$)] specifies the number of times the search is to be
performed on the specified parameters. For example, \hjf{n(1)} will
do the search once for each of the specified variables, and
\hjf{n(2)} will allow the search to cycle through the variables once
again. There is no upper limit on the number, but it has to be an
integer and greater than 0.

\item[{frontier([$varlist_f$])}] specifies the variables of the
frontier function to which the search is to be performed. The
specified variables should be the same or be a subset of the
variables specified in the \hjf{frontier} function of the
\hjf{sfmodel} command. If only a subset of the variables is
specified, the search will be performed on those variables only, and
the initial values of other variables not specified will not be
altered.

By default of \hjf{sf\_srch}, all the constants of the model, such
as the constants in functions of \hjf{frontier}, \hjf{mu}, etc.\@
and the \hjf{tau} and \hjf{cu} parameters, will be searched
automatically for better initial values.


\item[mu() eta() usigmas() vsigmas() hscale() zvar()] they are used in
the similar way as described above. Note that \hjf{zvar()} is used only
for models estimated by \hjf{sf\_fixeff}.

\item[nograph] asks Stata to perform the search silently without
showing graphs of the likelihood function profile in a graph window.
Although the graphs are sometimes informative, the on-screen graph
rendering can be distracting when many parameters are searched.

\item[fast] asks Stata to draw graphs of variables' likelihood
profiles using Stata~7 style, which is much faster than the default.
By default, graphs are drawn using newer (Stata~8.0 or higher) graph
styles, which generate good-looking graphs at the expense of longer
time. For the purpose of initial value search, pretty graphs are not
of much value. The time-look tradeoff is significant particularly
when many initial values are to be searched by \hjf{sf\_srch}.

Note that the \hjf{nograph} option does not cut the search time as
\hjf{fast} would do. The \hjf{nograph} option only suppresses graph
renderings in the screen.

One caveat of the \hjf{fast} option is that the code is based on
Stata~8.2 when the book is written. New features added to
\hjf{ml~plot} in the future versions of Stata will not be available
if \hjf{fast} is specified.

\end{description}


%%%%%%%%%%

\subsection[sf\_predict]{\textbf{\LARGE \index{sf\_predict}}}


\begin{tabular}{rl}
 \hjf{sf\_predict [if] [in]}, &  \hjf{bc($newvarname_1$) jlms($newvarname_2$) [ci(\#) \ue{marg}inal ]}
\end{tabular}
\vspace{+0.5cm}

\noindent\textbf{\large Description}

The command \hjf{sf\_predict} computes the observation-specific
efficiency index of Battese and Coelli~(1988) (\hjf{bc}) and/or
Jondrow et al.~(1982) (\hjf{jlms}), and the confidence intervals of
these indices (\hjf{ci}). When appropriate, it can also calculate the
marginal effects of the exogenous determinants on the mean and the
variance of inefficiency $u_i$ (\hjf{marginal}). If marginal effects
are requested, the sample mean of the marginal effects will be
printed on Stata's result window, and the observation-specific
marginal effects will be saved in the dataset.

The syntax of \hjf{sf\_predict} is the same for all the models,
regardless of the distributional assumptions on $u_i$ or whether
exogenous determinant variables are included in the model.

\vspace{+0.3cm}
\noindent\textbf{\large Options}

\begin{description}
\item[bc($newvarname_1$)] calculates the technical efficiency index
$E(\exp(-u_i) | \epsilon_i)$ of Battese and Coelli~(1988), and saves the
observation-specific values in the variable $newvarname_1$.

\item[jlms($newvarname_2$)] calculates the inefficiency index $E(u_i|
\epsilon_i )$ of Jondrow et al.~(1982), and saves the
observation-specific values in the variable $newvarname_2$.

\item[ci(\#)] calculates, for each observation, the lower and upper
bounds of the confidence intervals of the efficiency score specified
by either \hjf{bc($newvarname_1$)}, or \hjf{jlms($newvarname_2$)},
or both. The number (\#) indicates the coverage of the confidence
interval. For example, ci(95) indicates that the lower and upper
bounds of a 95\% confidence interval are to be calculated. Values
of the bounds are saved in new variables, with the names being
\hjf{$newvarname_1$\_\#L} and \hjf{$newvarname_1$\_\#U}, or
\hjf{$newvarname_2$\_\#L} and \hjf{$newvarname_2$\_\#U},
respectively, for the case of \hjf{bc} and \hjf{jlms}.

For example, if \hjf{bc(r1)} and \hjf{ci(95)} are specified, then three
variables will be created. One is \hjf{r1} created per the option
\hjf{bc(r1)}, which takes values of the point estimates of
$E(\exp(-u_i)|\epsilon_i)$ of each observation. The other two are
\hjf{r1\_95L} and \hjf{r1\_95U}. The former contains values of the
lower bound of the 95\% confidence interval of \hjf{r1} and the latter
contains values of the upper bound of the 95\% confidence interval of
\hjf{r1} for each observation.  If \hjf{jlms(k2)} is also specified,
then, in addition to the variable \hjf{k2}, two other variables are
created that contain values of the lower and upper bounds of the
associated confidence interval: \hjf{k2\_95L} and \hjf{k2\_95U}.


\item[\ue{marg}inal] calculates the marginal effects of the
exogenous determinants on inefficiency. The marginal effects are
observation-specific, and those values are saved in the variables
$variable$\_M and $variable$\_V for the marginal effects on the mean
and the variance, respectively, of the inefficiency, where
$variable$ is the variable name of the exogenous determinant. In
addition, the sample mean of the variable's marginal effects will be
printed on the Stata's result window.

For example, if \hjf{mu(z1)} is specified in \hjf{sfmodel} for a
truncated normal model and \hjf{marginal} is specified in
\hjf{sf\_predict}, then variables z1\_M and z1\_V are created,
taking values of the marginal effects of z1 on the mean and the
variance, respectively, of expected inefficiency ($E(u_i)$). The
sample means of z1\_M and z1\_V are also printed on the result
window of Stata.


\end{description}

\subsection[sf\_mixtable]{\textbf{\LARGE \index{sf\_mixtable}}}


\begin{tabular}{rl}
 \hjf{sf\_mixtable}, &  \hjf{dof($num$)}
\end{tabular}
\vspace{+0.5cm}

\noindent\textbf{\large Description}

The command \hjf{sf\_mixtable} tabulates critical values of a
\index{mixed Chi-square distribution} at different significance levels
with the degree of freedom equal to \hjf{-dof-}. The values are used
for hypothesis testings of the LR test. The values are taken from
Table~1 of \index{Kodde and Palm}~(1986).

\vspace{+0.3cm}
\noindent\textbf{\large Options}

\begin{description}
\item[dof($num$)] specifies the degrees of freedom of the test
statistic, which is usually the number of restrictions in the test. The
degrees of freedom are restricted to values between 1 and 40
(inclusive).
\end{description}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\clearpage

\section[Panel Data Models]{Panel Data Models}

The panel data model program estimates a fixed-effect panel stochastic
frontier model by within-transformation as proposed by Wang and
Ho~(2010). The main program is \hjf{sf\_fixeff} which sets up the
likelihood function. The \hjf{sf\_init} and \hjf{sf\_srch} commands
described on p.\pageref{page:sfinit} and p.\pageref{page:sfsrch} can be
optionally used following \hjf{sf\_fixeff} to provide and refine
initial values. The model is then estimated by the standard \hjf{ml
max} command. After the model is estimated, the (in)efficiency index is
obtained by \hjf{sf\_effindex}.

We introduce \hjf{sf\_fixeff} and \hjf{sf\_effindex} commands in the
rest of this section. Readers should refer to previous sections for
\hjf{sf\_init} and \hjf{sf\_srch}.

Consider a fixed-effects panel stochastic frontier model with the following specifications:
\begin{align}
 y_{it} & =  \alpha_i + \bx_{it}\bbeta + \varepsilon_{it},\label{eq:y}\\
 \varepsilon_{it} & = v_{it} - u_{it},\\
 v_{it} & \sim N(0, \sigma_v^2),\\
 u_{it} & = h_{it}\cdot u_i^*,\label{eq:uit}\\
 h_{it} & = f(\bz_{it}\bdelta),\label{eq:hfun}\\
 u_i^*  & \sim N^+(\mu, \sigma_u^2),\label{eq:ui} \\
 \sigma_v^2 & = \exp(C_v),\label{eq:sigv}\\
 \sigma_u^2 & = \exp(C_u), \qquad i=1,\ldots,N, \quad t=1,\ldots,T. \label{eq:sigu}
\end{align}
In this setup, $\alpha_i$ is individual $i$'s fixed unobservable
effect, and other variables are defined as usual.


\subsection[sf\_fixeff]{\textbf{\LARGE \index{sf\_fixeff}}}

\noindent\begin{tabular}{rl}
 \hjf{sf\_fixeff \fit{$depvar$} [if] [in],} & \hjf{\ue{d}istribution(\ue{h}alfnormal | \ue{t}runcated)} \\
 &  \hjf{\ue{prod}uction | cost}    \\
 &   \hjf{frontier($varlist_f$)} \ \hjf{zvar($varlist_z$)} \ \hjf{id($varname$)} \\
 &   \hjf{time($varname$)}[\ \hjf{mu} \ \hjf{usigmas} \ \hjf{vsigmas} \ \hjf{show} \ ]
\end{tabular}
\vspace{+0.5cm}


\noindent\textbf{\large Description}

\noindent The command \hjf{sf\_fixeff} estimates the panel data model described
in~(\ref{eq:y}) to~(\ref{eq:sigu}) using the within-transformation
method. It handles balanced and unbalanced panels automatically.
The command specifies the dependent and independent variables of the
frontier function, the exogenous determinants of inefficiency, and
selects the distribution assumption of $u^*$ of the model. In effect,
this command sets up the appropriate likelihood function, but it does
\textit{not} carry out the estimation. The estimation is put into
action by \hjf{ml~max}.

Wang and Ho~(2010) show that the within-transformed and the
first-differenced models are algebraically the same. This command uses
only the within-transformation method. Users do not need to transform
the variables prior to using the command; the program will do it for
you. The command will create a list of within-transformed variables
after the estimation.


\vspace{+0.3cm}
\noindent\textbf{\large Options}

\begin{description}

\item[\ue{d}istribution( $\ue{h}alfnormal | \ue{t}runcated )$]
indicates the distribution assumption of $u_i^*$. The possible choices
are \hjf{\ue{h}alfnormal} for the model of half-normal distribution and
\hjf{\ue{t}runcated} for the truncated-normal distribution model. If
\hjf{halfnormal} is chosen, $\mu = 0$ in~(\ref{eq:ui}).

 \item[\ue{prod}uction $\mid$ cost] indicates whether the
model is a production-type model (production) or a cost-type model
(cost).


 \item[{frontier($varlist_f$)}] specifies variables to
be included in the frontier function, i.e., $\bx_{it}$ in~(\ref{eq:y}).
It cannot be empty.

Note that individual-specific and time-invariant variables, such as
gender and regional dummies, cannot be specified. Unlike most of other
equation specification, a constant \textit{will not be} automatically
added to the equation.

Note also that variables specified here should \textit{not} be
within-transformed in advance. The transformation will be done by the
program.

\item[{zvar($varlist_z$)}] specifies variables to be included in the
scaling function, i.e., $\bz_{it}$ in~(\ref{eq:hfun}). It cannot be
empty.

Similar to the \hjf{frontier()}, variables included here should not be
transformed by the within or the first-difference method. A constant
will NOT be automatically added to this equation.

\item[id($varname$)] specifies the variable that identifies each panel.

\item[time($varname$)] specifies the time variable for the panels.

\item[{mu}] is used only when $u_i^*$ is assumed to have a
truncated-normal distribution. It indicates the $\mu$ parameter
in~(\ref{eq:ui}).

\item[{vsigmas}] indicates the $C_v$ parameter in~(\ref{eq:sigv}). It
is a constant.

\item[{usigmas}] indicates the $C_u$ parameter in~(\ref{eq:sigu}). It
is a constant.

After the estimation, \hjf{sf\_transform} can be used to obtain
$\hat{\sigma}_v^2$ and $\hat{\sigma}_u^2$.

\item[show] prints the likelihood function set up by \hjf{sf\_fixeff}
in Stata's \hjf{ml~model} syntax. It is mainly for debugging purposes.
It might also be useful if, for example, users want to supply initial
values using Stata's \hjf{ml~init} in lieu of \hjf{sf\_init} and need
to know the order of equations and variables in the likelihood
function.

\end{description}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\subsection[sf\_effindex]{\textbf{\LARGE \index{sf\_effindex}}}

\noindent\begin{tabular}{rl}
 \hjf{sf\_effindex ,} & \hjf{bc($newvarname_1$)} \ \hjf{jlms($newvarname_2$) }
\end{tabular}
\vspace{+0.5cm}


\noindent\textbf{\large Description}

\noindent The command \hjf{sf\_effindex} computes both the JLMS inefficiency
index and the BC efficiency index for the panel data model estimated by
\hjf{sf\_fixeff}.


\vspace{+0.3cm}
\noindent\textbf{\large Options}

\begin{description}

\item[bc($newvarname_1$)] calculates the technical efficiency index
$E(\exp(-u_i) | \tilde{\epsilon}_{i.})$ of Battese and Coelli~(1988),
and saves the observation-specific values in the variable
$newvarname_1$. See Wang and Ho~(2010) for details.

\item[jlms($newvarname_2$)] calculates the inefficiency index $E(u_i|
\tilde{\epsilon}_{i.} )$ of Jondrow et al.~(1982), and saves the
observation-specific values in the variable $newvarname_2$. See Wang
and Ho~(2010) for details.


\end{description}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section[Specification Tests I: Moment Test]{Specification Tests I: Moment-Based Test}

Consider the following model.

\begin{eqnarray}
 \ln y_i & = & \ln y_i^* - u_i,\label{mtest:main}\\
 \ln y_i^* & = & \xib + v_i,\label{mtest:front}\\
 v_i & \sim & N(0,\, \sigma_v^2),\label{mtest:vdist}\\
 u_i & \sim & N^+(0, \, \sigma_u^2),\label{mtest:udist1}\\
 \mbox{or \hspace{+1cm}} \notag \\
 u_i & \sim & \mbox{exponential}(\theta). \label{mtest:udist2}
\end{eqnarray}
Here, $\theta$ is the parameter of the exponential distribution, with
the mean of the distribution being $1/\theta$.


The skewness test of Schmidt and Lin~(1984) and the likelihood ratio
tests have been used by researchers to test the existence of $u_i$ and
the associated distribution assumption of it. These tests, however, are
conditional on the model's other specifications being correct. The
specification includes the functional form of the frontier function,
the variables in the frontier and/or the inefficiency functions, and
the distribution assumption of $v_i$.

The tests we introduce here and in the next section are general
specification tests. They can be particularly useful in testing the
distribution assumption of the composed error of the model, i.e., $v_i
- u_i$ for a production frontier model and $v_i+u_i$ for a cost
frontier model.


\subsection[sfmtest]{\textbf{\LARGE \index{sfmtest}}}


\noindent\begin{tabular}{rl}
 \hjf{sfmtest \fit{$depvar$} [if] [in],} & \hjf{\ue{udist}ribution(\ue{h}alfnormal | \ue{e}xponential)} \\
 &  \hjf{\ue{prod}uction | cost} \\
 &  \hjf{frontier($varlist_f$[, \ue{nocons}tant])} \\
 &  [\ \hjf{omega($num$)} \\
 &   \ \ \hjf{bc($newvarname_1$)} \   \hjf{jlms($newvarname_2$)} \ ]
\end{tabular}
\vspace{+0.5cm}

\noindent\textbf{\large Description}

This command carries out the moment-based estimation and test for
stochastic frontier models proposed by Chen and Wang~(2012). It does
three things: (1) It estimates the main model parameters using the
method of moments. (2) It optionally estimates the JLMS and BC
(in)efficiency indexes. (3) It performs the specification test of the
model.

The specification test may be particularly useful in testing the
distribution assumption of the composed error of the model ($v_i-u_i$
for a production frontier model and $v_i+u_i$ for a cost frontier
model). The test can be performed against one of the following two
null hypotheses: (A) $v_i$ is normal and $u_i$ is half-normal
(\hjf{udist(h)}), and (B) $v_i$ is normal and $u_i$ is exponential
(\hjf{udist(e)}).

The test is based on the moment generating function of the assumed
distribution and it can take the form of a sine or a cosine test. The
forms of the tests are, respectively:

 \begin{equation}
 \phi_2(\epsilon_{c,i},\theta)=
 \left[
 \begin{array}{c}
 \sin(\omega_1\epsilon_{c,i})-{\E}[\sin(\omega_1\epsilon_{c,i})]\\
 \vdots\\
 \sin(\omega_q\epsilon_{c,i})-{\E}[\sin(\omega_q\epsilon_{c,i})]\\
 \end{array}
 \right]
 \label{eq:TF-sin}
 \end{equation}
and
 \begin{equation}
 \phi_2(\epsilon_{c,i},\theta)=
 \left[
 \begin{array}{c}
 \cos(\omega_1\epsilon_{c,i})-{\E}[\cos(\omega_1\epsilon_{c,i})]\\
 \vdots\\
 \cos(\omega_q\epsilon_{c,i})-{\E}[\cos(\omega_q\epsilon_{c,i})]\\
 \end{array}
 \right],
 \label{eq:TF-cos}
 \end{equation}
where $\epsilon_{c,i} = \epsilon_i - \E[\epsilon_i]$ is the centered
composed error, and $\omega_k$ is a pre-determined real number with
$k=1,\ldots,q$. In \hjf{sfmtest}, we choose $q=1$. It is very difficult
to determine the optimal choice of the $\omega$ from a theoretical
viewpoint. Nonetheless, as shown by Chen and Wang~(2012), $\omega =1$
or a value around 1 usually works well. Their simulation results also
show that the cosine test works better than the sine test in some of
the scenarios.

\vspace{+0.3cm}
\noindent\textbf{\large Options}

\begin{description}

\item[\ue{udist}ribution( $\ue{h}alfnormal | \ue{e}xponential )$]
indicates the distribution assumption of $u_i$. The possible choices
are \hjf{\ue{h}alfnormal} for the model of half-normal distribution and
\hjf{\ue{e}xponential} for the model of exponential distribution.

 \item[\ue{prod}uction$\mid$cost] indicates whether the
model is a production-type model (production) or a cost-type model
(cost).

 \item[{frontier($varlist_f$)}] specifies variables to
be included in the frontier function, i.e., $\bx_{i}$
in~(\ref{mtest:front}).

 \item[omega($num$)] is $\omega$ in~(\ref{eq:TF-sin}) and~(\ref{eq:TF-cos})
and it contains a single constant parameter. Chen and Wang~(2012) show
that the value of 1 (the default) or around 1 usually works well in
their simulation experiments.

\item[bc($newvarname_1$)] calculates the technical efficiency index
$E(\exp(-u_i) | \epsilon_i)$ of Battese and Coelli~(1988), and saves the
observation-specific values in the variable $newvarname_1$.

\item[jlms($newvarname_2$)] calculates the inefficiency index $E(u_i|
\epsilon_i )$ of Jondrow et al.~(1982), and saves the
observation-specific values in the variable $newvarname_2$.

\end{description}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section[Specification Tests II: SICM Test]{Specification Tests II: SICM Test (preliminary)}

\noindent \textbf{Sorry, the paper has not been published yet.}


\clearpage
\begin{thebibliography}{99}

\addcontentsline{toc}{section}{Bibliography}

 \bibitem{2}Battese, G.E., and Coelli, T.J.\@ (1988). \myquote{Prediction of Firm-level Technical Efficiencies with a Generalized Frontier Production Function and Panel Data,}
      \textit{Journal of Econometrics} \textbf{38}, pp.\@ 387-399.

 \bibitem{3}Battese, G.E., and Coelli, T.J.\@ (1995). \myquote{A Model for Technical Inefficiency Effects in a Stochastic Frontier Production Function for Panel Data,}
      \textit{Empirical Economics} \textbf{20}, pp.\@ 325-32.

 \bibitem{4}Bierens, H.J., and Wang, L.\@ (2012). \myquote{Integrated Conditional Moment
 Tests for Parametric Conditional Distributions,} \textit{Econometric Theory} \textbf{28}, pp.\@ 328-362.

 \bibitem{5}Chen, Y.-T., and Wang, H.-J.\@ (2012). \myquote{Centered-Residuals-Based Moment
 Tests for Stochastic Frontier Models,} \textit{Econometric Reviews} \textbf{31}, pp.\@ 625-53.

 \bibitem{24}Jondrow, J., Knox Lovell, C.A., Materov, I.S., and Schmidt, P.\@ (1982). \myquote{On the Estimation of Technical Inefficiency in the Stochastic Frontier Production Function Model,}
      \textit{Journal of Econometrics} \textbf{19}, pp.\@ 233-238.

 \bibitem{25}Kodde, D.A., and Palm, F.C.\@ (1986). \myquote{Wald Criteria for Jointly Testing Equality and Inequality Restrictions,}
      \textit{Econometrica} \textbf{54}, pp.\@ 1243-48.

 \bibitem{39}Schmidt, P., and Lin, T.-F.\@ (1984). \myquote{Simple Tests of Alternative Specifications in Stochastic Frontier Models,}
      \textit{Journal of Econometrics} \textbf{24}, pp.\@ 349-361.

 \bibitem{43}Wang, H.J.\@ (2002). \myquote{Heteroscedasticity and Non-Monotonic Efficiency Effects of a Stochastic Frontier Model,}
      \textit{Journal of Productivity Analysis} \textbf{18}, pp.\@ 241-253.

 \bibitem{44}Wang, H.J., and Schmidt, P.\@ (2002). \myquote{One-Step and Two-Step Estimation of the Effects of Exogenous Variables on Technical Efficiency Levels,}
      \textit{Journal of Productivity Analysis} \textbf{18}, pp.\@ 129-144.

 \bibitem{42}Wang, H.-J.\@ (2003). \myquote{A Stochastic Frontier Analysis of Financing Constraints on Investment: The Case of Financial Liberalization in Taiwan,}
      \textit{Journal of Business \& Economic Statistics} \textbf{21}, pp.\@ 406-19.

\bibitem{10}Wang, H.-J., and Ho, C.-W.\@ (2010). \myquote{Estimating
Fixed-Effect Panel Stochastic Frontier Models by Model Transformation,}
     \textit{Journal of Econometrics} \textbf{157}, pp.\@ 286-296.

 \bibitem{55}Chen, Y.-T., Su, H.-W., and Wang, H.J.\@ (2012). \myquote{The SICM Specification Tests of Stochastic Frontier
   Models,} manuscript, National Taiwan University.

\end{thebibliography}


\end{document}
