
%% bare_jrnl.tex
%% V1.4b
%% 2015/08/26
%% by Michael Shell
%% see http://www.michaelshell.org/
%% for current contact information.
%%
%% This is a skeleton file demonstrating the use of IEEEtran.cls
%% (requires IEEEtran.cls version 1.8b or later) with an IEEE
%% journal paper.
%%
%% Support sites:
%% http://www.michaelshell.org/tex/ieeetran/
%% http://www.ctan.org/pkg/ieeetran
%% and
%% http://www.ieee.org/

%%*************************************************************************
%% Legal Notice:
%% This code is offered as-is without any warranty either expressed or
%% implied; without even the implied warranty of MERCHANTABILITY or
%% FITNESS FOR A PARTICULAR PURPOSE! 
%% User assumes all risk.
%% In no event shall the IEEE or any contributor to this code be liable for
%% any damages or losses, including, but not limited to, incidental,
%% consequential, or any other damages, resulting from the use or misuse
%% of any information contained here.
%%
%% All comments are the opinions of their respective authors and are not
%% necessarily endorsed by the IEEE.
%%
%% This work is distributed under the LaTeX Project Public License (LPPL)
%% ( http://www.latex-project.org/ ) version 1.3, and may be freely used,
%% distributed and modified. A copy of the LPPL, version 1.3, is included
%% in the base LaTeX documentation of all distributions of LaTeX released
%% 2003/12/01 or later.
%% Retain all contribution notices and credits.
%% ** Modified files should be clearly indicated as such, including  **
%% ** renaming them and changing author support contact information. **
%%*************************************************************************


% *** Authors should verify (and, if needed, correct) their LaTeX system  ***
% *** with the testflow diagnostic prior to trusting their LaTeX platform ***
% *** with production work. The IEEE's font choices and paper sizes can   ***
% *** trigger bugs that do not appear when using other class files.       ***
% The testflow support page is at:
% http://www.michaelshell.org/tex/testflow/



\documentclass[journal]{IEEEtran}
%
% If IEEEtran.cls has not been installed into the LaTeX system files,
% manually specify the path to it like:
% \documentclass[journal]{../sty/IEEEtran}


\newcommand{\etal}{\textit{et al}.}
\usepackage{multirow}


% Some very useful LaTeX packages include:
% (uncomment the ones you want to load)


% *** MISC UTILITY PACKAGES ***
%
%\usepackage{ifpdf}
% Heiko Oberdiek's ifpdf.sty is very useful if you need conditional
% compilation based on whether the output is pdf or dvi.
% usage:
% \ifpdf
%   % pdf code
% \else
%   % dvi code
% \fi
% The latest version of ifpdf.sty can be obtained from:
% http://www.ctan.org/pkg/ifpdf
% Also, note that IEEEtran.cls V1.7 and later provides a builtin
% \ifCLASSINFOpdf conditional that works the same way.
% When switching from latex to pdflatex and vice-versa, the compiler may
% have to be run twice to clear warning/error messages.






% *** CITATION PACKAGES ***
%
\usepackage{cite}
% cite.sty was written by Donald Arseneau
% V1.6 and later of IEEEtran pre-defines the format of the cite.sty package
% \cite{} output to follow that of the IEEE. Loading the cite package will
% result in citation numbers being automatically sorted and properly
% "compressed/ranged". e.g., [1], [9], [2], [7], [5], [6] without using
% cite.sty will become [1], [2], [5]--[7], [9] using cite.sty. cite.sty's
% \cite will automatically add leading space, if needed. Use cite.sty's
% noadjust option (cite.sty V3.8 and later) if you want to turn this off
% such as if a citation ever needs to be enclosed in parenthesis.
% cite.sty is already installed on most LaTeX systems. Be sure and use
% version 5.0 (2009-03-20) and later if using hyperref.sty.
% The latest version can be obtained at:
% http://www.ctan.org/pkg/cite
% The documentation is contained in the cite.sty file itself.






% *** GRAPHICS RELATED PACKAGES ***
%
\ifCLASSINFOpdf
  % \usepackage[pdftex]{graphicx}
  % declare the path(s) where your graphic files are
  % \graphicspath{{../pdf/}{../jpeg/}}
  % and their extensions so you won't have to specify these with
  % every instance of \includegraphics
  % \DeclareGraphicsExtensions{.pdf,.jpeg,.png}
\else
  % or other class option (dvipsone, dvipdf, if not using dvips). graphicx
  % will default to the driver specified in the system graphics.cfg if no
  % driver is specified.
  % \usepackage[dvips]{graphicx}
  % declare the path(s) where your graphic files are
  % \graphicspath{{../eps/}}
  % and their extensions so you won't have to specify these with
  % every instance of \includegraphics
  % \DeclareGraphicsExtensions{.eps}
\fi
% graphicx was written by David Carlisle and Sebastian Rahtz. It is
% required if you want graphics, photos, etc. graphicx.sty is already
% installed on most LaTeX systems. The latest version and documentation
% can be obtained at: 
% http://www.ctan.org/pkg/graphicx
% Another good source of documentation is "Using Imported Graphics in
% LaTeX2e" by Keith Reckdahl which can be found at:
% http://www.ctan.org/pkg/epslatex
%
% latex, and pdflatex in dvi mode, support graphics in encapsulated
% postscript (.eps) format. pdflatex in pdf mode supports graphics
% in .pdf, .jpeg, .png and .mps (metapost) formats. Users should ensure
% that all non-photo figures use a vector format (.eps, .pdf, .mps) and
% not a bitmapped formats (.jpeg, .png). The IEEE frowns on bitmapped formats
% which can result in "jaggedy"/blurry rendering of lines and letters as
% well as large increases in file sizes.
%
% You can find documentation about the pdfTeX application at:
% http://www.tug.org/applications/pdftex





% *** MATH PACKAGES ***
%
\usepackage{amsmath}
% A popular package from the American Mathematical Society that provides
% many useful and powerful commands for dealing with mathematics.
%
% Note that the amsmath package sets \interdisplaylinepenalty to 10000
% thus preventing page breaks from occurring within multiline equations. Use:
%\interdisplaylinepenalty=2500
% after loading amsmath to restore such page breaks as IEEEtran.cls normally
% does. amsmath.sty is already installed on most LaTeX systems. The latest
% version and documentation can be obtained at:
% http://www.ctan.org/pkg/amsmath





% *** SPECIALIZED LIST PACKAGES ***
%
%\usepackage{algorithmic}
% algorithmic.sty was written by Peter Williams and Rogerio Brito.
% This package provides an algorithmic environment for describing algorithms.
% You can use the algorithmic environment in-text or within a figure
% environment to provide for a floating algorithm. Do NOT use the algorithm
% floating environment provided by algorithm.sty (by the same authors) or
% algorithm2e.sty (by Christophe Fiorio) as the IEEE does not use dedicated
% algorithm float types and packages that provide these will not provide
% correct IEEE style captions. The latest version and documentation of
% algorithmic.sty can be obtained at:
% http://www.ctan.org/pkg/algorithms
% Also of interest may be the (relatively newer and more customizable)
% algorithmicx.sty package by Szasz Janos:
% http://www.ctan.org/pkg/algorithmicx




% *** ALIGNMENT PACKAGES ***
%
\usepackage{array}
% Frank Mittelbach's and David Carlisle's array.sty patches and improves
% the standard LaTeX2e array and tabular environments to provide better
% appearance and additional user controls. As the default LaTeX2e table
% generation code is lacking to the point of almost being broken with
% respect to the quality of the end results, all users are strongly
% advised to use an enhanced (at the very least that provided by array.sty)
% set of table tools. array.sty is already installed on most systems. The
% latest version and documentation can be obtained at:
% http://www.ctan.org/pkg/array


% IEEEtran contains the IEEEeqnarray family of commands that can be used to
% generate multiline equations as well as matrices, tables, etc., of high
% quality.




% *** SUBFIGURE PACKAGES ***
%\ifCLASSOPTIONcompsoc
%  \usepackage[caption=false,font=normalsize,labelfont=sf,textfont=sf]{subfig}
%\else
%  \usepackage[caption=false,font=footnotesize]{subfig}
%\fi
% subfig.sty, written by Steven Douglas Cochran, is the modern replacement
% for subfigure.sty, the latter of which is no longer maintained and is
% incompatible with some LaTeX packages including fixltx2e. However,
% subfig.sty requires and automatically loads Axel Sommerfeldt's caption.sty
% which will override IEEEtran.cls' handling of captions and this will result
% in non-IEEE style figure/table captions. To prevent this problem, be sure
% and invoke subfig.sty's "caption=false" package option (available since
% subfig.sty version 1.3, 2005/06/28) as this is will preserve IEEEtran.cls
% handling of captions.
% Note that the Computer Society format requires a larger sans serif font
% than the serif footnote size font used in traditional IEEE formatting
% and thus the need to invoke different subfig.sty package options depending
% on whether compsoc mode has been enabled.
%
% The latest version and documentation of subfig.sty can be obtained at:
% http://www.ctan.org/pkg/subfig




% *** FLOAT PACKAGES ***
%
\usepackage{fixltx2e}
% fixltx2e, the successor to the earlier fix2col.sty, was written by
% Frank Mittelbach and David Carlisle. This package corrects a few problems
% in the LaTeX2e kernel, the most notable of which is that in current
% LaTeX2e releases, the ordering of single and double column floats is not
% guaranteed to be preserved. Thus, an unpatched LaTeX2e can allow a
% single column figure to be placed prior to an earlier double column
% figure.
% Be aware that LaTeX2e kernels dated 2015 and later have fixltx2e.sty's
% corrections already built into the system in which case a warning will
% be issued if an attempt is made to load fixltx2e.sty as it is no longer
% needed.
% The latest version and documentation can be found at:
% http://www.ctan.org/pkg/fixltx2e


\usepackage{stfloats}
% stfloats.sty was written by Sigitas Tolusis. This package gives LaTeX2e
% the ability to do double column floats at the bottom of the page as well
% as the top. (e.g., "\begin{figure*}[!b]" is not normally possible in
% LaTeX2e). It also provides a command:
%\fnbelowfloat
% to enable the placement of footnotes below bottom floats (the standard
% LaTeX2e kernel puts them above bottom floats). This is an invasive package
% which rewrites many portions of the LaTeX2e float routines. It may not work
% with other packages that modify the LaTeX2e float routines. The latest
% version and documentation can be obtained at:
% http://www.ctan.org/pkg/stfloats
% Do not use the stfloats baselinefloat ability as the IEEE does not allow
% \baselineskip to stretch. Authors submitting work to the IEEE should note
% that the IEEE rarely uses double column equations and that authors should try
% to avoid such use. Do not be tempted to use the cuted.sty or midfloat.sty
% packages (also by Sigitas Tolusis) as the IEEE does not format its papers in
% such ways.
% Do not attempt to use stfloats with fixltx2e as they are incompatible.
% Instead, use Morten Hogholm'a dblfloatfix which combines the features
% of both fixltx2e and stfloats:
%
% \usepackage{dblfloatfix}
% The latest version can be found at:
% http://www.ctan.org/pkg/dblfloatfix




\ifCLASSOPTIONcaptionsoff
 \usepackage[nomarkers]{endfloat}
\let\MYoriglatexcaption\caption
\renewcommand{\caption}[2][\relax]{\MYoriglatexcaption[#2]{#2}}
\fi
% endfloat.sty was written by James Darrell McCauley, Jeff Goldberg and 
% Axel Sommerfeldt. This package may be useful when used in conjunction with 
% IEEEtran.cls'  captionsoff option. Some IEEE journals/societies require that
% submissions have lists of figures/tables at the end of the paper and that
% figures/tables without any captions are placed on a page by themselves at
% the end of the document. If needed, the draftcls IEEEtran class option or
% \CLASSINPUTbaselinestretch interface can be used to increase the line
% spacing as well. Be sure and use the nomarkers option of endfloat to
% prevent endfloat from "marking" where the figures would have been placed
% in the text. The two hack lines of code above are a slight modification of
% that suggested by in the endfloat docs (section 8.4.1) to ensure that
% the full captions always appear in the list of figures/tables - even if
% the user used the short optional argument of \caption[]{}.
% IEEE papers do not typically make use of \caption[]'s optional argument,
% so this should not be an issue. A similar trick can be used to disable
% captions of packages such as subfig.sty that lack options to turn off
% the subcaptions:
% For subfig.sty:
% \let\MYorigsubfloat\subfloat
% \renewcommand{\subfloat}[2][\relax]{\MYorigsubfloat[]{#2}}
% However, the above trick will not work if both optional arguments of
% the \subfloat command are used. Furthermore, there needs to be a
% description of each subfigure *somewhere* and endfloat does not add
% subfigure captions to its list of figures. Thus, the best approach is to
% avoid the use of subfigure captions (many IEEE journals avoid them anyway)
% and instead reference/explain all the subfigures within the main caption.
% The latest version of endfloat.sty and its documentation can obtained at:
% http://www.ctan.org/pkg/endfloat
%
% The IEEEtran \ifCLASSOPTIONcaptionsoff conditional can also be used
% later in the document, say, to conditionally put the References on a 
% page by themselves.




% *** PDF, URL AND HYPERLINK PACKAGES ***
%
\usepackage{url}
% url.sty was written by Donald Arseneau. It provides better support for
% handling and breaking URLs. url.sty is already installed on most LaTeX
% systems. The latest version and documentation can be obtained at:
% http://www.ctan.org/pkg/url
% Basically, \url{my_url_here}.


\usepackage{booktabs}


% *** Do not adjust lengths that control margins, column widths, etc. ***
% *** Do not use packages that alter fonts (such as pslatex).         ***
% There should be no need to do such things with IEEEtran.cls V1.6 and later.
% (Unless specifically asked to do so by the journal or conference you plan
% to submit to, of course. )


% correct bad hyphenation here
\hyphenation{op-tical net-works semi-conduc-tor}


\begin{document}
%
% paper title
% Titles are generally capitalized except for words such as a, an, and, as,
% at, but, by, for, in, nor, of, on, or, the, to and up, which are usually
% not capitalized unless they are the first or last word of the title.
% Linebreaks \\ can be used within to get better formatting as desired.
% Do not put math or special symbols in the title.
\title{Image Generation from Layout with Causal Discovery}
%
%
% author names and IEEE memberships
% note positions of commas and nonbreaking spaces ( ~ ) LaTeX will not break
% a structure at a ~ so this keeps an author's name from being broken across
% two lines.
% use \thanks{} to gain access to the first footnote area
% a separate \thanks must be used for each paragraph as LaTeX2e's \thanks
% was not built to handle multiple paragraphs
%

\author{Author1,~\IEEEmembership{Member,~IEEE,}
        Author2,~\IEEEmembership{Fellow,~OSA,}
        and~Author3,~\IEEEmembership{Life~Fellow,~IEEE}% <-this % stops a space
\thanks{M. Shell was with the Department
of Electrical and Computer Engineering, Georgia Institute of Technology, Atlanta,
GA, 30332 USA e-mail: (see http://www.michaelshell.org/contact.html).}% <-this % stops a space
\thanks{J. Doe and J. Doe are with Anonymous University.}% <-this % stops a space
\thanks{Manuscript received April 19, 2005; revised August 26, 2015.}
\thanks{This paper is supported by 2018AAA100010 NSFC}
}

% note the % following the last \IEEEmembership and also \thanks - 
% these prevent an unwanted space from occurring between the last author name
% and the end of the author line. i.e., if you had this:
% 
% \author{....lastname \thanks{...} \thanks{...} }
%                     ^------------^------------^----Do not want these spaces!
%
% a space would be appended to the last name and could cause every name on that
% line to be shifted left slightly. This is one of those "LaTeX things". For
% instance, "\textbf{A} \textbf{B}" will typeset as "A B" not "AB". To get
% "AB" then you have to do: "\textbf{A}\textbf{B}"
% \thanks is no different in this regard, so shield the last } of each \thanks
% that ends a line with a % and do not let a space in before the next \thanks.
% Spaces after \IEEEmembership other than the last one are OK (and needed) as
% you are supposed to have spaces between the names. For what it is worth,
% this is a minor point as most people would not even notice if the said evil
% space somehow managed to creep in.



% The paper headers
\markboth{Journal of \LaTeX\ Class Files,~Vol.~14, No.~8, August~2015}%
{Shell \MakeLowercase{\textit{et al.}}: Bare Demo of IEEEtran.cls for IEEE Journals}
% The only time the second header will appear is for the odd numbered pages
% after the title page when using the twoside option.
% 
% *** Note that you probably will NOT want to include the author's ***
% *** name in the headers of peer review papers.                   ***
% You can use \ifCLASSOPTIONpeerreview for conditional compilation here if
% you desire.




% If you want to put a publisher's ID mark on the page you can do it like
% this:
%\IEEEpubid{0000--0000/00\$00.00~\copyright~2015 IEEE}
% Remember, if you use this you must call \IEEEpubidadjcol in the second
% column for its text to clear the IEEEpubid mark.



% use for special paper notices
%\IEEEspecialpapernotice{(Invited Paper)}




% make the title area
\maketitle

% As a general rule, do not put math, special symbols or citations
% in the abstract or keywords.
\begin{abstract}
% The abstract must be between 150–250 words.
The abstract goes here.
This paper discusses causal discovery in layout-to-image generation.
The proposed method aims at discovering causal relations between objects' shapes and styles in layout-to-image generation.
\end{abstract}

% Note that keywords are not normally used for peerreview papers.
\begin{IEEEkeywords}
layout-to-image generation, causal discovery, generative adversarial networks, causal regularization, shape and style disentanglement, interpretable image generation.
\end{IEEEkeywords}



% For peer review papers, you can put extra information on the cover
% page as needed:
% \ifCLASSOPTIONpeerreview
% \begin{center} \bfseries EDICS Category: 3-BBND \end{center}
% \fi
%
% For peerreview papers, this IEEEtran command inserts a page break and
% creates the second title. It will be ignored for other modes.
\IEEEpeerreviewmaketitle


\section{Introduction} % (fold)
\label{sec:introduction}

% 开门见山
This paper is concerned with discovering causal relations among objects in a scene for image generation.

This paper is concerned with causal discovery of object shapes and styles in the layout-to-image generation.
% 具体定义和描述任务
The layout-to-image generation task is to synthesize a new image according to a given layout~\cite{Layout2image}. A layout is a set of bounding boxes, representing the positions, sizes and classes of objects in an image.
Therefore, the layout-to-image generation is an inverse task of object detection~\cite{FasterRCNN}.
% layout2image 的意义是什么，为什么重要
This task provides an analysis-by-synthesis approach to understand relations between objects in a scene~\cite{Layout2image_OWA} and is able to visualize the understanding by intervening the input layout~\cite{LostGANv1}.
Furthermore, it also has a wide range of applications in human-computer collaborative creation~\cite{ILeadYouHelp}, such as generating an expected realistic image given a manually crafted layout.

% layout-to-image 的挑战是什么
The main challenges in layout-to-image generation lie in two aspects: generating realistic objects as well as the whole image, and capturing the interactive relations between objects.
The first challenge can be solved by adversarial training of image generation~\cite{GAN,SNGAN}.
% 现有的都是建模object之间的相关性或者独立性
For the second challenge, existing layout-to-image generation methods are designed to model global object correlations~\cite{Layout2image,Layout2image_OWA,Jahn2021HighResolutionCS,Object-Driven_Text-To-Image_Synthesis_via_Adversarial_Training} or local object correlations~\cite{LAMA}.
Other pioneering works~\cite{LostGANv1,LostGANv2,OCGAN} simply assume objects are independent.
Such assumptions like independence or locality are necessary in the training of layout-to-image generation, because layout-to-image is a highly under-specified task. Namely, there are many plausible appearances and interactions of objects and thus various generated images.
Thus, strong assumptions are required to facilitate the learning process.
In this paper, we assume the appearances of objects are causally related and this causal relation can be learned during the training of layout-to-image generation.

% 为什么需要学习因果关系 %！重要
% 这里可以引用目标审稿人的论文
The benefit of learning causal relations is three-fold.
Firstly, learning causal relations can avoid capturing spurious correlations~\cite{ECI} and learn robust object interactions. Spurious correlation means that two factors appear causally related but are not. In the layout-to-image scenario, spurious correlations can manifest as counter-intuitive interactions between objects, such as changing the position of clouds in the sky affecting the shape of a bench on the ground.
However, when consistent with causal relations, the way objects interact in a scene can be robust and interpretable.
\marginpar{ZSY add citations and find a better example.}
Secondly, object relations can be revealed by a causal graph~\cite{ECI,Causality} while correlations learned in existing works~\cite{Layout2image,Layout2image_OWA,Jahn2021HighResolutionCS,Object-Driven_Text-To-Image_Synthesis_via_Adversarial_Training,LAMA} are implicit. From the learned causal graph, the model interprets how it understands object relations in the generating process and achieves analysis-by-synthesis.
\marginpar{TODO:Experiments must show the learned relations}
Finally, learning causal relations allows intervention~\cite{Generative_Interventions_for_Causal_Learning} or counterfactual inference~\cite{Counterfactual_Generative_Networks} in the image generation process.
\marginpar{TODO:Must do intervention and counterfactual sample.}

% ! 我们和occurance的causal relation不同， 在我们的问题里面 occurance 是 layout 指定的，我们学习的是 given layout 下 object 的 content 和 style 的关系，而不是
% ？ 但是学习方法没有本质区别，只是sample的里面有区别，不好这么说

% Second, learning causal relations allows learning an disentangled representation~\cite{Representation_Learning2013} of objects. 
% The disentangled representation learning aims to find the underlying factors of variation and to understand the underlying causal relations amongst the factors~\cite{Representation_Learning2021}. 
% The disentangled representation 
% and achieve analysis-by-synthesis.

% 名字？
We propose Causal LAyout-To-Image (CLATI) to achieve layout-to-image generation and to learn causal relations amongst categories.
% 表示物体
% 因果关系怎么学习
To learn causal relations, we extract high-dimensional hidden features of a trained classifier to represent objects.
As assumed by Lopez-Paz~\etal~\cite{Discovering_Causal_Signals_in_Images}, causal relations of objects are reflected by the corresponding relations of features.
Relations are learned on features based on the linear reconstruction of features, the constraint of directed acyclic graph~\cite{Causality,NOTEARS,DAGGNN} and the regularization of reconstruction residual independence~\cite{DARING}.
Specifically, we use a density ratio estimation method~\cite{Density_ratio_matching,FactorVAE} to approximate the mutual independence among residuals. 
For a better approximation, we also use an adversarial strategy~\cite{FactorVAE,DARING} to train the approximator.
The learned causal relation is encoded by a causal graph, which represents the structural causal model between object classes.

% 怎么从因果关系中采样生成物体，shape and style
Based on the learned causal relation, the image generator synthesizes a new image according to a given layout. 
Specifically, a causal subgraph is extracted based on the layout and object features are sampled. 
Then, object features are transformed to style features and mask features, respectively, and the raw object masks are formed based on mask features and mapped to the layout.
To further capture object shape relations and alleviate mask overlaps~\cite{LAMA}, mapped object masks are adapted locally according to the causal relations. The adaption module is termed Causal Locality-Aware Mask Adaption (CLAMA).
Notice that in the training, only images and the associated layouts are available while the ground-truth semantic segmentation of objects is not provided~\cite{LostGANv1,OCGAN}.
Finally, the style features and the adapted masks are combined and injected to the image generation process to synthesize the output image.
The components of causal relations and image generation are trained jointly, so the causal graph is updated in the learning of both components.

\marginpar{mark section number here to show the organization of paper.}

Our experiments are conducted on synthetic and real-world datasets.
On synthetic datasets, results show our component of causal relation learning achieves comparable performance on causal relation discovery.
On two real-world datasets COCO-Stuff~\cite{coco} and Visual Genome~\cite{vg}, the proposed CLATI model is comparable with the state-of-the-art methods in terms of visual quality of images and layout alignment.
\marginpar{Highlight some noticable results.}
We further investigate the learned causal relations between object categories and display qualitative intervention results.
\marginpar{TODO}

Our main contribution lies in three aspects.
\begin{enumerate}
  \item We propose CLATI, a layout-to-image generation method with object-level causal discovery. To the best of our knowledge, CLATI is the first to learn objects' causal relations in layout-to-image generation.
\item We extend DARING~\cite{DARING} to high-dimensional causal discovery with a density ratio estimation~\cite{Density_ratio_matching} of residual independence.
  \item We propose CLAMA, a local mask adaption module based on causal relations. It adapts the spatial appearance of objects to be consistent with causal relations and avoids potential object mask overlaps.
\end{enumerate}



% 针对每个挑战，我们做了什么，contribute什么，对应哪个章节

% 最后实验怎么样，分成synthetic和real，同样是章节对应

% 我们的contribution



% section introduction (end)

\section{Related Works} % (fold)
\label{sec:related_works}

\subsection{Causal Discovery} % (fold)
\label{sub:related_causal_discovery}

% 有多种分类方法
% 按照方法来分，可以有conditional test/graph structure，以及score based 的方法
% 按照处理数据的类型来区分，可能是矩阵、时序等数据，

% 这部分可能抄别人的综述思路，再加一点点东西吧，

% 时序数据的因果发现 Causal Discovery with Attention-Based Convolutional Neural Networks

% DAG学习的讨论\cite{On_the_Role_of_Sparsity_and_DAG_Constraints_for_Learning_Linear_DAGs}
% notears的后续 \cite{NOFEARS}， 把DAG变成KKT条件
% 使用DAG 学习用在监督学习上面 \cite{CASTLE_NIPS}
% 使用cross entropy 而不是l2来发现因果关系 \cite{Cai2021OnTR} 

% subsection related_causal_discovery (end)

\subsection{Layout-to-Image Generation} % (fold)
\label{sub:related_layout_to_image_generation}


%* 现有layout2image 方法对于shape和style 的关系假设
\begin{table}[ht]
  \centering
  \caption{A summary of the assumptions of object relations in existing layout-to-image related works.}
  \label{tab:rw_assumption}
  \begin{tabular}{l|l|l}
  \toprule
  \multirow{2}{*}{Methods} & Trained with  & Assumption of \\
  & GT masks & object relations\\
  \midrule
  Layout2Im~\cite{Layout2image} & No & Correlated\\
  Layout2Im (OWA)~\cite{Layout2image_OWA} & No & Correlated\\
  AG Layout2Im~\cite{Attribute-guided_image_generation_from_layout} & No & Correlated \\
  CALIG~\cite{layout2image_cvpr2021} & No & Correlated\\
  HRCSS~\cite{Jahn2021HighResolutionCS} & No & Correlated \\
  LostGAN-V1~\cite{LostGANv1} & No & Independent\\
  LostGAN-V2~\cite{LostGANv2} & No & Independent\\
  OCGAN~\cite{OCGAN} & No & Independent \\
  AttrLostGAN~\cite{AttrLostGAN} & No & Independent \\
  DCL~\cite{Sun2021DeepCL} & No & Independent \\
  LAMA~\cite{LAMA} & No & Locally correlated \\
  \midrule
  Hong \etal~\cite{Inferring_Semantic_Layout_for_Hierarchical_Text-to-Image_Synthesis} & Yes & Correlated \\
  Obj-GAN~\cite{Object-Driven_Text-To-Image_Synthesis_via_Adversarial_Training} & Yes & Correlated \\
  \multirow{2}{*}{OP-GAN~\cite{ICLR2019_pathway,TPAMI2020_pathway}} & \multirow{2}{*}{Yes} & Correlated in global pathway \\
  && Independent in local pathway\\
  \midrule
  SG2IM~\cite{sg2im} & Yes & Correlated \\
  SOAR~\cite{SOAR} & Yes & Correlated \\
  VRCIG~\cite{Visual-Relation_Conscious_Image_Generation_from_Structured-Text} & Yes & Correlated \\
  COLoR~\cite{COLOR} & Yes & Correlated \\
  \midrule
  CLATI (Ours) & No & Causally related \\
  \bottomrule
  \end{tabular}
\end{table}

Layout-to-image generation is the main task of this paper, so in this part we briefly review layout-to-image related methods and summarize their assumptions of object relations (Table~\ref{tab:rw_assumption}).
Existing layout-to-image methods divide the task into two subtasks: generating semantic masks from layouts (layout-to-mask) and image synthesis from masks (mask-to-image).
Layout2Im~\cite{Layout2image} firstly proposes the task, and it generates object features to fill bounding boxes for image generation, and its extension~\cite{Layout2image_OWA} further generates a mask directly for each box.
Attribute-Guided Layout2Im~\cite{Attribute-guided_image_generation_from_layout} further includes attribute annotation for diverse generation.
These three methods use ConvLSTM~\cite{ConvLSTM} to fuse object features and capture the global correlation amongst objects.
CALIG~\cite{layout2image_cvpr2021} and HRCSS~\cite{Jahn2021HighResolutionCS} further use the attention mechanism~\cite{Attention} and VQGAN~\cite{Taming_transformers_for_high-resolution_image_synthesis} (a transformer-based image generator) to capture object correlation implicitly.
On the other hand, LostGAN-based methods assume objects are independent.
LostGAN-V1~\cite{LostGANv1} generates masks for bounding boxes independently and forms a whole semantic mask, and the semantic mask is injected into the mask-to-image generator via ISLA-Norm layers.
LostGAN-V2~\cite{LostGANv2} further integrates masks learned from feature maps at different generation stages.
Based on this backbone, OC-GAN~\cite{OCGAN} further improves layout fidelity by maximizing the similarity between the image embeddings and the scene graph inferred from the layout.
Additionally, DCL~\cite{Deep_Consensus_Learning} maps the generated mask and the inferred mask from the generated image to maximize structural consensus, and AttrLostGAN~\cite{AttrLostGAN} also uses attribute information in generation.
% NOTE(review): DCL is cited as Sun2021DeepCL in Table~\ref{tab:rw_assumption} but as Deep_Consensus_Learning here -- unify the citation key.
The LostGAN-based methods perform object style and mask generation independently for each object, and thus implicitly assume objects are independent; objects are not aware of each other in the image generation process.
An important work GIRAFFE~\cite{GIRAFFE} also models a composite scene with multiple objects without layout, and it also assumes independence as objects and background are generated individually.
Finally, LAMA~\cite{LAMA} captures only local correlations of objects and implicitly assumes non-local objects are independent.
Notice again that in the training of layout-to-image models, the ground-truth masks are not available.

Layout-to-image generation also serves as a useful subprocess in text-to-image translation and image generation from scene graphs.
In text-to-image, an input sentence is first formed as a layout, and entities in the sentence become bounding boxes (objects). This reduces the text-to-image problem to a layout-to-image one.
Hong \etal~\cite{Inferring_Semantic_Layout_for_Hierarchical_Text-to-Image_Synthesis} and Obj-GAN~\cite{Object-Driven_Text-To-Image_Synthesis_via_Adversarial_Training} use LSTM~\cite{LSTM} to model the correlation of objects.
Conversely, OP-GAN~\cite{ICLR2019_pathway,TPAMI2020_pathway} designs a global and a local pathway to generate images based on the formed layout. The global pathway allows objects to interact while the local pathway processes each object separately, and thus the two pathways have different assumptions.
In Scene-Graph-to-Image generation, nodes in an input scene graph are regarded as objects in an inferred layout and also become bounding boxes.
Then the inferred layout is converted to the generated image~\cite{SOAR,sg2im,Visual-Relation_Conscious_Image_Generation_from_Structured-Text,COLOR}. 
Notice that object features in Scene-Graph-to-Image generation are learned with graph neural networks~\cite{GCN, A_Comprehensive_Survey_on_Graph_Neural_Networks_TNNLS}, so the correlation of objects is captured.


How to learn object relations in a scene is also a central problem in the literature of scene graph generation~\cite{SceneGG_first,SGG_TNNLS2}.
Scene graph generation aims to abstract a scene graph from an input image.
The algorithm first identifies objects as graph nodes and labels the interrelation as edges~\cite{SceneGG_first}.
Advanced methods try to capture spatial correlations between objects with hierarchical context~\cite{SGG_TNNLS2}, local-to-global interaction~\cite{SGG_TNNLS0} and region-aware attention~\cite{SGG_TNNLS1}.
Furthermore, Unbiased SGG~\cite{Unbiased_Scene_Graph_Generation_From_Biased_Training} considers biased relations in training data and proposes a counterfactual inference method to remove relation bias. 
Though assuming objects are correlated, these methods shed light on how to precisely model object interaction in a scene and explicitly label the inferred relations.
Different from scene graph generation, layout-to-image generation can visualize the relations through generated images.
% layout也作为text2image或者sg2im的中间部分，
% Object pathway 直接从bbox生成




% Technically LAMA shifts pixelwise object distributions, but theoretically LAMA has an underlying assumption different from those of existing works.
% In existing works~\cite{LostGANv1,OCGAN}, object masks are generated individually, which assumes masks are mutually independent.
% The layout-to-image task is ill-posed and thus the mask configuration is highly uncertain.
% Without ground-truth segmentations, the training of mask generation is weakly-supervised and relies on strong assumptions like independence.
% Layout2Im~\cite{Layout2image,Layout2image_OWA} and Obj-GAN~\cite{Object-Driven_Text-To-Image_Synthesis_via_Adversarial_Training} use ConvLSTM~\cite{ConvLSTM} to generate or aggregate object masks, which assumes all objects are correlated.
% Our proposed LAMA has a different assumption that overlapped or adjacent boxes have masks correlated on visibility and appearance.
% This assumption is more general than independence but preserves locality.
% It allows the generative model to consider relations only among overlapped or nearby objects.
% With this local correlation assumption, LAMA boasts reconfigurability~\cite{LostGANv2}. 
% Reconfigurability means keeping most generated objects unchanged while moving, altering or adding a bounding box, which enables generative results to be more controllable. 




% subsection related_layout_to_image_generation (end)


\subsection{Causal Image Generation} % (fold)
\label{sub:related_causal_image_generation}

% * 交给张晟源

% subsection related_causal_image_generation (end)

% section related_works (end)


\section{Methods} % (fold)
\label{sec:methods}

\subsection{Problem Definition} % (fold)
\label{sub:methods_problem_definition}

% 我们的目的是学习object的shape和style的因果关系
% 给定scene的layout，根据根据因果关系采样hidden feature，就可以生成新的场景图片，
% 假设不同类别的物体存在因果关系，假设ANM的SCM，
% F={F_1, \ldots ,F_C} 是C类的random vectors，存在非线性的ANM，噪声分布相互独立，所有噪声服从期望为0且\Sigma 为对角阵切对焦元素都相同，这个是 \cite{TNNLS2019Entropy} 里面justify的假设
% 而 F 决定了生成图片的shape和style，从而影响生成图片
% local and universal consistency
% 非线性，因为ECI的theorem 7.7，可以背后的因果关系是可以发现的。
% 分解为两个目标：发现object的因果关系，并使得生成图片和object 足够真实

% TODO 这里要有layout2image的问题定义，可以接在因果图发现的后面

% 为什么要分shape和style 讨论，因为这样的disentanglement有利于生成学习 \cite{SPADE}? 作为inductive bias

% subsection methods_problem_definition (end)

% TODO 这两个部分之间加symbol table

% 非模型累类号的表格
% \begin{table}[ht]
%   \centering
%   \caption{Description of Notations.}
%   \label{tab:notations}
%   \begin{tabular}{l|l}
%   \toprule
%   Notations & Description \\
%   \midrule
%   $W$, $H$ & the width and height of real and generated images. \\
%   $\mathcal{P}_R$, $\bar{\mathcal{P}}_R$ & the distritbution of regressive residuals, and the virtual distribution of mutual independence. \\
%   m & the number of objects in a scene (image). \\
%   \bottomrule
%   \end{tabular}
% \end{table}

\subsection{Method Overview} % (fold)
\label{sub:mehtod_overview}

% 为了完成上述两个目标，我们设计了两方面的学习机制：
% causal learning和auxiliary approximater 尝试从feature里面学习因果关系。 
% image generation 和 discrimination \ref{sub:image_generation} \ref{sub:image_and_object_discrimination}，这个机制保证图像的真实性，基于因果关系sample feature生成图像。
% 两个方面的学习机制联合学习。

% # TODO 这里需要一个大图辅助

% subsection mehtod_overview (end)



\subsection{Causal Discovery} % (fold)
\label{sub:causal_discovery}
% loss model optmization

% 根据 \cite{Discovering_Causal_Signals_in_Images} 的假设，我们假设了了feature 可以表示物体的shape 和 style 两方面的内容。
% 将在后面 \ref{sub:image_and_object_discrimination} 说明如何得到特征，目前我们对于图片I，和对应了的layout L，使用F(I，L) 抽取objects的特征，进而学习因果关系
% Inspired by DARING， 我们设计了 causal discovery的总loss，用于在feature 上面发现因果关系。
\begin{equation}
  \begin{aligned}
    L &= L_{\mathit{rec}} + L_{\mathit{sparse}} + L_{\mathit{MI}}\\
    s.t.& L_{\mathit{DAG}} = 0
  \end{aligned}
\end{equation}

% $L_{\mathit{rec}}$ 是自回归的term，因为假设了同高斯而且对角矩阵，所以是$\ell_2$ 范数，其中回归模型可以是linear 或者 nonlinear 的， specified by W，之后discuss具体模型。W的元素表示了两个类别之间的因果关系，如果0则表示没有因果关系。without abuse， 假设W代表了因果图的结构。 最后写出完整的公式。
% L_{\mathit{DAG}} 就是W的有向无环，使用DAG-GNN论文的罚项。写出公式。 因为a sample 只有 m 个物体，因此我们使用子图节点个数来作为指数项。
% sparsity 是l1
% MI是residual 相互独立，假设来源于DARING。我们使用total correlation作为MI项。什么事total correlation？为什么这里使用total correlation？这个很符合独立的需要，后面会说明如何估计。
% DARING里面需要计算每个随机变量i和其他的f的相关性。因此先计算期望，中心化之后计算residual之间的协方差，此处按照batch训练，一个类别的object可能只有1个或者几个，期望的估计variance 很大，更难以估计后面的相关性，因此这个方法不合适。其他假设可以在future work里面explore。
% 我们假设了feature 都是unit norm的，这样就变成了 2 - 2 cos。这样做的目的是更容易回归，以前的sparsity都是这么做的，此处引用TPAMI和以往TNNLS 在sparse learning的文章。

% * 模型
% 自回归的模型是怎样的 。 P(F(o_i) | PA_i).linear的，是WX，non linear的是CAM+PNL，首先FC+swish+FC+swish 得到feature，W相加，之后是cFC-swish-cFC-rezero。第一部分FC是WN的，避免W的放缩，之后的cFC需要说明，参考styleGAN2，这样对于生成类别的信息有更多的感知。只是使用生成类别的embdding。使用swish因为三次可导，这样才能保证identifiability。Rezero加快收敛，早期约等于一个CAM，同样可以发现因果关系。
% TODO 这里要配图说明结构


% * 训练
% L_{\mathit{DAG}}  变成 aug 来优化，变成两个罚项，加入总loss里面，两个对偶变量，参考DAG-GNN里面优化
% l1 loss 不好优化，我们使用了simplified的 {augmented Lagrangian} \cite{ADMM} 的方法，变成 l(x) + |z| s.t. \| x - z \|_2^2, 再变成l(x) + |z| + \rho/2 * \| x - z \|_2^2 的形式，交替优化
% 有替身变量W0的，交替优化W和W0
% 对于W0，有解析解，得到之后给W优化， soft threshold的推导在\cite{l1_soft_threshold}
% 因此，这个带约束的优化问题变成 无约束的可微的问题，可以使用deep learning的 ADAM来求解，最后得到

% 此外，第一个challenging的点，每个图片仅仅有部分样本，仅有部分W，是整个因果图的子图。我们假设通过子图的学习可以学习到完整的类别之间的因果图。
% TODO这里或者 前面要有bbox的显示
% TODO 后面不能avg pool了，会损失形状信息

% subsection causal_discovery (end)


\subsection{Approximation of Total Correlation} % (fold)
\label{sub:approximation_of_total_correlation}

% loss model train

% inspired by FactorVAE and VAE-GAN，使用D 区分两个分布。其中的loss是怎样的，\cite{GAN} 里面证明了a/(a+b) 是最优
% virtual 分布使用shuffle，我们记住了历史的20个feature，假设还在support里面，表示每个marginal的分布，重新随机组合就是相互独立的联合分布。
% 区分之后就可以approximate到total correlation
% 使用Transformer的结构，因为这样可以capture long-range interaction，更容易找到mutual dependence，最后 avg起来 ，使用fc-swish-fc-LN-swish-rezero 输出。
% 同样ADAM(0., 0.9999)训练

% 注意，假设D(r) = \sigma(h)，那么 \log \frac{D(r)}{1-D(r)}=h，因此最终 L_{MI} = \mathbb{E} h ，can avoid numerical issue and accelerate convergence.
% 这个被用烂了，叫做density ratio trick


% subsection approximation_of_total_correlation (end)


\subsection{Image and Object Discrimination} % (fold)
\label{sub:image_and_object_discrimination}

% 这里和lostgan是一样的，可以快速描述，一个图概括
% 同样是模型、目标函数，训练
% 模型是shared的层，之后是roi，img-path和obj-path，最后都是输出了SN，使用hinge loss
% 为了batch我们加了空的object，但是判断的时候要舍弃他们


% subsection image_and_object_discrimination (end)

\subsection{Image Generation} % (fold)
\label{sub:image_generation}

% 这部分描述图像生成的过程，同样是模型、目标函数和优化方法
% 注意，和之前不同，我们把原始的feature和causal的feature直接相加了，因此会有一点点不一样
% 基本的描述和LostGAN/LAMA的一样，可以重复说明一下
% 这里要分开生成过程和CLAMA两部分，重点描述CLAMA部分的内容，怎么样根据PA来学习affine的，加一个图
% 所以模型分成三部分， forming causal feature ， mask generation， causal mask adaption， mask2image generation
% 最后的loss只是hinge loss 的部分
% 训练是adam，没有其他策略了

% 为什么是shape 和 style 的disentangled 

% 这里说明使用diffaug和ema

% subsection image_generation (end)

\subsection{Summary} % (fold)  
\label{sub:summary}

% 需要一个图总结所有的训练过程，四个优化部分分别训练什么模型

% subsection summary (end)

% section methods (end)


\section{Experiments on Synthesis Data} % (fold)
\label{sec:experiments_on_synthesis_data}

\subsection{Experimental Setup} % (fold)
\label{sub:experimental_setup_synthesis}

\textbf{Synthetic data}.

\textbf{Baselines}.

\textbf{Metrics}.

% subsection experimental_setup (end)

\subsection{Results} % (fold)
\label{sub:results}

% subsection results (end)


% section experiments_on_synthesis_data (end)


\section{Experiments on Image Generation} % (fold)
\label{sec:experiments_on_image_generation}

\subsection{Experimental Setup} % (fold)
\label{sub:experimental_setup}

\subsubsection{Datasets} % (fold)
\label{subsub:datasets}

% subsubsection datasets (end)

\subsubsection{Baselines} % (fold)
\label{subsub:baselines}

% subsubsection baselines (end)

\subsubsection{Metrics} % (fold)
\label{subsub:metrics}

\textbf{Image.}
IS
FID
PRC
DS

\textbf{Object.}
SceneFID
CAS
PRCobj
YOLOscore


% subsubsection metrics (end)

% subsection experimental_setup (end)


\subsection{Quantitative Results} % (fold)
\label{sub:quantitative_results}

% subsection quantitative_results (end)


\subsection{Qualitative Results} % (fold)
\label{sub:qualitative_results}

% subsection qualitative_results (end)


\subsection{Ablation Study} % (fold)
\label{sub:ablation_study}

% 先学习因果关系，再生成
% 没有因果学习
% 只是相关关系
% 乱序因果关系


% subsection ablation_study (end)


\subsection{Discovered Causal Relations} % (fold)
\label{sub:Discovered_causal_relations}

% confounder explained
% Intervention trial


% subsection Discovered_causal_relations (end)


% section experiments_on_image_generation (end)


\section{Discussion and Conclusion} % (fold)
\label{sec:discussion_and_conclusion}

% section discussion_and_conclusion (end)

\section*{Acknowledgment} % (fold)
\label{sec:acknowledgment}

% 所有的grant都在第一页的thanks里面

% section acknowledgment (end)

% NOTEs:
% Accepted Fonts Within Figures.When preparing your graphics IEEE suggests that you use of one of the following Open Type fonts: Times New Roman, Helvetica, Arial, Cambria, and Symbol.
% use words but not only labels in Figure axis labels
% 图片命名：Li1.pdf Li2.pdf, 作者的图片是Li.png, Zhang.png, ....
% Tab. Fig. 作为缩写
% http://graphicsqc.ieee.org/ 可以用来check所有的图片
% paper在这里提交，http://www.ieee.org/publications_standards/publications/autho rs/authors_submission.html 


% \section{Introduction}
% % The very first letter is a 2 line initial drop letter followed
% % by the rest of the first word in caps.
% % 
% % form to use if the first word consists of a single letter:
% % \IEEEPARstart{A}{demo} file is ....
% % 
% % form to use if you need the single drop letter followed by
% % normal text (unknown if ever used by the IEEE):
% % \IEEEPARstart{A}{}demo file is ....
% % 
% % Some journals put the first two words in caps:
% % \IEEEPARstart{T}{his demo} file is ....
% % 
% % Here we have the typical use of a "T" for an initial drop letter
% % and "HIS" in caps to complete the first word.
% \IEEEPARstart{T}{his} demo file is intended to serve as a ``starter file''
% for IEEE journal papers produced under \LaTeX\ using
% IEEEtran.cls version 1.8b and later.
% % You must have at least 2 lines in the paragraph with the drop letter
% % (should never be an issue)
% I wish you the best of success.

% \hfill mds
 
% \hfill August 26, 2015

% \subsection{Subsection Heading Here}
% Subsection text here.

% % needed in second column of first page if using \IEEEpubid
% %\IEEEpubidadjcol

% \subsubsection{Subsubsection Heading Here}
% Subsubsection text here.


% An example of a floating figure using the graphicx package.
% Note that \label must occur AFTER (or within) \caption.
% For figures, \caption should occur after the \includegraphics.
% Note that IEEEtran v1.7 and later has special internal code that
% is designed to preserve the operation of \label within \caption
% even when the captionsoff option is in effect. However, because
% of issues like this, it may be the safest practice to put all your
% \label just after \caption rather than within \caption{}.
%
% Reminder: the "draftcls" or "draftclsnofoot", not "draft", class
% option should be used if it is desired that the figures are to be
% displayed while in draft mode.
%
%\begin{figure}[!t]
%\centering
%\includegraphics[width=2.5in]{myfigure}
% where an .eps filename suffix will be assumed under latex, 
% and a .pdf suffix will be assumed for pdflatex; or what has been declared
% via \DeclareGraphicsExtensions.
%\caption{Simulation results for the network.}
%\label{fig_sim}
%\end{figure}

% Note that the IEEE typically puts floats only at the top, even when this
% results in a large percentage of a column being occupied by floats.


% An example of a double column floating figure using two subfigures.
% (The subfig.sty package must be loaded for this to work.)
% The subfigure \label commands are set within each subfloat command,
% and the \label for the overall figure must come after \caption.
% \hfil is used as a separator to get equal spacing.
% Watch out that the combined width of all the subfigures on a 
% line do not exceed the text width or a line break will occur.
%
%\begin{figure*}[!t]
%\centering
%\subfloat[Case I]{\includegraphics[width=2.5in]{box}%
%\label{fig_first_case}}
%\hfil
%\subfloat[Case II]{\includegraphics[width=2.5in]{box}%
%\label{fig_second_case}}
%\caption{Simulation results for the network.}
%\label{fig_sim}
%\end{figure*}
%
% Note that often IEEE papers with subfigures do not employ subfigure
% captions (using the optional argument to \subfloat[]), but instead will
% reference/describe all of them (a), (b), etc., within the main caption.
% Be aware that for subfig.sty to generate the (a), (b), etc., subfigure
% labels, the optional argument to \subfloat must be present. If a
% subcaption is not desired, just leave its contents blank,
% e.g., \subfloat[].


% An example of a floating table. Note that, for IEEE style tables, the
% \caption command should come BEFORE the table and, given that table
% captions serve much like titles, are usually capitalized except for words
% such as a, an, and, as, at, but, by, for, in, nor, of, on, or, the, to
% and up, which are usually not capitalized unless they are the first or
% last word of the caption. Table text will default to \footnotesize as
% the IEEE normally uses this smaller font for tables.
% The \label must come after \caption as always.
%
%\begin{table}[!t]
%% increase table row spacing, adjust to taste
%\renewcommand{\arraystretch}{1.3}
% if using array.sty, it might be a good idea to tweak the value of
% \extrarowheight as needed to properly center the text within the cells
%\caption{An Example of a Table}
%\label{table_example}
%\centering
%% Some packages, such as MDW tools, offer better commands for making tables
%% than the plain LaTeX2e tabular which is used here.
%\begin{tabular}{|c||c|}
%\hline
%One & Two\\
%\hline
%Three & Four\\
%\hline
%\end{tabular}
%\end{table}


% Note that the IEEE does not put floats in the very first column
% - or typically anywhere on the first page for that matter. Also,
% in-text middle ("here") positioning is typically not used, but it
% is allowed and encouraged for Computer Society conferences (but
% not Computer Society journals). Most IEEE journals/conferences use
% top floats exclusively. 
% Note that, LaTeX2e, unlike IEEE journals/conferences, places
% footnotes above bottom floats. This can be corrected via the
% \fnbelowfloat command of the stfloats package.





% if have a single appendix:
%\appendix[Proof of the Zonklar Equations]
% or
%\appendix  % for no appendix heading
% do not use \section anymore after \appendix, only \section*
% is possibly needed

% use appendices with more than one appendix
% then use \section to start each appendix
% you must declare a \section before using any
% \subsection or using \label (\appendices by itself
% starts a section numbered zero.)
%


% \appendices
% \section{Proof of the First Zonklar Equation}
% Appendix one text goes here.

% % you can choose not to have a title for an appendix
% % if you want by leaving the argument blank
% \section{}
% Appendix two text goes here.


% % use section* for acknowledgment
% \section*{Acknowledgment}


% The authors would like to thank...


% % Can use something like this to put references on a page
% % by themselves when using endfloat and the captionsoff option.
% \ifCLASSOPTIONcaptionsoff
%   \newpage
% \fi



% trigger a \newpage just before the given reference
% number - used to balance the columns on the last page
% adjust value as needed - may need to be readjusted if
% the document is modified later
%\IEEEtriggeratref{8}
% The "triggered" command can be changed if desired:
%\IEEEtriggercmd{\enlargethispage{-5in}}

% references section

% can use a bibliography generated by BibTeX as a .bbl file
% BibTeX documentation can be easily obtained at:
% http://mirror.ctan.org/biblio/bibtex/contrib/doc/
% The IEEEtran BibTeX style support page is at:
% http://www.michaelshell.org/tex/ieeetran/bibtex/
\bibliographystyle{IEEEtran}
% argument is your BibTeX string definitions and bibliography database(s)
\bibliography{mybib}
%
% <OR> manually copy in the resultant .bbl file
% set second argument of \begin to the number of references
% (used to reserve space for the reference number labels box)

% biography section
% 
% If you have an EPS/PDF photo (graphicx package needed) extra braces are
% needed around the contents of the optional argument to biography to prevent
% the LaTeX parser from getting confused when it sees the complicated
% \includegraphics command within an optional argument. (You could create
% your own custom macro containing the \includegraphics command to make things
% simpler here.)
%\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{mshell}}]{Michael Shell}
% or if you just want to reserve a space for a photo:

% \begin{IEEEbiography}{Michael Shell}
% Biography text here.
% \end{IEEEbiography}

% % if you will not have a photo at all:
% \begin{IEEEbiographynophoto}{John Doe}
% Biography text here.
% \end{IEEEbiographynophoto}

% % insert where needed to balance the two columns on the last page with
% % biographies
% %\newpage

% \begin{IEEEbiographynophoto}{Jane Doe}
% Biography text here.
% \end{IEEEbiographynophoto}

% % You can push biographies down or up by placing
% % a \vfill before or after them. The appropriate
% % use of \vfill depends on what kind of text is
% % on the last page and whether or not the columns
% % are being equalized.

%\vfill

% Can be used to pull up biographies so that the bottom of the last one
% is flush with the other column.
%\enlargethispage{-5in}



% that's all folks
\end{document}


