@misc{dcgan2015,
    title={{Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks}},
    author={Alec Radford and Luke Metz and Soumith Chintala},
    year={2016},
    eprint={1511.06434},
    archivePrefix={arXiv},
    primaryClass={cs.LG},
    note={Published as a conference paper at the 2016 International Conference on Learning Representations},
}

@article{resnet2016,
   title={{Deep Residual Learning for Image Recognition}},
   ISBN={9781467388511},
   url={http://dx.doi.org/10.1109/CVPR.2016.90},
   DOI={10.1109/cvpr.2016.90},
   journal={2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
   publisher={IEEE},
   author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian},
   year={2016},
   month={Jun}
}

@article{densenet2017,
   title={{Densely Connected Convolutional Networks}},
   ISBN={9781538604571},
   url={http://dx.doi.org/10.1109/CVPR.2017.243},
   DOI={10.1109/cvpr.2017.243},
   journal={2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
   publisher={IEEE},
   author={Huang, Gao and Liu, Zhuang and van der Maaten, Laurens and Weinberger, Kilian Q.},
   year={2017},
   month={Jul}
}

@inproceedings{spectral2018,
    title={Spectral Normalization for Generative Adversarial Networks},
    author={Takeru Miyato and Toshiki Kataoka and Masanori Koyama and Yuichi Yoshida},
    booktitle={International Conference on Learning Representations},
    year={2018},
    url={https://openreview.net/forum?id=B1QRgziT-},
}

@misc{selfattn2018,
  title={{Self-Attention Generative Adversarial Networks}},
  author={Zhang, Han and Goodfellow, Ian and Metaxas, Dimitris and Odena, Augustus},
  eprint={1805.08318},
  archivePrefix={arXiv},
  year={2018}
}

@inproceedings{salimans2016improved,
    author = {Salimans, Tim and Goodfellow, Ian and Zaremba, Wojciech and Cheung, Vicki and Radford, Alec and Chen, Xi},
    title = {Improved Techniques for Training {GANs}},
    year = {2016},
    isbn = {9781510838819},
    publisher = {Curran Associates Inc.},
    address = {Red Hook, NY, USA},
    abstract = {We present a variety of new architectural features and training procedures that we
    apply to the generative adversarial networks (GANs) framework. Using our new techniques,
    we achieve state-of-the-art results in semi-supervised classification on MNIST, CIFAR-10
    and SVHN. The generated images are of high quality as confirmed by a visual Turing
    test: our model generates MNIST samples that humans cannot distinguish from real data,
    and CIFAR-10 samples that yield a human error rate of 21.3%. We also present ImageNet
    samples with unprecedented resolution and show that our methods enable the model to
    learn recognizable features of ImageNet classes.},
    booktitle = {Proceedings of the 30th International Conference on Neural Information Processing Systems},
    pages = {2234--2242},
    numpages = {9},
    location = {Barcelona, Spain},
    series = {NIPS'16}
}

@article{lsgan2017,
   title={{Least Squares Generative Adversarial Networks}},
   ISBN={9781538610329},
   url={http://dx.doi.org/10.1109/ICCV.2017.304},
   DOI={10.1109/iccv.2017.304},
   journal={2017 IEEE International Conference on Computer Vision (ICCV)},
   publisher={IEEE},
   author={Mao, Xudong and Li, Qing and Xie, Haoran and Lau, Raymond Y.K. and Wang, Zhen and Smolley, Stephen Paul},
   year={2017},
   month={Oct}
}

@inproceedings{gan2014,
  title={{Generative Adversarial Nets}},
  author={Goodfellow, Ian and Pouget-Abadie, Jean and Mirza, Mehdi and Xu, Bing and Warde-Farley, David and Ozair, Sherjil and Courville, Aaron and Bengio, Yoshua},
  booktitle={Advances in neural information processing systems},
  pages={2672--2680},
  year={2014}
}

@misc{berthelot2017began,
    title={{BEGAN: Boundary Equilibrium Generative Adversarial Networks}},
    author={David Berthelot and Thomas Schumm and Luke Metz},
    year={2017},
    eprint={1703.10717},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}

@inproceedings{zhao2016energybased,
    title={{Energy-based Generative Adversarial Network}},
    author={Junbo Zhao and Michael Mathieu and Yann LeCun},
    booktitle={International Conference on Learning Representations},
    year={2017},
    url={https://openreview.net/forum?id=ryh9pmcee},
}

@misc{arjovsky2017wasserstein,
  title={{Wasserstein GAN}},
  author={Arjovsky, Martin and Chintala, Soumith and Bottou, L{\'e}on},
  eprint={1701.07875},
  archivePrefix={arXiv},
  year={2017}
}

@inproceedings{gulrajani2017improved,
    author = {Gulrajani, Ishaan and Ahmed, Faruk and Arjovsky, Martin and Dumoulin, Vincent and Courville, Aaron},
    title = {Improved Training of Wasserstein GANs},
    year = {2017},
    isbn = {9781510860964},
    publisher = {Curran Associates Inc.},
    address = {Red Hook, NY, USA},
    abstract = {Generative Adversarial Networks (GANs) are powerful generative models, but suffer
    from training instability. The recently proposed Wasserstein GAN (WGAN) makes progress
    toward stable training of GANs, but sometimes can still generate only poor samples
    or fail to converge. We find that these problems are often due to the use of weight
    clipping in WGAN to enforce a Lipschitz constraint on the critic, which can lead to
    undesired behavior. We propose an alternative to clipping weights: penalize the norm
    of gradient of the critic with respect to its input. Our proposed method performs
    better than standard WGAN and enables stable training of a wide variety of GAN architectures
    with almost no hyperparameter tuning, including 101-layer ResNets and language models
    with continuous generators. We also achieve high quality generations on CIFAR-10 and
    LSUN bedrooms.},
    booktitle = {Proceedings of the 31st International Conference on Neural Information Processing Systems},
    pages = {5769--5779},
    numpages = {11},
    location = {Long Beach, California, USA},
    series = {NIPS'17}
}

@inproceedings{chen2016infogan,
    author = {Chen, Xi and Duan, Yan and Houthooft, Rein and Schulman, John and Sutskever, Ilya and Abbeel, Pieter},
    title = {{InfoGAN}: Interpretable Representation Learning by Information Maximizing Generative Adversarial Nets},
    year = {2016},
    isbn = {9781510838819},
    publisher = {Curran Associates Inc.},
    address = {Red Hook, NY, USA},
    abstract = {This paper describes InfoGAN, an information-theoretic extension to the Generative
    Adversarial Network that is able to learn disentangled representations in a completely
    unsupervised manner. InfoGAN is a generative adversarial network that also maximizes
    the mutual information between a small subset of the latent variables and the observation.
    We derive a lower bound of the mutual information objective that can be optimized
    efficiently. Specifically, InfoGAN successfully disentangles writing styles from digit
    shapes on the MNIST dataset, pose from lighting of 3D rendered images, and background
    digits from the central digit on the SVHN dataset. It also discovers visual concepts
    that include hair styles, presence/absence of eyeglasses, and emotions on the CelebA
    face dataset. Experiments show that InfoGAN learns interpretable representations that
    are competitive with representations learned by existing supervised methods. For an
    up-to-date version of this paper, please see https://arxiv.org/abs/1606.03657.},
    booktitle = {Proceedings of the 30th International Conference on Neural Information Processing Systems},
    pages = {2180--2188},
    numpages = {9},
    location = {Barcelona, Spain},
    series = {NIPS'16}
}

@misc{kodali2017convergence,
    title={{On Convergence and Stability of GANs}},
    author={Naveen Kodali and Jacob Abernethy and James Hays and Zsolt Kira},
    year={2017},
    eprint={1705.07215},
    archivePrefix={arXiv},
    primaryClass={cs.AI}
}

@misc{mirza2014conditional,
    title={{Conditional Generative Adversarial Nets}},
    author={Mehdi Mirza and Simon Osindero},
    year={2014},
    eprint={1411.1784},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}

@inproceedings{odena2017ac,
  title={{Conditional Image Synthesis with Auxiliary Classifier GANs}},
  author={Odena, Augustus and Olah, Christopher and Shlens, Jonathon},
  booktitle={Proceedings of the 34th International Conference on Machine Learning-Volume 70},
  pages={2642--2651},
  year={2017},
  organization={JMLR.org}
}

@article{cyclegan,
   title={{Unpaired Image-to-Image Translation Using Cycle-Consistent Adversarial Networks}},
   ISBN={9781538610329},
   url={http://dx.doi.org/10.1109/ICCV.2017.244},
   DOI={10.1109/iccv.2017.244},
   journal={2017 IEEE International Conference on Computer Vision (ICCV)},
   publisher={IEEE},
   author={Zhu, Jun-Yan and Park, Taesung and Isola, Phillip and Efros, Alexei A.},
   year={2017},
   month={Oct}
}

@article{srgan,
   title={{Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network}},
   ISBN={9781538604571},
   url={http://dx.doi.org/10.1109/CVPR.2017.19},
   DOI={10.1109/cvpr.2017.19},
   journal={2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
   publisher={IEEE},
   author={Ledig, Christian and Theis, Lucas and Husz{\'a}r, Ferenc and Caballero, Jose and others},
   year={2017},
   month={Jul}
}

@inproceedings{karras2017progressive,
    title={Progressive Growing of {GAN}s for Improved Quality, Stability, and Variation},
    author={Tero Karras and Timo Aila and Samuli Laine and Jaakko Lehtinen},
    booktitle={International Conference on Learning Representations},
    year={2018},
    url={https://openreview.net/forum?id=Hk99zCeAb},
}

@inproceedings{biggan,
    title={Large Scale {GAN} Training for High Fidelity Natural Image Synthesis},
    author={Andrew Brock and Jeff Donahue and Karen Simonyan},
    booktitle={International Conference on Learning Representations},
    year={2019},
    url={https://openreview.net/forum?id=B1xsqj09Fm},
}

@inproceedings{zhang2017adversarial,
  title={Adversarial feature matching for text generation},
  author={Zhang, Yizhe and Gan, Zhe and Fan, Kai and Chen, Zhi and Henao, Ricardo and Shen, Dinghan and Carin, Lawrence},
  booktitle={Proceedings of the 34th International Conference on Machine Learning-Volume 70},
  pages={4006--4015},
  year={2017},
  organization={JMLR.org}
}

@article{esteban2017real,
  title={Real-valued (Medical) Time Series Generation with Recurrent Conditional {GANs}},
  author={Esteban, Crist{\'o}bal and Hyland, Stephanie L and R{\"a}tsch, Gunnar},
  journal={arXiv preprint arXiv:1706.02633},
  year={2017}
}

@techreport{Krizhevsky2009LearningML,
  title={{Learning Multiple Layers of Features from Tiny Images}},
  author={Krizhevsky, Alex},
  institution={University of Toronto},
  year={2009}
}

@InProceedings{mocogan,
    author = {Tulyakov, Sergey and Liu, Ming-Yu and Yang, Xiaodong and Kautz, Jan},
    title = {{MoCoGAN: Decomposing Motion and Content for Video Generation}},
    booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
    month = jun,
    year = {2018}
}

@misc{dvdgan,
    title={Efficient Video Generation on Complex Datasets},
    author={Aidan Clark and Jeff Donahue and Karen Simonyan},
    year={2019},
    eprint={1907.06571},
    archivePrefix={arXiv},
    primaryClass={cs.CV}
}

@inproceedings{gman,
    title={{Generative Multi-Adversarial Networks}},
    author={Ishan Durugkar and Ian Gemp and Sridhar Mahadevan},
    booktitle={International Conference on Learning Representations},
    year={2017},
    url={https://openreview.net/forum?id=Byk-VI9eg},
}

@misc{dong2018training,
    title={Training Generative Adversarial Networks with Binary Neurons by End-to-end Backpropagation},
    author={Hao-Wen Dong and Yi-Hsuan Yang},
    year={2018},
    eprint={1810.04714},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}

@article{Choi_2018,
   title={StarGAN: Unified Generative Adversarial Networks for Multi-domain Image-to-Image Translation},
   ISBN={9781538664209},
   url={http://dx.doi.org/10.1109/cvpr.2018.00916},
   DOI={10.1109/cvpr.2018.00916},
   journal={2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition},
   publisher={IEEE},
   author={Choi, Yunjey and Choi, Minje and Kim, Munyoung and Ha, Jung-Woo and Kim, Sunghun and Choo, Jaegul},
   year={2018},
   month={Jun}
}

@inproceedings{dumoulin2016adversarially,
    title={Adversarially learned inference},
    author={Dumoulin, Vincent and Belghazi, Ishmael and Poole, Ben and Mastropietro, Olivier and Lamb, Alex and Arjovsky, Martin and Courville, Aaron},
    booktitle={International Conference on Learning Representations},
    year={2017},
    url={https://openreview.net/forum?id=B1ElR4cgg},
}

@misc{tfgan,
  author = {Joel Shor},
  title = {{TensorFlow GAN}},
  year = {2017},
  publisher = {GitHub},
  journal = {GitHub repository},
  howpublished = {\url{https://github.com/tensorflow/models/tree/master/research/gan}},
  commit = {f96099da87c1d6a00b67cab499660253de75a672}
}

@misc{hypergan,
  author = {{HyperGAN Community}},
  title = {{HyperGAN}},
  year = {2016},
  publisher = {GitHub},
  journal = {GitHub repository},
  howpublished = {\url{https://github.com/HyperGAN/HyperGAN}},
  commit = {5869e4798a6d13ca6bd8f18b87465826d70be922}
}

@misc{gantoolkit,
  author = {Sinha, Raunak and Panwar, Naveen and Sankaran, Anush},
  title = {{IBM GAN Toolkit}},
  year = {2018},
  publisher = {GitHub},
  journal = {GitHub repository},
  howpublished = {\url{https://github.com/IBM/gan-toolkit}},
  commit = {7f868208bd5016ab890f6dfcaf7d1ecef15b4c95}
}
