@misc{2017NVIDIA,
  author       = {{NVIDIA}},
  title        = {{NVIDIA Tesla V100 GPU} Architecture: The World's Most Advanced Datacenter {GPU}},
  year         = {2017},
  howpublished = {Whitepaper},
  url          = {http://www.nvidia.com/object/volta-architecture-whitepaper.html},
}

@inproceedings{2021Ascend,
  author    = {Liao, Heng and Tu, Jiajin and Xia, Jing and Liu, Hu and Zhou, Xiping and Yuan, Honghui and Hu, Yuxing},
  title     = {{Ascend}: A Scalable and Unified Architecture for Ubiquitous Deep Neural Network Computing: Industry Track Paper},
  booktitle = {2021 IEEE International Symposium on High-Performance Computer Architecture (HPCA)},
  pages     = {789--801},
  year      = {2021},
  doi       = {10.1109/HPCA51647.2021.00071},
}

@article{ragan2013halide,
  author    = {Ragan-Kelley, Jonathan and Barnes, Connelly and Adams, Andrew and Paris, Sylvain and Durand, Fr{\'e}do and Amarasinghe, Saman},
  title     = {{Halide}: A Language and Compiler for Optimizing Parallelism, Locality, and Recomputation in Image Processing Pipelines},
  journal   = {ACM SIGPLAN Notices},
  volume    = {48},
  number    = {6},
  pages     = {519--530},
  year      = {2013},
  publisher = {ACM New York, NY, USA},
}

@article{chen2018tvm,
  author  = {Chen, Tianqi and Moreau, Thierry and Jiang, Ziheng and Shen, Haichen and Yan, Eddie Q. and Wang, Leyuan and Hu, Yuwei and Ceze, Luis and Guestrin, Carlos and Krishnamurthy, Arvind},
  title   = {{TVM}: End-to-End Optimization Stack for Deep Learning},
  journal = {arXiv preprint arXiv:1802.04799},
  year    = {2018},
}

@inproceedings{verdoolaege2010isl,
  author       = {Verdoolaege, Sven},
  title        = {{isl}: An Integer Set Library for the Polyhedral Model},
  booktitle    = {International Congress on Mathematical Software},
  series       = {Lecture Notes in Computer Science},
  volume       = {6327},
  pages        = {299--302},
  year         = {2010},
  organization = {Springer},
}

@inproceedings{zheng2020ansor,
  author    = {Zheng, Lianmin and Jia, Chengfan and Sun, Minmin and Wu, Zhao and Yu, Cody Hao and Haj-Ali, Ameer and Wang, Yida and Yang, Jun and Zhuo, Danyang and Sen, Koushik and others},
  title     = {{Ansor}: Generating {High-Performance} Tensor Programs for Deep Learning},
  booktitle = {14th USENIX Symposium on Operating Systems Design and Implementation (OSDI 20)},
  pages     = {863--879},
  year      = {2020},
}

@article{lattner2020mlir,
  author  = {Lattner, Chris and Amini, Mehdi and Bondhugula, Uday and Cohen, Albert and Davis, Andy and Pienaar, Jacques and Riddle, River and Shpeisman, Tatiana and Vasilache, Nicolas and Zinenko, Oleksandr},
  title   = {{MLIR}: A Compiler Infrastructure for the End of {Moore's} Law},
  journal = {arXiv preprint arXiv:2002.11054},
  year    = {2020},
}

@inproceedings{zhao2021akg,
  author    = {Zhao, Jie and Li, Bojie and Nie, Wang and Geng, Zhen and Zhang, Renwei and Gao, Xiong and Cheng, Bin and Wu, Chen and Cheng, Yun and Li, Zheng and others},
  title     = {{AKG}: Automatic Kernel Generation for Neural Processing Units Using Polyhedral Transformations},
  booktitle = {Proceedings of the 42nd ACM SIGPLAN International Conference on Programming Language Design and Implementation},
  pages     = {1233--1248},
  year      = {2021},
}

@article{vasilache2022composable,
  author  = {Vasilache, Nicolas and Zinenko, Oleksandr and Bik, Aart J. C. and Ravishankar, Mahesh and Raoux, Thomas and Belyaev, Alexander and Springer, Matthias and Gysi, Tobias and Caballero, Diego and Herhut, Stephan and others},
  title   = {Composable and Modular Code Generation in {MLIR}: A Structured and Retargetable Approach to Tensor Compiler Construction},
  journal = {arXiv preprint arXiv:2202.03293},
  year    = {2022},
}

@inproceedings{bastoul2004code,
  author       = {Bastoul, C{\'e}dric},
  title        = {Code generation in the polyhedral model is easier than you think},
  booktitle    = {Proceedings. 13th International Conference on Parallel Architecture and Compilation Techniques, 2004. PACT 2004.},
  pages        = {7--16},
  year         = {2004},
  organization = {IEEE},
}

@article{2018Modeling,
  author  = {Raihan, Md Aamir and Goli, Negar and Aamodt, Tor M.},
  title   = {Modeling Deep Learning Accelerator Enabled {GPUs}},
  journal = {arXiv preprint arXiv:1811.08309},
  year    = {2018},
}