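% This appears to be the LaTeX auxiliary (.aux) file written by a previous
% compilation pass of the paper. The block below is the standard shim that
% hyperref writes into every .aux file: if the next run loads the file
% without hyperref active (\hyper@anchor undefined), \contentsline and
% \newlabel are temporarily redefined to discard the extra hyperlink
% arguments, so the existing entries still parse.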
\relax
\providecommand\hyper@newdestlabel[2]{}
\providecommand\HyperFirstAtBeginDocument{\AtBeginDocument}
\HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined
\global\let\oldcontentsline\contentsline
\gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}}
\global\let\oldnewlabel\newlabel
\gdef\newlabel#1#2{\newlabelxx{#1}#2}
\gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
\AtEndDocument{\ifx\hyper@anchor\@undefined
\let\contentsline\oldcontentsline
\let\newlabel\oldnewlabel
\fi}
\fi}
\global\let\hyper@last\relax
\gdef\HyperFirstAtBeginDocument#1{#1}
\providecommand\HyField@AuxAddToFields[1]{}
\providecommand\HyField@AuxAddToCoFields[2]{}
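% \HyField@AuxAddToFields and \HyField@AuxAddToCoFields are no-op fallbacks
% for hyperref's PDF form-field bookkeeping. The \citation lines that follow
% record one entry per \cite command so that BibTeX can collect the cited
% references; the keys appear to be arXiv identifiers.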
\citation{2010.05244}
\citation{2108.08976}
\citation{1911.12675}
\citation{1805.10896}
\citation{1805.10896,2212.14149}
\citation{2004.13342}
\citation{2010.05244}
\citation{1805.10896}
\citation{1805.08355}
\citation{2303.15533}
\citation{1805.10896,2212.14149}
\citation{2010.05244,2004.13342}
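% Each \@writefile{toc}{...} entry replays its argument into the .toc file at
% the end of the run; \contentsline stores the heading level, number, title,
% page, and hyperref destination (here: section 1, "Introduction", page 1).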
\@writefile{toc}{\contentsline {section}{\numberline {1}Introduction}{1}{section.1}\protected@file@percent }
\citation{2108.08976}
\citation{2010.05244}
\citation{1911.12675}
\citation{1805.10896}
\citation{2004.13342}
\citation{2002.02112}
\citation{1904.08994}
\@writefile{toc}{\contentsline {section}{\numberline {2}Related Work}{2}{section.2}\protected@file@percent }
\@writefile{toc}{\contentsline {paragraph}{Adversarial Training and Generalization}{2}{section*.1}\protected@file@percent }
\@writefile{toc}{\contentsline {paragraph}{Dropout Techniques}{2}{section*.2}\protected@file@percent }
\@writefile{toc}{\contentsline {paragraph}{Adaptive Variational Dropout}{2}{section*.3}\protected@file@percent }
\@writefile{toc}{\contentsline {paragraph}{DropHead for Multi-head Attention}{2}{section*.4}\protected@file@percent }
\@writefile{toc}{\contentsline {paragraph}{Generative Adversarial Networks (GANs)}{2}{section*.5}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {3}Background}{3}{section.3}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Background}{3}{subsection.3.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}Adaptive Dropout Rate}{3}{subsection.3.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.3}Methodology}{3}{subsection.3.3}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {3.4}Evaluation Metrics}{3}{subsection.3.4}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {4}Methodology}{4}{section.4}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}Adaptive Dropout Rate for Adversarial Generative Neural Networks}{4}{subsection.4.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}Standard GAN Training Procedure}{4}{subsection.4.2}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.3}Incorporating Adaptive Dropout Rate}{4}{subsection.4.3}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {4.4}Training Algorithm}{4}{subsection.4.4}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {5}Experiments}{5}{section.5}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {5.1}Experimental Setup}{5}{subsection.5.1}\protected@file@percent }
\@writefile{toc}{\contentsline {subsection}{\numberline {5.2}Results and Discussion}{5}{subsection.5.2}\protected@file@percent }
\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces Quantitative comparison of our method with other state-of-the-art methods. The best results are highlighted in \textbf {bold}.}}{5}{table.1}\protected@file@percent }
\newlabel{tab:comparison}{{1}{5}{Quantitative comparison of our method with other state-of-the-art methods. The best results are highlighted in \textbf {bold}}{table.1}{}}
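% \newlabel{tab:comparison}{...} is the record written by \label inside the
% table: \ref reads the number (1), \pageref the page (5), and hyperref the
% destination (table.1).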
\@writefile{toc}{\contentsline {section}{\numberline {6}Conclusion}{5}{section.6}\protected@file@percent }
\bibdata{ref}
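% \bibdata{ref} records the document's \bibliography{ref} call, pointing
% BibTeX at ref.bib. Each \bibcite line below maps a citation key to the
% label data BibTeX generated: running number, year, and abbreviated/full
% author lists for author-year citation commands.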
\bibcite{2303.15533}{{1}{2023}{{Arkanath~Pathak}}{{}}}
\bibcite{2212.14149}{{2}{2022}{{Chanwoo~Kim}}{{}}}
\bibcite{1805.08355}{{3}{2018}{{Dian~Lei}}{{}}}
\bibcite{2002.02112}{{4}{2020}{{Hyungrok~Ham}}{{}}}
\bibcite{2010.05244}{{5}{2020}{{Jiyang~Xie \& Jianjun~Lei}}{{Jiyang~Xie and Jianjun~Lei}}}
\bibcite{1805.10896}{{6}{2018}{{Juho~Lee}}{{}}}
\bibcite{2004.13342}{{7}{2020}{{Wangchunshu~Zhou}}{{}}}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Comparison of the loss curves of our method and the baseline methods during training.}}{6}{figure.1}\protected@file@percent }
\newlabel{fig:loss_curve}{{1}{6}{Comparison of the loss curves of our method and the baseline methods during training}{figure.1}{}}
\bibcite{1904.08994}{{8}{2019}{{Weng}}{{}}}
\bibcite{1911.12675}{{9}{2019}{{Xu~Shen}}{{}}}
\bibcite{2108.08976}{{10}{2021}{{Zhiyuan~Zhang}}{{}}}
\bibstyle{iclr2022_conference}
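% \bibstyle{iclr2022_conference} tells BibTeX to format the bibliography with
% the ICLR 2022 conference style file (iclr2022_conference.bst).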