\documentclass[a4paper, 11pt]{article}
\usepackage{comment} % provides the comment environment for multi-line comments
\usepackage{lipsum} %This package just generates Lorem Ipsum filler text. 
\usepackage{fullpage} % changes the margin
\usepackage[a4paper, total={7in, 10in}]{geometry}
\usepackage[fleqn]{amsmath}
\usepackage{amssymb,amsthm}  % assumes amsmath package installed
\usepackage{bm}
\newtheorem{theorem}{Theorem}
\newtheorem{corollary}{Corollary}
\usepackage{graphicx}
\usepackage{caption}
\captionsetup[figure]{name={图}}
\captionsetup[table]{name={表}}
% \usepackage[pdftex]{graphicx}
\usepackage{tikz}
\usetikzlibrary{arrows}
\usepackage{verbatim}
\usepackage[numbered]{mcode}
\usepackage{float}
\usepackage{tikz}
    \usetikzlibrary{shapes,arrows}
    \usetikzlibrary{arrows,calc,positioning}

    \tikzset{
        block/.style = {draw, rectangle,
            minimum height=1cm,
            minimum width=1.5cm},
        input/.style = {coordinate,node distance=1cm},
        output/.style = {coordinate,node distance=4cm},
        arrow/.style={draw, -latex,node distance=2cm},
        pinstyle/.style = {pin edge={latex-, black,node distance=2cm}},
        sum/.style = {draw, circle, node distance=1cm},
    }
\usepackage{xcolor}
\usepackage{mdframed}
\usepackage[shortlabels]{enumitem}
\usepackage{indentfirst}
\usepackage{hyperref}
\usepackage{CJKutf8}
    
\renewcommand{\thesubsection}{\thesection.\alph{subsection}}

\newenvironment{problem}[2][Q]
    { \begin{mdframed}[backgroundcolor=gray!20] \textbf{#1 #2} \\}
    {  \end{mdframed}}

% Define solution environment
\newenvironment{solution}
    {\textit{Solution:}}
    {}

\renewcommand{\qed}{\quad\qedsymbol}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{document}
\begin{CJK}{UTF8}{gbsn}
%Header-Make sure you update this information!!!!
\noindent
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\large\textbf{课程: 模式识别} \hfill \textbf{作业 2}   \\
北航软件学院 \\
\normalsize 学期: 2025，春季\hfill 提交截止时间: 2025年4月29日, 11:59 PM \\
\noindent\rule{7in}{2.8pt}
\textbf{提醒注意:}
\begin{itemize}
\item 本次作业发布于2025年4月8日，截止于2025年4月29日。
\item 作业分为三部分：问答题、实训题、以及实训题报告
\begin{itemize}
    \item 问答题答案可以手写并扫描，或者用latex（或word）手打，最终以QA.pdf文件命名。
    \item 实训题按照项目共享链接内要求和基础代码进行作答，并按要求格式和命名进行保存。
    \item 报告要求按照模板全英文书写，以report.pdf文件命名。
    \item 作业提交格式：\texttt{<student ID>\_<name>\_A2.zip}。比如ZY1921102\_田嘉怡\_A2.zip
    \item {\color{red}提交的zip文件要求（仅）包括}：
    \begin{itemize}
        \item 问答题答案：QA.pdf
        \item 代码：包括作业代码 a2\_Transformer，以及测试结果文件 \texttt{<student ID>\_a2\_Transformer.csv}
        \item 报告：report.pdf
    \end{itemize}
    
\end{itemize}
\item 作业压缩包需要在spoc平台上提交。
\item 每迟交1天（不满1天按1天计算），本次作业扣除10\%分数。
\item 不按作业要求和格式提交，视情况扣分。不得抄袭。
\end{itemize}

\noindent\rule{7in}{1.5pt}
\textbf{第一部分：问答题（共6分）}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Problem 1
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{problem}{1 随机失活（1分）}
随机失活 (dropout) 是一个训练神经网络的常用技巧，它由 Srivastava 等人在 JMLR 2014 发表的论文 “Dropout: A Simple Way to Prevent Neural Networks from Overfitting”~\cite{srivastava2014dropout} 中提出。仔细阅读该论文并回答如下问题 (请将每问的答案组织为一句简洁的句子)。
\begin{enumerate}[(a)]
\item 随机失活在训练时如何操作?
\item 随机失活在测试时如何操作?
\item 随机失活有什么好处?
\item 为什么随机失活可以得到这个好处?
\end{enumerate}
% Explain the difference between L1 and L2 regularization
\end{problem}


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Problem 2
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


\begin{problem}{2 ResNet（2分）}
ResNet是一个深层卷积神经网络模型，由He等人在CVPR2016发表的论文 “Deep Residual Learning for Image Recognition”~\cite{he2016deep}中提出。仔细阅读该论文并回答如下问题 (请将每问的答案组织为一句简洁的句子)。

\begin{enumerate}[(a)]
\item 尽管VGG16~\cite{simonyan2014very}和GoogLeNet~\cite{szegedy2015going}在训练大约20–30层深的网络时遇到了困难，是什么使得ResNet可以训练深如1000层的网络？
\item VGG16~\cite{simonyan2014very}是一个前馈网络, 其中每一层只有一个输入和只有一个输出。而GoogLeNet~\cite{szegedy2015going}和 ResNet~\cite{he2016deep}是有向无环图结构 (DAGs，directed acyclic graphs)，只要网络结构中的数据流没有构成一个环，一层能够有多个输入和多个输出。DAG和前馈结构相比有什么好处？
\item VGG16~\cite{simonyan2014very}有两个全连接层(fc6 和 fc7), 而 ResNet~\cite{he2016deep}和 GoogLeNet~\cite{szegedy2015going}没有全连接层(除了用于分类的最后一层)。 什么被用来取代 FC? 这有什么好处?
\end{enumerate}
\end{problem}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Problem 3
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%


\begin{problem}{3 Transformer（2分）}
Transformer是基于Self-attention的深度学习模型，由Vaswani等人在NIPS2017 发表的论文“Attention Is All You Need”~\cite{Vaswani2017}中提出。仔细阅读该论文并回答如下问题 (请将每问的答案组织为一句简洁的句子)。
\begin{enumerate}[(a)]
\item 在Transformer原始论文中，一共使用了几层self-attention layers？
\item 在Transformer原始论文中，每层self-attention layer包含几个attention heads？
\item 与Recurrent Neural Networks (RNN) 相比，Transformer的优势是什么？
\item 与Convolutional Neural Networks (CNN) 相比，Transformer的优势是什么？
\item Transformer Decoder 中为什么需要使用Masked Attention?
\item 为什么位置编码（positional embedding）在Transformer中有存在的必要？Transformer原文中如何实现位置编码？通过这种方式实现位置编码的依据和原因是什么？
\end{enumerate}
\end{problem}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Problem 4
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\begin{problem}{4 BERT（1分）}
Bidirectional Encoder Representations from Transformers (BERT)~\cite{Devlin2019}是基于Transformer~\cite{Vaswani2017}的大规模自监督预训练语言模型，由Devlin等人在 NAACL-HLT2019 发表的论文“BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding”~\cite{Devlin2019}中提出。仔细阅读该论文并回答如下问题 (请将每问的答案组织为一句简洁的句子)。
\begin{enumerate}[(a)]
\item 如何理解Bidirectional Encoder？
\item 与基于CBOW~\cite{mikolov2013efficient}的word embedding方法相比，BERT的优势体现在哪些方面？
\item 请分析Masked LM自监督预训练策略为什么可以学到语义信息。
\end{enumerate}
\end{problem}

%\begin{figure}[h!]
%\centering
%\caption{模型预测的结果和真实结果}
%\label{fig:ROC}
%\includegraphics[width=0.3\linewidth]{ROC.png}
%\end{figure}



%\newpage
\noindent\rule{7in}{1.5pt}
\textbf{第二部分：实训题（共20分）}
\\ 

\begin{problem}{1 基于Transformer的语者识别(20分)}
\paragraph{1. 实验要求}
\begin{itemize}
    \item 作业要求以及基础代码以Aistudio项目的形式发布。
    \item 发布项目链接有效期3天，请在作业发布3天内fork这个项目，生成``我的项目''，并在自己fork的项目下进行作答，生成答案后按要求保存提交。
\end{itemize}

\paragraph{2. 实验目标}
\begin{itemize}
\item  以语者识别为应用，熟悉基于Transformer有监督序列数据特征学习方法。
\item 了解Transformer中不同参数对识别结果的影响。
\item 通过实现Conformer了解不同Transformer结构对识别结果的影响。
\end{itemize}

\paragraph{3. 实验内容}
\begin{itemize}
    \item 完成基于Transformer的语者识别任务。
    \item 实验介绍详情和基础代码参见 Aistudio中的共享项目\href{https://aistudio.baidu.com/studio/project/partial/verify/8895665/85362f4d6fe1467b817ba997e2942d8e}{“PR\_2025\_Spring\_A2”}。
\end{itemize}


\paragraph{4. 评分细则}
\begin{itemize}
    \item (2分) 提交zip文件到spoc系统；
   \item (3分) Easy baseline；
   \item (4分) Medium baseline；
   \item (7分) Strong baseline;
   \item (4分) Extra: Implement triplet loss.
\end{itemize}
\end{problem}

\noindent\rule{7in}{1.5pt}
\textbf{第三部分：实训题实验报告（共4分）}
\begin{itemize}
    \item 请按照实验报告模板完成实验报告。
\end{itemize}
\bibliographystyle{ieee_fullname}
\bibliography{reference}


\end{CJK}
\end{document}