%!TEX root = ../../main.tex
\subsection{Malware Detection}
\label{sec:malwaredetection}

Malware is short for Malicious Software. In order to protect legitimate users from malware, efficient machine learning based malware detection methods have been proposed~\cite{ye2017survey}. In classical machine learning methods, the process of malware detection is usually divided into two stages: feature extraction and classification/clustering. The performance of traditional malware detection approaches critically depends on the extracted features and on the methods used for classification/clustering. A key challenge in malware detection problems is the sheer scale of data; for instance, treating the data as bytes, a certain sequence classification problem could be of the order of two million time steps. Furthermore, malware is very adaptive in nature, wherein attackers use advanced techniques to hide its malicious behaviour. Some DAD techniques which address these challenges effectively and detect malware are shown in Table~\ref{tab:malwareDetect}.

\begin{table*}
  \begin{center}
   \caption{Examples of DAD techniques used for malware detection.
             \\AE: Autoencoders, LSTM: Long Short Term Memory Networks
             \\RBM: Restricted Boltzmann Machines, DNN: Deep Neural Networks
             \\DBN: Deep Belief Networks, GRU: Gated Recurrent Unit
             \\RNN: Recurrent Neural Networks, CNN: Convolutional Neural Networks
             \\VAE: Variational Autoencoders, GAN: Generative Adversarial Networks
             \\CNN-BiLSTM: CNN--Bidirectional LSTM}
    \captionsetup{justification=centering}
    \label{tab:malwareDetect}
    \scalebox{0.85}{
    \begin{tabular}{|p{3cm}|p{2cm}|p{10cm}|}
      \hline
      \textbf{Technique Used} & \textbf{Section} & \textbf{References}\\
      \hline
      AE & Section~\ref{sec:ae} & \cite{yousefi2017autoencoder},~\cite{hardy2016dl4md},~\cite{de2018malware},~\cite{sewak2018investigation},~\cite{kebede2017classification},~\cite{david2015deepsign}\\\hline
      word2vec & Section~\ref{sec:word2vec} & \cite{cakir2018malware},~\cite{silva2018improving}\\\hline
      CNN & Section~\ref{sec:cnn} & \cite{kolosnjaji2018adversarial},~\cite{suciu2018exploring},~\cite{srisakaokul2018muldef},~\cite{king2018artificial},~\cite{huang2017r2},~\cite{guo2017malware},~\cite{abdelsalam2018malware},\newline \cite{raff2017malware},~\cite{karbab2018maldozer},~\cite{martinelli2017evaluating},~\cite{mclaughlin2017deep},~\cite{gibert2018using},~\cite{kolosnjaji2017empowering}\\\hline
      DNN & Section~\ref{sec:dnn} & \cite{rosenberg2018end},~\cite{wang2017adversary}\\\hline
      DBN & Section~\ref{sec:dnn} & \cite{david2015deepsign},~\cite{yang2016application},~\cite{ding2016application},~\cite{yuxin2017malware},~\cite{selvaganapathy2018deep},~\cite{hou2017deep}\\\hline
      LSTM & Section~\ref{sec:rnn_lstm_gru} & \cite{tobiyama2016malware},~\cite{hu2017black},~\cite{tobiyama2018method},~\cite{passalislong}\\\hline
      CNN-BiLSTM & Sections~\ref{sec:cnn},~\ref{sec:rnn_lstm_gru} & \cite{le2018deep},~\cite{wang2017adversary} \\\hline
      GAN & Section~\ref{sec:gan_adversarial} & \cite{kim2018zero} \\\hline
      Hybrid model (AE-CNN), (AE-DBN) & Section~\ref{sec:hybridModels} & \cite{wang2018effective},~\cite{li2015hybrid} \\\hline
      RNN & Section~\ref{sec:rnn_lstm_gru} & \cite{haddadpajouh2018deep} \\\hline
    \end{tabular}}
  \end{center}
\end{table*}



% OCSVM~\cite{scholkopf2002support}, SVDD~\cite{scholkopf2002support}
% SVM~\cite{cortes1995support}
% KNN~\cite{altman1992introduction}
% Random Forest~\cite{ho1995random}
% Relief~\cite{kira1992feature}
% CSI~\cite{ruchansky2017csi}
% \ref{sec:dnn}
% \ref{sec:stn}
% \ref{sec:spn}
% \ref{sec:word2vec}
% \ref{sec:gan_adversarial}
% \ref{sec:cnn}
% \ref{sec:rnn_lstm_gru}
% \ref{sec:ae}




