% !TEX program = xelatex

\documentclass{resume}
%\usepackage{zh_CN-Adobefonts_external} % Simplified Chinese Support using external fonts (./fonts/zh_CN-Adobe/)
%\usepackage{zh_CN-Adobefonts_internal} % Simplified Chinese Support using system fonts
% This version was used to apply for my PhD!
\usepackage{hyperref}
\hypersetup{
    colorlinks=true,
    linkcolor=blue,
    filecolor=blue,      
    urlcolor=blue,
    citecolor=cyan,
}

\begin{document}
\pagenumbering{gobble} % suppress displaying page number

\name{Jinyang Liu}

\basicInfo{
  \email{jyliu@cse.cuhk.edu.hk} \textperiodcentered\ 
  \phone{Tel/WeChat (+86) 136-0975-2831}}

\section{\faFlag~RESEARCH INTERESTS}
\textbf{Artificial Intelligence for IT Operations (AIOps), Recommendation Systems}
 \begin{itemize}  \itemsep -1pt %reduce space between items
  \item Anomaly detection, log analysis, time series analysis
  \item Deep learning-based models, distillation models and graph neural networks in Recommendation Systems
 \end{itemize}
 

%%%%%%%%%%%% Education %%%%%%%%%%%
\section{\faGraduationCap~Education}
\datedsubsection{\textbf{The Chinese University of Hong Kong (CUHK)}, Hong Kong, China}{2020 -- present}
\textit{PhD student} in Computer Science

{Supervisor: \href{http://www.cse.cuhk.edu.hk/lyu/}{Prof.~Michael Lyu}~(Google Scholar citation: 41407)}


\datedsubsection{\textbf{Sun Yat-Sen University (SYSU)}, Guangzhou, China}{2018 -- 2020}
\textit{M.Eng.} in Computer Technology

{Supervisor: \href{http://sdcs.sysu.edu.cn/node/2559}{Prof.~Zibin Zheng},  Dean of the Software Engineering Department}

GPA: 90.41/100

\datedsubsection{\textbf{Sun Yat-Sen University (SYSU)}, Guangzhou, China}{2014 -- 2018}
\textit{B.E.} in Software Engineering

GPA:~3.9/5.0,~rank:~36/388~(top~10\%)

%%%%%%%%%%%% Working Experience %%%%%%%%%%%
\section{\faPencil\ Working Experience}

\datedsubsection{\textbf{Cloud \& AI BG, Huawei}, Shenzhen, China}{2020.07 -- present}
\role{Research Intern}

\datedsubsection{\textbf{Chinese University of Hong Kong}, Hong Kong, China}{2019.07 -- 2020.02}
\role{Research Assistant}{Supervisor: \href{https://www.cse.cuhk.edu.hk/lyu/home}{Prof.~Michael R. Lyu}}

\datedsubsection{\textbf{Huawei 2012 Lab}, Shenzhen, China}{2018.01 -- 2018.08}
\role{Research Intern}{Mentor: \href{http://jiemingzhu.github.io}{Dr.~Jieming Zhu} }

%%%%%%%%%%%% Research Experience %%%%%%%%%%%
\section{\faInfo\ Research Experience}
%%%%%%%%%%%% AIOps %%%%%%%%%%%
\datedsubsection{\texttt{== Research on AIOps ==}}{2018 -- Present}

\textbf{Benchmarking Models for Multivariate KPIs Anomaly Detection}
\begin{itemize}[parsep=0.5ex]
\item Investigated \textbf{12} state-of-the-art multivariate KPIs (Key Performance Indicators) anomaly detection models based on deep learning (DAGMM [ICLR'18], MSCRED [AAAI'19], OmniAnomaly [KDD'19], etc.) and traditional machine learning (KNN, PCA, Isolation Forest, etc.).
\item We proposed a comprehensive evaluation protocol (with accuracy, salience, efficiency, delay) to evaluate these models on five public datasets. Especially, the novel metric salience was proposed to measure how much a model can highlight an anomaly. We found that deep learning-based models may not necessarily outperform the traditional machine learning-based methods.
\item The tool and the evaluation protocol were open-sourced in our \href{https://github.com/ase21-843/code}{GitHub repository}. The corresponding paper is \textbf{under review in ASE 2021}.

\end{itemize}


\textbf{Modeling Dependency of Multivariate KPIs for Anomaly Detection}
\begin{itemize}[parsep=0.5ex]
\item We explored dependency within multivariate KPIs. For example, an increase in the number of requests to a server can lead to higher CPU utilization. This dependency might facilitate multivariate KPIs anomaly detection.
\item We proposed collaborative machine (CM) to model such dependency across time (i.e., dependency among different timestamps) and KPIs (i.e., dependency among different KPIs). Moreover, inspired by factorization machine (FM), we proposed to simplify our model from quadratic to linear time complexity.
\item Our model achieved 0.9494 average F1, outperforming baselines by 4.45\% and ran $10\times \sim 20\times$ faster.
\end{itemize}


\textbf{Textual Log Compression}
\begin{itemize}[parsep=0.5ex]
  \item We proposed logzip to optimize existing compression tools for log compression. 
  \item We conducted iterative clustering to parse logs to generate intermediate
representations that can be compressed with a higher compression ratio by existing tools.
  \item Logzip was designed to be highly parallel. It achieved \textbf{$\sim$4.56x} the compression ratio of gzip and comparable efficiency with gzip when using multiple workers. The corresponding paper was \textbf{accepted by ASE 2019}.
\end{itemize}

\textbf{Benchmarking Methods for Log Parsing}
\begin{itemize}[parsep=0.5ex]
  \item Implemented, reorganized, and tuned 13 state-of-the-art log parsing algorithms: IPLoM [TKDE'12], LogSig [CIKM'11], LogMine [CIKM'16], MoLFI [ICPC'18], etc.
  \item We evaluated their accuracy, efficiency, and robustness on 16 datasets.
  \item We released the benchmark and datasets. The corresponding paper was \textbf{accepted by ICSE-SEIP 2019}.
\end{itemize}

\textbf{Huawei Phone Duplicate Issues Detection}
\begin{itemize}[parsep=0.5ex]
  \item The goal was to detect duplicate issue reports (including issue descriptions and logs) from Huawei phone users.
  \item I was responsible for log parsing, log matching and feature extraction.
  \item The system went online and achieved more than \textbf{80\%} accuracy.
\end{itemize}

%%%%%%%%%%%% CTR %%%%%%%%%%%
\datedsubsection{\texttt{== Research on Recommendation Systems ==}}{Before 2019}

\textbf{Benchmarking Models for CTR Prediction }
\begin{itemize}[parsep=0.5ex]
  \item Investigated/reproduced 24 state-of-the-art CTR (Click-Through Rate) prediction models and ran over 4,600 experiments for more than 12,000 GPU hours.
  \item I was responsible for the Pytorch version implementation of some models.
  \item We found many models have smaller differences than expected and sometimes are even inconsistent with what was reported in the literature. The corresponding paper is \textbf{under review in CIKM 2021}.
\end{itemize}


\textbf{Models Ensemble via Knowledge Distillation (KD)}
\begin{itemize}[parsep=0.5ex]
  \item Investigated/implemented (1) KD models: Knowledge Distillation [G.E. Hinton], Rocket~[Alibaba], etc. (2) CTR prediction models: FM, DeepFM~[Huawei’17], Wide\&Deep~[Google’16], xDeepFM~[KDD'18], etc.
  
  \item Deploying a new model in production requires going through a long and tedious process (maybe months) of online code modification and rigorous testing for model online serving. 
  \item We proposed the use of KD to unify the model online serving for CTR prediction, and successfully obtained a unified and easy-to-deploy model that can surprisingly outperform state-of-the-art models. We also distilled multiple models into a single model. The corresponding paper was \textbf{accepted by CIKM 2020}.
\end{itemize}



\section{\faHeartO~Honors and Awards}
\datedline{Outstanding thesis award}{2020}
\datedline{Outstanding graduates award}{2020}
\datedline{National Scholarship for graduate students}{2019}
\datedline{First Class Scholarship for graduate students}{2018}
\datedline{Mathematical Contest in Modeling (MCM), Meritorious Winner}{2017}
\datedline{First Class Scholarship for undergraduate students}{2015}


% \section{\faInfo\ Miscellaneous}
% \begin{itemize}[parsep=0.5ex]
%   \item Blog: http://your.blog.me
%   \item GitHub: https://github.com/username
%   \item Languages: English - Fluent, Mandarin - Native speaker
% \end{itemize}


% \datedsubsection{\textbf{xxx Projects}}{Jan. 2015 -- Present}
% \role{C, Python, Django, Linux}{Individual Projects, collaborated with xxx}
% Brief introduction: xxx
% \begin{itemize}
%   \item Implemented xxx feature
%   \item Optimized xxx 5\%
%   \item xxx
% \end{itemize}


% Reference Test
%\datedsubsection{\textbf{Paper Title\cite{zaharia2012resilient}}}{May. 2015}
%An xxx optimized for xxx\cite{verma2015large}
%\begin{itemize}
%  \item main contribution
%\end{itemize}

%%%%%%%%%%%% Skills %%%%%%%%%%%
\section{\faCogs~Skills}
\begin{itemize}[parsep=0.5ex]
  \item IELTS band:~7.0 (L: 7.5, \textbf{R: 9.0}, W: 6.5, S: 5.5)
  \item Programming Languages/Frameworks: Python > C++ > Java~~||~~PyTorch == Keras > TensorFlow
  \item Open source projects: I am a member of the \href{https://www.logpai.com}{LogPAI} team and a contributor of \href{https://github.com/logpai/logparser}{logparser} where we implemented most state-of-the-art log parsing algorithms and gained more than \textbf{650} stars.
\end{itemize}

%%%%%%%%%%%% PaperList %%%%%%%%%%%
\section{\faBook~PaperList}
\textit{The following papers were \textbf{accepted}.}
\begin{itemize}[parsep=0.5ex]
  \item \underline{Jinyang Liu}, Jieming Zhu, Shilin He, Pinjia He, Zibin Zheng, and Michael R. Lyu. \href{http://inpluslab.com/files/ASE19_logzip.pdf}{Logzip: Extracting Hidden Structures via Iterative Clustering for Log Compression}. In Proceedings of the 34th IEEE/ACM International Conference on Automated Software Engineering (\textbf{ASE 2019,~citation: 12}).

  \item Jieming Zhu, Shilin He, \underline{Jinyang Liu}, Pinjia He, Qi Xie, Zibin Zheng, Michael R. Lyu.  \href{https://arxiv.org/pdf/1811.03509.pdf}{Tools and Benchmarks for Automated Log Parsing}. In Proceedings of the 41st International Conference on Software Engineering: Software Engineering in Practice  (\textbf{ICSE 2019,~citation: 119}).
  
  \item Jieming Zhu, \underline{Jinyang Liu}, Weiqi Li, Jincai Lai, Xiuqiang He, Liang Chen and Zibin Zheng. \href{https://appsrv.cse.cuhk.edu.hk/~jyliu/media/papers/cikm20.pdf}{Towards Unified and Ensembled CTR Prediction via Knowledge Distillation}.  In Proceedings of the 29th International Conference on Information and Knowledge Management (\textbf{CIKM 2020}).
\end{itemize}

\textit{The following papers are \textbf{under review or revision}.}
\begin{itemize}
  \item \underline{Jinyang Liu}, Wenwei Gu, Zhuangbin Chen, Yichen Li, Yuxin Su, Xiao Ling, Yongqiang Yang and Michael R. Lyu. Are We Really Making Progress? Benchmarking Multivariate KPIs Anomaly Detection. Submitted to the 36th IEEE/ACM International Conference on Automated Software Engineering (\textbf{ASE 21, under review}).
  
  \item \underline{Jinyang Liu}, Zhuangbin Chen, Yuxin Su, Xiao Ling, Yongqiang Yang and Michael R. Lyu. CMAnomaly: Collaborative Machine-based Multivariate KPIs Anomaly Detection. (\textbf{Under revision}).
  
  \item Jieming Zhu, \underline{Jinyang Liu}, Shuai Yang, Qi Zhang and Xiuqiang He. \href{https://arxiv.org/pdf/2009.05794.pdf}{Towards Open Benchmarking for Click-Through Rate Prediction.} Submitted to the 30th International Conference on Information and Knowledge Management (\textbf{CIKM 2021, under review}).
  
  
  
\end{itemize}


%% Reference
%\newpage
%\bibliographystyle{IEEETran}
%\bibliography{mycite}
% \textit{*END}
\end{document}
