\documentclass[margin,line]{res}

\usepackage{tabto}
% Widen the text block relative to the res.cls defaults: pull both page
% margins in by half an inch and set the line width to 6in.
\oddsidemargin -.5in
\evensidemargin -.5in
\textwidth=6.0in
% Zero out the default vertical space between and around list items
% document-wide (the list1/list2 environments below repeat this locally).
\itemsep=0in
\parsep=0in
% if using pdflatex:
%\setlength{\pdfpagewidth}{\paperwidth}
%\setlength{\pdfpageheight}{\paperheight}

% list1: a tight list environment (all inter-item and surrounding vertical
% spacing zeroed, 0.17in left margin) whose item marker is \ding{113},
% an open square glyph.
% NOTE(review): \ding comes from the pifont package, which this file never
% loads -- presumably res.cls loads it; confirm before using list1 in the
% body (it is currently unused below).
\newenvironment{list1}{
  \begin{list}{\ding{113}}{%
      \setlength{\itemsep}{0in}
      \setlength{\parsep}{0in} \setlength{\parskip}{0in}
      \setlength{\topsep}{0in} \setlength{\partopsep}{0in}
      \setlength{\leftmargin}{0.17in}}}{\end{list}}
% list2: a tight bullet list (all inter-item and surrounding vertical
% spacing zeroed, 0.2in left margin). Used in the Teaching Experience
% section to list the courses TA'd.
\newenvironment{list2}{
  \begin{list}{$\bullet$}{%
      \setlength{\itemsep}{0in}
      \setlength{\parsep}{0in} \setlength{\parskip}{0in}
      \setlength{\topsep}{0in} \setlength{\partopsep}{0in}
      \setlength{\leftmargin}{0.2in}}}{\end{list}}


\begin{document}

\name{Yang Wang \vspace*{.1in}}

\begin{resume}
\section{\sc Contact Information}
\vspace{.05in}
\begin{tabular}{@{}p{3in}p{4in}}
Department of Computer Science and Engineering             & \textit{Office:}  DL 689 \\
The Ohio State University   & \textit{Voice:}    (614) 292-2577 \\
2015 Neil Avenue & \textit{E-mail:}  wang.7564@osu.edu\\
Columbus, Ohio 43210, USA  & http://web.cse.ohio-state.edu/\textasciitilde yangwang \\
\end{tabular}


\section{\sc Research Interests}
Large-scale distributed systems, cloud storage, fault tolerance



\NumTabs{4}
\section{\sc Education}
\textbf{Ph.D.} \tab{Computer Science, The University of Texas at Austin, 2014} \\
\tab{}\tab{Advisors:  Dr.~Lorenzo Alvisi and Dr.~Mike Dahlin}



\textbf{M.E.} \tab{Computer Science and Technology, Tsinghua University, 2008}\\
\textbf{B.E.} \tab{Computer Science and Technology, Tsinghua University, 2005}\\
%{\em Department of Mathematics and Statistics}

\section{\sc Academic Positions}
\textbf{Assistant Professor} \tab{Computer Science, The Ohio State University, Jan.\ 2015--Present}\\
\textbf{Research Assistant} \tab{Computer Science, The University of Texas at Austin, 2009--2014}\\
\textbf{Teaching Assistant} \tab{Computer Science, The University of Texas at Austin, 2008--2009}

\section{\sc Honors and Awards}
Google PhD Fellowship in Distributed Computing, 2013--2014

\vspace*{-2.5mm}
Best paper award, Systor 2014





\section{\sc Refereed Publications}

Chao Xie, Chunzhi Su, Manos Kapritsos, Yang Wang, Navid Yaghmazadeh, Lorenzo Alvisi, and Prince Mahajan. ``Salt: Combining ACID and BASE in a Distributed Database''. \emph{Proceedings of the 11th USENIX Symposium on Operating Systems Design and Implementation (OSDI)}, Broomfield, CO, October, 2014.

Mark Silberstein, Lakshmi Ganesh, Yang Wang, Lorenzo Alvisi, and Mike Dahlin. ``Lazy Means Smart: Reducing Repair Bandwidth Costs in Erasure-coded Distributed Storage''. \emph{Proceedings of the 7th ACM International Systems and Storage Conference (Systor)}, Haifa, Israel, June, 2014. Best paper award.

Yang Wang, Manos Kapritsos, Lorenzo Alvisi, and Mike Dahlin. ``Exalt: Empowering Researchers to Evaluate Large-Scale Storage Systems''. \emph{Proceedings of the 11th USENIX Symposium on Networked Systems Design and Implementation (NSDI)}, Seattle, WA, April, 2014.

Yang Wang, Manos Kapritsos, Zuocheng Ren, Prince Mahajan, Jeevitha Kirubanandam, Lorenzo Alvisi, and Mike Dahlin. ``Robustness in the Salus scalable block store'', \emph{Proceedings of the 10th USENIX Symposium on Networked Systems Design and Implementation (NSDI)}, Lombard, IL, April 2013.

Manos Kapritsos, Yang Wang, Vivien Quema, Allen Clement, Lorenzo Alvisi, and Mike Dahlin. ``All about Eve: Execute-Verify Replication for Multi-Core Servers'', \emph{Proceedings of 2012 Symposium on Operating Systems Design and Implementation (OSDI)}, Hollywood, CA, October 2012.

Yang Wang, Lorenzo Alvisi, and Mike Dahlin. ``Gnothi: Separating Data and Metadata for Efficient and Available Storage Replication'', \emph{Proceedings of 2012 USENIX Annual Technical Conference (USENIX ATC)}, Boston, MA, June 2012.

Yang Wang, Jiwu Shu, Guangyan Zhang, Wei Xue, and Weimin Zheng. ``SOPA: Selecting the Optimal Policy Adaptively''. \emph{ACM transactions on storage (TOS)}, Volume 6 Issue 2, July 2010.

Allen Clement, Manos Kapritsos, Sangmin Lee, Yang Wang, Lorenzo Alvisi, Mike Dahlin, and Taylor Riche. ``UpRight Cluster Services''. \emph{Proceedings of the 22nd ACM Symposium on Operating Systems Principles (SOSP)}, Big Sky, MT, October 2009.

\section{\sc Other publications}

Yang Wang, Manos Kapritsos, Lorenzo Alvisi, and Mike Dahlin. ``Exalt: Empowering Researchers to Evaluate Large-Scale Storage Systems'', \emph{Poster at the 24th ACM Symposium on Operating Systems Principles (SOSP 2013)}, Farmington, Pennsylvania, November 2013.

Manos Kapritsos, Yang Wang, Vivien Quema, Allen Clement, Lorenzo Alvisi, and Mike Dahlin. ``All about Eve: Execute-Verify Replication for Multi-Core Servers'', \emph{Poster at 2012 Symposium on Operating Systems Design and Implementation (OSDI)}, Hollywood, CA, October 2012.

Yang Wang, Manos Kapritsos, Zuocheng Ren, Prince Mahajan, Jeevitha Kirubanandam, Lorenzo Alvisi, and Mike Dahlin. ``Robustness in the Salus scalable block store''. Technical Report TR-12-24, The University of Texas at Austin, Department of Computer Science, September 2012.

Manos Kapritsos, Yang Wang, Vivien Quema, Allen Clement, Lorenzo Alvisi, and Mike Dahlin. ``All about Eve: Execute-verify replication for multi-core servers (extended version)''. Technical Report TR-12-23, Department of Computer Science, The University of Texas at Austin, September 2012.

Manos Kapritsos, Yang Wang, Vivien Quema, Allen Clement, Lorenzo Alvisi, and Mike Dahlin. ``EV: Replicating Multithreaded Servers''.
\emph{Poster at the 23rd ACM Symposium on Operating Systems Principles (SOSP)}, Cascais, Portugal, October 2011.

Yang Wang, Lorenzo Alvisi, and Mike Dahlin. ``Gnothi: Separating Data and Metadata for Efficient and Available Storage Replication'', \emph{Poster at 2012 USENIX Annual Technical Conference (USENIX ATC)}, Boston, MA, June 2012.

Allen Clement, Manos Kapritsos, Sangmin Lee, Yang Wang, Lorenzo Alvisi, Mike Dahlin, and Taylor Riche. ``UpRight Cluster Services''. \emph{Poster at the 22nd ACM Symposium on Operating Systems Principles (SOSP)}, Big Sky, MT, October 2009.

\section{\sc Presentations}
``Exalt: Empowering Researchers to Evaluate Large-Scale Storage Systems''. At \emph{11th USENIX Symposium on Networked Systems Design and Implementation (NSDI)}, Seattle, WA, April, 2014.

``Exalt: Empowering Researchers to Evaluate Large-Scale Storage Systems''. Invited talk at Facebook, Mountain View, CA, August 2013.

``Robustness in the Salus scalable block store''. At \emph{10th USENIX Symposium on Networked Systems Design and Implementation (NSDI)}, Lombard, IL, April 2013.

``Gnothi: Separating Data and Metadata for Efficient and Available Storage Replication''. At \emph{2012 USENIX Annual Technical Conference (USENIX ATC)}, Boston, MA, June 2012.

\iffalse
\section{\sc Research Experience}
{\bf The University of Texas at Austin}, Austin, Texas USA  \hfill {Graduate Research Assistant}

\vspace{-.3cm}
{\em Exalt: An Emulator for evaluating large-scale storage systems} \hfill {\bf 2012 - present}\\
It is difficult to get thousands of machines and tens of thousands of disks to fully evaluate a large-scale storage system.
We have built Exalt, an emulation tool to run a large storage system with 100 times fewer machines.
To achieve this, Exalt colocates multiple unmodified processes on a single physical machine by
compressing data on I/O devices and in memory. To achieve efficient compression,
we have leveraged the observation that the behavior of storage systems often does
not depend on the actual data being stored and developed Tardis, a synthetic data format that allows applications
to quickly achieve high rates of data compression. With Exalt,
we emulated a 10,000 node HDFS cluster on 100 physical machines, found new
scalability bottlenecks that are not observable at low scale, and fixed them.

{\bf Facebook}, Mountain View, CA USA  \hfill {Intern Software Engineer}

\vspace{-.3cm}
{\em Identifying bottlenecks in a complex cluster} \hfill {\bf 2013 summer}\\
I worked on the Service Nuance Observer (SNOB) project to identify services that currently are bottlenecks
or are likely to become future bottlenecks. SNOB collects multiple resource utilization metrics from all machines
and performs curve-fitting techniques on these metrics to detect abnormal resource utilization.
Furthermore, based on the curve-fitting results, it predicts the capacity of each service
when the scale of the system grows. SNOB is already deployed in Facebook to help improve the efficiency of its clusters.

{\bf The University of Texas at Austin}, Austin, Texas USA  \hfill {Graduate Research Assistant}

{\em Salus: Robust and scalable block store} \hfill {\bf 2011-2012}\\
Scalable storage systems can experience data corruption or loss because of a wide range of failures, including
CPU errors and disk or memory corruption. We have designed and built Salus, a block store that can scale to thousands of nodes
while providing end-to-end protection from data corruption. To achieve that, we have applied three new ideas:
a pipelined commit protocol to provide ordering guarantees for a client while still allowing its
requests to be processed in parallel; end-to-end verification so that a client can detect corruption when
it reads data from a single server; and replication of the server's computation and storage layers to remove single
points of failures. Surprisingly, such increased protection does not come at the cost of performance: Salus can
achieve comparable performance compared to HBase, the code base from which Salus descends; and in
environments where disk bandwidth exceeds network bandwidth, Salus can outperform HBase by 74\%.

{\em Gnothi: Efficient and available storage replication} \hfill {\bf 2011 - 2012}\\
Previous replication techniques cannot achieve efficiency and availability simultaneously:
synchronous replication, e.g. primary backup, relies on conservative timeouts for accurate failure detection,
and the system is not available after a replica fails until the timeout is triggered; asynchronous replication,
e.g. Paxos, can use aggressive and potentially inaccurate timeouts but requires more replicas. Gnothi achieves both high efficiency and availability
by separating data from metadata: it stores data on $f+1$ replicas to tolerate $f$ failures, and, by replicating
metadata to $2f+1$ replicas through a Paxos-like protocol, can use aggressive timeouts without risking correctness:
such metadata is used to identify correct replicas of the data despite potentially inaccurate timeout.
By reducing the bandwidth consumed by writes, Gnothi can achieve 40\%-64\% more
write throughput compared to a state-of-the-art Paxos-based system. In addition,
it can provide 100\%-200\% more throughput during recovery.

{\em Eve: Execute-Verify replication for multi-core servers} \hfill {\bf 2009 - 2011}\\
It is challenging to replicate a multithreaded application since its behavior can be nondeterministic and this can
cause different replicas to diverge. To solve this problem, Eve treats the results of replicated parallel execution
as speculative: if the replicas reach the same states, Eve allows them to continue; if their states diverge, Eve rolls them back and
tells each to re-execute sequentially. To reduce the number of rollbacks, Eve introduces a mixer to predict which requests
may be executed in parallel without conflicts. On the TPC-W benchmark over the H2 in-memory database, Eve can
achieve close to an 8x speedup on 16-core machines with an overhead of about 10\%. My work in this project
focuses on how to efficiently compare the states of different replicas and how to roll back their states when necessary.

{\em UpRight cluster services} \hfill {\bf 2008 - 2009}\\
This work aims at making Byzantine Fault Tolerance (BFT) a practical alternative to build high-availability distributed
systems. It significantly reduces the effort required to modify an application, while still achieving an acceptable
performance. My work in this project includes incorporating the Hadoop Zookeeper Distributed Lock Service into the UpRight framework
and designing a general logging and checkpoint mechanism for applications.

{\bf Tsinghua University}, Beijing, China \hfill {Graduate Research Assistant}

\vspace{-.3cm}

{\em Selecting the optimal policy adaptively for a cache system} \hfill {\bf 2005 - 2007}\\
No single caching policy can achieve the highest hit ratio under all workloads.
We designed a cache system that can switch to different caching policies based on the current workload.
Our system first encapsulates caching policies into modules so that it can switch among policies, and then analyzes
storage access patterns online and attempts different policies to find the optimal one for the current pattern.
The evaluation demonstrates that it outperforms the well-known adaptive policy by up to 11.9\% in average response time.

{\bf Netease R\&D}, Beijing, China \hfill {Intern Software Engineer}

\vspace{-.3cm}
{\em Large-scale random accessible storage system} \hfill {\bf 2006-2007}\\
I worked on building a terabyte-scale random accessible storage system over hundreds of machines.
The system stores half-structured data and supports both random search and sequential scan operations. I was one of
the core developers and maintainers. The system is now used to support Youdao, one of the largest search engines in China.
\fi

\section{\sc Professional Experience}
External reviewer of \emph{ACM Transactions on Parallel Computing (TOPC)}.

External reviewer of \emph{Eurosys 2014}.

External reviewer of the \emph{Nineteenth Annual International Conference on VLSI Design Automation in Asia and South Pacific region (ASP-DAC 2014)}.

External reviewer of \emph{The Fourth International Workshop on Hot Topics in Peer-to-peer computing and Online Social neTworking (HotPOST 2012)}.

\section{\sc Teaching Experience}
Teaching Assistant at the University of Texas at Austin, 2008--2009\\
\vspace*{-.15in}
\begin{list2}
\item CS345 Programming Languages, Spring 2009.
\item CS352 Computer Systems Architecture, Fall 2008.
\end{list2}

%\input{reference4}


\end{resume}
\end{document}




