\documentclass{beamer}
\usepackage[utf8]{inputenc}
\usetheme{Kalgan}
\setbeamercovered{highly dynamic}
\setbeamersize{text margin left=5pt}

\title{Automatic Natural Language Classification using Distributed SOM Learning}
\author{Ayala, Bahena, Rodríguez, Zavaleta}%
\institute{CINVESTAV - ORACLE}
\date{April 2014}

\begin{document}
	\begin{frame}[plain]
	  \titlepage
	\end{frame}
  
	\begin{frame}[plain]
	 	\frametitle{The Problems}
		\begin{block}{Natural Language (automatic) Classification}
      Two problems studied by Kohonen (creator of SOM).
		\end{block} 

		\begin{block}{Word categorization (Semantic Maps)}
      Learn with Symbol + Attributes (context), test with Symbol.
      Variations: Sparse vectors, no average, sentence boundaries.
		\end{block}

		\begin{block}{Document classification (WebSOM)}
      Histograms of words, does not consider ordering (Big-Data?).
      Variations: No dimension reduction, word relationships within
      document (order).
		\end{block}
	\end{frame}

	\begin{frame}[plain]
	 	\frametitle{The Machine Learning Tools}
		\begin{block}{Self-Organizing Maps}
      ANN inspired by retina models, unsupervised learning.
		\end{block} 

		\begin{block}{Online-learning}
      Winner (closest) computation and neighborhood based update.
      Recursive dependency among iterations.
		\end{block}

		\begin{block}{Offline-learning}
      Also called batch mode, for each epoch, Voronoi-set calculation
      and rebuilding of SOM state (cell + neighbors). Both parts can
      be done in parallel.
		\end{block}
	\end{frame}

	\begin{frame}[plain]
	 	\frametitle{The Software Tools}
		\begin{block}{Python}
      RAD + FLOSS frameworks. OO + Functional paradigm. 
		\end{block} 

		\begin{block}{Hadoop}
      Each iteration is a chain of two map-reduce cycles, connected by
      files. 
      The only communication among iterations is also via files (HDFS).
      Intrusive due to the forced ``batch'' paradigm.
		\end{block} 

		\begin{block}{Spark}
      Resilient Distributed Datasets (RDD), parallelism over clusters.
      Less intrusive (still map-reduce but at collection manipulations).
      HDFS Support. More suitable for iterative algorithms (MLLib).
		\end{block}

		\begin{block}{Other tools}
      NLTK: word and sentence tokenization, trigrams, POS-tagging, etc.
      NumPy: Efficient array-based scientific calculations ($\mathbb{R}^n$).
		\end{block}
	\end{frame}

	\begin{frame}[plain]
	 	\frametitle{The Data Sources}
		\begin{block}{Word categorization}
      Books from Project Gutenberg (Grimm: 5k vocab, 100k words).
      The NLTK POS-tagger provides the pre-classification.
		\end{block}

		\begin{block}{Text classification}
      News/mail list archives, or public blogs.
      Need to be pre-classified.
		\end{block}

		\begin{block}{Visualization}
      Map existing categories into colors (sub-colors---nested clustering).
      Represent trained SOM as pixel-map.
      Take a subset of the training set and calculate the closest neuron.
      Draw pixel at neuron position with assigned color.
		\end{block}
	\end{frame}
\end{document}
