{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import sys\n",
    "import numpy as np\n",
    "import lda\n",
    "import json\n",
    "import pandas as pd\n",
    "from collections import Counter, OrderedDict\n",
    "import nltk\n",
    "from nltk.corpus import stopwords\n",
    "from itertools import *\n",
    "\n",
    "from sklearn.feature_extraction.text import CountVectorizer\n",
    "\n",
    "class topic_analysis(object):\n",
    "\n",
    "    \"\"\"\n",
    "    Extract topics from cleaned verbatims and attach the dominant topic\n",
    "    to every row of a dataframe.\n",
    "\n",
    "    Input:  dataframe, column_name with cleaned verbatims\n",
    "    Output: verbatims & topics ('Topic_id'/'Score' columns on the\n",
    "            dataframe), plus the top keywords per topic stored in\n",
    "            self.topics as (topic_id, words, weights) tuples.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        # Attributes are set per instance. (They were previously declared\n",
    "        # at class level, so the mutable `topics` list was shared by every\n",
    "        # instance of topic_analysis.)\n",
    "        self.dataframe = None\n",
    "        self.vocab = None\n",
    "        self.model = None\n",
    "        self.vectorizer = None\n",
    "        self.topics = []\n",
    "\n",
    "    def compute_lda_model(self, learning_set_lda, n_topics=10, n_iter=1500, max_features=5000):\n",
    "        \"\"\"\n",
    "        Fit an LDA topic model on an iterable of cleaned documents.\n",
    "\n",
    "        Parameters:\n",
    "            learning_set_lda: iterable of str, cleaned verbatims.\n",
    "            n_topics: int, number of topics to extract.\n",
    "            n_iter: int, Gibbs sampling iterations for the LDA fit.\n",
    "            max_features: int, vocabulary size cap for CountVectorizer.\n",
    "\n",
    "        Returns:\n",
    "            The fitted lda.LDA model. Side effects: stores the fitted\n",
    "            vectorizer and its vocabulary on the instance.\n",
    "        \"\"\"\n",
    "        # Initialize the CountVectorizer (bag-of-words features).\n",
    "        vectorizer = CountVectorizer(analyzer='word',\n",
    "                                     tokenizer=None,\n",
    "                                     preprocessor=None,\n",
    "                                     stop_words=None,\n",
    "                                     max_features=max_features)\n",
    "\n",
    "        train_data_features = vectorizer.fit_transform(learning_set_lda)\n",
    "        self.vectorizer = vectorizer\n",
    "\n",
    "        # lda.LDA expects a dense document-term count matrix.\n",
    "        X = train_data_features.toarray()\n",
    "\n",
    "        # get_feature_names() was removed in scikit-learn >= 1.2 in favour\n",
    "        # of get_feature_names_out(); support both.\n",
    "        if hasattr(vectorizer, 'get_feature_names_out'):\n",
    "            self.vocab = list(vectorizer.get_feature_names_out())\n",
    "        else:\n",
    "            self.vocab = vectorizer.get_feature_names()\n",
    "\n",
    "        model = lda.LDA(n_topics, n_iter=n_iter, random_state=1)\n",
    "        model.fit(X)\n",
    "\n",
    "        return model\n",
    "\n",
    "    def get_results(self, dataframe, column_name, n_topics, n_top_words=50):\n",
    "        \"\"\"\n",
    "        Label every row of `dataframe` with its dominant topic and collect\n",
    "        the top keywords for each topic.\n",
    "\n",
    "        Parameters:\n",
    "            dataframe: pandas DataFrame holding the verbatims.\n",
    "            column_name: name of the column with cleaned text.\n",
    "            n_topics: int, number of topics to extract.\n",
    "            n_top_words: int, keywords kept per topic (default 50).\n",
    "\n",
    "        Side effects: adds 'Topic_id' and 'Score' columns to the dataframe,\n",
    "        stores the fitted model on self.model, and fills self.topics with\n",
    "        (topic_id, words, weights) tuples.\n",
    "        \"\"\"\n",
    "        self.dataframe = dataframe\n",
    "        self.topics = []\n",
    "\n",
    "        # LDA model calibration\n",
    "        model = self.compute_lda_model(self.dataframe[column_name], n_topics)\n",
    "        self.model = model\n",
    "\n",
    "        # Dominant topic and its probability for each document (vectorised\n",
    "        # replacement for the original per-row argmax/max loop).\n",
    "        doc_topic = model.doc_topic_\n",
    "        self.dataframe['Topic_id'] = doc_topic.argmax(axis=1)\n",
    "        self.dataframe['Score'] = doc_topic.max(axis=1)\n",
    "\n",
    "        # Top keywords per topic. Slicing with [:-(n_top_words + 1):-1]\n",
    "        # keeps exactly n_top_words entries; the original\n",
    "        # [:-n_top_words:-1] silently returned n_top_words - 1 words.\n",
    "        topic_word = model.topic_word_\n",
    "        for i, topic_dist in enumerate(topic_word):\n",
    "            order = np.argsort(topic_dist)[:-(n_top_words + 1):-1]\n",
    "            words = ' '.join(np.array(self.vocab)[order])\n",
    "            weights = ' '.join(topic_dist[order].astype('str'))\n",
    "            self.topics.append((i, words, weights))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
