import nltk
import csv
import sys, os
import sqlite3


class SpeechClassifier(object):
    """Classify speech paragraphs by topic with an NLTK naive-Bayes model.

    Workflow: train() fits the classifier on annotated training paragraphs,
    test() reports misclassifications and the error rate on the test set,
    and run() labels every paragraph of every speech, writing the results
    to speechdb.csv.
    """

    def __init__(self, train_speeches_dir, test_speeches_dir, all_speeches_dir):
        # Directory arguments are resolved relative to the current working dir.
        self.train_speeches_dir = os.path.join(os.getcwd(), train_speeches_dir)
        self.test_speeches_dir = os.path.join(os.getcwd(), test_speeches_dir)
        self.all_speeches_dir = os.path.join(os.getcwd(), all_speeches_dir)
        self.get_pos()

    def get_pos(self):
        """Load the whitelist of part-of-speech tags from pos.csv.

        The first column of each row is a tag; tokenize_and_filter() keeps
        only words tagged with one of these. Stores and returns the list.
        """
        self.pos = []
        # 'U' open mode was removed in Python 3.11; a plain text open and a
        # with-block (the original leaked the file handle) are sufficient.
        with open('pos.csv') as fp:
            for row in csv.reader(fp):
                self.pos.append(row[0])
        return self.pos

    def words(self):
        """Return the filtered words of every *.speech file in the test dir.

        NOTE(review): the original docstring said "all the speeches in the
        corpus" but the code reads self.test_speeches_dir — confirm which
        directory the vocabulary is supposed to come from. Behavior kept.
        """
        wlist = []
        for fname in os.listdir(self.test_speeches_dir):
            if '.speech' in fname:
                with open(os.path.join(self.test_speeches_dir, fname)) as fp:
                    wlist.extend(self.tokenize_and_filter(fp.read()))
        return wlist

    def paragraphs(self, speech_file):
        """Return the raw paragraphs (one per line) of a single speech file."""
        with open(speech_file) as fp:
            return fp.read().split('\n')

    def tokenize_and_filter(self, raw):
        """Tokenize a raw paragraph and drop irrelevant words.

        A token survives only if it is purely alphabetic, longer than two
        characters, and POS-tagged with one of the tags loaded by get_pos().
        """
        tagged = nltk.pos_tag(nltk.word_tokenize(raw))
        return [w for w, pos in tagged
                if w.isalpha() and len(w) > 2 and pos in self.pos]

    def train_test_paragraphs(self, speeches_dir):
        """Pair each paragraph of each speech with its annotated topic label.

        For every <id>.speech file, the matching <id>_A annotation CSV is read
        in parallel: row i of the annotation labels paragraph i (the label is
        column 1, lowercased). Returns a list of (word_list, topic) tuples.
        """
        c = []
        for speech_index, fname in enumerate(os.listdir(speeches_dir)):
            if '.speech' not in fname:
                continue
            speech_file = os.path.join(speeches_dir, fname)
            print('speech %d %s' % (speech_index, speech_file))
            plist = self.paragraphs(speech_file)
            print(len(plist))
            # NOTE(review): split('.')[0] also truncates at any dot in the
            # directory path — works only for dot-free cwd paths; confirm.
            with open(speech_file.split('.')[0] + '_A') as fp:
                for i, row in enumerate(csv.reader(fp)):
                    pw = self.tokenize_and_filter(plist[i])
                    c.append((pw, row[1].strip().lower()))
        return c

    def speech_features(self, word_features, speech):
        """Map a word list to a {'contains(word)': bool} feature dict."""
        speech_words = set(speech)
        features = {}
        for word in word_features:
            features['contains(%s)' % word] = (word in speech_words)
        return features

    def train(self):
        """Fit the naive-Bayes classifier on the annotated training paragraphs.

        Side effects: sets self.word_features (the 2000 most frequent corpus
        words) and self.classifier; prints training-set accuracy.
        """
        speeches = self.train_test_paragraphs(self.train_speeches_dir)
        all_words = nltk.FreqDist(w.lower() for w in self.words())
        print('all_words %d' % len(all_words))
        # FIX: .keys()[:2000] relied on nltk-2's list-returning,
        # frequency-ordered keys; most_common() states that intent explicitly
        # and works on current nltk / Python 3.
        self.word_features = [w for w, _ in all_words.most_common(2000)]
        train_set = [(self.speech_features(self.word_features, s), c)
                     for (s, c) in speeches]
        print('train_set %d' % len(train_set))
        self.classifier = nltk.NaiveBayesClassifier.train(train_set)
        print('accuracy train:  %s' %
              nltk.classify.accuracy(self.classifier, train_set))

    def test(self):
        """Print every misclassified test paragraph and the final error rate.

        FIX: the original reused one counter as both the error count and the
        index into `speeches` (it only advanced on a mismatch, so the wrong
        paragraph was printed), and float(i/len(...)) truncated to 0 before
        converting, so RESULT was always 0.0.
        """
        speeches = self.train_test_paragraphs(self.test_speeches_dir)
        test_set = [(self.speech_features(self.word_features, s), c)
                    for (s, c) in speeches]
        errors = 0
        for index, (features, label) in enumerate(test_set):
            pd = self.classifier.prob_classify(features)
            if pd.max() != label.lower():
                print(speeches[index])
                print('MAX P(%s):  %s' % (pd.max(), pd.prob(pd.max())))
                print('CORRECT:  %s' % label)
                print('-' * 40)
                errors += 1
        # Guard against an empty test set (division by zero).
        if test_set:
            print('RESULT: %s' % (float(errors) / len(test_set)))
        else:
            print('RESULT: no test paragraphs found')

    def run(self):
        """Classify every paragraph of every speech and write speechdb.csv.

        Reads the speech catalogue from final_speech_list.csv
        (columns: id, type, date, president, title) and writes one CSV row
        per paragraph: id, type, date, president, title, paragraph number
        (1-based), predicted topic, paragraph word count.
        """
        # Dead commented-out sqlite table-creation code removed; the sqlite3
        # import at the top of the file is left in place.
        with open('final_speech_list.csv') as infp:
            with open('speechdb.csv', 'w') as outfp:
                writer = csv.writer(outfp)
                for row in csv.reader(infp):
                    speech_id = row[0]   # renamed: `id` shadowed the builtin
                    speech_type = row[1]
                    speech_date = row[2]
                    president = row[3]
                    speech_title = row[4]
                    # Walk every paragraph (one per line) of the speech text.
                    path = os.path.join(self.all_speeches_dir,
                                        speech_id + '.speech')
                    with open(path) as fp:
                        print('classfying %s %s' % (speech_id, fp.name))
                        for i, line in enumerate(fp.read().split('\n'), 1):
                            feature_set = self.speech_features(
                                self.word_features,
                                self.tokenize_and_filter(line))
                            paragraph_topic = self.classifier.prob_classify(
                                feature_set).max()
                            paragraph_length = len(line.split(' '))
                            x = [speech_id, speech_type, speech_date,
                                 president, speech_title, i,
                                 paragraph_topic, paragraph_length]
                            writer.writerow(x)
                            print(x)
            

        

def main():
    """Train, evaluate, then apply the speech classifier end to end."""
    classifier = SpeechClassifier(
        train_speeches_dir='train_speeches',
        test_speeches_dir='test_speeches',
        all_speeches_dir='all_speeches',
    )
    classifier.train()
    classifier.test()
    classifier.run()


if __name__ == '__main__':
    main()
