"""
Things to be done
1. Identify the correct stop word list
2. link:file:///D:/Research/Rshabarth/related_work/Corpora%20and%20Vector%20Spaces%20%E2%80%94%20gensim.htm
"""





"""=============================================
OLD---OLD-----OLD----OLD
================================================
=============================================="""

import logging, gensim, bz2
import MySQLdb
from gensim import corpora, similarities, models
from preprocess import  pp 
import collections 

# Input file locations. Raw strings so backslashes in the Windows paths are
# never interpreted as escape sequences (e.g. "\t", "\n").
stop_word_file_path = r"D:\Research\Rshabarth\Results\stop_words.csv"
data_set_file_path = r"D:\Research\Rshabarth\data\chromium_issue_report.csv"


logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)


# MySQL connection used to persist pairwise term-similarity scores.
# NOTE(review): credentials are hard-coded — move to a config file before sharing.
db = MySQLdb.connect(host="localhost", user="root", passwd="123",db="lsi")
# Destination table; rows are (query_term, other_term, similarity).
table="lsi_term_score_python"

cursor = db.cursor()

stop_list=[]
for line in open(stop_word_file_path):
    processed_word = pp(line)
    first_half=processed_word.split(' ')[0]#Conver he'll ->he
    stop_list.append(first_half)
    
    
stop_list = set(stop_list)  #Remove Duplicates

print "list=", stop_list

# Build the gensim vocabulary from the issue-report corpus: one document per
# line, preprocessed with pp() and tokenised on whitespace.
# Uses the data_set_file_path constant instead of re-hard-coding the path,
# and closes the file handle when done (the original leaked it).
with open(data_set_file_path) as data_file:
    dictionary = corpora.Dictionary(pp(line).split() for line in data_file)


stop_ids = [dictionary.token2id[stopword] for stopword in stop_list
             if stopword in dictionary.token2id]

docFreq3_ids = [tokenid for tokenid, docfreq in dictionary.dfs.iteritems() if docfreq <= 3]

#word_len2 = [dictionary.id2token[tokenid] for tokenid in dictionary]

word_len2=[]
for tokenid in dictionary:
    if len((str)(dictionary[tokenid]))<=2:
        word_len2.append(tokenid)
#print "len3", dictionary.id2token[178]#word_len2
for id in word_len2:
    print dictionary[id]

print dictionary.id2token

dictionary.filter_tokens(stop_ids + docFreq3_ids+word_len2) # remove stop words and words that appear only once

print "new=",dictionary



dictionary.compactify() # remove gaps in id sequence after words that were removed

print dictionary

print dictionary.token2id

exit()

""" Original
class MyCorpus(object):
     def __iter__(self):
         for line in open('D:\hi.txt'):
             # assume there's one document per line, tokens separated by whitespace
             yield dictionary.doc2bow(line.lower().split())
"""
     
  
class MyCorpus(object):
    """Memory-friendly corpus: streams one bag-of-words vector per line of
    the issue-report file instead of loading the whole corpus into memory."""
    def __iter__(self):
        # Use the module-level data_set_file_path constant (the original
        # duplicated the literal path) and close the file when exhausted.
        with open(data_set_file_path) as data_file:
            for text in data_file:
                # One document per line, tokens separated by whitespace.
                yield dictionary.doc2bow(pp(text).lower().split())

                  

corpus_memory_friendly = MyCorpus() # doesn't load the corpus into memory!
print corpus_memory_friendly

for vector in corpus_memory_friendly:
    print vector


""" Tf-Idf 
tfidf = models.TfidfModel(corpus_memory_friendly)
corpus_tfidf = tfidf[corpus_memory_friendly]
lsi = gensim.models.lsimodel.LsiModel(corpus=corpus_tfidf, id2word=dictionary, num_topics=2)
"""

lsi = gensim.models.lsimodel.LsiModel(corpus=corpus_memory_friendly, id2word=dictionary, num_topics=2)


lsi.print_topic(2)

"""
Get query to term similarities
"""

termcorpus= gensim.matutils.Dense2Corpus(lsi.projection.u.T)
print list(termcorpus)


# create help structure for similarity queries 
# (this also stretches each corpus vector to unit length) 
index = gensim.similarities.MatrixSimilarity(termcorpus) 

def printsims(query,queryid): 
     # get cosine similarity of the query to each one of the 12 terms 
     sims = index[query] 
     # print the result, converting ids (integers) to words (strings) 
     ###fmt = ["%s(%f)" % (dictionary.id2token[idother], sim) for idother, sim in enumerate(sims)] 
     ###print "the query is similar to", ', '.join(fmt) 
     for idother, sim in enumerate(sims):
         print  dictionary.id2token[idother], sim 
         q=dictionary.id2token[queryid]
         o=dictionary.id2token[idother]
         sql = 'insert into %(tab)s values(\'%(que)s\',\'%(other_term)s\',%(simi)f)' % {'tab':table,'que':q,'other_term':o,'simi':sim}#,dictionary.token2id[query],dictionary.token2id[idother],sim}
         print sql
         try:
          cursor.execute(sql)
          db.commit()
      
         except:
             print "eror",q,o
         # Commit your changes in the database
      
    

 # let's use the first term ("computer") as the query 
 
"""query = list(termcorpus)[10]
print "query is=", dictionary.id2token[10]
queryid=10
printsims(query,queryid)""" 

for id in dictionary:
    queryid = id
    query = list(termcorpus)[queryid]
    printsims(query,queryid)
    

##
cursor.close()
db.close()


