"""
Things to be done
1. Identify the correct stop word list
2. link:file:///D:/Research/Rshabarth/related_work/Corpora%20and%20Vector%20Spaces%20%E2%80%94%20gensim.htm
"""

"""
@Note: 
1. Doing it for top 200
1. This is a copy of lsi_python3 with filter_extremes used as one of the factors to remove domain-
specific stop words
2. dictionary will be dumped to db 
3. Added CSV reader :D
4. Need to perform stemming as it is currently not looking quite good [Done]

5. I will write functions such that I can store

:( :( :(
a. Need to download data again
b. Need to change my program so that it can read from a database [not required; I will import to CSV]


"""
import logging, gensim, bz2
import MySQLdb
from gensim import corpora, similarities, models
from preprocess import  pp 
import csv 




logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

""" ===========Input=====================================================
========================================================================="""
stop_word_file_path = "D:\Research\Rshabarth\Results\stop_words.csv"
#data_set_file_path = "D:\Research\Rshabarth\Results\chromium_issue_report.csv"
data_set_file_path = "D:\Research\Rshabarth\Results\sample_db.csv"
db = MySQLdb.connect(host="localhost", user="root", passwd="123",db="lsi_python")
table="lsi_term_score_python"
term_freq_table = "term_freq_without_comm_py"
dictable="dictionary"

"""No of term that you want to keep indictionary"""
#top=15000#4 [No of terms in dictionary]
top_count=31 #Greater +1
""" add|remove """
No_of_topics = 2
dic_path = "D:\Research\Rshabarth\Results\lsi\dict_"+(str)(No_of_topics)+"_"+(str)(top_count)+".dict"
lsi_path = "D:\Research\Rshabarth\Results\lsi\lsi_"+(str)(No_of_topics)+"_"+(str)(top_count)+".lsi"

print "dicPath",dic_path
print "lsi_path",lsi_path

cursor = db.cursor()


dictionary = corpora.Dictionary.load(dic_path)
lsi = models.LsiModel.load(lsi_path)

print dictionary.items()

lsi.print_topic(No_of_topics)
termcorpus= gensim.matutils.Dense2Corpus(lsi.projection.u.T)
print list(termcorpus) 

termcorpus= gensim.matutils.Dense2Corpus(lsi.projection.u.T)

 
index = gensim.similarities.MatrixSimilarity(termcorpus) 

def insertsims(query,queryid): 
     # get cosine similarity of the query to each one of the 12 terms 
     sims = index[query] 
     #sims = sorted(enumerate(sims), key=lambda item: -item[1])
     
     count=0
     sorted_sims =sorted(enumerate(sims), key=lambda item: -item[1])
     #print "sims",enumerate(sims)
     #print "soretd sims",sorted_sims
     #for idother, sim in sorted(enumerate(sims), key=lambda item: -item[1]):
     for idother,sim_val in sorted_sims:#sorted_sims:
             count+=1
         
             if count > top_count:
                 break     
             q=dictionary.id2token[queryid]
             o=dictionary.id2token[idother]
             sql = 'insert into %(tab)s values(\'%(que)s\',\'%(other_term)s\',%(simi)f)' % {'tab':table,'que':q,'other_term':o,'simi':sim_val}#,dictionary.token2id[query],dictionary.token2id[idother],sim}
             #print sql
             try:
                 cursor.execute(sql)          
      
             except:
                 print "eror",q,o
         # Commit your changes in the database
     db.commit()

print "inserting..."
for id in dictionary:
    queryid = id
    query = list(termcorpus)[queryid]
    insertsims(query,queryid)
   
print "done..." 

##
cursor.close()
db.close()



  