"""=================================================================================================================
@Note: Modified version of load_dictionary4_doc-doc_score.py
@Uses: This file calculates the score between each pair of docs and stores it in lsi_doc_score_python_no_of_topics_no_of_docs
===================================================================================================================="""

import logging, gensim, bz2
import MySQLdb
from gensim import corpora, similarities, models
from preprocess import  pp 
import csv 
import sys

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

""" ===========Input=====================================================
========================================================================="""
stop_word_file_path = "D:\Research\Rshabarth\data\stop_words.csv"
#data_set_file_path = "D:\Research\Rshabarth\Results\chromium_issue_report.csv"
data_set_file_path = "D:\Research\Rshabarth\data\sample_db.csv"
id_map_file = "D:\Research\Rshabarth\data\dup_issue_id_for_id_mapping.csv"
dup_issue_id_path =  "D:\Research\Rshabarth\data\only_dup_issueid_20396.csv"
db = MySQLdb.connect(host="localhost", user="root", passwd="123",db="lsi_python")


"""No of term that you want to keep indictionary"""
#top=15000#4 [No of terms in dictionary]
top_count=5 #Greater +1
No_of_topics = 2
table="lsi_doc_score_python_"+(str)(No_of_topics)+"_"+(str)(top_count)
dic_path = "D:\Research\Rshabarth\Results\lsi\dict_"+(str)(No_of_topics)+"_"+(str)(top_count)+".dict"
lsi_path = "D:\Research\Rshabarth\Results\lsi\lsi_"+(str)(No_of_topics)+"_"+(str)(top_count)+".lsi"
corpus_path="D:\Research\Rshabarth\Results\lsi\coupus_"+(str)(No_of_topics)+"_"+(str)(top_count)+".mm"
csv_data_file=open(data_set_file_path,'rb')
data_file_reader = csv.reader(csv_data_file, delimiter=',')


print "dicPath",dic_path
print "lsi_path",lsi_path
cursor = db.cursor()

#Create enumeration function, this function will convert between integer ids and character ids. 


dictionary = corpora.Dictionary.load(dic_path)
print dictionary.items()
#for line in open(lsi_path):
#    print line

class MyCorpus(object):
    """Memory-friendly corpus: streams one bag-of-words vector per CSV row."""

    def __iter__(self):
        # Re-open the data file on every pass. The previous version iterated
        # the shared module-level csv reader, a one-shot iterator, so the
        # corpus could be consumed only once (any second pass was empty).
        with open(data_set_file_path, 'rb') as data_file:
            for text in csv.reader(data_file, delimiter=','):
                # One document per CSV row: join the cells, run the pp()
                # preprocessor, lowercase, then split on whitespace.
                yield dictionary.doc2bow(pp(''.join(text)).lower().split())

corpus_memory_friendly = MyCorpus() # doesn't load the corpus into memory!
print corpus_memory_friendly
# Persist the streamed corpus in Matrix Market format, then reload it so the
# LSI training and the similarity queries below read from the serialized copy
# (which supports repeated iteration).
corpora.MmCorpus.serialize(corpus_path, corpus_memory_friendly)
loaded_corpus = corpora.MmCorpus(corpus_path)
print "loaded_corpus",loaded_corpus

#lsi = models.LsiModel.load(lsi_path)
#print "printing..."
#lsi.print_topic(No_of_topics)
# Train the LSI model with No_of_topics latent dimensions.
lsi = gensim.models.lsimodel.LsiModel(corpus=loaded_corpus, id2word=dictionary, num_topics=No_of_topics)
#lsi.print_topic(5)
# Dense cosine-similarity index over every document projected into LSI space.
index= gensim.similarities.MatrixSimilarity(lsi[loaded_corpus])


"""create enumerations"""
map_pusdo_id_to_issue_id={}
csv_data_file_2=open(id_map_file,'rb')
data_file_reader_2 = csv.reader(csv_data_file_2, delimiter=',')
"""
csv_data_file=open(data_set_file_path,'rb')
data_file_reader = csv.reader(csv_data_file, delimiter=',')"""


count = 0
for line in data_file_reader_2:
    #print line[0]
    map_pusdo_id_to_issue_id[count]=(str)(line[0])
    count=count+1    
print "dict",map_pusdo_id_to_issue_id



csv_data_file_3=open(dup_issue_id_path,'rb')
data_file_reader_3 = csv.reader(csv_data_file_3, delimiter=',')
dup_id = [(str)(line[0]) for line in data_file_reader_3]
print "dup_id",dup_id
"""===========Change from Here"""


count  = 0
for vector in loaded_corpus:
    psudoqid = count
    querydoc = map_pusdo_id_to_issue_id[psudoqid]
    print "count",count
    print "querydoc",type(querydoc)
    try:
        check = dup_id.index(querydoc) 
        print "index",check   
        #print "vec",vector
        vec_lsi =lsi[vector]
        #print "vec_lsi", vec_lsi
        initial_sims = index[vec_lsi]
        #print list(enumerate(initial_sims))
        sims = sorted(enumerate(initial_sims), key=lambda item:-item[1])
        #print "sims",sims
        #qid =    
        i=0
        for val in sims:
            psudoaid =val[0]
            score = val[1]
            answerdoc = map_pusdo_id_to_issue_id[psudoaid]
            #print "qid=",psudoqid," qdoc=",querydoc, " psudoaid= ",psudoaid, " answerdoc=",answerdoc, " score=",score
            insertStr = "insert into %(score_table)s values(\'%(qid)s\',\'%(aid)s\',%(score)f,%(psudoqid)d,%(psudoaid)d)" %{'score_table':table,'qid':querydoc,'aid':answerdoc,'score':score,'psudoqid':psudoqid,'psudoaid':psudoaid}
            print "insertStr",insertStr
            cursor.execute(insertStr)
            i=i+1
            if i>top_count:
                break## For loop
        db.commit() 
        count=count+1
    except Exception, err:
        #sys.stderr.write('ERROR: %s\n' % str(err))
        print "error" 
        count=count+1

db.close()

""" This is for inseting all the rows====
for vector in loaded_corpus:
    #print "vec",vector
    vec_lsi =lsi[vector]
    #print "vec_lsi", vec_lsi
    initial_sims = index[vec_lsi]
    #print list(enumerate(initial_sims))
    sims = sorted(enumerate(initial_sims), key=lambda item:-item[1])
    #print "sims",sims
    #qid = 
    psudoqid = count
    querydoc = map_pusdo_id_to_issue_id[psudoqid]
    i=0
    for val in sims:
        psudoaid =val[0]
        score = val[1]
        answerdoc = map_pusdo_id_to_issue_id[psudoaid]
        #print "qid=",psudoqid," qdoc=",querydoc, " psudoaid= ",psudoaid, " answerdoc=",answerdoc, " score=",score
        insertStr = "insert into %(score_table)s values(\'%(qid)s\',\'%(aid)s\',%(score)f,%(psudoqid)d,%(psudoaid)d)" %{'score_table':table,'qid':querydoc,'aid':answerdoc,'score':score,'psudoqid':psudoqid,'psudoaid':psudoaid}
        print "insertStr",insertStr
        cursor.execute(insertStr)
        i=i+1
        if i>top_count:
            break
    db.commit()
    count=count+1

db.close()

=========================end====="""



"""
termcorpus= gensim.matutils.Dense2Corpus(lsi.projection.u.T)
print list(termcorpus) 

termcorpus= gensim.matutils.Dense2Corpus(lsi.projection.u.T)

 
index = gensim.similarities.MatrixSimilarity(termcorpus) 

def insertsims(query,queryid): 
     # get cosine similarity of the query to each one of the 12 terms 
     sims = index[query] 
     #sims = sorted(enumerate(sims), key=lambda item: -item[1])
     
     count=0
     sorted_sims =sorted(enumerate(sims), key=lambda item: -item[1])
     #print "sims",enumerate(sims)
     #print "soretd sims",sorted_sims
     #for idother, sim in sorted(enumerate(sims), key=lambda item: -item[1]):
     for idother,sim_val in sorted_sims:#sorted_sims:
             count+=1
         
             if count > top_count:
                 break     
             q=dictionary.id2token[queryid]
             o=dictionary.id2token[idother]
             sql = 'insert into %(tab)s values(\'%(que)s\',\'%(other_term)s\',%(simi)f)' % {'tab':table,'que':q,'other_term':o,'simi':sim_val}#,dictionary.token2id[query],dictionary.token2id[idother],sim}
             #print sql
             try:
                 cursor.execute(sql)          
      
             except:
                 print "eror",q,o
         # Commit your changes in the database
     db.commit()

print "inserting..."
for id in dictionary:
    queryid = id
    query = list(termcorpus)[queryid]
    insertsims(query,queryid)
   
print "done..." 

##
cursor.close()
db.close()
"""


  