
"""
@Uses: This python file will create a VSM model and insert its similarity score in the desired file
"""

import logging, gensim, bz2
import MySQLdb
from gensim import corpora, similarities, models
from preprocess import  pp 
import csv 

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

""" ===========Input=====================================================
========================================================================="""
"""
Config="D:"
User ="root"
Pass = "123"
"""

Config="E:\\Sangeeta"
User="Sangeeta"
Pass = "sangeeta@123456"


stop_word_file_path = Config+"\\Research\\Rshabarth\\data\\stop_words.csv"
data_set_file_path = Config+"\\Research\\Rshabarth\\data\\dup_exp_data.csv"
#data_set_file_path = Config+"\\Research\\Rshabarth\\data\\sample_db.csv"
id_map_file = Config+"\\Research\\Rshabarth\\data\\dup_issue_id_for_id_mapping.csv"
dup_issue_id_path =  Config+"\\Research\\Rshabarth\\data\\only_dup_issueid_20396.csv"


db = MySQLdb.connect(host="localhost", user=User, passwd=Pass,db="vsm_python")
#term_freq_table = "term_freq_without_comm_py"
dictable="dictionary"

"""No of term that you want to keep indictionary"""
"""top count 1 greater than """
top_count=10 #Greater +1 (You can give exact value))
No_of_topics = 0 #Constant
filter_ext = 10
table="vsm_doc_score_python_"+(str)(No_of_topics)+"_"+(str)(top_count)
dic_path = Config+"\\Research\\Rshabarth\\Results\\vsm\\dict_"+(str)(No_of_topics)+"_"+(str)(top_count)+".dict"
corpus_path=Config+"\\Research\\Rshabarth\\Results\\vsm\\coupus_"+(str)(No_of_topics)+"_"+(str)(top_count)+".mm"
index_path=Config+"\\Research\\Rshabarth\\Results\\vsm\\index_"+(str)(No_of_topics)+"_"+(str)(top_count)+".mm"

print "dicPath",dic_path
#print "vsm_path",vsm_path
"""##SHIVANI PAPER MAX MAP"""


cursor = db.cursor()


# Build the stop-word set from a plain text file (one word per line --
# despite the .csv extension it is not parsed as CSV).
# Fixes over the original: the file handle was never closed (use `with`),
# and the set is built directly instead of list-then-set.
stop_list = set()
with open(stop_word_file_path) as stop_word_file:
    for line in stop_word_file:
        # pp() preprocesses the raw line; keep only the first token so
        # contractions like "he'll" collapse to "he".
        stop_list.add(pp(line).split(' ')[0])

"""print "list=", stop_list"""
   
"""CSV Hack For Dictioary Creation"""
csv_data_file=open(data_set_file_path,'rb')
data_file_reader = csv.reader(csv_data_file, delimiter=',')
dictionary = corpora.Dictionary(pp(''.join(line)).split() for (line) in data_file_reader ) 
""" Hope This works """  


print dictionary.items()

stop_ids = [dictionary.token2id[stopword] for stopword in stop_list
             if stopword in dictionary.token2id]

#docFreq3_ids = [tokenid for tokenid, docfreq in dictionary.dfs.iteritems() if docfreq <= 3]


""" Print
for id in stop_ids:
    print "stopid", dictionary.id2token[id]
    
for id in docFreq3_ids:    
    print "DocFreq3Ids", dictionary.id2token[id] """


word_len2=[]
for tokenid in dictionary:
    if len((str)(dictionary[tokenid]))<=2:
        word_len2.append(tokenid)

dictionary.filter_tokens(stop_ids +word_len2) # remove stop words, words that appear only once, and words with length 2
                         

dictionary.filter_extremes(filter_ext, .95)# put top=top {it will store no of terms you want in dictionary}
  
    
dictionary.compactify() # remove gaps in id sequence after words that were removed
print dictionary.items()

"""
for id in dictionary:
    str="insert into %(dictable)s values (\' %(query)s \')" %{'dictable':dictable, 'query':dictionary.id2token[id]}
    cursor.execute(str)
print "Reached1"
db.commit()
"""
#foo =raw_input("continue dictionary created....")
#print foo

"""
Save Dictionary
"""
dictionary.save(dic_path)

     
csv_data_file=open(data_set_file_path,'rb')
data_file_reader = csv.reader(csv_data_file, delimiter=',')     
    
class MyCorpus(object):
    """Memory-friendly corpus: streams one bag-of-words vector per CSV row
    of the module-level `data_file_reader` instead of loading everything."""

    def __iter__(self):
        for row in data_file_reader:
            # Fuse the CSV fields into a single string, preprocess with
            # pp(), lower-case, tokenize on whitespace, and map to the
            # dictionary's bag-of-words representation.
            yield dictionary.doc2bow(pp(''.join(row)).lower().split())

                  

corpus_memory_friendly = MyCorpus() # doesn't load the corpus into memory!
print corpus_memory_friendly


# Persist the streamed corpus in Matrix Market format, then reload it from
# disk so it can be iterated multiple times below (the streaming corpus is
# single-pass -- see the note on data_file_reader above).
corpora.MmCorpus.serialize(corpus_path, corpus_memory_friendly)
loaded_corpus = corpora.MmCorpus(corpus_path)
print "loaded_corpus",loaded_corpus


# Disk-backed similarity index over the whole corpus; feature count is the
# dictionary size.  MatrixSimilarity would keep everything in RAM.
index= gensim.similarities.docsim.Similarity(index_path,loaded_corpus, dictionary.__len__())#Note Matrix similarity dowws not scale well
#index.num_best=top_count#Set How many top docs you want



"""create enumerations"""
map_pusdo_id_to_issue_id={}
csv_data_file_2=open(id_map_file,'rb')
data_file_reader_2 = csv.reader(csv_data_file_2, delimiter=',')
"""
csv_data_file=open(data_set_file_path,'rb')
data_file_reader = csv.reader(csv_data_file, delimiter=',')"""


count = 0
for line in data_file_reader_2:
    #print line[0]
    map_pusdo_id_to_issue_id[count]=(str)(line[0])
    count=count+1    
print "dict",map_pusdo_id_to_issue_id



csv_data_file_3=open(dup_issue_id_path,'rb')
data_file_reader_3 = csv.reader(csv_data_file_3, delimiter=',')
dup_id = [(str)(line[0]) for line in data_file_reader_3]
print "dup_id",dup_id
"""===========Change from Here"""


count  = 0
for vector in loaded_corpus:
    psudoqid = count
    querydoc = map_pusdo_id_to_issue_id[psudoqid]
    print "count",count
    #print "querydoc",type(querydoc)
    try:
        check = dup_id.index(querydoc) 
        print "index",check   
        #print "vec",vector
        #vec_lsi =lsi[vector]
        #print "vec_lsi", vec_lsi
        initial_sims = index[vector]
        
        #print list(enumerate(initial_sims))
        sims = sorted(enumerate(initial_sims), key=lambda item:-item[1])
        #print "sims",sims
        #qid =    
        i=0
        for val in sims:
            psudoaid =val[0]
            score = val[1]
            answerdoc = map_pusdo_id_to_issue_id[psudoaid]
            #print "qid=",psudoqid," qdoc=",querydoc, " psudoaid= ",psudoaid, " answerdoc=",answerdoc, " score=",score
            insertStr = "insert into %(score_table)s values(\'%(qid)s\',\'%(aid)s\',%(score)f,%(psudoqid)d,%(psudoaid)d)" %{'score_table':table,'qid':querydoc,'aid':answerdoc,'score':score,'psudoqid':psudoqid,'psudoaid':psudoaid}
            print "insertStr",insertStr
            cursor.execute(insertStr)
            i=i+1
            if i>top_count:
                break## For loop
        db.commit() 
        count=count+1
    except Exception, err:
        #sys.stderr.write('ERROR: %s\n' % str(err))
        print "error" 
        count=count+1

db.close()
"""================
for doc in loaded_corpus:
    similarities = index[doc] 
    print "sim ", similarities
 """  