"""
Things to be done
1. Identify the correct stop word list
2. link:file:///D:/Research/Rshabarth/related_work/Corpora%20and%20Vector%20Spaces%20%E2%80%94%20gensim.htm
"""

import logging, gensim, bz2
import MySQLdb
from gensim import corpora, similarities, models
from preprocess import  pp 
import collections 




logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

""" ===========Input=====================================================
========================================================================="""
stop_word_file_path = "D:\Research\Rshabarth\Results\stop_words.csv"
data_set_file_path = "D:\Research\Rshabarth\Results\chromium_issue_report.csv"
#data_set_file_path = "D:\hi.txt"
db = MySQLdb.connect(host="localhost", user="root", passwd="123",db="lsi_python")
table="lsi_term_score_python"
term_freq_table = "term_freq_without_comm_py"

cursor = db.cursor()


"""=========Initialize==========================================================
    1. Delete everything from term_freq and lsi score table
============================================================================="""
del_str1 = "delete from %(tab)s" %{'tab':table}
del_str2 = "delete from %(tab)s" %{'tab':term_freq_table}

cursor.execute(del_str1)
db.commit()
cursor.execute(del_str2)
db.commit()


# Generic stop-word set, one word per line in the CSV.  pp() normalises the
# raw line; keeping only the text before the first space turns contractions
# like "he'll" into "he".  Building a set removes duplicates directly.
stop_list = set(pp(line).split(' ')[0] for line in open(stop_word_file_path))

"""print "list=", stop_list"""


           # assume there's one document per line, tokens separated by whitespace
dictionary = corpora.Dictionary(pp(line).split() for line in open(data_set_file_path) )           

#print dictionary.items()

stop_ids = [dictionary.token2id[stopword] for stopword in stop_list
             if stopword in dictionary.token2id]

docFreq3_ids = [tokenid for tokenid, docfreq in dictionary.dfs.iteritems() if docfreq <= 3]


""" Print
for id in stop_ids:
    print "stopid", dictionary.id2token[id]
    
for id in docFreq3_ids:    
    print "DocFreq3Ids", dictionary.id2token[id] """


word_len2=[]
for tokenid in dictionary:
    if len((str)(dictionary[tokenid]))<=2:
        word_len2.append(tokenid)

dictionary.filter_tokens(stop_ids + docFreq3_ids+word_len2) # remove stop words, words that appear only once, and words with length 2
                         
# Corpus frequency of every surviving dictionary term.
# Single pass over the data set: the original re-read the whole file once
# per term, i.e. O(|vocab| * corpus size).  The resulting counts are
# identical because only exact token matches were counted before too.
freq = {}
for id in dictionary:
    freq[dictionary[id]] = 0
for line in open(data_set_file_path):
    for word in pp(line).split():
        if word in freq:
            freq[word] += 1


"""domain specific stop words"""#Get top 20 frequent terms from table 
for id in freq:
    str = 'insert into %(tf_table)s values(\'%(term)s\',%(freq)d)' %{'tf_table':term_freq_table,'term':id,'freq':freq[id]}
    try:
          cursor.execute(str)
          #db.commit()@@Not aomment
      
    except:
             print "eror=",term
            
db.commit()            
         # Commit your chang
    #print "str=",str
    #sql = 'insert into %(tab)s values(\'%(que)s\',\'%(other_term)s\',%(simi)f)' % {'tab':table,'que':q,'other_term':o,'simi':sim}#,dictionary.token2id[query],dictionary.token2id[idother],sim}
         

"""=======Domain Specifi Stop Words=============
1. High Frequency Terms
2. Very Low Occuring Term
================================================"""   #//20
select_str1 ="select term from %(tab)s order by freq desc limit 0,20" %{'tab':term_freq_table}
cursor.execute(select_str1)
domain_stop_words  = cursor.fetchall()

""" print
for word in domain_stop_words:
    print "d word=",word """

domain_stop_word_ids = [dictionary.token2id[word[0]] for word in domain_stop_words]


select_str2 ="select term from %(tab)s where freq<=20" %{'tab':term_freq_table}
cursor.execute(select_str2)
domain_low_freq_words  = cursor.fetchall()

"""print
for word in domain_low_freq_words:
    print "d word=",word """

domain_low_freq_ids = [dictionary.token2id[word[0]] for word in domain_low_freq_words]

  
dictionary.filter_tokens(domain_stop_word_ids+domain_low_freq_ids)  
    
# Re-number token ids to be contiguous now that all filtering is finished.
dictionary.compactify() # remove gaps in id sequence after words that were removed
print dictionary.items()
print "Reached1"

""" print
print "dic=",dictionary

print dictionary.token2id

foo2 = raw_input("Enter anything:")
print "raw_input =", foo2 """



"""========Completed Preprocessing====================="""

""" Original
class MyCorpus(object):
     def __iter__(self):
         for line in open('D:\hi.txt'):
             # assume there's one document per line, tokens separated by whitespace
             yield dictionary.doc2bow(line.lower().split())
"""
     
  
class MyCorpus(object):
     def __iter__(self):
         for text in open(data_set_file_path):
             #line = text #Not Required
             # assume there's one document per line, tokens separated by whitespace
             yield dictionary.doc2bow(pp(text).lower().split())

                  

corpus_memory_friendly = MyCorpus() # doesn't load the corpus into memory!
print corpus_memory_friendly

print "reached 2"
""" print
for vector in corpus_memory_friendly:
    print vector """



""" Tf-Idf 
tfidf = models.TfidfModel(corpus_memory_friendly)
corpus_tfidf = tfidf[corpus_memory_friendly]
lsi = gensim.models.lsimodel.LsiModel(corpus=corpus_tfidf, id2word=dictionary, num_topics=2)
"""

lsi = gensim.models.lsimodel.LsiModel(corpus=corpus_memory_friendly, id2word=dictionary, num_topics=2)


lsi.print_topic(2)



"""
Get query to term similarities
"""

termcorpus= gensim.matutils.Dense2Corpus(lsi.projection.u.T)

""" print
print list(termcorpus) """


# create help structure for similarity queries 
# (this also stretches each corpus vector to unit length) 
index = gensim.similarities.MatrixSimilarity(termcorpus) 

def insertsims(query,queryid): 
     # get cosine similarity of the query to each one of the 12 terms 
     sims = index[query] 
   
     for idother, sim in enumerate(sims):
         #print  dictionary.id2token[idother], sim 
         q=dictionary.id2token[queryid]
         o=dictionary.id2token[idother]
         sql = 'insert into %(tab)s values(\'%(que)s\',\'%(other_term)s\',%(simi)f)' % {'tab':table,'que':q,'other_term':o,'simi':sim}#,dictionary.token2id[query],dictionary.token2id[idother],sim}
         print sql
         try:
          cursor.execute(sql)
          db.commit()
      
         except:
             print "eror",q,o
         # Commit your changes in the database


# Materialise the term vectors ONCE.  The original rebuilt list(termcorpus)
# inside the loop, traversing the whole corpus for every vocabulary term
# (O(V^2) corpus passes for V terms).
term_vectors = list(termcorpus)
for queryid in dictionary:
    insertsims(term_vectors[queryid], queryid)


cursor.close()
db.close()



      
    

 # let's use the first term ("computer") as the query 
 
"""query = list(termcorpus)[10]
print "query is=", dictionary.id2token[10]
queryid=10
printsims(query,queryid)""" 

"""
  # print the result, converting ids (integers) to words (strings) 
     ###fmt = ["%s(%f)" % (dictionary.id2token[idother], sim) for idother, sim in enumerate(sims)] 
     ###print "the query is similar to", ', '.join(fmt) 
     """
