"""
Things to be done
1. Identify the correct stop word list
2. link:file:///D:/Research/Rshabarth/related_work/Corpora%20and%20Vector%20Spaces%20%E2%80%94%20gensim.htm
"""

"""
@Note: 
1. Doing it for top 200
1.This is copy of lsi_python3 with filter extremes used as one of the factor to remove domain 
specific stop words
2. dictionary will be dumped to db 
3. Added CSV reader :D
4. Need to perform stemming as it is currently not looking quite good [Done]

5. I will write functions such that i can store 

:( :( :(
a. Need to download data again
b. Need to change my program so that it can read from the database [not required; I will import to CSV]


"""
import logging, gensim, bz2
import MySQLdb
from gensim import corpora, similarities, models
from preprocess import  pp 
import csv 




logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

""" ===========Input=====================================================
========================================================================="""
stop_word_file_path = "D:\Research\Rshabarth\Results\stop_words.csv"
data_set_file_path = "D:\Research\Rshabarth\Results\chromium_issue_report.csv"
#data_set_file_path = "D:\Research\Rshabarth\Results\sample_db.csv"
db = MySQLdb.connect(host="localhost", user="root", passwd="123",db="lsi_python")
table="lsi_term_score_python"
term_freq_table = "term_freq_without_comm_py"
dictable="dictionary"
 
"""No of term that you want to keep indictionary"""
top='None'#4 [No of terms in dictionary]
top_count=101 #Greater +1
""" add|remove """
No_of_topics = 2
"""##SHIVANI PAPER MAX MAP"""



cursor = db.cursor()


"""=========Initialize==========================================================
    1. Delete everything from term_freq and lsi score table
============================================================================="""
# Clear all three output tables before re-populating them.
# BUGFIX: the original assigned del_str2 twice, so term_freq_table was
# never cleared and dictable was cleared twice.
del_str1 = "delete from %(tab)s" % {'tab': table}
del_str2 = "delete from %(tab)s" % {'tab': term_freq_table}
del_str3 = "delete from %(tab)s" % {'tab': dictable}

for del_str in (del_str1, del_str2, del_str3):
    cursor.execute(del_str)
    db.commit()

# Build the stop word set from the stop word file (one word per line;
# not a CSV despite the extension).  Use `with` so the file handle is
# closed (the original leaked it), and build a set directly instead of a
# list that is later converted.
stop_list = set()
with open(stop_word_file_path) as stop_word_file:
    for line in stop_word_file:
        processed_word = pp(line)
        # Keep only the first token, e.g. "he'll" -> "he"
        stop_list.add(processed_word.split(' ')[0])

"""print "list=", stop_list"""


# assume there's one document per line, tokens separated by whitespace
#dictionary = corpora.Dictionary(pp(line).split() for line in open(data_set_file_path) )

"""CSV Hack For Dictionary Creation"""
# Build the gensim dictionary by streaming the CSV: each row's cells are
# joined into one document string, preprocessed with pp(), and tokenized
# on whitespace.  `with` guarantees the file is closed afterwards (the
# original leaked the handle); corpora.Dictionary fully consumes the
# generator inside the call, so closing immediately after is safe.
with open(data_set_file_path, 'rb') as csv_data_file:
    data_file_reader = csv.reader(csv_data_file, delimiter=',')
    dictionary = corpora.Dictionary(pp(''.join(line)).split() for line in data_file_reader)


#print dictionary.items()

# Ids of stop words that actually occur in the corpus dictionary.
stop_ids = [dictionary.token2id[stopword] for stopword in stop_list
             if stopword in dictionary.token2id]

#docFreq3_ids = [tokenid for tokenid, docfreq in dictionary.dfs.iteritems() if docfreq <= 3]


""" Print
for id in stop_ids:
    print "stopid", dictionary.id2token[id]
    
for id in docFreq3_ids:    
    print "DocFreq3Ids", dictionary.id2token[id] """


word_len2=[]
for tokenid in dictionary:
    if len((str)(dictionary[tokenid]))<=2:
        word_len2.append(tokenid)

dictionary.filter_tokens(stop_ids +word_len2) # remove stop words, words that appear only once, and words with length 2
                         

dictionary.filter_extremes(5, .95)# put top=top {it will store no of terms you want in dictionary}
  
    
dictionary.compactify() # remove gaps in id sequence after words that were removed
print dictionary.items()
for id in dictionary:
    str="insert into %(dictable)s values (\' %(query)s \')" %{'dictable':dictable, 'query':dictionary.id2token[id]}
    cursor.execute(str)
print "Reached1"
db.commit()
foo =raw_input("continue dictionary created....")
print foo

""" print
print "dic=",dictionary

print dictionary.token2id

foo2 = raw_input("Enter anything:")
print "raw_input =", foo2 """



"""========Completed Preprocessing====================="""

""" Original
class MyCorpus(object):
     def __iter__(self):
         for line in open('D:\hi.txt'):
             # assume there's one document per line, tokens separated by whitespace
             yield dictionary.doc2bow(line.lower().split())
"""
     
csv_data_file=open(data_set_file_path,'rb')
data_file_reader = csv.reader(csv_data_file, delimiter=',')     
    
class MyCorpus(object):
     def __iter__(self):
         #for text in open(data_set_file_path)://normal File
         for text in data_file_reader:
             #line = text #Not Required
             # assume there's one document per line, tokens separated by whitespace
             yield dictionary.doc2bow(pp(''.join(text)).lower().split())

                  

corpus_memory_friendly = MyCorpus() # doesn't load the corpus into memory!
print corpus_memory_friendly

print "reached 2"


#for vector in corpus_memory_friendly:
#    print vector  



""" Tf-Idf 
tfidf = models.TfidfModel(corpus_memory_friendly)
corpus_tfidf = tfidf[corpus_memory_friendly]
lsi = gensim.models.lsimodel.LsiModel(corpus=corpus_tfidf, id2word=dictionary, num_topics=2)
"""

# Train the LSI model directly on the streamed bag-of-words corpus.
lsi = gensim.models.lsimodel.LsiModel(corpus=corpus_memory_friendly, id2word=dictionary, num_topics=No_of_topics)

# BUGFIX: print_topic() takes a 0-based topic index, so
# print_topic(No_of_topics) is out of range when num_topics == No_of_topics.
# print_topics() takes a count and reports all of the topics.
lsi.print_topics(No_of_topics)



"""
Get query to term similarities
"""

# Rows of lsi.projection.u correspond to terms; transposing yields one
# LSI-space column vector per term, wrapped as a corpus so each term can
# itself be used as a similarity query.
termcorpus= gensim.matutils.Dense2Corpus(lsi.projection.u.T)

""" print
print list(termcorpus) """


# create help structure for similarity queries 
# (this also stretches each corpus vector to unit length) 
index = gensim.similarities.MatrixSimilarity(termcorpus) 

def insertsims(query,queryid): 
     # get cosine similarity of the query to each one of the 12 terms 
     sims = index[query] 
     #sims = sorted(enumerate(sims), key=lambda item: -item[1])
     count=0
   
     for idother, sim in sorted(enumerate(sims), key=lambda item: -item[1]):
     #for idother,sims in (enumerate(sims),repeat=top_count):
             count+=1
         #print  dictionary.id2token[idother], sim 
             if count > top_count:
                 break     
             q=dictionary.id2token[queryid]
             o=dictionary.id2token[idother]
             sql = 'insert into %(tab)s values(\'%(que)s\',\'%(other_term)s\',%(simi)f)' % {'tab':table,'que':q,'other_term':o,'simi':sim}#,dictionary.token2id[query],dictionary.token2id[idother],sim}
             #print sql
             try:
                 cursor.execute(sql)          
      
             except:
                 print "eror",q,o
         # Commit your changes in the database
     db.commit()

print "inserting..."
for id in dictionary:
    queryid = id
    query = list(termcorpus)[queryid]
    insertsims(query,queryid)
   
print "done..." 

##
cursor.close()
db.close()



      
    

 # let's use the first term ("computer") as the query 
 
"""query = list(termcorpus)[10]
print "query is=", dictionary.id2token[10]
queryid=10
printsims(query,queryid)""" 

"""
  # print the result, converting ids (integers) to words (strings) 
     ###fmt = ["%s(%f)" % (dictionary.id2token[idother], sim) for idother, sim in enumerate(sims)] 
     ###print "the query is similar to", ', '.join(fmt) 
     """
