"""
Things to be done
1. Identify the correct stop word list
2. link:file:///D:/Research/Rshabarth/related_work/Corpora%20and%20Vector%20Spaces%20%E2%80%94%20gensim.htm
"""
"""
@Note: 
1. Doing it for top 200
1.This is copy of lsi_python3 with filter extremes used as one of the factor to remove domain 
specific stop words
2. dictionary will be dumped to db 
3. Added CSV reader :D
4. Need to perform stemming as it is currently not looking quite good [Done]
5. I will write functions such that i can store 
a. Need to download data again :( :( :(
b. Need to change my program so that it can read from  database[not required i will import to csv]

 ===========================FinalFile========================
"""
import logging, gensim, bz2
import MySQLdb
from gensim import corpora, similarities, models
from preprocess import  pp 
import csv 

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

""" ===========Input=====================================================
========================================================================="""
stop_word_file_path = "D:\Research\Rshabarth\data\stop_words.csv"
#data_set_file_path = "D:\Research\Rshabarth\data\chromium_issue_report_exp_data.csv"
data_set_file_path = "D:\Research\Rshabarth\data\sample_db.csv"
db = MySQLdb.connect(host="localhost", user="root", passwd="123",db="lsi_python")
table="lsi_term_score_python"
#term_freq_table = "term_freq_without_comm_py"
dictable="dictionary"

"""No of term that you want to keep indictionary"""
#top=15000#4 [No of terms in dictionary]
top_count=10 #Greater +1 (You can give exact value))
""" add|remove """
No_of_topics = 100
filter_ext = 10
dic_path = "D:\Research\Rshabarth\Results\lsi\dict_"+(str)(No_of_topics)+"_"+(str)(top_count)+".dict"
lsi_path = "D:\Research\Rshabarth\Results\lsi\lsi_"+(str)(No_of_topics)+"_"+(str)(top_count)+".lsi"

print "dicPath",dic_path
print "lsi_path",lsi_path
"""##SHIVANI PAPER MAX MAP"""


cursor = db.cursor()


# Build the stop-word set.  Each line of the stop-word file is run through
# pp() and only the first whitespace-separated token is kept, so that a
# contraction expanded by pp ("he'll" -> "he will") contributes just "he".
# Building a set directly both removes duplicates and gives O(1) membership
# tests later; `with` guarantees the file is closed even on error (the
# original left the handle open).
stop_list = set()
with open(stop_word_file_path) as stop_word_file:
    for line in stop_word_file:
        stop_list.add(pp(line).split(' ')[0])

"""print "list=", stop_list"""
   
"""CSV Hack For Dictioary Creation"""
csv_data_file=open(data_set_file_path,'rb')
data_file_reader = csv.reader(csv_data_file, delimiter=',')
dictionary = corpora.Dictionary(pp(''.join(line)).split() for (line) in data_file_reader ) 
""" Hope This works """  


#print dictionary.items()

stop_ids = [dictionary.token2id[stopword] for stopword in stop_list
             if stopword in dictionary.token2id]

#docFreq3_ids = [tokenid for tokenid, docfreq in dictionary.dfs.iteritems() if docfreq <= 3]


""" Print
for id in stop_ids:
    print "stopid", dictionary.id2token[id]
    
for id in docFreq3_ids:    
    print "DocFreq3Ids", dictionary.id2token[id] """


word_len2=[]
for tokenid in dictionary:
    if len((str)(dictionary[tokenid]))<=2:
        word_len2.append(tokenid)

dictionary.filter_tokens(stop_ids +word_len2) # remove stop words, words that appear only once, and words with length 2
                         

dictionary.filter_extremes(filter_ext, .95)# put top=top {it will store no of terms you want in dictionary}
  
    
dictionary.compactify() # remove gaps in id sequence after words that were removed
print dictionary.items()

"""
for id in dictionary:
    str="insert into %(dictable)s values (\' %(query)s \')" %{'dictable':dictable, 'query':dictionary.id2token[id]}
    cursor.execute(str)
print "Reached1"
db.commit()
"""
#foo =raw_input("continue dictionary created....")
#print foo

"""
Save Dictionary
"""
dictionary.save(dic_path)

     
# Re-open the data CSV for the corpus pass; this module-level reader is the
# one consumed by MyCorpus below.
# NOTE(review): a csv.reader can only be consumed once, so the corpus built
# on top of it is single-pass — any second iteration yields nothing.
csv_data_file=open(data_set_file_path,'rb')
data_file_reader = csv.reader(csv_data_file, delimiter=',')     
    
class MyCorpus(object):
    """Memory-friendly streamed corpus: one CSV row == one bag-of-words doc.

    BUG FIX: the original iterated the shared module-level `data_file_reader`,
    which is exhausted after a single pass — any subsequent iteration of the
    corpus silently yielded nothing.  Opening a fresh reader inside
    `__iter__` makes the corpus safely re-iterable and closes the file
    handle when each pass finishes.
    """

    def __iter__(self):
        # 'rb' mode is required by csv.reader in Python 2.
        with open(data_set_file_path, 'rb') as f:
            for row in csv.reader(f, delimiter=','):
                # Join the row's cells, preprocess, lower-case,
                # whitespace-tokenise, then map to a bag-of-words vector.
                yield dictionary.doc2bow(pp(''.join(row)).lower().split())

                  

corpus_memory_friendly = MyCorpus() # doesn't load the corpus into memory!
print corpus_memory_friendly

print "reached 2"


#for vector in corpus_memory_friendly:
#    print vector  

lsi = gensim.models.lsimodel.LsiModel(corpus=corpus_memory_friendly, id2word=dictionary, num_topics=No_of_topics)
lsi.save(lsi_path)

lsi.print_topic(No_of_topics)



"""=========Initialize==========================================================
    1. Delete everything from term_freq and lsi score table
============================================================================="""
"""del_str1 = "delete from %(tab)s" %{'tab':table}
del_str2 = "delete from %(tab)s" %{'tab':term_freq_table}
del_str2 = "delete from %(tab)s" %{'tab':dictable}

cursor.execute(del_str1)
db.commit()
cursor.execute(del_str2)
db.commit()
cursor.execute(del_str2)
db.commit()
"""
""" Tf-Idf 
tfidf = models.TfidfModel(corpus_memory_friendly)
corpus_tfidf = tfidf[corpus_memory_friendly]
lsi = gensim.models.lsimodel.LsiModel(corpus=corpus_tfidf, id2word=dictionary, num_topics=2)
"""

