"""
Things to be done
1. Identify the correct stop word list
2. link:file:///D:/Research/Rshabarth/related_work/Corpora%20and%20Vector%20Spaces%20%E2%80%94%20gensim.htm
"""

"""
@Note: 
1. Doing it for top 200
1. This is a copy of lsi_python3 with filter_extremes used as one of the factors to remove domain 
specific stop words
2. dictionary will be dumped to db 
3. Added CSV reader :D
4. Need to perform stemming as it is currently not looking quite good [Done]

5. I will write functions such that i can store 

:( :( :(
a. Need to download data again
b. Need to change my program so that it can read from  database[not required i will import to csv]


"""
import logging, gensim, bz2
import MySQLdb
from gensim import corpora, similarities, models
from preprocess import  pp 
import csv 




# Verbose console logging so gensim reports progress while the dictionary
# and corpus are built.
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

""" ===========Input=====================================================
========================================================================="""
# Raw strings keep the Windows backslashes literal: the original plain
# strings only worked because \R, \s, \c happen not to be escape
# sequences; a path segment starting with \t or \n would silently corrupt.
stop_word_file_path = r"D:\Research\Rshabarth\Results\stop_words.csv"
#data_set_file_path = r"D:\Research\Rshabarth\Results\chromium_issue_report.csv"
data_set_file_path = r"D:\Research\Rshabarth\Results\sample_db.csv"

# MySQL connection and the tables this run writes to.
db = MySQLdb.connect(host="localhost", user="root", passwd="123", db="lsi_python")
table = "lsi_term_score_python"                # LSI term/score output table
term_freq_table = "term_freq_without_comm_py"  # term-frequency table
dictable = "dictionary"                        # pruned-dictionary dump table

No_of_topics = 10  # number of LSI topics to extract

cursor = db.cursor()


"""=========Initialize==========================================================
    1. Delete everything from term_freq and lsi score table
============================================================================="""
del_str1 = "delete from %(tab)s" %{'tab':table}
del_str2 = "delete from %(tab)s" %{'tab':term_freq_table}
del_str2 = "delete from %(tab)s" %{'tab':dictable}

cursor.execute(del_str1)
db.commit()
cursor.execute(del_str2)
db.commit()
cursor.execute(del_str2)
db.commit()

# Build the stop-word set: one word per line in a plain text file
# (not actually CSV despite the .csv extension).
stop_list = []
with open(stop_word_file_path) as stop_word_file:  # 'with' closes the handle (original leaked it)
    for line in stop_word_file:
        processed_word = pp(line)
        # Keep only the first token, e.g. the preprocessed "he'll" -> "he".
        first_half = processed_word.split(' ')[0]
        stop_list.append(first_half)

stop_list = set(stop_list)  # remove duplicates

"""print "list=", stop_list"""


# assume there's one document per line, tokens separated by whitespace
#dictionary = corpora.Dictionary(pp(line).split() for line in open(data_set_file_path) )           

"""CSV Hack For Dictioary Creation"""
csv_data_file=open(data_set_file_path,'rb')
data_file_reader = csv.reader(csv_data_file, delimiter=',')
dictionary = corpora.Dictionary(pp(''.join(line)).split() for (line) in data_file_reader ) 
""" Hope This works """  


#
dictionary.filter_extremes(5, .95,4)
  
    
dictionary.compactify() # remove gaps in id sequence after words that were removed
print dictionary.items()
for id in dictionary:
    str="insert into %(dictable)s values (\' %(query)s \')" %{'dictable':dictable, 'query':dictionary.id2token[id]}
    cursor.execute(str)
print "Reached1"
db.commit()
foo =raw_input("continue dictionary created....")
print foo

 
print "dic=",dictionary

print dictionary.token2id

foo2 = raw_input("Enter anything:")
print "raw_input =", foo2 



"""========Completed Preprocessing====================="""

""" Original
class MyCorpus(object):
     def __iter__(self):
         for line in open('D:\hi.txt'):
             # assume there's one document per line, tokens separated by whitespace
             yield dictionary.doc2bow(line.lower().split())
"""
 
# Re-open the data set for the streaming pass: the reader used to build
# the dictionary above was already consumed.
csv_data_file=open(data_set_file_path,'rb')
data_file_reader = csv.reader(csv_data_file, delimiter=',')     
  


class MyCorpus(object):
     def __iter__(self):
         #for text in open(data_set_file_path)://normal File
         for text in data_file_reader:
             print text
             #line = text #Not Required
             # assume there's one document per line, tokens separated by whitespace
             yield dictionary.doc2bow(pp(''.join(text)).lower().split())

                  

# Instantiate the streamed corpus: vectors are produced lazily, one CSV
# row at a time, so the full data set never resides in memory.
corpus_memory_friendly = MyCorpus() # doesn't load the corpus into memory!
print corpus_memory_friendly

print "reached 2"

# Dump every bag-of-words vector (a list of (token_id, count) pairs) for
# manual inspection.
for vector in corpus_memory_friendly:
    print vector 

