"""
@Uses: This program saves the dictionary into the MySQL table.
"""


import logging, gensim, bz2
import MySQLdb
from gensim import corpora, similarities, models
from preprocess import  pp 
import csv 

logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

""" ===========Input=====================================================
========================================================================="""
"""
Config="D:"
User ="root"
Pass = "123"
"""

Config="E:\\Sangeeta"
User="Sangeeta"
Pass = "sangeeta@123456"

stop_word_file_path = Config+"\\Research\\Rshabarth\\data\\stop_words.csv"
data_set_file_path = Config+"\\Research\\Rshabarth\\data\\dup_exp_data.csv"
#data_set_file_path = Config+"\\Research\\Rshabarth\\data\\sample_db.csv"
id_map_file = Config+"\\Research\\Rshabarth\\data\\dup_issue_id_for_id_mapping.csv"
dup_issue_id_path =  Config+"\\Research\\Rshabarth\\data\\only_dup_issueid_20396.csv"

filter_ext = 10
db = MySQLdb.connect(host="localhost", user=User, passwd=Pass,db="lsi_python")
#term_freq_table = "term_freq_without_comm_py"
dictable="dictionary"

cursor = db.cursor()


# Build the stop-word set from the stop-word file (one word per line;
# despite the .csv extension it is plain text, not CSV).
# Each line is normalized with pp(); only the first whitespace-separated
# token of the processed line is kept (e.g. "he'll" -> "he").
# Fixes: the file handle is now closed deterministically (with-block),
# and the set is built directly instead of list-then-set().
stop_list = set()
with open(stop_word_file_path) as stop_word_file:
    for line in stop_word_file:
        processed_word = pp(line)
        stop_list.add(processed_word.split(' ')[0])

"""print "list=", stop_list"""
   
"""CSV Hack For Dictioary Creation"""
csv_data_file=open(data_set_file_path,'rb')
data_file_reader = csv.reader(csv_data_file, delimiter=',')
dictionary = corpora.Dictionary(pp(''.join(line)).split() for (line) in data_file_reader ) 
""" Hope This works """  


print dictionary.items()

# Ids of stop words that actually occur in the dictionary.
stop_ids = [dictionary.token2id[stopword] for stopword in stop_list
            if stopword in dictionary.token2id]

# Ids of very short tokens (length <= 2); these carry little signal.
# (Rewritten as a comprehension; the redundant (str)(...) cast is kept as
# str() for safety in case id2token yields non-str values.)
word_len2 = [tokenid for tokenid in dictionary
             if len(str(dictionary[tokenid])) <= 2]

# Remove stop words and length-<=2 tokens from the dictionary in one pass.
dictionary.filter_tokens(stop_ids + word_len2)
                         

# Prune dictionary extremes: drop tokens appearing in fewer than
# `filter_ext` documents, or in more than 95% of documents.
# NOTE(review): filter_ext (=10) is passed positionally as gensim's
# `no_below` (minimum document frequency), NOT as `keep_n` (dictionary
# size cap) -- the inline comment below suggests a size cap was intended;
# verify against gensim's Dictionary.filter_extremes signature.
dictionary.filter_extremes(filter_ext, .95)# put top=top {it will store no of terms you want in dictionary}
  
    
dictionary.compactify() # remove gaps in id sequence after words that were removed
print dictionary.items()
"""
for id,docFreq in dictionary.dfs.iteritems():
    print "id", id
    print " key", dictionary.id2token[id]
    print " fre=",docFreq
    print"\n"

"""

# Persist every surviving dictionary token into the `dictable` MySQL table.
# Fixes: the old code built the SQL by string interpolation (a token
# containing a quote would break the statement / allow SQL injection) and
# shadowed the builtin `str`. The token value is now bound as a DB-API
# query parameter; only the table name (a trusted module-level constant,
# which DB-API cannot parameterize) is interpolated.
insert_sql = "insert into %s values (%%s)" % dictable
for token_id in dictionary:
    cursor.execute(insert_sql, (dictionary.id2token[token_id],))
print("Reached1")

db.commit()

#foo =raw_input("continue dictionary created....")
#print foo

"""
#Save Dictionary

dictionary.save(dic_path) 
"""     