'''
Created on 2012. 11. 18.

@author: love
'''
#import logging
import glob
import threading
import Queue
from collections import Counter

import MySQLdb
from gensim import corpora, models, similarities
from gensim.models import ldamodel

#logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

# Every pre-tokenized document file; the filename is expected to end in a
# 6-character sequence id followed by '.txt' (see ThreadDocument.run).
files = glob.glob('C:/workspace/data/index/*')
#files = ['C:/workspace/data/index/512798.txt', 'C:/workspace/data/index/512799.txt', 'C:/workspace/data/index/512800.txt', 'C:/workspace/data/index/512801.txt', 'C:/workspace/data/index/512802.txt']

file_queue = Queue.Queue()  # filenames waiting to be tokenized by worker threads
documents = []  # filled by workers: one list of (seq, word) tuples per file
texts = []  # NOTE(review): shadowed by a local variable in main(); appears unused
seqidcount = {}  # seq (int) -> {lda topic id -> occurrence count}

class ThreadDocument(threading.Thread):
    """Worker thread that tokenizes index files taken from a queue.

    For each filename pulled from ``queue``, the file is split on
    whitespace and one list of ``(seq, word)`` tuples is appended to the
    module-level ``documents`` list, where ``seq`` is the slice
    ``filename[-10:-4]`` (the 6-character id preceding '.txt').
    Runs forever; intended to be started as a daemon thread.
    """

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            # get() stays outside the try/finally so task_done() is
            # called exactly once per item actually retrieved (the
            # original called task_done() even when get() failed).
            filename = self.queue.get()
            try:
                seq = filename[-10:-4]
                wordlist = []
                # 'with' guarantees the handle is closed; the original
                # file() handle was never closed.
                with open(filename) as infile:
                    for line in infile:
                        for word in line.split():
                            wordlist.append((seq, word))
                # list.append is atomic under the GIL, so concurrent
                # workers can share this list safely.
                documents.append(wordlist)
            except Exception as e:
                # Best-effort: report and move on to the next file.
                print(e)
            finally:
                self.queue.task_done()


                       
def main():
    """Tokenize all index files, train an LDA topic model over them, and
    record, per document sequence id, how often each topic was the best
    match for one of its words.  Results go to MySQL table seq_id_count.
    """
    # Two daemon workers drain file_queue into the shared `documents` list.
    for _ in range(2):
        dt = ThreadDocument(file_queue)
        dt.setDaemon(True)
        dt.start()

    for fname in files:
        file_queue.put(fname)
    file_queue.join()  # block until every file has been tokenized

    # Word frequencies across the whole corpus.  The original code did
    # `all_tokens += word[1]`, which extended the list with the *characters*
    # of each word, and then used list.count() in a loop (O(n^2)).
    token_counts = Counter(word
                           for document in documents
                           for (_seq, word) in document)
    tokens_once = set(word for word, count in token_counts.items()
                      if count == 1)

    # gensim's Dictionary expects each text to be a list of token strings.
    # (The original kept the (seq, word) tuples and compared them against
    # plain strings, so nothing was ever filtered out.)
    texts = [[word for (_seq, word) in document if word not in tokens_once]
             for document in documents]

    dictionary = corpora.Dictionary(texts)
    dictionary.save('C:/workspace/data/model/trends.dict')
    id2word = corpora.Dictionary.load('C:/workspace/data/model/trends.dict')

    corpus = [dictionary.doc2bow(text) for text in texts]
    corpora.MmCorpus.serialize('C:/workspace/data/model/trends.mm', corpus)
    mm = corpora.MmCorpus('C:/workspace/data/model/trends.mm')

    lda = ldamodel.LdaModel(corpus=mm, id2word=id2word, num_topics=300)
    lda.save('C:/workspace/data/model/trends.lda')

#    index = similarities.SparseMatrixSimilarity(corpus=lda[corpus], num_features=lda.num_topics, num_terms=lda.num_topics)
#    index.save('C:/workspace/data/model/trends.index')

    # For every (seq, word) pair, find the word's dominant topic and tally
    # topic hits per document sequence number.
    for document in documents:
        for (seq_str, word) in document:
            seq = int(seq_str)
            # doc2bow takes a *list* of tokens; the original passed the
            # bare string, which doc2bow iterates character by character.
            vec_bow = dictionary.doc2bow([word])
            vec_lda = lda[vec_bow]
            if not vec_lda:
                # Word was filtered out of the dictionary (e.g. occurred
                # only once) -> no topic vector; the original would have
                # raised IndexError here.
                continue
            topic_id = max(vec_lda, key=lambda item: item[1])[0]

            seqidcount.setdefault(seq, {})
            seqidcount[seq].setdefault(topic_id, 0)
            seqidcount[seq][topic_id] += 1

    db = MySQLdb.connect('127.0.0.1', 'root', 'test', 'trends',
                         charset='utf8', use_unicode=True)
    try:
        cursor = db.cursor()
        # Parameterized query instead of %-interpolated SQL.
        sql = 'insert into seq_id_count (seq, id, count) values (%s, %s, %s)'
        for (seq, topic_counts) in seqidcount.items():
            for (topic_id, count) in topic_counts.items():
                cursor.execute(sql, (seq, topic_id, count))
        db.commit()
    finally:
        # Close the connection even if an insert fails.
        db.close()
                
        
# Run the full pipeline only when executed as a script (not on import).
if __name__ == '__main__':
    main()