#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os

# Directory of bundled text data, resolved relative to this source file.
data_fold = os.path.join(os.path.split(os.path.realpath(__file__))[0], "data")

# Raw input documents (nlp_test0-2) and their segmented outputs (nlp_test3-5).
# (Removed the stray trailing semicolons — not idiomatic Python.)
text0 = os.path.join(data_fold, "nlp_test0.txt")
text1 = os.path.join(data_fold, "nlp_test1.txt")
text2 = os.path.join(data_fold, "nlp_test2.txt")
text3 = os.path.join(data_fold, "nlp_test3.txt")
text4 = os.path.join(data_fold, "nlp_test4.txt")
text5 = os.path.join(data_fold, "nlp_test5.txt")

import jieba

def _segment_file(src, dst):
    """Read *src*, segment it with jieba, and write space-joined tokens to *dst*."""
    with open(src, encoding="UTF-8") as fin:
        tokens = jieba.cut(fin.read())
        with open(dst, mode='w', encoding="UTF-8") as fout:
            fout.write(' '.join(tokens))


def corpus():
    """Segment the three raw documents (text0-2) into text3-5.

    Registers a few proper nouns with jieba first so the tokenizer keeps
    them as single tokens.  Side effects only (writes files); returns None.

    Fixes vs. original: the redundant ``f.close()``/``f2.close()``/``f3.close()``
    calls after each ``with`` block are removed — ``with`` already closes the
    files, and those calls relied on variables leaked out of the ``with`` scope.
    The triplicated read/cut/write sequence is factored into _segment_file.
    """
    # Teach jieba these proper nouns so they are not split apart.
    jieba.suggest_freq('沙瑞金', True)
    jieba.suggest_freq('易学习', True)
    jieba.suggest_freq('王大路', True)
    jieba.suggest_freq('京州', True)

    # Segment the first document.
    _segment_file(text0, text3)

    # Segment the second document.
    _segment_file(text1, text4)

    # Segment the third document ('桓温' only appears in this one).
    jieba.suggest_freq('桓温', True)
    _segment_file(text2, text5)

# Load the three segmented documents back in (produced by corpus()).
_segmented = []
for _path in (text3, text4, text5):
    with open(_path, encoding="UTF-8") as _fh:
        _segmented.append(_fh.read())
res1, res2, res3 = _segmented

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation

# Bag-of-words term counts for the three segmented documents.
# Renamed from `corpus` — that name shadowed the corpus() function above.
docs = [res1, res2, res3]
cntVector = CountVectorizer()
cntTf = cntVector.fit_transform(docs)

# Fit a 2-topic LDA model.  NOTE: the `n_topics` keyword was renamed to
# `n_components` in scikit-learn 0.19 and removed in 0.21, so the old
# spelling raises TypeError on any modern sklearn.
lda = LatentDirichletAllocation(n_components=2,
                                learning_offset=50.,
                                random_state=0)
docres = lda.fit_transform(cntTf)

print(docres)           # document-topic distribution, shape (3, 2)
print(lda.components_)  # unnormalized topic-word distribution