# coding:utf-8
import codecs
import multiprocessing
import os

import chardet
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression

import wordCut


def tokenFile(file_path, write_path):
    """Tokenize a corpus file of 'label<TAB>text' lines.

    Reads ``file_path`` line by line, segments the text part with
    ``wordCut.WordCut`` and writes 'label<TAB>tokenized_text' lines to
    ``write_path``.

    :param file_path: input file, one 'label\\ttext' record per line (UTF-8)
    :param write_path: output file to receive tokenized records (UTF-8)
    :return: None (writes the output file and prints a completion notice)
    """
    word_divider = wordCut.WordCut()
    with codecs.open(write_path, 'w', encoding='utf-8') as w:
        with codecs.open(file_path, 'r', encoding='utf-8') as f:
            # Iterate the file directly instead of readlines() so a large
            # corpus is streamed rather than loaded into memory at once.
            for line in f:
                line = line.strip()
                if not line:
                    continue  # skip blank lines instead of crashing on split
                # Split once; partition keeps any tabs inside the text part.
                label, sep, text = line.partition('\t')
                if not sep:
                    continue  # malformed line without a tab: skip, don't crash
                token_sen = word_divider.seg_sentence(text)
                w.write(label + '\t' + token_sen + '\n')

    print(file_path + ' has been token and token_file_name is ' + write_path)
def constructDataset(path):
    """Load a tokenized corpus file of 'label<TAB>text' lines.

    :param path: file path of a UTF-8 'label\\ttext' file
    :return: (label_list, corpus_list) — parallel lists of labels and texts
    """
    label_list = []
    corpus_list = []
    with open(path, 'r', encoding='utf-8') as p:
        for line in p:
            # Split once on the first tab; rstrip removes the trailing
            # newline that the old code left attached to every corpus entry.
            label, sep, text = line.rstrip('\n').partition('\t')
            if not sep:
                continue  # malformed line without a tab: skip, don't crash
            label_list.append(label)
            corpus_list.append(text)
    return label_list, corpus_list

def calTfIdf(val_token=r"F:\Resources\pack\cnews\val_token.txt",
             train_token=r"F:\Resources\pack\cnews\train_token.txt",
             test_token=r"F:\Resources\pack\cnews\test_token.txt"):
    """Compute corpus-wide tf-idf statistics over the full dataset.

    Notes (translated from the original comments):
    1. tf-idf must be computed over the *whole* corpus, so the
       train + test + val corpora are concatenated first.
    2. To keep the feature space from exploding, low-frequency words are
       dropped (``min_df``).

    :param val_token: path of the tokenized validation file
    :param train_token: path of the tokenized training file
    :param test_token: path of the tokenized test file
    :return: None (prints vocabulary and tf-idf matrix shape)
    """
    val_label, val_set = constructDataset(val_token)
    test_label, test_set = constructDataset(test_token)
    train_label, train_set = constructDataset(train_token)

    corpus_set = train_set + val_set + test_set
    print("length of corpus is: " + str(len(corpus_set)), type(corpus_set))
    vectorizer = CountVectorizer(min_df=1e-5)  # drop terms with df < 1e-5 (low-frequency words)
    # BUG FIX: the original used TfidfVectorizer here, but TfidfVectorizer
    # expects raw documents, not the sparse count matrix produced below.
    # TfidfTransformer is the class that rescales an existing count matrix.
    transformer = TfidfTransformer()
    counts = vectorizer.fit_transform(corpus_set)
    # tfidf[i][j] is the tf-idf weight of term j in document i.
    tfidf = transformer.fit_transform(counts)
    try:
        words = vectorizer.get_feature_names_out()
    except AttributeError:
        # get_feature_names() was removed in scikit-learn 1.2; keep a
        # fallback so the script still runs on older installations.
        words = vectorizer.get_feature_names()
    print(words)
    print("how many words: {0}".format(len(words)))
    print("tf-idf shape: ({0},{1})".format(tfidf.shape[0], tfidf.shape[1]))


if __name__ == "__main__":
    tmp_catalog = r'F:\Resources\pack\cnews\\'
    # tmp_catalog = os.path.normpath(tmp_catalog)+r"\\"
    # file_list = [os.path.join(tmp_catalog,r'train.txt'), os.path.join(tmp_catalog,r'test.txt')]
    # file_list = [tmp_catalog + 'train.txt', tmp_catalog + 'test.txt']
    file_list = [tmp_catalog + 'cnews.val.txt']
    write_list = [tmp_catalog + 'val_token.txt']
    calTfIdf()
    # pool = multiprocessing.Pool(processes=4)
    # for file_path, write_path in zip(file_list, write_list):
    #     pool.apply_async(tokenFile, (file_path, write_path, ))
    # pool.close()
    # pool.join()  # 调用join()之前必须先调用close()
    # print("Sub-process(es) done.")