import jieba.posseg as pseg
import os
import re
import traceback
import time
from collections import Counter


# Segment all files and store the results.
def FileCutWords(fileRoot,fileSaveRoot):
    """Segment every file under fileRoot with jieba POS tagging, drop
    stop words and uninformative POS tags, and write the surviving
    tokens (space separated) to a numbered file under fileSaveRoot.

    fileRoot     -- directory of raw text files (read as GBK)
    fileSaveRoot -- directory the segmented files are written to
    """
    fileCount = 0
    # Load the stop-word list once; a set gives O(1) membership tests
    # and `with` closes the handle (the original leaked it).
    with open('./stopWords.txt', 'r') as fstop:
        stopwords = {line.strip() for line in fstop}
    # POS tags to discard (punctuation, conjunctions, particles, ...).
    stop_flag = {'x', 'c', 'u', 'd', 'p', 't', 'uj', 'm', 'f', 'r'}
    for filename in os.listdir(fileRoot):
        fileSinglePath = fileRoot + filename
        fileCount += 1
        # Prefix a running number so output files keep a stable order.
        fileSavePath = fileSaveRoot + str(fileCount) + " " + filename
        try:
            # Context managers guarantee both handles are closed even
            # when segmentation raises mid-file.
            with open(fileSinglePath, 'r', encoding='gbk', errors='ignore') as fopen, \
                 open(fileSavePath, 'w') as fsave:
                for line in fopen:
                    # Keep only words with an informative POS tag that
                    # are not stop words.
                    for word, flag in pseg.cut(line):
                        if flag not in stop_flag and word not in stopwords:
                            fsave.write(word + " ")
        except Exception:
            # Bug fix: print_exc() already prints and returns None, so the
            # original `print(traceback.print_exc())` emitted a stray "None".
            traceback.print_exc()
            print(fileCount)
# Segment and number the files, then store them in the cutfile folder.
fileRoot = './演示语料/'  # source corpus directory ("demo corpus")
fileSaveRoot = './cutfile2/'  # destination for the segmented, numbered files
# NOTE(review): this runs on import; consider an `if __name__ == "__main__":` guard.
FileCutWords(fileRoot,fileSaveRoot)

# Collect all distinct words.
def build_vocabulary(fileroot):
    """Return the set of distinct space-separated tokens found across
    every file under fileroot; empty tokens are ignored.
    """
    vocabulary = set()
    for filename in os.listdir(fileroot):
        # `with` closes each handle deterministically (the original
        # leaked one open file per document); os.path.join works whether
        # or not fileroot has a trailing separator.
        with open(os.path.join(fileroot, filename), 'r') as fopen:
            for line in fopen:
                vocabulary.update(w for w in line.split(' ') if w != "")
    return vocabulary

# fileRoot = "./cutfile1/"
# saveFilePath = "./vector/"
# vocabulary = build_vocabulary(fileRoot)
# #总词汇 存文件
# fileName = saveFilePath + "vocabulary1.txt"
# fs = open(fileName,'w')
# fs.write(' '.join(vocabulary))
# print("总共有："  +str(len(vocabulary)) + "词")
# print(vocabulary)
#111448词


# #针对 每一篇文档计算 词频
# doc = open("./测试/1025 京津间建第三条高速通道 “3310”路网初步形成.txt",'r')
# tf = Counter()
# for line in doc:
#     for word in line.split(' '):
#         if word == '':
#             continue
#         tf[word] += 1
# print(tf.items())

# Read the whole vocabulary back from a file.
def GetVocabulary(fileName):
    """Return the list of tokens stored (space separated) in fileName.

    Each token is stripped of surrounding whitespace (e.g. a trailing
    newline) and empty tokens are skipped — the original kept both,
    which broke exact-match counting downstream and was inconsistent
    with build_vocabulary. The file handle is now closed via `with`.
    """
    vocabulary = []
    with open(fileName, 'r') as fopen:
        for line in fopen:
            for word in line.split(' '):
                word = word.strip()
                if word:
                    vocabulary.append(word)
    return vocabulary

# Count each vocabulary word's frequency per document.
def WordFrequency(fileroot, vocabulary, fileSavePath="./vector/wordfrequency.txt"):
    """For each file under fileroot, write one line to fileSavePath:
    the count of every vocabulary word in that document, comma
    separated, in vocabulary order.

    fileroot     -- directory of segmented (space separated) documents
    vocabulary   -- iterable of words to count, defines column order
    fileSavePath -- output path (new keyword parameter; defaults to the
                    original hard-coded location)

    Bug fixed: the original re-ran `for line in fopen` for every
    vocabulary word on an already-exhausted file handle, so every word
    after the first counted 0. Each document is now read once into a
    Counter — O(file + |vocabulary|) instead of O(file * |vocabulary|) —
    and all file handles are closed via `with`.
    """
    with open(fileSavePath, 'w') as fs:
        for fileName in os.listdir(fileroot):
            # Single pass over the document; Counter returns 0 for
            # missing keys, matching list.count() for absent words.
            tf = Counter()
            with open(os.path.join(fileroot, fileName), 'r') as fopen:
                for line in fopen:
                    tf.update(line.split(' '))
            for word in vocabulary:
                fs.write(str(tf[word]))
                fs.write(",")
            fs.write('\n')
# startTime = time.time()
# fileName = "./vector/vocabulary.txt"
# vocabulary = GetVocabulary(fileName)
# endTime = time.time()
# print("获取所有词汇的时间： " + str (endTime - startTime))
# print(vocabulary)
# print("所有词长度" + str(len(vocabulary)))
# #统计词频
# startTime = time.time()
# fileRoot = "./cutfile/"
# WordFrequency(fileRoot,vocabulary)
# endTime = time.time()
# print("统计所有词频的时间： " + str (endTime - startTime))

#测试数据正确
# fopen = open("./vector/wordfrequency.txt")
# linenum = 0
# for line in fopen:
#     linenum += 1
#     for item in line.split(','):
#         if item != "0":
#             print(item,end=" ")
# print("总共 " + str(linenum) + " 行")

# s1 = "我的 你的 他的 什么 鬼呀 我的 你的呀 他的 什么"
# s = "什么呀"
# tf = 0
# tf += s1.split(' ').count(s)
# print(tf)