#  对训练的数据进行各种预处理，主要是提取文本特征，并保存为文件
# 文件保存格式：列名(特征名称+标签)、每行表示一个文本的特征及对应的作者标签

# 文章长度
# 句子长度
# 词语平均长度
# 四字成语 idiom
# 词语个数
# 名词个数（去）
# 动词个数
# 虚词个数 ： 文件alldoc/yuqici.txt中包含的所有词(副词(d)、助词(u)、连词(c)、介词(p)、叹词(e)、拟声词(o)、语气词(y))
# 逗号出现的次数
# 句号出现的次数
# 问号出现的次数
# 感叹号出现的次数
# 冒号出现的次数
# 分号出现的次数

import re
import os
from jieba import posseg as pseg
import csv

def isAlpha(word):
    """Return True when *word* is a non-empty run of ASCII letters only."""
    try:
        ascii_form = word.encode('ascii')
    except UnicodeEncodeError:
        # Any non-ASCII character (e.g. Chinese text) means "not an English word".
        return False
    return ascii_form.isalpha()

def isDigit(word):
    """Return True when *word* is a non-empty run of ASCII digits only."""
    try:
        ascii_form = word.encode('ascii')
    except UnicodeEncodeError:
        # Non-ASCII input (e.g. full-width or Chinese numerals) is rejected.
        return False
    return ascii_form.isdigit()

# Module-level bookkeeping shared by the processing loop below.
dir_count = 0      # number of author sub-directories under *path*
i = 0              # progress counter, incremented once per walked directory
label_to_idx = {}  # author label -> integer index, in first-seen order

# f_1 = codecs.open('alldoc/corpora_test.txt', 'w', encoding='UTF-8')
# Corpus root: one sub-directory per author.  Raw string avoids the
# invalid '\c' escape sequence of the original literal.
path = r'E:\corpora_test'

for file in os.listdir(path):
    if os.path.isdir(path + '/' + file):
        dir_count += 1

# Punctuation marks to strip, and the closed list of function words
# ("yuqici": adverbs d, particles u, conjunctions c, prepositions p,
# interjections e, onomatopoeia o, modal words y).  'with' closes the
# handles promptly (the original leaked both file objects), and sets
# give O(1) membership tests in the per-token loop below.
with open('alldoc/punctuation.txt', encoding='UTF-8') as sls:
    stopwords = {line.strip() for line in sls}
with open('alldoc/yuqici.txt', encoding='UTF-8') as yuqici_lst:
    yuqici = {line.strip() for line in yuqici_lst}

# Walk the corpus tree and emit one CSV row of stylometric features per
# document; the last column is the author label (the containing directory).
with open("alldoc/corpora_1.csv", "w", newline='') as csvfile:  # newline='' keeps csv from inserting blank rows on Windows
    writer = csv.writer(csvfile)
    # Header: feature names followed by the label column.
    writer.writerow(["txt_length", "ave_sentence_length", "ave_word_length", "richness_rate",
                     "idiom_count", "emptyword_count", "comma_count", "period_count", "question_mark_count",
                     "exclamation_mark", "colon_count", "semicolon_count", "punctuation_mark_count",
                     "noun_count", "verb_count", "label"])

    # Sentence terminators (Chinese + ASCII).  BUGFIX: the original class
    # '[。|\.|！|...]' listed '|' literally — inside [] it has no alternation
    # meaning — so the text was also split at every '|' character.
    # Delimiters are dropped; wrap the class in () to keep them.
    sentence_end = re.compile(r'[。.！!？?：:；;]')

    noun_tag = ['n', 'nr', 'ns', 'nt', 'nz']  # nouns incl. person/place/org/other proper nouns
    verb_tag = ['v', 'vd', 'vn']              # verbs, adverbial verbs, nominal verbs

    for dir_path, dir_names, file_names in os.walk(path):
        i = i + 1
        print(u'正在处理%s个中的第%s个' % (dir_count, i))
        for file_name in file_names:
            idiom_lst = []  # distinct idioms (POS tag 'i') in this document
            words_lst = []  # distinct content words in this document
            total_word_count = 0
            total_sentence_count = 0
            total_word_length = 0
            total_sentence_length = 0
            emptyword_count = 0  # function words matched against *yuqici*
            emptyword_lst = []
            noun_count = 0
            verb_count = 0
            comma_count = 0             # ，
            period_count = 0            # 。
            question_mark_count = 0     # ？
            exclamation_mark = 0        # ！
            colon_count = 0             # ：
            semicolon_count = 0         # ；
            punctuation_mark_count = 0  # 、 (enumeration comma)
            feature_lst = []

            file_path = '%s%s%s' % (dir_path, os.sep, file_name)
            # NOTE(review): no explicit encoding — the platform default is
            # used; confirm whether the corpus is UTF-8 or GBK.  'with'
            # closes the handle (the original never closed it).
            with open(file_path) as doc:
                text = doc.read().replace('\t', '').replace('\n', '')

            # Feature 1: document length in characters, punctuation included.
            txt_length = len(text)
            feature_lst.append(txt_length)

            # Feature 2: average sentence length (commas, enumeration commas
            # etc. stay inside a sentence; only terminal punctuation splits).
            for sentence in sentence_end.split(text):
                if sentence:
                    total_sentence_count += 1
                    total_sentence_length += len(sentence)
            # Guard: an empty document used to raise ZeroDivisionError.
            ave_sentence_length = (float(total_sentence_length) / total_sentence_count
                                   if total_sentence_count else 0.0)
            feature_lst.append(ave_sentence_length)

            # Features 3-15 come from one POS-tagged pass over the text:
            #   3 average word length, 4 vocabulary richness, 5 distinct
            #   idioms, 6 function words, 7-13 punctuation counts,
            #   14 nouns, 15 verbs.
            for word, flag in pseg.cut(text):
                if word in (',', u'，'):
                    comma_count += 1
                elif word in ('.', u'。'):
                    period_count += 1
                elif word in ('?', u'？'):
                    question_mark_count += 1
                elif word in ('!', u'！'):
                    exclamation_mark += 1
                elif word in (':', u'：'):
                    colon_count += 1
                elif word in (';', u'；'):
                    semicolon_count += 1
                elif word == '、':
                    punctuation_mark_count += 1

                # Guard clauses (replacing four-level nesting): skip listed
                # punctuation, English words, digits and whitespace before
                # counting content words.
                if word in stopwords:
                    continue
                if isAlpha(word) or isDigit(word):
                    continue
                if word == '\t' or word == ' ':
                    continue
                total_word_count += 1           # includes repeated words
                total_word_length += len(word)  # includes repeated words
                if word not in words_lst:
                    words_lst.append(word)
                if flag == 'i':  # idiom (四字成语)
                    if word not in idiom_lst:
                        idiom_lst.append(word)
                elif flag in noun_tag:
                    noun_count += 1
                elif flag in verb_tag:
                    verb_count += 1
                if word in yuqici:
                    emptyword_lst.append(word)
                    emptyword_count += 1

            # Average word length and vocabulary richness over content words
            # only (repeats included in the denominator); empty documents
            # yield 0.0 instead of the original ZeroDivisionError.
            ave_word_length = (float(total_word_length) / total_word_count
                               if total_word_count else 0.0)
            richness_rate = (float(len(words_lst)) / total_word_count
                             if total_word_count else 0.0)
            idiom_count = len(idiom_lst)

            # Label = name of the directory holding the file.  basename()
            # replaces the original hard-coded split on 'E:\corpora_test\',
            # which silently broke whenever *path* was changed.
            label = os.path.basename(dir_path)
            if label not in label_to_idx:
                label_to_idx[label] = len(label_to_idx)

            feature_lst.extend([ave_word_length, richness_rate, idiom_count,
                                emptyword_count, comma_count, period_count,
                                question_mark_count, exclamation_mark,
                                colon_count, semicolon_count,
                                punctuation_mark_count, noun_count,
                                verb_count, label])
            writer.writerow(feature_lst)

# Persist the label ordering: line k of the output file is the author
# whose index is k, so CSV labels can be mapped back to integer ids.
# 'with' replaces the manual open/close, which leaked the handle if a
# write raised.  NOTE(review): no explicit encoding — Chinese labels
# rely on the platform default; confirm UTF-8 is intended.
with open('alldoc/corpora_test' + ".label_to_idx", "w") as fo:
    for label, _ in sorted(label_to_idx.items(), key=lambda x: x[1]):
        fo.write(label + "\n")
print('eeee')