import os
import pandas as pd
import re
import jieba
import numpy as np
from sklearn.preprocessing import LabelEncoder
import tensorflow.contrib.keras as kr

# Filename of the temporary segmentation output written by cutForJieba().
tmp_segment = r'tmp_segment.txt'

# 加载停用词
def load_stopWords(path):
    stopwords = ['\n', '', ' ', '\n\n']
    with open(path, "r", encoding='utf8') as f:
        lines = f.readlines()
        for line in lines:
            stopwords.append(line.strip())
    return stopwords

# 获取所有样本内容、保存content_list
def getFileContent(filePath):
    with open(filePath, encoding='utf8') as file:
        fileStr = ''.join(file.readlines(1000))
    return fileStr

# 写文件
def write_txt(file_name, content_list, mode="w"):
    with open(file_name,mode, encoding='UTF-8') as f:
        for line in content_list:
            f.write(line+"\n")

# 获取file_dir目录下，所有文本路径，包括子目录文件
def getFilePathList(file_dir):
    filePath_list = []
    for walk in os.walk(file_dir):
        part_filePath_list = [os.path.join(walk[0], file) for file in walk[2]]
        filePath_list.extend(part_filePath_list)
    return filePath_list

# 对内容进行分词
def contentForJieba(content_list):
    jieba_content_list = []
    for content in content_list:
        content = jieba.cut(content, cut_all=False)
        jieba_content_list.append(' '.join(content))
    return jieba_content_list

#JIEBA分词并写入文件
def cutForJieba(no_swd_content_list, no_swd_content, out_dir):
    jieba_content_list = contentForJieba(no_swd_content_list)
    document_cut = jieba.cut(no_swd_content, cut_all=False)
    result = ' '.join(document_cut)
    result = result.encode('utf-8')
    with open(out_dir + '/' + tmp_segment, 'wb+',) as f1:
        f1.write(result) # 读取的方式和写入的方式要一致
    f1.close()
    return out_dir + '/' + tmp_segment,jieba_content_list

# 删除停用词
def delete_stopwords(lines_list, stopwords= []):
    sentence_segment=[]
    for sentence in lines_list:
        tmp_str = ''
        for word in sentence:
            if word not in stopwords:
                tmp_str = tmp_str + word
        sentence_segment.append(tmp_str)
    return sentence_segment


# 去停用词后结巴分词
def cutNoSwdForjieba(content_list,stopwords=[]):
    no_swd_content_list = delete_stopwords(content_list, stopwords)
    jieba_content_list = contentForJieba(no_swd_content_list)
    return jieba_content_list

# 制作词汇表
def getVocabularyList(content_list, out_dir, stopwords=[]):
    no_swd_content_list = delete_stopwords(content_list, stopwords)
    no_swd_all_str = ''.join(no_swd_content_list)
    tmp_path,jieba_content_list = cutForJieba(no_swd_content_list,no_swd_all_str, out_dir)
    return tmp_path,jieba_content_list


# 获取files_dir路径下所有文件路径，以及labels,其中labels用子级文件名表示
# files_dir目录下，同一类别的文件放一个文件夹，其labels即为文件的名
# :param files_dir:
# :返回：filePath_list所有文件的路径,label_list对应的labels
def gen_files_labels(files_dir, out_dir):
    """Collect every file under *files_dir* with its label and cleaned content.

    Files of one class are expected to share a folder; the parent directory
    name of each file is taken as its label.  The distinct labels are written
    to ``<out_dir>/labels.txt`` and a per-label frequency table is printed.

    Fixes: ``'\\s+'`` made a raw string (avoids the invalid-escape-sequence
    DeprecationWarning); top-level ``pd.value_counts`` (deprecated in
    pandas 2.x) replaced with ``pd.Series(...).value_counts()``.

    Returns:
        (filePath_list, label_list, content_list) — parallel lists.
    """
    filePath_list = getFilePathList(files_dir)
    print("files nums:{}".format(len(filePath_list)))
    label_list = []
    content_list = []
    for filePath in filePath_list:
        # Parent directory name == class label.
        label = filePath.split(os.sep)[-2]
        label_list.append(label)
        fileStr = getFileContent(filePath)
        # Collapse all whitespace runs into single spaces.
        content = re.sub(r'\s+', ' ', fileStr)
        content_list.append(content)
    labels_set = list(set(label_list))
    write_txt(out_dir + r'/labels.txt', labels_set)
    print("labels:{}".format(labels_set))
    # Label frequency table for a quick class-balance sanity check.
    print(pd.Series(label_list).value_counts())
    return filePath_list, label_list, content_list


def label_encodeingEx(label_list):
    """Encode string labels as integer codes via sklearn's LabelEncoder.

    Returns:
        (encoded_labels, fitted_encoder) — the encoder is returned so callers
        can invert the mapping later with ``inverse_transform``.
    """
    encoder = LabelEncoder()
    return encoder.fit_transform(label_list), encoder


def get_labels_set(label_list):
    """Return the distinct labels of *label_list* as a list (order is
    whatever ``set`` iteration yields) and print them."""
    distinct = list(set(label_list))
    print("labels:{}".format(distinct))
    return distinct

# 将字符串类型的label编码成int,-1表示未知的labels
# :param label_list:
# 将labels转为整数编码 中文：数字
# Encode string labels as integers; -1 marks labels not present in labels_set.
def labels_encoding(label_list, labels_set=None):
    """Map each label of *label_list* to its index within *labels_set*.

    If *labels_set* is None it is derived from the distinct labels of
    *label_list*.  Labels missing from *labels_set* encode as -1 (unknown).

    Fixes: per-item ``list.index`` lookup (O(n*m)) replaced by a precomputed
    dict (``setdefault`` keeps first-occurrence semantics for duplicate
    entries, matching ``list.index``); typo "unknow" in the warning message
    corrected; dead commented-out code removed.

    Returns:
        (labels, labels_set) — *labels* is a numpy int array.
    """
    if labels_set is None:
        labels_set = list(set(label_list))
    # label -> first index, computed once.
    index_of = {}
    for i, label in enumerate(labels_set):
        index_of.setdefault(label, i)
    labels = []
    for label in label_list:
        if label in index_of:
            labels.append(index_of[label])
        else:
            print("warning unknown label")
            labels.append(-1)  # -1 encodes an unknown label
    labels = np.asarray(labels)
    for i, name in enumerate(labels_set):
        print("labels:{}->{}".format(name, i))
    return labels, labels_set


# 将int类型的label解码成字符串类型的label
# param label_list:
# return:
def labels_decoding(labels,labels_set):
    for i in range(len(labels_set)):
        print("labels:{}->{}".format(labels_set[i],i))
    labels_list = []
    for i in labels:
        if i == -1:
            print("warning unknow label")
            labels_list.append('unknow')
            continue
        labels_list.append(labels_set[i])
    return labels_list