"""数据预处理"""
import codecs
import os
import shutil
import sys
from pathlib import Path
import pickle
from gensim.models import KeyedVectors
from tqdm import tqdm_notebook, tqdm
from collections import defaultdict
import json
import re
import jieba
import pyodbc
import numpy as np
import pandas as pd
import copy
import random
# from word2vec import save_vocab
from data.verse.word2vec import save_vocab, save_w2v_model, train_w2v_model, save_wv
from utils import WordEmbedding, add_pad_unk
from utils.util import ensure_dir


# Word segmentation, file to file.
def word_cut_f2f(inputfile: str, outputfile: str, method: str):
    """Segment the text of *inputfile* line by line and write the result to *outputfile*.

    method: one of
      - 'jieba'  : jieba word segmentation, one poem per line;
      - 'segtag' : external segtag64.exe tool (Windows-only hard-coded path);
      - 'char'   : character-level split.
    Tokens are joined by single spaces; lines are joined by ' \\n'
    (the jieba branch additionally appends a trailing ' \\n').
    """
    print('word_cut_f2f {} start!'.format(method))
    if method == 'jieba':
        content_wordcut_list = []
        with codecs.open(inputfile, 'r', 'utf-8') as in_file:
            for line in in_file:  # segment one poem (one line) at a time
                # jieba keeps punctuation as separate tokens
                tokens = jieba.lcut(line.replace('\n', ''))
                content_wordcut_list.append(' '.join(tokens))
        with codecs.open(outputfile, 'w', 'utf-8') as out_file:
            out_file.write(' \n'.join(content_wordcut_list) + ' \n')
    elif method == 'segtag':
        # NOTE: os.system returns the process exit status, not its stdout.
        status = os.system(r'D:\XMU\1-4.大四\大四下\毕业设计\分词工具\segtag2017\segtag2017\segtag\segtag64.exe '
                           r'-g -s -v -r '
                           '{} {}'.format(inputfile.replace('/', '\\'), outputfile.replace('/', '\\')))
        print('segtag分词输出：{}'.format(status))
    elif method == 'char':
        with codecs.open(inputfile, 'r', 'utf-8') as in_file:
            content_list = in_file.read().split('\n')
        # character-level segmentation: one space between every character
        content_charcut_list = [' '.join(line) for line in content_list]
        with codecs.open(outputfile, 'w', 'utf-8') as out_file:
            out_file.write(' \n'.join(content_charcut_list))
    else:
        print('error:不支持method={}！'.format(method))
    print('word_cut_f2f {} finished!'.format(method))


# Prepare segmented data for word-vector training.
def prepare_data_for_train_wv_model(target_file='李白.txt'):
    """Segment one author's raw corpus file with every supported method.

    Generalized: the original hard-coded '李白.txt' is now the default value
    of *target_file*, so existing no-argument calls behave identically.
    Output goes to raw_data/for_train_w2v_model/<method>/<target_file>.
    """
    in_dir = 'raw_data/normal_content_by_author/'
    out_dir = 'raw_data/for_train_w2v_model/'
    methods = ['char', 'segtag', 'jieba']
    ensure_dir(out_dir)

    for filename in os.listdir(in_dir):
        # os.listdir never yields '.'/'..'; the guard is kept for parity only
        if filename in ('.', '..'):
            continue
        if filename == target_file:
            for method in methods:
                tmp_out_dir = "{}{}/".format(out_dir, method)
                ensure_dir(tmp_out_dir)
                word_cut_f2f(inputfile="{}{}".format(in_dir, filename),
                             outputfile="{}{}".format(tmp_out_dir, filename),
                             method=method)
            return


def normalize_data_for_train_wv_model():
    """Copy the segmented files into the *_norm directory, one subdir per method.

    The file list is taken from the 'segtag' variant (methods[2]) so only
    authors the external tool actually produced are propagated.
    """
    in_dir = 'raw_data/for_train_w2v_model/'
    out_dir = 'raw_data/for_train_w2v_model_norm/'
    methods = ['char', 'jieba', 'segtag']
    filenames = os.listdir(in_dir + methods[2])
    for filename in filenames:
        for method in methods:
            ensure_dir(out_dir + method)
            try:  # copy the file; best-effort, errors are reported but not fatal
                shutil.copy(in_dir + method + '/' + filename, out_dir + method)
            except IOError as e:
                print("Unable to copy file. %s" % e)
            except Exception:  # was a bare except: don't trap SystemExit/KeyboardInterrupt
                print("Unexpected error:", sys.exc_info())


# Train one word2vec model per segmentation method and dump its vectors to a .word file.
def train_w2v_model_prepare_wv():
    """Train word2vec models on the normalized corpora and save the raw vectors."""
    in_dir = 'raw_data/for_train_w2v_model_norm/'
    out_dir = '../word_embedding/wv/'
    ensure_dir(out_dir)
    # per-method hyperparameters: embedding size, context window, min frequency
    hyperparams = {
        'char': {'size': 76, 'window': 4, 'min_count': 1},
        'segtag': {'size': 100, 'window': 3, 'min_count': 1},
        'jieba': {'size': 110, 'window': 2, 'min_count': 1},
    }
    for method, params in hyperparams.items():
        # train the model on the method-specific segmented corpus
        model = train_w2v_model(corpus_dir=in_dir + method,
                                size=params['size'],
                                window=params['window'],
                                min_count=params['min_count'])
        save_wv(model, out_dir + method + '.verse.word')


def prepare_data_for_classification(range_choice=0):
    """Select authors whose poem count falls in the chosen band and copy their
    files (all three segmentation variants) into a classification dataset dir.

    range_choice: 0 -> [1000, 10000), 1 -> [500, 1000), 2 -> [100, 500).
    """
    sample_num_ranges = [range(1000, 10000), range(500, 1000), range(100, 500)]
    sample_num_range = sample_num_ranges[range_choice]
    min_gear = sample_num_range[0]  # band lower bound, used to name the output dir
    in_dir = 'raw_data/for_train_w2v_model_norm/'
    out_dir = 'raw_data/for_classification_{}/'.format(min_gear)
    methods = ['char', 'segtag', 'jieba']
    # Count poems using the 'segtag' variant (methods[1]) to pick candidates.
    sift_dirs = []
    for filename in os.listdir(in_dir + methods[1]):
        with codecs.open(in_dir + methods[1] + '/' + filename, 'r', 'utf-8') as f:
            # NOTE(review): files produced elsewhere in this module are joined
            # with '\n'; splitting on '\r\n' likely yields one element —
            # confirm the on-disk line-ending format.
            content_list = f.read().split('\r\n')
            if content_list[-1] == '':
                content_list.pop()  # drop trailing empty line
            # NOTE(review): 'not in' selects authors OUTSIDE the requested
            # band, which contradicts the stated intent — confirm whether
            # this should be 'in' before changing it.
            if len(content_list) not in sample_num_range:
                sift_dirs.append(filename)
    # Copy every selected author's file for each segmentation method.
    for method in methods:
        ensure_dir(out_dir + method)
        for filename in sift_dirs:
            try:  # best-effort copy; report and continue on failure
                shutil.copy(in_dir + method + '/' + filename, out_dir + method)
            except IOError as e:
                print("Unable to copy file. %s" % e)
            except Exception:  # was a bare except: don't trap SystemExit/KeyboardInterrupt
                print("Unexpected error:", sys.exc_info())


def write_to_json(raw_data_list, line_idx, out_json: str):
    """Append (label, text) pairs to *out_json* as JSON Lines.

    Each element of *raw_data_list* is [label, text]; newlines inside the
    text are stripped. Records get consecutive ids starting at *line_idx*.

    Returns the next unused line index.

    Fixes: the output file was opened without a context manager and never
    closed; also no longer mutates the caller's raw_data lists in place.
    """
    with Path(out_json).open('a', encoding='UTF-8') as writer_fd:
        for label, text in raw_data_list:
            writer_fd.write(json.dumps({
                'id': line_idx,
                'text': text.replace('\n', ''),
                'labels': label
            }, ensure_ascii=False) + '\n')
            line_idx += 1
    return line_idx


def sel_data_for_cur_classification_use_dirname(class_num, method, line_idx, dirname_list=None):
    """Build a balanced train/valid split for one author combination and
    append it to the per-main-author jsonl files.

    Args:
        class_num: number of classes (authors) in this combination.
        method: segmentation method subdirectory ('char'/'segtag'/'jieba').
        line_idx: first record id to use; returned incremented.
        dirname_list: author filenames ('<name>.txt'); the first entry is the
            "main" author whose name titles the output directory. When None,
            class_num authors are sampled at random.

    Returns (authors, text_length, line_idx).

    Fix: the original None branch indexed dirname_list[0] while dirname_list
    was still None (guaranteed TypeError); it now samples class_num filenames
    and derives authors from them uniformly.
    """
    in_dir = 'raw_data/for_classification/'
    out_dir = 'train_valid_test/'
    ensure_dir(out_dir)
    if dirname_list is None:  # random selection of class_num distinct authors
        dirs = os.listdir(in_dir + method)
        random.seed()
        dirname_list = random.sample(dirs, class_num)
    authors = [dirname[:-4] for dirname in dirname_list]  # strip '.txt'

    # Load every author's poems and collect length statistics (pre-balancing).
    text_length = []       # lengths of all texts, across all classes
    sel_data = []          # per-class list of poems
    content_num_list = []  # per-class poem counts
    for label_id in range(class_num):
        dirname = dirname_list[label_id]
        with codecs.open(in_dir + method + '/' + dirname, 'r', 'utf-8') as f:
            content_list = f.read().split('\n')
            if content_list[-1] == '':
                content_list.pop()  # drop trailing empty line
            sel_data.append(content_list)
            content_num_list.append(len(content_list))
            text_length.extend(len(content) for content in content_list)

    # Balance classes by truncating everything to the smallest class size.
    content_num = min(content_num_list)
    thresh = int(0.9 * content_num)  # train/valid split point
    # When the main author has more poems than the smallest class, rebuild its
    # list as (shuffled surplus-trimmed train part) + (shuffled valid part) so
    # none of its surplus training poems leak into this combination's valid set.
    if content_num != content_num_list[0]:
        main_thresh = int(len(sel_data[0]) * 0.9)  # main author's valid start index
        main_train_data = sel_data[0][:main_thresh].copy()
        random.shuffle(main_train_data)
        main_train_data = main_train_data[:thresh]
        main_valid_data = sel_data[0][main_thresh:].copy()
        random.shuffle(main_valid_data)
        sel_data[0] = main_train_data + main_valid_data
    for label_id in range(class_num):
        sel_data[label_id] = sel_data[label_id][:content_num]
        random.shuffle(sel_data[label_id])  # vary the sampled split each run

    # Split into train/valid and append to the main author's jsonl files.
    train_data_list = []
    valid_data_list = []
    for label_id, content_list in enumerate(sel_data):
        train_data_list.extend([label_id, content] for content in content_list[:thresh])
        valid_data_list.extend([label_id, content] for content in content_list[thresh:])
    line_idx = write_to_json(raw_data_list=train_data_list, line_idx=line_idx,
                             out_json='{}{}/{}/verse_train.jsonl'.format(out_dir, method, authors[0]))
    line_idx = write_to_json(raw_data_list=valid_data_list, line_idx=line_idx,
                             out_json='{}{}/{}/verse_valid.jsonl'.format(out_dir, method, authors[0]))
    return authors, text_length, line_idx


def prepare_data_for_binary_classification():
    """Build binary-classification datasets: one directory (and one model's
    worth of data) per author, pairing that author against every other one."""
    in_dir = 'raw_data/for_classification/'
    out_dir = 'train_valid_test/'
    author_files = os.listdir(in_dir + 'char')
    for method in ['char', 'segtag', 'jieba']:
        for main_idx, main_file in enumerate(author_files):  # each author in turn
            ensure_dir(out_dir + method + '/' + main_file[:-4])
            line_idx = 0
            text_length = []  # aggregated text-length statistics for this author
            pair_log = out_dir + method + '/' + main_file[:-4] + '/authors_pair.txt'
            with codecs.open(pair_log, 'a', 'utf-8') as f:
                for other_idx, other_file in enumerate(author_files):
                    if other_idx == main_idx:
                        continue  # never pair an author with themselves
                    authors, tmp_text_length, line_idx = sel_data_for_cur_classification_use_dirname(
                        class_num=2, method=method, line_idx=line_idx,
                        dirname_list=[main_file, other_file])
                    text_length.extend(tmp_text_length)
                    f.write('{}\n'.format(' '.join(authors)))


def make_word_embedding(in_dir: str, vocab_file: str, output_file: str):
    """Build a WordEmbedding (stoi/itos/vectors) for the vocabulary actually
    used by the corpus in *in_dir*, and pickle it to *output_file*.

    vocab_file is a text-format word2vec file produced by save_wv().
    """
    # Load the pre-trained word vectors.
    wv = KeyedVectors.load_word2vec_format(vocab_file, binary=False, encoding='utf-8', unicode_errors='ignore')
    # Collect every distinct token appearing in the corpus files.
    word_set = set()
    for filename in os.listdir(in_dir):
        with codecs.open(in_dir + filename, 'r', 'utf-8') as f:
            content_list = f.read().split('\n')
            if content_list[-1] == '':
                content_list.pop()  # drop trailing empty line
            for content in content_list:  # one segmented poem per line
                word_set.update(content.split(' '))

    stoi = defaultdict(int)
    itos = defaultdict(str)
    vectors = []
    add_pad_unk(stoi, itos, vectors, wv)  # reserve PAD/UNK entries first
    # NOTE(review): KeyedVectors.vocab only exists in gensim < 4.0
    # (4.x renamed it to key_to_index) — confirm the pinned gensim version.
    for word in word_set:
        if word in wv.vocab:
            stoi[word] = len(stoi)
            itos[len(itos)] = word
            vectors.append(wv.get_vector(word))
    word_embedding = WordEmbedding(stoi=stoi, itos=itos, vectors=vectors)
    # Write the pickle cache; 'with' guarantees the handle closes on error too
    # (the original closed it manually, leaking on exception).
    with Path(output_file).open('wb') as cache_fd:
        pickle.dump(word_embedding, cache_fd)


def prepare_word_embedding_cache():
    """Generate the pickled word/char embedding cache for every segmentation method."""
    in_dir = 'raw_data/for_classification/'
    wv_dir = '../word_embedding/wv/'
    out_dir = '../word_embedding/.cache/'
    ensure_dir(out_dir)
    for method in ['char', 'segtag', 'jieba']:
        make_word_embedding(
            in_dir='{}{}/'.format(in_dir, method),
            vocab_file='{}{}.verse.word'.format(wv_dir, method),
            output_file='{}{}.verse_word_embedding.pkl'.format(out_dir, method))


if __name__ == '__main__':
    # # Train a word2vec model on the whole corpus and save the intermediate word vectors.
    # train_w2v_model_prepare_wv()
    #
    # # Sift the corpus into a classification dataset (range_choice=1 -> 500-999 poems).
    prepare_data_for_classification(range_choice=1)

    # prepare_data_for_binary_classification()

    # Using the sifted dataset and the trained vectors, build the word/char embedding cache.
    # prepare_word_embedding_cache()
