# -*- coding: utf-8 -*-
# @Time    : 2020/8/20 4:46 下午
# @Author  : jeffery
# @FileName: train.py
# @website : http://www.jeffery.ink/
# @github  : https://github.com/jeffery0628
# @Description:
import codecs
import os

from utils import WordEmbedding
import torch
import numpy as np
from model import makeModel, makeLoss, makeMetrics, makeOptimizer, makeLrSchedule
from utils import ConfigParser, read_yaml, write_yaml

import yaml
import random
from pathlib import Path
import json

# fix random seeds for reproducibility
from utils.util import ensure_dir

# Seed every RNG (torch, numpy, random) so runs are reproducible.
SEED = 123
torch.manual_seed(SEED)
# Force deterministic cuDNN kernels and disable the autotuner benchmark mode,
# trading a little speed for run-to-run reproducibility on GPU.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(SEED)
random.seed(SEED)


def sel_data_for_cur_classification(class_num, id_list=None, id_list_idx=None):
    """Prepare balanced train/valid/test data for one classification run.

    Selects ``class_num`` authors (randomly when ``id_list`` is None, otherwise
    by the given indices into the raw-data directory listing), truncates every
    class to the smallest per-author sample count so classes are balanced, and
    writes 85% / 5% / 10% train/valid/test jsonl splits for each tokenization
    method ('char', 'segtag', 'jieba').

    Args:
        class_num: number of author classes to select.
        id_list: indices of the authors to use; None means pick randomly.
        id_list_idx: tag used in the ``authors-<idx>.txt`` output filename.
            Defaults to a module-level global of the same name when one exists
            (the historical behavior, where this was a caller's loop variable),
            else 0.  Previously this raised NameError when no such global was
            defined.

    Returns:
        (authors, text_length, line_idx): selected author names, the length of
        every text seen across all methods, and the last line index returned
        by write_to_json.
    """
    if id_list_idx is None:
        # Backward compatible: the original code read a global loop variable
        # set by (now commented-out) caller loops in __main__.
        id_list_idx = globals().get('id_list_idx', 0)
    in_dir = 'data/verse/raw_data/for_classification_1000/'
    out_dir = 'data/verse/train_valid_test/'
    ensure_dir(out_dir)
    methods = ['char', 'segtag', 'jieba']

    # Collect all candidate authors (one .txt file per author) and optionally
    # choose class_num of them at random.
    dirs = os.listdir(in_dir + methods[0])
    authors = []
    if id_list is None:
        random.seed()  # re-seed from OS entropy so each run picks different authors
        id_list = random.sample(range(0, len(dirs)), class_num)  # distinct indices
    for i in id_list:
        authors.append(dirs[i][:-4])  # strip the '.txt' suffix
    with codecs.open(out_dir + 'authors-{}.txt'.format(id_list_idx), 'w', 'utf-8') as f:
        for i in range(len(authors)):
            f.write('{} {}\n'.format(authors[i], i))

    # Read the selected authors' texts for every tokenization method, and
    # balance the classes by truncating to the smallest per-author count.
    sel_data = dict()
    content_num = 0  # balanced per-class sample count (recomputed per method)
    text_length = []  # lengths of every text, accumulated across methods
    for method in methods:
        sel_data[method] = dict()
        content_num_list = []  # per-author sample counts for this method
        for label in range(len(id_list)):
            i = id_list[label]
            with codecs.open(in_dir + method + '/' + dirs[i], 'r', 'utf-8') as f:
                content_list = f.read().split('\n')
                if content_list and content_list[-1] == '':
                    content_list.pop()  # drop the trailing empty line
                sel_data[method][label] = content_list
                content_num_list.append(len(content_list))
                # Track text lengths for corpus statistics.
                text_length.extend([len(content) for content in content_list])
        # Use the minimum count so every class contributes equally.
        content_num = min(content_num_list)
        for label_id in range(class_num):
            sel_data[method][label_id] = sel_data[method][label_id][:content_num]
            random.shuffle(sel_data[method][label_id])  # vary the split between runs

    # Split into train/valid/test (85% / 5% / 10%) and dump to jsonl.
    # NOTE(review): the thresholds use content_num from the LAST method only;
    # if the methods ever had different minimum counts the earlier methods'
    # splits would be sized by the wrong count — confirm counts always match.
    thresh1 = int(0.85 * content_num)
    thresh2 = int(0.9 * content_num)
    for method in methods:
        line_idx = 0
        ensure_dir(out_dir + method)
        train_data_list = []
        valid_data_list = []
        test_data_list = []
        for label_id, content_list in sel_data[method].items():
            train_data_list.extend([[label_id, content] for content in content_list[:thresh1]])
            valid_data_list.extend([[label_id, content] for content in content_list[thresh1:thresh2]])
            test_data_list.extend([[label_id, content] for content in content_list[thresh2:]])
        line_idx = write_to_json(raw_data_list=train_data_list, line_idx=line_idx,
                                 out_json='{}{}/verse_train.jsonl'.format(out_dir, method))
        line_idx = write_to_json(raw_data_list=valid_data_list, line_idx=line_idx,
                                 out_json='{}{}/verse_valid.jsonl'.format(out_dir, method))
        line_idx = write_to_json(raw_data_list=test_data_list, line_idx=line_idx,
                                 out_json='{}{}/verse_test.jsonl'.format(out_dir, method))
    return authors, text_length, line_idx


def replace_content(find_str, replace_str, filename):
    """Replace every occurrence of *find_str* with *replace_str* in *filename*.

    The file is read and rewritten as UTF-8.  The new content is written to a
    temporary sibling file which then atomically replaces the original, so the
    config file is never missing on disk (the previous remove-then-rename
    sequence left a window where it was).

    Args:
        find_str: exact substring to search for.
        replace_str: replacement text.
        filename: path of the file to patch in place.
    """
    with codecs.open(filename, "r", encoding="utf-8") as f:
        new_config = f.read().replace(find_str, replace_str)
    tmp_name = filename + '.new'
    with codecs.open(tmp_name, "w", encoding="utf-8") as f:
        f.write(new_config)
    # os.replace is atomic on POSIX and overwrites the destination on Windows,
    # unlike os.remove + os.rename.
    os.replace(tmp_name, filename)


def main(config):
    """Wire up data, model, loss, metrics, optimizer and scheduler from the
    parsed *config*, then run one full training with the verse Trainer."""
    ensure_dir('data/verse/.cache')

    # Lazy imports: which trainer applies depends on the dataset — verse here
    # (weibo / cnews / medical_question variants live alongside it in trainer/).
    from data_process import makeDataLoader
    from trainer.verse_trainer import Trainer

    logger = config.get_logger('train')

    train_dl, valid_dl, test_dl = makeDataLoader(config)

    net = makeModel(config)
    logger.info(net)

    loss_fn = makeLoss(config)
    metric_fns = makeMetrics(config)

    opt = makeOptimizer(config, net)
    scheduler = makeLrSchedule(config, opt, train_dl.dataset)

    Trainer(net, loss_fn, metric_fns, opt,
            config=config,
            data_loader=train_dl,
            valid_data_loader=valid_dl,
            test_data_loader=test_dl,
            lr_scheduler=scheduler).train()


def run(config_fname, only_char=False):
    """Train a classifier for every (tokenization method, author dir) pair.

    For each combination, the yaml config at *config_fname* is patched in
    place (word-embedding path, data directory, wordcut method, author name),
    parsed into memory, immediately restored to its pristine template form,
    and then used to drive one training via main().

    Args:
        config_fname: path to the yaml training config to patch and use.
        only_char: when True, train only the 'char' tokenization — transformer
            models tokenize by character and need no word segmentation.
    """
    methods = ['char', 'segtag', 'jieba']
    for method in methods:
        # Transformers need no word segmentation; character splitting suffices.
        if only_char and method != 'char':
            continue
        in_dir = 'data/verse/train_valid_test/' + method
        # Renamed from `dir`, which shadowed the builtin.
        for author_dir in os.listdir(in_dir):
            # Point the config at this method's word embeddings.
            replace_content(find_str="/verse_word_embedding.pkl",
                            replace_str="/{}.verse_word_embedding.pkl".format(method),
                            filename=config_fname)
            # Point the config at this method/author train_valid_test directory.
            replace_content(find_str="/train_valid_test'",
                            replace_str="/train_valid_test/{}/{}'".format(method, author_dir),
                            filename=config_fname)
            # Record the tokenization method in wordcut_method.
            replace_content(find_str="wordcut_method: ''",
                            replace_str="wordcut_method: '{}'".format(method),
                            filename=config_fname)
            # Record the author whose data this run trains on.
            replace_content(find_str="author: ''",
                            replace_str="author: '{}'".format(author_dir),
                            filename=config_fname)

            # Parse the patched config into memory.
            config_params = read_yaml(config_fname)
            config_params['config_file_name'] = config_fname
            config = ConfigParser.from_args(config_params)
            logger = config.get_logger('run')
            logger.info('分词方式={},选取作者({})'.format(method, author_dir))

            # The config is now held in memory — revert the file so the next
            # iteration starts from the pristine template.
            replace_content(find_str="/{}.verse_word_embedding.pkl".format(method),
                            replace_str="/verse_word_embedding.pkl",
                            filename=config_fname)
            replace_content(find_str="/train_valid_test/{}/{}'".format(method, author_dir),
                            replace_str="/train_valid_test'",
                            filename=config_fname)
            replace_content(find_str="wordcut_method: '{}'".format(method),
                            replace_str="wordcut_method: ''",
                            filename=config_fname)
            replace_content(find_str="author: '{}'".format(author_dir),
                            replace_str="author: ''",
                            filename=config_fname)
            # Start training with the in-memory config.
            main(config)


if __name__ == '__main__':
    # Author pairs used by earlier two-class experiments (kept for reference).
    # Normalized to lists throughout: the original mixed list and set literals
    # ({'李白', '白居易'}, {'李商隐', '温庭筠'}), and sets are unordered and
    # unindexable, so any uniform consumer of these pairs would break.
    dissimilar_author_pair = [['李白', '杜甫'], ['李白', '温庭筠'], ['李白', '白居易'], ['骆宾王', '李商隐']]
    similar_author_pair = [['孟浩然', '王维'], ['高适', '岑参'], ['李商隐', '温庭筠'], ['李白', '杜甫'], ['王昌龄', '王之焕']]

    # Earlier (disabled) workflows regenerated data via
    # sel_data_for_cur_classification and swept 2- and 3-author combinations
    # over every model config; see version-control history if needed again.

    # Active path: train each word-embedding model on char-tokenized data.
    run('configs/multi_classification/word_embedding_rnn.yml', only_char=True)
    # run('configs/multi_classification/word_embedding_rcnn.yml', only_char=True)
    run('configs/multi_classification/word_embedding_rnn_attention.yml', only_char=True)
    run('configs/multi_classification/word_embedding_fast_text.yml', only_char=True)
    run('configs/multi_classification/word_embedding_text_cnn.yml', only_char=True)
    run('configs/multi_classification/word_embedding_text_cnn_1d.yml', only_char=True)
    run('configs/multi_classification/word_embedding_dpcnn.yml', only_char=True)

    # Transformer-based models (character tokenization only by design).
    run('configs/multi_classification/transformers_pure.yml', only_char=True)
    run('configs/multi_classification/transformers_cnn.yml', only_char=True)
    run('configs/multi_classification/transformers_rnn.yml', only_char=True)
    run('configs/multi_classification/transformers_rcnn.yml', only_char=True)
