#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Copyright 2021 The Chinaunicom Software Team. All rights reserved.
# @FileName: make_dataset.py
# @Author  : Dammy
# @Time    : 2021/12/17

##################################################################################################
##                     Make train/test Dataset & Segment & make ann model                       ##
##                                                                                              ##
##################################################################################################
import os
import json
import time
from tqdm import tqdm
import re
import jieba
import codecs
import random
import concurrent.futures
import pandas as pd
from gensim.models import KeyedVectors, Word2Vec
# from nlpcda import Simbert
from src.intelligent_interaction.engine.sem_utils import _read_json, _write_json, _write_txt
from src.intelligent_interaction.engine.sem_predictor import Predictor
from src.intelligent_interaction.engine.sem_conf import Configuration
from src.intelligent_interaction.engine.annoy_model import AnnoyRecall

config = Configuration()  # 配置加载
### 预加载 ###
# SimBERT
model_path = 'chinese_simbert_L-6_H-384_A-12'
# SimBERTv2
model_path_ro = '/nlpcda_generator/chinese_roformer-sim-char_L-12_H-768_A-12'

model_config = {
    'model_path': model_path,
    'CUDA_VISIBLE_DEVICES': '0',
    'max_len': 32,
    'seed': 1,
}
# sim_bert = Simbert(config=model_config)


class SegTool(object):
    """
    Sentence splitter and word segmenter (jieba-based) with stop-word removal.
    """
    def __init__(self, stopwords_path=os.getcwd() + '/src/intelligent_interaction/engine/faq_data/stopword.txt', user_dict_path=None):
        """
        :param stopwords_path: stop-word list file, one word per line.
        :param user_dict_path: optional jieba user dictionary to load.
        :raises ValueError: if a supplied path does not exist.
        """
        self.user_dict_path = user_dict_path
        if user_dict_path and not os.path.exists(user_dict_path):
            raise ValueError("File " + user_dict_path + " does not exist")
        elif user_dict_path:
            jieba.load_userdict(user_dict_path)
        if stopwords_path and not os.path.exists(stopwords_path):
            raise ValueError("File " + stopwords_path + " does not exist")
        # Context manager so the stop-word file handle is closed deterministically
        # (the original left the file object dangling).
        with open(stopwords_path, 'r', encoding='UTF-8') as fr:
            self.stopwords = [line.strip() for line in fr]

    def cutSent(self, paragraph):
        """Split a Chinese paragraph into a list of sentences.

        BUG FIX: each replacement now keeps the terminator (group 1) and the
        first character of the next sentence (group 2) via ``r"\\1\\n\\2"``;
        the original replaced the whole match with a bare ``\\n``, silently
        deleting both characters from the text.
        """
        paragraph = re.sub('([。！？?])([^”’])', r"\1\n\2", paragraph)  # single-char terminators
        paragraph = re.sub(r'(\.{6})([^”’])', r"\1\n\2", paragraph)    # English ellipsis
        paragraph = re.sub('(…{2})([^”’])', r"\1\n\2", paragraph)      # Chinese ellipsis
        # If a closing quote follows a terminator, the quote ends the sentence,
        # so the break goes after the quote (the rules above left quotes intact).
        paragraph = re.sub('([。！？?][”’])([^，。！？?])', r'\1\n\2', paragraph)
        paragraph = paragraph.rstrip()  # drop a trailing newline, if any
        # Semicolons, dashes and plain double quotes are deliberately ignored.
        return paragraph.split("\n")

    def segWords(self, sentence):
        """
        Segment a sentence with jieba and drop stop-words.

        Non-Chinese runs (digits, latin, etc.) are glued back together so
        jieba's per-character output does not fragment them.

        NOTE(review): returns a space-joined ``str`` when more than one token
        survives, but a ``list`` (0 or 1 items) otherwise — callers in this
        file compensate with ``''.join``; kept as-is for compatibility.

        :param sentence: raw sentence text.
        :return: space-joined token string, or a list of 0/1 tokens.
        """
        sentence = re.sub('[①②③④⑤⑥⑦⑧⑨∵∴；：，。！？?“”‘’]', ' ', sentence)
        sentence = re.sub('(\.{6})|(…{2})', ' ', sentence)
        sentence = re.sub(r'[【】]+', '', sentence.strip())
        sentence_depart = jieba.cut(sentence)
        result = []

        # jieba tokens, with adjacent non-Chinese fragments re-merged
        outStr = []
        unChineseStr = ''
        for word in sentence_depart:
            word = word.strip()
            if not '\u4e00' <= word <= '\u9fff':
                # accumulate consecutive non-Chinese fragments into one token
                unChineseStr = unChineseStr + word
            else:
                if len(unChineseStr) >= 1:
                    outStr.append(unChineseStr)
                unChineseStr = ''
                outStr.append(word)
        if len(unChineseStr) >= 1:
            outStr.append(unChineseStr)
        # stop-word / empty-token filtering
        for item in outStr:
            if item not in self.stopwords:
                if item and item != '\t':
                    result.append(item)
        if len(result) > 1:
            result = " ".join(result)
        return result


def word_segment(sents):
    """Segment *sents* into words with stop-words removed.

    Splits the text into sentences first, then segments the (re-joined)
    result with :class:`SegTool`.
    """
    tool = SegTool(stopwords_path=os.getcwd() + '/src/intelligent_interaction/engine/faq_data/stopword.txt')
    pieces = tool.cutSent(sents)
    # A single sentence is passed through unchanged; multiple sentences are
    # space-joined — identical to joining in both cases.
    text = pieces[0] if len(pieces) == 1 else " ".join(pieces)
    return tool.segWords(text)


def make_w2i_json(w2v_path, save_w2i_path):
    """
    Build a word-to-index (w2i) JSON file from a word2vec model.

    :param w2v_path: model path; a ``txt`` suffix is loaded as a text-format
        KeyedVectors dump, anything else as a pickled ``Word2Vec`` model.
    :param save_w2i_path: output path for the JSON mapping {word: index}.
    """
    # NOTE(review): ``model.wv.vocab`` is the gensim<4 API (renamed to
    # ``key_to_index`` in gensim 4) — confirm the pinned gensim version.
    if w2v_path.endswith('txt'):
        model = KeyedVectors.load_word2vec_format(w2v_path)
    else:
        model = Word2Vec.load(w2v_path)
    print(len(model.wv.vocab))
    # Both branches previously duplicated this loop; build the mapping once.
    w2i_dict = {word: index for index, word in enumerate(model.wv.vocab)}

    with codecs.open(save_w2i_path, mode='w', encoding='utf-8') as fw:
        fw.write(json.dumps(w2i_dict, ensure_ascii=False))



class MakeTrain:
    """Builds train/val/test sentence-pair datasets from a FAQ csv."""

    def __init__(self, source_path):
        """
        :param source_path: csv with ``question_all``/``answer_all`` columns;
            every 11th row (index % 11 == 0) is a "gold" question followed by
            10 of its paraphrases.
        """
        self.df_faq = pd.read_csv(source_path)
        self.questions = self.df_faq['question_all'].tolist()
        self.answers = self.df_faq['answer_all']
        self.gold_questions = [self.questions[i] for i in range(len(self.questions)) if i % 11 == 0]

    def write_dataset(self, path, dataset):
        """Append every line of *dataset* (lines already carry '\\n') to *path*."""
        with open(path, 'a', encoding='utf-8') as fw:
            for line in dataset:
                fw.write(line)

    def train_test_val_split(self, Corpus, proportion):
        """Split *Corpus* into (train, test, val) sets.

        *proportion* of the corpus is sampled and split evenly into a
        validation half and a test half; the remainder is the train set.

        BUG FIX: the original returned the *whole* held-out sample as the
        validation set (so val fully contained test) and only removed the
        test half from train, leaking validation rows into training.
        """
        held_out = random.sample(Corpus, round(len(Corpus) * proportion))
        half = round(0.5 * len(held_out))
        val_set = held_out[:half]
        test_set = held_out[half:]
        # Remove the whole held-out sample from train, not just the test half.
        train_set = list(set(Corpus).difference(set(held_out)))
        return train_set, test_set, val_set

    @staticmethod
    def _seg_to_str(text):
        """Segment *text*; normalize word_segment's mixed return type.

        word_segment returns a str for multi-word results but a list for
        0/1 surviving tokens — join the list instead of catching TypeError.
        """
        seg = word_segment(text)
        return seg if isinstance(seg, str) else ''.join(seg)

    def make_train_dataset(self):
        """
        Build the pair training dataset.

        Positives: each gold question (row % 11 == 0) paired with the 10
        paraphrases that follow it. Negatives: each gold question paired
        with 15 randomly sampled questions (may rarely hit a true
        paraphrase — accepted label noise). Writes raw and word-segmented
        train/test/val files under faq_data/new_data/.
        """
        flag = ""
        corpus = []      # raw-text pair lines "a@@@b@@@label\n"
        seg_corpus = []  # word-segmented pair lines
        ### positive pairs (the dead `m < 1000000` cap was removed)
        for i in tqdm(range(len(self.questions))):
            if i % 11 == 0:
                flag = self.questions[i]
            else:  # `i % 11 < 11` is always true, so this is a plain else
                corpus.append(flag + '@@@' + self.questions[i] + '@@@' + '1' + '\n')
                seg_corpus.append(self._seg_to_str(flag) + '@@@' + self._seg_to_str(self.questions[i]) + '@@@' + '1' + '\n')
        print("正样本完成")
        ### negative pairs
        for j in tqdm(range(len(self.questions))):
            if j % 11 == 0:
                neg_sample = random.sample(self.questions, 15)
                for neg in neg_sample:
                    corpus.append(self.questions[j] + '@@@' + neg + '@@@' + '0' + '\n')
                    seg_corpus.append(self._seg_to_str(self.questions[j]) + '@@@' + self._seg_to_str(neg) + '@@@' + '0' + '\n')
        print("负样本完成")
        print("总数据: ", len(corpus))

        ### split ###
        train_set, test_set, val_set = self.train_test_val_split(Corpus=corpus, proportion=0.2)
        seg_train_set, seg_test_set, seg_val_set = self.train_test_val_split(Corpus=seg_corpus, proportion=0.2)
        print("训练集数据:{}, 验证集数据: {}, 测试集数据:{}".format(len(train_set), len(val_set), len(test_set)))

        ### write out ###
        self.write_dataset(r'faq_data/new_data/faq_pairs_train.txt', train_set)
        self.write_dataset(r'faq_data/new_data/faq_pairs_test.txt', test_set)
        self.write_dataset(r'faq_data/new_data/faq_pairs_val.txt', val_set)

        self.write_dataset(r'faq_data/new_data/faq_pairs_train_seg.txt', seg_train_set)
        self.write_dataset(r'faq_data/new_data/faq_pairs_test_seg.txt', seg_test_set)
        self.write_dataset(r'faq_data/new_data/faq_pairs_val_seg.txt', seg_val_set)

        

def Augmented_question(source, nums):
    """
    Data augmentation: paraphrase *source* into *nums* similar questions.

    Depends on the module-level SimBERT model ``sim_bert``, whose
    initialization is currently commented out at the top of this file;
    fail with an explicit error instead of an opaque NameError.

    :param source: question text to paraphrase.
    :param nums: number of paraphrases to generate.
    :raises RuntimeError: if the SimBERT model has not been initialized.
    """
    if 'sim_bert' not in globals():
        raise RuntimeError("sim_bert is not initialized; enable the Simbert setup at module level first")
    synonyms = sim_bert.replace(sent=source, create_num=nums)
    return synonyms


def make_predictfile(online_predictfile, predict_data):
    """
    Build the prediction file used to produce semantic vectors.

    Each line has the pair-model input shape
    ``"<segmented question>@@@ @@@0"`` (dummy second sentence and label).

    :param online_predictfile: output path appended to via ``_write_txt``.
    :param predict_data: iterable of raw question strings.
    """
    print("总数据量为: ", len(predict_data))
    for ques in tqdm(predict_data):
        query_seg = word_segment(ques)
        # word_segment returns a str for multi-word results but a list for
        # 0/1 tokens; normalize explicitly instead of a bare except that
        # wrote str(list) — i.e. "['w']" bracket junk — into the file.
        if not isinstance(query_seg, str):
            query_seg = ''.join(query_seg)
        query = query_seg + '@@@' + ' ' + '@@@' + '0'
        _write_txt(online_predictfile, query, False)



### 采用新的数据制作预测后的并制作ANN ###
class MakeAnnPipeline:
    """Predicts semantic vectors for all questions and trains an Annoy index."""

    def __init__(self, save_ann_path, cdssm_model):
        """
        :param save_ann_path: output path for the trained .ann index.
        :param cdssm_model: CDSSM model path handed to :class:`Predictor`.
        """
        self.ann_path = save_ann_path
        self.select_model = "cdssm"
        # BUG FIX: the original referenced the undefined name
        # ``cdssm_model_path`` instead of the ``cdssm_model`` parameter,
        # raising NameError on construction.
        self.predict = Predictor(model_path=cdssm_model)
        self.ar = AnnoyRecall(f_dim=128)

    def pipeline(self, df_data):
        """Vectorize ``df_data['question']`` and train an Annoy index keyed by qid.

        :param df_data: DataFrame with ``qid`` and ``question`` columns.
        """
        print("Annoy Train Begin>>>")
        qid = df_data['qid'].tolist()
        questions = df_data['question'].tolist()

        start = time.time()
        # 1. write the segmented prediction file
        make_predictfile(config.predict_file, questions)
        # 2. batch-predict the file into semantic vectors (not persisted here)
        output_vectors = self.predict.predictor_batch(config.predict_file)
        # 3. train the Annoy index online
        self.ar.train_annoy_index_online(qid, output_vectors, self.ann_path)
        print("训练数据量为: ", len(qid))
        print("Online Training Annoy finish ! All Time is {} s".format(round(time.time()-start, 5)))


def make_new_ann(source_path, save_ann_path, cdssm_model_path):
    """
    Train a new Annoy index from a FAQ csv.

    :param source_path: csv with at least ``qid`` and ``question`` columns.
    :param save_ann_path: output path for the .ann index.
    :param cdssm_model_path: CDSSM model path used for vectorization.
    """
    faq_df = pd.read_csv(source_path, header=0)
    print("总数据量为:", faq_df.shape[0])
    ann = MakeAnnPipeline(save_ann_path, cdssm_model_path)
    ann.pipeline(faq_df)
    # Clear the shared prediction file so the next run does not append to
    # stale data. Opening with "w" truncates (and creates the file if
    # missing, unlike the original "r+", which also read the full contents
    # into an unused variable first).
    with open(config.predict_file, "w", encoding='utf-8'):
        pass


def all_pipeline(selection_steps, grade_subject):
    """
    Full pipeline for building the similar-question dataset.

    :param selection_steps: step to run — one of 'data_fill',
        'train_doc2vec', 'make_pairs', 'split_corpus', 'corpus_fill',
        'make_ann'.
    :param grade_subject: subject identifier used in file names
        (e.g. 'junior_geography').
    :raises KeyError: for an unknown step name.

    NOTE(review): several names used below (``data_parser``,
    ``train_doc_process``, ``train_doc2vec``, ``td_generate_ori2sim``,
    ``TrainData``, ``generate_data``, ``make_split_train_test``) are not
    defined or imported in this module as shown — confirm they are
    provided elsewhere before running these branches.
    """
    if selection_steps == 'data_fill':
        # Fill the raw question data with parsed answers.
        questions_dct = _read_json(r"./new_data/filled_data/junior_geography_origin_20200805.json")
        fill_answer = data_parser.parse_data(grade_subject, questions_dct)
        _write_json(fill_answer, r"./test_dataset/fill_data/filled_junior_geography_origin_20200805.json")

    elif selection_steps == 'train_doc2vec':
        source_path = os.path.join(r'./new_data/filled_data/', 'filled_'+grade_subject+'_origin_20200805.json')
        all_data_json = _read_json(source_path)
        all_tid = list(all_data_json.keys())

        x_train = []      # training documents
        doc_mapping = {}  # row index -> tid mapping
        all_start_time = time.time()

        # Thread pool: each task preprocesses one question document.
        # NOTE(review): calling .result() immediately after submit makes the
        # loop effectively serial; collect futures first for real overlap.
        executor = concurrent.futures.ThreadPoolExecutor(max_workers=12)
        for i in range(0, len(all_tid)):
            x = executor.submit(train_doc_process, i, all_data_json, all_tid, grade_subject)
            x_train.append(x.result())
            doc_mapping[i] = all_tid[i]
        executor.shutdown(True)

        all_time = time.time() - all_start_time
        print("all time:", all_time)
        print("all nums", len(all_tid))
        train_doc2vec(x_train, doc_mapping, grade_subject)

    elif selection_steps == 'make_pairs':
        filled_data_path = r"./new_data/filled_data/filled_junior_biology_origin_20200805.json"
        with codecs.open(filled_data_path, mode='r', encoding='utf-8') as fr:
            all_data_json = json.load(fr)
        td_generate_ori2sim(filled_data_path, grade_subject)
        td = TrainData(all_data_json, grade_subject)
        td.find_topN()
        td.batch_process()

    elif selection_steps == 'split_corpus':
        train_file_name = grade_subject+'_train_tid_data.json'
        train_save_path = os.path.join(r"./new_data/sim_sam_reco_data/train_verify_test_dataset/", train_file_name)
        verify_file_name = grade_subject + '_verify_tid_data.json'
        verify_save_path = os.path.join(r"./new_data/sim_sam_reco_data/train_verify_test_dataset/", verify_file_name)
        test_file_name = grade_subject + '_test_tid_data.json'
        test_save_path = os.path.join(r"./new_data/sim_sam_reco_data/train_verify_test_dataset/", test_file_name)
        all_processed_file_name = grade_subject+'_ori2sim_20200810_0.3_new.json'
        all_processed_save_path = os.path.join(r"./new_data/sim_sam_reco_data/sam_sim_pair", all_processed_file_name)

        all_data, test_data, all_tids = generate_data(all_processed_save_path)

        # Train candidates = everything outside the test set.
        train_data = list(set(all_tids).difference(set(test_data)))

        # Verification set: 10% of the remaining tids.
        verify_data = random.sample(train_data, round(len(train_data) * 0.1))

        # Final train set: remaining minus verification.
        train_data = list(set(train_data).difference(set(verify_data)))

        print("训练测试交集：", len(set(train_data)&set(test_data)))
        print('训练校验交集', len(set(train_data)&set(verify_data)))

        train_json, verify_json, test_json = {}, {}, {}
        for tid in train_data:
            if tid is not None:
                train_json[tid] = all_data[tid]
        for tid in test_data:
            test_json[tid] = all_data[tid]
        for tid in verify_data:
            verify_json[tid] = all_data[tid]

        print("训练数据量：", len(train_json))
        print("校验数据量：", len(verify_json))
        print("测试数据量：", len(test_json))
        _write_json(train_json, train_save_path)
        # BUG FIX: the original wrote train_json to the verify path,
        # so the verification file duplicated the training data.
        _write_json(verify_json, verify_save_path)
        _write_json(test_json, test_save_path)

    elif selection_steps == 'corpus_fill':
        all_train_data_path = os.path.join(r"./new_data/sim_sam_reco_data/train_data/", grade_subject+'_all_train_data_label_ques_0.3.txt')
        all_test_data_path = os.path.join(r"./new_data/sim_sam_reco_data/train_data/", grade_subject+'_all_test_data_label_ques_0.3.txt')
        make_split_train_test(grade_subject, all_train_data_path, all_test_data_path)

    elif selection_steps == 'make_ann':
        cdssm_model_path = r'/media/Data/NLP_worker/WronyBookBusiness/Similar_Questions_Recommended/predict_model/senior_geography_cdssm_model.hdf5'
        source_path = r'/media/Data/dyu/Similar_Questions_Recommended_training/make_elite_data/elite_data/avail_elite_20200515/senior_geography_elite_20200515.json'
        save_ann_path = r'/media/Data/dyu/Similar_Questions_Recommended_training/ann_model/senior_geography_cdssm_elite_20200520.ann'
        # BUG FIX: arguments were passed as (model, source, ann) but the
        # signature is make_new_ann(source_path, save_ann_path, cdssm_model_path).
        make_new_ann(source_path, save_ann_path, cdssm_model_path)

    else:
        raise KeyError('Please enter the correct parameters...')


def first_step():
    """Step 1: load the raw QA pairs and generate augmented questions.

    NOTE: the generated paraphrases are not persisted yet — the variable is
    kept to mirror the original (incomplete) implementation.
    """
    df = pd.read_csv(r"C:\Users\Dammy\Desktop\FAQ_encyclopedia.csv", header=0)
    questions = df['question'].tolist()
    print(questions)
    for idx, question in enumerate(questions):
        synonyms_q = Augmented_question(question, nums=10)

        
def remake_source_data():
    """
    Rebuild the source csv with an explicit string qid column.

    Reads the raw pair csv, adds a ``qid`` column ('0', '1', ...) and writes
    the result next to the source file with columns qid/question/answer.
    :return: None
    """
    df = pd.read_csv(r"E:\P3：联通项目\InnerSource\Unicom_江西操作手册&行业知识问答\faq_code_train\faq_data\FAQ_encyclopedia_pair_all.csv", header=0)
    questions = df['question_all'].tolist()
    answers = df['answer_all'].tolist()
    tids = [str(x) for x in range(len(questions))]
    new_df = pd.DataFrame({'qid': tids, 'question': questions, 'answer': answers})
    new_df.to_csv(r"E:\P3：联通项目\InnerSource\Unicom_江西操作手册&行业知识问答\faq_code_train\faq_data\FAQ_encyclopedia_pair_all_qid.csv", index=False)

    ### Demo: retrieve an answer by qid with pandas.
    # BUG FIX: the original indexed column 'answer' on ``df``, which only has
    # 'question_all'/'answer_all' columns (KeyError); use ``new_df`` instead.
    qid = '3'
    answer = new_df.at[int(qid), 'answer']  # qid is a string



if __name__ == '__main__':
    # Overall pipeline:
    # 1. collect the raw QA pairs
    # 2. paraphrase/generate extra question variants
    # 3. build word2vec from the existing data
    # 4. train the model on question pairs
    # 5. train the semantic-extraction model
    # 6. vectorize all texts -> train the ANN index
    # 7. store question ids so the texts can be restored later
    # 8. output
    pass
    # first_step()
    
    # makeTrain = MakeTrain(source_path=r'faq_data/FAQ_encyclopedia_pair_all.csv')
    # makeTrain.make_train_dataset()
    
    # make_w2i_json(r"C:\Users\Dammy\Desktop\100000-small.txt", r"C:\Users\Dammy\Desktop\faq\100000-small-w2i.json")
    
    # Currently active step: (re)build the Annoy index from the qid csv.
    cdssm_model_path = 'faq_data/complete_models/encyclopedia_cdssm_complete/encyclopedia_cdssm_complete.h5'
    source_path = "faq_data/FAQ_encyclopedia_pair_all_qid.csv"
    save_ann_path = 'faq_data/annoy_model/faq_encyclopedia.ann'
    make_new_ann(source_path, save_ann_path, cdssm_model_path)

