# -*- coding: utf-8 -*-
import os
import json
import sys
import jieba.posseg as pseg
import jieba
import pandas as pd
import numpy as np
from util.model_log import create_log
from sklearn import preprocessing
import time
from sklearn.model_selection import train_test_split
from util.make_list import str_to_list
import sys
import pickle

class chat_history_read():
    """Read annotated chat-history dialogues and build training data.

    Loads a jieba user dictionary and a pre-trained word-embedding table,
    segments each dialogue, and converts the intent / sub-task / detail
    annotations into one-hot label vectors (4 / 10 / 64 classes).  The class
    also builds mask matrices describing which sub-tasks are legal under each
    intent and which detail labels are legal under each sub-task.
    """

    def __init__(self, FLAG):
        """Initialise dictionaries, masks and the embedding table.

        :param FLAG: configuration object; must expose ``input_length``,
            ``embedding_word_limit``, ``dal_data_limit`` and
            ``init_origin_data``  -- TODO(review): confirm full contract.
        """
        self.FLAG = FLAG
        self.logger = create_log(version="test").get_logger()
        jieba.load_userdict("data/dictionary/UserDic.txt")

        # Top-level intent name -> one-hot index (4 classes).
        self.intent = {"国际件": 0, "业务咨询": 1, "查单": 2, "下单": 3}
        # Sub-task name -> one-hot index (10 classes).  The ambiguous
        # "产品名称" sub-task is disambiguated by prefixing the intent name
        # (see get_mask_matrix / simple_concat).
        self.sub_task = {"业务咨询": 0, "国际件产品名称": 1, "反馈需求": 2,
                         "是否满足需求": 3, "咨询产品": 4, "咨询类型": 5,
                         "查单伴随需求": 6, "下单产品名称": 7, "下单结果": 8,
                         "下单伴随需求": 9}

        # intent -> sub-task -> detail label -> one-hot index (64 classes).
        self.sub_task_detail = {}
        self.sub_task_detail["国际件"] = {}
        # NOTE(review): these detail labels mirror the intent names -- looks
        # intentional in the annotation scheme, but verify against the data.
        self.sub_task_detail["国际件"]["业务咨询"] = {"国际件": 0, "业务咨询": 1, "查单": 2, "下单": 3}
        self.sub_task_detail["国际件"]["产品名称"] = {"标快+": 4, "国际特惠": 5, "其他": 6, "国际标快": 7}
        self.sub_task_detail["国际件"]["反馈需求"] = {"托寄物及其他": 8, "时效慢": 9, "清关": 10, "流向少": 11,
                                                "运费贵": 12, "空": 13, "税费": 14}
        self.sub_task_detail["业务咨询"] = {}
        self.sub_task_detail["业务咨询"]["是否满足需求"] = {"可以满足": 15, "不能满足": 16, "空": 17}
        # NOTE(review): "其他产品" and "其他" deliberately share index 19.
        self.sub_task_detail["业务咨询"]["咨询产品"] = {"时令水果": 18, "其他产品": 19, "其他": 19,
                                                 "顺丰特惠": 20, "顺丰标快": 21, "顺丰即日": 22,
                                                 "海鲜水产": 23, "重货包裹": 24, "顺丰次晨": 25, "空": 26}
        self.sub_task_detail["业务咨询"]["咨询类型"] = {"收派员信息": 27, "运费/重量/体积": 28, "微信": 29,
                                                 "托寄物品标准": 30, "月结咨询": 31, "截单时间": 32,
                                                 "快件时效": 33, "其他": 34, "空": 35, "发票配给": 36,
                                                 "代收货款": 37, "保价": 38, "服务范围": 39,
                                                 "其他各服务热线": 40, "营业网点信息": 41}
        self.sub_task_detail["查单"] = {}
        self.sub_task_detail["查单"]["查单伴随需求"] = {"时效": 42, "咨询其他": 43, "快件查询": 44, "其他": 45,
                                                 "收派员、网点信息": 46, "修改收寄方信息": 47, "空": 48,
                                                 "无单号查单": 49}
        self.sub_task_detail["下单"] = {}
        self.sub_task_detail["下单"]["产品名称"] = {"顺丰标快": 50, "无具体产品名称": 51, "其他": 52}
        self.sub_task_detail["下单"]["下单结果"] = {"成功下单_客户不满意": 53, "其他": 54,
                                               "未成功下单-客户咨询": 55, "成功下单": 56,
                                               "未成功下单-客户不满意/我司未满足": 57}
        self.sub_task_detail["下单"]["下单伴随需求"] = {"咨询产品时效": 58, "其他": 59, "咨询产品价格": 60,
                                                 "未咨询仅下单": 61, "空": 62, "咨询产品流向/服务范围": 63}

        self.get_mask_matrix()
        self.read_vectors()
        self.input_length = self.FLAG.input_length

    def get_mask_matrix(self):
        """Build mask matrices used to constrain the task hierarchy.

        ``sub_mask`` (4x10): sub_mask[i][s] == 1 iff sub-task ``s`` is legal
        under intent ``i``.  ``detail_mask`` (10x64): detail_mask[s][d] == 1
        iff detail label ``d`` is legal under sub-task ``s``.
        """
        self.sub_mask = np.zeros((4, 10))
        self.detail_mask = np.zeros((10, 64))

        for intent_name, sub_dic in self.sub_task_detail.items():
            row = self.intent[intent_name]
            for sub_name, detail_dic in sub_dic.items():
                # "产品名称" appears under several intents; its key in
                # self.sub_task is prefixed with the intent name.
                if sub_name == '产品名称':
                    sub_key = intent_name + sub_name
                else:
                    sub_key = sub_name
                col = self.sub_task[sub_key]
                self.sub_mask[row][col] = 1
                for detail_index in detail_dic.values():
                    self.detail_mask[col][detail_index] = 1

    def read_vectors(self):
        """Load the top-n pre-trained word vectors into an embedding matrix.

        Fits a LabelEncoder mapping word -> embedding row index and pickles
        it to ``data/Intermediate_data/word_label.pkl`` so inference code can
        reuse the same ids.  Sets ``self.dim``, ``self.word_label_enc``,
        ``self.embeddings_matrix`` and ``self.embeddings_matrix_rows``.
        """
        topn = self.FLAG.embedding_word_limit
        lines_num = 0
        t = time.time()

        # First pass: collect the vocabulary of the top-n vectors.
        with open("data/dictionary/merge_sgns_bigram_char300.txt",
                  encoding='utf-8', errors='ignore') as f:
            token_list = []
            first_line = True
            for line in f:
                if first_line:
                    # Header line is "<vocab_size> <vector_dim>".
                    first_line = False
                    self.dim = int(line.rstrip().split()[1])
                    continue
                lines_num += 1
                tokens = line.rstrip().split(' ')
                token_list.append(tokens[0])
                # NOTE(review): '>' actually admits topn + 1 words; kept
                # as-is because the matrix sizing below depends on it.
                if lines_num > topn:
                    break

        # Map every word to a stable integer id.
        self.word_label_enc = preprocessing.LabelEncoder()
        token_list = self.word_label_enc.fit_transform(token_list)
        # One extra all-zero row (the last index) is reserved for padding.
        self.embeddings_matrix = np.zeros((lines_num + 1, self.dim))

        # Persist the encoder (protocol 0 kept for compatibility with
        # whatever loads this pickle elsewhere in the project).
        with open("data/Intermediate_data/word_label.pkl", 'wb') as f:
            pickle.dump(self.word_label_enc, f, 0)

        lines_num = 0

        # Second pass: fill each word's embedding row at the id the encoder
        # assigned to it (fit_transform returns ids in file order).
        with open("data/dictionary/merge_sgns_bigram_char300.txt",
                  encoding='utf-8', errors='ignore') as f:
            first_line = True
            for line in f:
                if first_line:
                    first_line = False
                    continue
                index = token_list[lines_num]
                lines_num += 1
                tokens = line.rstrip().split(' ')
                self.embeddings_matrix[index] = np.asarray(
                    [float(x) for x in tokens[1:]])
                if lines_num > topn:
                    break

        self.embeddings_matrix_rows = lines_num
        self.logger.info("The embedding word dic size is "
                         + str(len(self.embeddings_matrix)))
        self.logger.info("The embedding table load cost time is: "
                         + str(time.time() - t))

    # Simple concatenation of dialogue turns into one token sequence.
    def simple_concat(self):
        """Segment every dialogue and build one-hot labels per sample.

        When ``FLAG.init_origin_data`` is truthy, reprocesses the raw JSON
        and caches the result to CSV; otherwise loads the cached CSV.
        Populates ``self.result_df``.
        """
        onedata_limit = self.FLAG.dal_data_limit
        if self.FLAG.init_origin_data:

            with open("data/raw_data/train_data.json", encoding="UTF-8") as jsonfile:
                self.json_data = json.load(jsonfile)

            # NOTE(review): parallel segmentation is POSIX-only; jieba
            # raises on Windows -- confirm the target platform.
            jieba.enable_parallel()
            result_list = []
            self.hit_dic_count = 0
            self.un_hit_dic_count = 0
            # Vocabulary known to the embedding table, for hit/miss stats.
            embedding_table_set = set(self.word_label_enc.classes_)
            sample_num = 0
            t0 = time.time()

            # Corpus vocabulary; diffed against the embedding table below.
            total_word_set = []

            # Part-of-speech flags to drop (punctuation and similar noise).
            skip_flags = ('x', 'y', 'z', 'l', 'm')

            for one_data in self.json_data:
                # NOTE(review): '>' lets onedata_limit + 1 samples through.
                if sample_num > onedata_limit:
                    break
                dialog = one_data["dialog"]
                tasks = one_data["tasks"]
                words_list = []
                char_list = []
                for one_dialog in dialog:
                    for word, characteristic in pseg.cut(one_dialog["text"]):
                        if characteristic not in skip_flags:
                            total_word_set.append(word)
                            words_list.append(word)
                            char_list.extend(word)

                result_dic = {}
                result_dic["words"] = words_list
                result_dic["char"] = char_list
                result_dic["length"] = len(words_list)
                intent = tasks["intent"]
                result_dic["intent"] = intent

                # One-hot encode the three label levels.
                intent_label = np.zeros(4)
                intent_label[self.intent[intent]] = 1
                sub_task_label = np.zeros(10)
                sub_task_label_detail = np.zeros(64)

                for one_subtask in tasks["subtasks"]:
                    subtask_name = one_subtask["subtask_name"]
                    # "产品名称" keys in self.sub_task carry an intent prefix.
                    if subtask_name == '产品名称':
                        subtask_name_alter = intent + subtask_name
                    else:
                        subtask_name_alter = subtask_name
                    sub_task_label[self.sub_task[subtask_name_alter]] = 1
                    for label in one_subtask["subtask_labels"]:
                        detail_index = self.sub_task_detail[intent][subtask_name][label]
                        sub_task_label_detail[detail_index] = 1

                result_dic["intent_label"] = intent_label
                result_dic["sub_task_label"] = sub_task_label
                result_dic["sub_task_label_detail"] = sub_task_label_detail
                result_list.append(result_dic)
                sample_num = sample_num + 1

            # Words seen in the corpus but missing from the embedding table.
            diff = set(total_word_set).difference(embedding_table_set)
            self.logger.info("total sentence process cost time is:" + str(time.time() - t0))

            self.result_df = pd.DataFrame(result_list)

            # Characters are assumed to always be in the embedding
            # vocabulary -- TODO(review): confirm, transform raises otherwise.
            self.result_df["char_label"] = self.result_df["char"].apply(
                lambda x: self.word_label_enc.transform(x))

            def word_label(words):
                # Keep only words present in the embedding table, counting
                # hits and misses, then encode them to integer ids.
                # (Closure over self/diff; the original awkwardly passed
                # self as an explicit parameter.)
                word_in_dic = []
                for word in words:
                    if word in diff:
                        self.un_hit_dic_count = self.un_hit_dic_count + 1
                    else:
                        self.hit_dic_count = self.hit_dic_count + 1
                        word_in_dic.append(word)
                return self.word_label_enc.transform(word_in_dic)

            self.result_df["words_label"] = self.result_df["words"].apply(word_label)

            self.logger.info("hit dictionary num is " + str(self.hit_dic_count))
            self.logger.info("not hit dictionary num is " + str(self.un_hit_dic_count))
            self.result_df.to_csv("data/Intermediate_data/simple_concat_text.csv",
                                  encoding="UTF8", index=False)

        else:
            # Reuse the cached preprocessing result.
            self.result_df = pd.read_csv("data/Intermediate_data/simple_concat_text.csv")

    def get_train_data(self, test_size=0.2):
        """Pad/truncate encoded sentences and split into train/test sets.

        :param test_size: fraction of samples held out for testing.
        :returns: ``X_train, X_test`` plus train/test label arrays for the
            intent, sub-task and detail levels (8 arrays total).
        """
        x_data = self.result_df["words_label"].tolist()
        x_data_padding = []
        # Index of the reserved all-zero embedding row, used as padding.
        reserve = self.embeddings_matrix.shape[0] - 1
        for one_data in x_data:
            # Rows loaded from the CSV cache come back as strings.
            if isinstance(one_data, str):
                one_data = str_to_list(one_data)
            # ndarray / Series -> plain list.
            if not isinstance(one_data, list):
                one_data = one_data.tolist()

            if len(one_data) > self.input_length:
                x_data_padding.append(one_data[:self.input_length])
            else:
                x_data_padding.append(
                    one_data + [reserve] * (self.input_length - len(one_data)))

        x_data_padding = np.asarray(x_data_padding)

        def _label_matrix(column):
            # One-hot label columns may be cached as strings like
            # "[0. 1. 0. 0.]"; str_to_list parses them back to numbers.
            return np.asarray([str_to_list(i, ".")
                               for i in self.result_df[column].tolist()])

        intent_label = _label_matrix("intent_label")
        sub_task_label = _label_matrix("sub_task_label")
        sub_task_label_detail = _label_matrix("sub_task_label_detail")

        # Identical random_state keeps the three splits row-aligned.
        X_train, X_test, y_train_intent_label, y_test_intent_label = train_test_split(
            x_data_padding, intent_label, test_size=test_size, random_state=42)
        _, _, y_train_sub_task, y_test_sub_task = train_test_split(
            x_data_padding, sub_task_label, test_size=test_size, random_state=42)
        _, _, y_train_detail, y_test_detail = train_test_split(
            x_data_padding, sub_task_label_detail, test_size=test_size, random_state=42)

        return (X_train, X_test, y_train_intent_label, y_test_intent_label,
                y_train_sub_task, y_test_sub_task, y_train_detail, y_test_detail)

    def statistics(self):
        """Print length statistics and return per-intent sentence counts."""
        print(self.result_df["length"].describe())
        sta = self.result_df.groupby(["intent"], as_index=False)["length"].count()
        # Previously computed and dropped; returning it makes it usable.
        return sta
        
if __name__ == '__main__':

    # Bug fix: the original called chat_history_read() with no argument,
    # which raises TypeError because __init__ requires a FLAG config object.
    # Build a minimal stand-in exposing the four attributes the class reads.
    from types import SimpleNamespace

    # TODO(review): replace these placeholder values with the project's real
    # FLAGS / argparse configuration.
    flags = SimpleNamespace(
        input_length=50,             # padded token-sequence length
        embedding_word_limit=10000,  # top-n embedding rows to load
        dal_data_limit=sys.maxsize,  # max dialogues to process
        init_origin_data=True,       # rebuild from raw JSON, not cached CSV
    )

    chat_history_read_ins = chat_history_read(flags)
    chat_history_read_ins.simple_concat()
    chat_history_read_ins.get_train_data()


