import copy
import ast

import pandas as pd
from sklearn.model_selection import train_test_split



from nlp_tools import utils
from nlp_tools.corpus.utils.dialogue_copurs_loader_task import *




class DialogueCorpusLoader():
    """Load dialogue training data from a CSV file or an in-memory DataFrame.

    The data source must provide a ``text`` column (one dialogue per row,
    turns separated by ``"\\n"``, each turn prefixed with a one-character
    role marker followed by ``":"``) and a ``label`` column (either already
    parsed, or a ``repr`` string that ``ast.literal_eval`` can parse).
    """
    __name__ = "dialogue_corpus_loader"

    def __init__(self, data_path, shuffle: bool = True, split_train_test=False, split_test_rate=0.1, max_length=128):
        '''
        Configure the loader (no I/O happens here; see :meth:`load_data`).
        :param data_path: CSV file path, or a pandas DataFrame used as-is
        :param shuffle: whether to shuffle samples when loading
        :param split_train_test: whether to split into train/test sets
        :param split_test_rate: fraction of samples used for the test set
        :param max_length: maximum context length (in characters)
        :return:
        '''
        self.data_path = data_path
        self.shuffle = shuffle
        self.split_train_test = split_train_test
        self.split_test_rate = split_test_rate
        self.max_length = max_length

    def load_data(self, shuffle: bool = None):
        '''
        Read, parse and (optionally) shuffle / split the corpus.
        :param shuffle: override for the instance-level shuffle flag;
                        ``None`` means "use ``self.shuffle``"
        :return: list of ``(texts, label)`` pairs, or the
                 ``train_test_split`` result when ``split_train_test`` is set
        '''
        # BUG FIX: the old default of True made the self.shuffle fallback
        # below dead code — the constructor's shuffle flag was ignored.
        if shuffle is None:
            shuffle = self.shuffle

        if isinstance(self.data_path, str):
            df = pd.read_csv(self.data_path)
        else:
            df = self.data_path
        sentences = df["text"].tolist()
        labels = df['label'].tolist()

        input_x = []
        input_label = []

        for sen, label in zip(sentences, labels):
            if not isinstance(sen, list):
                # One dialogue per cell: split into turns, drop empty lines.
                sen = [item for item in sen.split("\n") if item != ""]

            if isinstance(label, str):
                # Labels stored as repr strings in CSV, e.g. "[(0, 2, 'k')]".
                label = ast.literal_eval(label)

            sen, label = self.task_special_deal(sen, label)
            input_x.append(sen)
            input_label.append(label)

        if shuffle:
            input_x, input_label = utils.unison_shuffled_copies(input_x, input_label)

        # BUG FIX: previously the pairing used the raw `labels` list from the
        # dataframe when shuffle was off, discarding the parsing and
        # task_special_deal processing done above — use input_label throughout.
        data_union = [(x, y) for x, y in zip(input_x, input_label)]
        if self.split_train_test:
            return train_test_split(data_union, test_size=self.split_test_rate)
        else:
            return data_union

    def task_special_deal(self, input_x, input_y):
        '''
        Hook for task-specific preprocessing; subclasses override this.
        The base implementation is the identity transform.
        :param input_x: list of turn strings
        :param input_y: label object for this dialogue
        :return: possibly transformed ``(input_x, input_y)``
        '''
        return input_x, input_y

    def get_roles(self, texts):
        '''
        Split the leading role marker off every turn.
        Each turn is assumed to start with a one-character role followed by
        ":" (e.g. "a:hello"); the marker and the separator are stripped.
        :param texts: list of turn strings
        :return: ``(roles, stripped_texts)`` — two parallel lists
        '''
        # text[:1] is the role character; text[2:] skips the role and the ":".
        return [text[:1] for text in texts], [text[2:] for text in texts]





class ContrabandClassifyNerDialogueCorpusLoader(DialogueCorpusLoader):
    '''
    Entity-classification corpus loader: converts absolute character-span
    labels into per-sentence spans and builds length-bounded context windows
    around labelled sentences.
    '''
    __name__ = "contraband_classify_ner_dialogue_corpus_loader"

    def __init__(self, data_path, shuffle: bool = True, split_train_test=False, split_test_rate=0.1, generate_type="max_length", max_length=128):
        '''
        Configure the loader.
        :param data_path: CSV file path, or a pandas DataFrame used as-is
        :param shuffle: whether to shuffle samples when loading
        :param split_train_test: whether to split into train/test sets
        :param split_test_rate: fraction of samples used for the test set
        :param generate_type: context-generation strategy; only "max_length"
                              is currently implemented
        :param max_length: maximum context length (in characters)
        :return:
        '''
        super(ContrabandClassifyNerDialogueCorpusLoader, self).__init__(data_path, shuffle, split_train_test, split_test_rate, max_length)
        self.generate_type = generate_type

    def task_dialogue_classify_ner_getlabel(self, text_list, labels):
        '''
        Convert absolute-offset spans into sentence-relative spans.
        :param text_list: list of sentence strings (concatenated, they form
                          the coordinate space the input spans refer to)
        :param labels: list of ``(start, end)`` or ``(start, end, name)``
                       tuples with offsets into the concatenated text
        :return: ``(key_indexs, new_labels)`` — relative ``(start, end)``
                 pairs and, for 3-tuples, the corresponding label names
        '''
        current_index = 0
        key_indexs = []
        new_labels = []
        for text in text_list:
            for label in labels:
                # label format: (start, end, name); keep spans fully contained
                # in the current sentence and rebase them to its start.
                if label[0] >= current_index and label[1] <= current_index + len(text):
                    key_indexs.append((label[0] - current_index, label[1] - current_index))
                    if len(label) == 3:
                        new_labels.append(label[2])

            current_index += len(text)
        return key_indexs, new_labels

    def task_special_deal(self, input_x, input_y):
        '''
        Task-specific preprocessing for this NER-classification task.
        :param input_x: List[str] — the dialogue turns
        :param input_y: List[Tuple[int, int, str]] — absolute-offset spans
        :return: ``(context_windows, labels)``
        '''
        # BUG FIX: task_dialogue_classify_ner_getlabel was called as a bare
        # name, which raised NameError at runtime — it is a method of this
        # class and must be invoked through self.
        key_index, labels = self.task_dialogue_classify_ner_getlabel(input_x, input_y)
        roles, sen = self.get_roles(texts=input_x)
        new_input_x, new_input_y = self.create_predict_data_from_dialogue(sen, roles, labels)

        return new_input_x, new_input_y

    def create_predict_data_from_dialogue(self, dialogue_sentences, role, labels, max_sentence_len=None):
        '''
        Build one context window per labelled sentence.
        :param dialogue_sentences: list of sentence strings (roles stripped)
        :param role: list of role markers, parallel to the sentences
        :param labels: per-index label objects; a window is generated only
                       for indexes whose label is truthy
        :param max_sentence_len: character budget per window; defaults to
                                 ``self.max_length``
        :return: ``(predict_dialogues, labels)``
        '''
        # NOTE(review): `labels` is indexed per sentence here, but
        # task_dialogue_classify_ner_getlabel returns one entry per matched
        # span — if those lengths differ this raises IndexError; confirm the
        # expected label layout against the training data.
        if max_sentence_len is None:
            max_sentence_len = self.max_length

        predict_dialogues = []
        index = 0

        while index < len(dialogue_sentences):
            label = labels[index]
            if label:

                copy_dialogue_sentences = copy.deepcopy(dialogue_sentences)
                if self.generate_type == "max_length":
                    # NOTE(review): `role` is rebound to the returned slice on
                    # every iteration, so it shrinks across windows — verify
                    # this is intended rather than a latent bug.
                    cargo_context_list, role = self.getCargoContextNotExceedMaxLength(copy_dialogue_sentences, index, role, max_sentence_len)
                else:
                    raise TypeError(f"不支持的generate_type:{self.generate_type}")
                predict_dialogues.append(cargo_context_list)
            index += 1
        return predict_dialogues, labels

    def getCargoContextNotExceedMaxLength(self, sentences, index, role, max_length=128):
        '''
        Expand a window around ``sentences[index]`` in both directions,
        stopping before the total character count reaches ``max_length``.
        :param sentences: list of sentence strings
        :param index: center sentence index
        :param role: role list, sliced in lockstep with the sentences
        :param max_length: character budget for the window
        :return: ``(sentences[start:end+1], role[start:end+1])``
        '''
        start_index, end_index = index, index
        current_len = len(sentences[index])

        exceed_max_length = False
        while (start_index > 0 or end_index < len(sentences)) and not exceed_max_length:
            # Grow one sentence to the left if the budget allows.
            if start_index - 1 >= 0:
                if current_len + len(sentences[start_index - 1]) < max_length:
                    current_len += len(sentences[start_index - 1])
                    start_index -= 1
                else:
                    exceed_max_length = True

            # Grow one sentence to the right if the budget allows.
            if end_index + 1 < len(sentences):
                if current_len + len(sentences[end_index + 1]) < max_length:
                    current_len += len(sentences[end_index + 1])
                    end_index += 1
                else:
                    exceed_max_length = True

            # Whole dialogue fits — nothing left to add on either side.
            if start_index == 0 and end_index == len(sentences) - 1:
                break

        return sentences[start_index:end_index + 1], role[start_index:end_index + 1]



if __name__ == '__main__':
    # Smoke test: load the sample contraband corpus with default settings.
    loader = ContrabandClassifyNerDialogueCorpusLoader(data_path=r"/home/fanfanfeng/working/contraband/test.csv")
    loader.load_data()




