# -*- coding:utf8 -*-
# @Time : 2022/10/20 5:38 下午
# @Author : WanJie Wu

import json
from typing import Dict, Tuple
import pandas as pd
from itertools import product
from torch.utils.data import DataLoader, Dataset


class SupTrainDataset(Dataset):
    """
    Supervised dataset based on the NLI triplet format: each item is an
    (origin, entailment, contradiction) sentence triple.
    """
    def __init__(self, tokenizer, data_path: str, max_seq_length: int):
        self.tokenizer = tokenizer
        self.data_path = data_path  # expected layout: .../<task_type>/<file>
        self.max_seq_length = max_seq_length
        self.data = self.__read_data()

    def __len__(self):
        return len(self.data)

    def train_text2ids(self, context: Tuple):
        """Tokenize an (origin, entailment, contradiction) triple as a batch of 3."""
        return self.tokenizer(
            text=[context[0], context[1], context[2]],
            max_length=self.max_seq_length,
            truncation=True,
            padding="max_length",
            return_tensors="pt"
        )

    def __getitem__(self, index: int):
        return self.train_text2ids(self.data[index])

    @staticmethod
    def data_convert(data_lst):
        """Group rows by origin sentence and emit every
        (origin, entailment, contradiction) combination.

        Each row is {"origin": ..., "target": ..., "type": ...}; origins that
        lack either an "entailment" or a "contradiction" target are dropped.
        """
        data = list()
        mid_data = dict()
        for item in data_lst:
            # mid_data[origin][label] -> list of target sentences
            targets = mid_data.setdefault(item["origin"], {})
            targets.setdefault(item["type"], []).append(item["target"])
        for key, val in mid_data.items():
            # Check both labels explicitly: a bare key-count check would raise
            # KeyError below if an unexpected label slipped through the reader.
            if "entailment" not in val or "contradiction" not in val:
                continue
            data.extend(product([key], val["entailment"], val["contradiction"]))
        return data

    def __read_data(self):
        """Read tab-separated NLI rows (origin \\t target \\t label), dropping
        malformed lines, empty fields, and "neutral" pairs."""
        data = []
        # The parent directory name encodes the dataset/task type.
        task_type = self.data_path.split("/")[-2]
        with open(self.data_path, "r", encoding="utf8") as f:
            if task_type in ["Chinese-MNLI", "Chinese-SNLI", "CINLID", "OCNLI"]:
                for line in f:  # stream instead of materializing readlines()
                    split_line = line.strip().split("\t")
                    if len(split_line) != 3:
                        continue
                    if split_line[-1] == "neutral":
                        continue
                    if not split_line[1] or not split_line[2]:
                        continue
                    data.append({"origin": split_line[0], "target": split_line[1], "type": split_line[2]})
            else:
                raise NotImplementedError(f"对应类型{task_type}没有实现!")
        return self.data_convert(data)


class UnSupTrainDataset(Dataset):
    """
    Unsupervised dataset (e.g. STS-B): each item is a single sentence that
    gets tokenized twice, so dropout yields two views of the same input.
    """
    def __init__(self, tokenizer, data_path: str, max_seq_length: int):
        self.tokenizer = tokenizer
        self.data_path = data_path  # expected layout: .../<task_type>/<file>
        self.max_seq_length = max_seq_length
        self.data = self.__read_data()

    def __len__(self):
        return len(self.data)

    def train_text2ids(self, context):
        """Tokenize the same sentence twice (positive-pair construction)."""
        return self.tokenizer(
            text=[context, context],
            max_length=self.max_seq_length,
            truncation=True,
            padding="max_length",
            return_tensors="pt"
        )

    def __getitem__(self, index: int):
        return self.train_text2ids(self.data[index])

    def __read_data(self):
        """Load the first sentence of every row; empty sentences are skipped."""
        data = []
        # The parent directory name encodes the dataset/task type.
        task_type = self.data_path.split("/")[-2]
        if task_type in ["AFQMC", "Chinese-STS-B", "LCQMC", "OPPO-xiaobu", "PKU-Paraphrase-Bank"]:
            with open(self.data_path, "r", encoding="utf8") as f:
                for line in f:  # stream instead of materializing readlines()
                    sentence = line.strip().split("\t")[0]
                    if not sentence:
                        # Drop blank lines/empty first columns, matching the
                        # PAWS-X branch instead of emitting "" samples.
                        continue
                    data.append(sentence)
        elif task_type == "PAWS-X":
            df = pd.read_csv(self.data_path, sep="\t")
            df = df.fillna("")
            for row in df.itertuples():
                sentence1 = row.sentence1.strip()
                if not sentence1:
                    continue
                data.append(sentence1)
        else:
            raise NotImplementedError(f"对应类型{task_type}没有实现!")
        return data


class DevDataSet(Dataset):
    """
    Evaluation dataset: yields (sentence_1 encodings, sentence_2 encodings,
    label) so a Spearman correlation can be computed between model similarity
    scores and the gold labels.
    """
    def __init__(self, tokenizer, data_path: str, max_seq_length: int):
        self.tokenizer = tokenizer
        self.data_path = data_path  # expected layout: .../<task_type>/<file>
        self.max_seq_length = max_seq_length
        self.data = self.__read_data()

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        context = self.data[index]
        sentence_1 = self.tokenizer(
            text=context[0],
            max_length=self.max_seq_length,
            truncation=True,
            padding="max_length",
            return_tensors="pt",
        )
        sentence_2 = self.tokenizer(
            text=context[1],
            max_length=self.max_seq_length,
            truncation=True,
            padding="max_length",
            return_tensors="pt",
        )
        spearman = context[2]  # integer similarity label for this pair
        return sentence_1, sentence_2, spearman

    def __read_data(self):
        """Read tab-separated rows (sentence1 \\t sentence2 \\t label)."""
        data = []
        # The parent directory name encodes the dataset/task type.
        task_type = self.data_path.split("/")[-2]
        with open(self.data_path, mode="r", encoding="utf8") as f:
            if task_type in ["Chinese-STS-B", "AFQMC"]:
                for line in f:  # stream instead of materializing readlines()
                    line_split = line.strip().split("\t")
                    if len(line_split) != 3:
                        # Skip blank/malformed lines instead of raising
                        # IndexError, mirroring SupTrainDataset's guard.
                        continue
                    data.append((line_split[0], line_split[1], int(line_split[2])))
            else:
                raise NotImplementedError(f"对应类型{task_type}没有实现!")
        return data


def wrapper_dataset(dataset, batch_size, shuffle):
    """Wrap *dataset* in a DataLoader with the given batch size and shuffling."""
    return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle)


def init_data_loader(tokenizer, train_path, dev_path, test_path, batch_size, max_seq_length, mode):
    """Build the train/dev/test DataLoaders.

    *mode* == "sup" selects the NLI-triplet training set; any other value
    selects the unsupervised single-sentence training set. Dev/test always
    use DevDataSet. The training loader shuffles; dev/test do not.
    """
    train_cls = SupTrainDataset if mode == "sup" else UnSupTrainDataset
    train_loader = wrapper_dataset(train_cls(tokenizer, train_path, max_seq_length), batch_size, True)
    dev_loader = wrapper_dataset(DevDataSet(tokenizer, dev_path, max_seq_length), batch_size, False)
    test_loader = wrapper_dataset(DevDataSet(tokenizer, test_path, max_seq_length), batch_size, False)
    return train_loader, dev_loader, test_loader
