# -*- coding: utf-8 -*-
# @Time    : 2023/5/17 3:31 下午
# @Author  : Wu WanJie

import os
import torch
import random
import pandas as pd
from torch.utils.data import Dataset

# Names of the text-augmentation strategies that appear in the data's "style"
# column: four EDA operations (synonym replace / random insert / swap / delete)
# plus entity-, synonym- and character-level replacements and a built-in method.
# These are runtime values matched against the CSV — do not translate/rename.
STYLE_NAMES = [
    "EDA_同义词替换",
    "EDA_随机插入",
    "EDA_随机交换",
    "EDA_随机删除",
    "等价实体替换",
    "同义词替换",
    "近义字替换",
    "字删除",
    "置换邻近的字",
    "等价字替换",
    "内置方法"
]

# Named subsets of STYLE_NAMES.  ClfDataset(style=...) uses one of these keys
# to choose which augmentation styles may be sampled when a label has fewer
# than `min_count` positive examples.
STYLE_MAPPING = {
    "default": STYLE_NAMES,
    "style_1": ["EDA_同义词替换", "EDA_随机交换", "EDA_随机插入", "EDA_随机删除"],

    "style_2": ["EDA_同义词替换", "EDA_随机交换", "EDA_随机插入"],
    "style_3": ["EDA_同义词替换", "EDA_随机交换", "EDA_随机删除"],
    "style_4": ["EDA_同义词替换", "EDA_随机插入", "EDA_随机删除"],

    "style_5": ["EDA_同义词替换", "EDA_随机交换"],
    "style_6": ["EDA_同义词替换", "EDA_随机插入"],
    "style_7": ["EDA_同义词替换", "EDA_随机删除"],

    "style_8": ["EDA_同义词替换", "等价实体替换", "同义词替换"],
    "style_9": ["内置方法"],

}


def load_label_system(readme_path):
    """Load the label system from a whitespace-separated readme file.

    Each non-empty line starts with a label name; any further tokens on the
    line (e.g. an example count) are ignored.  Labels get contiguous integer
    ids in file order.

    Args:
        readme_path: path to the label file (utf-8 encoded).

    Returns:
        dict with keys "label2id" ({label: id}) and "id2label" ({id: label}).
    """
    # Fix: the original also parsed `int(tokens[1])` into a `count_lst` that
    # was never used or returned — dead work that crashed on one-token lines.
    labels = []
    with open(readme_path, "r", encoding="utf8") as f:
        for line in f:  # iterate lazily instead of readlines()
            stripped = line.strip()
            if not stripped:
                continue
            labels.append(stripped.split()[0])

    return {
        "label2id": {key: i for i, key in enumerate(labels)},
        "id2label": {i: key for i, key in enumerate(labels)},
    }


class ClfDataset(Dataset):
    """Text-classification dataset read from a CSV file.

    In "train" mode the CSV is resampled so scarce labels are topped up from
    augmented rows (rows whose "style" column differs from "原始文本", i.e.
    original text), restricted to the styles in ``STYLE_MAPPING[style]``, and
    per-class loss weights are computed.  In any other mode the CSV is used
    as-is and ``weight`` is None.
    """

    def __init__(self, data_dir, file_name, mode="train", task_type="multi", style=None, target_name="", min_count=500):
        # data_dir:    directory containing `file_name` and "readme.txt" (label list)
        # mode:        "train" triggers resampling; anything else loads data as-is
        # task_type:   "multi" = multi-label over all readme.txt labels;
        #              otherwise binary over the single `target_name` column
        # style:       key into STYLE_MAPPING.  NOTE(review): the default None is
        #              not a STYLE_MAPPING key and raises KeyError during train
        #              resampling — confirm callers always pass a valid style.
        # min_count:   minimum number of positive examples per label
        self.data_dir = data_dir
        self.file_name = file_name
        self.min_count = min_count
        self.style = style
        self.target_name = target_name
        self.task_type = task_type
        self.label_map = load_label_system(os.path.join(data_dir, "readme.txt"))
        if mode == "train":
            if self.task_type == "multi":
                tmp_data = self.multi_train_dataset()
            else:
                tmp_data = self.binary_train_dataset()
            self.clf_data = tmp_data["dataset"]
            self.weight = tmp_data["weight"]  # per-class loss weights
        else:
            self.clf_data = self.read_eval_dataset()
            self.weight = None  # no class weighting outside training

    def __getitem__(self, idx):
        # Each item is {"text": str, "labels": int or list[int]} (see _get_dataset).
        return self.clf_data[idx]

    def __len__(self) -> int:
        return len(self.clf_data)

    def read_eval_dataset(self):
        """Read the evaluation data without any resampling."""
        df = pd.read_csv(os.path.join(self.data_dir, self.file_name))
        if self.task_type == "multi":
            # All labels in id order, so multi-hot label lists line up with ids.
            labels = [self.label_map["id2label"][idx] for idx in range(len(self.label_map["id2label"]))]
        else:
            labels = [self.target_name]
        return self._get_dataset(df, labels)

    def binary_train_dataset(self):
        """Build a resampled binary training set for `target_name`.

        Returns {"dataset": list of examples, "weight": [neg_weight, pos_weight]}.
        """
        df = pd.read_csv(os.path.join(self.data_dir, self.file_name))
        # "原始文本" marks original (non-augmented) rows.
        ori_data = df[df["style"] == "原始文本"]
        ori_count = ori_data[self.target_name].sum()  # number of original positives

        if ori_count >= self.min_count:
            # Enough positives: keep original data; inverse-frequency weights.
            return {
                "dataset": self._get_dataset(ori_data, [self.target_name]),
                "weight": [len(ori_data)/((len(ori_data)-ori_count)*2), len(ori_data)/(ori_count*2)]
            }

        # Not enough positives: top up from augmented positives of allowed styles.
        create_data = df[df["style"] != "原始文本"]
        candidates = create_data[create_data[self.target_name] == 1]
        candidates = candidates[candidates["style"].isin(STYLE_MAPPING[self.style])]
        # NOTE(review): this draws `ori_count` augmented rows (2*ori_count
        # positives total); `self.min_count - ori_count` may have been intended
        # to reach exactly `min_count` positives — confirm.
        samples = candidates.sample(ori_count, replace=True)
        negative = ori_data[ori_data[self.target_name] == 0].sample(2 * self.min_count, replace=True)
        positive = ori_data[ori_data[self.target_name] == 1]
        data1 = pd.concat([negative, positive], axis=0)
        latest_data = pd.concat([data1, samples], axis=0)
        return {
            "dataset": self._get_dataset(latest_data, [self.target_name]),
            # NOTE(review): these weights assume exactly `min_count` positives
            # in `latest_data`, which only holds if the sampling above yields
            # that many — verify against the note above.
            "weight": [len(latest_data) / ((len(latest_data) - self.min_count) * 2), len(latest_data) / (self.min_count * 2)]
        }

    def multi_train_dataset(self):
        """Build a resampled multi-label training set over all labels."""
        df = pd.read_csv(os.path.join(self.data_dir, self.file_name))

        ori_data = df[df["style"] == "原始文本"]
        create_data = df[df["style"] != "原始文本"]

        final_data = None
        for idx, label in enumerate(self.label_map["label2id"].keys()):
            positive_count = ori_data[label].sum()
            if positive_count < self.min_count:
                # Scarce label: top up with augmented positives of allowed styles.
                candidates = create_data[create_data[label] == 1]
                candidates = candidates[candidates["style"].isin(STYLE_MAPPING[self.style])]
                samples = candidates.sample(self.min_count - positive_count, replace=True)

            else:
                # Frequent label: sample negatives instead.
                samples = ori_data[ori_data[label] == 0].sample(self.min_count, replace=True)
            # NOTE(review): `final_data` accumulates only the sampled rows —
            # the original rows themselves are never included; confirm intended.
            if final_data is None:
                final_data = samples
            else:
                final_data = pd.concat([final_data, samples], axis=0)

        weights = list()
        labels = []
        for idx in range(len(self.label_map["id2label"])):
            name = self.label_map["id2label"][idx]
            labels.append(name)
            # NOTE(review): len(self.label_map) is always 2 (keys "label2id"
            # and "id2label"); the number of labels was probably intended here
            # — confirm before relying on these weights.
            weight = len(ori_data) / (ori_data[name].sum() * len(self.label_map))
            weights.append(weight)

        return {
            "dataset": self._get_dataset(final_data, labels),
            "weight": weights
        }

    @staticmethod
    def _get_dataset(df1, labels):
        """Convert a dataframe into a shuffled list of {"text", "labels"} dicts.

        With a single label the "labels" value is a scalar int; otherwise it is
        a list of ints in the order of `labels`.  Spaces are stripped from the
        text.  Shuffles in place via the global `random` RNG.
        """
        clf_data = list()
        for row in df1.itertuples():
            text = getattr(row, "text").replace(" ", "")
            if len(labels) == 1:
                per_labels = int(getattr(row, labels[0]))
            else:
                per_labels = [int(getattr(row, _key)) for _key in labels]
            clf_data.append({
                "text": text,
                "labels": per_labels
            })
        random.shuffle(clf_data)
        return clf_data


class ClfCollate:
    """Batch collator: tokenizes texts and attaches a float32 label tensor."""

    def __init__(self, tokenizer, max_seq_length):
        # tokenizer is expected to accept (text=..., padding=..., truncation=...,
        # max_length=..., return_tensors=...) and return a mapping.
        self.tokenizer = tokenizer
        self.max_seq_length = max_seq_length

    def __call__(self, batch_data):
        """Tokenize a batch of {"text", "labels"} dicts into model inputs."""
        texts, labels = [], []
        for example in batch_data:
            texts.append(example["text"])
            labels.append(example["labels"])
        encoded = self.tokenizer(
            text=texts,
            padding="longest",
            truncation=True,
            max_length=self.max_seq_length,
            return_tensors="pt"
        )
        encoded["labels"] = torch.tensor(labels, dtype=torch.float32)
        return encoded
