# encoding: utf-8

import os
import json
import random
import re

import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader

# Disable the HF tokenizers library's internal thread-pool parallelism;
# presumably set to avoid the fork-related deadlock warning when these
# datasets are used with multi-worker DataLoaders — confirm if workers change.
os.environ["TOKENIZERS_PARALLELISM"] = "false"


class PretrainDataset(Dataset):
    """Line-per-sample dataset for language-model pretraining.

    Each line of ``file_path`` is wrapped as ``<bos>text<eos>``, tokenized,
    truncated/padded to ``max_length``, and served as a next-token-prediction
    pair ``(X, Y)`` plus a ``loss_mask`` that zeroes out padding positions.
    """

    def __init__(self, file_path, tokenizer, max_length=512):
        super().__init__()
        self.tokenizer = tokenizer
        self.max_length = max_length
        # Token id used to pad sequences up to max_length; padded positions
        # are excluded from the loss via loss_mask.
        self.padding = 0
        self.datas = []
        self.load_datas(file_path)

    def load_datas(self, file_path: str):
        """Read one training text per line and wrap it with BOS/EOS markers."""
        with open(file_path, "r", encoding="utf-8") as f:
            for line in f:
                # Bug fix: strip the trailing newline so the EOS token
                # directly follows the text instead of following "\n".
                line = line.strip()
                if not line:
                    # Skip blank lines: they would yield useless
                    # "<bos><eos>"-only samples.
                    continue
                text = f"{self.tokenizer.bos_token}{line}{self.tokenizer.eos_token}"
                self.datas.append(text)

    def __len__(self):
        return len(self.datas)

    def __getitem__(self, index: int):
        """Return (X, Y, loss_mask) int64 tensors, each of length max_length - 1."""
        text = self.datas[index]
        input_ids = self.tokenizer(text).data["input_ids"][:self.max_length]

        text_len = len(input_ids)
        # Remaining room up to max_length is filled with padding ids.
        padding_len = self.max_length - text_len
        input_ids = input_ids + [self.padding] * padding_len
        # 0 means the position does not contribute to the loss:
        # text_len - 1 real targets remain after the one-step shift below,
        # followed by padding-only targets.
        loss_mask = np.array([1] * (text_len - 1) + [0] * padding_len, dtype=np.int64)
        token_ids = np.array(input_ids, dtype=np.int64)
        # Shift by one: each position of X predicts the next token in Y.
        X = token_ids[:-1]
        Y = token_ids[1:]

        return torch.from_numpy(X), torch.from_numpy(Y), torch.from_numpy(loss_mask)


class SFTDataset(Dataset):
    """Supervised fine-tuning dataset over JSON-lines chat records.

    Each line of ``file_path`` is a JSON object with "instruction", "input"
    and "output" fields. The record is rendered through the tokenizer's chat
    template, tokenized, truncated/padded to ``max_length``, and served as a
    shifted ``(X, Y)`` pair. ``loss_mask`` is 1 only on answer tokens (those
    after the ``'<s>assistant'`` marker), so neither the prompt nor padding
    contributes to the loss.
    """

    def __init__(self, file_path, tokenizer, max_length=512, prompt_max_len=512, answer_max_len=256):
        super().__init__()
        self.max_length = max_length
        self.prompt_max_len = prompt_max_len
        self.answer_max_len = answer_max_len
        self.tokenizer = tokenizer
        # Token id used to pad sequences up to max_length.
        self.padding = 0
        # Token ids of the assistant-turn marker; everything up to and
        # including the marker is treated as prompt (no loss).
        self.bos_id = self.tokenizer('<s>assistant').data['input_ids']
        self.datas = []
        self.load_data(file_path)

    def load_data(self, file_path: str):
        """Load JSON-lines records, silently skipping malformed lines."""
        with open(file_path, "r", encoding="utf-8") as f:
            for line in f:
                try:
                    self.datas.append(json.loads(line))
                except json.JSONDecodeError:
                    # Narrowed from a blanket `except Exception`: only
                    # malformed JSON lines are deliberately skipped.
                    continue

    @staticmethod
    def find_sublist_index(main_list, sub_list) -> int:
        """Return the start index of the LAST occurrence of ``sub_list``
        inside ``main_list``, or -1 if it does not occur."""
        last_index = -1
        for i in range(len(main_list) - len(sub_list) + 1):
            if main_list[i:i + len(sub_list)] == sub_list:
                last_index = i
        return last_index

    def __len__(self):
        return len(self.datas)

    def __getitem__(self, item):
        """Return (X, Y, loss_mask) int64 tensors, each of length max_length - 1."""
        data = self.datas[item]
        instruction = data.get("instruction", "")
        input_ = data.get("input", "")
        output = data.get("output", "")

        messages = [
            {"role": "user", "content": f"{instruction or ''} {input_ or ''}"},
            {"role": "assistant", "content": output},
        ]
        new_prompt = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        input_id = self.tokenizer(new_prompt).data['input_ids'][:self.max_length]

        # Locate the assistant marker; the answer starts right after it.
        marker_index = self.find_sublist_index(input_id, self.bos_id)
        if marker_index == -1:
            # Bug fix: previously a missing marker (e.g. truncated away by
            # max_length) made question_length = len(bos_id) - 1, so the loss
            # was wrongly computed on prompt tokens. Mask the whole sequence
            # instead.
            question_length = len(input_id)
        else:
            question_length = marker_index + len(self.bos_id)
        # Remaining room up to max_length is filled with padding ids.
        padding_len = self.max_length - len(input_id)
        input_id = input_id + [self.padding] * padding_len
        answer_len = len(input_id) - question_length - padding_len
        # 0 means the position does not contribute to the loss.
        loss_mask = [0] * question_length + [1] * answer_len + [0] * padding_len

        token_ids = np.array(input_id, dtype=np.int64)
        # Shift by one: X predicts the next token Y; drop the first mask
        # entry so the mask stays aligned with the shifted targets Y.
        np_x = token_ids[:-1]
        np_y = token_ids[1:]
        loss_mask = np.array(loss_mask[1:], dtype=np.int64)

        return torch.from_numpy(np_x), torch.from_numpy(np_y), torch.from_numpy(loss_mask)
