# Prepare (download, parse, tokenize and cache) the data to speed up training.
import json
import os
import random
from typing import Union

import numpy as np
import torch
from mindify.kaggle import kaggle_api
from mindify.nlp import Alphabet
from pytorch_lightning import LightningDataModule
from pytorch_lightning.utilities.types import TRAIN_DATALOADERS, EVAL_DATALOADERS
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import PreTrainedTokenizer, BertTokenizer
from config.config import Config
from models.common import *


class ImcsDacDataModule(LightningDataModule):
    """Lightning data module for the IMCS-DAC dialogue-act classification task.

    Each sample is one complete conversation, so all dataloaders use
    ``batch_size=1`` together with a collate function that tokenizes a single
    conversation at a time.
    """

    def __init__(self, config, tokenizer):
        """
        :param config: project config; must provide ``max_seq_length`` and
            ``num_workers``.
        :param tokenizer: a HuggingFace tokenizer used to encode sentences.
        """
        super().__init__()

        self.config = config
        self.tokenizer = tokenizer
        self.max_seq_length = config.max_seq_length
        self.num_workers = config.num_workers

        # Vocabularies mapping dialogue-act labels / speaker names to int ids.
        self.label_alphabet = Alphabet()
        self.speaker_alphabet = Alphabet()

        self.dataset_path = None
        self.train_dataset = None
        self.val_dataset = None
        self.predict_dataset = None

        # Memoized tokenization results, keyed by conversation id
        # (see collate_sample).
        self.cache = {}

        # Eagerly download and load the data in __init__ so that the label /
        # speaker alphabets (num_labels, num_speakers) are populated before
        # the model is constructed.
        self.prepare_data()
        self.setup(None)

    def prepare_data(self) -> None:
        """Download (and unzip) the Kaggle dataset once; idempotent."""
        if self.dataset_path is None:
            self.dataset_path = kaggle_api.get_dataset("cnjameshu/imcs-dac", unzip=True)

    def setup(self, stage: Union[str, None]) -> None:
        """Load the train/dev/test splits once, regardless of ``stage``."""
        if self.train_dataset is None:
            self.train_dataset = self.load_samples('train')
            self.val_dataset = self.load_samples('dev')
            self.predict_dataset = self.load_samples('test')

    def train_dataloader(self) -> TRAIN_DATALOADERS:
        """Shuffled training dataloader; one conversation per batch."""
        return DataLoader(self.train_dataset,
                          batch_size=1,
                          num_workers=self.num_workers,
                          collate_fn=self.collate_fn,
                          shuffle=True)

    def val_dataloader(self) -> EVAL_DATALOADERS:
        """Validation dataloader; one conversation per batch."""
        return DataLoader(self.val_dataset,
                          batch_size=1,
                          num_workers=self.num_workers,
                          collate_fn=self.collate_fn)

    def predict_dataloader(self) -> EVAL_DATALOADERS:
        """Prediction dataloader over the test split; one conversation per batch."""
        return DataLoader(self.predict_dataset,
                          batch_size=1,
                          num_workers=self.num_workers,
                          collate_fn=self.collate_fn)

    def load_samples(self, dataset_type):
        """Load one split (``train``/``dev``/``test``) and convert every
        conversation into a sample dict via :meth:`process_conversation`."""
        with open(os.path.join(self.dataset_path, f"imcs-dac/IMCS-DAC_{dataset_type}.json"), encoding="UTF-8") as fp:
            data = json.load(fp)

        return [self.process_conversation(cid, dialogues) for cid, dialogues in tqdm(data.items(), desc=dataset_type)]

    def process_conversation(self, cid, dialogues):
        """Turn one conversation's dialogue turns into a sample dict.

        Looking up labels/speakers here also grows the alphabets as a side
        effect, which is why all splits are loaded eagerly in ``setup``.
        """
        sentences = [dialogue['speaker'] + ': ' + dialogue['sentence'] for dialogue in dialogues]
        # FIX: builtin max() instead of np.max() so the stored value is a
        # plain int (np.max yields a numpy scalar, which later flowed into
        # the tokenizer's max_length argument).
        max_seq_len = max(len(sentence) for sentence in sentences)

        return {
            # conversation id
            'cid': cid,
            # longest sentence (characters), used for padding in collate_sample
            'max_seq_len': max_seq_len,
            'sentences': sentences,
            'label_ids': [self.label_alphabet.lookup(self.normalize_label(dialogue['dialogue_act']))
                          for dialogue in dialogues],
            'speaker_ids': [self.speaker_alphabet.lookup(dialogue['speaker'])
                            for dialogue in dialogues],
            # raw turns, needed at prediction time
            'dialogues': dialogues
        }

    @property
    def num_labels(self):
        """Number of distinct dialogue-act labels seen so far."""
        return self.label_alphabet.size

    @property
    def num_speakers(self):
        """Number of distinct speakers seen so far."""
        return self.speaker_alphabet.size

    def label_weights(self):
        """Inverse-frequency class weights over the training split.

        The most frequent label gets weight 1.0; rarer labels get
        proportionally larger weights.
        """
        counts = np.zeros(self.num_labels, dtype=np.float64)
        for sample in self.train_dataset:
            for label_id in sample['label_ids']:
                counts[label_id] += 1
        # FIX: clamp zero counts to 1 — a label id that never occurs in the
        # training split previously produced a division by zero (inf weight).
        counts = np.maximum(counts, 1.0)
        return torch.tensor(np.max(counts) / counts, dtype=torch.float)

    @classmethod
    def normalize_label(cls, dialogue_act):
        """Map the empty dialogue act to the explicit "Other" label."""
        return "Other" if dialogue_act == "" else dialogue_act

    def collate_sample(self, sample):
        """Tokenize one conversation, memoizing the result by its cid."""
        if sample['cid'] in self.cache:
            return self.cache[sample['cid']]

        # FIX: builtin min() + int() so transformers receives a plain int
        # max_length (previously np.min produced a numpy scalar).
        # NOTE(review): this length is in characters of the raw sentence, not
        # tokens — fine for char-level Chinese tokenizers, verify otherwise.
        max_seq_len = int(min(sample['max_seq_len'], self.max_seq_length))

        tokens = self.tokenizer(sample['sentences'],
                                padding='max_length',
                                max_length=max_seq_len,
                                truncation=True,
                                return_tensors='pt'
                                )
        tokens['speaker_ids'] = torch.tensor(sample['speaker_ids'], dtype=torch.int64)
        tokens['label_ids'] = torch.tensor(sample['label_ids'], dtype=torch.int64)
        # NOTE(review): assumes cid is a numeric string — confirm upstream.
        tokens['cid'] = torch.tensor(int(sample['cid']), dtype=torch.int64)

        self.cache[sample['cid']] = tokens
        return tokens

    def collate_fn(self, batch_samples):
        """Collate hook for DataLoader; the model only supports batch_size=1."""
        assert len(batch_samples) == 1, "模型目前只支持 batch_size = 1"
        return self.collate_sample(batch_samples[0])


if __name__ == '__main__':
    config = Config()

    # FIX: ImcsDacDataModule.__init__ requires (config, tokenizer); the
    # original call passed only `config` and raised a TypeError. The unused
    # `from main import DataArguments` import was dropped as well.
    # TODO(review): take the pretrained model name from `config` if it has one.
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
    datamodule = ImcsDacDataModule(config, tokenizer)

    train_dataloader = datamodule.train_dataloader()

    def describe(label, values):
        # Print summary statistics: mean / variance / std / extremes / percentiles.
        print(label, "均长", np.mean(values), "方差", np.var(values), "标准差", np.std(values),
              "最大", np.max(values), "最小", np.min(values),
              "50% 75% 80% 85% 90% 95% 99%", np.percentile(values, [50, 75, 80, 85, 90, 95, 99]))

    # Sentence-length statistics (characters per sentence).
    sent_lens = [len(sentence) for sample in datamodule.train_dataset for sentence in sample['sentences']]
    describe("句子:", sent_lens)

    # Conversation-length statistics (sentences per conversation).
    sent_nums = [len(sample['sentences']) for sample in datamodule.train_dataset]
    describe("对话:", sent_nums)
