#! -*- encoding:utf-8 -*-
"""
@File    :   data_processor.py
@Author  :   Zachary Li
@Contact :   li_zaaachary@163.com
@Dscpt   :   Dataset processors that tokenize ParaDA examples and build PyTorch DataLoaders.
"""
import os
import json
from random import random, sample
from copy import deepcopy
import logging

# Module-level logger setup.
# NOTE(review): the handler was filtered at INFO, but the logger itself was
# never given a level, so it inherited the root default (WARNING) and INFO
# records were dropped before ever reaching the handler.  Setting the logger
# level makes the handler's INFO threshold effective.
logger = logging.getLogger("data processor")
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(name)s - %(message)s', datefmt=r"%y/%m/%d %H:%M")
console.setFormatter(formatter)
logger.addHandler(console)

from tqdm import tqdm
import torch
from torch.utils.data import DataLoader, RandomSampler, TensorDataset

from parada_task.example import *


class ProcessorBase(object):
    """Base class for dataset processors.

    Subclasses populate ``self.parada_examples`` with objects exposing
    ``tokenize(tokenizer, args)`` and a ``label`` attribute, after which
    :meth:`make_dataloader` tokenizes them into a ``TensorDataset`` wrapped
    in a ``DataLoader``.
    """

    def __init__(self, args, dataset_type) -> None:
        self.args = args
        self.dataset_dir = args.dataset_dir
        self.dataset_type = dataset_type
        # Training-style splits use the train batch size; everything else
        # uses the eval/test size.  'conti-trian' is the historical
        # (misspelled) token and is kept for backward compatibility; the
        # corrected spelling 'conti-train' is now accepted as well.
        if self.dataset_type in ('train', 'conti-train', 'conti-trian'):
            self.batch_size = args.train_batch_size
        else:
            self.batch_size = args.evltest_batch_size
        self.raw_parada = []        # raw JSON cases loaded from disk
        self.parada_examples = []   # example objects built from the raw cases

    def load_data(self):
        """Load raw cases and build examples; implemented by subclasses."""
        pass

    def make_dataloader(self, tokenizer, args, shuffle=True, examples=None):
        """Tokenize examples and wrap them in a ``DataLoader``.

        Args:
            tokenizer: tokenizer forwarded to ``example.tokenize``.
            args: run configuration forwarded to ``example.tokenize``.
            shuffle: when True, draw batches with a ``RandomSampler``.
            examples: optional explicit example list; defaults to
                ``self.parada_examples``.  (Previously this parameter was
                accepted but silently ignored.)

        Returns:
            A ``DataLoader`` yielding
            (input_ids, attention_mask, token_type_ids, label) batches.
        """
        if examples is None:
            examples = self.parada_examples
        drop_last = False

        all_input_ids, all_token_type_ids, all_attention_mask = [], [], []
        all_label = []

        for example in tqdm(examples):
            feature_dict = example.tokenize(tokenizer, args)
            # tokenize() returns batched tensors; [0] strips the batch dim
            # — TODO confirm against the Example implementation.
            all_input_ids.append(feature_dict['input_ids'][0])
            all_token_type_ids.append(feature_dict['token_type_ids'][0])
            all_attention_mask.append(feature_dict['attention_mask'][0])
            all_label.append(example.label)

        all_input_ids = torch.stack(all_input_ids)
        all_attention_mask = torch.stack(all_attention_mask)
        all_token_type_ids = torch.stack(all_token_type_ids)
        all_label = torch.tensor(all_label, dtype=torch.long)

        data = (all_input_ids, all_attention_mask, all_token_type_ids, all_label)

        dataset = TensorDataset(*data)
        sampler = RandomSampler(dataset) if shuffle else None
        dataloader = DataLoader(dataset, sampler=sampler, batch_size=self.batch_size, drop_last=drop_last)

        return dataloader

    @staticmethod
    def load_example(case, cs4choice):
        """Build a single example from a raw case; implemented by subclasses."""
        pass


class Baseline_Entity_Processor(ProcessorBase):
    """Baseline processor over the entity-description data.

    NOTE(review): this class was previously also named
    ``Baseline_Processor`` and was immediately shadowed by the second
    definition of that name below, making it unreachable dead code.  It is
    renamed here so both variants are usable; no existing caller could have
    referenced the shadowed name.
    """

    def __init__(self, args, dataset_type) -> None:
        super(Baseline_Entity_Processor, self).__init__(args, dataset_type)

    def load_data(self):
        """Load the raw entity-description cases, then build examples."""
        self.load_parada_entity()
        self.make_example()

    def load_parada_entity(self):
        """Read ``<dataset_type>_exp.json`` under ``entity_description/``."""
        self.raw_parada.clear()
        path = os.path.join(self.dataset_dir, 'entity_description',
                            f"{self.dataset_type}_exp.json")
        # Context manager closes the handle; the previous bare open() leaked it.
        with open(path, 'r', encoding='utf-8') as f:
            self.raw_parada.extend(json.load(f))

    def make_example(self):
        """Convert each raw case into a BaselineExample."""
        for case in self.raw_parada:
            self.parada_examples.append(BaselineExample.load_from_json(case))

    def make_dataloader(self, tokenizer, args, shuffle=True):
        return ProcessorBase.make_dataloader(self, tokenizer, args, shuffle=shuffle, examples=self.parada_examples)


class Baseline_Processor(ProcessorBase):
    """Baseline processor: one example per raw entity-description case."""

    def __init__(self, args, dataset_type) -> None:
        super(Baseline_Processor, self).__init__(args, dataset_type)

    def load_data(self):
        """Load raw cases from disk, then build examples from them."""
        self.load_parada()
        self.make_example()

    def load_parada(self):
        """Read ``<dataset_type>_exp.json`` under ``entity_description/``."""
        self.raw_parada.clear()
        path = os.path.join(self.dataset_dir, 'entity_description',
                            f"{self.dataset_type}_exp.json")
        # Context manager closes the handle; the previous bare open() leaked it.
        with open(path, 'r', encoding='utf-8') as f:
            self.raw_parada.extend(json.load(f))

    def make_example(self):
        """Build a BaselineExample from every raw case."""
        for case in self.raw_parada:
            self.parada_examples.append(BaselineExample.load_from_json(case))

    def make_dataloader(self, tokenizer, args, shuffle=True):
        return ProcessorBase.make_dataloader(self, tokenizer, args, shuffle=shuffle, examples=self.parada_examples)


class Merge_Processor(ProcessorBase):
    """Processor that merges four aligned views of each case
    (raw, concat_sentence, two_sentence, entity_description) into
    MergeExample objects and builds a seven-tensor DataLoader.
    """

    def __init__(self, args, dataset_type) -> None:
        super(Merge_Processor, self).__init__(args, dataset_type)
        self.concat_parade = []   # 'concat_sentence' view of each case
        self.single_parade = []   # 'two_sentence' view of each case
        self.entity_parade = []   # 'entity_description' view of each case

    def load_data(self):
        """Load all four aligned JSON views, then build merged examples."""
        self.load_parade()
        self.load_parade_concat()
        self.load_parade_entity()
        self.load_parade_single()
        self.make_example()

    def _read_json(self, subdir, filename):
        """Load a JSON file under the dataset dir, closing the handle.

        The four load_parade* methods previously opened files without
        closing them; this helper fixes the leak and removes the
        copy-pasted open/load boilerplate.
        """
        path = os.path.join(self.dataset_dir, subdir, filename)
        with open(path, 'r', encoding='utf-8') as f:
            return json.load(f)

    def load_parade(self):
        self.raw_parada.clear()
        self.raw_parada.extend(
            self._read_json('entity_description', f"{self.dataset_type}_exp.json"))

    def load_parade_concat(self):
        self.concat_parade.clear()
        self.concat_parade.extend(
            self._read_json('concat_sentence', f"{self.dataset_type}_commonsense.json"))

    def load_parade_single(self):
        self.single_parade.clear()
        self.single_parade.extend(
            self._read_json('two_sentence', f"{self.dataset_type}_commonsense.json"))

    def load_parade_entity(self):
        self.entity_parade.clear()
        self.entity_parade.extend(
            self._read_json('entity_description', f"{self.dataset_type}_exp.json"))

    def make_example(self):
        """Zip the four case lists into MergeExample objects.

        NOTE(review): the four lists are assumed to be index-aligned (same
        case order in every JSON file) — verify against the data
        generation step; a length mismatch raises IndexError here.
        """
        for index, case in enumerate(self.raw_parada):
            example = MergeExample.load_from_json(
                case,
                self.concat_parade[index],
                self.single_parade[index],
                self.entity_parade[index],
            )
            self.parada_examples.append(example)

    def make_dataloader(self, tokenizer, args, shuffle=True, examples=None):
        """Tokenize merged examples into two feature sets and build a loader.

        Each example tokenizes into two dicts: ``feature_dict3`` (batch dim
        stripped via ``[0]``) and ``feature_dict12`` (stacked as returned —
        presumably already shaped per example; TODO confirm against
        MergeExample.tokenize).

        Args:
            tokenizer: tokenizer forwarded to ``example.tokenize``.
            args: run configuration forwarded to ``example.tokenize``.
            shuffle: when True, draw batches with a ``RandomSampler``.
            examples: optional explicit example list; defaults to
                ``self.parada_examples``.  (Previously accepted but ignored.)

        Returns:
            A ``DataLoader`` yielding
            (input_ids, attention_mask, token_type_ids,
             long_input_ids, long_attention_mask, long_token_type_ids, label).
        """
        if examples is None:
            examples = self.parada_examples
        drop_last = False

        all_input_ids, all_token_type_ids, all_attention_mask = [], [], []
        long_input_ids, long_token_type_ids, long_attention_mask = [], [], []
        all_label = []

        for example in tqdm(examples):
            feature_dict12, feature_dict3 = example.tokenize(tokenizer, args)
            all_input_ids.append(feature_dict3['input_ids'][0])
            all_token_type_ids.append(feature_dict3['token_type_ids'][0])
            all_attention_mask.append(feature_dict3['attention_mask'][0])

            long_input_ids.append(feature_dict12['input_ids'])
            long_token_type_ids.append(feature_dict12['token_type_ids'])
            long_attention_mask.append(feature_dict12['attention_mask'])

            all_label.append(example.label)

        all_input_ids = torch.stack(all_input_ids)
        all_attention_mask = torch.stack(all_attention_mask)
        all_token_type_ids = torch.stack(all_token_type_ids)

        long_input_ids = torch.stack(long_input_ids)
        long_attention_mask = torch.stack(long_attention_mask)
        long_token_type_ids = torch.stack(long_token_type_ids)

        all_label = torch.tensor(all_label, dtype=torch.long)
        data = (all_input_ids, all_attention_mask, all_token_type_ids,
                long_input_ids, long_attention_mask, long_token_type_ids, all_label)

        dataset = TensorDataset(*data)
        sampler = RandomSampler(dataset) if shuffle else None
        dataloader = DataLoader(dataset, sampler=sampler, batch_size=self.batch_size, drop_last=drop_last)

        return dataloader