import json
import pdb
import random

import torch


class BaseExample(object):
    """Base container for one labelled example.

    Subclasses are expected to set ``self.text_list`` in their constructor
    and override :meth:`tokenize`; they may optionally set
    ``self.example_id`` for a more informative ``repr``.
    """

    def __init__(self, label) -> None:
        # Gold label for this example (the loaders below store 0/1 ints).
        self.label = label

    def __repr__(self) -> str:
        # None of the visible loaders ever assigns `example_id`; fall back to
        # 'unknown' instead of raising AttributeError on every repr() call.
        return f'{getattr(self, "example_id", "unknown")}: {self.label}'

    def tokenize(self, tokenizer, args):
        """Convert this example into model features.

        Abstract: the base class defines no `text_list`, so there is nothing
        meaningful to encode here (the previous body encoded an undefined
        attribute and discarded the result).
        """
        raise NotImplementedError('subclasses must implement tokenize()')

    @classmethod
    def load_from(cls):
        """Alternate-constructor stub; concrete loaders live on subclasses."""
        pass


class BaselineExample(BaseExample):
    """A plain sentence-pair example, encoded as a single text pair."""

    def __init__(self, label, text_list) -> None:
        super().__init__(label)
        # Two lowercased sentences: [sentence1, sentence2].
        self.text_list = text_list

    def tokenize(self, tokenizer, args):
        """Encode the sentence pair into one padded/truncated feature dict."""
        return tokenizer.encode_plus(
            *self.text_list,
            add_special_tokens=True,
            max_length=args.max_seq_len,
            padding='max_length',
            truncation=True,
            return_tensors='pt',
        )

    @classmethod
    def load_from_json(cls, json_obj):
        """Build an example from one JSON record, binarising `label_2`."""
        label = 1 if json_obj['label_2'] != '0' else 0
        # entity field intentionally unused here (kept from original).
        sentences = [json_obj['sentence1'].lower(), json_obj['sentence2'].lower()]
        return cls(label, sentences)


class MergeExample(BaseExample):
    """Sentence pair enriched with commonsense snippets and an entity description."""

    def __init__(self, label, text_list) -> None:
        super(MergeExample, self).__init__(label)
        # [enriched_sentence1, enriched_sentence2, [sentence1, sentence2]]
        self.text_list = text_list

    def tokenize(self, tokenizer, args):
        """Encode the two enriched inputs as a batch, and the raw pair separately.

        Returns:
            (feature_dict12, feature_dict3): batch encoding of the two enriched
            sentences (max length `args.max_seq_len`) and a pair encoding of
            the raw sentences (max length `args.max_sentence_len`).
        """
        max_seq_len = args.max_seq_len
        max_sentence_len = args.max_sentence_len
        input_1, input_2, input_3 = self.text_list
        # make_text() inserted literal "[SEP]" markers; swap in this
        # tokenizer's actual separator token before encoding.
        input_1 = input_1.replace("[SEP]", tokenizer.sep_token).lower()
        input_2 = input_2.replace("[SEP]", tokenizer.sep_token).lower()
        input_12 = [input_1, input_2]

        feature_dict12 = tokenizer.batch_encode_plus(input_12, max_length=max_seq_len, add_special_tokens=True, padding="max_length", truncation=True, return_tensors='pt')
        feature_dict3 = tokenizer.encode_plus(*input_3, add_special_tokens=True, max_length=max_sentence_len, padding='max_length', truncation=True, return_tensors='pt')
        return feature_dict12, feature_dict3

    @staticmethod
    def make_text(sentence1, sentence2, sent1_cs, sent2_cs, concat_cs, entity_desc):
        '''
        Build the three model inputs:
        [CLS] s1 [SEP] first-2 sent1_cs [SEP] first-2 concat_cs [SEP] entity_desc [SEP]
        [CLS] s2 [SEP] first-2 sent2_cs [SEP] first-2 concat_cs [SEP] entity_desc [SEP]
        [CLS] s1 [SEP] s2 [SEP]
        '''
        # Bug fix: concat_cs[:2] was interpolated directly into the f-string,
        # embedding the Python list repr ("['...', '...']") into the model
        # input; join it with spaces like sent1_cs/sent2_cs are.
        concat_text = ' '.join(concat_cs[:2])
        # NOTE(review): entity_desc is also loaded from a 'cs_list' field and
        # may likewise be a list; if so it is still interpolated via its repr
        # here — confirm its type against the caller before changing.
        input_1 = f"{sentence1} [SEP] {' '.join(sent1_cs[:2])} [SEP] {concat_text} [SEP] {entity_desc}"
        input_2 = f"{sentence2} [SEP] {' '.join(sent2_cs[:2])} [SEP] {concat_text} [SEP] {entity_desc}"
        input_3 = [sentence1, sentence2]

        return [input_1, input_2, input_3]

    @classmethod
    def load_from_json(cls, json_obj, concat_case, single_case, entity_case):
        """Assemble a MergeExample from a record plus its commonsense lookups."""
        label = 0 if json_obj['label_2'] == '0' else 1
        sentence1 = json_obj['sentence1'].lower()
        sentence2 = json_obj['sentence2'].lower()

        sent1_cs = single_case['s1_cs_list']
        sent2_cs = single_case['s2_cs_list']
        concat_cs = concat_case['cs_list']
        entity_desc = entity_case['cs_list']

        text_list = cls.make_text(sentence1, sentence2, sent1_cs, sent2_cs, concat_cs, entity_desc)
        return cls(label, text_list)