import os
import pickle
import numpy as np
from transformers import BertTokenizer
import random

# Per-candidate PStance data files (tweets with target-related words masked).
_CANDIDATE_FILE = {
    'dt': './dataset/pstance/trump.tp-5wd-6.masked',
    'jb': './dataset/pstance/biden.tp-5wd-6.masked',
    'bs': './dataset/pstance/bernie.tp-5wd-6.masked',
}

# Cross-target splits: key '<src>_<dst>' trains on src's file, tests on dst's.
fname = {
    '{0}_{1}'.format(src, dst): {
        'train': _CANDIDATE_FILE[src],
        'test': _CANDIDATE_FILE[dst],
    }
    for src in _CANDIDATE_FILE
    for dst in _CANDIDATE_FILE
    if src != dst
}

# Function words skipped when replacing target tokens with [MASK].
stop_words = set('of is a an'.split())

class Dataset(object):
    """Indexable stance dataset backed by six parallel lists.

    Element ``i`` of every list belongs to sample ``i``: target string,
    tweet text, stance label, and the three tokenizer output tensors.
    """

    def __init__(self, targets, texts, stances, input_idss, attention_masks, token_type_idss):
        self.targets = targets
        self.texts = texts
        self.stances = stances
        self.input_idss = input_idss
        self.attention_masks = attention_masks
        self.token_type_idss = token_type_idss

    def __getitem__(self, index):
        """Return sample ``index`` as a 6-tuple, one entry per column."""
        columns = (self.targets, self.texts, self.stances,
                   self.input_idss, self.attention_masks, self.token_type_idss)
        return tuple(column[index] for column in columns)

    def __len__(self):
        """Number of samples (length of any one of the parallel lists)."""
        return len(self.targets)


class DatesetReader:
    """Loads PStance files (4 lines per sample) and tokenizes them for BERT.

    Each sample occupies four consecutive lines in the data file:
        line 0: raw tweet text
        line 1: pre-masked tweet text
        line 2: stance target (e.g. "donald trump")
        line 3: stance label, an int (shifted by +1 on load so labels are >= 0)
    """

    @staticmethod
    def __read_data__(fnames, tokenizer, maxlen):
        """Read and tokenize every sample from the files in ``fnames``.

        Args:
            fnames: list of paths to 4-lines-per-sample data files.
            tokenizer: HF-style tokenizer called as
                ``tokenizer(target, text, ...)`` producing a mapping with
                'input_ids', 'attention_mask' and 'token_type_ids'.
            maxlen: max tokenized length (pad/truncate to this).

        Returns:
            Six parallel lists: targets, texts, stances, input_idss,
            attention_masks, token_type_idss.
        """
        lines = []
        for path in fnames:
            # `with` guarantees the handle is closed even on read errors.
            with open(path, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
                lines.extend(fin.readlines())

        targets = []
        texts = []
        stances = []
        input_idss = []
        attention_masks = []
        token_type_idss = []

        for i in range(0, len(lines), 4):  # one (text, masked, target, stance) sample per 4 lines
            text = lines[i].lower().strip()
            maskd_text = lines[i + 1].lower().strip()
            target = lines[i + 2].lower().strip()
            stance = int(lines[i + 3].strip())

            # Replace target words (minus stop words) in the pre-masked text
            # with the literal string '[MASK]'.
            # NOTE(review): maskd_text is computed but never passed to the
            # tokenizer below — the tokenizer only sees the raw `text`.
            # Preserved as-is; confirm whether this masking should be used.
            for word in target.split(' '):
                if word not in stop_words:
                    maskd_text = maskd_text.replace(word, '[MASK]')

            org_token = tokenizer(target,
                                  text,
                                  add_special_tokens=True,
                                  max_length=maxlen,
                                  return_tensors='pt',
                                  padding='max_length',
                                  truncation=True)

            targets.append(target)
            texts.append(text)
            stances.append(stance + 1)  # shift so labels are never negative
            input_idss.append(org_token['input_ids'])
            attention_masks.append(org_token['attention_mask'])
            token_type_idss.append(org_token['token_type_ids'])

        return targets, texts, stances, input_idss, attention_masks, token_type_idss

    def __init__(self, opt, tokenizer, dataset='dt_hc'):
        """Build train/test (and optional val) Datasets for ``dataset``.

        Args:
            opt: options object; only ``opt.maxlen`` is read here.
            tokenizer: tokenizer forwarded to ``__read_data__``.
            dataset: key into the module-level ``fname`` table.
                NOTE(review): the default 'dt_hc' is not a key of ``fname``
                as currently defined — callers must pass a valid key.
        """
        print("preparing {0} dataset ...".format(dataset))

        self.tokenizer = tokenizer

        self.train_data = Dataset(*DatesetReader.__read_data__(
            [fname[dataset]['train']], tokenizer, opt.maxlen))

        self.test_data = Dataset(*DatesetReader.__read_data__(
            [fname[dataset]['test']], tokenizer, opt.maxlen))

        # The 'val' split is optional: no entry in `fname` currently defines
        # one. Catch only the expected failures (missing key / missing file)
        # instead of a bare `except:` that would hide real bugs.
        try:
            self.val_data = Dataset(*DatesetReader.__read_data__(
                [fname[dataset]['val']], tokenizer, opt.maxlen))
        except (KeyError, FileNotFoundError):
            pass
