import torch

from torch.utils import data
from transformers import AutoTokenizer

from .augment import Augmenter
import os
import random
# Pin GPU visibility to device 0 for any process importing this module.
# NOTE(review): a CUDA side effect at import time in a dataset module is
# surprising — confirm this shouldn't live in the training launcher instead.
os.environ["CUDA_VISIBLE_DEVICES"]='0'
# map lm name to huggingface's pre-trained model names
lm_mp = {'roberta': 'roberta-base',
         'distilbert': 'distilbert-base-uncased'}

def get_tokenizer(lm):
    """Resolve ``lm`` to a HuggingFace tokenizer.

    Short names present in ``lm_mp`` (e.g. ``'roberta'``) are expanded to
    their full pre-trained model identifiers; any other value is handed to
    HuggingFace unchanged.
    """
    model_name = lm_mp.get(lm, lm)
    return AutoTokenizer.from_pretrained(model_name)


class DittoDataset(data.Dataset):
    """Entity-matching (EM) dataset of tab-separated text pairs.

    Each input line has the form ``<left>\\t<right>\\t<label>`` where the
    label is 0 (unmatch) or 1 (match).  The constructor can optionally keep
    only a subsample of the data: a uniform random ``percent`` % of the
    examples, the ``percent`` % with the highest entropy scores (active
    learning), or an entropy-ranked, evenly split selection over a binary
    ``tag`` partition.
    """

    def __init__(self,
                 path,
                 max_len=256,
                 size=None,
                 lm='roberta',
                 da=None,
                 en=None,
                 percent=None,
                 tag=None):
        """Build the dataset.

        Args:
            path (str or list of str): path to a TSV file, or the TSV lines
                themselves as a list of strings
            max_len (int): max token-sequence length passed to the tokenizer
            size (int, optional): truncate to the first ``size`` examples
            lm (str): language-model name (a key of ``lm_mp`` or a full
                HuggingFace model identifier)
            da (str, optional): data-augmentation operator; when set,
                ``__getitem__`` also returns an augmented view
            en (list of float, optional): per-example entropy scores used for
                active-learning selection; pruned in place so it stays
                aligned with the surviving examples
            percent (float, optional): percentage (0-100) of examples to keep
            tag (list of int, optional): 0/1 partition tags; together with
                ``en``, the kept budget is split evenly between partitions
        """
        # Let tokenizer errors propagate: the original printed a message and
        # continued with self.tokenizer unset, deferring a confusing
        # AttributeError to first use.
        self.tokenizer = get_tokenizer(lm)
        self.max_len = max_len
        self.size = size
        self.percent = percent

        self.pairs, self.labels = self._read_pairs(path)
        # optional truncation to the first `size` examples
        self.pairs = self.pairs[:size]
        self.labels = self.labels[:size]

        if percent is not None:
            if en is None:
                self._subsample_random(percent)
            elif tag is None:
                self._subsample_by_entropy(percent, en)
            else:
                self._subsample_partitioned(percent, en, tag)

        self.da = da
        self.augmenter = Augmenter() if da is not None else None

    @staticmethod
    def _read_pairs(path):
        """Parse TSV lines into ``([(s1, s2), ...], [label, ...])``.

        ``path`` is either a list of already-read lines or a filesystem
        path; the file handle is closed deterministically (the original
        leaked an open handle).
        """
        if isinstance(path, list):
            raw_lines = path
        else:
            with open(path, 'r', encoding='utf-8') as f:
                raw_lines = f.readlines()
        pairs, labels = [], []
        for line in raw_lines:
            s1, s2, label = line.strip().split('\t')
            pairs.append((s1, s2))
            labels.append(int(label))
        return pairs, labels

    def _keep_count(self, percent):
        """Number of examples to keep for ``percent`` %, clamped to len."""
        return min(len(self.pairs), int(len(self.pairs) * percent * 0.01))

    def _subsample_random(self, percent):
        """Keep a uniform random ``percent`` % of the examples.

        Preserves the survivors' original relative order — same distribution
        as the original one-at-a-time random-pop loop, but O(n) instead of
        O(n^2).
        """
        keep = sorted(random.sample(range(len(self.pairs)),
                                    self._keep_count(percent)))
        self.pairs = [self.pairs[i] for i in keep]
        self.labels = [self.labels[i] for i in keep]

    def _subsample_by_entropy(self, percent, en):
        """Keep the ``percent`` % highest-entropy examples in original order.

        Equivalent to repeatedly popping the minimum-entropy example (ties
        resolved towards the earliest index, matching ``en.index(min(en))``)
        but computed with a single sort.  ``en`` is pruned in place so the
        caller's list stays aligned with the kept examples, as before.
        """
        n = len(self.pairs)
        n_drop = max(0, n - self._keep_count(percent))
        # indices ordered lowest entropy first, earliest index first on ties
        drop = set(sorted(range(n), key=lambda i: (en[i], i))[:n_drop])
        self.pairs = [p for i, p in enumerate(self.pairs) if i not in drop]
        self.labels = [y for i, y in enumerate(self.labels) if i not in drop]
        en[:] = [e for i, e in enumerate(en) if i not in drop]

    def _subsample_partitioned(self, percent, en, tag):
        """Keep roughly half the budget from each tag partition, choosing the
        highest-entropy examples within each partition (tag-0 picks first,
        then tag-1, matching the original output order).

        The two budgets now sum to exactly the kept count: the original used
        float halves (``leave / 2``), which kept one example too many when
        the budget was odd, and its tag-1 loop kept one example even when
        the budget was zero.
        """
        leave = self._keep_count(percent)
        # sort descending: tag-1 block first, each block by entropy desc
        ranked = sorted(zip(tag, en, self.pairs, self.labels), reverse=True)
        budgets = {0: leave - leave // 2, 1: leave // 2}
        self.pairs, self.labels = [], []
        for wanted in (0, 1):
            quota = budgets[wanted]
            if quota <= 0:
                continue
            for t, _, pair, label in ranked:
                if t == wanted:
                    self.pairs.append(pair)
                    self.labels.append(label)
                    quota -= 1
                    if quota == 0:
                        break

    def __len__(self):
        """Return the size of the dataset."""
        return len(self.pairs)

    def __getitem__(self, idx):
        """Return a tokenized item of the dataset.

        Args:
            idx (int): the index of the item

        Returns:
            List of int: token ID's of the two entities
            List of int: token ID's of the two entities augmented
                (only if ``da`` is set)
            int: the label of the pair (0: unmatch, 1: match)
        """
        left, right = self.pairs[idx]

        # joint encoding of left + right
        x = self.tokenizer.encode(text=left,
                                  text_pair=right,
                                  max_length=self.max_len,
                                  truncation=True)

        # augment if da is set
        if self.da is not None:
            combined = self.augmenter.augment_sent(left + ' [SEP] ' + right,
                                                   self.da)
            # NOTE(review): assumes the augmenter preserves exactly one
            # ' [SEP] ' separator — confirm for every `da` operator.
            left, right = combined.split(' [SEP] ')
            x_aug = self.tokenizer.encode(text=left,
                                          text_pair=right,
                                          max_length=self.max_len,
                                          truncation=True)
            return x, x_aug, self.labels[idx]
        return x, self.labels[idx]

    @staticmethod
    def pad(batch):
        """Merge a list of dataset items into a train/test batch.

        Args:
            batch (list of tuple): a list of dataset items

        Returns:
            LongTensor: x1 of shape (batch_size, seq_len)
            LongTensor: x2 of shape (batch_size, seq_len).
                        Elements of x1 and x2 are padded to the same length
                        (only present for 3-tuple items, i.e. when ``da``
                        was set)
            LongTensor: a batch of labels, (batch_size,)
        """
        if len(batch[0]) == 3:
            x1, x2, y = zip(*batch)
            maxlen = max(len(x) for x in x1 + x2)
            x1 = [xi + [0] * (maxlen - len(xi)) for xi in x1]
            x2 = [xi + [0] * (maxlen - len(xi)) for xi in x2]
            return torch.LongTensor(x1), \
                   torch.LongTensor(x2), \
                   torch.LongTensor(y)
        x12, y = zip(*batch)
        maxlen = max(len(x) for x in x12)
        x12 = [xi + [0] * (maxlen - len(xi)) for xi in x12]
        return torch.LongTensor(x12), torch.LongTensor(y)

