from torch.utils.data import Dataset, DataLoader
import torch
from data_utils.basic_data_round2 import load_train_val_dataset, load_basic_dataset, load_train_val_dataset_cross
from pytorch_transformers import BertTokenizer
import pandas as pd

# When True, window the document text around the first entity mention instead
# of always truncating from the start of the document.
ENTITY_POS_SPLIT = False
# When True, the training split is replaced by an offline-augmented CSV in the
# loader-building functions below.
USE_AUGUMENTED = False

# Local path to the pretrained Chinese RoBERTa checkpoint used to build the tokenizer.
pretrained_model_dir = '/home/njuciairs/wangshuai/pretrained_models/RoBERTa_zh_L12_PyTorch'  # /home/njuciairs/wangshuai/pretrained_models/RoBERTa_zh_Large_PyTorch


class TextEntitySample():
    """One labelled training example pairing a document with a candidate entity.

    The model input is assembled as ``[CLS]entity[SEP]title。text[SEP]`` and
    truncated so the whole string fits in ``max_len`` characters (three special
    markers plus the entity are reserved out of the budget).

    Attributes:
        text: the assembled input string, later fed to the tokenizer.
        id: source document id.
        senti_label: document-level negative-sentiment label, coerced to int.
        entity_labels: 1 if the entity is among the row's key entities, else 0.
        label: binary training target.
    """

    def __init__(self, id, text, title, senti_label, entity, entity_label, max_len=300):
        # pandas yields NaN (a float) for missing cells; normalise to ''.
        if not isinstance(title, str):
            title = ''
        if not isinstance(text, str):
            text = ''
        text = title + '。' + text
        # Budget left for the document once the 3 markers + entity are counted.
        text_len = max_len - 3 - len(entity)
        if ENTITY_POS_SPLIT:
            # str.find returns -1 when the entity is absent, so max() falls
            # back to the document start in that case.
            start = max(0, text.find(entity) - 100)
        else:
            start = 0
        text = text[start:start + text_len]
        self.text = '[CLS]' + entity + '[SEP]' + text + '[SEP]'
        self.id = id
        self.senti_label = int(senti_label)
        self.entity_labels = entity_label
        if not isinstance(entity_label, int):
            # Was a bare `assert` before; raise explicitly so the validation
            # survives `python -O`.
            raise TypeError('entity_label must be an int, got %r' % (entity_label,))
        # NOTE(review): the original if/elif assigned label = 1 for BOTH
        # entity_label values whenever senti_label == 1, so the target reduces
        # to the sentence-level sentiment; preserved here — confirm intended.
        self.label = 1 if self.senti_label == 1 else 0


class TestTextEntitySample():
    """A single inference example: one entity paired with its document text,
    pre-formatted as ``[CLS]entity[SEP]title。text[SEP]``.
    """

    def __init__(self, id, text, title, entity, max_len=300):
        # Missing cells arrive from pandas as NaN floats; normalise to ''.
        title = title if isinstance(title, str) else ''
        text = text if isinstance(text, str) else ''
        merged = title + '。' + text
        # Characters left for the document after the 3 markers and the entity.
        budget = max_len - 3 - len(entity)
        start = 0
        if ENTITY_POS_SPLIT:
            # Window a little before the first entity mention (find() gives -1
            # when absent, so the max() keeps us at the document start).
            start = max(0, merged.find(entity) - 100)
        window = merged[start:start + budget]
        self.text = '[CLS]' + entity + '[SEP]' + window + '[SEP]'
        self.id = id


class EntityDataset(Dataset):
    """Training dataset that expands each dataframe row into one
    TextEntitySample per ';'-separated entity, labelled by membership in the
    row's 'key_entity' field."""

    def __init__(self, df, max_len=300):
        self.samples = list(self.make_samples(df, max_len))
        self.len = len(self.samples)

    def make_samples(self, df, max_len):
        """Yield one TextEntitySample per (row, entity) pair."""
        texts = df['text'].values
        titles = df['title'].values
        entity_col = df['entity'].values
        key_entity_col = df['key_entity'].values
        negatives = df['negative'].values
        row_ids = df['id'].values
        for row, entities in enumerate(entity_col):
            keys = str(key_entity_col[row])  # str() guards NaN key_entity cells
            for entity in entities.split(';'):
                # 1 iff this entity occurs in the row's key-entity string.
                is_key = int(entity in keys)
                yield TextEntitySample(row_ids[row], texts[row], titles[row],
                                       negatives[row], entity, is_key, max_len)

    def __getitem__(self, index):
        return self.samples[index]

    def __len__(self):
        return self.len


class TestEntityDataset(Dataset):
    """Inference dataset: one unlabelled TestTextEntitySample per
    ';'-separated entity of each dataframe row."""

    def __init__(self, df, max_len=300):
        self.samples = list(self.make_samples(df, max_len))
        self.len = len(self.samples)

    def make_samples(self, df, max_len):
        """Yield one TestTextEntitySample per (row, entity) pair."""
        texts = df['text'].values
        titles = df['title'].values
        entity_col = df['entity'].values
        row_ids = df['id'].values
        for row, entities in enumerate(entity_col):
            # str() guards rows whose entity cell is NaN (a float).
            for entity in str(entities).split(';'):
                yield TestTextEntitySample(row_ids[row], texts[row],
                                           titles[row], entity, max_len)

    def __getitem__(self, index):
        return self.samples[index]

    def __len__(self):
        return self.len


def convert_texts_to_features(texts, tokenizer, max_len):
    """Tokenize pre-formatted '[CLS]…[SEP]…[SEP]' strings into padded tensors.

    Args:
        texts: iterable of strings that already embed the special markers.
        tokenizer: BERT-style tokenizer exposing encode(), pad_token_id and
            sep_token_id.
        max_len: fixed sequence length; longer encodings are truncated,
            shorter ones right-padded with pad_token_id.

    Returns:
        Tuple of LongTensors (input_ids, attention_mask, token_type_ids),
        each of shape (len(texts), max_len). token_type_ids is 0 for the
        entity segment (up to and including the first [SEP]) and 1 after.
    """
    encoded = [tokenizer.encode(text)[:max_len] for text in texts]
    lengths = [len(ids) for ids in encoded]
    attention_mask = torch.LongTensor([[1] * l + [0] * (max_len - l) for l in lengths])
    padded = [ids + [tokenizer.pad_token_id] * (max_len - len(ids)) for ids in encoded]
    # BUGFIX: the segment boundary must be the first [SEP], not [CLS].
    # [CLS] is always at index 0, so the old `index(cls_token_id) + 1`
    # produced a constant boundary of 1 and put the whole entity in segment 1.
    split_pos = [ids.index(tokenizer.sep_token_id) + 1 for ids in padded]
    token_types = torch.LongTensor([[0] * pos + [1] * (max_len - pos) for pos in split_pos])
    input_ids = torch.LongTensor(padded)
    return input_ids, attention_mask, token_types


def convert_batch(batch_samples, tokenizer, device, max_len=300):
    """Collate labelled samples into model-ready tensors on ``device``.

    Returns (input_ids, token_type_ids, attention_mask, labels).
    """
    features = convert_texts_to_features([s.text for s in batch_samples],
                                         tokenizer, max_len)
    text_ids, attn_mask, token_types = (t.to(device) for t in features)
    labels = torch.LongTensor([s.label for s in batch_samples]).to(device)
    return text_ids, token_types, attn_mask, labels


def convert_test_batch(batch_samples, tokenizer, device, max_len=300):
    """Collate unlabelled samples for inference.

    Returns (ids, input_ids, token_type_ids, attention_mask); ``ids`` stays a
    plain Python list so predictions can be joined back to the source rows.
    """
    ids = [s.id for s in batch_samples]
    features = convert_texts_to_features([s.text for s in batch_samples],
                                         tokenizer, max_len)
    text_ids, attn_mask, token_types = (t.to(device) for t in features)
    return ids, text_ids, token_types, attn_mask


def get_train_val_data_loader(device, batch_size, shuffle, maxlen=300):
    """Build train/val DataLoaders over an 80/20 split plus the tokenizer.

    Returns (train_dataloader, val_dataloader, tokenizer); batches are
    collated with convert_batch and already moved to ``device``.
    """
    train_df, val_df = load_train_val_dataset(split_ratio=0.8)
    train_df = train_df.dropna(subset=['entity'])
    val_df = val_df.dropna(subset=['entity'])

    if USE_AUGUMENTED:
        # Swap in the offline-augmented training set.
        train_df = pd.read_csv(r'/home/njuciairs/wangshuai/data/augumented_train.csv')
    # Re-drop in case the augmented CSV carries empty entity cells.
    train_df = train_df.dropna(subset=['entity'])

    tokenizer = BertTokenizer.from_pretrained(pretrained_model_dir)

    def collate(batch_samples):
        return convert_batch(batch_samples, tokenizer, device, maxlen)

    train_dataloader = DataLoader(dataset=EntityDataset(train_df, max_len=maxlen),
                                  batch_size=batch_size, shuffle=shuffle,
                                  collate_fn=collate)
    val_dataloader = DataLoader(dataset=EntityDataset(val_df, max_len=maxlen),
                                batch_size=batch_size, shuffle=shuffle,
                                collate_fn=collate)
    return train_dataloader, val_dataloader, tokenizer


def get_train_val_data_loader_cross(device, batch_size, shuffle, test_number, cross_number, maxlen=300):
    """Build train/val DataLoaders for one cross-validation fold.

    ``test_number`` selects the held-out fold among ``cross_number`` folds.
    Returns (train_dataloader, val_dataloader, tokenizer).
    """
    # NOTE: the loader returns (val, train) in that order.
    val_df, train_df = load_train_val_dataset_cross(test_number, cross_number)
    train_df = train_df.dropna(subset=['entity'])
    val_df = val_df.dropna(subset=['entity'])

    if USE_AUGUMENTED:
        # Swap in the offline-augmented training set.
        train_df = pd.read_csv(r'/home/njuciairs/wangshuai/data/augumented_train.csv')
    # Re-drop in case the augmented CSV carries empty entity cells.
    train_df = train_df.dropna(subset=['entity'])

    tokenizer = BertTokenizer.from_pretrained(pretrained_model_dir)

    def collate(batch_samples):
        return convert_batch(batch_samples, tokenizer, device, maxlen)

    train_dataloader = DataLoader(dataset=EntityDataset(train_df, max_len=maxlen),
                                  batch_size=batch_size, shuffle=shuffle,
                                  collate_fn=collate)
    val_dataloader = DataLoader(dataset=EntityDataset(val_df, max_len=maxlen),
                                batch_size=batch_size, shuffle=shuffle,
                                collate_fn=collate)
    return train_dataloader, val_dataloader, tokenizer


def get_test_loader(device, batch_size, maxlen=300):
    """DataLoader over the official test split (unlabelled, unshuffled)."""
    dataset = TestEntityDataset(load_basic_dataset(split='test'), maxlen)
    tokenizer = BertTokenizer.from_pretrained(pretrained_model_dir)

    def collate(batch_samples):
        return convert_test_batch(batch_samples, tokenizer, device, maxlen)

    return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False,
                      collate_fn=collate)


def get_test_loader_by_split_id(device, batch_size, test_number, cross_number=9, maxlen=300):
    """Treat the validation part of one cross-validation fold as a test set.

    Useful for generating out-of-fold predictions; the loader is unshuffled
    and collated with convert_test_batch.
    """
    # load_train_val_dataset_cross returns (val, train); only val is needed.
    fold_val_df, _ = load_train_val_dataset_cross(test_number=test_number,
                                                  cross_number=cross_number)
    dataset = TestEntityDataset(fold_val_df, maxlen)
    tokenizer = BertTokenizer.from_pretrained(pretrained_model_dir)

    def collate(batch_samples):
        return convert_test_batch(batch_samples, tokenizer, device, maxlen)

    return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False,
                      collate_fn=collate)


def get_test_loader_by_df(test_df, device, batch_size, maxlen=300):
    """Unshuffled inference DataLoader over a caller-supplied dataframe."""
    dataset = TestEntityDataset(test_df, maxlen)
    tokenizer = BertTokenizer.from_pretrained(pretrained_model_dir)

    def collate(batch_samples):
        return convert_test_batch(batch_samples, tokenizer, device, maxlen)

    return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False,
                      collate_fn=collate)


if __name__ == '__main__':
    from tqdm import tqdm

    # Smoke test: build the loaders and iterate the validation split once.
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    train_dataloader, val_dataloader, tokenizer = get_train_val_data_loader(device, 16, True)
    for batch in tqdm(val_dataloader):
        text_ids, token_types, attn_mask, labels = batch
    # Dump the last batch so the tensor shapes can be eyeballed.
    print(batch)
