import os
import pandas as pd
from tqdm import tqdm
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torch
from pytorch_transformers import BertConfig, BertModel, BertTokenizer
from functools import reduce
from config import conf
from data_utils.basic_data import load_basic_dataset, load_train_val_dataset
from results_process.regulizer import remove_short_entity

# GPU device index read from the project config (section 'gpu', key 'device_id').
DEVICE_ID = conf.get('gpu', 'device_id')
# Marker placed before each candidate entity in the concatenated input text;
# added to the tokenizer vocabulary in the loader factories below.
token_CLSE = '[CLSE]'
# Separator between consecutive entities in the entity segment.
token_SEPE = ';'
# Standard BERT sequence-start token.
token_CLS = '[CLS]'
# Standard BERT segment separator.
token_SEP = '[SEP]'
# Maximum number of entities packed into one sample; longer entity lists are
# chunked into multiple samples (see EntityDataset.make_samples).
ENTITY_NUM = 10


def remove_short_entity_on_keys(entity_str, key_entity_str):
    """
    Remove shorter aliases of the same entity from ``entity_str``
    (currently a pass-through: the filtering logic is disabled).

    BUG FIX: the previous version ended with ``';'.join(entity_str)`` where
    ``entity_str`` is a *string*, which inserted a ';' between every single
    character (e.g. 'ab;cd' -> 'a;b;;;c;d').  Since the actual filtering code
    was commented out, the intended behavior is to return the input unchanged.

    :param entity_str: ';'-separated entity names, or a non-str value
        (e.g. NaN from pandas) which is returned untouched
    :param key_entity_str: ';'-separated key entities; unused while the
        filtering logic stays disabled
    :return: ``entity_str`` unchanged
    """
    if not isinstance(entity_str, str):
        return entity_str
    return entity_str


class TextEntitySample():
    """
    One training sample: a document paired with up to ENTITY_NUM candidate
    entities, a document-level sentiment label and per-entity labels.

    Attributes:
        predict_text: '[CLS]<text>[SEP][CLSE]e1;[CLSE]e2...[SEP]' model input.
        id: sample/document id.
        senti_label: document-level negativity label.
        entity_labels: per-entity booleans (entity appears in key_entity).
    """

    def __init__(self, text, entity_list, id, senti_label, entity_labels, max_len=300):
        self.predict_text = self.make_predict_text(text, entity_list, max_len)
        self.id = id
        self.senti_label = senti_label
        self.entity_labels = entity_labels
        assert len(entity_list) <= ENTITY_NUM

    def make_predict_text(self, text, entity_list, max_len):
        """Concatenate text and marked entities, clipping text so the whole
        string stays within ``max_len`` characters (3 reserved for CLS/SEPs)."""
        entity_str = token_SEPE.join([token_CLSE + e for e in entity_list])
        text_len = max_len - len(entity_str) - 3
        # FIX: coerce with str() so non-string input (e.g. NaN text/title)
        # doesn't raise — consistent with TestTextEntitySample.make_predict_text.
        text = str(text)[:text_len]
        return token_CLS + text + token_SEP + entity_str + token_SEP


class TestTextEntitySample():
    """
    One test-time sample: a document plus up to ENTITY_NUM candidate entities.
    Keeps the entity list so predictions can later be mapped back to names.
    """

    def __init__(self, text, entity_list, id, max_len=300):
        self.predict_text = self.make_predict_text(text, entity_list, max_len)
        self.id = id
        self.entity_list = entity_list
        assert len(entity_list) <= ENTITY_NUM

    def make_predict_text(self, text, entity_list, max_len):
        """Build '[CLS]<text>[SEP][CLSE]e1;[CLSE]e2...[SEP]', clipping the
        text so the full string fits in ``max_len`` characters."""
        marked = token_SEPE.join(token_CLSE + name for name in entity_list)
        budget = max_len - len(marked) - 3
        clipped = str(text)[:budget]
        return '{}{}{}{}{}'.format(token_CLS, clipped, token_SEP, marked, token_SEP)


class EntityDataset(Dataset):
    """
    Training dataset: each DataFrame row is expanded into one or more
    TextEntitySample objects, chunking its entity list into groups of at
    most ENTITY_NUM.

    FIX: the previous version wrote the cleaned entity strings back into the
    caller's DataFrame (``df['entity'] = ...``) — a hidden side effect and a
    pandas SettingWithCopy hazard.  The cleaned values are now kept local;
    the yielded samples are identical.
    """

    def __init__(self, df, max_len=300):
        # Materialize all samples up front; __len__/__getitem__ are O(1).
        self.samples = list(self.make_samples(df, max_len))
        self.len = len(self.samples)

    def make_samples(self, df, max_len):
        """Yield TextEntitySample objects for every row, ENTITY_NUM entities
        at a time.  Expects columns: text, title, entity, key_entity, id,
        negative (rows with NaN entity should be dropped by the caller)."""
        # NOTE(review): zip(texts, titles) is unpacked as (title, text), so the
        # concatenation is actually text followed by title.  Preserved as-is
        # because TestEntityDataset does the same — confirm this is intended.
        combined = [str(t) + str(b) for t, b in zip(df['text'].values, df['title'].values)]
        cleaned = [remove_short_entity_on_keys(estr, kstr)
                   for estr, kstr in df[['entity', 'key_entity']].values]
        key_strs = df['key_entity'].values
        entity_lists = [estr.split(';') for estr in cleaned]
        # Per-entity label: True iff the entity name occurs in key_entity.
        entity_labels_list = [[(e in str(kestr)) for e in estr.split(';')]
                              for estr, kestr in zip(cleaned, key_strs)]
        ids = df['id'].values
        senti_labels = df['negative'].values
        for text, entities, id_, senti, labels in zip(combined, entity_lists, ids,
                                                      senti_labels, entity_labels_list):
            # Chunk long entity lists so each sample carries <= ENTITY_NUM entities.
            for start in range(0, len(entities), ENTITY_NUM):
                yield TextEntitySample(text, entities[start:start + ENTITY_NUM], id_,
                                       senti, labels[start:start + ENTITY_NUM], max_len)

    def __getitem__(self, index):
        return self.samples[index]

    def __len__(self):
        return self.len


class TestEntityDataset(Dataset):
    """
    Test-time dataset: each DataFrame row becomes one or more
    TestTextEntitySample objects, with entity lists chunked into groups of
    at most ENTITY_NUM.
    """

    def __init__(self, df, max_len=300):
        # Build everything eagerly so indexing and length are trivial.
        self.samples = list(self.make_samples(df, max_len))
        self.len = len(self.samples)

    def make_samples(self, df, max_len):
        """Yield TestTextEntitySample objects for every row of ``df``.
        Expects columns: text, title, entity, id."""
        raw_texts = df['text'].values
        raw_titles = df['title'].values
        # Concatenate text and title exactly as the training dataset does.
        merged = [str(txt) + str(ttl) for txt, ttl in zip(raw_texts, raw_titles)]
        entity_lists = [str(estr).split(';') for estr in df['entity'].values]
        for merged_text, entities, sample_id in zip(merged, entity_lists, df['id'].values):
            # Chunk so each sample carries <= ENTITY_NUM entities.
            for start in range(0, len(entities), ENTITY_NUM):
                yield TestTextEntitySample(merged_text,
                                           entities[start:start + ENTITY_NUM],
                                           sample_id, max_len)

    def __getitem__(self, index):
        return self.samples[index]

    def __len__(self):
        return self.len


def convert_texts_to_features(texts, tokenizer, max_len):
    """Tokenize ``texts`` into fixed-length id tensors and an attention mask.

    :param texts: iterable of strings
    :param tokenizer: tokenizer exposing ``encode`` and ``pad_token_id``
    :param max_len: pad/truncate every sequence to this many token ids
    :return: (input_ids, attention_mask) LongTensors of shape (batch, max_len)
    """
    encoded = [tokenizer.encode(text)[:max_len] for text in texts]
    pad_id = tokenizer.pad_token_id
    id_rows, mask_rows = [], []
    for ids in encoded:
        n_real = len(ids)
        # 1 over real tokens, 0 over padding.
        mask_rows.append([1] * n_real + [0] * (max_len - n_real))
        id_rows.append(ids + [pad_id] * (max_len - n_real))
    return torch.LongTensor(id_rows), torch.LongTensor(mask_rows)


def convert_batch(batch_samples, tokenizer, device, max_len=300):
    """Collate TextEntitySample objects into training tensors on ``device``.

    :return: (text_ids, entity_labels, sentiment_labels, attn_mask) where
        entity_labels is a flat 1-D tensor over all entities in the batch.
    """
    batch_texts = [s.predict_text for s in batch_samples]
    text_ids, attn_mask = convert_texts_to_features(batch_texts, tokenizer, max_len)
    text_ids = text_ids.to(device)
    attn_mask = attn_mask.to(device)

    # Flatten the per-sample entity label lists into one sequence.
    flat_labels = [label for s in batch_samples for label in s.entity_labels]
    entity_labels = torch.LongTensor(flat_labels).to(device)

    sentiment_labels = torch.LongTensor([s.senti_label for s in batch_samples]).to(device)
    return text_ids, entity_labels, sentiment_labels, attn_mask


def convert_test_batch(batch_samples, tokenizer, device, max_len=300):
    """Collate TestTextEntitySample objects for inference.

    :return: (text_ids, attn_mask, ids, entity_lists) — tensors on ``device``
        plus the raw sample ids and entity-name lists for mapping predictions
        back to entities.
    """
    predict_texts = [s.predict_text for s in batch_samples]
    token_ids, mask = convert_texts_to_features(predict_texts, tokenizer, max_len)
    sample_ids = [s.id for s in batch_samples]
    entity_lists = [s.entity_list for s in batch_samples]
    return token_ids.to(device), mask.to(device), sample_ids, entity_lists


def get_train_val_data_loader(device, batch_size, shuffle, maxlen=300):
    """Build train/val DataLoaders from an 80/20 split plus the shared tokenizer.

    Rows without an entity string are dropped.  The [CLSE] and ';' markers are
    registered as extra tokens so they tokenize as single units.

    :return: (train_dataloader, val_dataloader, tokenizer)
    """
    train_df, val_df = load_train_val_dataset(split_ratio=0.8)

    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
    tokenizer.add_tokens([token_CLSE, token_SEPE])

    def collate(batch_samples):
        # Bind tokenizer/device/maxlen so DataLoader only passes the batch.
        return convert_batch(batch_samples, tokenizer, device, maxlen)

    loaders = []
    for split_df in (train_df, val_df):
        dataset = EntityDataset(split_df.dropna(subset=['entity']), max_len=maxlen)
        loaders.append(DataLoader(dataset=dataset, batch_size=batch_size,
                                  shuffle=shuffle, collate_fn=collate))
    train_dataloader, val_dataloader = loaders
    return train_dataloader, val_dataloader, tokenizer


def get_test_loader(device, batch_size, maxlen=300):
    """Build a DataLoader over the basic 'test' split.

    Delegates to :func:`get_test_loader_by_df` so the tokenizer/collate/
    DataLoader setup lives in one place instead of being duplicated here.

    :param device: torch device the collated tensors are moved to
    :param batch_size: batch size (no shuffling for test data)
    :param maxlen: maximum model input length in characters/tokens
    :return: a DataLoader yielding (text_ids, attn_mask, ids, entity_lists)
    """
    test_df = load_basic_dataset(split='test')
    return get_test_loader_by_df(test_df, device, batch_size, maxlen)


def get_test_loader_by_df(test_df, device, batch_size, maxlen=300):
    """Wrap an arbitrary test DataFrame in a DataLoader with the BERT tokenizer.

    :param test_df: DataFrame with text/title/entity/id columns
    :return: a DataLoader yielding (text_ids, attn_mask, ids, entity_lists)
    """
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
    # Register the entity markers so they survive tokenization intact.
    tokenizer.add_tokens([token_CLSE, token_SEPE])

    def collate(batch_samples):
        return convert_test_batch(batch_samples, tokenizer, device, maxlen)

    dataset = TestEntityDataset(test_df, maxlen)
    return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=False,
                      collate_fn=collate)


if __name__ == '__main__':
    # Smoke test: build the loaders and walk one epoch of training batches.
    run_device = torch.device(f"cuda:{DEVICE_ID}" if torch.cuda.is_available() else "cpu")
    train_loader, _val_loader, _tokenizer = get_train_val_data_loader(run_device, 16, True)
    for batch in tqdm(train_loader):
        text_ids, entity_labels, sentiment_labels, attn_mask = batch
    # Show the final batch as a sanity check (NameError if the loader was empty).
    print(batch)
