import os
import pandas as pd
from tqdm import tqdm
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torch
from pytorch_transformers import BertConfig, BertModel, BertTokenizer
from functools import reduce
from config import conf
from data_utils.basic_data import load_basic_dataset, load_train_val_dataset

# GPU device index read from the [gpu] section of the project config file.
DEVICE_ID = conf.get('gpu', 'device_id')
# BERT special-token markers spliced into the raw text as literal substrings;
# the tokenizer later maps them back to the real [CLS]/[SEP] token ids.
token_CLS = '[CLS]'
token_SEP = '[SEP]'


class TextSample():
    """A single labelled example: clipped text, its row id, and a sentiment label."""

    def __init__(self, text, id, senti_label, max_len=300):
        # Coerce to str first so non-string cells (NaN, numbers) are handled,
        # then keep at most max_len characters.
        clipped = str(text)[:max_len]
        self.text = clipped
        self.id = id
        self.senti_label = senti_label


class TestTextSample():
    """An unlabelled (test-time) example: clipped text plus its row id."""

    def __init__(self, text, id, max_len=300):
        # str() guards against non-string cells; slice caps the length.
        self.text = str(text)[:max_len]
        self.id = id


class TextDataset(Dataset):
    """Labelled dataset: one TextSample per DataFrame row.

    Each sample's text is "[CLS]" + title + body (clipped) + "[SEP]";
    the 'negative' column supplies the sentiment label.
    """

    def __init__(self, df, max_len=300):
        # df must provide 'title', 'text', 'id' and 'negative' columns.
        self.samples = list(self.make_samples(df, max_len))
        self.len = len(self.samples)

    def make_samples(self, df, max_len):
        """Yield a TextSample per row, title prefixed to the body text."""
        texts = df['text'].values
        titles = df['title'].values
        # BUG FIX: the original zip(texts, titles) bound each body text to the
        # name `title` (and vice versa), producing body+title; the title must
        # come first.
        full_texts = [str(title) + str(text) for title, text in zip(titles, texts)]
        ids = df['id'].values
        senti_label_list = df['negative'].values
        # BUG FIX: reserve room for the literal marker substrings so the second
        # truncation inside TextSample cannot cut off the trailing [SEP].
        wrapped_len = max_len + len(token_CLS) + len(token_SEP)
        for text, id, senti_label in zip(full_texts, ids, senti_label_list):
            yield TextSample(token_CLS + str(text)[:max_len - 2] + token_SEP,
                             id, senti_label, wrapped_len)

    def __getitem__(self, index):
        return self.samples[index]

    def __len__(self):
        return self.len


class TestTextDataset(Dataset):
    """Unlabelled dataset: one TestTextSample per DataFrame row.

    Each sample's text is "[CLS]" + title + body (clipped) + "[SEP]".
    """

    def __init__(self, df, max_len=300):
        # df must provide 'title', 'text' and 'id' columns.
        self.samples = list(self.make_samples(df, max_len))
        self.len = len(self.samples)

    def make_samples(self, df, max_len):
        """Yield a TestTextSample per row, title prefixed to the body text."""
        texts = df['text'].values
        titles = df['title'].values
        # BUG FIX: the original zip(texts, titles) bound each body text to the
        # name `title` (and vice versa), producing body+title; the title must
        # come first.
        full_texts = [str(title) + str(text) for title, text in zip(titles, texts)]
        ids = df['id'].values
        # BUG FIX: reserve room for the literal marker substrings so the second
        # truncation inside TestTextSample cannot cut off the trailing [SEP].
        wrapped_len = max_len + len(token_CLS) + len(token_SEP)
        for text, id in zip(full_texts, ids):
            yield TestTextSample(token_CLS + str(text)[:max_len - 2] + token_SEP,
                                 id, wrapped_len)

    def __getitem__(self, index):
        return self.samples[index]

    def __len__(self):
        return self.len


def convert_texts_to_features(texts, tokenizer, max_len):
    """Tokenize, truncate to max_len, and right-pad a batch of texts.

    Returns (input_ids, attention_mask) as LongTensors of shape
    (batch, max_len); the mask is 1 over real tokens and 0 over padding.
    """
    padded_rows = []
    mask_rows = []
    for text in texts:
        token_ids = tokenizer.encode(text)[:max_len]
        n_real = len(token_ids)
        n_pad = max_len - n_real
        padded_rows.append(token_ids + [tokenizer.pad_token_id] * n_pad)
        mask_rows.append([1] * n_real + [0] * n_pad)
    return torch.LongTensor(padded_rows), torch.LongTensor(mask_rows)


def convert_batch(batch_samples, tokenizer, device, max_len=300):
    """Collate TextSamples into (input_ids, sentiment_labels, attention_mask),
    all moved to `device`."""
    batch_texts = [sample.text for sample in batch_samples]
    input_ids, mask = convert_texts_to_features(batch_texts, tokenizer, max_len)
    labels = torch.LongTensor([sample.senti_label for sample in batch_samples])
    return input_ids.to(device), labels.to(device), mask.to(device)


def convert_test_batch(batch_samples, tokenizer, device, max_len=300):
    """Collate TestTextSamples into (input_ids, attention_mask, ids).

    Tensors go to `device`; the ids stay a plain Python list.
    """
    batch_texts = [sample.text for sample in batch_samples]
    input_ids, mask = convert_texts_to_features(batch_texts, tokenizer, max_len)
    sample_ids = [sample.id for sample in batch_samples]
    return input_ids.to(device), mask.to(device), sample_ids


def get_train_val_data_loader(device, batch_size, shuffle, maxlen=300):
    """Build train/val DataLoaders over the labelled data plus the shared tokenizer.

    Rows lacking an 'entity' value are dropped from both splits.
    NOTE(review): `shuffle` is applied to the validation loader as well.
    """
    train_df, val_df = load_train_val_dataset(split_ratio=0.8)
    train_df = train_df.dropna(subset=['entity'])
    val_df = val_df.dropna(subset=['entity'])

    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')

    def collate(batch_samples):
        # Closure over tokenizer/device/maxlen; DataLoader only passes samples.
        return convert_batch(batch_samples, tokenizer, device, maxlen)

    train_dataloader = DataLoader(dataset=TextDataset(train_df, max_len=maxlen),
                                  batch_size=batch_size, shuffle=shuffle,
                                  collate_fn=collate)
    val_dataloader = DataLoader(dataset=TextDataset(val_df, max_len=maxlen),
                                batch_size=batch_size, shuffle=shuffle,
                                collate_fn=collate)
    return train_dataloader, val_dataloader, tokenizer


def get_test_loader(device, batch_size, maxlen=300):
    """DataLoader over the unlabelled test split (never shuffled)."""
    test_df = load_basic_dataset(split='test')
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')

    def collate(batch_samples):
        return convert_test_batch(batch_samples, tokenizer, device, maxlen)

    return DataLoader(dataset=TestTextDataset(test_df, maxlen),
                      batch_size=batch_size, shuffle=False,
                      collate_fn=collate)


def get_test_loader_by_df(test_df, device, batch_size, maxlen=300):
    """DataLoader over a caller-supplied test DataFrame (never shuffled)."""
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')

    def collate(batch_samples):
        return convert_test_batch(batch_samples, tokenizer, device, maxlen)

    return DataLoader(dataset=TestTextDataset(test_df, maxlen),
                      batch_size=batch_size, shuffle=False,
                      collate_fn=collate)


if __name__ == '__main__':
    # Smoke test: iterate one epoch of the training loader and print the last batch.
    # BUG FIX: use the configured DEVICE_ID (read at module top but previously
    # unused) instead of a hard-coded device index 1.
    device = torch.device("cuda:%s" % DEVICE_ID if torch.cuda.is_available() else "cpu")
    train_dataloader, val_dataloader, tokenizer = get_train_val_data_loader(device, 16, True)
    batch = None  # avoid NameError at print() if the loader yields nothing
    for batch in tqdm(train_dataloader):
        text_ids, sentiment_labels, attn_mask = batch
    print(batch)
