
import os
import json
import operator

import pandas as pd
from pandas.core.reshape import tile
import torch
import torch.nn.utils.rnn as rnn_utils
from torch.utils.data import Dataset
from transformers import RobertaTokenizer
from datasets import load_dataset, load_metric


class GlueFromHFDataset(Dataset):
    """Wrap a HuggingFace GLUE-style split as (feature-list, label) pairs."""

    def __init__(self, dataset, feat_names, label_name='label'):
        self.data = dataset
        # Pre-extract each requested feature column and the label column once.
        self.feats = [self.data[name] for name in feat_names]
        self.labels = self.data[label_name]

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        features = [column[idx] for column in self.feats]
        return features, self.labels[idx]


class Amazon(Dataset):
    """Amazon review sentiment dataset stored as per-domain text files.

    Expects ``{data_path}/{domain}/{domain}_positive.txt`` and the matching
    ``_negative.txt`` file, one review per line.
    """

    def __init__(self, data_path, domain) -> None:
        super().__init__()
        self.data_path = data_path
        self.texts, self.labels = self.read(data_path, domain)
        # Empty entity strings keep the interface consistent with fakenewsnet.
        self.entities = ['' for _ in range(len(self.labels))]

    def read(self, data_path, domain):
        """Load all review lines; positive maps to 0, negative to 1."""
        texts, labels = [], []
        sentiment_to_id = {"positive": 0, "negative": 1}
        for sentiment in ['positive', 'negative']:
            data_file = f"{data_path}/{domain}/{domain}_{sentiment}.txt"
            with open(data_file, encoding='utf-8') as f:
                for line in f:
                    texts.append(line.strip())
                    labels.append(sentiment_to_id[sentiment])
        return texts, labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        # (text, label) tuple, consistent with fakenewsnet.
        return self.texts[idx], self.labels[idx]


class CoAID(Dataset):
    """CoAID COVID-19 news dataset: real (0) vs fake (1) news CSV records."""

    def __init__(self, data_path) -> None:
        super().__init__()
        self.data_path = data_path
        self.data = self.read(data_path)
        self.labels = self.data['label'].tolist()

        # Join the four text fields of each record into a single string,
        # in the order: title, newstitle, abstract, content.
        columns = [self.data[col].tolist()
                   for col in ('title', 'newstitle', 'abstract', 'content')]
        self.texts = [" ".join(fields) for fields in zip(*columns)]
        # Empty entity strings keep the interface consistent with fakenewsnet.
        self.entities = ['' for _ in self.labels]
        self.seq_lens = [len(text) for text in self.texts]

    def read(self, data_path):
        """Load every dated Fake/Real CSV pair, labelling them 1/0."""
        file_names = ["NewsFakeCOVID-19.csv", "NewsRealCOVID-19.csv"]
        frames = []
        for data_file in covid_file_gen(data_path, file_names=file_names):
            frame = pd.read_csv(data_file)
            if "Real" in data_file:
                frame['label'] = 0
            elif "Fake" in data_file:
                frame['label'] = 1
            else:
                raise RuntimeError
            frames.append(frame)
        # Missing text fields become empty strings so joining never fails.
        return pd.concat(frames).fillna('')

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        # (text, label) tuple, consistent with fakenewsnet.
        return self.texts[idx], self.labels[idx]


def covid_file_gen(data_path, file_names):
    """Yield one path per requested file name under each dated sub-directory.

    Expected layout::

        - 05-01-2020
        - 07-01-2020
            NewsFakeCOVID-19.csv
            NewsRealCOVID-19.csv

    Paths are yielded without checking that the files actually exist.
    """
    for entry in os.listdir(f'{data_path}/'):
        if entry == "README.md":
            continue  # the top-level README is not a date directory
        for name in file_names:
            yield f'{data_path}/{entry}/{name}'


class FakeNewsNet(Dataset):
    """FakeNewsNet dataset: news articles labelled real (0) or fake (1).

    Articles are read from ``news content.json`` files located via
    record_files_gen, i.e. ``{data_path}/{label}/{record}/news content.json``.
    """

    def __init__(self, data_path) -> None:
        super().__init__()
        self.data_path = data_path
        self.label2id = {"real": 0, "fake": 1}
        self.texts, self.labels, self.seq_lens = self.read_data(data_path)
        # Maximum number of whitespace tokens kept per article.
        self.max_len = 512

    def read_data(self, data_path):
        """Collect (texts, labels, seq_lens) for every readable, non-empty article.

        Articles longer than 512 whitespace tokens are truncated; labels not in
        label2id map to -1. Missing or empty articles are skipped.
        """
        max_len = 512
        labels, texts, seq_lens = [], [], []  # dropped unused `weights` list
        for data in record_files_gen(data_path):
            label = self.label2id.get(data[0], -1)
            text = read_text(data[1])
            if text is None or len(text.split()) == 0:  # missing/empty article
                continue
            text_split = text.split()
            if len(text_split) > max_len:
                text = " ".join(text_split[:max_len])

            labels.append(label)
            texts.append(text)
            # NOTE(review): this records the character length while truncation
            # above is word-based — confirm whether word count was intended.
            seq_lens.append(len(text))

        return texts, labels, seq_lens

    def __getitem__(self, idx):
        return self.texts[idx], self.labels[idx]

    def __len__(self):
        return len(self.labels)


class FakeNewsNetWithEntity(Dataset):
    """FakeNewsNet variant whose items also carry an entity sequence.

    Data files are located via data_file_gen (``{data_path}/{label}/{file}``);
    each file holds the article text on its first line followed by one entity
    record per line (parsed by read_text_with_entity).
    """

    def __init__(self, data_path) -> None:
        super().__init__()
        self.data_path = data_path
        self.label2id = {"real": 0, "fake": 1}
        self.texts, self.entities, self.labels, self.seq_lens = self.read_data(data_path)
        # Maximum number of whitespace tokens kept per article.
        self.max_len = 512

    def read_data(self, data_path):
        """Collect (texts, entities, labels, seq_lens); skips unreadable/empty articles."""
        max_len = 512
        labels, texts, seq_lens, entities = [], [], [], []  # dropped unused `weights`
        for _, data_file in data_file_gen(data_path):  # file index is unused
            # The parent directory name ("real"/"fake") is the label.
            label = self.label2id.get(data_file.split('/')[-2], -1)

            # read_text_with_entity returns a bare None for a missing file;
            # the original tuple-unpacked it and crashed — skip instead.
            parsed = read_text_with_entity(data_file)  # type: ignore
            if parsed is None:
                continue
            text, entity_seq = parsed
            if text is None or len(text.split()) == 0:
                continue
            text_split = text.split()
            if len(text_split) > max_len:
                text = " ".join(text_split[:max_len])

            labels.append(label)
            texts.append(text)
            # NOTE(review): character length, while truncation is word-based —
            # confirm which was intended (same issue as FakeNewsNet).
            seq_lens.append(len(text))
            entities.append(entity_seq)

        return texts, entities, labels, seq_lens

    def __getitem__(self, idx):
        return self.texts[idx], self.entities[idx], self.labels[idx], self.seq_lens[idx]

    def __len__(self):
        return len(self.labels)


def read_text(data_file):
    """Return the 'text' field of the JSON object on the file's first line.

    Returns None when *data_file* does not exist.
    """
    try:
        f = open(data_file, 'r')
    except FileNotFoundError:
        return None
    with f:
        record = json.loads(f.readline())
        return record['text']


def read_text_with_entity(data_file):
    """Read an article file: first line is the text, remaining lines entity records.

    Each entity record is a Python-literal dict with an 'entity_name' key;
    lines that fail to parse are skipped. Returns ``(text, "e1 e2 ...")``,
    or None when the file does not exist.
    """
    try:
        f = open(data_file, 'r')
    except FileNotFoundError:
        return None
    with f:
        text = f.readline().strip()

        names = []
        for raw in f:
            # SECURITY: eval on file content executes arbitrary code —
            # ast.literal_eval would be safer; kept to preserve behavior
            # (only SyntaxError is treated as "not an entity record").
            try:
                record = eval(raw.strip())
            except SyntaxError:
                continue
            names.extend(record['entity_name'].split())

        return text, ' '.join(names)


def data_file_gen(data_path):
    """Yield (index, path) for every file under each label sub-directory.

    Expected layout::

        -gossipcop
        -politifact
            -fake
            -real
                -xxx.txt
    """
    for label in os.listdir(f'{data_path}/'):
        label_dir = f'{data_path}/{label}'
        for idx, file_name in enumerate(os.listdir(label_dir)):
            yield idx, f'{label_dir}/{file_name}'


def record_files_gen(data_path):
    """Yield (label, path) of the ``news content.json`` in every record directory.

    Expected layout::

        -real
        -fake
            -politifact31
                -news content.json
                -tweets
            -...

    Paths are yielded without checking that the JSON files actually exist.
    """
    for label in os.listdir(data_path):
        records_dir = f"{data_path}/{label}"
        for record in os.listdir(records_dir):
            yield label, f"{records_dir}/{record}/news content.json"


def build_vocab(data_path):
    """Count word frequencies over every readable article under *data_path*.

    Returns a list of (word, count) pairs sorted by count, descending.
    """
    word_vocab = {}
    for data in record_files_gen(data_path):
        text = read_text(data[1])
        if text is None:
            continue
        for word in text.split():
            # dict.get replaces the original per-word try/except KeyError.
            word_vocab[word] = word_vocab.get(word, 0) + 1
    return sorted(word_vocab.items(), key=operator.itemgetter(1), reverse=True)


class TokenizedCollator():
    """DataLoader collate function that tokenizes one field and tensorizes the rest.

    ``token_idx`` selects which position in each sample holds the raw text;
    ``label_idx`` is stored for interface compatibility with other collators.
    """

    def __init__(self, tokenizer, token_idx, label_idx):
        self.token_idx = token_idx  # position of the field to tokenize
        self.label_idx = label_idx  # position of the label field
        self.tokenizer = tokenizer

    def _collate_fn(self, batch):
        out = []
        for field_idx, column in enumerate(zip(*batch)):
            if field_idx != self.token_idx:
                out.append(torch.tensor(column))
                continue
            # Tokenizers expect a list, not the tuple produced by zip.
            texts = list(column) if isinstance(column, tuple) else column
            encoded = self.tokenizer(texts,
                                     padding=True,
                                     truncation=True,
                                     return_tensors="pt",
                                     max_length=512)
            input_ids, attention_mask = encoded.values()
            out.append(input_ids)
            out.append(attention_mask)
        # (input_ids, attention_mask, label_ids, ...)
        return out

    def __call__(self, batch):
        return self._collate_fn(batch)


class PromptTokenzierCollator():
    """Collate function that wraps each tokenized sample in a prompt template.

    Prefix/postfix token-id sequences are built once in ``__init__`` and
    spliced around every sample in ``_collate_fn``, e.g. (prefix case)::

        <cls> <learnable> ...template tokens... <learnable> sample... <eos>

    Learnable slots are encoded with the placeholder id -1; presumably a
    downstream embedding layer swaps these for trainable vectors — TODO
    confirm against the model code. (Class name keeps the original
    "Tokenzier" spelling since callers reference it.)
    """
    def __init__(self, tokenizer, template, token_idx, label_idx, only_mask=False, use_learnable_token=True, using_prefix=True, using_postfix=False):
        self.token_idx = token_idx  # the index of the field that should be tokenized
        self.label_idx = label_idx  # the index of the label field

        self.tokenizer = tokenizer
        self.mask_ids = tokenizer.mask_token_id  # id of the tokenizer's mask token

        self.only_mask = only_mask
        self.use_learnable_token = use_learnable_token

        # Prompt text placed before the sample: a bare "<mask>" when only_mask
        # is set, otherwise the caller-supplied template.
        if self.only_mask:
            self.prefix_prompt = "<mask>"
        else:
            self.prefix_prompt= template
        self.postfix_prompt = " "
        # Tokenize both prompts once; drop the prefix's trailing end token and
        # the postfix's leading start token so they can frame a sample.
        self.prefix_ids = self.tokenizer(self.prefix_prompt, padding=False, return_tensors="pt")['input_ids']
        self.prefix_ids = self.prefix_ids[0][:-1]  # ignore <\s>
        self.postfix_ids = self.tokenizer(self.postfix_prompt, padding=False, return_tensors="pt")['input_ids']
        self.postfix_ids = self.postfix_ids[0][1:]  # ignore <cls>

        # the last id is <mask>, we use the last but one token as unused token
        if self.use_learnable_token:
            self.unused_ids = torch.tensor([-1])  # -1 marks a learnable slot
        else:
            self.unused_ids = torch.tensor([], dtype=torch.int) 
        self.cls_id = torch.tensor([self.prefix_ids[0]])  # first prefix token (start token)
        self.eos_id = torch.tensor([self.postfix_ids[-1]])  # last postfix token (end token)

        # add learnable token ids, example(prefix): 
        # <cls> <learnable 0> Here is a piece of news with <mask> information . <learnable 1>
        # ==> [cls_id, learnable_ids, prompt_ids ..., learnable_ids]
        if using_prefix:
            self.prefix_ids = torch.cat([self.cls_id, self.unused_ids, self.prefix_ids[1:], self.unused_ids], dim=0)
        else:
            self.prefix_ids = self.cls_id  # no prefix prompt: just the start token
        # all learnable
        # self.prefix_ids = torch.cat([self.unused_ids]*10, dim=0)
        # self.prefix_ids[10//2] = self.mask_ids 
        # self.prefix_ids = torch.cat([self.cls_id, self.prefix_ids], dim=0)

        if using_postfix:
            self.postfix_ids = torch.cat([self.unused_ids, self.postfix_ids[:-1], self.unused_ids, self.eos_id], dim=0)
        else:
            self.postfix_ids = self.eos_id  # no postfix prompt: just the end token
        # all learnable
        # self.postfix_ids = torch.cat([self.unused_ids]*10, dim=0)
        # self.postfix_ids[10//2] = self.mask_ids
        # self.postfix_ids = torch.cat([self.postfix_ids, self.eos_id], dim=0)

        # Total number of template tokens added around each sample, plus an
        # all-ones attention mask covering exactly those positions.
        self.add_len = int(len(self.prefix_ids) + len(self.postfix_ids))
        self.add_attention_mask = torch.ones(self.add_len)

        # Room left for the sample itself within the 512-token model limit.
        self.max_len = 512 - self.add_len
        
    def _collate_fn(self, batch):
        """Tokenize the text field, splice in the prompt, pad, tensorize the rest.

        Returns [input_ids, attention_mask, <other fields>..., template_len].
        """
        ret = []
        for i, samples in enumerate(zip(*batch)):
            if i == self.token_idx:
                input_ids_lst, attention_mask_lst = [], []
                for sample in samples:
                    # NOTE(review): a len-2 sample is treated as a sentence
                    # pair — a 2-character string would also match; confirm
                    # callers never pass 2-char texts.
                    if len(sample) == 2:
                        inputs = self.tokenizer(*sample, return_tensors="pt").values()
                    else:                        
                        inputs = self.tokenizer(sample, return_tensors="pt").values()
                    
                    if len(inputs) == 2:  # roberta
                        input_ids, attention_mask = inputs
                    elif len(inputs) == 3:  # bert
                        input_ids, _, attention_mask = inputs
                    else:
                        raise RuntimeError
                    # Strip the tokenizer's own start/end tokens; the prompt
                    # template supplies its own.
                    input_ids = input_ids[0][1:-1]
                    attention_mask = attention_mask[0][1:-1]
                    if len(input_ids) > self.max_len:
                        input_ids = input_ids[:self.max_len]    
                        attention_mask = attention_mask[:self.max_len]                                    
                    input_ids = torch.cat([self.prefix_ids, input_ids, self.postfix_ids], dim=0)
                    # Template positions are always attended to (all ones), so
                    # appending the extra mask is position-independent.
                    attention_mask = torch.cat([attention_mask, self.add_attention_mask], dim=0)
                        
                    input_ids_lst.append(input_ids)
                    attention_mask_lst.append(attention_mask)

                # Pad every sequence in the batch to the longest one.
                input_ids = rnn_utils.pad_sequence(input_ids_lst, batch_first=True)
                attention_mask = rnn_utils.pad_sequence(attention_mask_lst, batch_first=True)
                # max_len = input_ids.shape[1]
                ret.append(input_ids)
                ret.append(attention_mask)
            else:
                ret.append(torch.tensor(samples))
        template_len = self.add_len - 1  # only ues prefix, ignoring <end>
        ret.append(template_len)
        # input_ids, attention_mask, labels, template_len
        return ret

    def __call__(self, batch):
        return self._collate_fn(batch)


def count_len(data_path):
    """Print word-count statistics (mean/max/min) and per-label article counts."""
    lens = []
    cnt = {'real': 0, 'fake': 0}
    for data in record_files_gen(data_path):
        label = data[0]
        text = read_text(data[1])
        if text is None or len(text.split()) == 0:
            continue
        cnt[label] += 1
        lens.append(len(text.split()))

    if lens:  # guard: empty dataset would raise ZeroDivisionError / max() error
        print(sum(lens) / len(lens))
        print(max(lens))
        print(min(lens))
    print(cnt)


if __name__ == "__main__":
    # Smoke test: load a GLUE-style benchmark and wrap its splits.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('roberta-base')

    benchmark_name, data_name = "super_glue-rte".split("-")
    feat_names = ["premise", "hypothesis"]

    dataset = load_dataset(benchmark_name, data_name)
    train_data = GlueFromHFDataset(dataset['train'], feat_names)  # type: ignore
    val_data = GlueFromHFDataset(dataset['validation'], feat_names)  # type: ignore

    print()  # convenient breakpoint anchor
