import json

import lightning as L
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from transformers import BertTokenizer
from collections import defaultdict

def get_train_data(data_dir, anchor_name2index):
    """Read a JSONL file of questions, keeping only rows whose every
    ``anchor_name`` entry is present in ``anchor_name2index``.

    Rows with any unknown anchor name are printed and skipped, so one bad
    label does not abort the whole load.

    Args:
        data_dir: path to a JSON-lines file; each line is one question dict
            with at least an ``anchor_name`` list.
        anchor_name2index: mapping of valid anchor names to indices.

    Returns:
        list of question dicts that passed the label check.
    """
    train_data = []
    with open(data_dir, "r", encoding="utf-8") as f:
        # Stream line by line instead of readlines() to avoid loading the
        # whole file into memory at once.
        for line in f:
            if not line.strip():
                # Tolerate blank lines (e.g. a trailing newline at EOF);
                # the original crashed on json.loads("").
                continue
            item = json.loads(line)
            if not all(name in anchor_name2index for name in item["anchor_name"]):
                print(item)
                print("ERROR: train data label not in anchor_name")
                continue
            train_data.append(item)
    return train_data


def get_edition2anchor(data_dir):
    """Load the edition -> anchor mapping from a JSON file.

    Uses a context manager so the file handle is closed deterministically
    (the original relied on the garbage collector to close it).
    """
    with open(data_dir, "r", encoding="utf-8") as f:
        return json.load(f)


def get_keyword2index(data_dir):
    """Load the keyword -> feature-index mapping from a JSON file.

    Uses a context manager so the file handle is closed deterministically
    (the original relied on the garbage collector to close it).
    """
    with open(data_dir, "r", encoding="utf-8") as f:
        return json.load(f)


def get_soft_label(data_dir):
    """Load the precomputed soft-label matrix from a NumPy ``.npy`` file."""
    soft_label = np.load(data_dir)
    return soft_label

def get_anchor_info(data_dir):
    """Parse the anchor-info JSON file.

    Each record is expected to carry ``anchor_name``, ``index`` and
    ``edition_code`` keys.

    Args:
        data_dir: path to a JSON file containing a list of anchor records.

    Returns:
        anchor_name2index: dict mapping anchor name -> anchor index.
        anchor_index2edition: defaultdict mapping anchor index -> list of
            edition codes (de-duplicated, insertion order preserved).
        editions: sorted list of every edition code seen.
    """
    editions = set()
    anchor_name2index = dict()
    anchor_index2edition = defaultdict(list)
    # Context manager closes the handle (the original leaked it).
    with open(data_dir, "r", encoding="utf-8") as f:
        anchor_info = json.load(f)
    for item in anchor_info:
        editions.add(item["edition_code"])
        anchor_name2index[item["anchor_name"]] = item["index"]
        # De-duplicate edition codes per anchor index, keeping first-seen order.
        if item["edition_code"] not in anchor_index2edition[item["index"]]:
            anchor_index2edition[item["index"]].append(item["edition_code"])
    # Sort so the edition list is deterministic across runs (set iteration
    # order is not guaranteed to be stable between processes).
    return anchor_name2index, anchor_index2edition, sorted(editions)


def generate_soft_label(classes, labels, soft_label):
    """Build a soft target vector for a multi-label sample.

    Averages the soft-label rows of every ground-truth anchor, then forces
    the ground-truth positions themselves to exactly 1.0.

    Args:
        classes: total number of anchor classes (length of the output).
        labels: list of ground-truth anchor indices.
        soft_label: matrix whose row ``i`` is the soft distribution for
            anchor ``i`` (each row of length ``classes``).

    Returns:
        float32 ndarray of shape (classes,). All zeros when ``labels`` is
        empty (the original raised ZeroDivisionError in that case).
    """
    res = np.zeros(classes, dtype=np.float32)
    if not labels:
        # Guard: averaging over zero labels divided by zero before.
        return res
    for label in labels:
        res += soft_label[label]
    res /= len(labels)
    # Ground-truth anchors are hard positives regardless of the soft mix.
    res[labels] = 1.0
    return res


class NLPDataset(Dataset):
    """Dataset producing tokenized question text, keyword-count features and
    soft anchor labels.

    Args:
        data: list of question dicts (keys used: ``knowledge_name``,
            ``content``, ``analysis``, ``answer``, ``anchor_name`` and, in
            val/test mode, ``edition_code``).
        tokenizer: a tokenizer exposing HuggingFace's ``encode_plus``.
        anchor_name2index: anchor name -> anchor index.
        anchor_index2edition: anchor index -> list of edition codes.
        keyword2index: keyword string -> position in the keyword feature vector.
        soft_label: soft-label matrix consumed by ``generate_soft_label``.
        editions: list of all edition codes (stored for reference; not used
            inside this class).
        params: namespace providing ``subjectphase`` and ``anchor_size``.
        mode: "train" returns (tokens, keyword, label); "val"/"test" add an
            edition mask as a fourth element.
    """

    def __init__(self, data, tokenizer, anchor_name2index, anchor_index2edition, keyword2index, soft_label,
                 editions, params, mode="train"):
        super().__init__()
        self.data = data
        self.tokenizer = tokenizer
        self.params = params
        self.anchor_name2index = anchor_name2index
        self.anchor_index2edition = anchor_index2edition
        self.keyword2index = keyword2index
        self.soft_label = soft_label
        self.editions = editions
        self.mode = mode

    def __len__(self):
        return len(self.data)

    def _encode(self, text, max_length):
        # Single helper for the three tokenizations; returns a dict of
        # (1, max_length) tensors (padded/truncated to max_length).
        return self.tokenizer.encode_plus(
            text,
            padding="max_length",
            truncation=True,
            max_length=max_length,
            return_tensors="pt",
        )

    def _build_knowledge(self, index):
        # Collect distinct knowledge names per depth level, then join them
        # deepest-level-first into one string.
        levels = defaultdict(list)
        for kgs in self.data[index]["knowledge_name"]:
            for depth, kg in enumerate(kgs.split(" ")):
                if kg not in levels[depth]:
                    levels[depth].append(kg)
        # BUGFIX: the original iterated range(len(levels), -1, -1), reading
        # the out-of-range key ``len(levels)`` (which a defaultdict silently
        # creates), so an empty string was prepended and the subjectphase
        # slices below kept a spurious empty level instead of a real one.
        joined = ["".join(levels[i]) for i in range(len(levels) - 1, -1, -1)]
        # Per-subjectphase depth caps (meaning of the codes is project
        # convention — TODO confirm with the data owners).
        if self.params.subjectphase == "0203":
            joined = joined[:3]
        elif self.params.subjectphase == "0204":
            joined = joined[:2]
        return "".join(joined)

    def __getitem__(self, index):
        knowledge = self._build_knowledge(index)
        content = self.data[index]["content"]
        analysis = self.data[index]["analysis"] + self.data[index]["answer"]
        content_token = self._encode(content, 200)
        knowledge_token = self._encode(knowledge, 100)
        analysis_token = self._encode(analysis, 300)
        # Keyword features: raw substring-occurrence counts of each keyword
        # in the full question text.
        ques_info = content + analysis + knowledge
        keyword = np.zeros(len(self.keyword2index), dtype=np.float32)
        for item in self.keyword2index:
            keyword[self.keyword2index[item]] = ques_info.count(item)
        keyword = torch.tensor(keyword, dtype=torch.float32)
        raw_label = list(set([self.anchor_name2index[anchor_name] for anchor_name in self.data[index]["anchor_name"]]))
        label = generate_soft_label(self.params.anchor_size, raw_label, self.soft_label)
        label = torch.tensor(label, dtype=torch.float32)
        # Flatten the (1, max_length) token tensors so the DataLoader batches
        # them into (batch, max_length).
        tokens = {"c_ids": content_token["input_ids"].reshape(-1),
                  "c_mask": content_token["attention_mask"].reshape(-1),
                  "k_ids": knowledge_token["input_ids"].reshape(-1),
                  "k_mask": knowledge_token["attention_mask"].reshape(-1),
                  "a_ids": analysis_token["input_ids"].reshape(-1),
                  "a_mask": analysis_token["attention_mask"].reshape(-1)}
        if self.mode == "train":
            return tokens, keyword, label
        if self.mode in ("test", "val"):
            # Mask anchors whose edition list does not include this
            # question's edition, so evaluation only scores valid anchors.
            edition_code = self.data[index]["edition_code"]
            mask = [1 if edition_code in self.anchor_index2edition[i] else 0 for i in range(self.params.anchor_size)]
            mask = torch.tensor(np.array(mask), dtype=torch.int32)
            return tokens, keyword, label, mask
        # The original silently returned None for an unknown mode, which
        # surfaced later as a confusing unpacking error.
        raise ValueError(f"unknown mode: {self.mode!r}")


class APDataModule(L.LightningDataModule):
    """LightningDataModule wiring the tokenizer, anchor metadata and
    train/val datasets for anchor prediction.

    ``params`` must provide: ``bert_dir``, ``anchor_info_data_dir``,
    ``train_data_dir``, ``val_data_dir``, ``keyword_data_dir``,
    ``soft_label``, ``train_batch_size``, ``val_batch_size``,
    ``subjectphase`` and ``anchor_size``.
    """

    def __init__(self, params):
        super().__init__()
        self.params = params
        self.tokenizer = BertTokenizer.from_pretrained(params.bert_dir)
        self.anchor_name2index, self.anchor_index2edition, self.editions = get_anchor_info(params.anchor_info_data_dir)
        self.train_data = get_train_data(params.train_data_dir, self.anchor_name2index)
        self.val_data = get_train_data(params.val_data_dir, self.anchor_name2index)
        self.keyword2index = get_keyword2index(params.keyword_data_dir)
        self.soft_label = get_soft_label(params.soft_label)

    def _make_dataset(self, data, mode):
        # All datasets share the same metadata; only the raw rows and the
        # mode differ.
        return NLPDataset(
            data,
            self.tokenizer,
            self.anchor_name2index, self.anchor_index2edition,
            self.keyword2index,
            self.soft_label,
            self.editions,
            self.params,
            mode,
        )

    def setup(self, stage):
        if stage == "fit":
            self.train_dataset = self._make_dataset(self.train_data, "train")
        # BUGFIX: the original built val_dataset only for stage == "fit",
        # so Trainer.validate() (stage == "validate") failed with a missing
        # ``val_dataset`` attribute.
        if stage in ("fit", "validate"):
            self.val_dataset = self._make_dataset(self.val_data, "val")

    def train_dataloader(self):
        return DataLoader(
            self.train_dataset, batch_size=self.params.train_batch_size, num_workers=15, shuffle=True, pin_memory=True
        )

    def val_dataloader(self):
        return DataLoader(
            self.val_dataset, batch_size=self.params.val_batch_size, num_workers=15, shuffle=False, pin_memory=True
        )

    def predict_dataloader(self):
        # Prediction is intentionally unsupported here; Lightning treats a
        # None return as "no dataloader configured".
        return None


def acc4validate(similarities, labels, masks):
    truth_indices = [
        torch.where(label==1)[0] for label in labels
    ]
    similarities[masks==0] = -1e32
    top_1_indices = torch.argmax(similarities, dim=-1).reshape(-1)
    res = [1 if top_1_indices[i] in truth_indices[i] else 0 for i in range(labels.shape[0])]
    return res 
