import torch
from transformers import BertTokenizer, BertForQuestionAnswering
from transformers import AdamW
from torch.utils.data import DataLoader, Dataset
from torch import nn, cuda
from tqdm import tqdm
import json


def get_data(data_root):
    """Load a SQuAD-style JSON file and return its flattened QA examples.

    Args:
        data_root: path to a SQuAD v2 JSON file (e.g. ``dev-v2.0.json``).

    Returns:
        The list produced by :func:`restruct_data` from the file's ``data`` key.
    """
    with open(data_root, "r", encoding='utf-8') as handle:
        raw = json.load(handle)
    return restruct_data(raw['data'])


def restruct_data(data):
    """Flatten nested SQuAD-style records into a flat list of QA examples.

    Args:
        data: the ``data`` list of a SQuAD v2 JSON file — each article holds
            ``paragraphs``; each paragraph holds ``context`` and ``qas``.

    Returns:
        A list of dicts with keys ``question``, ``context``, ``answer`` and
        ``answer_start``. Only the FIRST listed answer of each question is
        kept (matching the original ``break`` behavior); questions with no
        answers (unanswerable / ``is_impossible``) are skipped.
    """
    output = []
    for article in data:
        for paragraph in article['paragraphs']:
            context = paragraph['context']
            for qa in paragraph['qas']:
                answers = qa['answers']
                if not answers:
                    # Unanswerable question — nothing to train a span on.
                    continue
                first = answers[0]
                output.append({
                    'question': qa['question'],
                    'context': context,
                    'answer': first['text'],
                    'answer_start': first['answer_start'],
                })
    return output


class QADataset(Dataset):
    """Wraps flattened SQuAD examples as BERT question-answering features.

    Each item tokenizes question + context jointly as
    ``[CLS] question [SEP] context [SEP]``, padded/truncated to ``max_len``,
    and attaches the (inclusive) start/end token indices of the answer span.
    """

    def __init__(self, data, tokenizer, max_len):
        # data: list of dicts with 'question', 'context', 'answer' keys
        # (as produced by restruct_data); tokenizer: a BERT-style tokenizer.
        self.data = data
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __getitem__(self, idx):
        item = self.data[idx]
        inputs = self.tokenizer.encode_plus(
            item['question'],
            item['context'],
            add_special_tokens=True,
            max_length=self.max_len,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )

        # Encode the answer ONCE and strip the [CLS]/[SEP] specials to get
        # the bare answer tokens (the original re-encoded it three times).
        answer_tokens = self.tokenizer.encode(item['answer'])[1:-1]
        input_id_list = inputs['input_ids'].squeeze().tolist()
        try:
            # NOTE(review): matching the first occurrence of the answer's
            # first token can hit a false match earlier in the sequence —
            # offset mapping would be more robust, but behavior is kept.
            start_positions = input_id_list.index(answer_tokens[0])
            # Inclusive end index. BUG FIX: the original added the full
            # encoded length minus 2, which pointed one token PAST the
            # answer's last token (off by one).
            end_positions = start_positions + len(answer_tokens) - 1
        except (ValueError, IndexError):
            # Answer token not present (e.g. truncated away) or the answer
            # encoded to no tokens. Point the span at [CLS] (index 0); the
            # original used -1/-3, which BertForQuestionAnswering clamps to
            # 0 anyway, so the training loss is unchanged — TODO confirm.
            start_positions = 0
            end_positions = 0

        return {
            'input_ids': inputs['input_ids'].flatten(),            # [CLS] question [SEP] context [SEP]
            'attention_mask': inputs['attention_mask'].flatten(),  # 1 = real token, 0 = padding
            'token_type_ids': inputs['token_type_ids'].flatten(),  # 0 = question segment, 1 = context segment
            'start_positions': torch.tensor(start_positions, dtype=torch.long),
            'end_positions': torch.tensor(end_positions, dtype=torch.long),
        }

    def __len__(self):
        return len(self.data)


class Trainer():
    """Fine-tunes a BERT question-answering model on SQuAD-style data."""

    def __init__(self) -> None:
        self.device = "cuda" if cuda.is_available() else "cpu"
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        self.model = BertForQuestionAnswering.from_pretrained('bert-base-uncased')
        # NOTE(review): transformers.AdamW is deprecated in favor of
        # torch.optim.AdamW; kept as-is to avoid changing optimizer behavior.
        self.optimizer = AdamW(self.model.parameters(), lr=1e-5)
        # Unused: BertForQuestionAnswering computes its own span loss when
        # start/end positions are supplied. Kept so the attribute survives
        # for any external code that reads it.
        self.loss_fn = nn.CrossEntropyLoss()

    def _build_dataloader(self):
        """Build the dev-split dataloader shared by train() and test()."""
        dataset = QADataset(data=get_data(r"data\dev-v2.0.json"),
                            tokenizer=self.tokenizer, max_len=512)
        return DataLoader(dataset, 8, shuffle=True, drop_last=True)

    def train(self, num_epochs):
        """Run `num_epochs` of fine-tuning and save the weights to disk."""
        dataloader = self._build_dataloader()
        model = self.model.to(self.device)
        model.train()
        for epoch in range(num_epochs):
            progress = tqdm(dataloader, desc=f'[epoch {epoch}]')
            for batch in progress:
                input_ids = batch['input_ids'].to(self.device)
                attention_mask = batch['attention_mask'].to(self.device)
                token_type_ids = batch['token_type_ids'].to(self.device)
                start_positions = batch['start_positions'].to(self.device)
                end_positions = batch['end_positions'].to(self.device)

                self.optimizer.zero_grad()
                outputs = model(input_ids=input_ids,
                                attention_mask=attention_mask,
                                token_type_ids=token_type_ids,
                                start_positions=start_positions,
                                end_positions=end_positions)
                loss = outputs.loss  # span cross-entropy computed by the model
                loss.backward()
                self.optimizer.step()

                progress.set_postfix(loss=loss.item())
        model.save_pretrained(r"./pretrained_model")

    def test(self):
        """Return the mean evaluation loss over the dev split."""
        dataloader = self._build_dataloader()
        model = self.model.to(self.device)
        model.eval()
        total_loss = 0
        progress = tqdm(dataloader, desc='[test]')
        with torch.no_grad():
            for batch in progress:
                input_ids = batch['input_ids'].to(self.device)
                attention_mask = batch['attention_mask'].to(self.device)
                # BUG FIX: token_type_ids were dropped here, so eval saw
                # question and context as one segment, unlike training.
                token_type_ids = batch['token_type_ids'].to(self.device)
                start_positions = batch['start_positions'].to(self.device)
                end_positions = batch['end_positions'].to(self.device)

                outputs = model(input_ids=input_ids,
                                attention_mask=attention_mask,
                                token_type_ids=token_type_ids,
                                start_positions=start_positions,
                                end_positions=end_positions)
                total_loss += outputs.loss.item()

        # Guard: with fewer than batch-size examples, drop_last empties the
        # loader and the original divided by zero.
        return total_loss / max(len(dataloader), 1)

    def answer_question(self, question, context):
        """Extract and decode the predicted answer span for one question."""
        inputs = self.tokenizer.encode_plus(question, context, return_tensors='pt',
                                            max_length=512, truncation=True)
        input_ids = inputs['input_ids'].to(self.device)
        attention_mask = inputs['attention_mask'].to(self.device)
        token_type_ids = inputs['token_type_ids'].to(self.device)
        model = self.model.to(self.device)
        model.eval()  # BUG FIX: original left dropout active at inference
        with torch.no_grad():
            outputs = model(input_ids=input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids)

        start_index = torch.argmax(outputs.start_logits)
        end_index = torch.argmax(outputs.end_logits) + 1  # exclusive slice end

        if end_index <= start_index:
            # Degenerate prediction (end before start) yields an empty span.
            return ""
        answer_ids = input_ids[0][start_index:end_index]
        return self.tokenizer.decode(answer_ids)

    def main(self, num_epochs):
        """Train, report the eval loss (previously discarded), run a demo."""
        self.train(num_epochs)
        print(f"test loss: {self.test()}")
        answer = self.answer_question(
            question="What are transformers?",
            context="Transformers are a type of neural network architecture.")
        print(answer)


if __name__ == '__main__':
    # Script entry point: fine-tune for 10 epochs, then run the demo query.
    Trainer().main(10)
