from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch
from torch.utils.data import DataLoader, Dataset
from transformers import AdamW
from torch import nn, cuda
from tqdm import tqdm
import pandas as pd


def preprocess_dataset(dataset):
    """Flatten each dialogue dict into one speaker-tagged string.

    For every dialogue, falsy utterances (None, '') are dropped, then the
    remaining turns are concatenated in order with alternating tags:
    even positions get '<user>:', odd positions get '<bots>:'.

    Returns a list with one tagged paragraph per input dialogue.
    """
    tags = ('<user>:', '<bots>:')
    output = []
    for dialogue in dataset:
        turns = [turn for turn in dialogue.values() if turn]
        paragraph = ''.join(tags[i % 2] + turn for i, turn in enumerate(turns))
        output.append(paragraph)
    return output


def preprocess_token(processed_dataset, tokenizer):
    """Tokenize a list of strings into fixed-width tensor batches.

    Uses the tokenizer's batch __call__ (truncation + padding to at most
    16 tokens, PyTorch tensors) and returns the full encoding along with
    its input_ids and attention_mask for convenience.
    """
    encoding = tokenizer(
        processed_dataset,
        truncation=True,
        padding=True,
        return_tensors='pt',
        max_length=16,
    )
    return encoding, encoding['input_ids'], encoding['attention_mask']


class GPT2Dataset(Dataset):
    """Torch dataset of tokenized headline strings for GPT-2 LM fine-tuning.

    Each item is a dict with 'input_ids' and 'attention_mask' produced by
    `preprocess_token`.
    """

    def __init__(self, tokenizer, data_files=r'data\abcnews-date-text.csv', max_samples=10000):
        """Load and tokenize the headline corpus.

        Args:
            tokenizer: batch-callable tokenizer passed to `preprocess_token`.
            data_files: path to a CSV file with a 'headline_text' column.
                BUG FIX: `Trainer.train` passed a `data_files` keyword that
                the old signature did not accept, raising TypeError; the
                previous hard-coded path is now the default.
            max_samples: cap on the number of rows tokenized, to keep the
                training scale small (was a hard-coded 10000).
        """
        headlines = pd.read_csv(data_files)['headline_text']
        processed_dataset = headlines.tolist()[:max_samples]
        self.encoding, self.input_ids, self.attention_mask = preprocess_token(processed_dataset, tokenizer)

    def __len__(self):
        return len(self.input_ids)

    def __getitem__(self, index):
        return {
            'input_ids': self.input_ids[index],
            'attention_mask': self.attention_mask[index],
        }


class Trainer():
    """Fine-tunes GPT-2 on the headline dataset with a causal-LM objective."""

    def __init__(self) -> None:
        self.device = "cuda" if cuda.is_available() else "cpu"
        self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
        # GPT-2 ships without a pad token; reuse EOS so batch padding works.
        self.tokenizer.pad_token = self.tokenizer.eos_token
        self.model = GPT2LMHeadModel.from_pretrained('gpt2')
        self.optimizer = AdamW(self.model.parameters(), lr=1e-5)

    def train(self, num_epochs):
        """Run `num_epochs` of fine-tuning, then save the checkpoint.

        Args:
            num_epochs: number of full passes over the dataset.
        """
        # BUG FIX: the old call passed `data_files=r'data\LCCC-base-split\...'`,
        # a leftover from the LCCC dialogue corpus, but GPT2Dataset's
        # signature did not accept it (TypeError) and the class loads the
        # CSV headline corpus anyway.
        ft_dataset = GPT2Dataset(tokenizer=self.tokenizer)
        dataloader = DataLoader(ft_dataset, batch_size=8, shuffle=True, drop_last=True)
        model = self.model.to(self.device)
        model.train()
        for epoch in range(num_epochs):
            progress = tqdm(dataloader, desc=f'[epoch {epoch}]')
            for batch in progress:
                input_ids = batch['input_ids'].to(self.device)
                attention_mask = batch['attention_mask'].to(self.device)

                self.optimizer.zero_grad()
                # labels=input_ids: HF shifts the labels internally for the
                # causal-LM loss.
                outputs = model(input_ids=input_ids, attention_mask=attention_mask, labels=input_ids)
                loss = outputs.loss
                loss.backward()
                self.optimizer.step()

                progress.set_postfix(loss=loss.item())
        model.save_pretrained(r"./pretrained_model")
        # Also save the tokenizer so the checkpoint directory is
        # self-contained for later generation.
        self.tokenizer.save_pretrained(r"./pretrained_model")

    def main(self, num_epochs):
        self.train(num_epochs)


def answer_question(question, pretrained_model):
    """Sample a continuation of `question` from a fine-tuned GPT-2 model.

    Args:
        question: prompt string fed to the model.
        pretrained_model: path or hub id of the fine-tuned GPT2LMHeadModel.

    Returns:
        The decoded generated text (also printed to stdout).
    """
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    tokenizer.pad_token = tokenizer.eos_token
    model = GPT2LMHeadModel.from_pretrained(pretrained_model)
    # Inference mode: disable dropout and gradient tracking.
    model.eval()
    # Tokenize via __call__ so we get a matching attention_mask instead of
    # hand-building the tensor with torch.tensor(...).unsqueeze(0); the old
    # call left generate() without a mask.
    inputs = tokenizer(question, return_tensors='pt')
    with torch.no_grad():
        generated = model.generate(
            input_ids=inputs['input_ids'],
            attention_mask=inputs['attention_mask'],
            max_length=50,
            temperature=1.0,
            top_k=0,
            top_p=0.9,
            do_sample=True,
            num_return_sequences=1,
            # Explicit pad token avoids the "pad_token_id not set" warning.
            pad_token_id=tokenizer.eos_token_id,
        )
    # Pass clean_up_tokenization_spaces to decode() rather than setting it
    # as a bare attribute on the tokenizer object.
    generated_title = tokenizer.decode(
        generated[0], skip_special_tokens=True, clean_up_tokenization_spaces=True
    )
    print(generated_title)
    return generated_title


if __name__ == '__main__':
    # Training — uncomment to fine-tune before generating:
    # trainer = Trainer()
    # trainer.main(100)
    # Generation from the saved checkpoint.
    answer_question("Chinese government", r"./pretrained_model")
