import torch.utils.data
from transformers import AutoTokenizer, AutoModelForCausalLM, GPT2Model, default_data_collator, get_scheduler
from datasets import load_from_disk, load_dataset
from dltools import dltools

# HF_ENDPOINT=https://hf-mirror.com
# Load the fast GPT-2 tokenizer from the local cache (downloads on first run).
tokenizer = AutoTokenizer.from_pretrained(
    "distilgpt2", use_fast=True, cache_dir="./MNIST/cache", force_download=False)
# Smoke test: encode two sample sentences and inspect the output format.
tokens = tokenizer.batch_encode_plus([
    "hide new secretions from the parental units",
    "this movie like a gold"
])
print(f"tokens{tokens}")
# Load a local on-disk copy of GLUE SST-2 (presumably saved earlier with
# save_to_disk — "./MNIST" is just this project's cache directory; confirm).
dataset = load_from_disk("./MNIST/glue_sst2")
print(f"dataset:{dataset}")


def f1(data, tokenizer):
    """Tokenize the ``sentence`` column of a batched dataset slice."""
    sentences = data["sentence"]
    return tokenizer.batch_encode_plus(sentences)


# Tokenize in batches of 1000; drop the raw columns so only the token
# fields produced by the tokenizer (input_ids / attention_mask) remain.
dataset = dataset.map(f1, batched=True, batch_size=1000, remove_columns=["sentence", "idx", "label"],
                      fn_kwargs={"tokenizer": tokenizer})
print(dataset)


def f2(data):
    """Batched filter predicate: keep examples with at least 8 tokens."""
    keep = []
    for token_ids in data["input_ids"]:
        keep.append(len(token_ids) >= 8)
    return keep


# Keep only examples with at least 8 tokens so every sample can be
# truncated to a fixed length of 8 in the next step.
dataset = dataset.filter(f2, batched=True, batch_size=1000)
print(dataset)


def f3(data):
    """Truncate every example to exactly 8 tokens and add LM labels.

    ``input_ids`` is cut to the first 8 tokens (the preceding filter
    guarantees at least 8 exist), ``attention_mask`` becomes all-ones of
    length 8, and ``labels`` is set to a copy of ``input_ids``.
    """
    data["input_ids"] = [ids[:8] for ids in data["input_ids"]]
    # Build a fresh inner list per row; the original `[[1] * 8] * n`
    # repeated one shared list object across all rows.
    data["attention_mask"] = [[1] * 8 for _ in data["attention_mask"]]
    # Copy instead of aliasing: the original assigned the very same list
    # objects to both keys, so an in-place edit of labels would silently
    # mutate input_ids as well (and vice versa).
    data["labels"] = [list(ids) for ids in data["input_ids"]]
    return data


dataset = dataset.map(f3, batched=True, batch_size=1000)
print(dataset)
# collate_fn=default_data_collator cannot be omitted: without it the
# outermost level of each batch would not be collated into tensors.
train_loader = torch.utils.data.DataLoader(dataset=dataset["train"], batch_size=16, shuffle=True, drop_last=True,
                                           collate_fn=default_data_collator)
device = dltools.try_gpu()


class PredLastWordModel(torch.nn.Module):
    """distilgpt2 backbone plus a vocabulary-projection head for next-token
    (last-word) prediction.

    The linear head is initialised from the pretrained ``lm_head`` weights so
    the model starts from the original language-modelling distribution.
    """

    def __init__(self):
        super().__init__()
        # Backbone without the LM head; cached locally to avoid re-downloading.
        self.pretrained = GPT2Model.from_pretrained("distilgpt2", cache_dir="./MNIST/cache", force_download=False)
        # Vocabulary projection; no bias, matching GPT-2's lm_head.
        self.linear = torch.nn.Linear(768, tokenizer.vocab_size, bias=False)
        # Same cache_dir as the backbone (the original omitted it here,
        # forcing a second download location).
        lm_model = AutoModelForCausalLM.from_pretrained("distilgpt2", cache_dir="./MNIST/cache",
                                                        force_download=False)
        self.linear.load_state_dict(lm_model.lm_head.state_dict())
        self.loss_fn = torch.nn.CrossEntropyLoss()

    def forward(self, input_ids, attention_mask, labels=None):
        """Run the backbone + head.

        Returns a dict with ``logits`` of shape (batch, seq, vocab) and
        ``loss`` (None when ``labels`` is not given).
        """
        input_ids = input_ids.to(device)
        attention_mask = attention_mask.to(device)
        # Bug fix: only move labels when provided. The original called
        # labels.to(device) unconditionally, which raised on labels=None and
        # made the None-check below dead code.
        if labels is not None:
            labels = labels.to(device)
        backbone_out = self.pretrained(input_ids=input_ids, attention_mask=attention_mask)
        logits = self.linear(backbone_out.last_hidden_state)

        loss = None
        if labels is not None:
            # Shift by one: the hidden state at position t predicts token
            # t+1, so logits[:, :-1] is aligned with labels[:, 1:].
            shift_logits = logits[:, :-1].reshape(-1, tokenizer.vocab_size)
            shift_labels = labels[:, 1:].reshape(-1)
            loss = self.loss_fn(shift_logits, shift_labels)
        return {"loss": loss, "logits": logits}


model = PredLastWordModel()
# Parameter count, in units of 10,000.
print(sum(i.numel() for i in model.parameters()) / 10000)

# Sanity check: push a single batch through the model before training.
for data in train_loader:
    # ** unpacks the batch dict into forward()'s keyword arguments.
    out = model(**data)
    print(f"{out['loss']}, {out['logits'].shape}")
    break


def test():
    """Evaluate last-token prediction accuracy on up to 51 test batches.

    NOTE(review): forward() moves inputs to `device`, but the model itself is
    only moved in train() — confirm calling test() before train() is intended
    when a GPU is available.
    """
    model.eval()
    test_loader = torch.utils.data.DataLoader(dataset=dataset["test"], batch_size=16, shuffle=False, drop_last=False,
                                              collate_fn=default_data_collator)
    correct = 0
    total = 0
    for i, data in enumerate(test_loader):
        # Only the last token's prediction is scored.
        label = data["input_ids"][:, -1].clone()
        # Blank out the last input token so the model cannot cheat.
        data["input_ids"][:, -1] = 0
        # Labels are zeroed too; the returned loss is meaningless here and unused.
        data["labels"][:, :] = 0

        # Forward pass without gradients.
        with torch.no_grad():
            out = model(**data)
        # Position -2 predicts the (shifted) last token; -1 would predict
        # the token after the sequence.
        last_word = out["logits"].argmax(dim=-1)[:, -2]
        correct += (label == last_word).sum().item()
        total += len(label)
        if i % 10 == 0:
            print(f"{i}:last_word:{last_word} label:{label}")
        if i == 50:
            break
    print(f"acc:{correct / total}")
    # Decode the first 8 rows of the last processed batch (assumes that
    # batch has at least 8 rows — TODO confirm for the final batch).
    for i in range(8):
        print(tokenizer.decode(data["input_ids"][i, :-1]))
        print(tokenizer.decode(last_word[i]))
        print(tokenizer.decode(label[i]))
        print("-------")


test()


def train():
    """Fine-tune the model for one pass over train_loader with linear LR decay."""
    optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
    lr_scheduler = get_scheduler(name="linear",
                                 num_warmup_steps=10,  # warmup steps before the linear decay begins
                                 num_training_steps=len(train_loader),  # total number of optimisation steps
                                 optimizer=optimizer
                                 )
    model.to(device)
    model.train()
    for i, data in enumerate(train_loader):
        input_ids, attention_mask, labels = data["input_ids"].to(device), data["attention_mask"].to(device), data[
            "labels"].to(device)
        optimizer.zero_grad()
        out = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
        loss = out["loss"]
        loss.backward()
        # Gradient clipping to stabilise training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
        optimizer.step()
        lr_scheduler.step()
        if i % 50 == 0:
            # Token-level accuracy on the shifted positions of this batch.
            labels = labels[:, 1:]
            out = out["logits"].argmax(dim=2)[:, :-1]
            correct = (labels == out).sum().item()
            acc = correct / len(labels.reshape(-1))
            lr = optimizer.state_dict()["param_groups"][0]["lr"]
            print(f"{i} loss:{loss} acc:{acc} lr:{lr}")


train()
