import torch
import torch.utils.data
from transformers import AutoTokenizer, T5Model, AutoModelForSeq2SeqLM, get_scheduler
from transformers.trainer_pt_utils import get_parameter_names
from datasets import load_from_disk, load_dataset

import dltools

# HF_ENDPOINT=https://hf-mirror.com
# Load the t5-small tokenizer from the local cache (set HF_ENDPOINT above to use a mirror).
tokenizer = AutoTokenizer.from_pretrained("t5-small", cache_dir="./MNIST/cache", force_download=False)
print(tokenizer)
# Quick sanity check that the tokenizer encodes a batch of raw strings.
res = tokenizer.batch_encode_plus(['What is your name?', 'My name is Sylvain.'])
print(f"res:{res}")
# Load the XSum summarization dataset previously saved to disk with save_to_disk.
dataset = load_from_disk("./MNIST/xsum")
print(f"dataset:{dataset}")
# Subsample each split so the experiment stays small and fast.
dataset["train"] = dataset["train"].shuffle().select(range(10000))
dataset["validation"] = dataset["validation"].shuffle().select(range(1000))
dataset["test"] = dataset["test"].shuffle().select(range(1000))

print(f"dataset['train'][0]:{dataset['train'][0]}")


def f1(examples, tokenizer):
    """Tokenize one batch of XSum examples for T5.

    Prefixes each document with the T5 summarization task prompt,
    truncates long inputs, and encodes the reference summaries as
    ``labels``.

    Args:
        examples: batch dict with "document" and "summary" lists of strings.
        tokenizer: a tokenizer exposing ``batch_encode_plus``.

    Returns:
        dict with the document encodings plus a "labels" key holding the
        summary token ids.
    """
    # T5's task prefix is "summarize: " WITH a trailing space — the model
    # was pre-trained with that exact prompt format; omitting the space
    # glues the prefix onto the first word of the document.
    data = tokenizer.batch_encode_plus(["summarize: " + i for i in examples["document"]],
                                       max_length=1024,
                                       truncation=True
                                       )
    data["labels"] = tokenizer.batch_encode_plus(examples["summary"], max_length=128, truncation=True)["input_ids"]
    return data


# Tokenize the whole dataset in batches; drop the raw text columns so only
# model-ready fields (input_ids / attention_mask / labels) remain.
dataset = dataset.map(f1, batched=True, batch_size=1000, remove_columns=["document", "summary", "id"],
                      fn_kwargs={"tokenizer": tokenizer})
print(f"dataset:{dataset}")


def collate_fn(data):
    """Collate a batch of feature dicts into padded tensors for T5.

    Pads ``labels`` to the longest label in the batch with -100 (the loss
    ignore index), lets the tokenizer pad input_ids/attention_mask, and
    builds ``decoder_input_ids`` by right-shifting the labels (T5's decoder
    start token is the pad token, id 0).
    """
    longest = max(len(item["labels"]) for item in data)

    # Extend every label sequence to the batch maximum with the ignore index.
    for item in data:
        shortfall = longest - len(item["labels"])
        item["labels"] = item["labels"] + [-100] * shortfall

    batch = tokenizer.pad(
        encoded_inputs=data,
        padding=True,
        return_tensors="pt"
    )

    # Shift labels one position to the right to form decoder inputs;
    # position 0 stays 0 (pad = decoder start) and -100 markers become 0.
    shifted = torch.zeros_like(batch["labels"])
    shifted[:, 1:] = batch["labels"][:, :-1]
    shifted[shifted == -100] = 0
    batch["decoder_input_ids"] = shifted
    return batch


# Smoke-test collate_fn on a tiny hand-built batch of unequal lengths.
data = [{
    "input_ids": [1, 2, 3, 4, 5, 6, 7],
    "attention_mask": [1, 1, 1, 1, 1, 1, 1],
    "labels": [11, 12, 13]
}, {
    "input_ids": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
    "attention_mask": [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    "labels": [11, 12, 13, 14, 15]
}]

data = collate_fn(data)
print(f"data:{data}")

# Training DataLoader; batch_size=1 keeps memory use minimal for this demo.
train_loader = torch.utils.data.DataLoader(
    dataset=dataset["train"],
    batch_size=1,
    collate_fn=collate_fn,
    shuffle=True,
    drop_last=True
)

# Inspect the tensor shapes of the first dozen batches.
for i, data in enumerate(train_loader):
    for k, v in data.items():
        print(f"{k}:{v.shape}")
    if i > 10:
        break
    print("--------")


class SummaryModel(torch.nn.Module):
    """t5-small encoder/decoder with an explicit LM head for summarization.

    Re-implements the seq2seq LM forward pass manually: the bare T5Model
    supplies encoder/decoder hidden states, and a separate linear layer
    (initialized from the pretrained lm_head) projects to the vocabulary.
    """

    def __init__(self):
        super().__init__()
        # Bare T5 backbone without LM head; d_model=512 for t5-small.
        self.pretrained = T5Model.from_pretrained("t5-small", cache_dir="./MNIST/cache",
                                                  force_download=False)
        # Vocabulary projection: 512 hidden dims -> 32128 vocab tokens, no bias.
        self.fc = torch.nn.Linear(512, 32128, bias=False)
        # Load the pretrained lm_head weights into the projection layer.
        pretrained_fc_parameters = AutoModelForSeq2SeqLM.from_pretrained("t5-small", cache_dir="./MNIST/cache",
                                                                         force_download=False)
        self.fc.load_state_dict(pretrained_fc_parameters.lm_head.state_dict())

        # -100 label positions (padding) are excluded from the loss.
        self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=-100)

    def forward(self, input_ids, attention_mask, labels, decoder_input_ids):
        """Run encoder + decoder + LM head; return {"loss", "logits"}."""
        logits = self.pretrained.encoder(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        logits = self.pretrained.decoder(input_ids=decoder_input_ids,
                                         encoder_hidden_states=logits,
                                         encoder_attention_mask=attention_mask
                                         ).last_hidden_state
        # NOTE(review): scaling by d_model**-0.5 before the LM head appears
        # to mirror T5ForConditionalGeneration's tied-embedding rescaling —
        # confirm against the transformers T5 implementation.
        logits = logits * (512 ** -0.5)
        logits = self.fc(logits)
        loss = self.loss_fn(logits.reshape(-1, 32128), labels.reshape(-1))
        return {"loss": loss, "logits": logits}


model = SummaryModel()
device = dltools.try_gpu()
# Forward a single batch to verify shapes and loss before training.
for i, data in enumerate(train_loader):
    out = model(**data)
    print(f"loss:{out['loss']} logits:{out['logits'].shape}")
    break
print(f"参数量:{sum(i.numel() for i in model.parameters())}")


def _test(model: torch.nn.Module):
    """Evaluate `model` on ~100 test batches; print average loss and samples.

    Decodes a few inputs/predictions/targets at the end for a qualitative
    check. Uses the module-level `dataset`, `collate_fn`, `tokenizer`, and
    `device`.
    """
    model.eval()
    model.to(device)
    test_loader = torch.utils.data.DataLoader(
        dataset=dataset["test"],
        batch_size=1,
        collate_fn=collate_fn,
        shuffle=False,
        drop_last=False
    )

    total = 0
    loss = 0.0
    # no_grad: this is inference only. Without it, accumulating the loss
    # tensor across batches would retain every autograd graph in memory.
    with torch.no_grad():
        for i, data in enumerate(test_loader):
            input_ids, attention_mask = data["input_ids"].to(device), data["attention_mask"].to(device)
            labels, decoder_input_ids = data["labels"].to(device), data["decoder_input_ids"].to(device)
            out = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                decoder_input_ids=decoder_input_ids,
                labels=labels
            )
            # .item() converts to a plain float so no tensors are kept alive.
            loss += out["loss"].item()
            total += 1
            if i >= 100:
                print(f"loss avg:{loss / total}")
                # Clamp to the actual batch size: with batch_size=1 the
                # original fixed range(4) raised IndexError on indices 1-3.
                for j in range(min(4, input_ids.size(0))):
                    input = tokenizer.decode(input_ids[j])
                    pred = tokenizer.decode(out["logits"].argmax(dim=-1)[j])
                    label = tokenizer.decode(decoder_input_ids[j])
                    print(f"input:{input}")
                    print(f"pred:{pred}")
                    print(f"label:{label}")
                break


# Baseline evaluation before any fine-tuning.
_test(model)

def train(model: torch.nn.Module):
    """Fine-tune `model` for one pass over `train_loader` with AdamW.

    Applies weight decay to all parameters except biases and parameters
    inside LayerNorm modules (the standard HF Trainer convention), clips
    gradients, and steps a linear LR schedule. Uses the module-level
    `train_loader` and `device`.
    """
    # Names of parameters that should receive weight decay. The original
    # code computed this and then discarded the result; here it actually
    # drives the parameter grouping. NOTE(review): T5 uses its own
    # T5LayerNorm class, so torch.nn.LayerNorm may match no submodule —
    # consider transformers' ALL_LAYERNORM_LAYERS for full coverage.
    decay_parameters = get_parameter_names(model, [torch.nn.LayerNorm])
    decay_parameters = [name for name in decay_parameters if "bias" not in name]
    parameters = [
        {
            "params": [param for name, param in model.named_parameters() if name in decay_parameters],
            "weight_decay": 1e-2
        },
        {
            "params": [param for name, param in model.named_parameters() if name not in decay_parameters],
            "weight_decay": 0.0
        }
    ]

    optimizer = torch.optim.AdamW(parameters, lr=2e-5)
    scheduler = get_scheduler(
        name="linear",
        num_warmup_steps=0,
        num_training_steps=len(train_loader),
        optimizer=optimizer
    )
    model.to(device)
    model.train()
    for i, data in enumerate(train_loader):
        input_ids, attention_mask = data["input_ids"].to(device), data["attention_mask"].to(device)
        labels, decoder_input_ids = data["labels"].to(device), data["decoder_input_ids"].to(device)
        # optimizer.zero_grad() covers every parameter group; the original
        # extra model.zero_grad() call was redundant.
        optimizer.zero_grad()
        out = model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            labels=labels
        )
        loss = out["loss"]
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        scheduler.step()
        if i % 10 == 0:
            print(f"{i} loss:{loss}")


# Fine-tune for one pass over the (subsampled) training split.
train(model)
