import torch
import torch.utils.data
from transformers import AutoTokenizer, MarianModel, AutoModelForSeq2SeqLM, get_scheduler
from datasets import load_from_disk, load_dataset
import dltools

# Set HF_ENDPOINT=https://hf-mirror.com in the environment to download via the HF mirror.
# Load the Marian en->ro tokenizer; cache_dir keeps downloads local and
# force_download=False reuses any cached copy.
tokenizer = AutoTokenizer.from_pretrained(
    "Helsinki-NLP/opus-mt-en-ro",
    use_fast=True,
    cache_dir="./MNIST/cache",
    force_download=False)
print(f"tokenizer:{tokenizer}")
# Smoke test: the inner two-element list is treated as a text pair by batch_encode_plus.
res = tokenizer.batch_encode_plus([["Like all truly charismatic people","he can work his magic on both men and women"]])
print(f"res:{res}")
# WMT16 Romanian-English parallel corpus.
dataset = load_dataset(path="wmt16", name="ro-en",cache_dir="./MNIST/cache")
print(f"dataset:{dataset}")
print(f"dataset['train'][0]:{dataset['train'][0]}")
# Keep a random 20k subset of the training split to speed up this demo run.
dataset["train"]=dataset["train"].shuffle().select(range(20000))

def f1(data, tokenizer):
    """Tokenize one batch of WMT translation pairs for seq2seq training.

    Args:
        data: batch dict from ``datasets.map`` with a ``"translation"``
            column whose items look like ``{"en": ..., "ro": ...}``.
        tokenizer: seq2seq tokenizer; source text is encoded normally,
            target text inside ``as_target_tokenizer()``.

    Returns:
        Encoding dict with ``input_ids``/``attention_mask`` for the
        English source plus a ``labels`` list of Romanian token ids.
    """
    ens = [pair["en"] for pair in data["translation"]]
    ros = [pair["ro"] for pair in data["translation"]]
    result = tokenizer.batch_encode_plus(ens, max_length=128, truncation=True)
    # https://zhuanlan.zhihu.com/p/710946539 -- as_target_tokenizer switches to
    # target-language tokenization so labels and inputs can use different
    # vocabularies/processing in translation tasks.
    with tokenizer.as_target_tokenizer():
        result["labels"] = tokenizer.batch_encode_plus(ros, max_length=128, truncation=True)["input_ids"]
    return result

# Batched tokenization of every split; drop the raw "translation" column
# so only model-ready fields (input_ids/attention_mask/labels) remain.
dataset = dataset.map(f1, batched=True, batch_size=1000, fn_kwargs={"tokenizer": tokenizer}, remove_columns=["translation"])
print(f"dataset:{dataset}")


# Scratch check: max() over a list of lists compares rows lexicographically,
# so the row starting with the largest first element wins.
test1 = [
    [1, 2, 3],
    [4, 5, 6],
]
largest_row = max(test1)
print(f"max:{largest_row}")


def collate_fn(data):
    """Collate tokenized examples into padded tensors plus decoder inputs.

    Labels are right-padded with -100 (CrossEntropyLoss's default
    ignore_index) so padding never contributes to the loss. The encoder
    side is padded by ``tokenizer.pad``. ``decoder_input_ids`` are the
    labels shifted right by one position, with the pad token in slot 0
    (Marian uses the pad id as decoder start token) and every -100
    replaced by the real pad id so the decoder never sees an invalid id.
    """
    # Pad every label sequence to the longest one in the batch with -100.
    max_label_length = max(len(i["labels"]) for i in data)
    for i in data:
        pads = [-100]*(max_label_length - len(i["labels"]))
        i["labels"] = i["labels"] + pads
    # input_ids / attention_mask are padded by tokenizer.pad; no need to do it by hand.
    data = tokenizer.pad(
        encoded_inputs=data,
        padding=True,
        max_length=None,
        pad_to_multiple_of=None,
        return_tensors="pt"
    )
    # BUGFIX: the Marian vocab has no literal "pad" entry (the token is
    # "<pad>"), so tokenizer.get_vocab()["pad"] raised KeyError; use the
    # tokenizer's pad_token_id instead.
    pad_id = tokenizer.pad_token_id
    data["decoder_input_ids"] = torch.full_like(data["labels"], pad_id)
    data["decoder_input_ids"][:, 1:] = data["labels"][:, :-1]
    data["decoder_input_ids"][data["decoder_input_ids"] == -100] = pad_id
    return data


# Training DataLoader over the tokenized 20k subset; collate_fn pads each batch.
train_loader = torch.utils.data.DataLoader(
    dataset=dataset["train"],
    batch_size=8,
    collate_fn=collate_fn,
    shuffle=True,
    drop_last=True
)
# Smoke-check one batch: print the tensors and their shapes, then stop.
for data in train_loader:
    print(f"data:{data}")
    for k, v in data.items():
        print(f"{k}:{v.shape}")
    break


class EN2ROModel(torch.nn.Module):
    """MarianModel backbone with a manually re-attached LM head.

    The bare MarianModel has no lm_head, so we add our own Linear
    projection plus a ``final_logits_bias`` buffer (the pretrained model
    has no trainable bias here; the buffer stays zero) and copy the
    pretrained lm_head weights into the projection.
    """

    def __init__(self):
        super().__init__()
        self.pretrained = MarianModel.from_pretrained("Helsinki-NLP/opus-mt-en-ro", cache_dir="./MNIST/cache", force_download=False)
        # Register a buffer (not a parameter): our own output-layer bias,
        # mirroring the zero final_logits_bias of the pretrained model.
        self.register_buffer("final_logits_bias", torch.zeros(1, tokenizer.vocab_size))
        # Read the hidden size from the checkpoint config instead of
        # hard-coding 512, so other Marian checkpoints also work.
        self.fc = torch.nn.Linear(self.pretrained.config.d_model, tokenizer.vocab_size, bias=False)

        # Copy the pretrained LM head weights into our projection layer.
        pretrained_parameter = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-ro", cache_dir="./MNIST/cache", force_download=False)
        self.fc.load_state_dict(pretrained_parameter.lm_head.state_dict())
        # CrossEntropyLoss applies log-softmax internally, so no softmax is
        # needed after fc; its default ignore_index=-100 skips padded labels.
        self.loss_fn = torch.nn.CrossEntropyLoss()

    def forward(self, input_ids, attention_mask, labels, decoder_input_ids):
        """Run encoder-decoder, project to vocab size, return loss + logits."""
        logits = self.pretrained(
            input_ids=input_ids,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids
        ).last_hidden_state
        logits = self.fc(logits) + self.final_logits_bias
        loss = self.loss_fn(logits.reshape(-1,tokenizer.vocab_size), labels.reshape(-1))
        return {"loss":loss, "logits":logits}


# Build the model and pick a device (GPU if available, else CPU).
model = EN2ROModel()
device = dltools.try_gpu()


# Forward-pass smoke test on one batch before training.
for data in train_loader:
    out = model(**data)
    print(f"loss:{out['loss']} logits.shape:{out['logits'].shape}")
    break


def _test(model):
    """Preview model quality on a few test batches.

    Uses teacher forcing (decoder_input_ids come from the gold labels), so
    this previews token-level argmax predictions rather than free-running
    generation. Prints sample (source, prediction, reference) triples.
    """
    model.to(device)
    model.eval()  # BUGFIX: disable dropout etc. during evaluation
    test_loader = torch.utils.data.DataLoader(
        dataset=dataset["test"],
        batch_size=8,
        collate_fn=collate_fn,
        shuffle=True,
        drop_last=True
    )
    predictions = []
    references = []
    for i, data in enumerate(test_loader):
        input_ids, attention_mask,labels, decoder_input_ids = data["input_ids"].to(device), data["attention_mask"].to(device), data["labels"].to(device), data["decoder_input_ids"].to(device)
        with torch.no_grad():
            out = model(input_ids, attention_mask, labels, decoder_input_ids)
        pred = tokenizer.batch_decode(out["logits"].argmax(dim=-1))
        # BUGFIX: labels are padded with -100, which is not a valid token id
        # and makes batch_decode fail; swap -100 for the pad id before decoding.
        decodable_labels = torch.where(labels == -100, torch.full_like(labels, tokenizer.pad_token_id), labels)
        label = tokenizer.batch_decode(decodable_labels)
        predictions.append(pred)
        references.append(label)
        if (i+1) % 2 == 0:
            print(i)
            print(f"en:{tokenizer.decode(input_ids[0])}")
            print(f"pred:{pred[0]}")
            print(f"label:{label[0]}")
        if i == 10:
            break


# _test(model)


def train(model:torch.nn.Module):
    """Fine-tune for up to 100 steps with AdamW and a linear warmup schedule."""
    model.to(device)
    # 2e-5 = 0.00002, the usual transformer fine-tuning learning rate.
    optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
    scheduler = get_scheduler(name="linear", num_warmup_steps=20, num_training_steps=len(train_loader), optimizer=optimizer)
    model.train()
    for i, data in enumerate(train_loader):
        # optimizer.zero_grad() already clears all parameter grads; the
        # redundant model.zero_grad() call was removed.
        optimizer.zero_grad()
        input_ids, attention_mask, labels, decoder_input_ids = data["input_ids"].to(device), data["attention_mask"].to(device), data["labels"].to(device), data["decoder_input_ids"].to(device)
        out = model(input_ids, attention_mask, labels, decoder_input_ids)
        loss = out["loss"]
        loss.backward()
        # Clip gradient norm to 1.0 to stabilize fine-tuning.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        scheduler.step()
        if i % 10 == 0:
            print(f"{i} loss:{loss:.3f} lr:{optimizer.state_dict()['param_groups'][0]['lr']}")
        if i == 100:
            break

# Run a short fine-tuning pass, then preview predictions on the test split.
train(model)
_test(model)