import os

import torch.utils.data
from transformers import AutoTokenizer, BatchEncoding, default_data_collator, AutoModelForCausalLM, RobertaModel, get_scheduler
from datasets import load_from_disk, load_dataset

import dltools

# HF_ENDPOINT=https://hf-mirror.com
# Load the distilroberta tokenizer from the local cache (downloaded once;
# set HF_ENDPOINT above if a mirror is needed).
tokenizer = AutoTokenizer.from_pretrained("distilroberta-base", cache_dir="./MNIST/cache", force_download=False)
print(f"tokenizer{tokenizer}")
# Smoke-test the tokenizer on two sample sentences.
tokens = tokenizer.batch_encode_plus([
    "hide new secretions from the parental units",
    "this movie like a gold"
])
print(f"tokens:{tokens}")
# GLUE SST-2, previously stored locally with datasets.save_to_disk.
dataset = load_from_disk("./MNIST/glue_sst2")
print(f"dataset:{dataset}")


def f1(batch, tokenizer):
    """Tokenize one batch of SST-2 examples.

    Returns the tokenizer's encoding dict (input_ids, attention_mask, ...)
    for the batch's "sentence" column.
    """
    sentences = batch["sentence"]
    return tokenizer.batch_encode_plus(sentences)


# Tokenize the whole dataset in batches; drop the raw columns so only the
# tokenizer outputs (input_ids, attention_mask) remain.
dataset = dataset.map(f1, batched=True, batch_size=1000, remove_columns=["sentence", "idx", "label"],
                      fn_kwargs={"tokenizer": tokenizer})
print(f"dataset:{dataset}")


def f2(data, tokenizer):
    """Return a boolean keep-mask: True where an example has >= 9 token ids.

    `tokenizer` is unused but kept because dataset.filter forwards it via
    fn_kwargs.
    """
    keep = []
    for ids in data["input_ids"]:
        keep.append(len(ids) >= 9)
    return keep


# Keep only examples long enough to cut a fixed 9-token window from.
dataset = dataset.filter(f2, batched=True, batch_size=1000, fn_kwargs={"tokenizer": tokenizer})
print(f"dataset:{dataset}")

# Show the mask token and its vocabulary id.
print(f"tokenizer.mask_token:{tokenizer.mask_token}")
print(tokenizer.get_vocab()[tokenizer.mask_token])


def f2(data, tokenizer):
    """Truncate each example to a 9-token window and mask its middle token.

    Per example: keep token ids 0-3, replace index 4 with <mask>, keep ids
    5-7, and close the window with the tokenizer's sep/eos token.  The label
    row is -100 everywhere (ignored by CrossEntropyLoss) except index 4,
    which holds the original, masked-out token id.

    Assumes every example has at least 9 input ids — guaranteed by the
    filter step applied earlier in this script.
    """
    mask_id = tokenizer.get_vocab()[tokenizer.mask_token]
    sep_id = tokenizer.sep_token_id  # was a hard-coded magic constant 2
    # Compute labels first: they read input_ids before it is overwritten.
    data["labels"] = [[-100] * 4 + [temp[4]] + [-100] * 4 for temp in data["input_ids"]]
    data["input_ids"] = [temp[:4] + [mask_id] + temp[5:8] + [sep_id] for temp in data["input_ids"]]
    # Build independent rows: `[[1] * 9] * n` would alias one shared list.
    data["attention_mask"] = [[1] * 9 for _ in data["attention_mask"]]
    return data


# Apply the masking transform; every example is now exactly 9 tokens long.
dataset = dataset.map(f2, batched=True, batch_size=1000, fn_kwargs={"tokenizer": tokenizer})
print(f"dataset:{dataset}")
print(f"{dataset['train'][0]}")

# All examples share one length, so default_data_collator can stack them
# into tensors without padding.
train_loader = torch.utils.data.DataLoader(dataset=dataset["train"],
                                           batch_size=16,
                                           collate_fn=default_data_collator,
                                           shuffle=True,
                                           drop_last=True
                                           )

# Sanity-check one collated batch.
for data in train_loader:
    print(f"data:{data}")
    break


class PredMidWordModel(torch.nn.Module):
    """RoBERTa encoder plus an MLM head that predicts the masked middle token.

    The head (dense -> GELU -> LayerNorm -> decoder) mirrors the pretrained
    lm_head structure and is initialised from its weights.
    """

    def __init__(self):
        super().__init__()
        self.pretrained = RobertaModel.from_pretrained("distilroberta-base", cache_dir="./MNIST/cache",
                                                       force_download=False)
        # The decoder projects hidden states back to the vocabulary; give it
        # a fresh zero bias of the right size before loading weights into it.
        decoder = torch.nn.Linear(768, tokenizer.vocab_size)
        decoder.bias = torch.nn.Parameter(torch.zeros(tokenizer.vocab_size))
        self.fc = torch.nn.Sequential(
            torch.nn.Linear(768, 768),
            torch.nn.GELU(),
            torch.nn.LayerNorm(768),
            decoder
        )

        # Copy the pretrained lm_head weights into the new head.
        pretrained_parameters = AutoModelForCausalLM.from_pretrained("distilroberta-base", cache_dir="./MNIST/cache",
                                                                     force_download=False)
        self.fc[0].load_state_dict(pretrained_parameters.lm_head.dense.state_dict())
        self.fc[2].load_state_dict(pretrained_parameters.lm_head.layer_norm.state_dict())
        self.fc[3].load_state_dict(pretrained_parameters.lm_head.decoder.state_dict())
        self.loss_fn = torch.nn.CrossEntropyLoss()

    def forward(self, input_ids, attention_mask, labels=None):
        """Return {"loss", "logits"}; loss is None when labels is None."""
        hidden = self.pretrained(input_ids, attention_mask=attention_mask).last_hidden_state
        logits = self.fc(hidden)

        loss = None
        if labels is not None:
            # Masked-LM objective: the label at position i is predicted by the
            # logits at position i — no causal shift.  The original code used
            # the causal-LM shift (logits[:, :-1] vs labels[:, 1:]), which
            # trained position 3 to predict the token masked at position 4,
            # while evaluation reads argmax at position 4 — a mismatch.
            # CrossEntropyLoss ignores the -100 padding labels.
            loss = self.loss_fn(logits.reshape(-1, tokenizer.vocab_size), labels.reshape(-1))
        return {"loss": loss, "logits": logits}

# Pick GPU if available, else CPU.
device = dltools.try_gpu()
model = PredMidWordModel()
# "参数量" means "parameter count".
print(f"参数量:{sum(i.numel() for i in model.parameters())}")
# Forward one batch (on CPU) as a smoke test before training.
for data in train_loader:
    out = model(**data)
    print(f"loss:{out['loss']} logits.shape:{out['logits'].shape}")
    break


# Named _test (not test*) so the IDE does not offer to run it as a unit test.
def _test(model):
    """Evaluate masked-middle-token accuracy on up to 51 test batches.

    Prints periodic predictions, the overall accuracy, and decodes the last
    evaluated batch for a qualitative look.  Expects `model` (and the data)
    on the CPU.
    """
    model.eval()

    test_loader = torch.utils.data.DataLoader(dataset=dataset["test"],
                                              batch_size=8,
                                              collate_fn=default_data_collator,
                                              shuffle=False,
                                              drop_last=False
                                              )

    correct = 0
    total = 0

    for i, data in enumerate(test_loader):
        # Gold middle token sits at index 4 of each label row.
        labels = data["labels"][:, 4].clone()
        # Disable loss computation inside the model.
        data["labels"] = None
        with torch.no_grad():
            out = model(**data)

        # Predicted token at the masked position.
        pred_mid_word = out["logits"].argmax(dim=2)[:, 4]
        correct += (labels == pred_mid_word).sum().item()
        total += len(labels)

        if i % 10 == 0:
            print(f"{i}: labels:{labels} pred_mid_word:{pred_mid_word}")
        if i >= 50:
            break

    print(f"acc: {correct / total}")
    # Decode the last evaluated batch.  Iterate its actual size: with
    # drop_last=False the final batch may hold fewer than 8 examples, so the
    # original hard-coded range(8) could raise an IndexError.
    for i in range(len(labels)):
        print(tokenizer.decode(data["input_ids"][i]))
        print(tokenizer.decode(labels[i]))
        print(tokenizer.decode(pred_mid_word[i]))
        print("-------------------")


# Baseline accuracy before fine-tuning.
_test(model)


# Training
def train():
    """Fine-tune for one pass over train_loader with linear LR decay.

    Uses the module-level `model`, `train_loader` and `device`; saves the
    fine-tuned model to ./modelSave/predMidWord.model.
    """
    optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
    scheduler = get_scheduler(name="linear",
                              num_warmup_steps=0,
                              num_training_steps=len(train_loader),
                              optimizer=optimizer
                              )
    model.to(device)
    model.train()
    for i, data in enumerate(train_loader):
        input_ids = data["input_ids"].to(device)
        attention_mask = data["attention_mask"].to(device)
        labels = data["labels"].to(device)
        # optimizer.zero_grad() already clears every model parameter's
        # gradient; the original's extra model.zero_grad() was redundant.
        optimizer.zero_grad()
        out = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
        loss = out["loss"]
        loss.backward()
        # Gradient clipping for stability.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        scheduler.step()
        if i % 50 == 0:
            # labels is already on `device`; just slice the gold middle token.
            labels = labels[:, 4]
            pred_mid_word = out["logits"].argmax(dim=2)[:, 4]
            correct = (labels == pred_mid_word).sum().item()
            accuracy = correct / len(labels)
            lr = optimizer.state_dict()["param_groups"][0]["lr"]
            print(f"{i} loss:{loss} acc:{accuracy} lr:{lr}")

    # Create the save directory if missing so torch.save does not fail.
    os.makedirs("./modelSave", exist_ok=True)
    torch.save(model, "./modelSave/predMidWord.model")


train()

# Reload the saved checkpoint onto CPU and re-evaluate, confirming both the
# fine-tuning gain and the save/load round-trip.
# NOTE(review): torch.save/torch.load of a full module is pickle-based;
# saving a state_dict is the recommended, more portable pattern.
model2 = torch.load("./modelSave/predMidWord.model", map_location="cpu")
_test(model2)



























