import torch
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer

from zkl_knowedit_tgke import ExtendedEmbedding, ExtendedLinear

# config

model_name = "Qwen/Qwen2-0.5B-Instruct"

# Fall back to CPU when no CUDA device is available.
device = "cuda" if torch.cuda.is_available() else "cpu"

# execution

# Plain string: the original used an f-string with no placeholders (ruff F541).
print("Loading Model and Tokenizer")
model = AutoModelForCausalLM.from_pretrained(model_name).to(device=device)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Freeze the whole base model; only the extension modules created later
# in this script receive gradients and are handed to the optimizer.
for param in model.parameters():
    param.requires_grad = False

# Grab the original (frozen) input embedding and output projection so the
# extension wrappers can delegate to them for in-vocabulary token ids.
# noinspection PyTypeChecker
org_embedding: torch.nn.Embedding = model.model.embed_tokens
# noinspection PyTypeChecker
org_linear: torch.nn.Linear = model.lm_head
org_vocab_size = org_embedding.num_embeddings

# Four extra trainable token ids appended after the original vocabulary
# (used below as org_vocab_size + 0..3: a prefix token, a separator, and
# one key token per training sample).
ext_vocab_size = 1 + 1 + 2
ext_embedding = torch.nn.Embedding(ext_vocab_size, org_embedding.embedding_dim, device=device)
ext_linear = torch.nn.Linear(org_embedding.embedding_dim, ext_vocab_size, device=device)

# Swap the model's embedding and head for wrappers that route ids >= org_vocab_size
# to the trainable extension modules (presumably — behavior of ExtendedEmbedding /
# ExtendedLinear is defined in zkl_knowedit_tgke, not visible here).
# NOTE(review): if this checkpoint ties lm_head to embed_tokens
# (tie_word_embeddings), replacing them independently breaks the tie — confirm.
model.model.embed_tokens = ExtendedEmbedding(org_embedding, ext_embedding)
model.lm_head = ExtendedLinear(org_linear, ext_linear)

# Only the extension parameters are optimized; the base model stays frozen.
optimizer = torch.optim.Adam([
    *ext_embedding.parameters(),
    *ext_linear.parameters(),
], lr=1e-3)

content0_tokens_wid = tokenizer.encode("The capital city of China.")
content1_tokens_wid = tokenizer.encode("The capital city of Russia.")

# Name the four extension-token ids that live just past the original vocab:
# a shared prefix, a shared separator, and one key token per sample.
prefix_wid = org_vocab_size + 0
separator_wid = org_vocab_size + 1
key0_wid = org_vocab_size + 2
key1_wid = org_vocab_size + 3

# Teacher forcing with a one-position shift: the input drops the last content
# token, the target drops the leading prefix token. The mask restricts the
# loss/metric to the content positions only.
# NOTE(review): torch.asarray needs both rows to have equal length — this holds
# only while both contents tokenize to the same number of ids; confirm.
batch_tokens_in_wid = torch.asarray(
    [[prefix_wid, key0_wid, separator_wid, *content0_tokens_wid[:-1]],
     [prefix_wid, key1_wid, separator_wid, *content1_tokens_wid[:-1]]],
    dtype=torch.int64, device=device)
batch_tokens_out_wid = torch.asarray(
    [[key0_wid, separator_wid, *content0_tokens_wid],
     [key1_wid, separator_wid, *content1_tokens_wid]],
    dtype=torch.int64, device=device)
batch_tokens_out_mask = torch.asarray(
    [[False, False, *([True] * len(content0_tokens_wid))],
     [False, False, *([True] * len(content1_tokens_wid))]],
    dtype=torch.bool, device=device)

for step in tqdm(range(500)):
    # Forward pass through the (frozen) model with the extended vocab modules.
    batch_tokens_out_logits = model(input_ids=batch_tokens_in_wid).logits
    # [batch_size, context_size, ext_total_vocab]

    # cross_entropy expects the class dimension second: [batch, vocab, context].
    batch_tokens_out_ce = torch.nn.functional.cross_entropy(
        torch.swapaxes(batch_tokens_out_logits, -1, -2),
        batch_tokens_out_wid, reduction='none')
    # [batch_size, context_size]

    # True token accuracy: does the argmax prediction match the target?
    # (The original reported the mean softmax probability of the target
    # token under the name "acc", which is not an accuracy.)
    batch_tokens_out_hit = (
        batch_tokens_out_logits.argmax(dim=-1) == batch_tokens_out_wid)
    # [batch_size, context_size]

    # Masked means over content positions only. Boolean indexing replaces the
    # prototype torch.masked.mean API with the equivalent standard expression.
    ce = batch_tokens_out_ce[batch_tokens_out_mask].mean()
    acc = batch_tokens_out_hit[batch_tokens_out_mask].float().mean()
    # []

    loss = ce

    print(f"acc={acc.detach().cpu().item():.4f}")

    # Conventional order: clear stale grads, backprop, then step. Equivalent to
    # the original (which zeroed after step), since grads start out empty.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
