#!/usr/bin/env python
import numpy as np
import torch

from jieba_tokenizer import MyTokenizer, text

# Create the tokenizer object
from model import GPTConfig, GPT

# ---------------------------------------------------------------------------
# Configuration and one-time setup: hyperparameters, model, optimizer,
# gradient scaler, tokenizer, and the encoded training corpus.
# ---------------------------------------------------------------------------
device = "mps"

block_size = 12  # context length (tokens per training window)
batch_size = 3
# =================#
n_layer = 12
n_head = 12
n_embd = 768
dropout = 0.0  # for pretraining 0 is good, for finetuning try 0.1+
bias = False  # do we use bias inside LayerNorm and Linear layers?

# model init
model_args = dict(n_layer=n_layer, n_head=n_head, n_embd=n_embd, block_size=block_size,
                  bias=bias, vocab_size=None, dropout=dropout)  # start with model_args from command line

eval_iters = 50  # number of training iterations performed below
model_args['vocab_size'] = 310  # must match the tokenizer's dictionary size — TODO confirm against MyTokenizer
gptconf = GPTConfig(**model_args)
model = GPT(gptconf)
model.to(device)
# max_iters = 600000  # total number of training iterations
weight_decay = 1e-1
beta1 = 0.9
beta2 = 0.95
# adamw optimizer
learning_rate = 6e-4  # max learning rate
optimizer = model.configure_optimizers(weight_decay, learning_rate, (beta1, beta2), device)
# Gradient scaling is a CUDA-only mixed-precision feature. The original
# unconditional GradScaler() logged a warning and disabled itself on this
# "mps" device anyway; make that no-op explicit so the .scale()/.step()/
# .update() calls below remain valid pass-throughs without the warning.
scaler = torch.cuda.amp.GradScaler(enabled=(device == "cuda"))

tokenizer = MyTokenizer()
tokenizer.build_dict(text)
data = tokenizer.encode(text)  # full corpus as a flat list of token ids

# ---------------------------------------------------------------------------
# Training loop: sample random contiguous windows from the token stream and
# take one optimizer step per mini-batch.
# ---------------------------------------------------------------------------
# Convert the token list to a tensor ONCE; the original rebuilt a numpy
# array for every window on every iteration inside the loop.
data_t = torch.tensor(data, dtype=torch.long)
for k in range(eval_iters):
    model.train()
    # Random start offsets for batch_size windows of block_size tokens each.
    ix = torch.randint(len(data_t) - block_size, (batch_size,))
    x = torch.stack([data_t[i:i + block_size] for i in ix])
    # Targets are the inputs shifted one token to the right.
    y = torch.stack([data_t[i + 1:i + 1 + block_size] for i in ix])
    _, loss = model(x.to(device), y.to(device))
    # Scaler is a pass-through on non-CUDA devices, so this is a plain
    # backward/step when grad scaling is disabled.
    scaler.scale(loss).backward()
    scaler.step(optimizer)
    scaler.update()
    # flush the gradients as soon as we can, no need for this memory anymore
    optimizer.zero_grad(set_to_none=True)

    # Print the scalar value rather than the full tensor repr.
    print(loss.item())

# ---------------------------------------------------------------------------
# Sampling: seed the model with an encoded prompt and print one completion.
# ---------------------------------------------------------------------------
model.eval()
max_new_tokens = 12

# Encode the seed string and shape it as a (1, T) batch on the target device.
start_ids = tokenizer.encode("喝酒")
x = torch.tensor(start_ids, dtype=torch.long, device=device).unsqueeze(0)
temperature = 1  # 1.0 = no change, < 1.0 = less random, > 1.0 = more random, in predictions
top_k = 12  # retain only the top_k most likely tokens, clamp others to have 0 probability
# run generation
with torch.no_grad():
    for _ in range(1):
        sampled = model.generate(x, max_new_tokens, temperature=temperature, top_k=top_k)
        print(tokenizer.decode(sampled[0].tolist()))

# NOTE(review): this guard is misleading — everything above runs at import
# time as a module-level side effect, so the guard only gates this final
# print. Consider wrapping the script body in a main() function.
if __name__ == '__main__':
    print("==========over==========")
