import torch
import torch.utils.data
from transformers import AutoTokenizer, AutoModelForCausalLM, GPT2Model, get_scheduler

import dltools

# HF_ENDPOINT=https://hf-mirror.com
# Load the pretrained Chinese GPT-2 tokenizer (cached under ./MNIST/cache).
tokenizer = AutoTokenizer.from_pretrained(
    "uer/gpt2-chinese-cluecorpussmall",
    cache_dir="./MNIST/cache",
    force_download=False,
)
print(f"tokenizer:{tokenizer}")

# Sanity check: batch-encode two classic poems and inspect the result.
res = tokenizer.batch_encode_plus([
    "窗前明月光，疑是地上霜。举头望明月，低头思故乡。",
    "李白乘舟将欲行，忽闻岸上踏歌声。桃花潭水深千尺，不及汪伦送我情。"
])
print(f"res:{res}")


class PoetryDataset(torch.utils.data.Dataset):
    """Line-per-poem dataset backed by a plain-text file (one poem per line)."""

    def __init__(self):
        # Each line of the file is one poem; strip the trailing newline.
        with open("./MNIST/chinese_poems.txt", "r", encoding="utf-8") as f:
            self.lines = [raw.strip() for raw in f.readlines()]

    def __len__(self):
        return len(self.lines)

    def __getitem__(self, item):
        return self.lines[item]


dataset = PoetryDataset()
# Show the dataset size and one sample to confirm the file parsed correctly.
sample = dataset[0]
print(f"{len(dataset)} dataset[0]:{sample}")


def collate_fn(batch):
    """Tokenize a list of poem strings into padded tensors.

    For causal LM training the labels are simply a copy of the input ids;
    the model shifts them internally.
    """
    encoded = tokenizer.batch_encode_plus(batch, padding=True, return_tensors="pt")
    encoded["labels"] = encoded["input_ids"].clone()
    return encoded


train_loader = torch.utils.data.DataLoader(
    dataset=dataset,
    batch_size=8,
    shuffle=True,
    drop_last=True,
    collate_fn=collate_fn,
)
# Peek at a single batch to confirm the collated tensor shapes.
first_batch = next(iter(train_loader))
print(f"data:{first_batch}")
for key, tensor in first_batch.items():
    print(f"{key}:{tensor.shape}")
# Load the pretrained causal-LM head and move it to the available device.
model = AutoModelForCausalLM.from_pretrained(
    "uer/gpt2-chinese-cluecorpussmall",
    cache_dir="./MNIST/cache",
    force_download=False,
)
device = dltools.try_gpu()
model.to(device)
print(f"参数量:{sum(p.numel() for p in model.parameters())}")

# Smoke test: run one batch through the model and decode the argmax logits.
for batch in train_loader:
    inputs = {k: v.to(device) for k, v in batch.items()}
    out = model(**inputs)
    print(f"loss:{out['loss']} logits.shape:{out['logits'].shape}")
    print(f"{tokenizer.decode(out['logits'][0].argmax(dim=-1))}")
    break


def train():
    """Fine-tune the global `model` on `train_loader` for up to 100 steps.

    Uses AdamW with a linear warmup schedule, gradient clipping at norm 1.0,
    and logs loss / learning rate / token accuracy every 10 steps.
    """
    optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
    scheduler = get_scheduler(
        name="linear",
        num_warmup_steps=10,
        num_training_steps=len(train_loader),
        optimizer=optimizer,
    )
    model.train()
    for i, data in enumerate(train_loader):
        input_ids = data["input_ids"].to(device)
        token_type_ids = data["token_type_ids"].to(device)
        attention_mask = data["attention_mask"].to(device)
        labels = data["labels"].to(device)
        # One zero_grad is enough; the original's extra model.zero_grad()
        # was redundant.
        optimizer.zero_grad()
        out = model(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            labels=labels,
        )
        loss = out["loss"]
        # BUG FIX: the original never called backward(), so no gradients were
        # ever computed and clip_grad_norm_/optimizer.step() were no-ops —
        # the model was never actually trained.
        loss.backward()
        # Clip gradients AFTER backward, BEFORE the optimizer step.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        scheduler.step()
        if i % 10 == 0:
            # labels start with [CLS] while logits predict the *next* token,
            # so align by dropping the first label and the last prediction
            # (the position after the final token predicts nothing useful).
            _label = labels[:, 1:]
            _out = out["logits"].argmax(dim=-1)[:, :-1]
            # [PAD] is token id 0 — exclude padding from the accuracy count.
            select = _label != 0
            _label = _label[select]
            _out = _out[select]
            accuracy = (_label == _out).sum().item() / _label.numel()
            lr = optimizer.state_dict()["param_groups"][0]["lr"]
            print(f"{i} loss:{loss:.3f} lr:{lr} accuracy:{accuracy:.3f}")
        if i == 100:
            break

# train()


def generate(text, row, col, model1):
    """Greedily generate three poems of `row` lines x `col` characters.

    Starting from the prompt `text`, repeatedly feeds the growing sequence
    to `model1` and appends the argmax next token, forcing "，"/"。" at line
    boundaries. Prints the three generated poems.

    Args:
        text: prompt string the poems start with.
        row: number of lines per poem.
        col: characters per line (excluding punctuation).
        model1: the causal-LM model to generate with.
    """
    # BUG FIX: the original called model.eval() on the *global* model, but
    # generation runs through the `model1` argument — so the model actually
    # used could stay in train mode (dropout active). Switch model1 itself.
    model1.eval()

    def generate_loop(data):
        # One greedy decoding step: append the argmax next token (or forced
        # punctuation) to every sequence in the batch, then recurse.
        with torch.no_grad():
            input_ids = data["input_ids"].to(device)
            token_type_ids = data["token_type_ids"].to(device)
            attention_mask = data["attention_mask"].to(device)
            labels = data["labels"].to(device)
            out = model1(
                input_ids=input_ids,
                token_type_ids=token_type_ids,
                attention_mask=attention_mask,
                labels=labels,
            )
        # Debug: decode the argmax prediction at every position of sample 0.
        test_out = out["logits"][0].softmax(dim=-1)
        print(f"test_out:{tokenizer.decode(test_out.argmax(dim=-1))}")

        # Distribution over the next token = last position of each sequence.
        _out = out["logits"][:, -1].softmax(dim=-1)

        # Forbid special tokens; punctuation is inserted deterministically
        # below, so it is banned from free generation too.
        _out[:, tokenizer.sep_token_id] = 0
        _out[:, tokenizer.unk_token_id] = 0
        _out[:, tokenizer.pad_token_id] = 0
        _out[:, tokenizer.get_vocab()["，"]] = 0
        _out[:, tokenizer.get_vocab()["。"]] = 0
        _out = _out.argmax(dim=-1).reshape(-1, 1)

        # Force punctuation at line boundaries: each finished line occupies
        # col+1 tokens, so an integral c means a line just completed.
        # Alternate "。" (even lines) and "，" (odd lines).
        c = input_ids.shape[1] / (col + 1)
        if c % 1 == 0:
            if c % 2 == 0:
                _out[:, 0] = tokenizer.get_vocab()["。"]
            else:
                _out[:, 0] = tokenizer.get_vocab()["，"]
        data["input_ids"] = torch.cat([input_ids, _out], dim=1)
        data["token_type_ids"] = torch.zeros_like(data["input_ids"])
        data["attention_mask"] = torch.ones_like(data["input_ids"])
        data["labels"] = data["input_ids"].clone()
        # Stop once the poem reaches row*(col+1) tokens plus the leading [CLS].
        if data["input_ids"].shape[1] >= (row * col + row + 1):
            return data
        return generate_loop(data)

    # Encode three copies of the prompt so three poems generate in one batch.
    data = tokenizer.batch_encode_plus([text] * 3, return_tensors="pt")
    # Drop the trailing [SEP] so generation continues from the prompt.
    data["input_ids"] = data["input_ids"][:, :-1]
    data["token_type_ids"] = torch.zeros_like(data["input_ids"])
    data["attention_mask"] = torch.ones_like(data["input_ids"])
    data["labels"] = data["input_ids"].clone()
    data = generate_loop(data)
    for i in range(3):
        print(f"{i} {tokenizer.decode(data['input_ids'][i])}")


# SECURITY NOTE(review): torch.load of a full model object is pickle-based
# deserialization — it can execute arbitrary code. Only load checkpoints from
# a trusted source; prefer saving/loading a state_dict with
# torch.load(..., weights_only=True) where possible.
model_load = torch.load("./modelSave/al_poetry.model")
model_load.to(device)
# Generate three 4-line, 5-character poems from the prompt.
generate("床前明月", 4, 5, model_load)
