# encoding: utf-8

import os
import json
import torch
from torch import nn
from torch.utils.data import dataset, DataLoader
from transformers import BertTokenizer
from tqdm import tqdm

from models import EncoderRNN, DecoderRNN, Seq2Seq

# Local path to the pretrained Chinese RoBERTa-wwm-ext checkpoint (HFL release).
pretrained_model = r"D:\codes\nlp_about\pretrained_model\hfl_chinese-roberta-wwm-ext"

# Module-level tokenizer shared by the dataset class and the model setup below.
tokenizer = BertTokenizer.from_pretrained(pretrained_model)


class MyDataSet(dataset.Dataset):
    """Dataset of (content, title) token-id pairs loaded from a JSON-lines file.

    Each non-empty line of the file is a JSON object; its "content" field
    becomes the encoder input and its "title" field the decoder target.
    Both texts are encoded with the module-level BERT tokenizer, truncated
    and padded to exactly ``max_len`` token ids.
    """

    def __init__(self, train_path, train: bool = True, max_len: int = 64):
        # max_len generalizes the previously hard-coded sequence length (64);
        # the default preserves the original behavior.
        self.max_len = max_len
        self.data = []   # encoder inputs: LongTensor of token ids, shape (max_len,)
        self.label = []  # decoder targets: LongTensor of token ids, shape (max_len,)
        self.load_data_from_json(train_path, train)

    def load_data_from_json(self, train_path, train: bool):
        """Read the JSON-lines file and tokenize every kept example.

        The final 1000 records are held out as the evaluation split:
        ``train=True`` keeps everything except those 1000, ``train=False``
        keeps only those 1000.
        """
        with open(train_path, "r", encoding="utf-8") as f:
            lines = f.readlines()
        _datas = [json.loads(line.strip()) for line in lines if line.strip()]

        if train:
            _datas = _datas[:-1000]
        else:
            _datas = _datas[-1000:]
        for _data in tqdm(_datas):
            content = _data.get("content", "")
            title = _data.get("title", "")

            inputs = torch.LongTensor(
                tokenizer.encode(content, max_length=self.max_len, truncation=True, padding="max_length")
            )
            labels = torch.LongTensor(
                tokenizer.encode(title, max_length=self.max_len, truncation=True, padding="max_length")
            )
            self.data.append(inputs)
            self.label.append(labels)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        return self.data[item], self.label[item]


train_path = r"D:\codes\nlp_about\sku-short-name-extractor\data\train.json"
batch_size = 512
epoch_num = 25

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# BUG FIX: the training set was previously built with train=False, which made
# the model train and evaluate on the same 1000 held-out examples.
train_datasets = MyDataSet(train_path, train=True)
test_datasets = MyDataSet(train_path, train=False)

train_dataloader = DataLoader(train_datasets, batch_size=batch_size, drop_last=True)
test_dataloader = DataLoader(test_datasets, batch_size=batch_size, drop_last=True)

input_size = tokenizer.vocab_size  # output logits span the full BERT vocabulary
hidden_size = 20
n_layers = 2
emb_size = 256

encoder = EncoderRNN(input_size, hidden_size, n_layers, emb_size, dropout=0.1)
decoder = DecoderRNN(input_size, hidden_size, n_layers, emb_size, dropout=0.1)

# NOTE(review): max_len is passed batch_size (512) here, but the tokenized
# sequences are 64 ids long — this looks like it should be the sequence
# length. Left unchanged; confirm against the Seq2Seq implementation.
model = Seq2Seq(encoder, decoder, device, max_len=batch_size)
model.to(device)
# NLLLoss expects log-probabilities; ignore_index=0 skips [PAD] positions.
criterion = nn.NLLLoss(ignore_index=0)
# criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, weight_decay=0.01)

# Train for epoch_num epochs; after each epoch, report summed train/test loss.
for epoch in range(1, epoch_num + 1):
    # (removed a redundant per-epoch model.to(device) — the model is already
    # moved to the device once, right after construction)
    model.train()
    batch_loss = 0.0
    test_loss = 0.0
    for inputs, labels in tqdm(train_dataloader, desc="training"):
        inputs = inputs.to(device).long()
        labels = labels.to(device).long()
        # assumes model output is (batch, seq_len, vocab_size) — TODO confirm
        # against the Seq2Seq implementation.
        outputs = model(inputs)

        outputs = outputs.view(-1, input_size)
        # BUG FIX: NLLLoss expects log-probabilities. The original applied
        # torch.softmax, feeding raw probabilities into NLLLoss, which yields
        # a wrong (positive-shifted) loss and wrong gradients.
        outputs = torch.log_softmax(outputs, dim=-1)
        # Flatten targets to (batch * seq_len,) to match the flattened logits.
        # (The original's unsqueeze(-1) before view(-1) was a no-op.)
        labels = labels.view(-1)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_loss += loss.cpu().item()

    model.eval()
    for inputs, labels in tqdm(test_dataloader, desc="testing"):
        with torch.no_grad():
            inputs = inputs.to(device).long()
            labels = labels.to(device).long()
            outputs = model(inputs)
            outputs = outputs.view(-1, input_size)
            outputs = torch.log_softmax(outputs, dim=-1)
            labels = labels.view(-1)

            loss = criterion(outputs, labels)
            test_loss += loss.cpu().item()

    print(f"epoch={epoch}, train_loss={batch_loss}, test_loss={test_loss}")

    # if not os.path.exists("./models"):
    #     os.mkdir("./models")
    # save_path = f"./models/rnn_seq2seq_loss_{test_loss}.pt"
    # model.to("cpu")
    # torch.save(model.state_dict(), save_path)
