import os

# Run relative to this file's directory so relative paths below
# (vocab.txt, history_models/) resolve regardless of the caller's CWD.
os.chdir(os.path.dirname(__file__))
import random

# Hide all GPUs from CUDA *before* torch is imported so training is
# forced onto the CPU (torch reads CUDA_VISIBLE_DEVICES at import time).
forceCPU = True
if forceCPU:
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
import torch
from transformers import BertTokenizer
from gcn_model.gcn import GCNClassifierPretrainModel
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm
from sklearn.metrics import accuracy_score
import numpy as np

def collate_f(one_batch):
    """Collate a list of per-sample tensor tuples into batched tensors.

    Each sample is a tuple of same-shaped tensors (matrix, input_ids,
    mask, label); corresponding fields across the batch are stacked
    along a new leading batch dimension.

    Returns a tuple rather than a lazy ``map`` so the result can be
    iterated or unpacked more than once without silently exhausting.
    """
    return tuple(torch.stack(field) for field in zip(*one_batch))


# vocab and data instances
from gcn_model.make_vocab import make_vocab

data_vocab_info = make_vocab()
instances = data_vocab_info["instances"]
len_vocab = len(data_vocab_info["final_words"])
labels2id = data_vocab_info["labels2id"]
max_words = data_vocab_info["max_words"]
# +2 slots on top of max_words — presumably for special boundary
# tokens around each formula; TODO confirm against info2digit_world.
max_symbols = max_words + 2
# max_lines = max_layers = data_vocab_info["max_layers"]
max_lines = 1
dim = 64

EPOCH = 60
batch_size_base = 32
batch_size = 16
# Base LR of 1e-4, scaled linearly with the actual batch size.
LR = 1e-4 * batch_size / batch_size_base

tokenizer = BertTokenizer(vocab_file="vocab.txt", do_lower_case=False)
# Mark every vocab entry as atomic so the tokenizer never sub-splits symbols.
tokenizer.unique_no_split_tokens = list(tokenizer.ids_to_tokens.values())
from gcn_model.formula_gcn_modeling import info2digit_world

all_data = info2digit_world(instances, tokenizer, max_symbols, max_lines)
random.shuffle(all_data)
# 80/20 train/dev split. Index from the sequence actually being split
# (all_data) rather than from `instances`, in case their lengths differ.
quantile_8_10 = len(all_data) * 4 // 5
train_instances, dev_instances = all_data[:quantile_8_10], all_data[quantile_8_10:]

# formula label train data: unzip the per-sample tuples into parallel fields
matrices_train, input_ids_s_train, masks_train, labels_formula_all, _ = zip(*train_instances)
pre_data_trains = matrices_train, input_ids_s_train, masks_train, labels_formula_all
## training data goes through a DataLoader (shuffled each epoch)
train_formula_label_dataset = TensorDataset(*[torch.tensor(data) for data in pre_data_trains])


def _tensor_batches(field):
    """Convert one dev field (tuple of samples) to a tensor and chunk it
    into fixed-size mini-batches along the batch dimension."""
    return torch.tensor(field).split(batch_size, dim=0)


## formula label dev data: pre-batched tensors (no DataLoader needed for eval)
matrices_dev, input_ids_s_dev, masks_dev, labels_formula_dev, _ = zip(*dev_instances)
matrices_dev = _tensor_batches(matrices_dev)
words_dev = _tensor_batches(input_ids_s_dev)
masks_dev = _tensor_batches(masks_dev)
labels_formula_dev = _tensor_batches(labels_formula_dev)

train_formula_label_dataloader = DataLoader(
    # draw batch_size samples from the dataset on each step
    dataset=train_formula_label_dataset,
    batch_size=batch_size,
    shuffle=True,
    num_workers=0,
    collate_fn=collate_f
)

# model and embedding
# model loading / construction
model_path_prefix = 'history_models/model_'
best_path = 'history_models/best_model.pkl'
loadModel = False
if loadModel:
    # NOTE(review): this loads from the bare prefix path, but the save
    # calls below always append epoch/accuracy suffixes — confirm the
    # intended checkpoint path before enabling loadModel.
    model = torch.load(model_path_prefix)
    print(f"load model over path:{model_path_prefix}")
else:
    # Import from the public `collections` module, not the private
    # CPython implementation module `_collections`.
    from collections import defaultdict

    # defaultdict(int) lets the model read any unset hyperparameter as 0.
    # Named model_opt (not opt) to avoid shadowing the optimizer below.
    model_opt = defaultdict(int, {"num_class": len(labels2id),
                                  "hidden_dim": dim,
                                  "vocab_size": tokenizer.vocab_size,
                                  "emb_dim": dim,
                                  "num_layers": 2,
                                  "sublayer_first": 1,
                                  "sublayer_second": 1,
                                  "heads": 1
                                  })
    formula_model = GCNClassifierPretrainModel(model_opt)
    model = formula_model
if torch.cuda.is_available():
    model.cuda()

# optimizer
opt_Adam = torch.optim.Adam(model.parameters(), lr=LR, betas=(0.9, 0.99), eps=1e-8, weight_decay=0.0001)
opt = opt_Adam

best_dev_score_formula = 0
# One CE loss shared across epochs (no need to recreate it per epoch).
# Logits are flattened to (batch * lines, num_class) to match the
# flattened labels.
loss_func = torch.nn.CrossEntropyLoss()
for epoch in tqdm(range(EPOCH)):
    print('formula label train Epoch: ', epoch)
    model.train()
    loss_list = []
    for step, (*batch, labels) in enumerate(train_formula_label_dataloader):
        scores, h = model(batch)  # scores mean logits
        loss = loss_func(scores.reshape(-1, len(labels2id)), labels.flatten())
        opt.zero_grad()  # clear gradients for next train
        loss.backward()  # backpropagation, compute gradients
        opt.step()  # apply gradients
        loss_value = loss.item()
        loss_list.append(loss_value)
        if epoch == 0:
            # Extra per-step logging on the first epoch to sanity-check training.
            print(f"step:{step},loss:{loss_value}")
    print(
        f"train for formula label Epoch{epoch},loss_mean:{np.mean(loss_list)}")

    # dev evaluation for the formula-label task
    print("formula task dev code")
    model.eval()
    true_labels = []
    predict_labels = []
    with torch.no_grad():  # no gradients needed during evaluation
        for *batch, labels in zip(matrices_dev, words_dev, masks_dev, labels_formula_dev):
            scores, h = model(batch)
            # Use len(labels2id) rather than a hard-coded 2 so the dev
            # reshape stays consistent with the training reshape above.
            predict_labels.extend(scores.reshape(-1, len(labels2id)).cpu().numpy().argmax(-1))
            true_labels.extend(labels.flatten().cpu().numpy())
    dev_formula_accuracy = accuracy_score(y_true=true_labels, y_pred=predict_labels)
    print(f"formula dev accurate:{dev_formula_accuracy},best_dev_score_formula:{best_dev_score_formula}")
    if dev_formula_accuracy > best_dev_score_formula:
        # Save both an epoch-tagged checkpoint and the rolling best model.
        model_path = f"{model_path_prefix}epoch{epoch}dev_formula_accuracy{dev_formula_accuracy}"
        torch.save(model, model_path)
        torch.save(model, best_path)
        best_dev_score_formula = dev_formula_accuracy
        print(f"model saved in {model_path}\nbest_dev_score_formula:{best_dev_score_formula}")
        print(f"model saved in {best_path}")