import os,sys
os.chdir(os.path.dirname(__file__))
import random

from torch.nn import Embedding
from transformers import BertTokenizer

from cnn_model.resnet import ResNet

# Device selection. Setting CUDA_VISIBLE_DEVICES to "" *before* importing torch
# hides every GPU from the process, so all later torch.cuda.is_available()
# checks in this script return False and everything runs on CPU.
forceCPU = True
if forceCPU:
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
    import torch
else:
    import torch
    if torch.cuda.is_available():
        # Use the last visible GPU as the default CUDA device.
        device = torch.device(f"cuda:{torch.cuda.device_count() - 1}")
        torch.cuda.set_device(device)
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm
from sklearn.metrics import accuracy_score
import numpy as np
from cnn_model.formula_modeling import FormulaClassification


def collate_f(one_batch):
    """Collate a list of (input_ids, label) tensor pairs into two stacked long tensors.

    Moves both tensors to the GPU when CUDA is available (a no-op here, since
    the script hides GPUs via CUDA_VISIBLE_DEVICES when forceCPU is set).
    """
    ids, lbls = zip(*one_batch)
    ids = torch.stack(ids)
    lbls = torch.stack(lbls)
    if torch.cuda.is_available():
        ids = ids.cuda()
        lbls = lbls.cuda()
    return ids.long(), lbls.long()

# vocab and data instances
from cnn_model.make_vocab import make_vocab

# Vocabulary and raw samples built by the project's make_vocab helper.
data_vocab_info = make_vocab()
instances = data_vocab_info["instances"]  # raw samples; each carries "公式：", "标签：" and "layers" keys (used below)
len_vocab = len(data_vocab_info["final_words"])  # vocabulary size, used to size the Embedding layer
labels2id = data_vocab_info["labels2id"]  # maps label string -> integer class id
max_words = data_vocab_info["max_words"]  # longest layer, in tokens
max_symbols = max_words+2  # +2 leaves room for the special tokens added by add_special_tokens=True below
max_lines = max_layers = data_vocab_info["max_layers"]  # fixed number of rows per sample matrix
batch_size = 16  # NOTE(review): redundant — overwritten by the identical assignment a few lines below
dim = 64  # embedding dimension

LR = 0.1 ** 4  # base learning rate (1e-4), tuned for batch_size_base
EPOCH = 60
batch_size_base = 32  # reference batch size the base LR was chosen for
batch_size = 16
LR *= batch_size / batch_size_base  # linear LR scaling with the actual batch size

# Case-sensitive tokenizer over the local vocab file.
tokenizer = BertTokenizer(vocab_file="vocab.txt",do_lower_case=False)
# Treat every vocab entry as atomic — presumably so multi-character symbols are never split; verify.
tokenizer.unique_no_split_tokens = list(tokenizer.ids_to_tokens.values())

# instances to batch ids: turn each sample into a fixed-shape (max_lines, max_symbols) id matrix
true_labels = []
matrixs = []
formulas = []
for instance in instances:
    formulas.append(instance["公式："])  # keep the raw formula text alongside each sample
    true_labels.append(labels2id[instance["标签："]])  # integer class id
    layers = instance["layers"]
    # Encode each layer line to exactly max_symbols ids (truncate / pad, with special tokens).
    input_ids = [tokenizer.encode(layer, max_length=max_symbols, truncation=True, padding="max_length",
                                  add_special_tokens=True) for layer in layers]
    input_ids_np = np.stack(input_ids)
    # Zero-pad missing rows so every matrix has exactly max_lines rows.
    # NOTE(review): no row truncation in the else branch — assumes len(layers) <= max_lines;
    # confirm make_vocab guarantees this, otherwise matrices can end up with differing shapes.
    if len(input_ids_np) < max_lines:
        matrix = np.pad(input_ids_np, ((0, max_lines - len(input_ids_np)), (0, 0)), 'constant',
                        constant_values=(0, 0))
    else:
        matrix = input_ids_np
    matrixs.append(matrix)
all_data = list(zip(matrixs,true_labels,formulas))
random.shuffle(all_data)  # shuffle before splitting so train/dev are randomly drawn
quantile_8_10 = len(instances)*4//5  # 80% / 20% split point
train_instances,dev_instances = all_data[:quantile_8_10],all_data[quantile_8_10:]


# formula label train data
batch_formula_biggest, labels_formula_all, _ = zip(*train_instances)
##using dataloder
batch_formula_biggest = torch.tensor(batch_formula_biggest)
labels_formula_all = torch.tensor(labels_formula_all)
train_formula_label_dataset = TensorDataset(batch_formula_biggest, labels_formula_all)

## formula label dev data
batch_formula_dev, labels_formula_dev, _= zip(*dev_instances)
batch_formula_dev = torch.tensor(batch_formula_dev).long()
batch_formula_dev = batch_formula_dev.split(batch_size, dim=0)
labels_formula_dev = torch.tensor(labels_formula_dev).long()
labels_formula_dev = labels_formula_dev.split(batch_size, dim=0)


train_formula_label_dataloader = DataLoader(
    # 从数据库中每次抽出batch size个样本
    dataset=train_formula_label_dataset,
    batch_size=batch_size,
    shuffle=True,
    num_workers=0,
    collate_fn=collate_f
)

# model and embedding
# model loading / construction
best_path = 'history_models/model.pkl'  # base name; epoch/accuracy suffixes are appended when saving
loadModel = False
if loadModel:
    # Resume from a fully pickled model object (saved with torch.save(model, ...)).
    model = torch.load(best_path)
    print(f"load model over path:{best_path}")
else:
    res_model = ResNet(num_classes=len(labels2id))
    # NOTE(review): the embedding is sized from make_vocab's word list while token ids come
    # from vocab.txt — confirm the two vocabularies match, otherwise ids may index out of range.
    embedding = Embedding(len_vocab,dim)
    model = formula_model = FormulaClassification(res_model=res_model,embedding=embedding)
if torch.cuda.is_available():
    model.cuda()

# optimizer
opt_Adam = torch.optim.Adam(model.parameters(), lr=LR, betas=(0.9, 0.99), eps=1e-8, weight_decay=0.0001)
opt = opt_Adam

# Training loop: one train pass + one dev evaluation per epoch; the best dev-accuracy
# model is checkpointed. Fixes applied: dev evaluation now runs under torch.no_grad()
# (the original built autograd graphs it never used), scalar extraction uses .item()
# instead of the deprecated .data access, the checkpoint directory is created up front
# (torch.save fails if it is missing), and the dev accumulators no longer shadow the
# module-level `true_labels` list built during preprocessing.
best_dev_score_formula = 0
# Ensure the checkpoint directory exists before the first torch.save call.
os.makedirs(os.path.dirname(best_path), exist_ok=True)
for epoch in tqdm(range(EPOCH)):
    print('formula label train Epoch: ', epoch)
    model.train()
    accuracy_list = []
    loss_list = []

    for step, (batch, labels) in enumerate(train_formula_label_dataloader):
        scores, loss = model(input_ids=batch, labels=labels)
        opt.zero_grad()  # clear gradients for next train
        loss.backward()  # backpropagation, compute gradients
        opt.step()  # apply gradients
        loss_value = loss.item()  # plain python float; avoids deprecated .data access
        loss_list.append(loss_value)
        accuracy = accuracy_score(y_true=labels.detach().cpu().numpy(),
                                  y_pred=scores.detach().cpu().numpy().argmax(1))
        accuracy_list.append(accuracy)
        if epoch == 0 and step < 55:
            # Print per-step metrics early in training for a quick sanity check.
            print(f"accurate:{accuracy},loss:{loss_value}")
    print(
        f"train for formula label Epoch{epoch},accuracy_mean:{np.mean(accuracy_list)},loss_mean:{np.mean(loss_list)}")

    # dev evaluation — no gradients needed, so run under no_grad to save memory/time
    print("formula task dev code")
    model.eval()
    dev_true_labels = []
    dev_predict_labels = []
    with torch.no_grad():
        for batch, labels in zip(batch_formula_dev, labels_formula_dev):
            if torch.cuda.is_available():
                labels = labels.cuda()
                batch = batch.cuda()

            scores, loss = model(input_ids=batch, labels=labels)
            dev_predict_labels.extend(scores.cpu().numpy().argmax(1))
            dev_true_labels.extend(labels.cpu().numpy())
    dev_formula_accuracy = accuracy_score(y_true=dev_true_labels, y_pred=dev_predict_labels)
    print(f"formula dev accurate:{dev_formula_accuracy},best_dev_score_formula:{best_dev_score_formula}")
    if dev_formula_accuracy >= best_dev_score_formula:
        model_path = f"{best_path}epoch{epoch}dev_formula_accuracy{dev_formula_accuracy}"
        torch.save(model, model_path)
        best_dev_score_formula = dev_formula_accuracy
        print(f"model saved in {model_path}\nbest_dev_score_formula:{best_dev_score_formula}")