# 解决找不到模块
import sys,os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))


from utils.batchify import batchify
from config import get_args

from model.at_latticelstm_transformer import AT_Latticelstm_Transformer
import os
import numpy as np

import torch
import torch.optim as optim
import time
import random
import sys
import gc
from main import data_initialization,lr_decay,evaluate


def train(data, model, args):
    """Train `model` on `data.train_ids`, evaluating on the dev set each epoch.

    After every epoch the model is scored on the dev split; whenever the dev
    F1 exceeds the best seen so far, a checkpoint is written to
    `args.param_stored_directory + args.dataset_name + "_param/"`.

    Args:
        data: dataset holder; must expose a shuffleable `train_ids` list
              consumable by `batchify` (project type — schema assumed,
              verify against `data_initialization`).
        model: model exposing `neg_log_likelihood(...)` and the usual
               `nn.Module` interface.
        args: parsed hyperparameters (optimizer, lr, batch_size, ...).

    Raises:
        ValueError: if `args.optimizer` is not "Adam" or "SGD".
    """
    # Optimizer over trainable parameters only (frozen params are skipped).
    parameters = filter(lambda p: p.requires_grad, model.parameters())
    if args.optimizer == "Adam":
        optimizer = optim.Adam(parameters, lr=args.lr, weight_decay=args.l2_penalty)
    elif args.optimizer == "SGD":
        optimizer = optim.SGD(parameters, lr=args.lr, weight_decay=args.l2_penalty)
    else:
        # Previously an unknown optimizer left `optimizer` unbound and the
        # code crashed later with a NameError; fail fast with a clear error.
        raise ValueError("Unsupported optimizer: %s" % args.optimizer)
    best_dev = -1
    # Epoch loop.
    for idx in range(args.max_epoch):
        epoch_start = time.time()
        temp_start = epoch_start
        print("Epoch: %s/%s" % (idx, args.max_epoch))
        optimizer = lr_decay(optimizer, idx, args.lr_decay, args.lr)
        instance_count = 0
        sample_loss = 0
        total_loss = 0
        random.shuffle(data.train_ids)
        model.train()
        model.zero_grad()
        batch_size = args.batch_size
        train_num = len(data.train_ids)
        # +1 may yield one trailing empty batch; it is skipped below.
        total_batch = train_num // batch_size + 1
        end = 0  # guards the post-loop print when there is no training data
        # Mini-batch loop.
        for batch_id in range(total_batch):
            start = batch_id * batch_size
            end = min((batch_id + 1) * batch_size, train_num)
            instance = data.train_ids[start:end]
            if not instance:
                continue
            model.zero_grad()
            char, c_len, gazs, mask, label, recover, t_graph, c_graph, l_graph = batchify(instance, args.use_gpu)
            loss = model.neg_log_likelihood(char, c_len, gazs, t_graph, c_graph, l_graph, mask, label)
            instance_count += 1
            sample_loss += loss.item()
            total_loss += loss.item()
            loss.backward()
            if args.use_clip:
                # Clip gradients to stabilize training.
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()
            model.zero_grad()
            # Periodic progress report every 500 processed instances.
            if end % 500 == 0:
                temp_time = time.time()
                temp_cost = temp_time - temp_start
                temp_start = temp_time
                print("     Instance: %s; Time: %.2fs; loss: %.4f" % (
                end, temp_cost, sample_loss))
                sys.stdout.flush()
                sample_loss = 0
        temp_time = time.time()
        temp_cost = temp_time - temp_start
        print("     Instance: %s; Time: %.2fs; loss: %.4f" % (end, temp_cost, sample_loss))
        epoch_finish = time.time()
        epoch_cost = epoch_finish - epoch_start
        print("Epoch: %s training finished. Time: %.2fs, speed: %.2fst/s,  total loss: %s"%(idx, epoch_cost, train_num/epoch_cost, total_loss))
        # Evaluate on the dev split; F1 drives checkpointing.
        speed, acc, p, r, f, _ = evaluate(data, model, args, "dev")
        dev_finish = time.time()
        dev_cost = dev_finish - epoch_finish
        current_score = f
        print(
            "Dev: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f" % (dev_cost, speed, acc, p, r, f))
        if current_score > best_dev:
            print("Exceed previous best f score:", best_dev)
            print("------当前最优Dev------")
            # Ensure the checkpoint directory exists before saving.
            if not os.path.exists(args.param_stored_directory + args.dataset_name + "_param"):
                os.makedirs(args.param_stored_directory + args.dataset_name + "_param")
            model_name = "{}epoch_{}_f1_{}.model".format(args.param_stored_directory + args.dataset_name + "_param/", idx, current_score)
            torch.save(model.state_dict(), model_name)
            best_dev = current_score
        # Free unreferenced tensors between epochs.
        gc.collect()


if __name__ == '__main__':
    # Parse command-line configuration and echo every option for the log.
    args, unparsed = get_args()
    for name in vars(args):
        print(name, ":", getattr(args, name))

    # Restrict CUDA to the requested device before any GPU work happens.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.visible_gpu)

    # Seed every RNG source so runs are reproducible.
    seed = args.random_seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True  # pin cuDNN kernel selection

    # Build the data, instantiate the model, and run training.
    data = data_initialization(args)
    model = AT_Latticelstm_Transformer()
    train(data, model, args)


