import os
import time
import random
import logging
import numpy as np

import torch
import torch.optim as optim
from torch.utils.data import DataLoader

from configs.Params import Params
from nets.load_net import gnn_model
from data.tsp_dataset import tsp_dataset, collate_fn
from train.train_tsp import train_epoch, evaluate_epoch


def gpu_setup(use_gpu, gpu_id):
    """Select the torch device, restricting CUDA to *gpu_id* when requested.

    Sets CUDA_DEVICE_ORDER/CUDA_VISIBLE_DEVICES so device 0 maps to the chosen
    physical GPU, then returns ``torch.device('cuda')`` when CUDA is available
    and *use_gpu* is truthy, otherwise ``torch.device('cpu')``.
    """
    # Enumerate GPUs by PCI bus order and expose only the requested one.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)

    if torch.cuda.is_available() and use_gpu:
        print('cuda available with GPU:', torch.cuda.get_device_name(0))
        return torch.device("cuda")
    print('cuda not available')
    return torch.device("cpu")


def get_logger(log_file):
    """Configure root logging to *log_file* (UTF-8) plus the console and return it.

    NOTE(review): ``logging.basicConfig`` is a no-op when the root logger
    already has handlers, so calling this twice (or after another library
    configured logging) silently keeps the first configuration — confirm
    this is acceptable for repeated pipeline runs in one process.
    """
    handlers = [
        logging.FileHandler(log_file, encoding='utf-8'),
        logging.StreamHandler(),
    ]
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=handlers,
    )
    return logging.getLogger()


def get_model_param(model):
    """Return the total number of scalar parameters in *model*.

    Args:
        model: a ``torch.nn.Module``.

    Returns:
        int: the sum of element counts over all parameter tensors.
    """
    # Use Tensor.numel() instead of np.prod(list(size())): np.prod([]) is the
    # float 1.0, so the original silently promoted the total to float whenever
    # the model held a 0-dim parameter, and otherwise returned np.int64.
    # numel() counts every tensor's elements (1 for 0-dim) as a plain int.
    return sum(param.numel() for param in model.parameters())


def _report(params, logger, message):
    """Route *message* to the log file when logging is enabled, else to stdout."""
    if params.need_log:
        logger.info(message)
    else:
        print(message)


def train_val_pipeline(model_name, params):
    """Train/validate/test driver.

    Builds the model, optimizer, scheduler and data loaders, runs the epoch
    loop (interruptible with Ctrl + C), and reports final metrics on all
    three splits.

    Args:
        model_name: key understood by ``gnn_model`` (e.g. 'GCN', 'GAT', 'GatedGCN').
        params: configuration object with paths, hyper-parameters, the target
            device and logging/checkpoint flags.
    """
    # Wall-clock start of the whole pipeline.
    t0 = time.time()
    # Duration of each completed epoch, for the average reported at the end.
    per_epoch_time = []

    device = params.device

    # Optional log file; when disabled all reporting falls back to print().
    logger = None
    if params.need_log:
        logger = get_logger(params.log_file)

    # Seed every RNG involved so training runs are reproducible.
    random.seed(params.seed)
    np.random.seed(params.seed)
    torch.manual_seed(params.seed)
    if device.type == 'cuda':
        torch.cuda.manual_seed(params.seed)

    # Build the model and report its parameter count.
    model = gnn_model(model_name, params)
    model = model.to(device)
    num_params = get_model_param(model)
    _report(params, logger, params.get_model_info(num_params))

    # Adam optimizer with L2 regularization via weight_decay.
    optimizer = optim.Adam(model.parameters(), lr=params.init_lr, weight_decay=params.weight_decay)

    scheduler = None
    if params.lr_decay:
        # Shrink the LR by `lr_reduce_factor` once the monitored metric
        # (validation loss, see scheduler.step below) has not improved for
        # `lr_schedule_patience` consecutive epochs.
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                         factor=params.lr_reduce_factor,
                                                         patience=params.lr_schedule_patience,
                                                         verbose=True)

    # Datasets and loaders for the train / validation / test splits.
    # NOTE(review): `collate_fn()` is *called* here, so it is presumably a
    # factory returning the actual collate callable — confirm against the
    # definition in data.tsp_dataset.
    train_set = tsp_dataset(params.train_path)
    val_set = tsp_dataset(params.val_path)
    test_set = tsp_dataset(params.test_path)

    train_loader = DataLoader(train_set, batch_size=params.batch_size, shuffle=True, collate_fn=collate_fn())
    val_loader = DataLoader(val_set, batch_size=params.batch_size, shuffle=False, collate_fn=collate_fn())
    test_loader = DataLoader(test_set, batch_size=params.batch_size, shuffle=False, collate_fn=collate_fn())

    # Epoch loop; Ctrl + C aborts training early but still runs the final report.
    try:
        for epoch in range(params.num_epoch):
            start = time.time()

            # One optimization pass, then metrics on validation and test.
            epoch_train_loss, epoch_train_f1, optimizer = train_epoch(model, optimizer, device, train_loader)
            epoch_val_loss, epoch_val_f1 = evaluate_epoch(model, device, val_loader)
            _, epoch_test_f1 = evaluate_epoch(model, device, test_loader)

            if params.lr_decay:
                # The LR schedule is driven by the validation loss.
                scheduler.step(epoch_val_loss)

            epoch_time = time.time() - start
            per_epoch_time.append(epoch_time)

            # Per-epoch summary.
            infor = "\n" + '-' * 42 + "\n"
            infor += "epoch:{:d}\tepoch_time:{:.5f} s\n".format(epoch + 1, epoch_time)
            infor += "train_loss:{:.5f}, train_f1:{:.5f}\n".format(epoch_train_loss, epoch_train_f1)
            infor += "val_loss:  {:.5f}, val_f1:  {:.5f}\n".format(epoch_val_loss, epoch_val_f1)
            infor += ">>>>>>>>>>>>>>>>>>> test_f1: {:.5f}\n".format(epoch_test_f1)
            infor += '-' * 42
            _report(params, logger, infor)

            # Checkpoint the weights. NOTE(review): the original comment claimed
            # "every 10 epochs" but `% 1 == 0` saves every epoch — confirm which
            # cadence is intended before changing the modulus.
            if params.save_model and (epoch + 1) % 1 == 0:
                torch.save(model.state_dict(), '{}.pkl'.format(params.model_path + "epoch_" + str(epoch + 1)))

    except KeyboardInterrupt:
        infor = "\n" + '-' * 42 + "\n"
        infor += "已使用 Ctrl + C 将训练提前退出"
        _report(params, logger, infor)

    # Final evaluation on every split with the last weights.
    _, train_f1 = evaluate_epoch(model, device, train_loader)
    _, val_f1 = evaluate_epoch(model, device, val_loader)
    _, test_f1 = evaluate_epoch(model, device, test_loader)

    infor = "\n训练 F1:  {:.5f}\n".format(train_f1)
    infor += "验证 F1:  {:.5f}\n".format(val_f1)
    infor += "测试 F1:  {:.5f}\n".format(test_f1)
    infor += "训练总用时:\t{:.5f} s\n".format(time.time() - t0)
    infor += "平均每个epoch用时:\t{:.5f} s\n".format(np.mean(per_epoch_time))
    _report(params, logger, infor)




def main():
    """Script entry point: choose a model, pick the device, start training."""
    # Alternative model keys: 'GAT', 'GatedGCN'
    model_name = 'GCN'
    device = gpu_setup(True, 0)
    params = Params(model_name, device)
    # Kick off the train/validate/test pipeline.
    train_val_pipeline(model_name, params)


if __name__ == '__main__':
    main()
