from __future__ import print_function
import os
import sys
from data.dataset import Dataset
import torch
from torch.utils import data
import torch.nn.functional as F
from models import *
import torchvision
from utils import visualizer, view_model
import torch
import numpy as np
import random
import time
from config.config import Config_train
from torch.nn import DataParallel
from torch.optim.lr_scheduler import StepLR
from test import *
from resize import resize_photo
from create_txt import create_v1
import os
import shutil


def save_model(model, save_path, name, iter_cnt):
    """Persist the model's weights as ``<save_path>/<name>_<iter_cnt>.pth``.

    Args:
        model: module whose ``state_dict()`` is serialized.
        save_path: directory the checkpoint is written into.
        name: base name of the checkpoint file (e.g. the backbone name).
        iter_cnt: counter appended to the filename (typically the epoch).

    Returns:
        The full path of the checkpoint file that was written.
    """
    checkpoint_file = os.path.join(save_path, '{}_{}.pth'.format(name, iter_cnt))
    torch.save(model.state_dict(), checkpoint_file)
    return checkpoint_file


if __name__ == '__main__':
    # Usage: train.py <train_root> <model_path>
    train_root, model_path = sys.argv[1:3]
    num_classes = 0  # placeholder; the real identity count is filled in after create_v1()
    opt = Config_train(train_root, num_classes, model_path)

    # Resolve all paths relative to this file's directory.
    base_dir = os.path.dirname(os.path.abspath(__file__))
    Path0 = os.path.join(base_dir, opt.train_root)   # image root directory
    Path1 = os.path.join(base_dir, opt.model_path)   # checkpoint output directory
    Path2 = os.path.join(base_dir, opt.train_list)   # train-list txt file

    resize_photo(Path0)  # normalize image sizes in place
    print("modify photo finished\n")

    k = create_v1(Path0)  # build the train-list txt; returns the number of identities found
    opt.num_classes = k
    print("create train txt finished\n")

    if opt.display:
        # BUG FIX: the original called `Visualizer()`, but only the `visualizer`
        # module is imported here, so that was a NameError.
        # NOTE(review): assumes the class lives in utils.visualizer -- confirm
        # against utils/visualizer.py.
        visualizer = visualizer.Visualizer()
    device = torch.device("cuda")  # training is assumed to run on GPU

    # Load images + labels, then do an 80/20 train/test split.
    Datasets = Dataset(Path0, Path2, phase='train', input_shape=opt.input_shape)
    train_size = int(0.8 * len(Datasets))
    test_size = len(Datasets) - train_size
    train_datasets, test_datasets = torch.utils.data.random_split(Datasets, [train_size, test_size])
    trainloader = torch.utils.data.DataLoader(train_datasets, batch_size=opt.train_batch_size,
                                              shuffle=True, num_workers=opt.num_workers)
    testloader = torch.utils.data.DataLoader(test_datasets, batch_size=opt.train_batch_size,
                                             shuffle=True, num_workers=opt.num_workers)

    print('{} train iters per epoch:'.format(len(trainloader)))

    # LFW evaluation data: one image path per line of the test list.
    identity_list = get_list(opt.lfw_test_list)
    img_paths = [os.path.join(opt.lfw_root, each) for each in identity_list]

    # Loss function.
    if opt.loss == 'focal_loss':
        criterion = FocalLoss(gamma=2)
    else:
        criterion = torch.nn.CrossEntropyLoss()

    # Backbone network.
    if opt.backbone == 'resnet18':
        model = resnet_face18(use_se=opt.use_se)
    elif opt.backbone == 'resnet34':
        model = resnet34()
    elif opt.backbone == 'resnet50':
        model = resnet50()
    else:
        model = resnet152()

    # Classification head (the margin-based variants are the ArcFace-family
    # improvements over a plain linear layer).
    if opt.metric == 'add_margin':
        metric_fc = AddMarginProduct(512, opt.num_classes, s=30, m=0.35)
    elif opt.metric == 'arc_margin':
        metric_fc = ArcMarginProduct(512, opt.num_classes, s=30, m=0.5, easy_margin=opt.easy_margin)
    elif opt.metric == 'sphere':
        metric_fc = SphereProduct(512, opt.num_classes, m=4)
    else:
        metric_fc = nn.Linear(512, opt.num_classes)

    # view_model(model, opt.input_shape)
    print(model)
    # model.load_state_dict(torch.load(Path1))
    model.to(device)
    model = DataParallel(model)      # multi-GPU data parallelism
    metric_fc.to(device)
    metric_fc = DataParallel(metric_fc)

    # Optimizer over backbone + head parameters.
    if opt.optimizer == 'sgd':
        optimizer = torch.optim.SGD([{'params': model.parameters()}, {'params': metric_fc.parameters()}],
                                    lr=opt.lr, weight_decay=opt.weight_decay)
    else:
        optimizer = torch.optim.Adam([{'params': model.parameters()}, {'params': metric_fc.parameters()}],
                                     lr=opt.lr, weight_decay=opt.weight_decay)
    scheduler = StepLR(optimizer, step_size=opt.lr_step, gamma=0.1)  # decay lr by 10x every lr_step epochs

    start = time.time()
    for i in range(opt.max_epoch):
        model.train()
        # BUG FIX: loop variable renamed from `data` to `batch` -- `data` shadowed
        # the `torch.utils.data` module imported at the top of the file.
        for ii, batch in enumerate(trainloader):
            data_input, label = batch
            label = label - 1  # on-disk labels are 1-based; CrossEntropy needs 0-based indices
            data_input = data_input.to(device)
            label = label.to(device).long()
            feature = model(data_input)           # e.g. 64x1x128x128 -> 64x512 embeddings
            output = metric_fc(feature, label)    # margin head: 64x512 x 512xC -> 64xC logits
            loss = criterion(output, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            iters = i * len(trainloader) + ii  # global iteration counter

            if iters % opt.print_freq == 0:
                # GPU tensors must be moved to CPU before converting to numpy.
                output = output.data.cpu().numpy()
                output = np.argmax(output, axis=1)  # predicted class per row
                label = label.data.cpu().numpy()

                acc = np.mean((output == label).astype(int))
                speed = opt.print_freq / (time.time() - start)
                time_str = time.asctime(time.localtime(time.time()))
                print('{} train epoch {} iter {} {} iters/s loss {} acc {}'.format(time_str, i, ii, speed, loss.item(), acc))
                if opt.display:
                    visualizer.display_current_results(iters, loss.item(), name='train_loss')
                    visualizer.display_current_results(iters, acc, name='train_acc')

                start = time.time()  # reset the speed-measurement window

        # BUG FIX: scheduler.step() must come AFTER the epoch's optimizer steps
        # (PyTorch >= 1.1 convention); calling it at the start of the epoch
        # skipped the initial learning rate entirely.
        scheduler.step()

        # BUG FIX: the original `i == opt.max_epoch` could never be true because
        # range() stops at max_epoch - 1; use max_epoch - 1 so the final epoch
        # is always checkpointed.
        if i % opt.save_interval == 0 or i == opt.max_epoch - 1:
            save_model(model, Path1, opt.backbone, i)

        # Evaluate on LFW after every epoch.
        model.eval()
        print('Waiting Test...')
        acc = my_test(model, img_paths, identity_list, opt.lfw_test_list, opt.test_batch_size, mode='lfw')
        print('epoch:{} -> acc:{}'.format(i, acc))
        if opt.display:
            visualizer.display_current_results(iters, acc, name='test_acc')
