import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

import argparse
# from networks.ClassicNetwork.VGGNet import VGGNet
from networks.ClassicNetwork.ResNet import ResNet50
import os
import numpy as np
import time

from dataloader import load_dataset


def save_file(path, lines):
    """
    Write every string in *lines* to the file at *path*, overwriting any
    existing content.

    :param path: destination file path
    :param lines: iterable of strings to write (newlines are NOT added;
                  include them in the items if needed)
    :return: None
    """
    # `with` guarantees the handle is closed even if a write raises
    # (the original leaked the handle on error). `writelines` replaces
    # the manual per-item loop. The parameter was renamed from `list`,
    # which shadowed the builtin.
    with open(path, 'w') as f:
        f.writelines(lines)


def main():
    """
    Evaluate a ResNet50 classifier on the test split, resuming from a
    checkpoint when one exists, and append per-epoch loss/accuracy to the
    log files under ./model/<data_dir>/.

    NOTE(review): despite EPOCH/optimizer/scheduler being set up, the loop
    below performs evaluation only — there is no forward/backward training
    step and scheduler.step() is never called. Confirm whether the training
    phase was removed intentionally.

    :return: None
    """
    data_dir = 'ResNet50_car3'
    CLASS_NUMBER = 2  # number of target classes
    dataset_dir = R'/home/mist/cdataset'
    # Prefer the GPU when one is available.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print('device:', device)

    EPOCH = 63           # upper bound (exclusive) of the epoch loop
    pre_epoch = 0        # epochs already completed (restored from checkpoint)
    BATCH_SIZE = 21      # batch size
    LR = 2e-4            # learning rate
    WEIGHT_DECAY = 5e-4  # L2 weight-decay coefficient
    STEP_SIZE = 50       # LR scheduler step size (epochs)
    GAMMA = 0.1          # LR decay multiple at each scheduler step

    # makedirs(exist_ok=True) is idempotent and replaces the racy
    # `exists() is not True` check-then-create of the original.
    log_dir = './model/' + data_dir + '/log'
    os.makedirs(log_dir, exist_ok=True)
    source_dir = '../model/' + data_dir + '/source'
    os.makedirs(source_dir, exist_ok=True)
    print(CLASS_NUMBER)

    net = ResNet50(num_classes=CLASS_NUMBER).to(device)

    criterion = nn.CrossEntropyLoss()  # classification loss
    optimizer = optim.Adam(net.parameters(), lr=LR, weight_decay=WEIGHT_DECAY)
    # NOTE(review): created but never stepped — see docstring.
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=STEP_SIZE, gamma=GAMMA)

    data_loader, data_size = load_dataset(BATCH_SIZE, data_dir=dataset_dir)

    # The original opened train_loss.txt / train_acc.txt in append mode in
    # both branches and never wrote to or closed them; keep creating the
    # files for compatibility, but release the handles immediately.
    open('./model/' + data_dir + '/train_loss.txt', 'a').close()
    open('./model/' + data_dir + '/train_acc.txt', 'a').close()

    checkpoint_path = './model/' + data_dir + '/net.pkl'
    if os.path.exists(checkpoint_path):
        # Resume: restore weights, optimizer state and the epoch counter.
        checkpoint = torch.load(checkpoint_path)
        net.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        pre_epoch = checkpoint['EPOCH'] - 1
        print("已训练epoch：", pre_epoch, "总epoch：", EPOCH)
    else:
        print("Start Training ResNet50")

    for epoch in range(pre_epoch, EPOCH):
        since = time.time()
        print('\n Epoch: {}'.format(epoch + 1))
        # Evaluation mode: freezes dropout / batch-norm running statistics.
        # (Hoisted out of the batch loop — the original called eval() per batch.)
        net.eval()
        test_data = data_loader["test"]
        print('start to test')
        # Open the per-epoch log files with `with` so the handles are always
        # closed (the original leaked two handles on every epoch), and run
        # the whole pass under no_grad to skip gradient bookkeeping.
        with open('./model/' + data_dir + '/test_acc.txt', 'a') as f_acc1, \
                open('./model/' + data_dir + '/test_loss.txt', 'a') as f_loss1, \
                torch.no_grad():
            correct = 0
            total = 0
            loss = None  # loss of the LAST batch only (matches original logging)
            for images, labels in test_data:
                images, labels = images.to(device), labels.to(device)
                # The network returns (logits, features); features are unused
                # here (the original accumulated them — and the prediction /
                # label lists — without ever consuming them, pinning GPU
                # tensors for the whole epoch; that dead code was removed).
                outputs, _features = net(images)
                loss = criterion(outputs, labels)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += torch.sum(predicted == labels.data)
            if loss is None or total == 0:
                # Empty test loader: nothing to log. (The original crashed
                # here: `loss.item()` on the float 0.0 and division by zero.)
                continue
            acc = 100. * correct / total
            f_loss1.write(str(loss.item()) + '\n')
            f_acc1.write(str(float(acc)) + '\n')
            print('测试分类准确率为:{}%, time:{}'.format(round(acc.item(), 3), time.time() - since))

        
# Script entry point: run the evaluation loop only when executed directly,
# not when this module is imported.
if __name__ == '__main__' :
    main()
