import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

import argparse
from networks.ClassicNetwork.AlexNet import AlexNet
from networks.ClassicNetwork.InceptionV1 import InceptionV1
from networks.ClassicNetwork.ResNet import ResNet50
import os
import numpy as np
import time

from dataloader import load_dataset


def save_file(path, lines):
    """
    Write a sequence of pre-formatted lines to a text file.

    :param path: destination file path (overwritten if it exists)
    :param lines: iterable of strings to write; each entry is written verbatim,
                  so callers are expected to include their own '\n' terminators
    :return: None
    """
    # 'with' guarantees the handle is closed even if a write fails
    # (the original opened/closed manually and leaked on error).
    with open(path, 'w', encoding='utf-8') as f:
        f.writelines(lines)


#

def main():
    """
    Train an InceptionV1 classifier, resuming from './model/<data_dir>/net.pkl'
    when a checkpoint exists.

    Per epoch: runs one training pass, logs running loss/accuracy to
    train_loss.txt / train_acc.txt. Every 20 epochs: evaluates on the test
    split, logs test loss/accuracy, and when accuracy improves saves a resume
    checkpoint, the best weights (net_best.pkl), plus features/predictions for
    later analysis (t-SNE features, ROC scores, confusion-matrix label files).

    :return: None
    """
    data_dir = 'google'
    CLASS_NUMBER = 2  # number of classes
    TARGET_ACCURACY = 0  # minimum test accuracy (%) required before saving a best model
    dataset_dir = R'dataset'
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # prefer GPU
    print('device:', device)

    EPOCH = 40  # total number of epochs
    pre_epoch = 0  # epochs already trained (overwritten when resuming)
    BATCH_SIZE = 64
    LR = 2e-4  # learning rate
    WEIGHT_DECAY = 5e-4  # weight-decay (L2) coefficient
    STEP_SIZE = 50  # epochs per LR-decay step
    GAMMA = 0.1  # LR decay multiple per step

    log_dir = './model/' + data_dir + '/log'
    os.makedirs(log_dir, exist_ok=True)
    # NOTE(review): '../model' differs from the './model' prefix used everywhere
    # else in this function -- confirm the parent-directory path is intentional.
    source_dir = '../model/' + data_dir + '/source'
    os.makedirs(source_dir, exist_ok=True)
    print(CLASS_NUMBER)

    net = InceptionV1().to(device)
    # net = ResNet50(num_classes=CLASS_NUMBER).to(device)

    criterion = nn.CrossEntropyLoss()  # classification loss
    optimizer = optim.Adam(net.parameters(), lr=LR, weight_decay=WEIGHT_DECAY)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=STEP_SIZE, gamma=GAMMA)

    data_loader, data_size = load_dataset(BATCH_SIZE, data_dir=dataset_dir)
    checkpoint_path = './model/' + data_dir + '/net.pkl'
    # Both branches of the original opened these identically; open them once.
    f_loss = open('./model/' + data_dir + '/train_loss.txt', 'a')
    f_acc = open('./model/' + data_dir + '/train_acc.txt', 'a')
    if not os.path.exists(checkpoint_path):
        # NOTE(review): message says VGG16 but the model above is InceptionV1.
        print("Start Training VGG16")
    else:
        checkpoint = torch.load(checkpoint_path)
        net.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        pre_epoch = checkpoint['EPOCH'] - 1  # next epoch index to run
        print("已训练epoch：", pre_epoch, "总epoch：", EPOCH)
        print("Start Continue Training VGG16")

    train_data = data_loader["train"]
    test_data = data_loader["test"]
    length = len(train_data)
    # Fixed: best accuracy must persist across test passes; the original reset
    # it to TARGET_ACCURACY inside every test block, so every test overwrote
    # the "best" model regardless of whether accuracy actually improved.
    best_acc = TARGET_ACCURACY

    try:
        for epoch in range(pre_epoch, EPOCH):
            since = time.time()
            print('\n Epoch: {}'.format(epoch + 1))
            net.train()
            sum_loss = 0.0
            correct = 0.0
            total = 0.0
            for i, data in enumerate(train_data, 0):
                inputs, labels = data
                inputs, labels = inputs.to(device), labels.to(device)
                optimizer.zero_grad()  # clear gradients before backward

                # forward + backward; the network returns (logits, features)
                outputs, features = net(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                # Running train loss / accuracy over the epoch so far.
                sum_loss += loss.item()
                _, pre = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += torch.sum(pre == labels.data)
                print('[epoch:%d, iter:%d] Loss: %.03f | Acc: %.3f%% |time:%.3f'
                      % (epoch + 1, (i + 1 + epoch * length), sum_loss / (i + 1), 100. * correct / total,
                         time.time() - since))
            # Fixed: StepLR.step() no longer accepts an epoch argument
            # (deprecated, then removed); the schedule is identical here.
            scheduler.step()
            f_loss.write(str(float(sum_loss / (i + 1))) + '\n')
            f_acc.write(str(float(100. * correct / total)) + '\n')

            # Evaluate every 20 epochs.
            if (epoch + 1) % 20 == 0:
                print('start to test')
                # Fixed: open the per-test log files with 'with' -- the original
                # reopened them every test epoch and never closed them.
                with open('./model/' + data_dir + '/test_acc.txt', 'a') as f_acc1, \
                        open('./model/' + data_dir + '/test_loss.txt', 'a') as f_loss1, \
                        torch.no_grad():  # no gradients needed for evaluation
                    net.eval()  # hoisted out of the batch loop (was called per batch)
                    correct = 0
                    total = 0
                    labels_list = []      # true labels, for the confusion matrix
                    predited_list = []    # predicted labels, for the confusion matrix
                    preValue_list = []    # raw output scores per sample, for ROC
                    feature_list = []     # feature batches, for t-SNE
                    loss = 0.0

                    for data in test_data:
                        images, labels = data
                        images, labels = images.to(device), labels.to(device)
                        outputs, features = net(images)
                        feature_list.append(features)
                        # NOTE(review): overwritten each batch -- only the last
                        # batch's loss is logged below; confirm that is intended.
                        loss = criterion(outputs, labels)
                        preValue, predicted = torch.max(outputs.data, 1)
                        total += labels.size(0)
                        correct += torch.sum(predicted == labels.data)
                        # Renamed loop variables: the original reused 'i',
                        # shadowing the training batch index.
                        for p in predicted:
                            predited_list.append(str(p.item()) + '\n')
                        for t in labels.data:
                            labels_list.append(str(t.item()) + '\n')
                        for row in outputs.cpu().data.numpy():
                            preValue_list.append(row)

                    acc = 100. * correct / total
                    f_loss1.write(str(loss.item()) + '\n')
                    f_acc1.write(str(float(acc)) + '\n')
                    print('测试分类准确率为:{}%, time:{}'.format(round(acc.item(), 3), time.time() - since))

                    if acc >= best_acc:
                        best_acc = acc
                        # Fixed: save the same dict layout the resume branch
                        # reads ('EPOCH'/'state_dict'/'optimizer'). The original
                        # saved the raw module here, which crashed any resume
                        # from a mid-run checkpoint. 'EPOCH': epoch + 2 makes
                        # pre_epoch resolve to the next unfinished epoch.
                        torch.save({'EPOCH': epoch + 2,
                                    'state_dict': net.state_dict(),
                                    'optimizer': optimizer.state_dict()},
                                   checkpoint_path)
                        tsne_features = torch.cat(feature_list, 0).cpu().data.numpy()
                        np.save('./model/' + data_dir + '/tsne_features.npy',
                                tsne_features)  # for t-SNE
                        np.save('./model/' + data_dir + '/preValue.npy', preValue_list)  # ROC scores
                        save_file('./model/' + data_dir + '/con_predicted.txt',
                                  predited_list)  # predicted labels for confusion matrix
                        save_file('./model/' + data_dir + '/con_labels.txt',
                                  labels_list)  # true labels for confusion matrix
                        torch.save(net.state_dict(), './model/' + data_dir + '/net_best.pkl')
        # Final checkpoint: model + optimizer + epoch counter in one dict so
        # the resume branch above can restore everything.
        torch.save({'EPOCH': EPOCH + 1,
                    'state_dict': net.state_dict(),
                    'optimizer': optimizer.state_dict()},
                   checkpoint_path)
    finally:
        # Fixed: the original never closed the training log files.
        f_loss.close()
        f_acc.close()


# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
