
import os
import sys
import argparse
import torch
import torch.nn as nn
from torchsummary import summary
from tqdm import tqdm
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
import joblib
import matplotlib.pyplot as plt
import logging


# Work around the duplicate-OpenMP-runtime abort (libiomp/MKL clash, common
# on Windows when torch and matplotlib each bundle an OpenMP runtime).
os.environ['KMP_DUPLICATE_LIB_OK']='TRUE'

# Make the sibling project directory importable so the packages below resolve.
sys.path.append('../taikula')

from inm_yue.res2net import res2net50
from ccc_qmk.GCNet import gcnet9, gcnet18, gcnet50, gcnet_full18, gcnet_full9, gcnet_full_att18
from ccc_qmk.Resnet import resnet9, resnet18, resnet50
from ccc_qmk.vit import VisionTransformer
from utils.LoadData import *

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def train(net, trainloader, optimizer, scheduler=None, epoch=None):
    """Run one training epoch and return the mean batch loss.

    Args:
        net: model to train (already moved to the module-level ``device``).
        trainloader: iterable of ``(data, target)`` batches; must support
            ``len()`` for the loss average.
        optimizer: optimizer, stepped once per batch.
        scheduler: optional LR scheduler, stepped once PER EPOCH. The old
            code stepped it per batch, which made epoch-based milestones
            (``MultiStepLR([150, 225])`` in ``__main__``) fire after 150
            *batches* instead of 150 epochs.
        epoch: optional epoch index shown in the progress bar. The old code
            read a global ``epoch``, which raised NameError whenever train()
            was called outside this script's main loop.

    Returns:
        Average cross-entropy loss over all batches of this epoch.
    """
    net.train()
    running_loss = 0.0
    desc = "Epoch {}".format(epoch) if epoch is not None else "Training"
    for batch_idx, (data, target) in enumerate(tqdm(trainloader, desc=desc)):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = net(data)

        # No .cuda() on the loss: it already lives on `device`, and calling
        # .cuda() crashed on CPU-only machines.
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

    if scheduler is not None:
        # Epoch-based schedulers such as MultiStepLR expect one step per epoch.
        scheduler.step()

    # Mean training loss for this epoch.
    return running_loss / len(trainloader)
    


def test(net, testloader):
    """Evaluate ``net`` on ``testloader``.

    Args:
        net: model to evaluate (already on the module-level ``device``).
        testloader: DataLoader over the evaluation set; ``len(testloader)``
            is the number of batches and ``len(testloader.dataset)`` the
            number of samples.

    Returns:
        Tuple ``(avg_loss, accuracy)`` where ``avg_loss`` is the mean
        cross-entropy per batch and ``accuracy`` is a percentage in [0, 100].
    """
    net.eval()
    correct = 0
    running_loss = 0.0
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(tqdm(testloader)):
            data, target = data.to(device), target.to(device)
            output = net(data)
            # No .cuda() on the loss: it is already on `device`, and the old
            # call crashed on CPU-only machines.
            loss = F.cross_entropy(output, target)
            # Predicted class = argmax over logits; count exact matches.
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
            running_loss += loss.item()

    acc = 100. * correct / len(testloader.dataset)
    return running_loss / len(testloader), acc


if __name__ == '__main__':

    # --- Logging: mirror progress to the console and to utils/train.log ---
    logger = logging.getLogger('train_logger')
    logger.setLevel(logging.INFO)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)

    file_handler = logging.FileHandler('utils/train.log')
    file_handler.setLevel(logging.INFO)

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    console_handler.setFormatter(formatter)
    file_handler.setFormatter(formatter)

    logger.addHandler(console_handler)
    logger.addHandler(file_handler)

    # Hyper-parameters shared by every experiment below.
    learning_rate = 0.1
    weight_decay = 5e-4
    gamma = 0.1
    milestones = [150, 225]

    # One entry per training run:
    #   (model factory, epochs, SGD momentum, summary input size,
    #    (train loader, test loader), save_best)
    # save_best=True  -> checkpoint only new-best accuracies above 60%.
    # save_best=False -> snapshot every 10th epoch above 60% instead.
    experiments = [
        (gcnet_full18, 200, 0.3, (3, 32, 32), (train_loader, test_loader), True),
        (resnet9,      300, 0.9, (3, 32, 32), (train_loader, test_loader), True),
        # NOTE(review): the original hard-coded momentum=0.3 for this run even
        # though its surrounding config said 0.9 — preserved as-is; confirm
        # which value was intended.
        (gcnet18,      300, 0.3, (3, 224, 224), (train_loader_224, test_loader_224), False),
    ]

    # `epoch` is deliberately a module-level loop variable: the original
    # train() reads it as a global for its progress-bar label.
    for factory, epoches, momentum, input_size, (tr_loader, te_loader), save_best in experiments:
        train_losses = []
        test_losses = []
        test_accs = []

        name, model = factory()
        model = model.to(device)
        summary(model, input_size=input_size)
        logger.info("model = {}".format(name))

        # Only optimize trainable parameters (supports frozen backbones).
        optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
                              lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
        scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma)

        for epoch in range(epoches):
            train_loss = train(model, tr_loader, optimizer, scheduler)
            test_loss, test_acc = test(model, te_loader)
            logger.info('Epoch [{}/{}], Train Loss: {:.4f}, Test Loss: {:.4f}, Test Acc: {:.4f}'
                        .format(epoch + 1, epoches, train_loss, test_loss, test_acc))

            if save_best:
                # epoch != 0 guards max() against the initially-empty list.
                if epoch != 0 and test_acc > max(test_accs) and test_acc > 60:
                    joblib.dump(model, "./models/{}-epoch-{}-acc-{:.4f}.model".format(name, epoch + 1, test_acc))
            elif epoch % 10 == 0 and test_acc > 60:
                joblib.dump(model, "./models/{}-epoch-{}-acc-{:.4f}.model".format(name, epoch + 1, test_acc))

            train_losses.append(train_loss)
            test_losses.append(test_loss)
            test_accs.append(test_acc)

        # Loss curves. plt.clf() after EVERY savefig — the original forgot to
        # clear the figure after the second run's accuracy plot, so the third
        # run's loss curves were drawn on top of stale accuracy data.
        plt.plot(train_losses, label='Train Loss')
        plt.plot(test_losses, label='Test Loss')
        plt.legend()
        plt.title(name)
        plt.savefig("./models/pic/{}-epoch-{}-Loss.png".format(name, epoches))
        plt.clf()

        # Accuracy curve.
        plt.plot(test_accs, label='Test Accuracy')
        plt.title(name)
        plt.legend()
        plt.savefig("./models/pic/{}-epoch-{}-acc.png".format(name, epoches))
        plt.clf()

# gcnet50 = GCNet50().to(device)
# summary(gcnet50, input_size=(3, 32, 32))

