import torch
import torch.nn as nn
import torch.optim as optim  # optim provides the optimization algorithms, including SGD
from torchvision.datasets import ImageFolder
from torchvision import transforms
from torch.utils.data import DataLoader
import pandas as pd
from tqdm import tqdm
from Vgg16_net import Vgg16_net


def train_models(train_path='./dataset2/traindata_resized/', rate=0.00001,
                 n_classes=49, batch=32, epochs=200):
    """Train a Vgg16_net classifier on an ImageFolder dataset.

    Per-epoch average loss and running accuracy are logged, appended to
    DataFrames, and written to CSV when training finishes.  The trained
    network (full object + state_dict) and the optimizer state are saved
    to ./module/net10_20240507_v2.pth.

    Args:
        train_path: root directory laid out for torchvision ImageFolder
            (one subdirectory per class).
        rate: SGD learning rate.
        n_classes: number of output classes passed to Vgg16_net.
        batch: mini-batch size.
        epochs: number of passes over the training set.
    """
    # ToTensor scales pixels from [0, 255] to [0, 1]; Normalize with
    # mean/std 0.5 then shifts them to [-1, 1].
    data_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])
    train_dataset = ImageFolder(train_path, transform=data_transform)
    train_loader = DataLoader(dataset=train_dataset, batch_size=batch,
                              shuffle=True, num_workers=0)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = Vgg16_net(n_classes=n_classes).to(device)
    print(net)
    # CrossEntropyLoss expects raw logits from the network plus integer labels.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=rate)
    print('Using: {} device'.format(device))

    print("Start Training...")
    tra_steep_loss_df = pd.DataFrame(columns=('step', 'loss'))
    tra_steep_acc_df = pd.DataFrame(columns=('step', 'acc'))

    # FIX: the original logged when `i % len(train_loader) == 0` (i.e. only
    # on the first batch of each epoch), divided the accumulated loss by a
    # hard-coded 100, and used a magic batch count of 800 for the step
    # index.  We now log once per epoch with the true epoch-average loss
    # and a step index derived from the actual loader length.
    batches_per_epoch = len(train_loader)
    correct = 0
    total = 0
    for epoch in tqdm(range(epochs), desc='overall progress (epochs)'):
        running_loss = 0.0
        for inputs, labels in train_loader:
            inputs, labels = inputs.to(device), labels.to(device)  # move to GPU if available

            # Gradients accumulate across backward() calls in PyTorch,
            # so clear them before every step.
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()

            # Running accuracy, now tracked on every batch instead of
            # only the first batch of each epoch.
            _, pred = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (pred == labels).sum().item()

        step = (epoch + 1) * batches_per_epoch
        avg_loss = running_loss / max(batches_per_epoch, 1)
        accuracy = 100 * correct / total if total else 0.0
        print('correct = %d , total =  %d' % (correct, total))
        print('[Epoch %d, Batch %5d] train_loss:%.3f, accuracy:%.3f]' % (
            epoch + 1, step, avg_loss, accuracy))
        # Append one row per epoch; rows are concatenated for the CSV export.
        tra_loss_temp_df = pd.DataFrame([step, avg_loss], index=('step', 'loss')).T
        tra_acc_temp_df = pd.DataFrame([step, accuracy], index=('step', 'acc')).T
        tra_steep_loss_df = pd.concat([tra_steep_loss_df, tra_loss_temp_df])
        tra_steep_acc_df = pd.concat([tra_steep_acc_df, tra_acc_temp_df])

    # Re-number rows consecutively before writing the logs out.
    tra_steep_loss_df = tra_steep_loss_df.reset_index(drop=True)
    tra_steep_acc_df = tra_steep_acc_df.reset_index(drop=True)
    tra_steep_loss_df.to_csv('1m_CNN_tra_steep_loss_0507_v2.csv')
    tra_steep_acc_df.to_csv('1m_CNN_tra_steep_acc_0507_v2.csv')
    print("Done Training!")

    # Save both the full model object and its state_dict so the checkpoint
    # can be restored with or without the class definition at hand.
    state = {'net': net, 'dict': net.state_dict(), 'optimizer': optimizer.state_dict()}
    torch.save(state, './module/net10_20240507_v2.pth')


def test_models(test_path='./dataset2/testdata_resized/', batch=32,
                checkpoint_path='./module/net10_20240507.pth'):
    """Evaluate a saved checkpoint on an ImageFolder test set.

    Loads the checkpoint produced by ``train_models`` (a dict holding the
    full model under 'net' and its weights under 'dict'), runs inference
    over the whole test set, and prints the overall classification
    accuracy.

    Args:
        test_path: root directory laid out for torchvision ImageFolder.
        batch: mini-batch size for the test loader.
        checkpoint_path: path of the checkpoint saved by train_models.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Must match the training-time preprocessing: [0,1] then [-1,1].
    data_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])
    test_dataset = ImageFolder(test_path, transform=data_transform)
    test_loader = DataLoader(dataset=test_dataset, batch_size=batch,
                             shuffle=False, num_workers=4)
    print('test_loader', test_loader)
    print('(test_dataset', test_dataset)

    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.  map_location lets a GPU-saved
    # checkpoint load on a CPU-only machine (the original crashed there).
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model = checkpoint['net'].to(device)
    model.load_state_dict(checkpoint['dict'])
    # FIX: the original never called eval(), so dropout/batch-norm ran in
    # training mode during inference.
    model.eval()

    # FIX: the original broke out of the loop after the first batch and
    # derived "accuracy" from a single torch.all() over that batch; we now
    # score every sample in the test set.
    correct = 0
    total = 0
    # no_grad skips gradient bookkeeping during the forward pass.
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    accuracy = 100 * correct / total if total else 0.0
    print('correct = %d , total = %d' % (correct, total))
    print('test accuracy = %.3f%%' % accuracy)


if __name__ == '__main__':
    # A trained model already exists, so skip retraining on every debug run;
    # uncomment train_models() to retrain from scratch.
    # train_models()
    test_models()
