import torch
import torch.nn as nn
from torchvision import transforms, datasets, utils
import matplotlib.pyplot as plt
import numpy as np
import torch.optim as optim
import sys
# sys.path.append('./')
# from model import AlexNet
# from model4 import ResNet18
# from model5 import ResNet18
from model5 import ResNet18
import data_input

import os
import json
import time
# from loss import FocalLoss

# Select GPU 0 when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)

data_dir = '../../datasets/rafdb/'   # RAF-DB dataset root
batch_size = 8
lr_gamma = 0.9                       # LR decay factor used by the scheduler below

# trainset loader (returns the DataLoader and the number of training samples)
train_loader, tra_num = data_input.train_data(data_dir, batch_size)
# validation loader (larger batch size since no gradients are kept)
validate_loader, val_num = data_input.val_data(data_dir, 128)

# 7 classes: the basic facial expressions of RAF-DB.
net = ResNet18(num_classes=7)

pre_train_model = 'AlexNet.pth'      # checkpoint to resume from (when is_recovery)
save_path = './AlexNet.pth'          # where the best model is saved

net.to(device)
loss_function = nn.CrossEntropyLoss()

optimizer = optim.Adam(net.parameters(), lr=0.0001)
# Decay the learning rate by lr_gamma every 5 epochs.
# (fix: previously a hard-coded 0.9 was passed while lr_gamma sat unused)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 5, lr_gamma)

is_recovery = False
if is_recovery:
    # map_location lets a checkpoint saved on GPU load on a CPU-only machine.
    checkpoint = torch.load(pre_train_model, map_location=device)
    net.load_state_dict(checkpoint)
    print("-------- 模型恢复训练成功-----------")
    save_path = './pretrain_{}_AlexNet.pth'.format(pre_train_model.split('.')[0])

# 写入文本
def pre_write_txt(pred, file):
    f = open(file, 'a', encoding='utf-8')
    f.write(str(pred))
    f.write('\n')
    f.close()
    print("-----------------预测结果已经写入文本文件--------------------")


best_acc = 0.0  # best validation accuracy seen so far

for epoch in range(2000):

    # ---- train ----
    net.train()  # enable dropout / batch-norm training behavior
    running_loss = 0.0
    t1 = time.perf_counter()  # time one training epoch
    tra_acc = 0.0             # correct predictions accumulated this epoch
    num_batches = 0           # batches seen this epoch (for a correct mean loss)
    for step, data in enumerate(train_loader, start=0):
        # labels are class indices here, not one-hot vectors
        images, labels = data
        optimizer.zero_grad()
        outputs = net(images.to(device))
        loss = loss_function(outputs, labels.to(device))
        loss.backward()
        optimizer.step()

        # accumulate statistics
        tra_predict_y = torch.max(outputs, dim=1)[1]
        step_acc = (tra_predict_y == labels.to(device)).sum().item()
        tra_acc += step_acc
        running_loss += loss.item()
        num_batches += 1
        # print progress every 10 steps (batches)
        if (step+1)%10 == 0:
            print("step:{} train acc:{:.3f} train loss:{:.3f}".format(step,step_acc/len(labels),loss))
    one_epoch_time = time.perf_counter()-t1

    # ---- validate ----
    net.eval()  # disable dropout for evaluation
    acc = 0.0  # accumulate number of correct predictions over the epoch
    with torch.no_grad():
        for data_test in validate_loader:
            test_images, test_labels = data_test
            outputs = net(test_images.to(device))
            predict_y = torch.max(outputs, dim=1)[1]
            acc += (predict_y == test_labels.to(device)).sum().item()

        accurate_test = acc / val_num
        if accurate_test > best_acc:
            # Only keep the weights of the best-performing model so far.
            best_acc = accurate_test
            torch.save(net.state_dict(), save_path)
        # fix: mean loss was previously divided by the last 0-based step index
        # (off-by-one, and ZeroDivisionError for a single-batch loader);
        # divide by the actual number of batches instead.
        mean_loss = running_loss / max(num_batches, 1)
        print('\n[epoch %d] trainset_acc:%.3f train_loss: %.3f  testset_accuracy: %.3f best_acc: %.3f one_epoch_time:%.3fs\n' %
              (epoch + 1, tra_acc/tra_num, mean_loss, accurate_test,best_acc,one_epoch_time))
        pre_write_txt("epoch:{} trainset_acc:{:.3f} train_loss:{:.3f} testset_accuracy: {:.3f} best_acc: {:.3f}".format(epoch + 1, tra_acc/tra_num, mean_loss, accurate_test,best_acc), file = 'result.txt')
    lr_scheduler.step()

print('Finished Training')