import torch.nn.functional as F
import torch.nn as nn
import torch
from utils import MyDataset, validate, show_confMat
import os
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import torch.optim as optim
from torch.autograd import Variable


# Per-channel mean/std used for input normalization. The values match the
# commonly published CIFAR-10 statistics — presumably computed from this
# project's training split; verify against the data-prep code.
normMean = [0.4948052, 0.48568845, 0.44682974]
normStd = [0.24580306, 0.24236229, 0.2603115]
normTransform = transforms.Normalize(normMean, normStd)
# Training augmentation: resize shorter side to 32, pad by 4 and take a random
# 32x32 crop, then convert to tensor and normalize.
trainTransform = transforms.Compose([
    transforms.Resize(32),
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor(),
    normTransform
])

# Validation: no augmentation, only tensor conversion + normalization.
# NOTE(review): no Resize here — assumes validation images are already 32x32;
# confirm against the dataset.
validTransform = transforms.Compose([
    transforms.ToTensor(),
    normTransform
])

# Index files consumed by MyDataset (presumably "path label" lines — see utils).
train_txt_path = os.path.join("..","data", "train.txt")
valid_txt_path = os.path.join("..","data", "valid.txt")

# Build MyDataset instances
train_data = MyDataset(txt_path=train_txt_path, transform=trainTransform)
valid_data = MyDataset(txt_path=valid_txt_path, transform=validTransform)

# Build DataLoaders; only the training loader shuffles.
train_loader = DataLoader(dataset=train_data, batch_size=16, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=16)



class Net(nn.Module):
    """LeNet-5-style CNN producing 10-class logits for 3x32x32 inputs."""

    def __init__(self):
        super().__init__()
        # Two conv+pool stages: 3x32x32 -> 6x14x14 -> 16x5x5.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.pool2 = nn.MaxPool2d(2, 2)
        # Classifier head: 16*5*5 -> 120 -> 84 -> 10.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return raw class logits of shape (batch, 10) for x of shape (batch, 3, 32, 32)."""
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        # Flatten per sample. Pinning the batch dim with x.size(0) (instead of
        # view(-1, 16*5*5)) makes a wrongly sized input fail loudly rather than
        # silently re-batching.
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def initialize_weights(self):
        """In-place init: Xavier-normal conv weights, N(0, 0.01) linear weights,
        unit BatchNorm scale, zero biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # nn.init functions already operate under no_grad; avoid the
                # legacy .data access.
                nn.init.xavier_normal_(m.weight)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
# Resume from checkpoint: "train.weights" in the CWD must exist (written by a
# previous run's save step) and contain 'net', 'optimizer' and 'epoch' entries.
net = Net()
path = os.getcwd()
path = os.path.join(path,"train.weights")
weight = torch.load(path)
# NOTE(review): initialize_weights() is immediately overwritten by
# load_state_dict() on the next line, so this call is redundant here.
net.initialize_weights()
net.load_state_dict(weight['net'])


criterion = nn.CrossEntropyLoss()                                                   # loss function
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9, dampening=0.1)    # optimizer (state restored below)
optimizer.load_state_dict(weight['optimizer'])
# Decay LR by 10x every 50 epochs.
# NOTE(review): the scheduler's own epoch counter is NOT restored from the
# checkpoint, so the decay schedule restarts from 0 on every resume — confirm
# whether that is intended.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)     # LR decay policy


max_epoch = 1
# Resume the epoch counter. The checkpoint's 'epoch' is the number of epochs
# already completed; it is incremented once per epoch inside the loop. (The
# original added 1 both here AND per epoch, over-counting by one every run.)
thisepoch = weight['epoch']
for epoch in range(max_epoch):
    thisepoch = thisepoch + 1
    loss_sigma = 0.0    # running loss over the current 10-iteration window
    correct = 0.0
    total = 0.0
    net.train()  # ensure dropout/BN layers are in training mode

    for i, data in enumerate(train_loader):
        # Unpack images and labels. The deprecated Variable wrapper is gone:
        # plain tensors carry autograd state directly.
        inputs, labels = data

        # forward, backward, update weights
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # accumulate prediction statistics
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
        loss_sigma += loss.item()

        # print training info every 10 iterations; loss is the 10-iter average
        if i % 10 == 9:
            loss_avg = loss_sigma / 10
            loss_sigma = 0.0
            print("Training: Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch + 1, max_epoch, i + 1, len(train_loader), loss_avg, correct / total))

    # Step the LR scheduler once per epoch, AFTER the optimizer updates.
    # Required ordering since PyTorch 1.1 — stepping at the top of the epoch
    # (as the original did) skips the first learning-rate value.
    scheduler.step()

    # NOTE(review): a large commented-out validation pass (per-epoch confusion
    # matrix + TensorBoard logging) was removed from here; utils.validate /
    # utils.show_confMat are the supported equivalents if validation is needed.

# ******************* Save checkpoint ************************
# Persist model/optimizer state plus the completed-epoch counter so a later
# run of this script can resume where this one stopped.
checkpoint = {
    'net': net.state_dict(),
    'optimizer': optimizer.state_dict(),
    'epoch': thisepoch,
}
checkpoint_path = os.path.join(os.getcwd(), "train.weights")
torch.save(checkpoint, checkpoint_path)

print('Finished Training')