import numpy as np
import torch
import torchvision
from matplotlib import pyplot as plt
from torch import nn, optim

import data_to_img
import img_intensive
from GAF_config import config
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

# Optionally regenerate the 2-D feature images (GAF) from the raw signals.
if config.NEED_TO_IMG:
    for mat_name in config.MAT_NAME:
        data_to_img.pre_data(mat_name, 1400, 256, 256, 2)

# Optionally run image augmentation over the dataset.
if config.IMG_INTENSIVE:
    img_intensive.img_intensive(config.DATASET, 8, False)

# Hyperparameters
input_size = 256  # overall image size (side length in pixels)
num_classes = 3  # number of label classes
num_epochs = 10  # total number of training epochs
batch_size = 16  # number of samples per mini-batch

# Run on the first CUDA device when available, otherwise fall back to the CPU.
cuda_available = torch.cuda.is_available()
device = torch.device("cuda:0" if cuda_available else "cpu")
print("当前使用GPU加速运算：" + str(cuda_available))

# Preprocessing pipelines. Named `*_transform` so they do not shadow the
# `torchvision.transforms` module imported above.
# Training: resize the short side to 256 (aspect ratio kept), random +/-45
# degree rotation for augmentation, center-crop to 3x224x224, then convert
# to a tensor with pixel values scaled to [0, 1].
train_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.RandomRotation(45),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
])
# Evaluation: same geometry but deterministic — random rotation is a training
# augmentation and must not be applied to the test set.
test_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
])

# Training set
data_train = datasets.ImageFolder(config.DATASET + 'train', transform=train_transform)
train_loader = DataLoader(data_train, batch_size=batch_size, shuffle=True)
# Test set; evaluation order is irrelevant, so no shuffling needed
data_test = datasets.ImageFolder(config.DATASET + 'test', transform=test_transform)
test_loader = DataLoader(data_test, batch_size=batch_size, shuffle=False)


# Convolutional network: stacked conv+relu(+pool) stages and a linear classifier
class CNN(nn.Module):
    """Convolutional classifier for 3x224x224 images producing 3 class logits.

    Feature-map flow (channels, H, W):
        input  (3, 224, 224)
        conv1  -> (96, 112, 112) -> maxpool -> (96, 56, 56)
        conv2  -> (192, 56, 56)
        conv3  -> (384, 56, 56)  -> maxpool -> (384, 28, 28)
        conv4  -> (764, 28, 28)  -> maxpool -> (764, 14, 14)
        conv5  -> (1528, 14, 14)
        conv6  avgpool -> (1528, 7, 7)
        out    Linear(1528*7*7 -> 3)
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Stage 1: stride-2 7x7 conv halves the spatial size; pooling halves it again.
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 96, kernel_size=7, stride=2, padding=3),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        # Stage 2: 5x5 conv, padding 2 keeps the spatial size.
        self.conv2 = nn.Sequential(
            nn.Conv2d(96, 192, 5, 1, 2),
            nn.ReLU(),
        )
        # Stage 3: conv followed by a 2x2 max-pool.
        self.conv3 = nn.Sequential(
            nn.Conv2d(192, 384, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        # Stage 4: conv followed by a 2x2 max-pool.
        self.conv4 = nn.Sequential(
            nn.Conv2d(384, 764, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Stage 5: channel expansion only; spatial size unchanged.
        self.conv5 = nn.Sequential(
            nn.Conv2d(764, 1528, 5, 1, 2),
            nn.ReLU(),
        )
        # Stage 6: 2x2 average pooling down to 7x7.
        self.conv6 = nn.Sequential(
            nn.AvgPool2d(2),
        )
        # Classifier head: flattened features -> 3 logits.
        self.out = nn.Linear(1528 * 7 * 7, 3)

    def forward(self, x):
        """Run all six stages, flatten, and return the class logits."""
        for stage in (self.conv1, self.conv2, self.conv3,
                      self.conv4, self.conv5, self.conv6):
            x = stage(x)
        flat = x.view(x.size(0), -1)  # (batch, 1528*7*7)
        return self.out(flat)


# Count correct predictions in a batch
def accuracy(predictions, labels):
    """Return (number of correct predictions, batch size).

    `predictions` holds per-class scores of shape (batch, classes);
    `labels` holds one integer ground-truth class per sample.
    """
    predicted = predictions.detach().max(dim=1).indices
    correct = predicted.eq(labels.detach().view_as(predicted)).sum()
    return correct, len(labels)


# --- Train the network ---
net = CNN().to(device)
criterion = nn.CrossEntropyLoss()  # expects raw logits + integer class labels
optimizer = optim.Adam(net.parameters(), lr=0.001)

for epoch in range(num_epochs):
    # (correct, total) tuples for every training batch seen this epoch
    train_rights = []

    for batch_idx, (data, target) in enumerate(train_loader):
        net.train()  # re-enable training mode (eval below may have switched it off)
        data = data.to(device)
        target = target.to(device)
        output = net(data)
        optimizer.zero_grad()
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        train_rights.append(accuracy(output, target))

        # Periodically evaluate on the held-out set.
        if batch_idx % 30 == 0:
            net.eval()
            val_rights = []

            # no_grad: skip autograd bookkeeping during evaluation (the original
            # built gradient graphs here, wasting memory and time). Separate
            # val_* names avoid clobbering the outer loop's data/target.
            with torch.no_grad():
                for val_data, val_target in test_loader:
                    val_data = val_data.to(device)
                    val_target = val_target.to(device)
                    val_output = net(val_data)
                    val_rights.append(accuracy(val_output, val_target))

            # Aggregate (correct, total) counts over the batches seen so far.
            train_r = (sum(t[0] for t in train_rights), sum(t[1] for t in train_rights))
            val_r = (sum(t[0] for t in val_rights), sum(t[1] for t in val_rights))

            print('当前epoch：{} [{}/{}  ({:.0f}%)]\t 损失:{:.6f} \t训练集准确率:{:.2f}%\t测试集准确率:{:.2f}%'.format(
                epoch, batch_idx * batch_size, len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.item(),
                100. * float(train_r[0]) / train_r[1],
                100. * float(val_r[0]) / val_r[1]
            ))