# -*- coding: utf-8 -*-
# @Time : 2022/11/11 15:48
# @Author : xiaow
# @File : classification.py
# @Software : PyCharm


import torch
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
import os
from PIL import Image
import warnings
import matplotlib.pyplot as plt
from args import args
import utils

# Silence library warnings and turn on matplotlib interactive mode.
warnings.filterwarnings("ignore")

plt.ion()

# Preprocessing pipeline: resize + center-crop to 84x84, convert to a
# tensor, then normalize each RGB channel with the ImageNet mean/std.
_train_steps = [
    transforms.Resize(84),
    transforms.CenterCrop(84),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
]
data_transforms = {'train': transforms.Compose(_train_steps)}


def Load_Image_Information(path):
    """Load the image at *path* and return it as an RGB PIL image.

    Opens the file inside a context manager so the underlying file
    handle is closed promptly instead of leaking until garbage
    collection; ``convert('RGB')`` forces the pixel data to be read
    before the file is closed.
    """
    with Image.open(path) as img:
        return img.convert('RGB')


class my_Data_Set(torch.utils.data.Dataset):
    """Shoe-image dataset scanning three class directories from ``args``.

    Labels: 0 = sandal, 1 = boot, 2 = shoe.

    Fixed defect: the original subclassed ``nn.Module``; a dataset must
    subclass ``torch.utils.data.Dataset`` (it only needs ``__getitem__``
    and ``__len__``, not module/parameter machinery).
    """

    def __init__(self, transform=None, loader=None):
        """Collect (path, label) pairs from the three class directories.

        transform -- optional callable applied to each loaded image
        loader    -- callable(path) -> image object (e.g. a PIL loader)
        """
        super(my_Data_Set, self).__init__()
        images = []
        labels = []
        # Each directory holds exactly one class; the label is fixed
        # per directory: 0 sandal, 1 boot, 2 shoe.
        class_dirs = ((0, args.sandal_path),
                      (1, args.boot_path),
                      (2, args.shoe_path))
        for label, directory in class_dirs:
            for filename in os.listdir(directory):
                images.append(os.path.join(directory, filename))
                labels.append(label)
        self.images = images
        self.labels = labels
        self.transform = transform
        self.loader = loader

    def __getitem__(self, item):
        """Load and return the (image, label) pair at index *item*."""
        imageName = self.images[item]
        label = self.labels[item]
        # Read the raw image, then apply the preprocessing transform.
        image = self.loader(imageName)
        if self.transform is not None:
            image = self.transform(image)
        return image, label

    def __len__(self):
        """Return the number of samples in the dataset."""
        return len(self.images)


# Build the DataLoader input format that PyTorch training expects.
train_dataset = my_Data_Set(transform=data_transforms['train'], loader=Load_Image_Information)
train_loader = DataLoader(train_dataset, batch_size=10, shuffle=True)


class ConvLayer(torch.nn.Module):
    """Reflection-padded convolution block: pad -> Conv2d -> LeakyReLU.

    Padding of floor(kernel_size / 2) keeps the spatial size unchanged
    at stride 1. When ``is_last`` is True the activation is skipped so
    the final layer can emit raw (un-squashed) feature values.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, is_last=False):
        super(ConvLayer, self).__init__()
        reflection_padding = int(np.floor(kernel_size / 2))
        self.reflection_pad = nn.ReflectionPad2d(reflection_padding)
        self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
        # NOTE(review): this dropout is registered but never applied in
        # forward(); kept so previously pickled models still unpickle.
        self.dropout = nn.Dropout2d(p=0.5)
        self.is_last = is_last

    def forward(self, x):
        # Reflection-pad first so the convolution preserves spatial size.
        out = self.reflection_pad(x)
        out = self.conv2d(out)
        if not self.is_last:  # was `is False` — non-idiomatic comparison
            out = F.leaky_relu(out, inplace=True)
        return out


# Dense convolution unit
class DenseConv2d(torch.nn.Module):
    """DenseNet-style unit: convolve, then concatenate input and output.

    Unless ``is_Last`` is set, the input is concatenated with the conv
    output along the channel axis so later layers see every earlier
    feature map (the DenseNet skip pattern). The last unit returns only
    the conv output.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, is_Last=False):
        super(DenseConv2d, self).__init__()
        self.dense_conv = ConvLayer(in_channels, out_channels, kernel_size, stride, is_Last)
        self.is_last = is_Last

    def forward(self, x):
        out = self.dense_conv(x)
        if not self.is_last:  # was `== False` — non-idiomatic comparison
            # Channel-wise concat (dim=1) implements the dense skip link.
            out = torch.cat([x, out], 1)
        return out


class Net(nn.Module):
    """Dense-convolution classifier for 84x84 RGB images, 3 classes.

    Seven DenseConv2d layers each add 16 channels; the final layer
    (is_last=True) outputs exactly 16 channels, which are flattened and
    passed through three fully connected layers (2000 -> 700 -> 3).
    """

    def __init__(self, in_channels, kernel_size, stride):
        super(Net, self).__init__()
        growth = 16  # channels contributed by each dense layer
        # Each dense layer consumes the original input plus all feature
        # maps produced so far: in_channels + growth * i.
        denseblock = [
            DenseConv2d(in_channels + growth * i, growth, kernel_size, stride)
            for i in range(6)
        ]
        # Final layer: is_last=True, so no concat — output is `growth` channels.
        denseblock.append(
            DenseConv2d(in_channels + growth * 6, growth, kernel_size, stride, True))
        self.denseblock = nn.Sequential(*denseblock)
        # Flattened feature count: 16 channels * 84 * 84 spatial positions
        # (reflection padding keeps the 84x84 input size unchanged).
        self.fc1 = nn.Linear(growth * 84 * 84, 2000)
        self.fc2 = nn.Linear(2000, 700)
        self.fc3 = nn.Linear(700, 3)

    def forward(self, x):
        x = self.denseblock(x)
        # Flatten per sample. Using x.size(0) keeps the batch dimension
        # intact; the original `view(-1, 16*84*84)` put -1 on the batch
        # axis, which silently misshapes the batch if the element count
        # ever disagrees with the hard-coded constant.
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)


def train():
    """Train the classifier on ``train_loader`` for 50 epochs on GPU.

    Saves a whole-model checkpoint 'shoenet<epoch>.pth' after every
    epoch and a final 'shoenet.pth'. Requires CUDA.
    """
    net = Net(3, 3, 1)
    net.cuda()
    net.train()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.5)
    for epoch in range(50):
        running_loss = 0.0
        for i, (inputs, labels) in enumerate(train_loader):
            # DataLoader already yields tensors; the original wrapped
            # them in deprecated Variable and a redundant torch.tensor
            # copy before moving to the GPU.
            inputs = inputs.cuda()
            labels = labels.cuda()
            optimizer.zero_grad()  # clear accumulated gradients
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()  # apply the update
            running_loss += loss.item()
            if i % 5 == 0:
                # NOTE(review): divisor 200 does not match the 5-batch
                # reset window, so the printed value is scaled down;
                # kept as-is to preserve the original log output.
                print('[%d %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 200))
                running_loss = 0.0
        # Save a checkpoint after each epoch.
        torch.save(net, 'shoenet' + str(epoch) + '.pth')
    torch.save(net, 'shoenet.pth')


# Label mapping: 0 sandal, 1 boot, 2 shoe
def test():
    """Classify pics/1.png .. pics/8.png with the saved model and tag
    each image via utils.settags. Requires CUDA and 'shoenet49.pth'.
    """
    # NOTE(review): torch.load of a whole pickled model needs this
    # module's class definitions at load time; a state_dict checkpoint
    # would be more robust.
    mmodel = torch.load('shoenet49.pth')
    mmodel.eval()  # inference mode (the original skipped this)
    with torch.no_grad():  # no gradients needed for inference
        for i in range(1, 9):
            url = 'pics/' + str(i) + '.png'
            img = Image.open(url).convert('RGB')
            # Reuse the training-time preprocessing, add a batch axis.
            img = data_transforms['train'](img).unsqueeze(0).cuda()
            pred = torch.argmax(mmodel(img)).item()
            if pred == 0:
                utils.settags('凉鞋', url)
            elif pred == 1:
                utils.settags('靴子', url)
            elif pred == 2:
                utils.settags('鞋', url)


# Script entry point: runs inference/tagging; call train() to retrain.
if __name__ == '__main__':
    test()
