import os
import zipfile
import random
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import paddle
from paddle import nn
from paddle import metric as M
from paddle.io import DataLoader,Dataset
from paddle.nn import functional as F
from paddle.optimizer import Adam
from paddle.optimizer.lr import NaturalExpDecay

BATCH_SIZE = 32      # samples per mini-batch
CLASS_DIM = 10       # number of gesture classes
EPOCHS = 10          # training epochs
LOG_GAP = 30         # log training info every LOG_GAP batches

INIT_LR = 3e-4       # initial learning rate
LR_DECAY = 0.5       # learning-rate decay rate (gamma of NaturalExpDecay)

SRC_PATH = "./data/Gestures.zip"      # path of the dataset archive
DST_PATH = "./data"                   # extraction destination
DATA_PATH = DST_PATH + "/Main"        # train/test dataset root
INFER_PATH = DST_PATH + "/Infer"      # inference dataset root
MODEL_PATH = "AlexNet.pdparams"       # where model parameters are saved

# Extract the archive only if the expected folders are missing.
# A context manager guarantees the zip file handle is closed even on error.
if not os.path.isdir(DATA_PATH) or not os.path.isdir(INFER_PATH):
    with zipfile.ZipFile(SRC_PATH, 'r') as z:
        z.extractall(DST_PATH)
print("数据集解压缩完毕")

train_list, test_list = [], []           # [image_path, label] pairs
file_folders = os.listdir(DATA_PATH)     # one sub-folder per class

# Walk every class folder; the folder name doubles as the class label
# (assumed to be a decimal string — see MyDataset.__getitem__).
for folder in file_folders:
    imgs = os.listdir(os.path.join(DATA_PATH, folder))
    for idx, img in enumerate(imgs):
        path = os.path.join(DATA_PATH, folder, img)
        if idx % 10 == 0:      # every 10th image -> test set (1:9 split)
            test_list.append([path, folder])
        else:
            train_list.append([path, folder])

class MyDataset(Dataset):
    """Map-style dataset over [image_path, label] pairs.

    * `data_list`: list of [path, label] pairs (label is a decimal string)
    * `transform`: callable mapping an image path to a model-ready tensor
    """

    def __init__(self, data_list, transform):
        super(MyDataset, self).__init__()
        # Copy before shuffling so the caller's list is not mutated in place.
        self.data_list = list(data_list)
        random.shuffle(self.data_list)
        self.transform = transform

    def __getitem__(self, index):
        """Return (transformed image, int label) for the given index."""
        img_path, label = self.data_list[index]
        img = self.transform(img_path)
        return img, int(label)

    def __len__(self):
        """Number of samples in the dataset."""
        return len(self.data_list)

def data_transform(img_path):
    """Load an image file and return a normalized float32 CHW tensor.

    * `img_path`: path to an image readable by PIL
    * returns: paddle tensor of shape (3, 224, 224), values in [0, 1]
    """
    # convert("RGB") guarantees 3 channels (grayscale/RGBA inputs would
    # otherwise break the HWC->CHW transpose below).
    img = Image.open(img_path).convert("RGB")
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    img = img.resize((224, 224), Image.LANCZOS)
    img = np.array(img).astype("float32")
    img = img.transpose((2, 0, 1))        # HWC -> CHW
    # Normalize to [0, 1] and convert to a paddle tensor.
    img = paddle.to_tensor(img / 255)
    return img

train_dataset = MyDataset(train_list, data_transform)  # training split
test_dataset = MyDataset(test_list, data_transform)    # test split

# Options shared by both loaders: batch size, single-process loading,
# and keeping the final (possibly smaller) batch.
_loader_opts = dict(batch_size=BATCH_SIZE, num_workers=0, drop_last=False)

# Shuffle only the training data; evaluation order stays deterministic.
train_loader = DataLoader(train_dataset, shuffle=True, **_loader_opts)
test_loader = DataLoader(test_dataset, shuffle=False, **_loader_opts)



class AlexNet(nn.Layer):
    """AlexNet-style CNN: five conv layers, three pools, three FC layers.

    * `in_channels`: number of input image channels
    * `n_classes`: number of output classes (raw logits, no softmax)

    Expects 224x224 input so that the final feature map is 256x6x6.
    Attribute names are kept stable so saved state_dicts remain loadable.
    """

    def __init__(self, in_channels=3, n_classes=10):
        super(AlexNet, self).__init__()
        # Feature extractor.
        self.conv1 = nn.Conv2D(in_channels, 96, 11, stride=4, padding=2)
        self.pool1 = nn.MaxPool2D(3, 2)
        self.conv2 = nn.Conv2D(96, 256, 5, stride=1, padding=2)
        self.pool2 = nn.MaxPool2D(3, 2)
        self.conv3 = nn.Conv2D(256, 384, 3, stride=1, padding=1)
        self.conv4 = nn.Conv2D(384, 384, 3, stride=1, padding=1)
        self.conv5 = nn.Conv2D(384, 256, 3, stride=1, padding=1)
        self.pool3 = nn.MaxPool2D(3, 2)
        # Classifier head with dropout between the FC layers.
        self.fc1 = nn.Linear(256 * 6 * 6, 4096)
        self.drop1 = nn.Dropout(0.25)
        self.fc2 = nn.Linear(4096, 4096)
        self.drop2 = nn.Dropout(0.25)
        self.fc3 = nn.Linear(4096, n_classes)

    def forward(self, x):
        """Run the network; returns class logits of shape (N, n_classes)."""
        # Stages 1-2: conv -> ReLU -> max-pool.
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        # Stage 3: three stacked convs share one pool.
        for conv in (self.conv3, self.conv4, self.conv5):
            x = F.relu(conv(x))
        x = self.pool3(x)
        # Flatten everything but the batch axis, then the FC head.
        x = paddle.flatten(x, 1, -1)
        x = self.drop1(F.relu(self.fc1(x)))
        x = self.drop2(F.relu(self.fc2(x)))
        return self.fc3(x)

model = AlexNet(in_channels=3, n_classes=CLASS_DIM)
model.train()                # switch the network into training mode

# Learning-rate schedule: exponential decay, stepped once per epoch below.
scheduler = NaturalExpDecay(learning_rate=INIT_LR, gamma=LR_DECAY)

# Adam optimizer driven by the scheduler above.
optimizer = Adam(learning_rate=scheduler, parameters=model.parameters())

# Per-batch histories collected for the loss/accuracy plots further down.
loss_arr, acc_arr = [], []

# Training loop: removed the leftover per-batch debug shape prints, which
# flooded stdout on every iteration.
for ep in range(EPOCHS):
    for batch_id, data in enumerate(train_loader()):
        x_data, y_data = data
        # Labels need shape [N, 1] for paddle's accuracy/cross_entropy.
        y_data = y_data[:, np.newaxis]
        y_pred = model(x_data)                  # forward pass -> logits
        acc = M.accuracy(y_pred, y_data)        # batch accuracy
        loss = F.cross_entropy(y_pred, y_data)  # softmax cross-entropy
        if batch_id != 0 and batch_id % LOG_GAP == 0:   # periodic logging
            print("Epoch：%d，Batch：%3d，Loss：%.5f，Acc：%.5f"
                  % (ep, batch_id, loss, acc))
        acc_arr.append(acc.item())
        loss_arr.append(loss.item())
        optimizer.clear_grad()
        loss.backward()
        optimizer.step()
    scheduler.step()       # decay the learning rate once per epoch

paddle.save(model.state_dict(), MODEL_PATH)  # persist the trained weights

# Plot the per-step training loss (top) and accuracy (bottom) curves.
fig = plt.figure(figsize=[10, 8])
panels = [
    (211, None, "Loss", loss_arr, "orangered"),
    (212, "Training Steps", "Accuracy", acc_arr, "dodgerblue"),
]
for pos, xlabel, ylabel, series, color in panels:
    ax = fig.add_subplot(pos, facecolor="#E8E8F8")
    if xlabel is not None:
        ax.set_xlabel(xlabel, fontsize=18)
    ax.set_ylabel(ylabel, fontsize=18)
    plt.tick_params(labelsize=14)
    ax.plot(range(len(series)), series, color=color)
    ax.grid(linewidth=1.5, color="white")  # white grid on the tinted panel

fig.tight_layout()
plt.show()
plt.close()


# --- Inference ---
model.eval()                 # switch to evaluation mode (disables dropout)
model.set_state_dict(
    paddle.load(MODEL_PATH)
)   # restore the trained parameters

# BUG FIX: `infer_img` and `truth_lab` were never defined, so this section
# raised NameError. Build them from the inference folder instead.
# NOTE(review): assumes INFER_PATH holds image files whose name stem is the
# ground-truth label (e.g. "3.jpg") — confirm against the dataset layout.
for fname in sorted(os.listdir(INFER_PATH)):
    infer_img = data_transform(os.path.join(INFER_PATH, fname))
    infer_img = paddle.unsqueeze(infer_img, axis=0)   # add batch dim -> [1, 3, 224, 224]
    result = model(infer_img)
    infer_lab = int(np.argmax(result.numpy()))   # index of the max logit
    truth_lab = int(os.path.splitext(fname)[0])
    print("真实标签：%d，预测结果：%d" % (truth_lab, infer_lab))
