# -*- coding: utf-8 -*-
"""
Created on 2021/11/7 14:19
File name: fcDemo.py
IDE: pycharm
@author: KwokHooh
"""
import torch
from torch import nn
import numpy as np
from sklearn.datasets import load_digits

class MyModel(nn.Module):
    """Fully-connected classifier for 8x8 digit images: 64 input features -> 10 class logits.

    The layer widths (128/256/128/64/32) are arbitrary and may be tuned freely,
    as long as each layer's input size matches the previous layer's output size.
    """
    def __init__(self):
        super(MyModel, self).__init__()
        # Hidden layers
        self.fc1 = nn.Linear(64, 128)
        self.fc2 = nn.Linear(128, 256)
        self.fc3 = nn.Linear(256, 128)
        self.fc4 = nn.Linear(128, 64)
        self.fc5 = nn.Linear(64, 32)
        # Output layer (one unit per digit class)
        self.fc6 = nn.Linear(32, 10)
        # One shared activation module instead of constructing nn.Sigmoid()
        # anew on every forward call.
        self.act = nn.Sigmoid()

    def forward(self, X):
        '''
        Forward pass.

        Args:
            X: feature tensor of shape (batch, 64)

        Returns:
            Raw, unnormalized class scores (logits) of shape (batch, 10).
            NOTE: no softmax here — nn.CrossEntropyLoss applies log-softmax
            internally, so applying softmax in the model would effectively
            softmax twice and degrade training. For probabilities at
            inference time, apply torch.softmax(logits, dim=1) externally;
            argmax over logits gives the same predicted labels either way.
        '''
        output = self.act(self.fc1(X))
        output = self.act(self.fc2(output))
        output = self.act(self.fc3(output))
        output = self.act(self.fc4(output))
        output = self.act(self.fc5(output))
        # Return logits directly.
        return self.fc6(output)


if __name__ == '__main__':
    from sklearn.metrics import accuracy_score

    digits = load_digits()
    # First 1500 samples for training, the remainder held out for evaluation.
    x_train = digits.data[:1500]   # training images
    y_train = digits.target[:1500]  # training labels
    x_pred = digits.data[1500:]    # evaluation images
    y_pred = digits.target[1500:]  # evaluation labels

    # Build tensors and pair features with labels in a single dataset so
    # batches always stay aligned (zipping two independent DataLoaders only
    # works while shuffle is disabled).
    x = torch.from_numpy(x_train).float()
    y = torch.from_numpy(y_train).long()
    train_set = torch.utils.data.TensorDataset(x, y)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=32)
    x_ = torch.from_numpy(x_pred).float()

    model = MyModel()
    # CrossEntropyLoss = log-softmax + NLLLoss; it expects raw logits.
    loss_fn = nn.CrossEntropyLoss()
    # Optimizer
    opt = torch.optim.Adam(params=model.parameters())

    model.train()  # once is enough; no need to re-set it every batch
    for epoch in range(100):
        total_loss = 0.0
        num_batches = 0
        for x_batch, y_batch in train_loader:
            # Clear gradients accumulated from the previous step.
            opt.zero_grad()
            output = model(x_batch)
            loss = loss_fn(output, y_batch)
            # Backpropagate.
            loss.backward()
            # Apply the parameter update.
            opt.step()
            total_loss += loss.item()
            num_batches += 1
        # Mean per-batch loss for this epoch.
        print(total_loss / num_batches)

    # Inference: disable autograd — no gradients are needed and this saves
    # memory/compute.
    model.eval()
    with torch.no_grad():
        pred = model(x_)

    # Predicted labels from the highest-scoring class.
    predict = torch.argmax(pred, dim=1).numpy()

    # Accuracy on the held-out set.
    print(accuracy_score(y_pred, predict))

    print("正确数字：\n", y_pred)
    print("预测数字：\n", predict)
    # Vectorized error count (these statements must live inside the
    # __main__ guard: they reference predict/y_pred and previously ran —
    # and crashed — on import).
    error_num = int((predict != y_pred).sum())
    print("错误个数：%d  正确率：%.2f%%"%(error_num, 100-(error_num*100/len(predict))))