import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset
import time

class Classifier(nn.Module):
    """Simple MNIST digit classifier: a 784 -> 200 -> 10 MLP trained with BCE loss.

    Samples the training loss every 10 steps into ``self.progress`` so the
    curve can be drawn later with :meth:`plot_progress`.
    """

    def __init__(self):
        super().__init__()

        # One hidden layer with LeakyReLU + LayerNorm; the Sigmoid output
        # pairs with BCELoss below (each of the 10 outputs lies in (0, 1)).
        self.model = nn.Sequential(
            nn.Linear(784, 200),
            nn.LeakyReLU(0.02),
            nn.LayerNorm(200),
            nn.Linear(200, 10),
            nn.Sigmoid()
        )

        # Binary cross-entropy works with the one-hot targets and the Sigmoid
        # output layer. (MSELoss was tried earlier and converges more slowly.)
        self.loss_fn = nn.BCELoss()

        # Adam converges faster than SGD here; it typically uses a smaller
        # learning rate than the SGD setup it replaced.
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.001)

        # Training-step counter and sampled loss history for plotting.
        self.counter = 0
        self.progress = []

    def forward(self, x):
        """Run the network on a flattened 784-pixel image tensor."""
        return self.model(x)

    # NOTE(review): this overrides nn.Module.train(mode=True), so switching
    # between train/eval modes via model.train()/model.eval() is unavailable.
    # The name is kept for backward compatibility with existing callers.
    def train(self, inputs, targets):
        """Run one optimization step and return the (graph-attached) loss."""
        outputs = self.forward(inputs)
        loss = self.loss_fn(outputs, targets)

        # Standard backprop: clear stale gradients, compute new ones, step.
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # Sample the loss every 10 steps; print a heartbeat every 10000.
        self.counter += 1
        if self.counter % 10 == 0:
            self.progress.append(loss.item())
        if self.counter % 10000 == 0:
            print("counter: ", self.counter)

        return loss

    def plot_progress(self):
        """Plot the training losses sampled by train()."""
        df = pd.DataFrame(self.progress, columns=['loss'])
        df.plot(ylim=(0, 1.0), figsize=(16, 8), alpha=0.1, marker='.', grid=True, yticks=(0, 0.25, 0.5))
        plt.show()

class MnistDataset(Dataset):
    """MNIST stored as a headerless CSV: column 0 is the digit label,
    columns 1..784 are pixel intensities in 0-255."""

    def __init__(self, file_path):
        # No header row in the file; every row is label + 784 pixel values.
        self.data_df = pd.read_csv(file_path, header=None)

    def __len__(self):
        return len(self.data_df)

    def __getitem__(self, idx):
        """Return (label, normalized pixel tensor, one-hot target) for row ``idx``."""
        label = self.data_df.iloc[idx, 0]

        # One-hot target vector for BCE-style training.
        target = torch.zeros(10)
        target[label] = 1.0

        # Scale raw 0-255 pixel values into 0.0-1.0 floats.
        image_values = torch.FloatTensor(self.data_df.iloc[idx, 1:].values) / 255.0

        return label, image_values, target

    def plot_image(self, idx):
        """Show row ``idx`` as a 28x28 image with its label in the title."""
        arr = self.data_df.iloc[idx, 1:].values.reshape(28, 28)
        # Typo fixed: title previously read "laber".
        plt.title("label = " + str(self.data_df.iloc[idx, 0]))
        plt.imshow(arr, interpolation='none', cmap='Blues')
        plt.show()


def train_model(dataset, epochs=3):
    """Train a fresh Classifier on every record of ``dataset`` for the given
    number of epochs, print the elapsed time, plot the loss curve, and
    return the trained model."""
    started = time.time()

    classifier = Classifier()

    for epoch in range(epochs):
        print("Epoch: ", epoch)
        # One pass over every (label, image, one-hot target) record.
        for label, image_data_tensor, target_tensor in dataset:
            classifier.train(image_data_tensor, target_tensor)

    finished = time.time()
    print("训练时间：", finished - started)

    # Show the sampled training-loss curve.
    classifier.plot_progress()
    return classifier

def test_single_image(dataset, model, record_idx):
    """Display record ``record_idx`` from ``dataset`` and bar-plot the
    model's 10 class scores for it."""
    # Show the raw digit image first.
    dataset.plot_image(record_idx)

    # The second element of a dataset record is the normalized pixel tensor.
    pixels = dataset[record_idx][1]
    scores = model.forward(pixels)

    # Bar chart of the 10 output activations.
    frame = pd.DataFrame(scores.detach().numpy())
    frame.plot(kind='bar', legend=False, ylim=(0, 1.0), yticks=(0, 0.25, 0.5))
    plt.show()

def test_model_accuracy(dataset, model):
    score = 0
    items = 0

    for label, image_data_tensor, target_tensor in dataset:
        output = model.forward(image_data_tensor)
        # 获取最大值的索引
        max_value, max_index = torch.max(output, 0)
        # 如果最大值的索引与目标标签相同，则计数器加1
        if max_index == label:
            score += 1
        items += 1
        pass
    pass

    print(score, items, "准确率：", score / items)
    return score / items

def main():
    """Train on the 60k MNIST training split, report accuracy on the 10k
    test split, then visualize one prediction."""
    # Train the model. (Preview a sample digit with mnist_dataset.plot_image(9).)
    mnist_dataset = MnistDataset('dataset_mnist/mnist_train_60000.csv')
    model = train_model(mnist_dataset)

    # Evaluate on the held-out test set (typo fixed: was "minist").
    mnist_test_dataset = MnistDataset('dataset_mnist/mnist_test_10000.csv')
    test_model_accuracy(mnist_test_dataset, model)

    # Inspect a single prediction from the training set.
    test_single_image(mnist_dataset, model, 9)


if __name__ == "__main__":
    # Guard so importing this module does not trigger a full training run.
    main()
