import time

from matplotlib_inline.backend_inline import set_matplotlib_formats
import sys
import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torchvision
import matplotlib
matplotlib.use('TkAgg')  # 或者 'Qt5Agg'
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F



def use_svg_display():
    """Select the plotting backend.

    NOTE(review): despite the name, this switches matplotlib to the
    interactive 'TkAgg' backend rather than SVG output; the original
    set_matplotlib_formats('svg') call was deliberately disabled.
    """
    matplotlib.use('TkAgg')  # or 'Qt5Agg'

def set_figsize(figsize=(3.5, 2.5)):
    """Set the default matplotlib figure size (width, height in inches)."""
    use_svg_display()
    # All figures created afterwards inherit this size.
    plt.rcParams['figure.figsize'] = figsize


# Define the model
def linreg(X, w, b):
    """Linear regression model: X @ w + b (bias broadcast over rows)."""
    return X.mm(w) + b


# Define the loss function
def squared_loss(y_hat, y):
    """Halved squared error, element-wise.

    Returns a vector (one loss per example); note that PyTorch's MSELoss
    does NOT divide by 2.
    """
    diff = y_hat - y.view(y_hat.size())
    return diff ** 2 / 2

# Define the optimization algorithm
def sgd(params, lr, batch_size):
    """One mini-batch SGD step, updating each parameter in place.

    Uses .data so the update itself is not tracked by autograd.
    """
    for p in params:
        p.data -= lr * p.grad / batch_size


# Convert numeric labels to text labels
def get_fashion_mnist_labels(labels):
    """Map numeric Fashion-MNIST class indices to their text labels."""
    text_labels = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    return [text_labels[int(idx)] for idx in labels]


# This function is saved in the d2lzh package for later reuse
def show_fashion_mnist(images, labels):
    """Display a row of 28x28 Fashion-MNIST images with their text labels."""
    use_svg_display()

    # One subplot per image, laid out in a single row.
    _, axes = plt.subplots(1, len(images), figsize=(12, 12))

    for ax, image, label in zip(axes, images, labels):
        # Reshape the flat tensor back to 28x28 and render as grayscale.
        ax.imshow(image.view((28, 28)).numpy(), cmap='gray')
        ax.set_title(label)
        # Hide both axes: tick marks add nothing for image thumbnails.
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)

    plt.show()


# Define a function to load the dataset
def load_data_fashion_mnist(batch_size, resize=None, root='~/Datasets/FashionMNIST'):
    """Download (if needed) Fashion-MNIST and return train/test DataLoaders.

    Bug fixed (review): the training split was constructed without
    download=True, so a fresh machine raised an error on the train set even
    though the test set downloaded fine.

    :param batch_size: mini-batch size for both loaders
    :param resize: optional size; if given, images are resized before ToTensor
    :param root: directory where the dataset is cached
    :return: (train_iter, test_iter) DataLoader pair
    """
    # Build the transform pipeline: optional resize, then convert to Tensor.
    trans = []
    if resize:
        trans.append(torchvision.transforms.Resize(size=resize))
    trans.append(torchvision.transforms.ToTensor())
    transform = torchvision.transforms.Compose(trans)

    mnist_train = torchvision.datasets.FashionMNIST(root=root, train=True, download=True, transform=transform)
    mnist_test = torchvision.datasets.FashionMNIST(root=root, train=False, download=True, transform=transform)
    # num_workers=0: load in the main process (portable, e.g. on Windows).
    train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=0)
    test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=0)

    return train_iter, test_iter

# Evaluate the model's accuracy on a dataset
# Saved in the d2lzh_pytorch package for later reuse; improved step by step.
def evaluate_accuracy(data_iter, net, device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')):
    """Compute classification accuracy of `net` over `data_iter`.

    :param data_iter: iterable of (X, y) mini-batches
    :param net: an nn.Module (moved batches go to `device`) or a plain
        callable model (evaluated on CPU, optionally with is_training=False)
    :param device: target device for nn.Module evaluation
    :return: fraction of correctly classified examples
    """
    correct, total = 0.0, 0
    with torch.no_grad():
        for X, y in data_iter:
            if isinstance(net, torch.nn.Module):
                net.eval()  # evaluation mode: disables dropout
                preds = net(X.to(device)).argmax(dim=1)
                correct += (preds == y.to(device)).float().sum().cpu().item()
                net.train()  # restore training mode
            else:
                # Hand-written model (pre-3.13 chapters); GPU not considered.
                if 'is_training' in net.__code__.co_varnames:
                    preds = net(X, is_training=False).argmax(dim=1)
                else:
                    preds = net(X).argmax(dim=1)
                correct += (preds == y).float().sum().item()
            total += y.shape[0]
    return correct / total

# Training procedure for the softmax model
def train_softmax(net, train_iter, test_iter, loss, num_epochs, batch_size, params=None, lr=None, optimizer=None):
    """Training loop for softmax regression.

    Supports both a torch optimizer (`optimizer`) and manually managed
    parameters (`params` + `lr`, updated via the local sgd helper).
    """
    for epoch in range(num_epochs):
        loss_sum, correct, seen = 0.0, 0.0, 0
        for X, y in train_iter:
            y_hat = net(X)
            l = loss(y_hat, y).sum()

            # Clear gradients before backprop. On the manual path, skip the
            # very first step, when .grad has not been populated yet.
            if optimizer is not None:
                optimizer.zero_grad()
            elif params is not None and params[0].grad is not None:
                for param in params:
                    param.grad.data.zero_()

            l.backward()
            if optimizer is None:
                sgd(params, lr, batch_size)  # manual parameter update
            else:
                optimizer.step()  # used by the "concise softmax" section

            loss_sum += l.item()
            correct += (y_hat.argmax(dim=1) == y).sum().item()
            seen += y.shape[0]
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'% (epoch + 1, loss_sum / seen, correct / seen, test_acc))

# This class is saved in the d2lzh_pytorch package for later reuse
class FlattenLayer(nn.Module):
    """Flatten every dimension except the batch axis: (N, *) -> (N, prod)."""

    def __init__(self):
        super(FlattenLayer, self).__init__()

    def forward(self, x):
        # Keep the batch dimension, collapse everything else into one.
        return x.view(x.size(0), -1)


# This function is saved in the d2lzh_pytorch package for later reuse
def semilogy(x_vals, y_vals, x_label, y_label, x2_vals=None, y2_vals=None, legend=None, figsize=(3.5, 2.5), plot=None):
    """Plot y vs. x with a log-scaled y axis; optionally a second dotted series."""
    set_figsize(figsize)
    plt.xlabel(x_label)
    plt.ylabel(y_label)
    plt.semilogy(x_vals, y_vals)
    # Second curve (dotted) plus legend only when both series are given.
    if x2_vals and y2_vals:
        plt.semilogy(x2_vals, y2_vals, linestyle=':')
        plt.legend(legend)
    # Deliberately `== True`: only an explicit plot=True shows the figure.
    if plot == True:
        plt.show()

def corr2d(X, K):
    """2-D cross-correlation of X with kernel K (valid padding, stride 1)."""
    kh, kw = K.shape
    out_h = X.shape[0] - kh + 1
    out_w = X.shape[1] - kw + 1
    Y = torch.zeros((out_h, out_w))
    for r in range(out_h):
        for c in range(out_w):
            # Elementwise product of the window with the kernel, summed.
            window = X[r:r + kh, c:c + kw]
            Y[r, c] = (window * K).sum()
    return Y


import time
import torch


def train_ch5(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs):
    """Train `net` on `device` with cross-entropy loss, printing per-epoch
    loss, train/test accuracy, and wall-clock time.

    :param net: nn.Module to train
    :param train_iter: training data iterator of (X, y) batches
    :param test_iter: test data iterator
    :param batch_size: mini-batch size (kept for interface compatibility)
    :param optimizer: torch optimizer wrapping net's parameters
    :param device: device (CPU or GPU) to run on
    :param num_epochs: number of passes over the training data
    """
    net = net.to(device)
    print("Training on", device)
    loss = torch.nn.CrossEntropyLoss()

    for epoch in range(num_epochs):
        loss_sum, correct, seen, num_batches = 0.0, 0.0, 0, 0
        tic = time.time()

        for X, y in train_iter:
            X, y = X.to(device), y.to(device)
            y_hat = net(X)
            l = loss(y_hat, y)

            # Standard step: clear grads, backprop, update.
            optimizer.zero_grad()
            l.backward()
            optimizer.step()

            loss_sum += l.cpu().item()
            correct += (y_hat.argmax(dim=1) == y).sum().cpu().item()
            seen += y.shape[0]
            num_batches += 1

        # Test accuracy uses evaluate_accuracy's default device selection.
        test_acc = evaluate_accuracy(test_iter, net)
        train_loss = loss_sum / num_batches
        train_acc = correct / seen

        print(f'epoch {epoch + 1}, loss {train_loss:.4f}, train acc {train_acc:.3f}, '
              f'test acc {test_acc:.3f}, time {time.time() - tic:.1f} sec')



class Animator:
    """Incrementally draw training curves during a run.

    Bug fixed (review): `self.data` was initialised as two lists-of-lists
    and x values were appended onto the outer x list-of-lists, so the x and
    y series passed to Line2D.set_data had mismatched lengths after the
    first update. x values are now kept in a flat list.
    """

    def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None, ylim=None):
        """
        :param xlabel: x-axis label
        :param ylabel: y-axis label
        :param legend: curve labels, e.g. ['train loss', 'train acc', 'test acc']
        :param xlim: optional (min, max) for the x axis
        :param ylim: optional (min, max) for the y axis
        """
        self.xlabel = xlabel
        self.ylabel = ylabel
        # Tolerate legend=None (the original crashed on len(None)).
        self.legend = legend if legend is not None else []
        self.xlim = xlim
        self.ylim = ylim
        self.fig, self.ax = plt.subplots()  # create the canvas
        self.xdata = []                          # shared x coordinates
        self.ydata = [[] for _ in self.legend]   # one y series per curve
        self.lines = []

        # One (initially empty) line per legend entry.
        for label in self.legend:
            line, = self.ax.plot([], [], label=label)
            self.lines.append(line)

        if self.legend:
            self.ax.legend()
        self.ax.set_xlabel(xlabel)
        self.ax.set_ylabel(ylabel)
        if xlim: self.ax.set_xlim(xlim)
        if ylim: self.ax.set_ylim(ylim)

    def update(self, x, y_values):
        """Append one point per curve and redraw.

        :param x: x coordinate (e.g. the epoch number)
        :param y_values: list with one y value per curve
        """
        # Lazily create series/lines if the legend was not supplied.
        if not self.ydata:
            self.ydata = [[] for _ in y_values]
        if not self.lines:
            self.lines = [self.ax.plot([], [])[0] for _ in y_values]

        self.xdata.append(x)
        for i, y in enumerate(y_values):
            self.ydata[i].append(y)
            self.lines[i].set_data(self.xdata, self.ydata[i])

        self.ax.relim()            # recompute data limits
        self.ax.autoscale_view()   # rescale axes to the new data
        plt.draw()                 # redraw
        plt.pause(0.01)            # brief pause so the GUI refreshes



import matplotlib.pyplot as plt  # NOTE(review): redundant — plt is already imported at the top of the file
# After training finishes, plot the loss and accuracy curves.
# NOTE(review): leftover demo values for the commented-out
# plot_training_curve(...) call below; harmless module-level state.
train_losses = [11,22,33,44]
train_accuracies = [77,88,99,100]
epoch = 4
def plot_training_curve(train_losses, train_accuracies, epoch):
    """Plot training loss and accuracy on twin y axes of one figure.

    Bug fixed (review): the original called plt.figure(figsize=(10, 6)) and
    then plt.subplots(), leaking an orphan blank figure whose size was never
    used; the figsize is now passed to plt.subplots directly.

    :param train_losses: per-epoch loss values
    :param train_accuracies: per-epoch accuracy values
    :param epoch: number of epochs (x axis runs 1..epoch)
    """
    epochs = range(1, epoch + 1)

    # Create the (only) figure, at the intended size.
    fig, ax1 = plt.subplots(figsize=(10, 6))

    # Loss curve on the left y axis.
    ax1.plot(epochs, train_losses, label='Training Loss', color='tab:blue')
    ax1.set_xlabel('Epochs')
    ax1.set_ylabel('Loss', color='tab:blue')
    ax1.tick_params(axis='y', labelcolor='tab:blue')

    # Accuracy curve on a second y axis sharing the same x axis.
    ax2 = ax1.twinx()
    ax2.plot(epochs, train_accuracies, label='Training Accuracy', color='tab:orange')
    ax2.set_ylabel('Accuracy', color='tab:orange')
    ax2.tick_params(axis='y', labelcolor='tab:orange')

    plt.title('Training Loss and Accuracy Curves')

    # Each axis gets its own legend so the two labels do not overlap.
    ax1.legend(loc='upper left')
    ax2.legend(loc='upper right')

    # Optionally save the image to log_dir:
    # plt.tight_layout()
    # plt.savefig(f'{log_dir}/training_curve.png')
    plt.show()

#plot_training_curve(train_losses, train_accuracies, epoch)


class GlobalAvgPool2d(nn.Module):
    """Global average pooling: average each channel over its full H x W extent."""

    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    def forward(self, x):
        # Using the input's own (H, W) as the kernel makes the pool global,
        # producing an output of shape (N, C, 1, 1).
        return F.avg_pool2d(x, kernel_size=x.size()[2:])


def show_heatmaps(matrices, xlabel, ylabel, titles=None, figsize=(2.5, 2.5),
                  cmap='Reds'):
    """Render a 2-D grid of matrix heatmaps with a shared colorbar.

    `matrices` is indexed [row][col]; axis labels go only on the outer
    edges, and `titles` (if given) label the columns.
    """
    use_svg_display()
    num_rows, num_cols = matrices.shape[0], matrices.shape[1]
    # squeeze=False keeps `axes` 2-D even for a single row/column.
    fig, axes = plt.subplots(num_rows, num_cols, figsize=figsize,
                             sharex=True, sharey=True, squeeze=False)
    for i, (ax_row, mat_row) in enumerate(zip(axes, matrices)):
        for j, (ax, mat) in enumerate(zip(ax_row, mat_row)):
            pcm = ax.imshow(mat.detach().numpy(), cmap=cmap)
            if i == num_rows - 1:
                ax.set_xlabel(xlabel)  # only the bottom row gets x labels
            if j == 0:
                ax.set_ylabel(ylabel)  # only the left column gets y labels
            if titles:
                ax.set_title(titles[j])
    # One colorbar for the whole grid, scaled from the last image drawn.
    fig.colorbar(pcm, ax=axes, shrink=0.6)