# common file
import random
from typing import Any

import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
from IPython import display
from matplotlib import pyplot as plt

#  Download the dataset
from prettytable import PrettyTable


def down_load_data(path_dir):
    """Download MNIST into ``path_dir`` and return the (train, test) datasets.

    Both splits carry a ``ToTensor`` transform. ``download=True`` is set on
    the test split too, so this function works even when the files are not
    already present (the original only downloaded via the train split).
    """
    mnist_train = torchvision.datasets.MNIST(root=path_dir, train=True, download=True, transform=transforms.ToTensor())
    mnist_test = torchvision.datasets.MNIST(root=path_dir, train=False, download=True, transform=transforms.ToTensor())
    return mnist_train, mnist_test


#  Build mini-batch iterators over the train/test image sets
def load_data_iter(mnist_train, mnist_test, batch_size, num_workers):
    """Wrap the two datasets in DataLoaders; only the training set is shuffled."""
    make_loader = torch.utils.data.DataLoader
    train_iter = make_loader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
    test_iter = make_loader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
    return train_iter, test_iter


def data_iter(feathers, lables, batch_size, shuffle):
    """Yield (features, labels) mini-batches of at most ``batch_size`` samples.

    When ``shuffle`` is true the sample order is randomized first. The final
    batch may be smaller than ``batch_size``.
    """
    total = len(feathers)
    order = list(range(total))
    if shuffle:
        random.shuffle(order)
    start = 0
    while start < total:
        picked = torch.LongTensor(order[start:start + batch_size])
        # index_select dim 0 picks rows (samples); dim 1 would pick columns
        yield feathers.index_select(0, picked), lables.index_select(0, picked)
        start += batch_size


def train_test_split(x, y, ratio):
    """Split (x, y): the leading ``ratio`` fraction is train, the rest is test."""
    cut = int(len(y) * ratio)
    train_part = (x[:cut], y[:cut])
    test_part = (x[cut:], y[cut:])
    return train_part[0], train_part[1], test_part[0], test_part[1]


# Accuracy of model ``net`` over every batch produced by ``data_iter``
def evaluate_accuracy_data_iter(data_iter, net):
    """Return the fraction of samples whose argmax prediction matches the label."""
    correct, total = 0.0, 0
    for X, y in data_iter:
        preds = net(X).argmax(dim=1)
        correct += (preds == y).float().sum().item()
        total += y.shape[0]
    return correct / total


# Accuracy and mean loss of a binary classifier on (test_data, test_labels)
def evaluate_accuracy(batch_size, test_data, test_labels, net, loss):
    """Evaluate ``net`` batch-by-batch over the whole test set.

    Predictions are thresholded at 0.5 (binary classification). Returns
    (accuracy, mean loss). ``loss_bce`` is invoked with its keyword
    arguments; any other loss is called positionally.
    """
    acc_sum, loss_sum, n = 0.0, 0.0, 0
    data_len = len(test_data)
    indices = list(range(data_len))
    for i in range(0, data_len, batch_size):
        # the last batch may be smaller than batch_size
        j = torch.LongTensor(indices[i: min(i + batch_size, data_len)])
        # index_select dim 0 picks rows (samples)
        X, y = test_data.index_select(0, j), test_labels.index_select(0, j)
        # run the forward pass once per batch (original recomputed net(X) up to 3x)
        y_hat = net(X)
        acc_sum += ((y_hat.ge(0.5).float().squeeze()) == y).sum().item()
        if loss is loss_bce:
            loss_sum += loss(y_pred_proba=y_hat.squeeze(), y_true=y.squeeze()).sum().item()
        else:
            loss_sum += loss(y_hat, y).sum().item()
        n += y.shape[0]
    return acc_sum / n, loss_sum / n


# Accuracy and mean loss on a dataset (K-fold variant)
def evaluate_accuracy_k(batch_size, test_data, test_labels, net, loss):
    """Evaluate ``net`` on (test_data, test_labels); returns (accuracy, mean loss).

    The original body was a byte-for-byte duplicate of ``evaluate_accuracy``;
    delegating keeps the two call sites from drifting apart.
    """
    return evaluate_accuracy(batch_size, test_data, test_labels, net, loss)


# Accuracy and loss of a binary classifier over the full dataset in one pass
def evaluate_accuracy_2(batch_size, test_data, test_labels, net, loss):
    """Evaluate ``net`` on the whole test set at once.

    ``batch_size`` is unused but kept for signature compatibility with the
    batched evaluators. Returns (accuracy, loss as a numpy scalar).
    """
    data_len = len(test_data)
    # single forward pass (original called net(test_data) twice) and no
    # unused index list
    y_predict = net(test_data)
    l = loss(y_predict, test_labels)
    acc_sum = ((y_predict.ge(0.5).float().squeeze()) == test_labels).sum().item()
    return acc_sum / data_len, l.data.numpy()


# Hand-written optimizer
def SGD(params, lr):
    """Vanilla stochastic gradient descent: param <- param - lr * grad."""
    for p in params:
        p.data.sub_(lr * p.grad)


# loss set
mseLoss = torch.nn.MSELoss()  # mean-squared-error loss (regression)
ceLoss = torch.nn.CrossEntropyLoss()  # cross-entropy loss (multi-class classification)


# Hand-written cross-entropy loss
def cross_entropy(y_hat, y):
    """Per-sample negative log of the predicted probability of the true class.

    Returns a column tensor of shape (n, 1).
    """
    picked = y_hat.gather(1, y.view(-1, 1))
    return -picked.log()


# binary cross entropy
def loss_bce(y_true=None, y_pred_proba=None):
    """Mean binary cross-entropy between probabilities and 0/1 targets."""
    pos_term = y_true * torch.log(y_pred_proba)
    neg_term = (1.0 - y_true) * torch.log(1.0 - y_pred_proba)
    return torch.mean(-1.0 * (pos_term + neg_term))


# Activation functions
def sigmod_manual(z):
    """Sigmoid from its definition: 1 / (1 + e^(-z))."""
    exp_neg = torch.exp(-z)
    return 1 / (1 + exp_neg)


def sigmod(z):
    """Sigmoid via torch's built-in implementation."""
    return z.sigmoid()


def relu(X):
    """Element-wise max(x, 0)."""
    zero = torch.tensor(0.0)
    return torch.max(X, zero)


def relu_manual(x):
    """ReLU via numpy: negative entries are clamped to zero; result is a tensor."""
    clamped = np.where(x >= 0, x, 0)
    return torch.tensor(clamped)


def elu(X):
    """ELU activation with alpha fixed at 0.2."""
    return torch.nn.functional.elu(X, alpha=0.2)


def elu_manual(x, gamma):
    """ELU via numpy: x where x > 0, gamma * (e^x - 1) otherwise; returns a tensor."""
    activated = np.where(x > 0, x, gamma * (np.exp(x) - 1))
    return torch.tensor(activated)


def leakyReLu(X):
    """Leaky ReLU with the negative slope fixed at 0.2."""
    return torch.nn.functional.leaky_relu(X, negative_slope=0.2)


def leakyReLu_manual(x, gamma):
    """Leaky ReLU via numpy: x where x > 0, gamma * x otherwise; returns a tensor.

    Fixes the ``torch.tenso`` typo in the original, which raised
    AttributeError on every call.
    """
    x = np.where(x > 0, x, x * gamma)
    return torch.tensor(x)


def tanh(X):
    """Hyperbolic tangent via torch's built-in implementation."""
    return X.tanh()


def tanh_manual(X):
    """tanh from its definition: (e^x - e^(-x)) / (e^x + e^(-x)), via numpy."""
    e_pos = np.exp(X)
    e_neg = np.exp(-X)
    return (e_pos - e_neg) / (e_pos + e_neg)


# Reset accumulated gradients to zero
def grad_zero(params):
    """Zero each parameter's ``.grad`` in place (manual optimizer.zero_grad())."""
    for p in params:
        p.grad.zero_()


# Flatten layer: collapses every non-batch dimension into one
class FlattenLayer(torch.nn.Module):
    """Reshape a (batch, *dims) input into (batch, prod(dims))."""

    # no custom __init__ needed: the inherited nn.Module constructor suffices

    def forward(self, x):
        batch = x.shape[0]
        return x.view(batch, -1)


# Layer sizes for the Classification model below:
# 200 input features, 4 hidden units, 1 output unit.
num_inputs, num_hiddens, num_outs = 200, 4, 1


class Classification(torch.nn.Module):
    """Binary classifier: Linear -> Tanh -> Linear -> Sigmoid.

    Layer sizes come from the module-level ``num_inputs`` / ``num_hiddens`` /
    ``num_outs`` constants; the sigmoid squashes the output into (0, 1).
    """

    def __init__(self):
        super(Classification, self).__init__()
        self.hidden = torch.nn.Linear(num_inputs, num_hiddens)
        self.tanh = torch.nn.Tanh()
        self.output = torch.nn.Linear(num_hiddens, num_outs)
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        hidden_act = self.tanh(self.hidden(x))
        return self.sigmoid(self.output(hidden_act))


# Plot a loss curve on a log-scale y axis; optionally overlay a second series
def loss_draw(x_vals, y_vals, x_lable, y_lable, x2_vals=None, y2_vals=None, legend=None, figsize=(3.5, 2.5)):
    """Semilog-y plot of one or two series with axis labels and optional legend."""
    plt.rcParams['figure.figsize'] = figsize
    plt.xlabel(x_lable)
    plt.ylabel(y_lable)
    plt.semilogy(x_vals, y_vals)
    has_second_series = x2_vals and y2_vals
    if has_second_series:
        # dashed overlay for the second curve (e.g. validation loss)
        plt.semilogy(x2_vals, y2_vals, linestyle=":")
        plt.legend(legend)
    plt.show()


def get_fashion_mnist_labels(labels):
    """Map numeric Fashion-MNIST class ids to their text names."""
    text_labels = ('t-shirt', 'trouser', 'pullover', 'dress', 'coat',
                   'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot')
    names = []
    for i in labels:
        names.append(text_labels[int(i)])
    return names


def use_svg_display():
    """Use svg format to display plot in jupyter"""
    # NOTE(review): IPython's display.set_matplotlib_formats is deprecated in
    # newer IPython releases in favor of
    # matplotlib_inline.backend_inline.set_matplotlib_formats — confirm the
    # installed IPython version still provides it.
    display.set_matplotlib_formats('svg')


def show_fashion_mnist(images, labels):
    """Draw the images in one row, each titled with its label."""
    use_svg_display()
    # first return value (the figure) is not needed, only the axes
    _, axes = plt.subplots(1, len(images), figsize=(12, 12))
    for ax, img, lbl in zip(axes, images, labels):
        ax.imshow(img.view((28, 28)).numpy())
        ax.set_title(lbl)
        ax.axes.get_xaxis().set_visible(False)
        ax.axes.get_yaxis().set_visible(False)
    plt.show()


# Hand-written dropout (inverted-dropout scaling)
def dropout_manual(X, drop_prob):
    """Zero each element with probability ``drop_prob`` and rescale survivors.

    Surviving activations are divided by (1 - drop_prob) so the expected
    value is unchanged; drop_prob == 1 zeroes everything.
    """
    X = X.float()
    assert 0 <= drop_prob <= 1  # drop probability must be a valid probability
    keep_prob = 1 - drop_prob
    if keep_prob == 0:
        # every element is dropped
        return torch.zeros_like(X)
    keep_mask = (torch.rand(X.shape) < keep_prob).float()
    return keep_mask * X / keep_prob


# K-fold cross-validation split
def get_kfold_data(k, i, X, y):
    """Return (X_train, y_train, X_valid, y_valid) for fold ``i`` of ``k``.

    ``i`` runs from 0 to k-1; the last fold's validation slice also absorbs
    any remainder when len(X) is not divisible by k.
    """
    fold_size = X.shape[0] // k
    start = i * fold_size
    if i == k - 1:
        # last fold: validation runs to the end of the data
        X_valid, y_valid = X[start:], y[start:]
        X_train, y_train = X[:start], y[:start]
    else:
        end = start + fold_size
        X_valid, y_valid = X[start:end], y[start:end]
        X_train = torch.cat((X[:start], X[end:]), dim=0)
        y_train = torch.cat((y[:start], y[end:]), dim=0)
    return X_train, y_train, X_valid, y_valid


def k_fold(train, k, feathers, labels, num_epochs, batch_size, params, lr, optimizer):
    """Run k-fold cross-validation with the supplied ``train`` callable.

    ``train`` must return (train_loss, test_loss, train_acc, test_acc) for one
    fold. Prints a per-fold results table and the average train/valid losses.
    The unused ``train_acc_sum`` / ``valid_acc_sum`` accumulators from the
    original were removed.
    """
    train_loss_sum, test_loss_sum = 0.0, 0.0
    # per-fold results table
    k_table = PrettyTable(["epoch", "train_loss", "valid_loss", "train_acc", "valid_acc"])
    for i in range(k):
        print('第', i + 1, '折验证结果')
        data = get_kfold_data(k, i, feathers, labels)  # train/valid split for this fold
        train_loss, test_loss, train_acc, test_acc = train(*data, num_epochs, batch_size, params=params, lr=lr,
                                                             optimizer=optimizer)
        train_loss_sum += train_loss
        test_loss_sum += test_loss
        k_table.add_row([(i + 1), ("%.4f" % train_loss), ("%.3f" % test_loss), train_acc, test_acc])
        print("train loss: {:.4f}, test loss: {:.3f}".format(train_loss, test_loss))
    print("train_loss_sum {}".format(train_loss_sum))
    # render the per-fold table
    print(k_table)
    print("\n", "K折交叉验证的结果：")
    print("average train loss :{:.4f}".format(train_loss_sum / k))
    print("average valid loss :{:.4f}".format(test_loss_sum / k))


# Train and evaluate each fold, then report the averaged metrics
def k_fold_mult(train_k, k, train_iter, num_epochs, batch_size, optimizer=None):
    """Materialize ``train_iter`` into tensors and run k-fold CV with ``train_k``.

    ``train_k`` must return (train_loss, valid_loss, train_acc, valid_acc) for
    one fold. Prints a per-fold results table and the averaged losses and
    accuracies.
    """
    train_acc_sum, valid_acc_sum = 0.0, 0.0
    train_loss_sum, valid_loss_sum = 0.0, 0.0
    # Collect all batches first and concatenate once: the original called
    # torch.cat inside the loop, copying all previous data every iteration
    # (O(n^2) total copying).
    X_parts, y_parts = [], []
    for X, y in train_iter:
        X_parts.append(X)
        y_parts.append(y)
    if X_parts:
        X_train = torch.cat(X_parts, dim=0)
        y_train = torch.cat(y_parts, dim=0)
    else:
        # preserve the original placeholder behavior for an empty iterator
        X_train = torch.zeros(1)
        y_train = torch.zeros(1)
    k_table = PrettyTable(["epoch", "train_loss", "valid_loss", "train_acc", "valid_acc"])
    for i in range(k):
        print('第', i + 1, '折验证结果')
        data = get_kfold_data(k, i, X_train, y_train)
        # run training/validation on this fold
        train_loss, valid_loss, train_acc, valid_acc = train_k(*data, num_epochs, batch_size, optimizer)
        train_acc_sum += train_acc
        valid_acc_sum += valid_acc
        train_loss_sum += train_loss
        valid_loss_sum += valid_loss
        k_table.add_row([(i+1), ("%.4f" % train_loss), ("%.3f" % valid_loss), ("%.4f" % train_acc), ("%.3f" % valid_acc)])
        print("train loss: {:.4f}, test loss: {:.3f}".format(train_loss, valid_loss))
    # render the per-fold table
    print(k_table)
    print("\n", "最终K折交叉验证的结果：")
    print("average train loss :{:.4f}, average train acc :{:.3f}%".format(train_loss_sum/k, train_acc_sum/k*100))
    print("average valid loss :{:.4f}, average valid acc :{:.3f}%".format(valid_loss_sum/k, valid_acc_sum/k*100))
