import numpy as np
import math
import matplotlib.pyplot as plt


def cross_entropy(y_pred, y_label):
    """Total (unaveraged) binary cross-entropy between predictions and labels.

    Parameters
    ----------
    y_pred : array-like of predicted probabilities in (0, 1); any shape.
    y_label : array-like of 0/1 ground-truth labels, same number of elements.

    Returns
    -------
    float : sum over all elements of -t*log(p) - (1-t)*log(1-p).
    """
    epsilon = 1e-10  # guards log(0) when a probability saturates at 0 or 1
    # np.asarray lets callers pass plain lists as well as ndarrays.
    p = np.asarray(y_pred).flatten()
    t = np.asarray(y_label).flatten()
    return -np.dot(t, np.log(p + epsilon)) - np.dot(1 - t, np.log(1 - p + epsilon))

def sigmoid(z):
    """Numerically stabilised logistic function.

    The output is clamped to [1e-6, 1 - 1e-6] so that downstream log()
    calls never see exactly 0 or 1.
    """
    raw = 1 / (1.0 + np.exp(-z))
    return np.clip(raw, 1e-6, 1 - 1e-6)

def get_prob(x, w, b):
    """Probability of the positive class for each sample.

    Computes sigmoid(x @ w + b) and returns it with shape (n_samples, 1).
    """
    x_arr = np.asarray(x)
    w_arr = np.asarray(w)
    b_arr = np.asarray(b)
    logits = np.matmul(x_arr, w_arr) + b_arr
    return sigmoid(logits).reshape(-1, 1)

def infer(x, w, b):
    """Hard 0/1 class predictions: probabilities rounded at the 0.5 threshold."""
    probabilities = get_prob(x, w, b)
    return np.round(probabilities)

def loss(y_pred, y_label, lamda, w):
    """Total cross-entropy plus an L2 penalty of lamda * ||w||^2."""
    l2_penalty = lamda * np.sum(np.square(w))
    return cross_entropy(y_pred, y_label) + l2_penalty

def gradient_regularization(x, y_label, w, b, lamda):
    """Mean gradients of the L2-regularised cross-entropy loss.

    NOTE(review): get_prob returns shape (n, 1), so y_label is presumably
    also (n, 1) for the subtraction to stay element-wise — verify against
    the callers.

    Returns
    -------
    (w_grad, b_grad) : mean gradient for the weights and the bias.
    """
    error = y_label - get_prob(x, w, b)
    # Average the per-sample gradient over the batch (axis 1 after the
    # transposes), then add the L2 term.
    grad_w = lamda * w - np.mean(np.multiply(error.T, x.T), 1)
    grad_b = -np.mean(error)
    return grad_w, grad_b

def train_dev_split(x, y, dev_size=0.25):
    """Split (x, y) into a leading train part and a trailing dev part.

    The first round(len(x) * (1 - dev_size)) samples form the training
    set and the rest the dev set; no shuffling is done here.
    """
    cut = int(round(len(x) * (1 - dev_size)))
    x_train, x_dev = x[:cut], x[cut:]
    y_train, y_dev = y[:cut], y[cut:]
    return x_train, y_train, x_dev, y_dev

def shuffle(x, y):
    """Return x and y reordered by one shared random permutation.

    Both arrays are indexed with the same shuffled index vector, so the
    pairing between samples and labels is preserved.
    """
    order = np.arange(len(x))
    np.random.shuffle(order)
    return x[order], y[order]


def gradient(x, y_label, w, b):
    """Mean gradients of the unregularised cross-entropy loss.

    Bug fix: the previous version called pandas' ``.drop("income")`` on the
    result of ``np.multiply`` (a NumPy array), which raises AttributeError.
    The gradients are now computed directly on the arrays, mirroring
    gradient_regularization with lamda = 0.

    Returns
    -------
    (w_grad, b_grad) : mean gradient for the weights and the bias.
    """
    y_pred = get_prob(x, w, b)
    pred_error = y_label - y_pred
    w_grad = -np.mean(np.multiply(pred_error.T, x.T), 1)
    b_grad = -np.mean(pred_error)

    return w_grad, b_grad

# Classification accuracy.
def accuracy(y_pred, y_label):
    """Fraction of predictions that exactly match their labels."""
    n_correct = np.sum(y_pred == y_label)
    return n_correct / len(y_pred)

# Logistic-regression training with mini-batch Adagrad.
def logistic_regression(X, Y, batch_size=32, max_iter=40, lr=0.1, regularize = True):
    """Train a logistic-regression classifier with mini-batch Adagrad.

    Parameters
    ----------
    X : DataFrame-like feature matrix (only ``.values`` and ``.columns`` are used).
    Y : DataFrame/Series-like 0/1 labels (only ``.values`` is used).
    batch_size : samples per gradient step.
    max_iter : number of passes over the training data.
    lr : base Adagrad learning rate.
    regularize : when True, applies L2 regularisation with lamda = 0.001.

    Returns
    -------
    (w, b) : learned weight vector and bias.

    Side effects: prints per-epoch metrics, saves/shows "Loss.png" and
    "Accuracy.png" via matplotlib, and prints the ten largest weights.

    Fixes vs previous version:
    - the per-epoch print computed the loss on *rounded* predictions
      (log of hard 0/1 values), disagreeing with the stored loss curves;
      the printed values now match what is plotted;
    - the Adagrad denominator gains a small epsilon so a zero accumulated
      gradient can no longer produce a division by zero / NaN weights.
    """
    # Hold out 20% of the data for validation.
    x_train, y_train, x_dev, y_dev = train_dev_split(X.values, Y.values, dev_size=0.20)

    # Per-epoch curves for plotting.
    loss_train = []
    loss_validation = []
    acc_train = []
    acc_dev = []

    features = X.columns
    w = np.zeros((x_train.shape[1],))
    b = np.zeros((1,))
    # Accumulated squared gradients for Adagrad.
    prev_w_grad = np.zeros((x_train.shape[1],))
    prev_b_grad = np.zeros((1,))

    num_train = len(y_train)
    num_dev = len(y_dev)

    lamda = 0.001 if regularize else 0
    eps = 1e-8  # keeps the Adagrad denominator non-zero before any gradient accumulates
    for epoch in range(max_iter):
        # Reshuffle each epoch so the mini-batches differ.
        x_train, y_train = shuffle(x_train, y_train)

        for idx in range(int(np.floor(len(y_train) / batch_size))):
            x = x_train[idx * batch_size:(idx + 1) * batch_size]
            y = y_train[idx * batch_size:(idx + 1) * batch_size]

            # Gradient of the (regularised) cross-entropy loss.
            w_grad, b_grad = gradient_regularization(x, y, w, b, lamda)

            # Adagrad: scale each step by the root of the accumulated
            # squared gradients, so frequently-updated weights slow down.
            prev_w_grad += w_grad ** 2
            prev_b_grad += b_grad ** 2
            w = w - lr / (np.sqrt(prev_w_grad) + eps) * w_grad
            b = b - lr / (np.sqrt(prev_b_grad) + eps) * b_grad

        # Epoch metrics: loss on the raw probabilities, accuracy on the
        # rounded predictions.
        y_train_prob = get_prob(x_train, w, b)
        y_dev_prob = get_prob(x_dev, w, b)
        loss_train.append(loss(y_train_prob, y_train, lamda, w) / num_train)
        loss_validation.append(loss(y_dev_prob, y_dev, lamda, w) / num_dev)
        acc_train.append(accuracy(np.round(y_train_prob), y_train))
        acc_dev.append(accuracy(np.round(y_dev_prob), y_dev))

        print(f'Iteration {epoch}, Loss Train: {loss_train[-1]}, Acc Train: {acc_train[-1]}')
        print(f'Iteration {epoch}, Loss Validation: {loss_validation[-1]}, Acc Validation: {acc_dev[-1]}')

    # Loss curves.
    plt.figure(figsize=(15,10))
    plt.plot(loss_train, label='Training loss')
    plt.plot(loss_validation, label='Validation loss')
    plt.ylim(0.25, 0.5)
    plt.legend()
    plt.title("Training vs Validation Loss")
    plt.savefig("Loss.png")
    plt.show()

    # Accuracy curves.
    plt.figure(figsize=(10,6))
    plt.plot(acc_train, label='Training accuracy')
    plt.plot(acc_dev, label='Validation accuracy')
    plt.ylim(0.8, 0.9)
    plt.legend()
    plt.title("Training vs Validation Accuracy")
    plt.savefig("Accuracy.png")
    plt.show()

    # Ten most influential features by absolute weight.
    ind = np.argsort(np.abs(w))[::-1]
    for i in ind[0:10]:
        print("{0:40} {1:}".format(features[i], w[i]))

    return w, b














# def adagrad(x, y, lr=0.01, iter_num=100):
#     # 初始化权重
#     weights = np.zeros(x.shape[1])
#     # 初始化梯度平方和
#     prev_gra = np.zeros(x.shape[1])
#
#     for i in range(iter_num):
#         # 计算预测值 y' = X * weights
#         y_pred = np.dot(x, weights)
#         # 计算损失 L = y' - y_train
#         loss = y_pred - y
#         # 计算梯度  gradient = 2 * X.T * Loss
#         gradient = 2 * np.dot(x.T, loss)
#         # 更新每一轮的梯度平方和
#         prev_gra += gradient**2
#         # 计算调整后的梯度步长 adagrad
#         ada = np.sqrt(prev_gra)
#         # 更新权重 weights -= learning rate * gradient / ada
#         weights -= lr * gradient/ada
#
#         # 计算平方差和标准差
#         cost = np.sum(loss**2)/len(x)
#         cost_a = math.sqrt(cost)
#         # 打印每次迭代的损失值和标准差 (可选)
#         print(f'Iteration {i}, Loss (cost): {cost}, Std Dev (cost_a): {cost_a}')
#
#         if i == iter_num - 1:
#             plt.figure(figsize=(80, 10))
#             # 绘制预测值
#             plt.plot(loss, label='Predicted Values', color='red')
#
#             # 添加图例
#             plt.legend()
#
#             # 添加标题和标签
#             plt.title('Real vs Predicted Values')
#             plt.xlabel('Index')
#             plt.ylabel('PM2.5 Value')
#
#             # 显示图形
#             plt.savefig('table.png', dpi=200)
#
#     return weights
