import numpy as np
import pickle
import os
from PIL import Image
import matplotlib.pyplot as plt
import chineseize_matplotlib
import pandas as pd

from predict import accuracies

plt.rcParams['axes.unicode_minus'] = False  # render minus signs as ASCII so they survive CJK fonts


# Image-processing section
def load_and_resize_images(folder_path, target_size=(128, 128)):
    """Load every image file in *folder_path*, convert to RGB, and resize.

    Parameters
    ----------
    folder_path : str
        Directory scanned (non-recursively) for image files.
    target_size : tuple[int, int]
        (width, height) handed to PIL's ``resize``.

    Returns
    -------
    list[numpy.ndarray]
        One ``(height, width, 3)`` uint8 array per image, in
        ``os.listdir`` order. Non-image files are skipped.
    """
    resized_images = []
    for file in os.listdir(folder_path):
        # Compare extensions case-insensitively: the original check was
        # case-sensitive and silently dropped files like "X.JPG" or "Y.PNG".
        if file.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.gif')):
            img_path = os.path.join(folder_path, file)
            with Image.open(img_path) as img:
                # Some source images are not RGB-encoded (e.g. palette GIFs),
                # so normalise the mode before turning it into an array.
                img = img.convert('RGB')
                img_resized = img.resize(target_size)
                resized_images.append(np.array(img_resized))

    return resized_images


# --- training-data assembly -------------------------------------------------
# Load the per-class training images at 32x32 pixels, ordered red/green/yellow
# (the physical order of a traffic light).
_class_sets = [
    load_and_resize_images('./traffic_light_data/train/red', target_size=(32, 32)),
    load_and_resize_images('./traffic_light_data/train/green', target_size=(32, 32)),
    load_and_resize_images('./traffic_light_data/train/yellow', target_size=(32, 32)),
]

# Stack every class into one (N, 32, 32, 3) array.
X = np.concatenate(_class_sets)

# One-hot labels: one row per image, column order red / green / yellow.
_one_hot_rows = ([1, 0, 0], [0, 1, 0], [0, 0, 1])
y_train = np.concatenate([np.tile(row, (len(images), 1))
                          for row, images in zip(_one_hot_rows, _class_sets)])

# Flatten each image and scale pixels into [0, 1] so the network trains well.
X_train = X.reshape(X.shape[0], -1) / 255.0


def initialize_network(input_size, hidden_size, output_size):
    """Build small random weight matrices for a one-hidden-layer MLP.

    The RNG is seeded so every run starts from identical weights;
    returns (input->hidden weights, hidden->output weights).
    """
    np.random.seed(1)
    scale = 0.01  # keep initial activations near the sigmoid's linear region
    w_in_hidden = np.random.randn(input_size, hidden_size) * scale
    w_hidden_out = np.random.randn(hidden_size, output_size) * scale
    return w_in_hidden, w_hidden_out


input_size = 32 * 32 * 3  # flattened length of a 32x32 RGB image
hidden_size = 256  # tunable: 1024 failed to converge, 128 predicted poorly in experiments
output_size = 3  # red, green, yellow
weights1, weights2 = initialize_network(input_size, hidden_size, output_size)


def sigmoid(x):
    """Element-wise logistic activation: maps any real value into (0, 1)."""
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)


def sigmoid_derivative(x):
    """Sigmoid's derivative expressed via its output: x here is sigmoid(z)."""
    one_minus = 1 - x
    return x * one_minus


def compute_loss(y_true, y_pred):
    """Mean categorical cross-entropy between one-hot targets and predictions.

    Parameters: y_true is an (m, k) one-hot array; y_pred an (m, k) array
    of predicted probabilities. Returns a scalar loss.

    y_pred is clipped away from zero so that log(0) cannot produce
    -inf/NaN when the network is fully confident and wrong.
    """
    m = y_true.shape[0]
    # Clip only the lower bound; values at exactly 1.0 are fine for log.
    y_pred = np.clip(y_pred, 1e-12, None)
    loss = -1 / m * np.sum(y_true * np.log(y_pred))
    return loss


def calculate_recall(y_true, y_pred):
    """Macro-averaged recall over one-hot labels and predictions.

    Both arguments are (n_samples, n_classes) 0/1 arrays. A class with no
    positive ground-truth samples contributes a recall of 0 — same result
    as the previous nan_to_num approach, but computed with a masked divide
    so no divide-by-zero RuntimeWarning is emitted.
    """
    TP = np.sum(np.logical_and(y_pred == 1, y_true == 1), axis=0)
    FN = np.sum(np.logical_and(y_pred == 0, y_true == 1), axis=0)

    denom = TP + FN
    # Divide only where the denominator is positive; other classes stay 0.
    recall_per_class = np.divide(
        TP, denom,
        out=np.zeros(TP.shape, dtype=float),
        where=denom > 0,
    )

    recall = np.mean(recall_per_class)
    return recall


def forward_backward_propagation(X, y, weights1, weights2, learning_rate):
    """Run one training step: forward pass, loss, backprop, weight update.

    Updates weights1/weights2 in place (and returns them as well) using
    plain gradient descent scaled by learning_rate; the learning rate
    controls the step size each iteration so the loss can converge.
    Returns (loss, weights1, weights2).
    """
    # --- forward pass ---
    hidden_in = np.dot(X, weights1)          # matrix product, input -> hidden
    hidden_out = sigmoid(hidden_in)          # hidden activations
    final_in = np.dot(hidden_out, weights2)
    y_hat = sigmoid(final_in)                # network output

    # Cross-entropy loss on this batch.
    loss = compute_loss(y, y_hat)

    # --- backward pass ---
    # Output-layer delta: signed error scaled by the sigmoid's derivative.
    output_delta = (y - y_hat) * sigmoid_derivative(y_hat)
    # Hidden-layer delta must use weights2 BEFORE it is updated below.
    hidden_delta = output_delta.dot(weights2.T) * sigmoid_derivative(hidden_out)

    # --- in-place weight updates (delta already carries the error sign) ---
    weights2 += hidden_out.T.dot(output_delta) * learning_rate
    weights1 += X.T.dot(hidden_delta) * learning_rate

    return loss, weights1, weights2


learning_rate = 0.001  # gradient-descent step size
epochs = 500  # number of full passes over the training set

loss_history = []  # per-epoch loss, appended by train()
recall_history = []  # per-epoch macro recall, appended by train()


def train():
    """Run the full training loop, record metrics, and pickle the weights.

    Mutates module-level weights1/weights2, appends to loss_history and
    recall_history each epoch, and writes the final weights to 'model.pkl'.
    """
    global weights1, weights2
    for epoch in range(epochs):
        loss, weights1, weights2 = forward_backward_propagation(X_train, y_train, weights1, weights2, learning_rate)
        loss_history.append(loss)

        # Extra forward pass with the freshly updated weights to measure
        # recall on the training set for this epoch.
        hidden_layer_input = np.dot(X_train, weights1)
        hidden_layer_output = sigmoid(hidden_layer_input)
        output_layer_input = np.dot(hidden_layer_output, weights2)
        y_pred = sigmoid(output_layer_input)
        # One-hot prediction: mark each row's maximum probability.
        # NOTE(review): on an exact float tie this marks more than one class
        # per row; an argmax-based one-hot would avoid that — confirm intent.
        y_pred_class = (y_pred == y_pred.max(axis=1)[:, None]).astype(int)

        recall = calculate_recall(y_train, y_pred_class)
        recall_history.append(recall)
        if epoch % 10 == 0:
            print(f"Epoch {epoch}, Loss: {loss}")
    # Persist only the learned weights; architecture is implied by shapes.
    model = {
        "weights1": weights1,
        "weights2": weights2
    }

    with open('model.pkl', 'wb') as file:
        pickle.dump(model, file)

    print("模型已保存")


if __name__ == '__main__':
    train()
    # Per-epoch F1 from precision (imported `accuracies`) and recall; guard
    # against a 0/0 division when both are zero.
    # NOTE(review): `accuracies` comes from predict.py and is zipped against
    # the per-epoch recalls as if it were a per-epoch precision series —
    # confirm its length and meaning match before trusting the F1 curve.
    f1_scores = [2 * (precision * recall) / (precision + recall) if (precision + recall) > 0 else 0
                 for precision, recall in zip(accuracies, recall_history)]


    # --- individual charts ---------------------------------------------
    # Each chart gets its own figure. Previously the recall curve was drawn
    # onto the still-open loss figure (no plt.figure() between savefig
    # calls), so the saved recall PNG contained both curves.
    plt.figure()
    plt.plot(loss_history)
    plt.title('损失随迭代次数的变化')
    plt.xlabel('训练次数')
    plt.ylabel('损失')
    plt.grid(True)
    plt.savefig('损失随迭代次数的变化.png')

    plt.figure()
    plt.plot(recall_history)
    plt.title('召回率随迭代次数的变化')
    plt.xlabel('训练次数')
    plt.ylabel('召回率')
    plt.grid(True)
    plt.savefig('召回率随迭代次数的变化.png')  # save the recall curve image

    plt.figure()
    plt.plot(f1_scores, color='tab:green', label='F1分数')
    plt.title('模型训练中的F1分数')
    plt.xlabel('迭代次数')
    plt.ylabel('F1分数')
    plt.legend()
    plt.grid(True)
    plt.savefig('模型训练中的F1分数.png')

    # --- combined overview: loss on the left axis, recall/F1 on the right --
    fig, ax1 = plt.subplots()
    color = 'tab:red'
    ax1.set_xlabel('迭代次数')
    ax1.set_ylabel('损失', color=color)
    ax1.plot(loss_history, color=color, label='损失率')
    ax1.tick_params(axis='y', labelcolor=color)

    ax2 = ax1.twinx()
    color = 'tab:blue'
    ax2.set_ylabel('召回率和F1分数', color=color)
    ax2.plot(recall_history, color=color, label='召回率')
    ax2.plot(f1_scores, color='tab:green', label='F1分数')
    ax2.tick_params(axis='y', labelcolor=color)

    # Merge the legends of both axes into one, placed below the chart.
    lines, labels = ax1.get_legend_handles_labels()
    lines2, labels2 = ax2.get_legend_handles_labels()
    ax2.legend(lines + lines2, labels + labels2, loc='upper center', bbox_to_anchor=(0.5, -0.1), shadow=True, ncol=3)

    plt.title('模型训练中的损失、召回率和F1分数')
    plt.grid(True)
    plt.tight_layout()
    plt.savefig('总览_loss_recall_f1_curve.png')
    plt.show()

    # Persist the per-epoch metrics for later analysis.
    metrics_df = pd.DataFrame({
        'loss': loss_history,
        'recall': recall_history
    })

    csv_file_path = 'training_metrics.csv'
    metrics_df.to_csv(csv_file_path, index_label='epoch')
    print(f"保存相关csv文件到 {csv_file_path}.")