import numpy as np
import torch
import pandas as pd
import pickle
from scipy.special import expit





class Neuralnetwork:
    def __init__(self, input_size, hidden_size1, hidden_size2, hidden_size3, output_size):
        # 初始化权重和偏置
        # 输入层到第一个隐藏层
        self.weights_input_hidden1 = np.random.randn(input_size, hidden_size1)
        self.bias_input_hidden1 = np.zeros((1, hidden_size1))
        # 第一个隐藏层到第二个隐藏层
        self.weights_hidden1_hidden2 = np.random.randn(hidden_size1, hidden_size2)
        self.bias_hidden1_hidden2 = np.zeros((1, hidden_size2))
        # 第二个隐藏层到第三个隐藏层
        self.weights_hidden2_hidden3 = np.random.randn(hidden_size3, hidden_size3)
        self.bias_hidden2_hidden3 = np.zeros((1, hidden_size3))
        # 第三个隐藏层到输出层
        self.weights_hidden3_output = np.random.randn(hidden_size3, output_size)
        self.bias_hidden3_output = np.zeros((1, output_size))

    pass

    #激活函数
    def sigmoid(self, x):
        return 1 / (1 + expit(-x))

    def sigmoid_derivative(self, x):
        return x * (1 - x)

    def swish(self,x):
        return x * 1 / (1 + expit(-x))

    def swish_derivative(self,x):
        sig = self.sigmoid(x)
        return sig + x * sig * (1 - sig)

    def softmax(self,x):
        """Compute softmax values for each score in x."""
        e_x = np.exp(x - np.max(x))  # 减去最大值以提高数值稳定性
        return e_x / e_x.sum(axis=0, keepdims=True)

    def softmax_derivative(self, softmax_output):
        """Compute the derivative of the softmax function."""
        s = softmax_output
        # 转置后，对角线元素是自身减去其他元素的和
        d = s / s.sum(axis=1, keepdims=True)
        # 恢复原始形状并减去单位矩阵
        return np.diagflat(d.T) - np.dot(s, s.T)

#前向
    def feedforward(self, X):
        # 输入层到第一个隐藏层
        self.hidden1_input = np.dot(X, self.weights_input_hidden1) + self.bias_input_hidden1
        self.hidden1_output = self.swish(self.hidden1_input)
        # 第一个隐藏层到第二个隐藏层
        self.hidden2_input = np.dot(self.hidden1_output, self.weights_hidden1_hidden2) + self.bias_hidden1_hidden2
        self.hidden2_output = self.swish(self.hidden2_input)
        #第二个隐藏层到第三个隐藏层
        self.hidden3_input = np.dot(self.hidden2_output, self.weights_hidden2_hidden3) + self.bias_hidden2_hidden3
        self.hidden3_output = self.swish(self.hidden3_input)
        # 第三个隐藏层到输出层
        self.output_input = np.dot(self.hidden3_output, self.weights_hidden3_output) + self.bias_hidden3_output
        self.output = self.softmax(self.output_input)
        return self.output

#后向
    def backpropagation(self, X, learning_rate, output_error,first_term,y_pred, y_true, gamma_s, alpha,second_term):
       #前向传播得到预测值
       self.feedforward(X)
       #计算自定义损失函数的两部分
       diff = self.output - y_true
       first_term = ((gamma_s ** 0.1) * diff ** 2).mean()
       second_term = ((1 - (gamma_s ** alpha)) ** 2).mean()
       #损失函数
       output_error = 0.01 * (first_term + second_term)
       # 计算输出层的调整量
       output_delta = output_error * self.softmax_derivative(self.output)
       # 更新输出层的权重和偏置
       self.weights_hidden3_output += np.dot(self.hidden3_output.T, output_delta) * learning_rate
       self.bias_hidden3_output += np.sum(output_delta, axis=0, keepdims=True) * learning_rate

       # 计算第三个隐藏层误差
       hidden3_error = np.dot(output_delta, self.weights_hidden3_output.T)
       # 计算第三个隐藏层的调整量
       hidden3_delta = hidden3_error * self.swish_derivative(self.hidden3_input)
       # 更新第三个隐藏层的权重和偏置
       self.weights_hidden2_hidden3 += np.dot(self.hidden2_output.T, hidden3_delta) * learning_rate
       self.bias_hidden2_hidden3 += np.sum(hidden3_delta, axis=0, keepdims=True) * learning_rate

       # 计算第二个隐藏层误差
       hidden2_error = np.dot(hidden3_delta, self.weights_hidden2_hidden3.T)
       # 计算第二个隐藏层的调整量
       hidden2_delta = hidden2_error * self.swish_derivative(self.hidden2_input)
       # 更新第二个隐藏层的权重和偏置
       self.weights_hidden1_hidden2 += np.dot(self.hidden1_output.T, hidden2_delta) * learning_rate
       self.bias_hidden1_hidden2 += np.sum(hidden2_delta, axis=0, keepdims=True) * learning_rate

       # 计算第一个隐藏层误差
       hidden1_error = np.dot(hidden2_delta, self.weights_hidden1_hidden2.T)
       # 计算第一个隐藏层的调整量
       hidden1_delta = hidden1_error * self.swish_derivative(self.hidden1_input)
       # 更新第一个隐藏层的权重和偏置
       self.weights_input_hidden1 += np.dot(X.T, hidden1_delta) * learning_rate
       self.bias_input_hidden1 += np.sum(hidden1_delta, axis=0, keepdims=True) * learning_rate

    def train(self, X, y, epochs, learning_rate):
        for epoch in range(epochs):
            self.feedforward(X)
            self.backpropagation(X, y, learning_rate)
            if epoch % 1000 == 0:
                loss = np.mean(np.square(y - self.output))
                print(f'Epoch {epoch}, Loss: {loss:.4f}')

if __name__ == "__main__":
    # Load training features (Excel) and targets (CSV) from disk.
    data_x = pd.read_excel('Data_set_.xlsx')
    data_y = pd.read_csv('Data_set_y.csv')
    X = data_x.iloc[:, :].values
    y = data_y.iloc[:, :].values

    # Build the network and run full-batch training.
    nn = Neuralnetwork(
        input_size=3,
        hidden_size1=16,
        hidden_size2=16,
        hidden_size3=16,
        output_size=3,
    )
    nn.train(X, y, epochs=5000, learning_rate=0.001)

    # Persist the trained model object for later reuse.
    with open('BPmodel_parameters.pkl', 'wb') as f:
        pickle.dump(nn, f)