# Softmax regression can be viewed as a one-layer neural network whose
# activation function is softmax.
import numpy as np
import pandas as pd

df = pd.read_csv('train.csv', index_col=0)
text = pd.read_csv('test_no_answer.csv', index_col=0)

# --- Preprocessing: one-hot encode the target labels ---
df = pd.get_dummies(df, columns=['label'])

# Column names produced by get_dummies for the 10 classes.
label_cols = ['label_0', 'label_1', 'label_2', 'label_3', 'label_4',
              'label_5', 'label_6', 'label_7', 'label_8', 'label_9']
feature_cols = [c for c in df.columns if c not in label_cols]

# Outlier handling (3-sigma rule): treat outliers as missing values.
# BUG FIX: run the rule only on the FEATURE columns. The original looped over
# every column, including the 0/1 one-hot label columns, where 3-sigma can
# wipe out the '1' entries of rare classes and later replace them with
# fractional column means.
for feature in feature_cols:
    mean_value = df[feature].mean()
    std_value = df[feature].std()
    outlier_min = mean_value - 3 * std_value
    outlier_max = mean_value + 3 * std_value
    df.loc[(df[feature] < outlier_min) | (df[feature] > outlier_max), feature] = np.nan
# Fill missing values with the column mean.
df = df.fillna(df.mean())
# Drop duplicate rows.
df = df.drop_duplicates()

# 2. Softmax regression trained with gradient descent.
# Step 1: vectorize the data.
Y = df[label_cols].values              # (m, 10) one-hot targets
X1 = df[feature_cols].values           # (m, n_features) raw features

# BUG FIX: standardize the features FIRST, then append the bias column.
# The original appended a column of ones and standardized afterwards, which
# maps the constant bias column to all zeros ((1 - 1) / std) and silently
# disables the bias term.
X1 = (X1 - np.mean(X1, axis=0)) / (np.std(X1, axis=0) + 1e-12)
X = np.hstack((X1, np.ones((X1.shape[0], 1))))

# step 2: define softmax and the cross-entropy loss. The scores are shifted
# by their max before exponentiating to prevent overflow.
def softmax_funtion(X):
    """Numerically-stable softmax along the last axis.

    BUG FIX: the original normalized along axis=0 and subtracted the global
    max, so for an (m, 10) score matrix each CLASS column — not each sample
    row — summed to 1, distorting both the loss and the argmax predictions.
    Softmax over each sample's class scores must run along the last axis
    with a per-row max shift. axis=-1 also keeps plain 1-D vectors working.
    """
    shifted = X - np.max(X, axis=-1, keepdims=True)
    exp_X = np.exp(shifted)
    return exp_X / np.sum(exp_X, axis=-1, keepdims=True)
def dsoftmax(Y):
    """Jacobian of the softmax at scores Y: diag(s) - s @ s.T.

    NOTE(review): this diagflat/outer-product form only applies to a single
    (column) score vector, not to a batched 2-D score matrix; the helper is
    not called anywhere in the visible file — confirm before relying on it.
    """
    s = softmax_funtion(Y)
    jac = np.diagflat(s)
    jac = jac - np.dot(s, s.T)
    return jac
def cross_entropy_loss(Y, A):
    """Mean cross-entropy between one-hot targets Y and predictions A.

    Both arrays have shape (n_samples, n_classes).

    BUG FIX: the per-sample loss is the sum over the CLASS axis (axis=1),
    averaged over samples. The original summed over axis=0 (samples) and
    averaged over classes, which scales the loss by n_samples / n_classes
    and makes the early-stopping tolerance sample-count dependent.
    """
    A = np.clip(A, 1e-16, 1 - 1e-16)  # keep log() away from 0 and 1
    loss = -np.mean(np.sum(Y * np.log(A), axis=1))
    return loss
# step 3: forward propagation
def forward(W, X):
    """Forward pass: linear scores Z = X @ W, probabilities A = softmax(Z)."""
    scores = X.dot(W)
    probs = softmax_funtion(scores)
    return scores, probs
# step 4: backward propagation
def back(X, Y, W):
    """Gradient of the cross-entropy loss with respect to W.

    NOTE(review): the residual is divided by Y.shape[1] (number of classes)
    rather than the batch size, and the training loop divides by len(Y)
    again — the net effect is only a rescaled learning rate, so the original
    scaling is preserved here; confirm before changing it.
    """
    _, probs = forward(W, X)
    # Derivative of cross-entropy w.r.t. the pre-softmax scores.
    residual = (probs - Y) / Y.shape[1]
    return np.dot(X.T, residual)
# Gradient descent with early stopping.
max_iters = 10000     # hard cap on iterations
tolerance = 0.0001    # early-stopping threshold on the change in loss
# BUG FIX: allocate one weight column per class. The original used
# size=(X.shape[1], 1); training only reached the right (n_features, 10)
# shape through accidental NumPy broadcasting on the first update, with all
# classes sharing the same initial weights.
limit = 1 / (X.shape[1] ** 0.5)   # uniform init scaled by 1/sqrt(n_features)
W = np.random.uniform(low=-limit, high=limit, size=(X.shape[1], 10))
alpha = 0.001         # learning rate
prev_loss = float('inf')
for i in range(max_iters):
    Z, A = forward(W, X)
    loss = cross_entropy_loss(Y, A)
    if i % 100 == 0:
        print(loss)

    # Early stopping: quit once the loss change falls below the tolerance.
    if abs(loss - prev_loss) < tolerance:
        print("Early stopping at iteration ", i)
        break
    # BUG FIX: the original called back() a second time right after the
    # update and discarded the result, doubling the per-iteration cost.
    grad = back(X, Y, W)
    W = W - alpha * grad / len(Y)
    prev_loss = loss

# --- Predict on the test set and export the class labels ---
# NOTE(review): the test features are standardized with the TEST set's own
# mean/std; reusing the training statistics would be the conventional choice
# — confirm whether this mismatch is intended.
X_text = text.values
# BUG FIX: standardize first, then append the bias column. The original
# standardized after appending the ones column, mapping it to all zeros and
# disabling the learned bias at prediction time.
X_text = (X_text - np.mean(X_text, axis=0)) / (np.std(X_text, axis=0) + 1e-12)
X_text = np.hstack((X_text, np.ones((X_text.shape[0], 1))))
_, Y_text = forward(W, X_text)
Y_text = np.argmax(Y_text, axis=1)   # predicted class index per row

print(Y_text.shape[0])
Y_text = pd.DataFrame(Y_text)
Y_text.to_excel('3.xlsx')










