from sklearn import datasets
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt



# Load the Wine dataset from the UCI server (no header row; column 0 is the
# class label, columns 1-13 are the features).
df_wine = pd.read_csv(
    'https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data',
    header=None)

# Split the data, train:test = 7:3, stratified on the class label so each
# split keeps the same class proportions.
x, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
x_train, x_test, y_train1, y_test = train_test_split(
    x, y, test_size=0.3, stratify=y, random_state=0)

# Standardize features to zero mean / unit variance.
# BUG FIX: the scaler must be fit on the training data only and then applied
# to the test data with transform(); the original called fit_transform() on
# x_test, which re-fits the scaler on test statistics (data leakage).
sc = StandardScaler()
x_train_std = sc.fit_transform(x_train)
x_test_std = sc.transform(x_test)
# print(x_train_std)

# Build the covariance matrix of the standardized features and
# eigendecompose it to obtain eigenvalues and eigenvectors.
cov_matrix = np.cov(x_train_std.T)
eigen_val, eigen_vec = np.linalg.eig(cov_matrix)
# print("values\n ", eigen_val, "\nvector\n ", eigen_vec)

# Explained-variance ratios: each eigenvalue's share of the total,
# in descending order.
tot = eigen_val.sum()
var_exp = [val / tot for val in sorted(eigen_val, reverse=True)]
# print(var_exp)
# cum_var_exp = np.cumsum(var_exp)  # cumulative explained-variance ratio
# plt.rcParams['font.sans-serif'] = ['SimHei']  # enable CJK font rendering
# plt.bar(range(1, 14), var_exp, alpha=0.5, align='center', label='individual explained variance')
# plt.step(range(1, 14), cum_var_exp, where='mid', label='cumulative explained variance')
# plt.ylabel("explained variance ratio")
# plt.xlabel("principal component index")
# plt.legend(loc='right')
# plt.show()

# Feature transformation: pair each eigenvalue's magnitude with its
# eigenvector and rank the pairs from largest to smallest.
eigen_pairs = sorted(
    ((np.abs(val), eigen_vec[:, idx]) for idx, val in enumerate(eigen_val)),
    key=lambda pair: pair[0],
    reverse=True,
)
# print(eigen_pairs)

# Projection matrix W: the two leading eigenvectors stacked as columns,
# reducing the feature space to two dimensions.
w = np.column_stack((eigen_pairs[0][1], eigen_pairs[1][1]))
# print(w)

# Project the standardized training data onto the two principal components.
x_train_pca = x_train_std.dot(w)
# print(x_train_pca)
# # 这里开始应该是画图
# For each wine class, fit a line y = w*x + b between the first and second
# principal-component scores via batch gradient descent, then compare the
# spread (sum of squares around the training mean) of the train vs test split.
color = ['r', 'g', 'b']
marker = ['s', 'x', 'o']
for l, c, m in zip(np.unique(y_train1), color, marker):
    # plt.scatter(x_train_pca[y_train1 == l, 0],
    #             x_train_pca[y_train1 == l, 1],
    #             c=c, label=l, marker=m)

    # PC1 scores as x, PC2 scores as y, for the samples of class l only.
    x1 = x_train_pca[y_train1 == l, 0]
    y1 = x_train_pca[y_train1 == l, 1]

    # First 20 samples train the line; the remainder are held out.
    # NOTE(review): this rebinds x_train/x_test/y_train/y_test from the
    # earlier global train/test split — confirm that shadowing is intended.
    x_train = x1[0:20]
    y_train = y1[0:20]
    x_test = x1[20:]
    y_test = y1[20:]

    # Initial parameters w=1.0, b=-1.0; learning rate 0.0001; 10000 epochs.
    w_start = 1.0
    b_start = -1.0
    learn = 0.0001
    times = 10000

    for _ in range(times):
        sum_w = 0.0
        sum_b = 0.0
        # Batch gradient of the squared-error loss sum((y - (w*x + b))**2).
        # BUG FIX: the residual must use the full prediction w*x + b.
        # The original used (y - w*x) for dL/dw and (y - b*x) for dL/db,
        # which is not the gradient of any consistent loss. The inner loop
        # index is also renamed so it no longer shadows the epoch counter.
        for j in range(len(x_train)):
            residual = y_train[j] - (w_start * x_train[j] + b_start)
            sum_w += -residual * x_train[j]
            sum_b += -residual

        w_start = w_start - 2 * sum_w * learn
        b_start = b_start - 2 * sum_b * learn

    # Evaluate how well the held-out split matches the training spread.
    # y_hat is the mean of the training targets.
    # NOTE(review): the fitted w_start/b_start are never used below — this
    # measures the total sum of squares of y around its mean, not the
    # regression fit; confirm whether the model was meant to be evaluated.
    y_hat = 0
    for j in range(len(x_train)):
        y_hat += y_train[j]
    y_hat = y_hat / len(x_train)

    total_train_loss = 0
    for j in range(len(x_train)):
        total_train_loss += (y_train[j] - y_hat) ** 2
    # print(total_train_loss)  # total sum of squares (train)

    total_test_loss = 0
    for j in range(len(x_test)):
        total_test_loss += (y_test[j] - y_hat) ** 2
    # print(total_test_loss)  # total sum of squares (test)

    # Report the train/test spread ratio as a percentage (class 3 is
    # reported as a deviation from 100%, matching the original output).
    if l == 3:
        print((total_train_loss / total_test_loss - 1) * 100)
    else:
        print((total_train_loss / total_test_loss) * 100)