import pandas as pd
import pandas as pd1
import pandas as pd3
import matplotlib
import matplotlib.pyplot as plt1
import pandas as pd2
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import os
from sklearn import linear_model
from sklearn.linear_model import LinearRegression

from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt2
import matplotlib.pyplot as plt3
from sklearn import svm
import sklearn
import time



class LogisticRegression():
    """Binary logistic regression trained by (mini-)batch gradient descent.

    The parameter vector ``theta`` is a row vector of shape (1, n_features);
    feature matrices are (n_samples, n_features).  Training data passed to
    :meth:`descent` carries the target as its last column.
    """

    def __init__(self, n):
        # Stopping-criterion selectors, passed as `stopType` to descent().
        self.STOP_ITER = 0  # stop after a fixed number of iterations
        self.STOP_COST = 1  # stop when the cost change falls below a threshold
        self.STOP_GRAD = 2  # stop when the gradient norm falls below a threshold
        self.n = n  # samples per epoch: the batch cursor wraps (and data reshuffles) at n

    def sigmoid(self, z):
        """Map raw scores to probabilities in (0, 1)."""
        return 1 / (1 + np.exp(-z))

    def model(self, X, theta):
        """Predicted probability: sigmoid(X @ theta.T), shape (n_samples, 1)."""
        return self.sigmoid(np.dot(X, theta.T))

    def cost(self, X, y, theta):
        """Mean cross-entropy loss of the current parameters.

        Bug fix: the model output is now computed once and reused
        (previously ``self.model(X, theta)`` was evaluated twice per call).
        """
        h = self.model(X, theta)
        left = np.multiply(-y, np.log(h))
        right = np.multiply(1 - y, np.log(1 - h))
        return np.sum(left - right) / (len(X))

    def gradient(self, X, y, theta):
        """Gradient of the cost w.r.t. theta; same shape as theta.

        The per-parameter loop is kept deliberately: it preserves the exact
        summation order (and thus bitwise results) of the original code.
        """
        grad = np.zeros(theta.shape)
        error = (self.model(X, theta) - y).ravel()
        for j in range(len(theta.ravel())):  # for each parameter
            term = np.multiply(error, X[:, j])
            grad[0, j] = np.sum(term) / len(X)
        return grad

    def stopCriterion(self, type, value, threshold):
        """Return True when training should stop.

        The meaning of `value` depends on `type`:
          STOP_ITER — iteration count exceeds `threshold`;
          STOP_COST — change between the last two costs is below `threshold`;
          STOP_GRAD — gradient norm is below `threshold`.
        (`type` shadows the builtin but is kept for interface compatibility.)
        """
        if type == self.STOP_ITER:
            return value > threshold
        elif type == self.STOP_COST:
            return abs(value[-1] - value[-2]) < threshold
        elif type == self.STOP_GRAD:
            return np.linalg.norm(value) < threshold

    def shuffleData(self, data):
        """Shuffle `data` in place and split it into (X, y).

        The last column is the target; everything before it is features.
        """
        np.random.shuffle(data)
        cols = data.shape[1]
        X = data[:, 0:cols - 1]
        y = data[:, cols - 1:]
        return X, y

    def descent(self, data, theta, batchSize, stopType, thresh, alpha):
        """Gradient-descent solver.

        Args:
            data: array whose last column is the target.
            theta: initial parameters, shape (1, n_features).
            batchSize: samples per update (1 = stochastic, n = full batch).
            stopType: STOP_ITER, STOP_COST or STOP_GRAD.
            thresh: threshold for the chosen stopping criterion.
            alpha: learning rate.

        Returns:
            (theta, iterations, cost history, last gradient, elapsed seconds).
        """
        init_time = time.time()
        i = 0  # iteration counter
        k = 0  # batch cursor into the shuffled data
        X, y = self.shuffleData(data)
        grad = np.zeros(theta.shape)  # last computed gradient
        costs = [self.cost(X, y, theta)]  # cost history, seeded with initial cost

        while True:
            grad = self.gradient(X[k:k + batchSize], y[k:k + batchSize], theta)
            k += batchSize  # advance to the next batch
            if k >= self.n:
                k = 0
                X, y = self.shuffleData(data)  # epoch done: reshuffle
            theta = theta - alpha * grad  # parameter update
            costs.append(self.cost(X, y, theta))  # record the new loss
            i += 1

            if stopType == self.STOP_ITER:
                value = i
            elif stopType == self.STOP_COST:
                value = costs
            elif stopType == self.STOP_GRAD:
                value = grad
            if self.stopCriterion(stopType, value, thresh):
                break

        return theta, i - 1, costs, grad, time.time() - init_time

    def predict(self, X, theta):
        """Threshold the model output at 0.5; returns a list of 0/1 labels."""
        return [1 if x >= 0.5 else 0 for x in self.model(X, theta)]


def main():
    """End-to-end pipeline on CSV data read from hard-coded ``d:\\`` paths:

    1. PCA: standardize the samples, eigen-decompose the training covariance
       matrix and project onto the two leading components.
    2. SVM: classify pass/fail labels on the PCA features and plot the
       decision surface.
    3. Logistic regression: fit [bias, PC1, PC2] -> h label with the
       LogisticRegression class and report training accuracy.

    Shows matplotlib figures as a side effect; returns nothing.
    """
    df_jdcs = pd.read_csv(r'd:\sorb_pc.csv')  # samples + batch id (PCA input)
    # Bug fix: filename had a stray trailing dot ('d:\sorb1.csv.').
    jd_hg = pd.read_csv(r'd:\sorb1.csv')      # samples + pass/fail label (SVM target)
    g = pd.read_csv(r'd:\sorb.csv')           # samples + h variable (LR target)

    # ---- PCA on the standardized training split -------------------------
    X = df_jdcs.iloc[:, 1:]
    y = df_jdcs.iloc[:, 0]
    x_train, x_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=0)

    sc = StandardScaler()
    x_train_std = sc.fit_transform(x_train)
    # Bug fix: the test split must be scaled with the *training* statistics;
    # the original called fit_transform here, refitting the scaler on test data.
    x_test_std = sc.transform(x_test)

    cov_matrix = np.cov(x_train_std.T)
    eigen_val, eigen_vec = np.linalg.eig(cov_matrix)

    # Keep the two leading components: projection matrix w is (n_features, 2).
    eigen_pairs = [(np.abs(eigen_val[i]), eigen_vec[:, i])
                   for i in range(len(eigen_val))]
    eigen_pairs.sort(key=lambda pair: pair[0], reverse=True)  # sort by |eigenvalue|, descending
    w = np.hstack((eigen_pairs[0][1][:, np.newaxis],
                   eigen_pairs[1][1][:, np.newaxis]))

    x_train_pca = x_train_std.dot(w)
    x_test_pca = x_test_std.dot(w)

    # Scatter plot of the training samples in the PC1/PC2 plane,
    # one colour/marker per class label.
    color = ['r', 'g', 'b', 'c', 'm', 'y', 'blueviolet', 'plum', 'cadetblue',
             'pink', 'peru', 'tomato', 'silver', 'k']
    marker = ['s', 'x', 'o', '4', 'v', 'p', '*', 'h', '+', 'd', '8', '1', '2', '3']
    for l, c, m in zip(np.unique(y_train), color, marker):
        plt.scatter(x_train_pca[y_train == l, 0],
                    x_train_pca[y_train == l, 1],
                    c=c, label=l, marker=m)
    plt.title('Result')
    plt.xlabel('PC1')
    plt.ylabel('PC2')
    plt.legend(loc='lower left')
    plt.show()

    x_train_pca = x_train_pca.astype(np.float64)
    x_test_pca = x_test_pca.astype(np.float64)
    x_pca = np.vstack((x_train_pca, x_test_pca))
    print(x_pca)

    # LR target: first 138 values of the h variable, as a column vector.
    sx = np.mat(np.array(g.iloc[0:138, 0])).T
    print(g.iloc[0:138, 0])

    # SVM target: pass/fail labels for the same 138 samples.
    yhg = np.mat(jd_hg.iloc[0:138, 0])

    # ---- SVM on the PCA features ----------------------------------------
    train_data, test_data, train_label, test_label = train_test_split(
        x_pca, yhg.T, random_state=1, train_size=0.7, test_size=0.3)
    classifier = svm.SVC(kernel='rbf', gamma=10, decision_function_shape='ovr')
    classifier.fit(train_data, train_label)
    from sklearn.metrics import accuracy_score
    tes_label = classifier.predict(test_data)
    print('分类排名', accuracy_score(test_label, tes_label))

    # Decision-surface plot over a 200x200 grid spanning the PCA plane.
    x1_min, x1_max = x_pca[:, 0].min(), x_pca[:, 0].max()  # range of PC1
    x2_min, x2_max = x_pca[:, 1].min(), x_pca[:, 1].max()  # range of PC2
    x1, x2 = np.mgrid[x1_min:x1_max:200j, x2_min:x2_max:200j]
    grid_test = np.stack((x1.flat, x2.flat), axis=1)

    matplotlib.rcParams['font.sans-serif'] = ['SimHei']  # font with CJK glyphs for the labels
    cm_light = matplotlib.colors.ListedColormap(['#A0FFA0', '#FFA0A0', '#A0A0FF'])
    cm_dark = matplotlib.colors.ListedColormap(['g', 'r', 'b'])
    grid_hat = classifier.predict(grid_test).reshape(x1.shape)  # predicted class per grid point

    plt.pcolormesh(x1, x2, grid_hat, cmap=cm_light)  # background: predicted regions
    plt.scatter(x_pca[:, 0], x_pca[:, 1], c=np.array(yhg.T), s=30, cmap=cm_dark)  # all samples
    plt.scatter(test_data[:, 0], test_data[:, 1], c=np.array(test_label[:, 0]),
                s=80, edgecolors='k', zorder=2, cmap=cm_dark)  # test samples highlighted
    plt.xlabel('第一主成分', fontsize=13)
    plt.ylabel('第二主成分', fontsize=13)
    plt.xlim(x1_min, x1_max)
    plt.ylim(x2_min, x2_max)
    plt.title('吸收系数')
    plt.show()

    # ---- Logistic regression on [bias, PC1, PC2] -> h label --------------
    bias = np.ones(138).astype(np.float16)  # intercept column (the original .T on a 1-D array was a no-op)
    arr2 = np.column_stack((bias, x_pca))
    arr2 = np.column_stack((arr2, sx.astype(int)))  # append the target as the last column

    theta = np.zeros([1, 3])
    n = 100  # batch size / samples per epoch; defined before runExpe so the closure reads a bound name
    lr = LogisticRegression(n)

    def runExpe(data, theta, batchSize, stopType, thresh, alpha):
        """Run one descent experiment, plot the cost curve, return the fitted theta."""
        theta, n_iter, costs, grad, dur = lr.descent(
            data, theta, batchSize, stopType, thresh, alpha)

        # Build a descriptive title for the cost-curve plot.
        name = "Original" if (data[:, 2] > 2).sum() > 1 else "Scaled"
        name += f" data / learning rate: {alpha} / "
        if batchSize == 1:
            strDescType = "Stochastic"
        elif batchSize == n:
            strDescType = "Gradient"
        else:
            strDescType = f"Mini-batch ({batchSize})"
        name += strDescType + " descent / Stop: "
        if stopType == lr.STOP_ITER:
            strStop = f"{thresh} iterations"
        elif stopType == lr.STOP_COST:
            strStop = f"costs change < {thresh}"
        else:
            strStop = f"gradient norm < {thresh}"
        name += strStop

        plt.subplots(figsize=(12, 4))
        plt.plot(np.arange(len(costs)), costs, 'r')
        plt.xlabel('Iterations')
        plt.ylabel('Cost')
        plt.xlim(-1, )
        plt.title(name.upper())
        plt.show()
        return theta

    # stopType 0 == lr.STOP_ITER (spelled via the constant for clarity).
    theta = runExpe(arr2, theta, n, lr.STOP_ITER, thresh=0.00001, alpha=0.000001)

    # ---- Training accuracy ----------------------------------------------
    scaled_X = arr2[:, :3]
    y = arr2[:, 3]
    predictions = lr.predict(scaled_X, theta)
    correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0
               for (a, b) in zip(predictions, y)]
    # Bug fix: the original used `%` (modulo) instead of division; accuracy is
    # the fraction of correct predictions, printed as a percentage.
    accuracy = 100 * sum(correct) / len(correct)
    print('accuracy = {0}%'.format(accuracy))

# Script entry point: run the pipeline only when executed directly.
if __name__ == '__main__':
    main()

