import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.model_selection import train_test_split # 拆分数据集工具
from sklearn.preprocessing import StandardScaler # 标准化工具
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.metrics import accuracy_score # 导入准确率指标


# 多元分类：拆解成多个二元分类
# 多元分类的损失函数：
# 1. one-hot格式的分类编码，比如0-9分类中的8，格式为[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]（10个元素，第8位为1），使用分类交叉熵作为损失函数
# 2. 直接转换成类别数字：1， 2， 3， 4，使用稀疏分类交叉熵作为损失函数

# 正则化，欠拟合和过拟合
# 规范化：数据限定在需要的范围
# 标准化：将数据正态分布，平均值0，标准差1，消除过大的数值差异
# 正则化：在损失函数内增加惩罚项，增加建模的模糊性，防止过拟合问题，影响的是模型的权重
# L1正则化：根据权重的绝对值的总和来惩罚权重，在依赖稀疏特征的模型中，L1正则化有助于使不相关的特征权重正好为0，排除不相关特征
# L2正则化：根据权重的平方和来惩罚权重，有助于使离群值的权重接近于0，比较常用，适合于任何情况
def logic_regression_iris():
    """Logistic-regression demos on the iris data set.

    Part 1: fit an L2-regularized model on the two sepal features
    (standardized) and print its test accuracy.
    Part 2: sweep the inverse regularization strength C over the two
    petal features, record each accuracy in a DataFrame, and plot the
    decision regions for every C in a 3x2 subplot grid.

    Returns: None (prints results and shows a matplotlib figure).
    """
    iris = datasets.load_iris()
    X_sepal = iris.data[:, [0, 1]]  # sepal features: length and width
    X_petal = iris.data[:, [2, 3]]  # petal features: length and width
    y = iris.target                 # class labels (3 species)

    # --- Part 1: sepal features, single model with fixed C ---
    X_train_sepal, X_test_sepal, y_train_sepal, y_test_sepal = \
        train_test_split(X_sepal, y, test_size=0.3, random_state=0)
    # Fix: these counts describe the SEPAL split (the originals said 花瓣/petal).
    print("花萼训练集样本数: ", len(X_train_sepal))
    print("花萼测试集样本数: ", len(X_test_sepal))
    scaler = StandardScaler()
    # Fit the scaler on the training set only, then reuse it on the test
    # set, so no test-set statistics leak into preprocessing.
    X_train_sepal = scaler.fit_transform(X_train_sepal)
    X_test_sepal = scaler.transform(X_test_sepal)

    # Recombine features and labels (train first, then test) for later
    # visualization; currently unused in this part.
    X_combined_sepal = np.vstack((X_train_sepal, X_test_sepal))
    Y_combined_sepal = np.hstack((y_train_sepal, y_test_sepal))

    # C is the INVERSE regularization strength: smaller C = stronger penalty.
    lr = LogisticRegression(penalty='l2', C=0.1)
    lr.fit(X_train_sepal, y_train_sepal)
    score = lr.score(X_test_sepal, y_test_sepal)  # mean accuracy on the test split
    print("Sklearn 逻辑回归测试准确率 {:.2f}%".format(score*100))

    # --- Part 2: petal features, sweep the regularization parameter C ---
    X_train_petal, X_test_petal, y_train_petal, y_test_petal = \
        train_test_split(X_petal, y, test_size=0.3, random_state=0)
    print("花瓣训练集样本数: ", len(X_train_petal))
    print("花瓣测试集样本数: ", len(X_test_petal))
    scaler = StandardScaler()
    X_train_petal = scaler.fit_transform(X_train_petal)
    X_test_petal = scaler.transform(X_test_petal)
    # Combined arrays: indices 0-104 are training samples, 105-149 test samples.
    X_combined_petal = np.vstack((X_train_petal, X_test_petal))
    Y_combined_petal = np.hstack((y_train_petal, y_test_petal))

    C_param_range = [0.01, 0.1, 1, 10, 100, 1000]
    petal_acc_table = pd.DataFrame(columns=['C_parameter', 'Accuracy'])
    petal_acc_table['C_parameter'] = C_param_range
    plt.figure(figsize=(10, 10))
    # enumerate from 1 so the index doubles as the 1-based subplot position.
    for j, c in enumerate(C_param_range, start=1):
        lr = LogisticRegression(penalty='l2', C=c, random_state=0)
        lr.fit(X_train_petal, y_train_petal)
        y_pred_petal = lr.predict(X_test_petal)
        petal_acc_table.iloc[j - 1, 1] = accuracy_score(y_test_petal, y_pred_petal)
        plt.subplot(3, 2, j)
        plt.subplots_adjust(hspace=0.4)
        # test_idx marks samples 105-149 of the combined arrays (the test split).
        plot_decision_regions(X=X_combined_petal, y=Y_combined_petal,
                              classifier=lr, test_idx=range(105, 150))
        plt.xlabel('Petal length')
        plt.ylabel('Petal width')
        plt.title('C = %s' % c)
    plt.show()



# 绘图函数
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    """Plot the 2-D decision regions of a fitted classifier.

    Evaluates ``classifier.predict`` on a dense grid covering the two
    feature columns of ``X``, fills each predicted region with a class
    color, and scatters the samples on top.

    Args:
        X: array of shape (n_samples, 2) — exactly two feature columns.
        y: class labels aligned with X.
        classifier: fitted estimator exposing ``predict``.
        test_idx: optional index range of test samples to highlight.
        resolution: grid step size for the decision surface.
    """
    markers = ('o', 'x', 'v')
    colors = ('red', 'blue', 'lightgreen')
    color_map = ListedColormap(colors[:len(np.unique(y))])

    # Grid bounds: pad the feature ranges by 1 on each side.
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    # Predict every grid point, then reshape back to the grid for contouring.
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    # Fix: contourf (filled) instead of contour — contour only draws the
    # boundary lines, so the "regions" were never actually shaded.
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=color_map)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=color_map(idx),
                    marker=markers[idx], label=cl)

    # Fix: the original computed X_test/Y_test but never used them, and
    # sliced with test_idx even when it was None. Highlight test samples
    # with hollow circles only when an index range is provided.
    if test_idx is not None:
        X_test = X[test_idx, :]
        plt.scatter(X_test[:, 0], X_test[:, 1],
                    facecolors='none', edgecolors='black',
                    alpha=1.0, linewidths=1, marker='o',
                    s=100, label='test set')


# Sklean 逻辑回归实现
def logic_regression_sklean():
    """Baseline sklearn logistic regression on the iris sepal features.

    Splits the data 70/30 (fixed seed), fits an L2-regularized model
    (C=1) on the raw, unscaled sepal length/width, and prints the
    test-set accuracy.

    Returns: None (prints the accuracy).
    """
    iris = datasets.load_iris()
    X_sepal = iris.data[:, [0, 1]]  # sepal features: length and width
    y = iris.target                 # class labels (3 species)
    # Note: the unused petal feature slice from the original was removed.

    X_train_sepal, X_test_sepal, y_train_sepal, y_test_sepal = \
        train_test_split(X_sepal, y, test_size=0.3, random_state=0)

    # C=1 is sklearn's default inverse regularization strength.
    lr = LogisticRegression(penalty='l2', C=1)
    lr.fit(X_train_sepal, y_train_sepal)
    score = lr.score(X_test_sepal, y_test_sepal)  # mean test accuracy
    print("Sklearn 逻辑回归测试准确率 {:.2f}%".format(score*100))

if __name__ == '__main__':
    # Script entry point: runs the baseline sklearn demo.
    # The C-parameter sweep demo is available but disabled by default.
    # logic_regression_iris()
    logic_regression_sklean()