import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression

from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

if __name__ == '__main__':
    # Train and compare L1- vs L2-regularized logistic regression on the
    # iris dataset, reporting train/test accuracy, precision, recall and F1.
    path = '../1.Regression/data/iris.data'  # iris data file (no header row)
    data = pd.read_csv(path, header=None)

    # Columns 0-3 are the four numeric features; column 4 holds the class
    # label strings, encoded here as integer codes 0/1/2.
    x = data.values[:, :4]
    y = pd.Categorical(data[4]).codes
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, train_size=0.75, random_state=1, shuffle=True)

    # PolynomialFeatures generates new features as products of the existing
    # ones (each feature with itself and with the others); `degree` bounds
    # the polynomial degree of the generated terms.
    # The two pipelines below are identical except for the regularization
    # penalty (L1 vs L2); the 'liblinear' solver supports both.
    model_arr = [
        Pipeline([('sc', StandardScaler()),
                  ('poly', PolynomialFeatures(degree=2)),
                  ('clf', LogisticRegression(penalty='l1', C=100,
                                             solver='liblinear',
                                             multi_class='auto'))]),

        Pipeline([('sc', StandardScaler()),
                  ('poly', PolynomialFeatures(degree=2)),
                  ('clf', LogisticRegression(penalty='l2', C=100,
                                             solver='liblinear',
                                             multi_class='auto'))]),
    ]

    model_name_arr = ['LRL1', 'LRL2']

    print('第二组结果：')
    for model, penalty in zip(model_arr, model_name_arr):
        model.fit(x_train, y_train)
        y_train_pred = model.predict(x_train)
        y_test_pred = model.predict(x_test)

        acc_train = accuracy_score(y_train, y_train_pred)
        acc_test = accuracy_score(y_test, y_test_pred)

        # Macro averaging weights each of the three classes equally.
        test_precision_score_v = precision_score(y_test, y_test_pred, average='macro')
        test_recall_score_v = recall_score(y_test, y_test_pred, average='macro')
        test_f1_score_v = f1_score(y_test, y_test_pred, average='macro')

        train_precision_score_v = precision_score(y_train, y_train_pred, average='macro')
        train_recall_score_v = recall_score(y_train, y_train_pred, average='macro')
        train_f1_score_v = f1_score(y_train, y_train_pred, average='macro')

        print('train->>', penalty,
              ' | acc: ', (100 * acc_train),
              " | pre:", train_precision_score_v * 100,
              " | rec:", train_recall_score_v * 100,
              " | f1:", train_f1_score_v * 100,
              )

        print('test ->>', penalty,
              ' | acc:', (100 * acc_test),
              " | pre:", test_precision_score_v * 100,
              " | rec:", test_recall_score_v * 100,
              " | f1:", test_f1_score_v * 100,
              )

        print('------------------------------')
