import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import argparse
from sklearn.datasets import load_iris, make_moons
from sklearn.preprocessing import PolynomialFeatures
from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier, OutputCodeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier

from joblib import dump, load


def get_arguments(argv=None):
    """Parse command-line options for the logistic-regression experiments.

    Args:
        argv: optional list of argument strings; None (the default) parses
            sys.argv[1:], so existing callers are unaffected. Passing a
            list makes the function testable without touching the real
            command line.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='LogisticRegression')
    # BUG FIX: the original used `type=list`, which makes argparse split a
    # single string into characters (e.g. "f0,f1" -> ['f','0',',','f','1']).
    # `nargs='+'` is the correct way to accept a space-separated list.
    parser.add_argument('--features', nargs='+', type=str, default=['f0', 'f1'],
                        help="the features of iris datasets for regression, "
                             "element of parameter should be 'f0', 'f1', 'f2' or 'f3'")
    parser.add_argument('--classes', nargs='+', type=int, default=[0, 1],
                        help='the classes of iris datasets for classify, element of parameter should be 0, 1, 2')
    parser.add_argument('--test_size', type=float, default=0.33, help='the proportion of test data')
    parser.add_argument('--random_state', type=int, default=42, help='the random seed of dataset split')
    parser.add_argument('--multi_class', type=str, default='auto', choices=('auto', 'ovr', 'multinomial'),
                        help='the option of multi-classify')
    parser.add_argument('--n_samples', type=int, default=500, help='the size of moon_datasets')
    parser.add_argument('--noise', type=float, default=0.2, help='the noise of moon_datasets')
    parser.add_argument('--penalty', type=str, default='l2', choices=('l1', 'l2', 'elasticnet', 'none'),
                        help='the type of regularization')
    parser.add_argument('--solver', type=str, default='lbfgs',
                        choices=('newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'),
                        help='the type of optimal solution')
    parser.add_argument('--C', type=float, default=1.0, help='The reciprocal of the regularization intensity')
    parser.add_argument('--degree', type=int, default=3, help='The degree of PolynomialFeatures')

    args = parser.parse_args(argv)
    return args


def load_dataset():
    """Load the iris dataset into a pandas DataFrame.

    Returns:
        DataFrame with float feature columns 'f0'..'f3' and an integer
        'label' column holding the class index.
    """
    with warnings.catch_warnings():
        # Silence any dataset-loading warnings; they are noise for this demo.
        warnings.filterwarnings("ignore")
        dataset = load_iris()
        print("The iris datasets is loaded successfully!")
        features = dataset.data.astype(float)
        df = pd.DataFrame(features, columns=['f0', 'f1', 'f2', 'f3'])
        df['label'] = dataset.target.astype(int)
        return df


class MyPreprocessing:
    """Select, visualise and split the iris DataFrame produced by load_dataset."""

    def __init__(self, parser):
        self.features = parser.features          # feature column names, e.g. ['f0', 'f1']
        self.classes = parser.classes            # label values to keep, e.g. [0, 1]
        self.test_size = parser.test_size        # fraction of data reserved for testing
        self.random_state = parser.random_state  # seed for the train/test split

    def choose_dataset(self, df):
        """Return (features, labels) numpy arrays restricted to the requested classes."""
        subset = df[df.label.isin(self.classes)]
        return subset[self.features].values, subset[['label']].values

    def draw_dataset(self, df):
        """Scatter-plot the first two selected features, one colour per class."""
        datas, labels = self.choose_dataset(df)
        palette = {0: 'b', 1: 'r', 2: 'y'}
        # Labels come back as an (n, 1) array; flatten it before colour lookup.
        colors = [palette[int(v)] for v in labels.ravel() if int(v) in palette]
        plt.scatter(datas[:, 0], datas[:, 1], color=colors)
        plt.xlabel(self.features[0])
        plt.ylabel(self.features[1])
        plt.show()

    def split_dataset(self, df):
        """Split the chosen data into train/test sets; labels are returned 1-D."""
        datas, labels = self.choose_dataset(df)
        assert 0 < self.test_size < 1, "Please choose right test size between 0 and 1"
        X_train, X_test, y_train, y_test = train_test_split(
            datas, labels, test_size=self.test_size, random_state=self.random_state)
        return X_train, X_test, y_train.ravel(), y_test.ravel()


class SklearnLogisticRegression:
    """Train, evaluate, visualise and persist linear multi-class classifiers.

    Covers plain LogisticRegression plus the three classic multi-class
    reduction strategies: one-vs-rest (OvR), one-vs-one (OvO) and
    error-correcting output codes (labelled MvM here). Each driver method
    fits a model, prints train/test accuracy, draws the decision boundary
    and saves the fitted estimator with joblib.
    """

    def __init__(self, parser):
        # Multi-class strategy forwarded to LogisticRegression:
        # 'auto', 'ovr' or 'multinomial'.
        self.multi_class = parser.multi_class

    def regression(self, X_train, y_train, X_test, y_test):
        """Fit LogisticRegression, print its parameters/metrics, plot and save it."""
        print("-------------------------------------LogisticRegression-------------------------------------")
        clf = LogisticRegression(random_state=0, multi_class=self.multi_class).fit(X_train, y_train)
        print("The score of LogisticRegression is: {}".format(clf.score(X_train, y_train)))
        # BUG FIX: the two messages below used to say "LinearRegression",
        # mislabelling the model whose parameters are printed.
        print("The coefficient of LogisticRegression is: {}".format(clf.coef_))
        print("The intercept of LogisticRegression is: {}".format(clf.intercept_))
        self.evaluate(clf, X_train, y_train, X_test, y_test)
        self.draw_result(clf, X_train, y_train, "LogisticRegression_train", True)
        self.draw_result(clf, X_test, y_test, "LogisticRegression_test", True)
        dump(clf, 'LogisticRegression.joblib')
        print("The training model file 'LogisticRegression.joblib' is saved successfully")

    def OvR(self, X_train, y_train, X_test, y_test):
        """One-vs-rest reduction with an SVC base estimator."""
        print("-------------------------------------OvR-------------------------------------")
        clf = OneVsRestClassifier(SVC()).fit(X_train, y_train)
        self.evaluate(clf, X_train, y_train, X_test, y_test)
        self.draw_result(clf, X_train, y_train, "OvR_train", False)
        self.draw_result(clf, X_test, y_test, "OvR_test", False)
        dump(clf, 'OvR.joblib')
        print("The training model file 'OvR.joblib' is saved successfully")

    def OvO(self, X_train, y_train, X_test, y_test):
        """One-vs-one reduction with a LinearSVC base estimator."""
        print("-------------------------------------OvO-------------------------------------")
        clf = OneVsOneClassifier(LinearSVC(random_state=0)).fit(X_train, y_train)
        self.evaluate(clf, X_train, y_train, X_test, y_test)
        self.draw_result(clf, X_train, y_train, "OvO_train", False)
        self.draw_result(clf, X_test, y_test, "OvO_test", False)
        dump(clf, 'OvO.joblib')
        print("The training model file 'OvO.joblib' is saved successfully")

    def MvM(self, X_train, y_train, X_test, y_test):
        """Error-correcting output-code reduction with a random-forest base estimator."""
        print("-------------------------------------MvM-------------------------------------")
        clf = OutputCodeClassifier(estimator=RandomForestClassifier(random_state=0), random_state=0).fit(X_train, y_train)
        self.evaluate(clf, X_train, y_train, X_test, y_test)
        self.draw_result(clf, X_train, y_train, "MvM_train", False)
        self.draw_result(clf, X_test, y_test, "MvM_test", False)
        dump(clf, 'MvM.joblib')
        print("The training model file 'MvM.joblib' is saved successfully")

    def evaluate(self, clf, X_train, y_train, X_test, y_test):
        """Print and return (train_accuracy, test_accuracy) for a fitted model."""
        precision_train = self.score(clf, X_train, y_train)
        precision_test = self.score(clf, X_test, y_test)
        print("The precision of training data is {}".format(precision_train))
        print("The precision of testing data is {}".format(precision_test))
        return precision_train, precision_test

    def score(self, clf, X, y):
        # Compute prediction accuracy: fraction of predictions equal to y.
        pre = clf.predict(X)
        right_num = np.sum(pre == y)
        return right_num / len(y)

    def draw_result(self, clf, X, y, type, isLinear):
        """Plot the decision surface of a fitted two-feature classifier.

        Args:
            clf: fitted estimator exposing predict() over 2-D inputs.
            X, y: samples and labels to overlay on the surface.
            type: title suffix naming the plot. NOTE: shadows the builtin
                `type`; the name is kept for interface compatibility.
            isLinear: when True, also draw each class's separating line
                from clf.coef_ / clf.intercept_.
        """
        _, ax = plt.subplots()
        DecisionBoundaryDisplay.from_estimator(
            clf, X, response_method="predict", cmap=plt.cm.Paired, ax=ax
        )
        plt.title("Decision surface of LogisticRegression (%s)" % type)
        plt.axis("tight")

        # Plot also the training points
        colors = "bry"
        for i, color in zip(clf.classes_, colors):
            idx = np.where(y == i)
            plt.scatter(
                X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired, edgecolor="black", s=20
            )

        if isLinear:
            # Plot the per-class separating hyperplanes of a linear model.
            xmin, xmax = plt.xlim()
            ymin, ymax = plt.ylim()
            coef = clf.coef_
            intercept = clf.intercept_

            def plot_hyperplane(c, color):
                # Solve coef[c,0]*x0 + coef[c,1]*x1 + intercept[c] = 0 for x1.
                def line(x0):
                    return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
                plt.plot([xmin, xmax], [line(xmin), line(xmax)], ls="--", color=color)

            for i, color in zip(np.arange(np.shape(coef)[0]), colors):
                plot_hyperplane(i, color)

        plt.show()


class SklearnNonLinearLogisticRegression(SklearnLogisticRegression):
    """Non-linear (polynomial-feature) logistic regression on the two-moons data."""

    def __init__(self, parser):
        super().__init__(parser)
        self.n_samples = parser.n_samples        # number of moon samples to generate
        self.noise = parser.noise                # Gaussian noise added to the moons
        self.penalty = parser.penalty            # regularization type for LogisticRegression
        self.C = parser.C                        # inverse regularization strength
        self.degree = parser.degree              # polynomial-expansion degree
        self.test_size = parser.test_size        # fraction reserved for testing
        self.random_state = parser.random_state  # seed for the train/test split
        self.solver = parser.solver              # optimizer for LogisticRegression

    def generate_random_data(self):
        """Create, plot and return a noisy two-moons dataset (fixed seed)."""
        X, y = make_moons(n_samples=self.n_samples, noise=self.noise, random_state=520)
        plt.title(f" moon datasets, n_samples = {self.n_samples}, noise = {self.noise}")
        plt.scatter(X[:, 0], X[:, 1], marker="o", c=y, s=25, edgecolors='k')
        plt.show()
        return X, y

    def split_dataset(self, X, y):
        """Split arrays into (X_train, X_test, y_train, y_test)."""
        assert 0 < self.test_size < 1, "Please choose right test size between 0 and 1"
        return train_test_split(X, y, test_size=self.test_size, random_state=self.random_state)

    def _build_model(self, degree):
        # Single place to assemble the polynomial-expansion + classifier pipeline.
        return Pipeline([
            ('poly', PolynomialFeatures(degree=degree)),
            ('non_linear', LogisticRegression(solver=self.solver, penalty=self.penalty, C=self.C)),
        ])

    def non_linear_regression(self, X, y):
        """Fit the pipeline on a train split, plot boundaries and report accuracy."""
        train_X, test_X, train_y, test_y = self.split_dataset(X, y)
        model = self._build_model(self.degree).fit(train_X, train_y)
        print("The coefficient of nonLinearRegression is: {}".format(model.named_steps['non_linear'].coef_))
        self.draw_result(model, train_X, train_y, "nonLinear_train, degree=" + str(self.degree), False)
        self.draw_result(model, test_X, test_y, "nonLinear_test, degree=" + str(self.degree), False)
        self.evaluate(model, train_X, train_y, test_X, test_y)

    def search_parameter(self, X, y, degrees):
        """Train one model per degree and plot the train/test accuracy curves."""
        train_X, test_X, train_y, test_y = self.split_dataset(X, y)
        train_acc, test_acc = [], []
        for degree in degrees:
            model = self._build_model(degree).fit(train_X, train_y)
            acc_train, acc_test = self.evaluate(model, train_X, train_y, test_X, test_y)
            train_acc.append(acc_train)
            test_acc.append(acc_test)
        self.draw_acc(degrees, train_acc, test_acc)

    def draw_acc(self, degrees, train_acc, test_acc):
        """Plot accuracy versus polynomial degree for both splits."""
        for series, name in ((train_acc, 'train_acc'), (test_acc, 'test_acc')):
            plt.plot(degrees, series, label=name)
        plt.xlabel('degree')
        plt.ylabel('precision')
        plt.legend()
        plt.show()


if __name__ == "__main__":
    # Parse command-line options. FIX: the namespace used to be stored in a
    # variable called `parser`, which confusingly named an argparse result
    # after an ArgumentParser.
    args = get_arguments()

    # # ---------Data processing-------------
    # # Load the iris dataset
    # df = load_dataset()
    # # Initialise the preprocessing helper (FIX: instances no longer shadow
    # # their own class names, which made the classes unusable afterwards).
    # preprocessing = MyPreprocessing(args)
    # # Visualise the dataset to judge whether the classes are separable
    # preprocessing.draw_dataset(df)
    # # Split the dataset into training and testing sets
    # X_train, X_test, y_train, y_test = preprocessing.split_dataset(df)
    #
    # # ---------Model training-------------
    # # Initialise the linear multi-class driver
    # linear_model = SklearnLogisticRegression(args)
    # #  LogisticRegression model
    # linear_model.regression(X_train, y_train, X_test, y_test)
    # # OvR model
    # linear_model.OvR(X_train, y_train, X_test, y_test)
    # # OvO model
    # linear_model.OvO(X_train, y_train, X_test, y_test)
    # # MvM model
    # linear_model.MvM(X_train, y_train, X_test, y_test)

    # ---------Non-linear-------------
    non_linear_model = SklearnNonLinearLogisticRegression(args)
    X, y = non_linear_model.generate_random_data()
    non_linear_model.non_linear_regression(X, y)
    # Search for the best degree hyper-parameter
    # degrees = np.arange(0, 25)
    # non_linear_model.search_parameter(X, y, degrees)




