import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import argparse
from sklearn.datasets import load_iris, make_moons
from sklearn.preprocessing import PolynomialFeatures
from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier, OutputCodeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from joblib import dump, load

from BGD import MyLinearRegression, draw_loss


def get_arguments(argv=None):
    """Parse command-line options for the logistic-regression experiments.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
            Passing a list makes the function usable in tests without
            touching the real command line.

    Returns:
        argparse.Namespace holding every experiment setting.
    """
    parser = argparse.ArgumentParser(description='LogisticRegression')
    # NOTE: the original used type=list, which splits a command-line string
    # into single characters ('f0' -> ['f', '0']); nargs builds real lists
    # while keeping the same defaults.
    parser.add_argument('--features', type=str, nargs='+', default=['f0', 'f3'],
                        help="the features of iris datasets for regression, "
                             "element of parameter should be 'f0', 'f1', 'f2' or 'f3'")
    parser.add_argument('--classes', type=int, nargs='+', default=[0, 1, 2],
                        help='the classes of iris datasets for classify, element of parameter should be 0, 1, 2')
    parser.add_argument('--test_size', type=float, default=0.33, help='the proportion of test data')
    parser.add_argument('--random_state', type=int, default=42, help='the random seed of dataset split')
    parser.add_argument('--multi_class', type=str, default='auto', choices=('auto', 'ovr', 'multinomial'),
                        help='the option of multi-classify')
    parser.add_argument('--n_samples', type=int, default=500, help='the size of moon_datasets')
    parser.add_argument('--noise', type=float, default=0.2, help='the noise of moon_datasets')
    parser.add_argument('--penalty', type=str, default='l2', choices=('l1', 'l2', 'elasticnet', 'none'),
                        help='the type of regularization')
    parser.add_argument('--solver', type=str, default='lbfgs',
                        choices=('newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'),
                        help='the type of optimal solution')
    parser.add_argument('--C', type=float, default=1.0, help='The reciprocal of the regularization intensity')
    parser.add_argument('--degree', type=int, default=3, help='The degree of PolynomialFeatures')
    parser.add_argument('--iteration', type=int, default=20000, help='the iteration of SGD')
    parser.add_argument('--initialization', type=int, default=1, choices=(0, 1),
                        help='select the type of parameter initialization'
                             '0: all parameters initialize 0,'
                             '1: random initialization in [0, 1] uniform distribution')
    parser.add_argument('--learning_rate', type=float, default=1e-3,
                        help='the learning rate of gradient descent,'
                             'if normalization == 0, suggest learning_rate = 1e-5 to avoid loss booming,'
                             'else, suggest learning_rate = 1e-2 to avoid loss descending slowly')
    parser.add_argument('--mini_batch_size', type=int, default=32, help='the number of data for each update')
    parser.add_argument('--gradient_descent_method', type=int, default=1, choices=(1, 2),
                        help='the type of gradient descent,'
                             '1: BGD,'
                             '2: MBGD')

    args = parser.parse_args(argv)
    return args


def load_dataset():
    """Load the iris dataset and return it as a five-column DataFrame.

    Columns 'f0'..'f3' hold the four float features; 'label' holds the
    integer class id (0, 1 or 2).
    """
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        bunch = load_iris()
        print("The iris datasets is loaded successfully!")
        frame = pd.DataFrame(bunch.data.astype(float),
                             columns=['f0', 'f1', 'f2', 'f3'])
        frame['label'] = bunch.target.astype(int)
        return frame


class MyPreprocessing:
    """Dataset selection, visualization and train/test splitting for iris."""

    # Colour used for each class id in the scatter plot.
    _CLASS_COLORS = {0: 'b', 1: 'r', 2: 'y'}

    def __init__(self, parser):
        self.features = parser.features          # two feature column names
        self.classes = parser.classes            # class ids to keep
        self.test_size = parser.test_size        # fraction held out for testing
        self.random_state = parser.random_state  # split seed

    def choose_dataset(self, df):
        """Return (features, labels) restricted to the configured classes/features.

        Returns:
            datas: ndarray of the selected feature columns.
            labels: (n, 1) ndarray column vector of labels.
        """
        df = df[df.label.isin(self.classes)]  # keep only the requested labels
        datas = df[self.features].values      # selected feature columns
        labels = df[['label']].values         # labels as a column vector
        return datas, labels

    def draw_dataset(self, df):
        """Scatter-plot the two selected features, coloured by class."""
        datas, labels = self.choose_dataset(df)
        # Same mapping as before: labels outside {0, 1, 2} get no colour entry.
        colors = [self._CLASS_COLORS[int(lab)] for lab in labels.ravel()
                  if int(lab) in self._CLASS_COLORS]
        plt.scatter(datas[:, 0], datas[:, 1], color=colors)
        plt.xlabel(self.features[0])
        plt.ylabel(self.features[1])
        plt.title("Dataset scatter plot")
        plt.show()

    def split_dataset(self, df):
        """Split into train/test; labels are returned as flat 1-d arrays."""
        datas, labels = self.choose_dataset(df)
        assert 0 < self.test_size < 1, "Please choose right test size between 0 and 1"
        X_train, X_test, y_train, y_test = train_test_split(
            datas, labels, test_size=self.test_size, random_state=self.random_state)
        return X_train, X_test, y_train.ravel(), y_test.ravel()


class SklearnLogisticRegression:
    """Wrappers around scikit-learn multi-class strategies on the iris data.

    Every public training method fits a classifier, prints train/test
    accuracy, draws the decision surface for both splits and dumps the
    fitted model to a .joblib file.
    """

    def __init__(self, parser):
        self.multi_class = parser.multi_class  # 'auto', 'ovr' or 'multinomial'

    def regression(self, X_train, y_train, X_test, y_test):
        """Plain logistic regression with the configured multi_class mode."""
        print("-------------------------------------LogisticRegression-------------------------------------")
        clf = LogisticRegression(random_state=0, multi_class=self.multi_class).fit(X_train, y_train)
        print("The score of LogisticRegression is: {}".format(clf.score(X_train, y_train)))
        print("The coefficient of LinearRegression is: {}".format(clf.coef_))
        print("The intercept of LinearRegression is: {}".format(clf.intercept_))
        self._report(clf, "LogisticRegression", X_train, y_train, X_test, y_test, True)

    def OvR(self, X_train, y_train, X_test, y_test):
        """One-vs-rest with an SVC base estimator."""
        print("-------------------------------------OvR-------------------------------------")
        clf = OneVsRestClassifier(SVC()).fit(X_train, y_train)
        self._report(clf, "OvR", X_train, y_train, X_test, y_test, False)

    def OvO(self, X_train, y_train, X_test, y_test):
        """One-vs-one with a LinearSVC base estimator."""
        print("-------------------------------------OvO-------------------------------------")
        clf = OneVsOneClassifier(LinearSVC(random_state=0)).fit(X_train, y_train)
        self._report(clf, "OvO", X_train, y_train, X_test, y_test, False)

    def MvM(self, X_train, y_train, X_test, y_test):
        """Error-correcting output codes with a random-forest base estimator."""
        print("-------------------------------------MvM-------------------------------------")
        clf = OutputCodeClassifier(estimator=RandomForestClassifier(random_state=0), random_state=0).fit(X_train, y_train)
        self._report(clf, "MvM", X_train, y_train, X_test, y_test, False)

    def _report(self, clf, name, X_train, y_train, X_test, y_test, is_linear):
        """Shared tail of every training method: evaluate, plot both splits, persist."""
        self.evaluate(clf, X_train, y_train, X_test, y_test)
        self.draw_result(clf, X_train, y_train, name + "_train", is_linear)
        self.draw_result(clf, X_test, y_test, name + "_test", is_linear)
        filename = name + '.joblib'
        dump(clf, filename)
        print("The training model file '{}' is saved successfully".format(filename))

    def evaluate(self, clf, X_train, y_train, X_test, y_test):
        """Print and return (train_accuracy, test_accuracy)."""
        precision_train = self.score(clf, X_train, y_train)
        precision_test = self.score(clf, X_test, y_test)
        print("The precision of training data is {}".format(precision_train))
        print("The precision of testing data is {}".format(precision_test))
        return precision_train, precision_test

    def score(self, clf, X, y):
        """Fraction of samples whose prediction matches the label."""
        pre = clf.predict(X)
        right_num = np.sum(pre == y)
        return right_num / len(y)

    def draw_result(self, clf, X, y, type, isLinear):
        """Plot the decision surface, the samples and, for linear models,
        the per-class separating lines.

        `type` (name kept for interface compatibility, though it shadows the
        builtin) is the plot-title suffix; `isLinear` toggles drawing the
        clf.coef_/clf.intercept_ hyperplanes.
        """
        _, ax = plt.subplots()
        DecisionBoundaryDisplay.from_estimator(
            clf, X, response_method="predict", cmap=plt.cm.Paired, ax=ax
        )
        plt.title("Decision surface of LogisticRegression (%s)" % type)
        plt.axis("tight")

        # Overlay the data points, one colour per class.
        colors = "bry"
        for i, color in zip(clf.classes_, colors):
            idx = np.where(y == i)
            plt.scatter(
                X[idx, 0], X[idx, 1], c=color, cmap=plt.cm.Paired, edgecolor="black", s=20
            )

        if isLinear:
            # Draw each class's zero level set from the fitted coefficients.
            xmin, xmax = plt.xlim()
            coef = clf.coef_
            intercept = clf.intercept_

            def plot_hyperplane(c, color):
                # x1 = -(w0 * x0 + b) / w1 for class c.
                def line(x0):
                    return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
                plt.plot([xmin, xmax], [line(xmin), line(xmax)], ls="--", color=color)

            for i, color in zip(np.arange(np.shape(coef)[0]), colors):
                plot_hyperplane(i, color)

        plt.show()


class SklearnNonLinearLogisticRegression(SklearnLogisticRegression):
    """Logistic regression on the non-linearly-separable two-moons dataset.

    Features are expanded with PolynomialFeatures so that the linear
    classifier can fit a curved decision boundary.
    """

    def __init__(self, parser):
        super(SklearnNonLinearLogisticRegression, self).__init__(parser)
        self.n_samples = parser.n_samples        # size of the generated dataset
        self.noise = parser.noise                # gaussian noise of make_moons
        self.penalty = parser.penalty            # regularization type
        self.C = parser.C                        # inverse regularization strength
        self.degree = parser.degree              # default polynomial degree
        self.test_size = parser.test_size
        self.random_state = parser.random_state
        self.solver = parser.solver

    def _build_model(self, degree):
        """Pipeline: polynomial feature expansion followed by logistic regression."""
        return Pipeline([('poly', PolynomialFeatures(degree=degree)),
                         ('non_linear', LogisticRegression(solver=self.solver,
                                                           penalty=self.penalty,
                                                           C=self.C))])

    def generate_random_data(self):
        """Generate, plot and return the two-moons dataset (fixed seed 520)."""
        X, y = make_moons(n_samples=self.n_samples, noise=self.noise, random_state=520)
        plt.title(" moon datasets, n_samples = {}, noise = {}".format(self.n_samples, self.noise))
        plt.scatter(X[:, 0], X[:, 1], marker="o", c=y, s=25, edgecolors='k')
        plt.show()
        return X, y

    def split_dataset(self, X, y):
        """Train/test split for pre-built arrays (labels are already 1-d)."""
        assert 0 < self.test_size < 1, "Please choose right test size between 0 and 1"
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=self.test_size, random_state=self.random_state)
        return X_train, X_test, y_train, y_test

    def non_linear_regression(self, X, y):
        """Fit one model at the configured degree, plot it and report accuracy."""
        train_X, test_X, train_y, test_y = self.split_dataset(X, y)
        model = self._build_model(self.degree).fit(train_X, train_y)
        print("The coefficient of nonLinearRegression is: {}".format(model.named_steps['non_linear'].coef_))
        self.draw_result(model, train_X, train_y, "nonLinear_train, degree=" + str(self.degree), False)
        self.draw_result(model, test_X, test_y, "nonLinear_test, degree=" + str(self.degree), False)
        self.evaluate(model, train_X, train_y, test_X, test_y)

    def search_parameter(self, X, y, degrees):
        """Sweep the polynomial degree and plot train/test accuracy curves."""
        train_X, test_X, train_y, test_y = self.split_dataset(X, y)
        train_acc = []
        test_acc = []
        for degree in degrees:
            model = self._build_model(degree).fit(train_X, train_y)
            precision_train, precision_test = self.evaluate(model, train_X, train_y, test_X, test_y)
            train_acc.append(precision_train)
            test_acc.append(precision_test)
        self.draw_acc(degrees, train_acc, test_acc)

    def draw_acc(self, degrees, train_acc, test_acc):
        """Plot accuracy versus polynomial degree for both splits."""
        plt.plot(degrees, train_acc, label='train_acc')
        plt.plot(degrees, test_acc, label='test_acc')
        plt.xlabel('degree')
        plt.ylabel('precision')
        plt.legend()
        plt.show()


class MyBinaryClassification(MyLinearRegression):
    """Hand-written binary logistic regression trained with (mini-)batch GD.

    Inherits weight initialization (``initialize``) from ``MyLinearRegression``
    (defined in BGD.py). Weights ``w`` have one entry per feature plus a
    trailing bias term (see ``data_convert``).
    """

    def __init__(self, parser):
        self.initialization = parser.initialization
        self.learning_rate = parser.learning_rate
        self.mini_batch_size = parser.mini_batch_size
        self.gradient_descent_method = parser.gradient_descent_method  # 1: BGD, 2: MBGD

    def label_convert(self, y_train, y_test):
        """Map each split's labels onto {0, 1}: the larger label becomes 1.

        Returns the converted train/test labels plus the original class
        values of each split (needed to map predictions back).
        """
        class_train = np.unique(y_train)
        class_test = np.unique(y_test)
        Y_train = [1 if y_train[i] == np.max(class_train) else 0 for i in range(np.shape(y_train)[0])]
        Y_test = [1 if y_test[i] == np.max(class_test) else 0 for i in range(np.shape(y_test)[0])]
        return np.array(Y_train), np.array(Y_test), class_train, class_test

    def data_convert(self, X):
        """Append a bias column of ones to the feature matrix."""
        ones = np.ones((X.shape[0], 1), dtype=float)
        return np.concatenate([X, ones], axis=1)

    def loss(self, X, y, w):
        """Cross-entropy loss: sum(-y*z + log(1 + e^z)) with z = [X, 1] @ w.

        log(1 + e^z) is computed as np.logaddexp(0, z) — mathematically
        identical, but it does not overflow np.exp for large logits.
        """
        z = np.dot(self.data_convert(X), w)
        CrossEntropyLoss = np.sum(-1 * y * z + np.logaddexp(0.0, z))
        return CrossEntropyLoss

    def parameter_update(self, X_train, y_train, w):
        """Dispatch one epoch of weight updates to the configured GD variant."""
        if self.gradient_descent_method == 1:
            w = self.BGD_update(X_train, y_train, w)
        elif self.gradient_descent_method == 2:
            w = self.MBGD_update(X_train, y_train, w)
        else:
            raise ValueError('Please choose right gradient descent method', self.gradient_descent_method)
        return w

    def BGD_update(self, X_train, y_train, w):
        """One full-batch gradient step; w is updated in place and returned."""
        X_train = self.data_convert(X_train)
        diff = self.h(X_train, w) - y_train  # residuals from the pre-update w
        for j, theta in enumerate(w):
            w[j] = theta - self.learning_rate / X_train.shape[0] * np.sum(diff * X_train[:, j])
        return w

    def MBGD_update(self, X_train, y_train, w):
        """One pass of mini-batch gradient steps; w is updated in place.

        NOTE: the final partial batch (fewer than mini_batch_size samples)
        is skipped by the range step, matching the original behaviour.
        """
        assert 0 < self.mini_batch_size < X_train.shape[0], "Please input suitable mini batch size"
        X_train = self.data_convert(X_train)
        for i in range(0, X_train.shape[0] - self.mini_batch_size, self.mini_batch_size):
            X = X_train[i: i + self.mini_batch_size]
            y = y_train[i: i + self.mini_batch_size]
            diff = self.h(X, w) - y
            for j, theta in enumerate(w):
                w[j] = theta - self.learning_rate / X.shape[0] * np.sum(diff * X[:, j])
        return w

    def evaluate(self, X_train, y_train, X_test, y_test, class_train, class_test, w):
        """Threshold the sigmoid at 0.5, plot both splits and print accuracy.

        ``class_train``/``class_test`` are used to translate the 0/1
        predictions back to the original label values.
        """
        X_train = self.data_convert(X_train)
        X_test = self.data_convert(X_test)
        h_train = self.h(X_train, w)
        h_test = self.h(X_test, w)
        pre_train = [1 if h_train[i] >= 0.5 else 0 for i in range(len(h_train))]
        pre_test = [1 if h_test[i] >= 0.5 else 0 for i in range(len(h_test))]
        output_train = [np.max(class_train) if pre_train[i] == 1 else np.min(class_train) for i in range(len(pre_train))]
        output_test = [np.max(class_test) if pre_test[i] == 1 else np.min(class_test) for i in range(len(pre_test))]
        self.plotBestFit(w, X_train, y_train, " Train")
        self.plotBestFit(w, X_test, y_test, "Test")
        score_train = np.sum(pre_train == y_train) / len(y_train)
        score_test = np.sum(pre_test == y_test) / len(y_test)
        print("The predict class of train dataset is {}".format(output_train))
        print("The predict class of test dataset if {}".format(output_test))
        print("The precision of train dataset is {}".format(score_train))
        print("The precision of test dataset is {}".format(score_test))

    def h(self, X, w):
        """Sigmoid hypothesis: 1 / (1 + e^{-Xw})."""
        h = 1 / (1 + np.exp(-1 * np.dot(X, w)))
        return h

    def plotBestFit(self, weights, dataSet, labels, type):
        """Scatter the two classes and draw the fitted decision line.

        Expects dataSet with the bias column already appended, so the line
        is x1 = -(w0 * x0 + w2) / w1.
        """
        points = np.asarray(dataSet)  # np.mat is deprecated; plain arrays behave identically here
        labels_arr = np.array(labels)
        mask_pos = labels_arr == 1
        mask_neg = ~mask_pos
        pos = points[mask_pos, :]
        neg = points[mask_neg, :]
        fig = plt.figure(1)
        ax = fig.add_subplot(111)
        # Scatter plot: one marker style per class.
        ax.scatter(pos[:, 0], pos[:, 1], s=20, c='b', marker='.')
        ax.scatter(neg[:, 0], neg[:, 1], s=20, c='r', marker='v')
        # Decision line over the observed x0 range.
        x = np.arange(np.min(points[:, 0]),
                      np.max(points[:, 0]), 0.1)
        y = -(weights[0] * x + weights[2]) / weights[1]
        plt.plot(x, y, '-g')
        plt.title("Decision surface of LogisticRegression (%s)" % type)
        plt.show()


class MyOvR(MyBinaryClassification):
    """One-vs-rest ternary classifier built from three binary logistic models."""

    def __init__(self, parser):
        super(MyOvR, self).__init__(parser)

    def one_hot(self, y_train, y_test):
        """One-hot encode both label vectors.

        Labels 0 and 1 map to their unit vectors; any other label maps to
        class 2 (same fallback as the original per-split loops).
        """
        return self._encode(y_train), self._encode(y_test)

    def _encode(self, y):
        """Encode a single label vector: 0->[1,0,0], 1->[0,1,0], else->[0,0,1]."""
        rows = []
        for label in y:
            if label == 0:
                rows.append([1, 0, 0])
            elif label == 1:
                rows.append([0, 1, 0])
            else:
                rows.append([0, 0, 1])
        return np.array(rows)

    def evaluate(self, X_train, y_train, X_test, y_test, w1, w2, w3):
        """Predict the class with the highest normalized sigmoid score,
        plot both splits and print predictions and accuracies."""
        X_train = self.data_convert(X_train)
        X_test = self.data_convert(X_test)
        pre_train = self.softmax(X_train, w1, w2, w3)
        pre_test = self.softmax(X_test, w1, w2, w3)
        # Rows of the score matrix correspond to classes, hence argmax over axis 0.
        predict_train = np.argmax(pre_train, axis=0)
        predict_test = np.argmax(pre_test, axis=0)
        score_train = np.sum(predict_train == y_train) / len(y_train)
        score_test = np.sum(predict_test == y_test) / len(y_test)
        self.plotFit(w1, w2, w3, X_train, y_train, "Train")
        self.plotFit(w1, w2, w3, X_test, y_test, "Test")
        print("The predict of sample in train dataset is {}".format(predict_train))
        print("The predict of sample in test dataset is {}".format(predict_test))
        print("The precision of train dataset is {}".format(score_train))
        print("The precision of test dataset is {}".format(score_test))

    def softmax(self, X, w1, w2, w3):
        """Stack the three sigmoid scores and normalize each column to sum to 1.

        NOTE: this normalizes independent sigmoid outputs rather than
        applying a joint softmax over logits.
        """
        pre1 = self.h(X, w1)
        pre2 = self.h(X, w2)
        pre3 = self.h(X, w3)
        pre = np.vstack((pre1, pre2, pre3))
        pre = pre / pre.sum(axis=0)
        return pre

    def plotFit(self, w1, w2, w3, dataSet, labels, type):
        """Scatter the three classes and draw the three per-class decision lines.

        Expects dataSet with the bias column already appended.
        """
        points = np.asarray(dataSet)  # np.mat is deprecated; plain arrays behave identically here
        labels_arr = np.array(labels)
        fig = plt.figure(1)
        ax = fig.add_subplot(111)
        # One scatter style per class id.
        for cls, (color, marker) in enumerate((('b', '.'), ('r', 'v'), ('y', '*'))):
            cls_points = points[labels_arr == cls, :]
            ax.scatter(cls_points[:, 0], cls_points[:, 1], s=20, c=color, marker=marker)
        # Decision line of each binary classifier: x1 = -(w0 * x0 + w2) / w1.
        x = np.arange(np.min(points[:, 0]),
                      np.max(points[:, 0]), 0.1)
        for w in (w1, w2, w3):
            plt.plot(x, -(w[0] * x + w[2]) / w[1], '-g')
        plt.title("Decision surface of LogisticRegression (%s)" % type)
        plt.show()


if __name__ == "__main__":
    # ---- load hyper-parameters ----
    parser = get_arguments()
    assert len(parser.features) == 2, "Only two features can be processed!"

    # ---- data preprocessing ----
    # Load the iris dataset as a DataFrame.
    df = load_dataset()
    # Instantiate the preprocessing helper (renamed so the class name
    # MyPreprocessing is not shadowed by its own instance).
    preprocessor = MyPreprocessing(parser)
    # Visualize the dataset to check whether the classes look separable.
    preprocessor.draw_dataset(df)
    # Split the dataset into training and testing sets.
    X_train, X_test, y_train, y_test = preprocessor.split_dataset(df)

    # # ---- sklearn model training (disabled) ----
    # sk_lr = SklearnLogisticRegression(parser)
    # # plain LogisticRegression model
    # sk_lr.regression(X_train, y_train, X_test, y_test)
    # # one-vs-rest model
    # sk_lr.OvR(X_train, y_train, X_test, y_test)
    # # one-vs-one model
    # sk_lr.OvO(X_train, y_train, X_test, y_test)
    # # many-vs-many (output codes) model
    # sk_lr.MvM(X_train, y_train, X_test, y_test)

    # # ---- nonlinear model on the two-moons data (disabled) ----
    # nonlinear = SklearnNonLinearLogisticRegression(parser)
    # # generate the random moons dataset
    # X, y = nonlinear.generate_random_data()
    # # expand features and fit a binary logistic-regression model
    # nonlinear.non_linear_regression(X, y)
    # # search for the best polynomial degree
    # degrees = np.arange(0, 25)
    # nonlinear.search_parameter(X, y, degrees)

    # # ---- hand-written numpy binary classification (disabled) ----
    # assert len(parser.classes) == 2, "Only Binary Classification!"
    # binary = MyBinaryClassification(parser)
    # w = binary.initialize(X_train)
    # # convert labels to 0/1: larger original label -> 1, smaller -> 0
    # Y_train, Y_test, class_train, class_test = binary.label_convert(y_train, y_test)
    # train_loss = []
    # test_loss = []
    # for epoch in range(parser.iteration):
    #     train_loss.append(binary.loss(X_train, Y_train, w))
    #     test_loss.append(binary.loss(X_test, Y_test, w))
    #     w = binary.parameter_update(X_train, Y_train, w)
    # print("The coefficient of MyBinaryClassification is: {}".format(w))
    # draw_loss(train_loss, test_loss, "CrossEntropyLoss")
    # binary.evaluate(X_train, Y_train, X_test, Y_test, class_train, class_test, w)

    # ---- hand-written numpy multi-class classification ----
    assert len(parser.classes) == 3, "Only Ternary Classification!"
    # Renamed instance so the MyOvR class itself stays accessible.
    ovr = MyOvR(parser)
    onehot_train, onehot_test = ovr.one_hot(y_train, y_test)
    # One independent binary classifier (weight vector) per class.
    w1 = ovr.initialize(X_train)
    w2 = ovr.initialize(X_train)
    w3 = ovr.initialize(X_train)
    for epoch in range(parser.iteration):
        w1 = ovr.parameter_update(X_train, onehot_train[:, 0], w1)
        w2 = ovr.parameter_update(X_train, onehot_train[:, 1], w2)
        w3 = ovr.parameter_update(X_train, onehot_train[:, 2], w3)
    print("The coefficient of LogisticRegression1 is: {}\n"
          "The coefficient of LogisticRegression2 is: {}\n"
          "The coefficient of LogisticRegression3 is: {}".format(w1, w2, w3))
    ovr.evaluate(X_train, y_train, X_test, y_test, w1, w2, w3)
