from matplotlib.colors import ListedColormap
from sklearn.svm import SVC

from LinearRegression import LinearRegression
from KNN import KNN
from LogisticRegression import LogisticRegression
from SupportVectorMachine import SupportVectorMachine
from ClassificationAndRegressiontree.CART import CartClassificationTree, CartRegressionTree

import json  # for json data
import sys  # get the args

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

from sklearn.preprocessing import StandardScaler, MinMaxScaler, LabelEncoder
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
from sklearn.decomposition import PCA

def linear_regression(data_path):
    """Train a gradient-descent linear regression on a housing CSV and
    return the original data plus fit metrics as a JSON string.

    Expects a comma-separated file with a header row; uses column 1 as the
    single feature and column 4 as the target (presumably RM -> MEDV —
    confirm against the dataset).

    Returns a JSON string on success, or a JSON error object on failure.
    (Previously the raw exception object was returned, which broke the
    JSON contract the caller prints to stdout.)
    """
    try:
        data = np.genfromtxt(data_path, delimiter=',', skip_header=True)
        X = data[:, [1]]
        y = data[:, 4]
        # NOTE(review): GDLinearRegression is assumed to be an attribute of
        # the imported LinearRegression object — confirm against the local
        # LinearRegression module.
        model = LinearRegression.GDLinearRegression(n_iter=10000, eta=0.001)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)

        # Standardize features; fit on the training split only to avoid
        # leaking test-set statistics into the scaler.
        ss = StandardScaler()
        ss.fit(X_train)
        X_train_std = ss.transform(X_train)
        X_test_std = ss.transform(X_test)

        model.train(X_train_std, y_train)
        y_pred = model.predict(X_test_std)

        # Regression quality on the held-out split. These were previously
        # computed and silently discarded; now included in the response.
        MSE = mean_squared_error(y_test, y_pred)
        MAE = mean_absolute_error(y_test, y_pred)
        R_Square = r2_score(y_test, y_pred)

        # json does not serialize numpy arrays, hence .tolist().
        res = {'algorithmModel': 'linear',
               'Original_data_x': X.tolist(),
               'Original_data_y': y.tolist(),
               'MSE': MSE,
               'MAE': MAE,
               'R_Square': R_Square}
        return json.dumps(res)
    except Exception as err:
        # Keep the JSON contract even on failure instead of returning the
        # exception object itself.
        return json.dumps({'algorithmModel': 'linear', 'error': str(err)})


def logistic_regression(data_path):
    """Train a logistic regression on the iris CSV and return the two
    plotted features, split by class, as a JSON string.

    Uses the first two columns as features and column 4 as the binary
    label. Returns a JSON string on success, or a JSON error object on
    failure (previously the raw exception object was returned, breaking
    the JSON contract the caller prints).
    """
    try:
        data = np.genfromtxt(data_path, delimiter=',', skip_header=True)
        X = data[:, :2]  # first two features (sepal length / sepal width)
        y = data[:, 4]

        X_train, X_test, y_train, y_test = train_test_split(X, y)

        # NOTE(review): this is the project-local LogisticRegression class,
        # accessed through the imported module object as in the original.
        log_reg = LogisticRegression.LogisticRegression(n_iter=3000, eta=0.001)

        # Standardize features; fit on the training split only.
        ss = StandardScaler()
        ss.fit(X_train)
        X_train_std = ss.transform(X_train)
        X_test_std = ss.transform(X_test)

        log_reg.train(X_train_std, y_train)
        y_pred = log_reg.predict(X_test_std)

        # Previously computed and discarded; now included in the response.
        accuracy = accuracy_score(y_test, y_pred)

        # Keys preserved byte-for-byte for the downstream consumer,
        # including the original 'sepqllength_x_one' typo.
        res = {'algorithmModel': 'logistic',
               'sepallength_x_zero': X[y == 0, 0].tolist(),
               'sepalwidth_y_zero': X[y == 0, 1].tolist(),
               'sepqllength_x_one': X[y == 1, 0].tolist(),
               'sepalwidth_y_one': X[y == 1, 1].tolist(),
               'accuracy': accuracy}
        return json.dumps(res)
        # The original also computed a decision boundary (and unused
        # data_X/data_y slices) that fed only commented-out plotting code;
        # that dead computation has been removed.
    except Exception as err:
        # Keep the JSON contract even on failure.
        return json.dumps({'algorithmModel': 'logistic', 'error': str(err)})


def knn(data_path):
    """Train a KD-tree KNN classifier on the tree dataset and return the
    raw feature/label data as a JSON string.

    Wrapped in try/except for consistency with the other algorithm entry
    points; returns a JSON error object on failure instead of raising.
    """
    try:
        data = pd.read_csv(data_path)
        values = data.values
        X = values[:, 2:4]  # root length / root width features

        # Encode species names into integer classes. LabelEncoder assigns
        # codes in sorted order: ginkgo=0, pear=1, poplar=2, which matches
        # the JSON key below.
        le = LabelEncoder()
        y = le.fit_transform(values[:, -1])

        X_train, X_test, y_train, y_test = train_test_split(X, y)

        # Min-max scale to [0, 1]; fit on the training split only.
        mms = MinMaxScaler()
        mms.fit(X_train)
        X_train_norm = mms.transform(X_train)
        X_test_norm = mms.transform(X_test)

        model_knn = KNN.KDTree(5)
        model_knn.train(X_train_norm, y_train)
        y_pred = model_knn.predict(X_test_norm)

        # Previously computed and discarded; now included in the response.
        accuracy = accuracy_score(y_test, y_pred)

        res = {'algorithmModel': 'knn',
               'rootwidth_and_rootlength': X.tolist(),
               'class_encoded(0:ginkgo;1:pear;2:poplar)': y.tolist(),
               'accuracy': accuracy}
        # The original also issued three live plt.scatter calls whose
        # figure was never shown or saved — removed as a stray side effect
        # in a CLI tool that only emits JSON.
        return json.dumps(res)
    except Exception as err:
        # Keep the JSON contract even on failure.
        return json.dumps({'algorithmModel': 'knn', 'error': str(err)})


def plot_decision_boundary(clf, StandardScaler, axes):
    """Evaluate *clf* over a 300x300 grid spanning *axes* = [x_min, x_max,
    y_min, y_max].

    NOTE(review): the second parameter shadows the sklearn StandardScaler
    class imported at module level — it is actually a *fitted* scaler
    instance. The parameter name is kept to preserve the interface.
    The contourf call is commented out, so this currently only computes
    the prediction grid (and a colormap) without drawing anything.
    Returns the exception object if anything raises, else None.
    """
    try:
        # Build a uniform 300x300 mesh over the requested axis window.
        grid_x = np.linspace(axes[0], axes[1], 300)
        grid_y = np.linspace(axes[2], axes[3], 300)
        mesh_x, mesh_y = np.meshgrid(grid_x, grid_y)
        # Stack the mesh into an (N, 2) array of coordinate points.
        points = np.c_[mesh_x.ravel(), mesh_y.ravel()]
        # Scale like the training data, predict, and restore mesh shape.
        grid_pred = clf.predict(StandardScaler.transform(points)).reshape(mesh_x.shape)
        custom_cmap = ListedColormap(['#fafab0', '#9898ff', '#a0faa0'])
      #  plt.contourf(x1, y1, y_pred, alpha=0.3, cmap=custom_cmap)
    except Exception as err:
        return err


def svm(data_path):
    """Run PCA on the breast-cancer dataset and return the first two
    principal components, split by diagnosis ('M' / 'B'), as a JSON string.

    NOTE(review): this function shadows the `svm` module imported from
    sklearn at the top of the file; renaming it would break the CLI
    dispatch in __main__, so the shadowing is only documented here.
    """
    data = pd.read_csv(data_path)
    values = data.values
    X = values[:, 2:31]  # feature columns
    y = values[:, 0]     # diagnosis labels ('M' / 'B')

    # Encode labels and standardize features before PCA.
    le = LabelEncoder()
    y = le.fit_transform(y)
    ss = StandardScaler()
    Xs = ss.fit_transform(X)

    # Project onto the top 10 principal components; only the first two
    # are reported back for plotting.
    pca = PCA(n_components=10)
    X_pca = pca.fit_transform(Xs)

    PCA_df = pd.DataFrame()
    PCA_df['PCA_1'] = X_pca[:, 0]
    PCA_df['PCA_2'] = X_pca[:, 1]

    res = {'algorithmModel': 'svm',
           'type1_x': PCA_df['PCA_1'][data.diagnosis == 'M'].tolist(),
           'type1_y': PCA_df['PCA_2'][data.diagnosis == 'M'].tolist(),
           'type2_x': PCA_df['PCA_1'][data.diagnosis == 'B'].tolist(),
           'type2_y': PCA_df['PCA_2'][data.diagnosis == 'B'].tolist()}
    # The original went on to split the data, train linear and RBF SVC
    # models, and predict over a dense meshgrid — but every one of those
    # results was discarded (the plotting code was commented out), so that
    # expensive dead computation has been removed.
    return json.dumps(res)


def decision_plot(X_train, y_train, n_neighbors, weights):
    """Placeholder for a KNN decision-surface plot; not yet implemented.

    Only defines the intended mesh step size and returns None — none of
    the parameters are used yet.
    """
    mesh_step = 0.02  # step size in the mesh


def CART(data_path):
    """Load the forest-fires dataset and return its features and encoded
    labels as a JSON string.

    Returns a JSON string on success, or a JSON error object on failure
    (previously the raw exception object was returned).
    """
    try:
        data = pd.read_csv(data_path)
        # Drop the 'year' column before splitting into features/target.
        del data['year']

        # First 12 columns are features; the last column is the target.
        X = data.iloc[:, 0:12].values
        y = data.iloc[:, -1].values

        # Normalize target labels: strip embedded spaces, then encode to
        # integer class codes.
        labels = y.astype(str)
        for i in range(len(labels)):
            labels[i] = labels[i].replace(' ', '')
        le = LabelEncoder()
        y = le.fit_transform(labels)

        res = {'algorithmModel': 'cart',
               'X': X.tolist(),
               'y': y.tolist()}
        return json.dumps(res)
        # NOTE(review): the original created an unused StandardScaler and,
        # AFTER its return statement, contained unreachable code that split
        # the data, trained a CartClassificationTree, and computed accuracy.
        # That code could never execute and has been removed; restore it
        # BEFORE the return if training was actually intended here.
    except Exception as err:
        # Keep the JSON contract even on failure.
        return json.dumps({'algorithmModel': 'cart', 'error': str(err)})


if __name__ == '__main__':
    # Entry point invoked from the Java side:
    #   argv[1] = algorithm model name, argv[2] = dataset path.
    # Manual smoke tests:
    # logistic_regression('../data/iris.csv')
    # linear_regression('../data/housing.csv')
    # knn('../data/tree.csv')
    # svm('../data/cancer.csv')
    # CART('../data/forestfires.csv')
    args = sys.argv
    if len(args) < 3:
        # Guard against missing arguments (previously an IndexError).
        print("{\"json result\":\"usage: <model> <data_path>\"}")
        sys.exit(1)
    model = args[1]
    path = args[2]
    # Dispatch table keeps the model-name -> handler mapping in one place.
    handlers = {
        'linear': linear_regression,
        'knn': knn,
        'CART': CART,
        'logistic': logistic_regression,
        'svm': svm,
    }
    if model in handlers:
        print(handlers[model](path))
    else:
        # Unknown model: echo the arguments back (message format unchanged).
        str_ = model + "--path--" + path
        print("{\"json result\":", str_, "}")