#!/usr/bin/env python
# coding: utf-8
import os
from os.path import dirname, exists, expanduser, isdir, join, splitext
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet, RidgeCV, SGDRegressor
from sklearn.svm import SVR
from hic import load_hic
from hic_test import load_hic_test

# Human-readable model labels; index-aligned with `linears` below.
names = ["LinearRegression", "Ridge", "Lasso", "ElasticNet", "RidgeCV",
         "SGDRegressor", "SVR-linear", "SVR-poly", "SVR-rbf", "SVR-sigmoid"]

# Candidate regressors to compare, in the same order as `names`:
# the plain linear family first, then one SVR per kernel.
linears = [LinearRegression(), Ridge(), Lasso(), ElasticNet(),
           RidgeCV(), SGDRegressor()]
linears += [SVR(kernel=k) for k in ("linear", "poly", "rbf", "sigmoid")]
# Load the Health Insurance Costs train/test splits from the local
# `hic` / `hic_test` project modules (loader return shape not visible
# here; `.data` / `.target` attributes are used below).
print("加载 Health Insurance Costs 数据\n")
train = load_hic()
test = load_hic_test()

def prepare(X):
    """One-hot encode the categorical columns of the insurance frame.

    Parameters
    ----------
    X : pd.DataFrame
        Raw feature frame containing at least the columns
        "sex", "region" and "smoker".

    Returns
    -------
    pd.DataFrame
        Copy of ``X`` with the categorical columns replaced by their
        dummy-encoded counterparts (non-categorical columns keep their
        original order; dummy columns are appended after them).

    NOTE(review): encoding train and test independently can produce
    mismatched columns if a category value is absent from one split —
    align columns (e.g. ``reindex``) if the data does not guarantee
    full category coverage in both splits.
    """
    categorical_features = ["sex", "region", "smoker"]
    # `columns=` makes get_dummies drop the originals and append the
    # dummies itself — equivalent to the manual drop/concat in one step.
    return pd.get_dummies(X, columns=categorical_features)

X_train = prepare(train.data)
y_train = train.target
X_test = prepare(test.data)
y_test = test.target

from sklearn.model_selection import learning_curve

def loss_curve(model, X, y, cv=10, train_sizes=None):
    """Plot train/cross-validation MSE learning curves for ``model``.

    Parameters
    ----------
    model : estimator
        Any object implementing the sklearn fit/predict API.
    X, y : array-like
        Training features and target.
    cv : int
        Number of cross-validation folds (default 10).
    train_sizes : array-like of float, optional
        Fractions of the training set to evaluate. Defaults to 20
        evenly spaced fractions 0.05..1.0 — the same values the
        previous hard-coded list used. (Default is None, not a
        mutable list literal.)
    """
    if train_sizes is None:
        train_sizes = np.linspace(0.05, 1.0, 20)
    # Don't shadow the `train_sizes` argument with the resolved sizes.
    sizes, train_loss, test_loss = learning_curve(
        model, X, y, cv=cv, scoring='neg_mean_squared_error',
        train_sizes=train_sizes)
    # Scores are *negative* MSE; negate the per-size fold means to get
    # positive loss values for plotting.
    train_loss_mean = -np.mean(train_loss, axis=1)
    test_loss_mean = -np.mean(test_loss, axis=1)
    plt.figure()
    plt.plot(sizes, train_loss_mean, 'o-', color='r', label='Training')
    plt.plot(sizes, test_loss_mean, 'o-', color='g', label='Cross-validation')
    plt.xlabel("Training example quantities")
    plt.ylabel('Loss')
    plt.legend(loc='best')

def score_curve(model, X, y, cv=10, train_sizes=None):
    """Plot train/cross-validation score learning curves for ``model``.

    Uses the estimator's default scorer (whatever ``model.score``
    implements for this estimator type).

    Parameters
    ----------
    model : estimator
        Any object implementing the sklearn fit/predict API.
    X, y : array-like
        Training features and target.
    cv : int
        Number of cross-validation folds (default 10).
    train_sizes : array-like of float, optional
        Fractions of the training set to evaluate. Defaults to 20
        evenly spaced fractions 0.05..1.0 — the same values the
        previous hard-coded list used. (Default is None, not a
        mutable list literal.)
    """
    if train_sizes is None:
        train_sizes = np.linspace(0.05, 1.0, 20)
    # Don't shadow the `train_sizes` argument with the resolved sizes.
    sizes, train_scores, test_scores = learning_curve(
        model, X, y, cv=cv, train_sizes=train_sizes)
    # Mean score per training size, averaged across the cv folds.
    train_scores_mean = np.mean(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    plt.figure()
    plt.plot(sizes, train_scores_mean, 'o-', color='r', label='Training')
    plt.plot(sizes, test_scores_mean, 'o-', color='g', label='Cross-validation')
    plt.xlabel("Training example quantities")
    plt.ylabel('Scores')
    plt.legend(loc='best')

# Fit each candidate model, report its held-out test score (the
# estimator's default `.score` — R^2 for sklearn regressors), and
# display its score and loss learning curves.
for model_name, model in zip(names, linears):
    model.fit(X_train, y_train)
    test_score = model.score(X_test, y_test)
    print("%s %f" % (model_name, test_score))
    score_curve(model, X_train, y_train)
    loss_curve(model, X_train, y_train)
    plt.show()
