import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from linear_regression1 import LinearRegression
import matplotlib
from utils.features import prepare_for_training
# Degree of extra polynomial features to generate (0 = none)
polynomial_degree=0
# Degree of extra sinusoid features to generate (0 = none)
sinusoid_degree=0
# Whether to normalize (standardize) features during preprocessing
is_normalize=True
# Number of gradient-descent iterations
num_iters=500
# Learning rate (gradient-descent step size)
learn_rate=0.01
def get_train_test_data(data, frac=0.8):
    """Randomly split *data* into train and test subsets.

    Args:
        data: Source DataFrame.
        frac: Fraction of rows sampled into the training set (default 0.8).

    Returns:
        (train_data, test_data): the sampled training rows and the
        remaining rows, both DataFrames with their original indices.
    """
    # Bug fix: the original hard-coded frac=0.8 here and ignored the parameter.
    train_data: pd.DataFrame = data.sample(frac=frac)
    # Everything not sampled into the training set becomes the test set.
    test_data: pd.DataFrame = data.drop(train_data.index)
    return train_data, test_data
def draw_plot(x_train,y_train,x_test, y_test,feature_column_name,label_column_name,
              label_train='Train data', label_test='Test data',title="Happy"):
    """Scatter-plot training and test points on shared axes and show the figure.

    Args:
        x_train, y_train: Training feature/label arrays.
        x_test, y_test: Test feature/label arrays.
        feature_column_name: X-axis label text.
        label_column_name: Y-axis label text.
        label_train: Legend label for the training scatter.
        label_test: Legend label for the test scatter.
        title: Figure title (default "Happy", matching the original behavior).
    """
    plt.scatter(x_train, y_train, label=label_train)
    plt.scatter(x_test, y_test, label=label_test)
    plt.xlabel(feature_column_name)
    plt.ylabel(label_column_name)
    # Bug fix: the original hard-coded 'Happy' and ignored the `title` parameter.
    plt.title(title)
    plt.legend()
    plt.show()

if __name__ == '__main__':
    data: pd.DataFrame = pd.read_csv('../data/world-happiness-report-2017.csv')
    # Feature column(s) used as model input.
    feature_column_name = ['Economy..GDP.per.Capita.']
    # Label column(s) the model predicts.
    label_column_name = ['Happiness.Score']
    (train_data, test_data) = get_train_test_data(data)
    # Training features/labels as arrays of shape (n_train, 1).
    x_train: np.ndarray = train_data[feature_column_name].values
    y_train: np.ndarray = train_data[label_column_name].values
    # Test features/labels as arrays of shape (n_test, 1).
    x_test: np.ndarray = test_data[feature_column_name].values
    y_test: np.ndarray = test_data[label_column_name].values
    # Pass scalar strings (not one-element lists) so axis labels render cleanly.
    draw_plot(x_train, y_train, x_test, y_test,
              feature_column_name=feature_column_name[0],
              label_column_name=label_column_name[0],
              label_train='Train data', label_test='Test data', title="Happy")

    # Preprocess the training features; returns the processed matrix
    # (with an extra bias column) plus the normalization statistics.
    (data_processed,
     features_mean,
     features_deviation) = prepare_for_training(x_train, polynomial_degree,
                                                sinusoid_degree, is_normalize)
    # One theta entry per processed feature column (including the bias term),
    # initialized to zeros, shape (num_features, 1).
    num_features = data_processed.shape[1]
    theta: np.ndarray = np.zeros((num_features, 1))
    # Use the module-level degree constants instead of re-hard-coding 0,
    # so the model configuration stays consistent with the preprocessing above.
    regression: LinearRegression = LinearRegression(data_processed, y_train, features_mean,
                                                    features_deviation, theta,
                                                    learn_rate=learn_rate,
                                                    polynomial_degree=polynomial_degree,
                                                    sinusoid_degree=sinusoid_degree,
                                                    is_normalize=is_normalize)

    (theta, cost_history) = regression.train(num_iters=num_iters)

    print('开始时的损失：', cost_history[0])
    print('训练后的损失：', cost_history[-1])

    # Plot the gradient-descent cost curve over iterations.
    plt.plot(range(num_iters), cost_history)
    plt.xlabel('Iter')
    plt.ylabel('cost')
    plt.title('GD')
    plt.show()

    # Evenly spaced feature values spanning the training range, for the fit line.
    predictions_num = 100
    x_predictions = np.linspace(x_train.min(), x_train.max(),
                                predictions_num).reshape(predictions_num, 1)
    # NOTE(review): this normalizes the prediction grid with its OWN mean/std,
    # not the training-set statistics — confirm prepare_for_training/
    # LinearRegression handle this, or predictions may be slightly skewed.
    x_predictions_normal = prepare_for_training(x_predictions, polynomial_degree,
                                                sinusoid_degree, is_normalize)[0]

    y_predictions = LinearRegression.hypothesis(x_predictions_normal, theta)
    plt.scatter(x_train, y_train, label='Train data')
    plt.scatter(x_test, y_test, label='Test data')
    plt.plot(x_predictions, y_predictions, 'r', label='Prediction')
    plt.xlabel(feature_column_name[0])
    plt.ylabel(label_column_name[0])
    plt.title('Happy')
    plt.legend()
    plt.show()
    plt.close()