import pandas as pd

"""
The most important part of the Pandas library is the DataFrame. 
A DataFrame holds the type of data you might think of as a table. 
This is similar to a sheet in Excel, or a table in a SQL database.
"""

# Directory that holds the downloaded Kaggle input datasets.
base_dic = "D:/MyPythonProject/kaggle/kaggle_study/input"
# Keep the CSV's full path in a variable for easier access later on.
melbourne_file_path = f"{base_dic}/melbourne-housing-snapshot/melb_data.csv"
# Read the CSV into a DataFrame named melbourne_data.
melbourne_data = pd.read_csv(melbourne_file_path)
# Print a statistical summary of every numeric column.
# print(melbourne_data.describe())

# Selecting Data for Modeling: list the available column names.
print(melbourne_data.columns)
# print(melbourne_data.head)

# The Melbourne data has some missing values (houses for which some
# variables weren't recorded). Handling missing data properly is covered
# in a later tutorial; for now take the simplest option and drop any row
# that has a missing value.
# dropna drops missing values (think of na as "not available");
# axis=0 removes rows, not columns.
melbourne_data = melbourne_data.dropna(axis=0)

"""
Selecting The Prediction Target
You can pull out a variable with dot-notation. 
This single column is stored in a Series, which is broadly like a DataFrame with only a single column of data.

We'll use the dot notation to select the column we want to predict,
 which is called the prediction target. 
 By convention, the prediction target is called y. 
 So the code we need to save the house prices in the Melbourne data is
"""
# Pull the prediction target out by column name. By convention the target
# is called y; here it is the sale price we want the model to predict.
y = melbourne_data['Price']

# Choose the feature columns the model will learn from.
# NOTE: 'Lattitude' and 'Longtitude' are misspelled in the dataset itself.
melbourne_features = ['Rooms', 'Bathroom', 'Landsize', 'Lattitude', 'Longtitude']
X = melbourne_data[melbourne_features]
# Quick sanity checks on the feature matrix.
print(X.describe())
print(X.head())

"""
You will use the scikit-learn library to create your models.
 When coding, this library is written as sklearn, as you will see in the sample code.
  Scikit-learn is easily the most popular library for modeling the types of data typically stored in DataFrames.

The steps to building and using a model are:

Define: What type of model will it be? A decision tree? Some other type of model? 
Some other parameters of the model type are specified too.
Fit: Capture patterns from provided data. This is the heart of modeling.
Predict: Just what it sounds like
Evaluate: Determine how accurate the model's predictions are.
"""
from sklearn.tree import DecisionTreeRegressor

# Define the model. Fixing random_state guarantees identical results on
# every run, which makes the tutorial reproducible.
melbourne_model = DecisionTreeRegressor(random_state=1)

# Fit: learn the patterns relating the features X to the target y.
melbourne_model.fit(X, y)

# Predict with the fitted model.
print("Making predictions for the following 5 houses:")
print(X.head())
print("The predictions are")
# Predict on just the first five rows.
print(melbourne_model.predict(X.head()))
# Predict on the whole feature matrix.
# print(melbourne_model.predict(X))

"""
 learn to use model validation to measure the quality of your model.
  Measuring model quality is the key to iteratively improving your models.
  MAE: Mean Absolute Error(On average, our predictions are off by about X.)
"""
from sklearn.metrics import mean_absolute_error

# In-sample MAE: the model is scored on the very rows it was trained on,
# so this number is overly optimistic (see the discussion below).
in_sample_predictions = melbourne_model.predict(X)
print(mean_absolute_error(y, in_sample_predictions))

"""
The measure we just computed can be called an "in-sample" score. 
We used a single "sample" of houses for both building the model and evaluating it. 
Here's why this is bad.

Imagine that, in the large real estate market, door color is unrelated to home price.

However, in the sample of data you used to build the model, 
all homes with green doors were very expensive. 
The model's job is to find patterns that predict home prices, 
so it will see this pattern, and it will always predict high prices for homes with green doors.

Since this pattern was derived from the training data, 
the model will appear accurate in the training data.

But if this pattern doesn't hold when the model sees new data, 
the model would be very inaccurate when used in practice.

Since models' practical value come from making predictions on new data, 
we measure performance on data that wasn't used to build the model. 
The most straightforward way to do this is to exclude some data from the model-building process,
 and then use those to test the model's accuracy on data it hasn't seen before. 
 This data is called validation data.
"""
from sklearn.model_selection import train_test_split

# Split features and target into training and validation sets. The split
# is driven by a random number generator; passing a numeric random_state
# guarantees the exact same split every time the script runs.
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=0)
# Define a fresh decision tree.
melbourne_model = DecisionTreeRegressor()
# Fit on the training split only.
melbourne_model.fit(train_X, train_y)

# Score on data the model has never seen: the validation split.
val_predictions = melbourne_model.predict(val_X)
print(mean_absolute_error(val_y, val_predictions))

# There are many ways to improve this model,
# such as experimenting to find better features or different model types.


"""
Underfitting and Overfitting

Models can suffer from either:
Overfitting: capturing spurious patterns that won't recur in the future,
                leading to less accurate predictions, or
Underfitting: failing to capture relevant patterns, 
                again leading to less accurate predictions.
                    
We use validation data, which isn't used in model training, 
to measure a candidate model's accuracy. 
This lets us try many candidate models and keep the best one.
"""


def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):
    """Fit a decision tree capped at max_leaf_nodes and return its
    mean absolute error on the validation set.

    Args:
        max_leaf_nodes: cap on the tree's leaf count (controls over/underfitting).
        train_X, train_y: training features and target.
        val_X, val_y: validation features and target.

    Returns:
        The validation MAE as a float.
    """
    tree = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    tree.fit(train_X, train_y)
    return mean_absolute_error(val_y, tree.predict(val_X))


# Sweep several candidate tree sizes and report the validation MAE of each,
# to pick the size that best balances under- and overfitting.
for candidate_size in [5, 50, 500, 5000]:
    candidate_mae = get_mae(candidate_size, train_X, val_X, train_y, val_y)
    print("Max leaf nodes: %d  \t\t Mean Absolute Error:  %d" % (candidate_size, candidate_mae))

# Fit the model with best_tree_size. Fill in argument to make optimal size
# final_model = DecisionTreeRegressor(max_leaf_nodes=best_tree_size, random_state=1)

# fit the final model
# final_model.fit(X, y)

"""
You've tuned this model and improved your results. 
But we are still using Decision Tree models,
which are not very sophisticated by modern machine learning standards. 
In the next step you will learn to use Random Forests to improve your models even more.
 
The random forest uses many trees,
and it makes a prediction by averaging the predictions of each component tree. 
It generally has much better predictive accuracy than a single decision tree 
and it works well with default parameters.
If you keep modeling, you can learn more models with even better performance, 
but many of those are sensitive to getting the right parameters.
"""
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

# A random forest averages the predictions of many decision trees; even
# with default parameters it usually beats a single tuned tree.
forest_model = RandomForestRegressor(random_state=1)
forest_model.fit(train_X, train_y)
forest_predictions = forest_model.predict(val_X)
print(mean_absolute_error(val_y, forest_predictions))

"""
Give random_state a fixed value wherever the parameter is accepted: running
this code repeatedly then produces exactly the same results, and other
people can reproduce your process by running the same code.

If the parameter is left unset, a random seed is chosen each time, so the
results will differ from run to run.

Although random_state could be tuned like a hyperparameter, a model that
happens to look good on the training data for one particular seed will not
necessarily do well on unseen data, so in practice an arbitrary fixed value
is chosen for random_state.
"""
