# 导入必要的库
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from xgboost import XGBRegressor
from sklearn.model_selection import cross_val_score, GridSearchCV

# Pandas display options and warning suppression.
# NOTE(review): filterwarnings('ignore') silences ALL warnings (including
# sklearn/xgboost deprecations) — consider narrowing the filter.
pd.set_option('display.max_columns', None)
warnings.filterwarnings('ignore')

# Load the raw train / test data (space-separated CSV files).
df_train = pd.read_csv('myspace/used_car_train_20200313.csv', sep=' ')
df_test = pd.read_csv('myspace/used_car_testB_20200421.csv', sep=' ')

# Preprocessing: the sample identifier carries no predictive signal, drop it.
train_data = df_train.drop(columns=['SaleID'])
test_data = df_test.drop(columns=['SaleID'])

# Quick look at both frames.
for frame in (train_data, test_data):
    print(frame.head())
for frame in (train_data, test_data):
    print(frame.info())

# 'notRepairedDamage' uses '-' as a missing-value placeholder; convert it to
# NaN and cast the column to float in one chained pass per frame.
for frame in (train_data, test_data):
    frame['notRepairedDamage'] = (
        frame['notRepairedDamage'].replace('-', np.nan).astype('float64')
    )

# Summary statistics of the test set.
print(test_data.describe())

# 'seller' and 'offerType' add no information for this task — remove them
# from both frames.
for frame in (train_data, test_data):
    frame.drop(['seller', 'offerType'], axis=1, inplace=True)

# Shapes after the drop.
print(train_data.shape, test_data.shape)

# --- Outlier handling ----------------------------------------------------
# NOTE(review): the original code computed `count()` of power > 600 and a
# correlation ranking here and discarded both results (no-op expression
# statements, presumably leftover notebook cells); they are removed.

def _cap_power(power, limit=600):
    """Return *power* with values above *limit* replaced by the series median.

    The median is computed on the ORIGINAL series (outliers included),
    matching the code this replaces. The median is evaluated once instead of
    inside a per-element lambda (the original recomputed it for every row,
    making the replacement O(n^2)).
    """
    return power.mask(power > limit, power.median())

# Drop weakly informative / redundant columns.
# NOTE(review): presumably chosen from a correlation study of the raw
# features — confirm against the original analysis.
drop_cols = ['v_2', 'v_6', 'v_1', 'v_14', 'v_13', 'v_7', 'name', 'creatDate']
train_data.drop(drop_cols, axis=1, inplace=True)
test_data.drop(drop_cols, axis=1, inplace=True)

# Shapes and correlation of the remaining features with the target.
print(train_data.shape, test_data.shape)
print(train_data.corr().unstack()['price'].sort_values(ascending=False))

# Cap extreme 'power' values; each frame uses its own median, as before.
train_data['power'] = _cap_power(train_data['power'])
test_data['power'] = _cap_power(test_data['power'])

# Histograms of 'power' after capping.
# NOTE(review): both histograms are drawn on the same implicit axes and the
# script never calls plt.show(), so in a plain script run nothing is
# displayed — presumably written for a notebook; confirm.
train_data['power'].plot.hist()
test_data['power'].plot.hist()

# Columns that still contain missing values, with their counts.
print(train_data.isnull().sum()[train_data.isnull().sum() > 0])
print(test_data.isnull().sum()[test_data.isnull().sum() > 0])

# Fix the single missing 'model' value in the training set.
# NOTE(review): row 38424 / value 157.0 are hard-coded from a manual
# inspection of the raw data — confirm against the source dataset.
train_data.loc[38424, 'model'] = 157.0

# Missing-value counts and category distributions for 'bodyType'.
print(train_data['bodyType'].isnull().value_counts())
print(test_data['bodyType'].isnull().value_counts())
print(train_data['bodyType'].value_counts())
print(test_data['bodyType'].value_counts())

# Fill missing 'bodyType' with 0.0. fillna replaces the original
# per-element map(lambda ...): same result, one vectorised pass.
train_data.loc[:, 'bodyType'] = train_data['bodyType'].fillna(0.0)
test_data.loc[:, 'bodyType'] = test_data['bodyType'].fillna(0.0)

# Fill missing 'fuelType' with the modal fuelType of the row's bodyType group.

def _fill_fuel_by_bodytype(frame,
                           body_types=(0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0)):
    """In place: for each bodyType group, fill NaN fuelType with the group mode.

    Replaces the original two-dict bookkeeping (mode per group + index lists
    per group) with a direct masked assignment. Also guards against a group
    whose fuelType values are all missing — the original `mode()[0]` would
    raise IndexError there; such rows are now left as NaN.
    """
    for bt in body_types:
        # Series.mode() ignores NaN, so this is the mode of observed values.
        modes = frame.loc[frame['bodyType'] == bt, 'fuelType'].mode()
        if modes.empty:
            continue  # no observed fuelType in this group: nothing to fill
        needs_fill = (frame['bodyType'] == bt) & (frame['fuelType'].isnull())
        frame.loc[needs_fill, 'fuelType'] = modes[0]

_fill_fuel_by_bodytype(train_data)
_fill_fuel_by_bodytype(test_data)

# --- 'gearbox' missing values -------------------------------------------
print(train_data['gearbox'].isnull().value_counts())
print(test_data['gearbox'].isnull().value_counts())
print(train_data['gearbox'].value_counts())
print(test_data['gearbox'].value_counts())

# Fill missing 'gearbox' with 0.0. fillna replaces the original
# per-element map(lambda ...): same result, one vectorised pass.
train_data.loc[:, 'gearbox'] = train_data['gearbox'].fillna(0.0)
test_data.loc[:, 'gearbox'] = test_data['gearbox'].fillna(0.0)

# Frame summaries after the fill.
print(train_data.info())
print(test_data.info())

# --- 'notRepairedDamage' missing values ---------------------------------
print(train_data['notRepairedDamage'].isnull().value_counts())
print(test_data['notRepairedDamage'].isnull().value_counts())
print(train_data['notRepairedDamage'].value_counts())
print(test_data['notRepairedDamage'].value_counts())

# Correlation of the column with the target, for reference.
print(train_data[['notRepairedDamage', 'price']].corr()['price'])

# Fill missing 'notRepairedDamage' with 0.0. fillna replaces the original
# per-element map(lambda ...): same result, one vectorised pass.
train_data.loc[:, 'notRepairedDamage'] = train_data['notRepairedDamage'].fillna(0.0)
test_data.loc[:, 'notRepairedDamage'] = test_data['notRepairedDamage'].fillna(0.0)

# Frame summaries after the fill.
print(train_data.info())
print(test_data.info())

# Candidate regressors; random_state pinned for reproducibility.
rf_model = RandomForestRegressor(
    n_estimators=100,
    max_depth=8,
    random_state=1,
)
xgb_model = XGBRegressor(
    n_estimators=150,
    max_depth=8,
    learning_rate=0.1,
    random_state=1,
)
gbdt_model = GradientBoostingRegressor(subsample=0.8, random_state=1)

# Feature matrix and target vector.
X = train_data.drop(['price'], axis=1)
y = train_data['price']

# Cross-validated model comparison. The three copies of the same
# cross_val_score call are collapsed into one helper; the score variables
# and printed messages are unchanged.

def _cv_mae(model, features, target, folds=5):
    """Return the mean cross-validated MAE (as a positive number).

    cross_val_score returns the negated MAE under
    'neg_mean_absolute_error', hence the sign flip.
    """
    return -1 * cross_val_score(model, features, target,
                                scoring='neg_mean_absolute_error',
                                cv=folds).mean()

score_rf = _cv_mae(rf_model, X, y)
print('随机森林模型的平均MAE为：', score_rf)

score_xgb = _cv_mae(xgb_model, X, y)
print('XGBoost模型的平均MAE为：', score_xgb)

score_gbdt = _cv_mae(gbdt_model, X, y)
print('梯度提升树模型的平均MAE为：', score_gbdt)

# Hyper-parameter search for the XGBoost model.
search_space = {
    'n_estimators': [150, 200, 250],
    'learning_rate': [0.1],
    'subsample': [0.5, 0.8],
}

grid_search = GridSearchCV(
    estimator=xgb_model,
    param_grid=search_space,
    scoring='neg_mean_absolute_error',
    cv=3,
)
grid_search.fit(X, y)

# Report the search outcome.
print('最佳参数为：\n', grid_search.best_params_)
print('最佳分数为：\n', grid_search.best_score_)
print('最佳模型为：\n', grid_search.best_estimator_)

# Predict on the test set with the refit best estimator and save the
# submission file.
predictions = grid_search.predict(test_data)
result_df = pd.DataFrame({'SaleID': df_test['SaleID'], 'price': predictions})
result_df.to_csv('downloads/55214/My_submission.csv', index=False)