import pandas as pd
import warnings
warnings.filterwarnings('ignore')

df = pd.read_csv('train1.csv')

# 1. Data inspection

print(df.head())
df.info()
print(df.describe())

# 2. Split into a training set and a test set (default 75/25 split)
from sklearn.model_selection import train_test_split
train_df,test_df = train_test_split(df,random_state=123)
# x_train,x_test,y_train,y_testt= train_test_split(x,y)

# Add a 'traintest' column marking whether a row belongs to the train or test split.
# NOTE(review): assigning into the split slices would normally raise a
# SettingWithCopyWarning; it is hidden because warnings are filtered at the top.
train_df['traintest']='train'
test_df['traintest']='test'

# Re-concatenate train_df and test_df into all_df (train rows come first;
# later positional filtering relies on this ordering).
all_df = pd.concat((train_df,test_df))

# Inspect the concatenated result
print(all_df.head())

# datetime format example: 2020-01-01 10:10:10
# Split the datetime string into its components.
# 1. Add a 'date' column holding only the date part.
# df['date']=str(df['datetime']).split()[0]
all_df['date']=all_df['datetime'].map(lambda x:x.split(' ')[0])
#The lambda (anonymous function) above and the named function f below do the same thing.
def f(x):
    """Return the date part of a 'YYYY-MM-DD HH:MM:SS' string (text before the first space)."""
    date_part, _, _ = x.partition(' ')
    return date_part

# NOTE(review): this recomputes the same 'date' column already created with
# .map() above — apply/map are interchangeable here; one of the two is redundant.
all_df['date']=all_df['datetime'].apply(lambda x:x.split(' ')[0])
print(all_df.head())
print(all_df.columns)
print(all_df['date'].value_counts())

# df['date']  '2020-01-01'  ->  ['2020', '01', '01']
# Extract the month as an integer.
def g(x):
    """Return the numeric month parsed from a 'YYYY-MM-DD' date string."""
    fields = x.split('-')
    month_text = fields[1]
    return int(month_text)
all_df['monthnum']=all_df['date'].map(g)
print(all_df['monthnum'].value_counts())

import calendar

# calendar.month_name[0] is an empty placeholder, so index i matches month number i.
print('calendar.month_name')
print(calendar.month_name)
for i in range(len(calendar.month_name)):
    print(i,calendar.month_name[i])

# Add a 'month' column with the English month name.
def h(x):
    """Map a month number (1-12) to its English name via calendar.month_name."""
    month_names = calendar.month_name
    return month_names[x]
all_df['month']= all_df['monthnum'].map(h)
print(all_df['month'].value_counts())

# Add a day-of-month column
# date format: 2020-01-01 -> third dash-separated field is the day
all_df['daynum']=all_df['date'].apply(lambda x:int(x.split('-')[2]))
print(all_df['daynum'].value_counts())

# Add a weekday column (English name); calendar.day_name: Monday=0 .. Sunday=6
print(calendar.day_name)
for i in range(len(calendar.day_name)):
    print(i,calendar.day_name[i])


# Convert each string element of the 'date' column into a datetime object.
import datetime
# strptime: "string parse time"
def k(x):
    """Parse a 'YYYY-MM-DD' string and return its weekday number (Monday=0 .. Sunday=6)."""
    parsed = datetime.datetime.strptime(x, "%Y-%m-%d")
    return parsed.weekday()

# Quick sanity check of strptime/weekday on a known date
print(datetime.datetime.strptime("2020-10-10","%Y-%m-%d"))
print(datetime.datetime.strptime("2020-10-10","%Y-%m-%d").weekday())

all_df['weekday_num']=all_df['date'].map(k)
print(all_df['weekday_num'].value_counts())

# Map weekday number back to its English name
all_df['weekday']=all_df['weekday_num'].map(lambda x:calendar.day_name[x])
print(all_df['weekday'].value_counts())

# Add an hour column
# e.g. '2020-10-10 10:10:10' -> time part -> hour field
all_df['hour']=all_df['datetime'].map(lambda x:int(x.split(' ')[1].split(':')[0]))
print(all_df['hour'].value_counts())

print(all_df[['hour','count']])
df2=all_df[['hour','count']]

import numpy as np
# NOTE(review): agg(np.sum) is deprecated in recent pandas — prefer agg('sum');
# confirm the pandas version in use. Also, the printed label below says
# 'colunms' (typo) but it is a runtime string kept as-is.
print(df2.groupby('hour').agg(np.sum))
print(type(df2.groupby('hour').agg(np.sum)))
print("df2.groupby('hour').agg(np.sum).index")
print(df2.groupby('hour').agg(np.sum).index)
print("df2.groupby('hour').agg(np.sum).colunms")
print(df2.groupby('hour').agg(np.sum).columns)

# Total ride count per hour of day
df3 = df2.groupby('hour').agg(np.sum)
# Plot: x-axis = hour, y-axis = total ride count for that hour
# df3.plot(kind='line')
import  matplotlib.pyplot as plt
# plt.show()

# Add an hour-of-day bucket column (captures peak / off-peak usage periods)

def h_s(x):
    """Bucket an hour of day (0-23) into a demand period.

    Returns:
        0 for 0-6  (overnight),
        1 for 7-10 (morning peak),
        2 for 11-16 (midday),
        3 for 17-20 (evening peak),
        4 otherwise (21-23, late evening).
    """
    # Chained comparisons replace the original `x>=a and x<=b` pairs.
    if 0 <= x <= 6:
        return 0
    elif 7 <= x <= 10:
        return 1
    elif 11 <= x <= 16:
        return 2
    elif 17 <= x <= 20:
        return 3
    else:
        return 4
all_df['hour_selection']=all_df['hour'].map(h_s)
print(all_df['hour_selection'].value_counts())


# Denoising
# 1. Draw box plots
# Four plots:
# 1. box plot of count alone
# 2. box plot of count per season
# 3. box plot of count per hour
# 4. box plot of count by workingday flag
# fig ,axes = plt.subplots(nrows=2,ncols=2)
# fig.set_size_inches(12,14)
import seaborn as sns
# # Take only the training-set rows of all_df
# df4=all_df[all_df['traintest']=='train']
# # df4=all_df.loc(all_df['traintest']=='train')
#
# sns.boxplot(data=df4,y='count',ax=axes[0][0])
# sns.boxplot(data=df4,x='season',y='count',ax=axes[0][1])
# sns.boxplot(data=df4,x='hour',y='count',ax=axes[1][0])
# sns.boxplot(data=df4,x='workingday',y='count',ax=axes[1][1])
#
# axes[0][0].set(ylabel='count',title='Box Plot on Count ')
# axes[0][1].set(xlabel ='season',ylabel='count',title='Box Plot on Count across Season')
# axes[1][0].set(xlabel ='day',ylabel='count',title='Box Plot on Count across Day')
# axes[1][1].set(xlabel ='workingday',ylabel='count',title='Box Plot on Count across Workingday')
#
# plt.show()

# Data inspection: summary statistics
print(all_df.describe())
# Outlier criterion: count - mean > 3 * std
# Mean of the 'count' column over the training rows
print(" 训练集中 count 列 求 均值")
print(all_df.loc[all_df['traintest']=='train',['count']].mean())
mean=all_df.loc[all_df['traintest']=='train',['count']].mean()
# Standard deviation of the 'count' column over the training rows
print(all_df.loc[all_df['traintest']=='train',['count']].std())
std = all_df.loc[all_df['traintest']=='train',['count']].std()

# NOTE(review): bad_ix uses a one-sided test (no abs) while good_ix below uses
# abs(); they are complements only because count >= 0 makes the low side
# unreachable here — confirm this assumption holds for the data.
print((all_df.loc[all_df['traintest']=='train',['count']]-mean)>(3*std))
bad_ix = (all_df.loc[all_df['traintest']=='train',['count']]-mean)>(3*std)
print("判断异常点的数量")
print(bad_ix.sum())


# Find the normal (non-outlier) sample points
print(np.abs(all_df.loc[all_df['traintest']=='train',['count']]-mean)<=(3*std))
good_ix=(np.abs(all_df.loc[all_df['traintest']=='train',['count']]-mean)<=(3*std))
print("正常点的数量")
print(good_ix.sum())
# Boolean mask of normal points (DataFrame)
print(good_ix)
print(type(good_ix))
#
## Underlying values of the mask (numpy array)
print(good_ix.values)
print(type(good_ix.values))

# print(all_df['traintest']=='train')
# Select the training rows into df5
df5 = all_df[all_df['traintest']=='train']
# Keep only the normal sample points of df5.
# NOTE(review): df5[good_ix.values] matches by POSITION, which is only valid
# because good_ix was computed over the same train slice in the same order.
# print(df5[good_ix.values].info())
# df6 holds the non-outlier training samples
df6 = df5[good_ix.values]

# Re-concatenate the filtered training rows with all test rows
all_df=pd.concat((df6,all_df[all_df['traintest']=='test']))
print(all_df.info())
print(all_df['traintest'].value_counts())
print(good_ix.sum())

# Data analysis: feature correlation (training rows, numeric columns only)
print(all_df.columns)
df7 = all_df.loc[all_df['traintest']=='train',['temp','atemp', 'humidity', 'windspeed','casual', 'registered', 'count']]
corr_mat = df7.corr()
sns.heatmap(corr_mat,annot=True)
plt.show()

# Lower-triangle heat map variant (kept for reference)
# mask=np.array(corr_mat)
# print('------------------mask')
# print(mask)
# print(type(mask))
# # mask=corr_mat.values
# # print(mask)
# # print(type(mask))
# mask[np.tril_indices_from(mask)]=False
#
# sns.heatmap(corr_mat,mask=mask,vmax=0.8,annot=True)
# plt.show()




# # Effect of season on total ride count
# df7 = all_df[all_df['traintest']=='train']
# g  = df7.groupby('season')
# df = g.agg(np.sum)
# s = df['count']
# s.plot(kind='bar')
# plt.show()

# # Effect of month on mean ride count
# s1 = df7.groupby('monthnum')['count'].mean()
# s1.plot(kind='bar')
# plt.show()

# Group by season and hour.
# NOTE(review): this groups ALL rows (train and test), not just the training
# set as the original comment claimed — confirm that is intended.
g3 = all_df.groupby(['hour','season'],sort=True)
print(g3.groups)

print('g3[count].mean()')
print(g3['count'].mean())

df8 = g3['count'].mean()
# Reset the (hour, season) MultiIndex into plain columns
df9 = df8.reset_index()
print(df9)

# Point plot (connected scatter): x = hour, y = mean count, one line per season
sns.pointplot(data=df9,x='hour',y='count',hue='season')
plt.show()


# Build a combined hour/weekday peak-period feature, hour_week_section,
# via a row-wise mapping function (map or apply).

# Working days (not Saturday/Sunday) are split into classes 0-4:
# 0-6:
# 7-10:
# 11-15:
# 16-20:
def hour_section(hour,weekday):
    """Combined hour-of-day / weekday demand bucket.

    Args:
        hour: integer hour of day, 0-23.
        weekday: English weekday name ('Monday' .. 'Sunday').

    Returns:
        Working days (Mon-Fri): 0 for 0-6, 1 for 7-10, 2 for 11-15,
        3 for 16-20, 4 otherwise (21-23).
        Weekends (Sat/Sun): 5 for 0-8, 6 for 9-20, 7 otherwise (21-23).
    """
    # Weekend buckets handled first (guard clause); chained comparisons
    # replace the original `x>=a and x<=b` pairs. Behavior is unchanged.
    if weekday in ('Saturday', 'Sunday'):
        if 0 <= hour <= 8:
            return 5
        elif 9 <= hour <= 20:
            return 6
        else:
            return 7
    if 0 <= hour <= 6:
        return 0
    elif 7 <= hour <= 10:
        return 1
    elif 11 <= hour <= 15:
        return 2
    elif 16 <= hour <= 20:
        return 3
    else:
        return 4

all_df['hour_week_section']=all_df.apply(lambda row:hour_section(row['hour'],row['weekday']),axis=1)


# Count holiday vs non-holiday rows.
# NOTE(review): the first line computes value_counts but discards the result (no-op).
all_df['holiday'].value_counts()
print(all_df.holiday.value_counts())

# Holiday vs non-holiday sample sizes are very unbalanced
# (roughly: 0 -> 10465, 1 -> 311),
# so compare MEAN ride counts rather than totals.
s9 =all_df.groupby('holiday')['count'].mean()
print(type(s9))
s9.plot(kind='bar')
plt.show()

# Effect of hour and workingday flag on mean ride count
g9=all_df.groupby(['hour','workingday'])
df10 = g9['count'].mean()
df11= df10.reset_index()
print(df11)

sns.pointplot(data=df11,x='hour',y='count',hue='workingday')
plt.show()

# Create a new feature combining hour and the workingday flag
def hour_workingday(hour,workingday):
    """Combine an hour and a workingday flag into one categorical key, e.g. '17_1'."""
    return f"{hour}_{workingday}"

all_df['hour_workingday']=all_df.apply(lambda x:hour_workingday(x['hour'],x['workingday']),axis=1)
print(all_df['hour_workingday'].value_counts())

# Analyse the effect of weather

all_df.groupby('weather')['count'].mean().plot(kind='bar')
plt.show()

# Correlation between temperature and ride count.
# NOTE(review): the first .corr() result is discarded (no-op); only the print matters.
all_df[['temp','count']].corr()
print(all_df[['temp','count']].corr())

# 1. Truncate temperature to an integer (discretise continuous data —
# suitable here because the value range is small, about 0-40).
all_df['temp_int']=all_df['temp'].apply(lambda x:int(x))

g11= all_df.groupby('temp_int')
s11=g11['count'].mean()
s11.plot(kind='bar')
plt.show()

# Mean ride count per (integer) humidity level

fig,ax= plt.subplots()
fig.set_size_inches(15,8)
pd.DataFrame({'humidity':all_df.humidity.apply(lambda x: int(x)),'count':all_df['count']}).groupby('humidity')['count'].mean().plot(kind='bar')
plt.show()


# Mean ride count per (integer) wind speed


fig,ax= plt.subplots()
fig.set_size_inches(10,5)
pd.DataFrame({'windspeed':all_df.windspeed.apply(lambda x: int(x)),'count':all_df['count']}).groupby('windspeed')['count'].mean().plot(kind='bar')
plt.show()

# Total ride count per (integer) wind speed

pd.DataFrame({'windspeed':all_df.windspeed.apply(lambda x: int(x)),'count':all_df['count']}).groupby('windspeed')['count'].sum().plot(kind='bar')
plt.show()

# End of data preprocessing
print(all_df.info())
# 5. Feature engineering
# With the analysis done, build features: normalisation, missing-value
# handling, new derived features, etc.

# Discrete data: 1. label encoding + 2. one-hot encoding
# TODO: replace the pandas one-hot below with sklearn.preprocessing.OneHotEncoder
#
# all_df = original all_df with one-hot columns for 'season' appended
all_df=pd.get_dummies(all_df,columns=['season'])

all_df=pd.get_dummies(all_df,columns=['weather'])

# 2. Standardise the continuous features.
# NOTE(review): the scaler is fit on train AND test rows together — this is
# test-set leakage; fitting on the training rows only would be cleaner.
from sklearn.preprocessing import StandardScaler

ss = StandardScaler()
all_df['temp_scaled']=ss.fit_transform(all_df[['temp']])
all_df['humidity_scaled'] = ss.fit_transform(all_df[['humidity']]) # humidity
all_df['windspeed_scaled'] = ss.fit_transform(all_df[['windspeed']]) # wind speed

# Compute how many days a date lies from the nearer of New Year's Day
# (start of its year) and the next New Year's Day.
import datetime

# e.g. 2020-10-10
def date_diff(date):
    """Days from *date* to the nearer New Year's Day.

    Args:
        date: a 'YYYY-MM-DD HH:MM:SS' timestamp string.

    Returns:
        The smaller of the absolute day distances to Jan 1 of the date's own
        year and Jan 1 of the following year.
    """
    fmt = '%Y-%m-%d %H:%M:%S'
    year = int(date[0:4])
    moment = datetime.datetime.strptime(date, fmt)
    this_new_year = datetime.datetime.strptime(str(year) + "-01-01 00:00:00", fmt)
    next_new_year = datetime.datetime.strptime(str(year + 1) + "-01-01 00:00:00", fmt)
    # min() replaces the original duplicated abs computations and if/else branch.
    return min(abs((moment - this_new_year).days),
               abs((moment - next_new_year).days))

all_df['date_newyear_num']=all_df.datetime.apply(date_diff)

# NOTE(review): the second argument to fit_transform below is passed as `y`
# and silently ignored by StandardScaler; the separate fit() call is redundant.
scaler = StandardScaler()
date_newyear_num_scale_param = scaler.fit(all_df[['date_newyear_num']])
all_df['date_newyear_num_scaled'] = scaler.fit_transform(all_df[['date_newyear_num']], date_newyear_num_scale_param)

# One-hot encode the remaining categorical features
all_df=pd.get_dummies(all_df,columns=['month'])
all_df=pd.get_dummies(all_df,columns=['hour'])
all_df=pd.get_dummies(all_df,columns=['weekday'])
all_df=pd.get_dummies(all_df,columns=['hour_workingday'])
all_df=pd.get_dummies(all_df,columns=['hour_week_section'])
print(all_df.columns.values)

# Persist the engineered features
all_df.to_csv("feature_engine.csv")

# Explicit list of model input columns: raw flags, one-hot season/weather,
# scaled continuous features, the New-Year distance, and the one-hot
# month/hour/weekday/hour_workingday/hour_week_section columns created above.
feature_columns=['holiday', 'workingday',
       'season_1', 'season_2', 'season_3', 'season_4', 'weather_1',
       'weather_2', 'weather_3', 'weather_4', 'temp_scaled',
       'humidity_scaled', 'windspeed_scaled',
       'date_newyear_num', 'date_newyear_num_scaled', 'month_April',
       'month_August', 'month_December', 'month_February', 'month_January',
       'month_July', 'month_June', 'month_March', 'month_May',
       'month_November', 'month_October', 'month_September', 'hour_0',
       'hour_1', 'hour_2', 'hour_3', 'hour_4', 'hour_5', 'hour_6',
       'hour_7', 'hour_8', 'hour_9', 'hour_10', 'hour_11', 'hour_12',
       'hour_13', 'hour_14', 'hour_15', 'hour_16', 'hour_17', 'hour_18',
       'hour_19', 'hour_20', 'hour_21', 'hour_22', 'hour_23',
       'weekday_Friday', 'weekday_Monday', 'weekday_Saturday',
       'weekday_Sunday', 'weekday_Thursday', 'weekday_Tuesday',
       'weekday_Wednesday', 'hour_workingday_0_0', 'hour_workingday_0_1',
       'hour_workingday_10_0', 'hour_workingday_10_1',
       'hour_workingday_11_0', 'hour_workingday_11_1',
       'hour_workingday_12_0', 'hour_workingday_12_1',
       'hour_workingday_13_0', 'hour_workingday_13_1',
       'hour_workingday_14_0', 'hour_workingday_14_1',
       'hour_workingday_15_0', 'hour_workingday_15_1',
       'hour_workingday_16_0', 'hour_workingday_16_1',
       'hour_workingday_17_0', 'hour_workingday_17_1',
       'hour_workingday_18_0', 'hour_workingday_18_1',
       'hour_workingday_19_0', 'hour_workingday_19_1',
       'hour_workingday_1_0', 'hour_workingday_1_1',
       'hour_workingday_20_0', 'hour_workingday_20_1',
       'hour_workingday_21_0', 'hour_workingday_21_1',
       'hour_workingday_22_0', 'hour_workingday_22_1',
       'hour_workingday_23_0', 'hour_workingday_23_1',
       'hour_workingday_2_0', 'hour_workingday_2_1', 'hour_workingday_3_0',
       'hour_workingday_3_1', 'hour_workingday_4_0', 'hour_workingday_4_1',
       'hour_workingday_5_0', 'hour_workingday_5_1', 'hour_workingday_6_0',
       'hour_workingday_6_1', 'hour_workingday_7_0', 'hour_workingday_7_1',
       'hour_workingday_8_0', 'hour_workingday_8_1', 'hour_workingday_9_0',
       'hour_workingday_9_1', 'hour_week_section_0', 'hour_week_section_1',
       'hour_week_section_2', 'hour_week_section_3', 'hour_week_section_4',
       'hour_week_section_5', 'hour_week_section_6', 'hour_week_section_7']


# Materialise the train/test matrices and persist them for later inspection.
# NOTE(review): y_casual / y_regstered (sic — typo for 'registered') are
# log1p-transformed, but y_all is NOT, while the saved y_all.csv IS log1p —
# the in-memory and on-disk targets are inconsistent; confirm which is intended.

X=all_df.loc[all_df.traintest=='train',feature_columns].values  # training-set features
y_casual=all_df.loc[all_df.traintest=='train'].casual.apply(lambda x: np.log1p(x)).values
y_regstered=all_df.loc[all_df.traintest=='train'].registered.apply(lambda x: np.log1p(x)).values
y_all=all_df.loc[all_df.traintest=='train','count'].values # training-set labels
X_test=all_df.loc[all_df.traintest=='test',feature_columns].values # test-set features
X_date=all_df.loc[all_df.traintest=='test','datetime'].values
y_test=all_df.loc[all_df.traintest=='test','count'].values # test-set labels
# Save the datasets to CSV

all_df.loc[all_df.traintest=='train',feature_columns].to_csv("X.csv")
all_df.loc[all_df.traintest=='train'].casual.apply(lambda x: np.log1p(x)).to_csv("y_casual.csv")
all_df.loc[all_df.traintest=='train'].registered.apply(lambda x: np.log1p(x)).to_csv("y_regstered.csv")
all_df.loc[all_df.traintest=='train','count'].apply(lambda x: np.log1p(x)).to_csv("y_all.csv")
all_df.loc[all_df.traintest=='test',feature_columns].to_csv("X_test.csv")
all_df.loc[all_df.traintest=='test','datetime'].to_csv("X_date.csv")

# 6. Model building and evaluation
# L1 regularisation (Lasso), alpha chosen by 5-fold cross-validated grid search

from sklearn.linear_model import Lasso, Ridge
from sklearn.model_selection import GridSearchCV
lasso = Lasso()
param_grid = {'alpha': [1, 0.5, 0.1, 0.01, 0.0001]}
model1 = GridSearchCV(lasso, param_grid=param_grid, cv=5)
model1.fit(X, y_all)
print(model1.best_params_)
# score() reports R^2 of the best estimator on the held-out test rows
print(model1.score(X_test, y_test))

# L2 regularisation (Ridge), same grid and CV scheme

ridge = Ridge()
param_grid = {'alpha': [1, 0.5, 0.1, 0.01, 0.0001]}
model2 = GridSearchCV(ridge, param_grid=param_grid, cv=5)
model2.fit(X, y_all)
print(model2.best_params_)
print(model2.score(X_test, y_test))


