{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 用线性回归模型或者lightGBM预测前10天的新增"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "#\n",
    "import os\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from pandas.core.frame import DataFrame\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn import linear_model\n",
    "from sklearn.metrics import mean_absolute_error,mean_squared_error\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import accuracy_score\n",
    "from xgboost import XGBRegressor\n",
    "from sklearn.utils import shuffle\n",
    "from sklearn.model_selection import GridSearchCV, KFold\n",
    "from sklearn import preprocessing\n",
    "from sklearn.metrics import confusion_matrix\n",
    "import lightgbm as lgb\n",
    "#import xgboost as xgb\n",
    "from lightgbm import LGBMRegressor \n",
    "import time\n",
    "import warnings\n",
    "from math import sqrt\n",
    "from matplotlib import pyplot\n",
    "from sklearn.linear_model import ElasticNet, Lasso,  BayesianRidge, LassoLarsIC\n",
    "from sklearn.ensemble import RandomForestRegressor,  GradientBoostingRegressor, AdaBoostRegressor\n",
    "from sklearn.linear_model import Ridge\n",
    "warnings.filterwarnings(\"ignore\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training data covers May 1 through June 29.\n",
    "CITY_IDS=['A','B','C','D','E','F','G','H','I','J','K']\n",
    "def clear_data(df):\n",
    "    # Keep only the target series ('index') plus identifier columns;\n",
    "    # drop the weather and mobility covariates, which the AR model below\n",
    "    # does not use.\n",
    "    return df.drop(['humidity','wind_force','wind_direction',\n",
    "                    'wind_speed','weather_type','temperature','ext_transfer','iner_transfer','density','migration'],axis=1)\n",
    "# Load each city's feature file once (replaces 11 copy-pasted\n",
    "# read_csv calls), then strip the unused covariate columns.\n",
    "train_dic={}\n",
    "train_data={}\n",
    "for city_id in CITY_IDS:\n",
    "    train_dic[city_id]=pd.read_csv('dataset/features/features_%s.csv'%city_id)\n",
    "    train_data[city_id]=clear_data(train_dic[city_id])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): model_lgbm is defined but never used below -- the\n",
    "# forecasting loop instantiates LinearRegression as model_choose.\n",
    "# Kept so a tree model can be swapped in easily.\n",
    "model_lgbm = LGBMRegressor(n_estimators=10, metric='mae', random_state=2020, min_child_samples=5, min_child_weight=0.000001)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "def reserve_target(data,index):\n",
    "    # Return the index labels from the position of the maximum of `data`\n",
    "    # onward, discarding the rising pre-peak segment of the series.\n",
    "    max_id=data.index(max(data))\n",
    "    if max_id==len(data)-1:# the last point is the maximum\n",
    "        return list(index)\n",
    "    else:\n",
    "        return list(index[max_id:])\n",
    "# Keep only the values from each region's maximum onward.\n",
    "for city_id in ['A','B','C','D','E','F','G','H','I','J','K']:\n",
    "    grouped=train_data[city_id].groupby('region_id')\n",
    "    reserve_list=[]\n",
    "    # NOTE(review): `ciy_id` is a typo for `city_id`; harmless because\n",
    "    # the loop variable is never used, but worth fixing.\n",
    "    for ciy_id,group in grouped:\n",
    "        reserve_list+=reserve_target(list(group['index'].values),group.index)\n",
    "    train_data[city_id]=train_data[city_id].loc[reserve_list].reset_index(drop=True)\n",
    "def calculate_lag(df, lag_list, column):\n",
    "    # Add one lagged copy of `column` per lag in `lag_list`, named\n",
    "    # '<column>_<lag>'; missing leading values are filled with 0.\n",
    "    for lag in lag_list:\n",
    "        column_lag = column + \"_\" + str(lag)\n",
    "        df[column_lag] = df[column].shift(lag, fill_value=0)\n",
    "    return df\n",
    "\n",
    "def calculate_trend(df, lag_list, column):\n",
    "    # Add relative-change features (x_t - x_{t-lag}) / x_{t-lag} named\n",
    "    # 'Trend_<column>_<lag>'. fill_value=0.001 in the denominator avoids\n",
    "    # division by zero on the leading rows.\n",
    "    for lag in lag_list:\n",
    "        trend_column_lag = \"Trend_\" + column + \"_\" + str(lag)\n",
    "        df[trend_column_lag] = (df[column].shift(0, fill_value=0) - \n",
    "                                df[column].shift(lag, fill_value=0))/df[column].shift(lag, fill_value=0.001)\n",
    "    return df\n",
    "\n",
    "def get_feature(df):\n",
    "    # Build lag (1..6) and trend (1..6) features for the target 'index',\n",
    "    # then sanitize the infinities/NaNs produced by the divisions.\n",
    "    df = calculate_lag(df, range(1,7), 'index')\n",
    "    #all_data = calculate_lag(all_data, range(1,7), 'migration')\n",
    "    df = calculate_trend(df, range(1,7), 'index')\n",
    "    #all_data = calculate_trend(all_data, range(1,7), 'migration')\n",
    "    df.replace([np.inf, -np.inf], 0, inplace=True)\n",
    "    df.fillna(0, inplace=True)\n",
    "    return df\n",
    "def get_test(train_df):\n",
    "    # Build the one-row feature frame for the next (unseen) day by\n",
    "    # shifting the last row's lag/trend features forward one step.\n",
    "    test_data=train_df[-1:].reset_index(drop=True)\n",
    "    last_raw=train_df[-1:].reset_index(drop=True)\n",
    "    pres1=['index_'+str(i) for i in range(1,7)]\n",
    "    pres2=['Trend_index_'+str(i) for i in range(1,7)]\n",
    "    test_data.loc[0,'index_1']=last_raw.loc[0,'index']\n",
    "    for i in range(len(pres1)-1):\n",
    "        test_data.loc[0,pres1[i+1]]=last_raw.loc[0,pres1[i]]\n",
    "    test_data.loc[0,'Trend_index_1']=1\n",
    "    for i in range(len(pres2)-1):\n",
    "        test_data.loc[0,pres2[i+1]]=last_raw.loc[0,pres2[i]]\n",
    "    X_test=test_data.drop(['index'],axis=1)\n",
    "    return X_test\n",
    "# Recursive multi-step forecast: fit on the last 20 post-peak points of\n",
    "# each region, predict one day, append the prediction to the series,\n",
    "# refit, and repeat for pre_lens days. Plots are saved per region.\n",
    "pre_lens=5\n",
    "pre_dicSeir={}\n",
    "for city_id in ['A','B','C','D','E','F','G','H','I','J','K']:\n",
    "    grouped=train_data[city_id].groupby('region_id')\n",
    "    reserve_list=[]\n",
    "    pre_dic={}\n",
    "    for region_id,group in grouped:\n",
    "        group=group[-20:].reset_index(drop=True)\n",
    "        group=group.drop(['date','city','region_id'],axis=1)\n",
    "        print(len(group))\n",
    "        pyplot.plot(group.values,'green')\n",
    "        pres=[]\n",
    "        for i in range(pre_lens):\n",
    "            #print(len(group))\n",
    "            train_df=get_feature(group)\n",
    "            X_train=train_df.drop(['index'],axis=1)\n",
    "            Y_train=train_df['index']\n",
    "            X_test=get_test(train_df)\n",
    "            regr = linear_model.LinearRegression()# linear regression; a tree model (e.g. model_lgbm) can be swapped in here\n",
    "            model_choose=regr\n",
    "            model_choose.fit(X_train, Y_train)\n",
    "            y_pred = model_choose.predict(X_test)\n",
    "            raw={'index':y_pred[0]}\n",
    "            pres.append(y_pred[0])\n",
    "            # NOTE(review): DataFrame.append was removed in pandas>=2.0;\n",
    "            # switch to pd.concat if the environment is ever upgraded.\n",
    "            group=group.append(raw,ignore_index=True)\n",
    "        print(region_id,pres)\n",
    "        pre_dic.update({region_id:pres})\n",
    "        pyplot.plot([i for i in range(len(group)-len(pres),len(group))],pres, color='red')\n",
    "        save_dir='AR_fit_city_lgb/'+city_id\n",
    "        if not os.path.exists(save_dir):\n",
    "            os.makedirs(save_dir)\n",
    "        plt.savefig(os.path.join(save_dir,str(region_id)+'.png'))\n",
    "        pyplot.show()\n",
    "        pyplot.close()\n",
    "    pre_dicSeir.update({city_id:pre_dic})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Patch the first two forecast days of an earlier submission with the\n",
    "# autoregressive predictions computed above, for the listed cities only.\n",
    "# (Removed redundant re-imports of pandas/random and the unused tmp_list.)\n",
    "re_sub=pd.read_csv('submits/submission1_16.csv',header=None)\n",
    "re_sub.columns = ['city','region_id','date', 'index']\n",
    "# Regions to leave untouched within each replaced city.\n",
    "ex={'A':[],'C':[],'I':[8],'J':[3],'K':[0]}\n",
    "def convert_date(date):\n",
    "    # Map the encoded date to an offset into a region's prediction list:\n",
    "    # 21200630 -> 0, 21200701 -> 1, ...\n",
    "    if date==21200630:\n",
    "        return 0\n",
    "    else:\n",
    "        return date-21200700\n",
    "for num,city,region_id,date,value in re_sub.itertuples():\n",
    "    if date<21200702:\n",
    "        if city in ['A','C','I','J','K']:\n",
    "            replace_lgb=pre_dicSeir[city][region_id][convert_date(date)]\n",
    "            # New-case counts cannot be negative: clamp, then round up.\n",
    "            if replace_lgb<0:\n",
    "                replace_lgb=0\n",
    "            if region_id not in ex[city]:\n",
    "                re_sub.loc[num,'index']=int(np.ceil(replace_lgb))\n",
    "re_sub.to_csv('submits/submission7_23v1.csv',index=False,header=None)\n",
    "re_sub.head(2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
