import numpy as np
import pandas as pd
from scipy.stats import kurtosis
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
from lightgbm.sklearn import LGBMClassifier
from scipy import sparse
import lightgbm as lgb
import xgboost as xgb
import warnings
# https://zhuanlan.zhihu.com/p/82416104
warnings.filterwarnings("ignore")

# Input directory for the competition data; all files are GBK-encoded CSVs.
path = './HOT_input/'


def _load_csv(name, **kwargs):
    # Shared reader so directory and encoding are specified once.
    return pd.read_csv(path + name, encoding='gbk', **kwargs)


train_weather = _load_csv('train_weather.csv')        # daily weather records per county
train_rice = _load_csv('train_rice.csv')              # yearly rice yields per county
testA = _load_csv('testA.csv', header=None)           # test-set county ids (no header row)

# Reshape the wide per-year yield columns into long format: one row per
# (county, year) with unified '早稻'/'晚稻' columns.
# The original triplicated this per year and assigned '年份' into a slice of
# train_rice (chained assignment, only silenced by the warnings filter);
# the explicit .copy() makes each yearly frame independent.
yearly_frames = []
for year in (2015, 2016, 2017):
    cols = ['区县id', '{}年早稻'.format(year), '{}年晚稻'.format(year)]
    frame = train_rice[cols].copy()
    frame.columns = ['区县id', '早稻', '晚稻']
    frame['年份'] = year
    yearly_frames.append(frame)
train = pd.concat(yearly_frames)

# Test rows: county ids only; 2018 yields are the prediction target.
testA.columns = ['区县id']
testA['年份'] = 2018
data = pd.concat((train, testA))
# Encode county ids as dense integers (order of first appearance) and build a
# combined (county, year) key used as the merge key throughout the pipeline.
# NOTE(review): data and train_weather are encoded independently -- this is
# only consistent if both enumerate the same counties in the same first-seen
# order; verify against the raw files.
data['区县id'] = data['区县id'].map(dict(zip(data['区县id'].unique(), range(data['区县id'].nunique()))))
data['id_year'] = data['区县id'] * 10000 + data['年份']
train_weather['区县id'] = train_weather['区县id'].map(
    dict(zip(train_weather['区县id'].unique(), range(train_weather['区县id'].nunique()))))
train_weather['id_year'] = train_weather['区县id'] * 10000 + train_weather['年份']
train_weather['time'] = train_weather['年份'].astype(str) + '-' + train_weather['月份'].astype(str) + '-' + train_weather[
    '日期'].astype(str)
train_weather['time'] = pd.to_datetime(train_weather['time'])

# Daily weather measurement columns.
temp_list = ['日照时数（单位：h)', '日平均风速(单位：m/s)', '日降水量（mm）', '日最高温度（单位：℃）', '日最低温度（单位：℃）', '日平均温度（单位：℃）', '日相对湿度（单位：%）',
             '日平均气压（单位：hPa）']
# BUG FIX: the original used .replace('/', None) / .replace('*', None), which
# does NOT produce NaN -- with value=None pandas falls back to method='pad'
# and forward-fills the previous row's value, silently corrupting the data
# before the float cast. Replace the sentinel strings with np.nan explicitly.
train_weather[temp_list] = train_weather[temp_list].replace(['/', '*'], np.nan)
train_weather[temp_list] = train_weather[temp_list].astype(float)

# ---- Monthly weather aggregates ----
# For every calendar month, aggregate each weather variable per (county, year)
# and left-join max/min/sum onto `data`.
# BUG FIX: the original grouped the FULL train_weather here instead of the
# per-month slice (which it computed but never used), so all 12 "monthly"
# feature sets were identical copies of the yearly statistics.
for month in range(1, 13):
    month_weather = train_weather.loc[train_weather['月份'] == month]
    for col in temp_list:
        data = data.merge(month_weather.groupby(['id_year'], as_index=False)[col].agg({
            '{}_{}_max'.format(month, col): 'max',
            '{}_{}_min'.format(month, col): 'min',
            '{}_{}_sum'.format(month, col): 'sum',
        }), on=['id_year'], how='left')

# ---- Previous-year yield ("shift") features ----
# BUG FIX: everything from here down was nested inside the merge loops above,
# so `data` doubled on every inner iteration (exponential blowup over the
# 12 x 8 iterations) and the model was retrained / the submission rewritten
# each time. It now runs exactly once, after all features are merged.
#
# id_year = county * 10000 + year, so id_year + 1 addresses the same county in
# the following year; mapping through it fetches last year's yield.
stat_feat = []
for col in ['早稻', '晚稻']:
    stat_feat.append('id_year_{}_{}'.format(col, 1))
    data['id_year_{}_{}'.format(col, 1)] = data['id_year'] + 1
    df_last = data[~data[col].isnull()].set_index('id_year_{}_{}'.format(col, 1))
    df_last = df_last.drop_duplicates('id_year')
    data['shift_id_year_{}_{}'.format(col, 1)] = data['id_year'].map(df_last[col])
# Ratio of last year's late-season to early-season yield.
data['shift_id_year_晚稻_早稻'] = data['shift_id_year_晚稻_1'] / data['shift_id_year_早稻_1']

# ---- Stack early/late seasons into one regression problem ----
# label = this year's yield minus last year's (the model predicts the delta);
# 'type' distinguishes early-season ('0') from late-season ('1') rows.
data_1 = data.copy()
data_2 = data.copy()
data_1['label'] = data_1['早稻'] - data_1['shift_id_year_早稻_1']
data_2['label'] = data_2['晚稻'] - data_2['shift_id_year_晚稻_1']
data_1['type'] = '0'
data_2['type'] = '1'
data = pd.concat((data_1, data_2)).reset_index(drop=True)

# BUG FIX: the original wrote `for i in [...]`, shadowing the outer month loop
# variable and corrupting subsequent monthly feature names.
for cat_col in ['区县id', 'type']:
    data[cat_col] = data[cat_col].astype('category')

# Columns excluded from the feature set: targets, keys, and the raw shift
# columns used to build the label (direct leakage).
feat_list = ['早稻', '晚稻', 'id_year', 'label', '早稻平均产量', '晚稻平均产量', 'shift_id_year_早稻_1', 'shift_id_year_晚稻_1',
             '年份'] + stat_feat
features = [i for i in data.columns if i not in feat_list]

# Train on 2016, validate on 2017 (to pick the iteration count via early
# stopping), refit on 2016+2017, predict 2018.
all_idx = (data['年份'].between(2016, 2017))
train_idx = (data['年份'].between(2016, 2016))
valid_idx = (data['年份'].between(2017, 2017))
test_idx = (data['年份'].between(2018, 2018))
train_x = data[train_idx][features]
train_y = data[train_idx]['label']
valid_x = data[valid_idx][features]
valid_y = data[valid_idx]['label']

lgb_model = lgb.LGBMRegressor(objective='rmse',
                              max_depth=7,
                              learning_rate=0.05,
                              random_state=2019,
                              n_estimators=1000,
                              subsample=0.9,
                              colsample_bytree=0.7)

lgb_model.fit(train_x, train_y, eval_set=[(train_x, train_y), (valid_x, valid_y)],
              categorical_feature=['区县id', 'type'], early_stopping_rounds=100, verbose=100)
# Refit on both training years with the early-stopped iteration count plus a
# small margin (the refit set is larger than the tuning set).
lgb_model.n_estimators = lgb_model.best_iteration_ + 10
lgb_model.fit(data[all_idx][features], data[all_idx]['label'], categorical_feature=['区县id', 'type'])
predictions = lgb_model.predict(data[test_idx][features])

# The stacked 2018 rows come out ordered [all type-'0' rows, then all
# type-'1' rows], so predictions[81:] is the late-season (晚稻) half.
# NOTE(review): the 81 offset assumes exactly 81 test counties, and only the
# late-season prediction is submitted -- verify both against testA and the
# competition's submission format.
train_rice['target'] = train_rice['2017年晚稻'] + predictions[81:]
train_rice[['区县id', 'target']].to_csv(path + 'sub.csv', header=None, index=False)
