import pandas as pd
# Growing months: early rice 4-6, mid-season rice 5-8, late rice 6-9/10
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.svm import LinearSVR, SVR, NuSVR
from sklearn import linear_model
import warnings
warnings.filterwarnings('ignore')

# Load per-county rice yields (GBK-encoded) and keep the county id plus the
# early/late-rice yield columns for 2015-2017.
A = pd.read_csv('data/train_rice1.csv', header=0, encoding='gbk')
cols = ['区县id', '2015年早稻', '2015年晚稻', '2016年早稻', '2016年晚稻', '2017年早稻', '2017年晚稻']
# DataFrame.ix was removed in pandas 1.0; these are label selections -> .loc.
A = A.loc[:, cols]
counties = A.iloc[:, 0]  # county ids, used later to align the weather features

# Daily weather observations per county/station (GBK-encoded).
B = pd.read_csv('data/train_weather1.csv', header=0, encoding='gbk')

# A.loc[:, ['区县id', '2017年早稻']].to_csv('res0912.csv', header=None, index=None)

import datetime
import numpy as np

# Combine the separate year/month/day columns (row positions 2-4, after the two
# id columns) into a single 'YYYYMMDD' string column, then drop the originals.
B.insert(2, 'date', B.apply(lambda x: datetime.date(x[2], x[3], x[4]).strftime("%Y%m%d"), axis=1))

B.drop(['年份', '月份', '日期'], axis=1, inplace=True)

# Precipitation (column 7) uses '*' as a missing-value marker.

# Cannot do inplace boolean setting on mixed-types with a non np.nan value
# B.where(B == '*', 0, inplace=True)
# Treat both missing-value markers as zero so the columns can become numeric.
B.replace({'*': '0', '/': '0'}, inplace=True)

cols = ['区县id', '站名id', 'date', '日照时数（单位：h)', '日平均风速(单位：m/s)', '日降水量（mm）', '日最高温度（单位：℃）', '日最低温度（单位：℃）',
        '日平均温度（单位：℃）', '日相对湿度（单位：%）', '日平均气压（单位：hPa）']
# DataFrame.ix was removed in pandas 1.0 -> .loc for labels, .iloc for positions.
B1 = B.loc[:, cols]
# B1.dtypes
# Station id stays categorical; everything after 'date' is numeric weather data.
# (The np.object alias was removed in numpy 1.24; use the builtin object.)
B1.iloc[:, 1] = B1.iloc[:, 1].astype(object)
B1.iloc[:, 3:] = B1.iloc[:, 3:].astype(np.float64)

data = []   # per-year aggregated weather feature frames (2015..2018)
label = []  # year-over-year early-rice yield deltas (training targets)
i = 0
num1 = 2  # number of label pairs to build (2 here; 5 when training on all data)

for year in ['2015', '2016', '2017', '2018']:
    # Only the early-rice growing window (April-June) of each year is used.
    for month in [['0401', '0701']]:
        window = B1.loc[(B['date'] > '{year}{month}'.format(year=year, month=month[0]))
                        & (B['date'] < '{year}{month}'.format(year=year, month=month[1])), :]
        grouped = window.groupby('区县id')
        stats = grouped.describe()
        # describe() yields 8 summary stats per numeric column; every 8th
        # column (positions 0, 8, ...) is 'count', which carries no weather
        # signal, so keep only the other seven stats per column.
        cols_sp = list(range(0, 64, 8))
        cols_all = list(range(0, 64, 1))
        cols_diff = [a for a in cols_all if a not in cols_sp]
        feats = stats.iloc[:, cols_diff]
        # Align rows to the county order of the yield table A. (.ix was removed
        # in pandas 1.0; reindex keeps NaN rows for any missing county ids,
        # matching the old label-list lookup behavior.)
        feats = feats.reindex(counties)
        data += [feats]
        i += 1
        # For i == 2, 3 (years 2016, 2017 with num1 == 2) the label is the
        # early-rice yield change versus the previous year:
        # i=2 -> 2016早稻-2015早稻, i=3 -> 2017早稻-2016早稻.
        if 1 < i <= num1 + 1:
            label += [A.iloc[:, 2 * i - 1] - A.iloc[:, 2 * i - 3]]

# Training features: the 2016 and 2017 windows; labels: the matching deltas.
X_0 = pd.concat(data[1:3])
y = pd.concat(label[0:2])
X1_0 = data[3]  # 2018 features, used for the final prediction
y1 = label[1]   # NOTE(review): reuses the 2017 delta as a pseudo-validation target

# Keep only feature columns where fewer than 30% of the values are zero
# (columns dominated by zeros are mostly missing data).
cols = []
for j in range(X_0.shape[1]):
    col = X_0.iloc[:, j]  # .ix was removed in pandas 1.0 -> positional .iloc
    if (col == 0).sum() / len(col) < 0.3:
        cols.append(j)

X_1 = X_0.iloc[:, cols]
# Standardize with the training-set statistics. NOTE(review): men/std span ALL
# of X_0's columns, so the subtraction aligns on the column union and fills the
# dropped columns with NaN -- original behavior preserved; verify intent.
std = X_0.std(axis=0)
men = X_0.mean(axis=0)
X_2 = (X_1 - men) / std

X1_1 = X1_0.iloc[:, cols]
X1_2 = (X1_1 - men) / std

# Rank feature columns by their standard deviation on the prediction year,
# most stable (lowest variance) first.
x1std = X1_2.std()
x1std1 = x1std.reset_index()
# describe() left a 2-level column MultiIndex; drop the two index columns so
# only the value column (named 0) remains.
x1std1.drop(['level_0', 'level_1'], axis=1, inplace=True)
x1sort = x1std1.sort_values(0)
x1ind = x1sort.index.to_list()

# X_2 = X_0
# X1_2 = X1_0
# x1ind = cols

train_acc = []  # training MSE/2 per feature-count setting
val_acc = []    # validation path is currently commented out
for n_feas in [39]:
    # Take the n_feas most stable feature columns (positional -> .iloc;
    # DataFrame.ix was removed in pandas 1.0).
    X_3 = X_2.iloc[:, x1ind[:n_feas]]
    X1_3 = X1_2.iloc[:, x1ind[:n_feas]]

    X = X_3
    X1 = X1_3

    # clf = LogisticRegression(random_state=0, solver='lbfgs', multi_class='multinomial').fit(X, y)
    # pred = clf.predict(X1)

    # Earlier experiments used linear_model.LinearRegression(), LinearSVR() and
    # SVR(); those assignments were dead code (immediately overwritten), so only
    # the estimator actually fitted is kept.
    model = GradientBoostingRegressor()

    train_feat = X
    # y is an unnamed concatenated Series; reset_index turns it into a frame
    # whose value column is labelled 0.
    train_id = y.reset_index().drop('index', axis=1)[0]
    test_feat = X1
    test_id = y1
    # print(train_feat.shape, train_id.shape, test_feat.shape, test_id.shape)
    model.fit(train_feat, train_id)

    # Training error: half the mean squared error on the fit data.
    pred = model.predict(train_feat)
    total_err = 0
    for i in range(pred.shape[0]):
        # print(pred[i], test_id[i])
        err = (pred[i] - train_id[i])
        total_err += err * err
    acc = total_err / pred.shape[0] / 2
    print(acc)
    train_acc += [acc]
    print(train_acc)

    pred = model.predict(test_feat)
    # total_err = 0
    # for i in range(pred.shape[0]):
    #     # print(pred[i], test_id[i])
    #     err = (pred[i] - test_id[i])
    #     total_err += err * err
    # acc = total_err / pred.shape[0] / 2
    # print(acc)
    # val_acc += [acc]
    # print(val_acc)

    # The model predicts the yield *change*; add back the 2017 early-rice yield
    # (column 5 of A) to obtain the absolute 2018 prediction, then write
    # county-id / prediction pairs without header or index.
    pred = pred + A.iloc[:, 5]
    R = pd.concat([A.iloc[:, 0], pd.Series(pred)], axis=1)
    R.to_csv('7res0915_gbdt_residue_1617.csv', header=None, index=False)

# import matplotlib.pyplot as plt
# x = list(range(len(val_acc)))
# plt.plot(x, train_acc, x, val_acc)
# plt.show()
