import numpy as np
import pandas as pd
from sklearn import metrics
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold
from sklearn import feature_selection
from sklearn.ensemble import ExtraTreesClassifier

# ---------------------------------------------------------------------------
# Load the feature tables and the train/test per-ship feature files, then
# drop near-constant training rows (rows with too few distinct values).
# ---------------------------------------------------------------------------
ori_data1 = pd.read_csv('./data/X_xyvd_d2_100.csv', header=0)
ori_data2 = pd.read_csv('./data/X_desc_d0.csv', header=0)
ori_data3 = pd.read_csv('./data/trn_per_300.csv', header=0)
tst_data = pd.read_csv('./data/tst_per_300.csv', header=0)

# Merge the three feature tables on the ship id.
# NOTE(review): `trn_data_` is built here but never used below — presumably
# kept for experimentation with the merged feature set; confirm before removal.
trn_data1 = ori_data1
trn_data2 = ori_data2.drop('type', axis=1)
trn_data3 = ori_data3.drop('type', axis=1)
trn_data_ = pd.merge(trn_data1, trn_data2, on='ship', how='left')
trn_data_ = pd.merge(trn_data_, trn_data3, on='ship', how='left')
# Only the per-300 table (with its 'type' label) feeds the model below.
trn_data_t = ori_data3

indices = list(range(len(trn_data_t)))
np.random.shuffle(indices)

# Keep only rows whose per-row count of distinct values exceeds `thresh`;
# rows below it are near-constant and carry little signal.
thresh = 10
sam_nunique = trn_data_t.nunique(axis=1)  # distinct values per row
sam_is = sam_nunique > thresh
trn_data = trn_data_t.loc[sam_is, :]
# BUG FIX: the dropped-row count was hard-coded as `7000 - sum(sam_is)`,
# which is wrong whenever the training set is not exactly 7000 rows.
print('trn:', len(trn_data_t) - sum(sam_is),
      'test', sum(tst_data.nunique(axis=1) < thresh))


# ---------------------------------------------------------------------------
# Hold out the last 1/nfold of the filtered rows as an extra validation set
# (Xv, yv), then keep only the columns an ExtraTrees model deems important.
# ---------------------------------------------------------------------------
nfold = 12
n_train = int((nfold - 1) / nfold * len(trn_data))
# The last two columns are excluded from the feature matrix (the label and
# presumably the ship id — TODO confirm against the csv layout); the test
# file drops only its last column.
X_, Xv_ = trn_data.iloc[:n_train, :-2], trn_data.iloc[n_train:, :-2]
y, yv = trn_data['type'][:n_train], trn_data['type'][n_train:]
X1_ = tst_data.iloc[:, :-1]

# Model-based feature selection: fit ExtraTrees on the training split and
# keep the columns whose importance passes SelectFromModel's default
# threshold, applied identically to train / holdout / test matrices.
# (The former `X = X_; Xv = Xv_; X1 = X1_` assignments were dead code —
# immediately overwritten by the transforms below — and have been removed.)
clf = ExtraTreesClassifier(n_estimators=50).fit(X_, y)
fs = feature_selection.SelectFromModel(clf, prefit=True)
X = pd.DataFrame(fs.transform(X_))
Xv = pd.DataFrame(fs.transform(Xv_))
X1 = pd.DataFrame(fs.transform(X1_))

features = X_.columns  # pre-selection column names (kept for inspection)
fold = StratifiedKFold(n_splits=nfold - 1, shuffle=True, random_state=100)
# LightGBM training parameters for 3-class softmax boosting.
# `n_estimators` is an alias LightGBM accepts for the boosting-round count;
# `early_stopping_rounds` / `verbose_eval` ride along in the params dict.
params = dict(
    n_estimators=5000,
    boosting_type='gbdt',
    objective='multiclass',
    num_class=3,
    early_stopping_rounds=100,
    verbose_eval=False,
)

# ---------------------------------------------------------------------------
# (nfold-1)-fold stratified CV: train one LightGBM model per fold, track
# macro-F1 on each fold's train/val rows and on the fixed holdout (Xv, yv),
# and sum test-set class probabilities across folds for a soft vote.
# ---------------------------------------------------------------------------
models = []
# Summed class-probability predictions for the test set; the argmax taken
# later is equivalent to soft-voting over all fold models.
pred = np.zeros((len(X1), 3))
oof_v = []  # per-fold macro-F1 on the fold's validation rows
oof_t = []  # per-fold macro-F1 on the fold's training rows
oof_r = []  # per-fold macro-F1 on the fixed holdout (Xv, yv)
for index, (train_idx, val_idx) in enumerate(fold.split(X, y)):
    train_set = lgb.Dataset(X.iloc[train_idx], y.iloc[train_idx])
    val_set = lgb.Dataset(X.iloc[val_idx], y.iloc[val_idx])

    # valid_sets gives early stopping (early_stopping_rounds is carried in
    # `params`) a metric to watch; verbose_eval=100 logs every 100 rounds.
    # NOTE(review): the `verbose_eval` kwarg was removed in LightGBM 4.x —
    # confirm the installed version still accepts it.
    model = lgb.train(params, train_set, valid_sets=[train_set, val_set], verbose_eval=100)
    # model = lgb.train(params, train_set)
    models.append(model)

    # Macro-F1 on the fold's own training rows (overfit indicator).
    trn_pred = np.argmax(model.predict(X.iloc[train_idx]), axis=1)
    f1_trn = metrics.f1_score(y.iloc[train_idx], trn_pred, average='macro')
    oof_t.append(f1_trn)

    # Macro-F1 on the fold's validation rows (out-of-fold score).
    val_pred = np.argmax(model.predict(X.iloc[val_idx]), axis=1)
    f1_val = metrics.f1_score(y.iloc[val_idx], val_pred, average='macro')
    oof_v.append(f1_val)

    # Macro-F1 on the fixed holdout slice that no fold trains on.
    pred_v = model.predict(Xv)
    pred_vc = np.argmax(pred_v, axis=1)
    f1_res = metrics.f1_score(yv, pred_vc, average='macro')
    oof_r.append(f1_res)
    print(index, 'trn f1:', f1_trn, 'val f1:', f1_val, 'res f1:', f1_res)

    # Accumulate test-set probabilities for the final soft vote.
    test_pred = model.predict(X1)
    pred += test_pred

# Mean and spread of the per-fold scores across all folds.
f1_trn_all, f1_val_all, f1_res_all = np.mean(oof_t), np.mean(oof_v), np.mean(oof_r)
std_trn_all, std_val_all, std_res_all = np.std(oof_t), np.std(oof_v), np.std(oof_r)
print('oof val f1:', f1_val_all, 'std:', std_val_all,
      'oof res f1:', f1_res_all, 'std:', std_res_all)

# ---------------------------------------------------------------------------
# Prediction/submission: soft-vote over the fold models, map the numeric
# class back to its label string, then write the result csv and a score log.
# ---------------------------------------------------------------------------
pred_class = np.argmax(pred, axis=1)
# .copy() prevents pandas' SettingWithCopyWarning when adding the column
# below (the original assigned into a slice of tst_data).
sub = tst_data[['ship']].copy()
sub['pred'] = pred_class
type_map = {'围网': 0, '拖网': 1, '刺网': 2}
type_map_rev = {v: k for k, v in type_map.items()}
print(sub['pred'].value_counts(1))  # normalized class distribution
sub['pred'] = sub['pred'].map(type_map_rev)

import datetime
now = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
date = now[:8]  # YYYYMMDD part, used for the daily log file name
prefix = 'res_lgb_per300_thresh_10'
file = f'{prefix}_{now}.csv'
# index=False / header=False replace the original index=None / header=None,
# which pandas only treats as falsy by accident; the output is plain
# "ship,label" rows with no header row and no index column.
sub.to_csv(f'res/{file}', index=False, header=False)
# Append the run's scores to a per-day log so runs can be compared later.
with open(f'fts/{prefix}_{date}.log', 'a+') as f:
    f.write(f'{now}:\n')
    # BUG FIX: the original format string read "std:, {std_res_all}" —
    # stray comma removed.
    f.write(f'oof val f1: {f1_val_all}, std: {std_val_all}, oof res f1: {f1_res_all}, std: {std_res_all} \n')
    f.write(f'file: {file} \n')
