import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
from sklearn import feature_selection
from sklearn.ensemble import ExtraTreesClassifier
import xgboost as xgb

# feas_merge
# Load the merged feature table. Columns include the id column 'ship' and
# the label column 'type'; everything else is a feature.
ori_data = pd.read_csv('./data/X_xyvd_d2_100.csv', header=0)
# tst_data = pd.read_csv('./data/tst_per_100.csv', header=0)

trn_data_ = ori_data
# Reorder columns so 'ship' and 'type' sit at the end; the positional
# slice `:-2` below then drops exactly those two.
fea = [x for x in trn_data_.columns if x not in ['ship', 'type']] + ['ship', 'type']
trn_data = trn_data_[fea]
# X1_ = tst_data.iloc[:, :-1]

# Shuffle rows before carving off the hold-out set.
# NOTE(review): no RNG seed is set, so the train/hold-out split differs on
# every run — call np.random.seed(...) first if reproducibility is needed.
indices = list(range(len(trn_data)))
np.random.shuffle(indices)
trn_data1 = trn_data.iloc[indices, :]

# First 5/6 of the shuffled rows feed the CV loop; the rest is a fixed
# hold-out set (Xv_/yv) evaluated by every fold's model.
n_train = int(5/6 * len(trn_data1))
X_, Xv_ = trn_data1.iloc[:n_train, :-2], trn_data1.iloc[n_train:, :-2]
y, yv = trn_data1['type'][:n_train], trn_data1['type'][n_train:]

# Model-based feature selection: fit an extra-trees ensemble on the CV
# training portion, then keep only the columns whose importance clears
# SelectFromModel's default threshold. The same mask is applied to both
# the CV data and the hold-out set.
selector_forest = ExtraTreesClassifier(n_estimators=50)
selector_forest.fit(X_, y)
selector = feature_selection.SelectFromModel(selector_forest, prefit=True)
X, Xv = (pd.DataFrame(selector.transform(part)) for part in (X_, Xv_))
# X1 = pd.DataFrame(selector.transform(X1_))

# 100-fold stratified CV: each fold trains on ~99% of X and validates on ~1%.
fold = StratifiedKFold(n_splits=100, shuffle=True, random_state=100)

# XGBoost hyper-parameters for every fold's classifier.
# `early_stopping_rounds` takes effect because an eval_set is supplied at
# fit time; training stops once the last eval set's metric stalls for
# 100 rounds (n_estimators=5000 is an upper bound, not a target).
params = {
    'n_estimators': 5000,
    'booster': 'gbtree',
    'objective': 'multi:softprob',
    'num_class': 3,
    'early_stopping_rounds': 100,
    # NOTE: the original also passed 'verbose': False here, but `verbose`
    # is a fit() argument, not a constructor parameter — the booster just
    # warned that it "may not be used". `verbosity: 0` already silences
    # xgboost's logging, so the bogus key is dropped.
    'verbosity': 0,
    'max_depth': 6,
    'n_jobs': 4,
    # 'scale_pos_weight': 1
}
xgbM = xgb.XGBClassifier(**params)


models = []
# pred = np.zeros((len(X1), 3))
oof_v = []  # per-fold macro-F1 on the fold's validation split
oof_t = []  # per-fold macro-F1 on the fold's training split
oof_r = []  # per-fold macro-F1 on the fixed hold-out set (Xv, yv)
for index, (train_idx, val_idx) in enumerate(fold.split(X, y)):
    train_x, train_y = X.iloc[train_idx], y.iloc[train_idx]
    val_x, val_y = X.iloc[val_idx], y.iloc[val_idx]
    # Watchlist: early stopping monitors the LAST entry (the val split).
    eval_set = [(train_x, train_y), (val_x, val_y)]

    # class_weights = list(class_weight.compute_class_weight(
    #     'balanced', np.unique(train_y), train_y))
    # w_array = np.ones(train_y.shape[0], dtype='float')
    # for i, val in enumerate(train_y):
    #     w_array[i] = class_weights[val - 1]

    # BUG FIX: the original refit one shared estimator (xgbM) every fold;
    # since .fit() returns the estimator itself, `models` ended up holding
    # N references to the SAME object — i.e. only the last fold's model.
    # Build a fresh classifier per fold so each list entry is independent.
    model = xgb.XGBClassifier(**params)
    model.fit(train_x, train_y, eval_set=eval_set,
              eval_metric=["merror", "mlogloss"],
              verbose=False)
    models.append(model)

    # Reuse the already-sliced fold frames instead of re-indexing X.
    trn_pred = np.argmax(model.predict_proba(train_x), axis=1)
    f1_trn = metrics.f1_score(train_y, trn_pred, average='macro')
    oof_t.append(f1_trn)

    val_pred = np.argmax(model.predict_proba(val_x), axis=1)
    f1_val = metrics.f1_score(val_y, val_pred, average='macro')
    oof_v.append(f1_val)

    # Score the fixed hold-out set with this fold's model as well.
    pred_v = model.predict_proba(Xv)
    pred_vc = np.argmax(pred_v, axis=1)
    f1_res = metrics.f1_score(yv, pred_vc, average='macro')
    oof_r.append(f1_res)
    print(index, 'trn f1:', f1_trn, 'val f1:', f1_val, 'res f1:', f1_res)

    # test_pred = model.predict_proba(X1)
    # pred += test_pred / 5

# Mean/std of the per-fold scores summarize generalization across folds.
f1_trn_all, f1_val_all, f1_res_all = np.mean(oof_t), np.mean(oof_v), np.mean(oof_r)
std_trn_all, std_val_all, std_res_all = np.std(oof_t), np.std(oof_v), np.std(oof_r)
print('oof val f1:', f1_val_all, 'std:', std_val_all,
      'oof res f1:', f1_res_all, 'std:', std_res_all)

