import numpy as np
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
import lightgbm as lgb
from sklearn import metrics
from sklearn.model_selection import KFold,train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing


def lgb_foldtrain_regression(df_train, df_test, feats, label_name, n_fold=10):
    """K-fold train a LightGBM regressor and return test predictions + OOF predictions.

    Args:
        df_train: training DataFrame containing `feats` columns and `label_name`.
        df_test: test DataFrame containing `feats` columns.
        feats: list of feature column names.
        label_name: name of the target column in `df_train`.
        n_fold: number of CV folds (default 10).

    Returns:
        (pred_y, oof): mean of the fold models' predictions on `df_test`,
        and out-of-fold predictions aligned with `df_train` rows.
    """
    # KFold yields *positional* indices; reset the index so .loc (label-based)
    # agrees with positions even when the caller passes a non-default index.
    df_train = df_train.reset_index(drop=True)
    kfold = KFold(n_splits=n_fold)
    train_label = df_train[label_name]
    oof = np.zeros(len(df_train))
    pred_y = 0
    params = {'boosting_type': 'gbdt',
              'objective': 'regression',
              'metric': 'rmse',
              'learning_rate': 0.05,
              'num_leaves': 31,
              'max_depth': -1,
              'subsample': 0.7,
              'subsample_freq': 1,
              'colsample_bytree': 0.7,
              # NOTE: 'enable_categorical' removed — it is an XGBoost sklearn-API
              # option, unknown to LightGBM. Native LightGBM picks up pandas
              # 'category' dtype columns automatically.
              'verbose': -1,
              }
    for fold, (train_idx, val_idx) in enumerate(kfold.split(df_train, train_label)):
        print('---------------------------', fold)
        train = lgb.Dataset(df_train.loc[train_idx, feats], df_train.loc[train_idx, label_name])
        val = lgb.Dataset(df_train.loc[val_idx, feats], df_train.loc[val_idx, label_name])
        # num_boost_round=1000 matches the original effective cap (the old
        # 'n_estimators' alias in params overrode num_boost_round=5000);
        # early stopping on the validation fold usually halts well before it.
        model = lgb.train(params, train, valid_sets=[val], num_boost_round=1000,
                          callbacks=[lgb.early_stopping(100), lgb.log_evaluation(500)])
        oof[val_idx] = model.predict(df_train.loc[val_idx, feats])
        # Average the fold models' test predictions.
        pred_y += model.predict(df_test[feats]) / n_fold
    return pred_y, oof


def xgb_foldtrain_regression(df_train, df_test, feats, label_name, n_fold=10):
    """K-fold train an XGBoost regressor and return test predictions + OOF predictions.

    Args:
        df_train: training DataFrame containing `feats` columns and `label_name`.
        df_test: test DataFrame containing `feats` columns.
        feats: list of feature column names.
        label_name: name of the target column in `df_train`.
        n_fold: number of CV folds (default 10).

    Returns:
        (pred_y, oof): mean of the fold models' predictions on `df_test`,
        and out-of-fold predictions aligned with `df_train` rows.
    """
    # KFold yields *positional* indices; reset the index so .loc (label-based)
    # agrees with positions even when the caller passes a non-default index.
    df_train = df_train.reset_index(drop=True)
    kfold = KFold(n_splits=n_fold)
    train_label = df_train[label_name]
    oof = np.zeros(len(df_train))
    pred_y = 0
    for fold, (train_idx, val_idx) in enumerate(kfold.split(df_train, train_label)):
        print('---------------------------', fold)
        model = XGBRegressor(
            max_depth=6, learning_rate=0.05, n_estimators=2000,
            # 'reg:linear' is deprecated (removed in recent XGBoost);
            # 'reg:squarederror' is the same objective under its current name.
            objective='reg:squarederror', tree_method='hist', device="cuda",
            subsample=0.8, colsample_bytree=0.8, eval_metric='rmse', reg_lambda=0.5,
            enable_categorical=True)
        model.fit(df_train.loc[train_idx, feats], df_train.loc[train_idx, label_name])
        oof[val_idx] = model.predict(df_train.loc[val_idx, feats])
        # Average the fold models' test predictions.
        pred_y += model.predict(df_test[feats]) / n_fold
    return pred_y, oof


def xgb_train_regression(df_train, df_test, feats, label_name):
    """Train a single XGBoost regressor on all of `df_train` and predict `df_test`.

    Args:
        df_train: training DataFrame containing `feats` columns and `label_name`.
        df_test: test DataFrame containing `feats` columns.
        feats: list of feature column names.
        label_name: name of the target column in `df_train`.

    Returns:
        (pred_y, oof): predictions on `df_test`, and in-sample predictions on
        `df_train` (note: trained on the full set, so `oof` here is *not*
        out-of-fold — it will be optimistic).
    """
    model = XGBRegressor(
        max_depth=6, learning_rate=0.05, n_estimators=2000,
        # 'reg:linear' is deprecated (removed in recent XGBoost);
        # 'reg:squarederror' is the same objective under its current name.
        objective='reg:squarederror', tree_method='hist', device="cuda",
        subsample=0.8, colsample_bytree=0.8, eval_metric='rmse', reg_lambda=0.5,
        enable_categorical=True)
    model.fit(df_train[feats], df_train[label_name])
    oof = model.predict(df_train[feats])
    pred_y = model.predict(df_test[feats])
    return pred_y, oof




