import time

import numpy as np
import pandas as pd

from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

from lightautoml.automl.base import AutoML
from lightautoml.ml_algo.boost_lgbm import BoostLGBM
from lightautoml.pipelines.features.lgb_pipeline import LGBSimpleFeatures
from lightautoml.pipelines.ml.base import MLPipeline
from lightautoml.reader.base import PandasToPandasReader
from lightautoml.tasks import Task


# Fix NumPy's global RNG so every run of the demo is reproducible.
np.random.seed(42)

# Sampled slice of the application_train dataset used by this demo.
csv_path = "./data/sampled_app_train.csv"
print("Load data...")
data = pd.read_csv(csv_path)
print("Data loaded")

print("Features modification from user side...")
# Turn the relative day counters into absolute date strings, anchored at
# an arbitrary reference date of 2018-01-01.
anchor = np.datetime64("2018-01-01")
day_dtype = np.dtype("timedelta64[D]")

data["BIRTH_DATE"] = (anchor + data["DAYS_BIRTH"].astype(day_dtype)).astype(str)
# Clip positive DAYS_EMPLOYED values to zero so EMP_DATE can never land
# after the anchor date (presumably guards against sentinel values — the
# raw column may encode "not employed" as a large positive number).
employed_days = np.clip(data["DAYS_EMPLOYED"], None, 0).astype(day_dtype)
data["EMP_DATE"] = (anchor + employed_days).astype(str)

# Deliberately useless columns (constant / all-NaN), presumably added to
# exercise the reader's handling of degenerate features.
data["constant"] = 1
data["allnan"] = np.nan

# The raw day counters are superseded by the date columns built above.
data.drop(["DAYS_BIRTH", "DAYS_EMPLOYED"], axis=1, inplace=True)
print("Features modification finished")

print("Split data...")
# Hold out a fixed 2000-row test set, stratified on the binary TARGET
# column so both parts keep the same class balance.
train_data, test_data = train_test_split(
    data,
    test_size=2000,
    stratify=data["TARGET"],
    random_state=13,
)

# Rebuild clean positional indices on both parts.
train_data = train_data.reset_index(drop=True)
test_data = test_data.reset_index(drop=True)
print(f"Data splitted. Parts sizes: train_data = {train_data.shape}, test_data = {test_data.shape}")

# Each Task configuration is paired (by position) with the column used as
# its target: two binary setups on TARGET, three regression setups on
# AMT_CREDIT with different loss/metric combinations.
task_configs = [
    {"name": "binary"},
    {"name": "binary", "metric": roc_auc_score},
    {"name": "reg", "loss": "mse", "metric": "r2"},
    {"name": "reg", "loss": "rmsle", "metric": "rmsle"},
    {
        "name": "reg",
        "loss": "quantile",
        "loss_params": {"q": 0.9},
        "metric": "quantile",
        "metric_params": {"q": 0.9},
    },
]
target_columns = ["TARGET", "TARGET", "AMT_CREDIT", "AMT_CREDIT", "AMT_CREDIT"]

for task_params, target in zip(task_configs, target_columns):
    print("Create task..")
    task = Task(**task_params)
    print("Task created")

    print("Create reader...")
    reader = PandasToPandasReader(task, cv=5, random_state=1)
    print("Reader created")

    # Single-level AutoML: plain LGB feature pipeline feeding one LightGBM
    # model with fixed default parameters (no tuner, no selector).
    print("Start creation pipeline_1...")
    feature_pipe = LGBSimpleFeatures()

    print("\t ParamsTuner2 and Model2...")
    lgbm = BoostLGBM(
        default_params={
            "learning_rate": 0.025,
            "num_leaves": 64,
            "seed": 2,
            "num_threads": 5,
        }
    )
    print("\t Tuner2 and model2 created")

    print("\t Pipeline1...")
    level1_pipeline = MLPipeline(
        [lgbm],
        pre_selection=None,  # selector,
        features_pipeline=feature_pipe,
        post_selection=None,
    )
    print("Pipeline1 created")

    print("Create AutoML pipeline...")
    automl = AutoML(
        reader,
        [[level1_pipeline]],
        skip_conn=False,
    )

    print("AutoML pipeline created...")

    print("Start AutoML pipeline fit_predict...")
    fit_started = time.time()
    # Out-of-fold predictions on the training part; `roles` tells the
    # reader which column is the target.
    oof_pred = automl.fit_predict(train_data, roles={"target": target})
    print(f"AutoML pipeline fitted and predicted. Time = {time.time() - fit_started:.3f} sec")

    test_pred = automl.predict(test_data)
    print(f"Prediction for test data:\n{test_pred}\nShape = {test_pred.shape}")

    # Score OOF and test predictions with the task's own metric function.
    print("Check scores...")
    print(f"OOF score: {task.metric_func(train_data[target].values, oof_pred.data[:, 0])}")
    print(f"TEST score: {task.metric_func(test_data[target].values, test_pred.data[:, 0])}")
