#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import waveletai
from sklearn.model_selection import GridSearchCV
import mlflow
import click
import logging
import pandas as pd
import mlflow.sklearn
import sklearn.metrics as sm
from mlflow.utils.file_utils import TempDir
import sklearn.svm as svm


def _parse_cli_list(raw):
    """Parse a list-literal-looking CLI string into a list of string tokens.

    Example: "['linear', 'rbf']" -> ['linear', 'rbf'].

    :param raw: raw option value, e.g. "['1', '10']"
    :return: tokens with brackets, quotes and spaces stripped
    """
    return raw.replace("[", "").replace("]", "").replace(" ", "").replace("'", "").split(",")


@click.command()
# BUG FIX: these defaults were Python *lists*, but the option values are parsed
# below with str.replace() — a genuine list default would raise AttributeError.
# The defaults are now the string form the parser expects (same parsed result).
@click.option("--kernel", "-kp", default="['linear', 'rbf']", help="")
@click.option("--c", "-cp",
              default="['1', '10', '100', '200', '300', '400', '500', '1000', '2000', '3000', '5000', '10000']",
              help="")
@click.option("--accuracy_threshold", "-at", type=float, default=0.7, help="")
@click.option("--training_data", "-td", type=str, default="", help="数据集id")
def train(kernel, c, accuracy_threshold, training_data):
    """Grid-search an SVC on a platform dataset and log the best model.

    Downloads the train/val CSV artifacts for *training_data* from the
    WaveletAI platform, runs a 5-fold GridSearchCV over the given kernel/C
    candidates, records the best parameters and the validation accuracy, and
    persists the best estimator with MLflow when the accuracy reaches
    *accuracy_threshold*.

    :param kernel: string form of the kernel candidate list, e.g. "['linear', 'rbf']"
    :param c: string form of the integer C candidate list
    :param accuracy_threshold: minimum validation accuracy required to save the model
    :param training_data: platform dataset id to download
    """
    # Platform login.
    waveletai.init()

    # The CLI passes list-like options as strings; turn them into real lists.
    logging.info("kernel %s %s", kernel, type(kernel))
    logging.info("C %s %s", c, type(c))
    kernel = _parse_cli_list(kernel)
    c = [int(item) for item in _parse_cli_list(c)]
    logging.info("kernel %s %s", kernel, type(kernel))
    logging.info("C %s %s", c, type(c))

    # Work inside a temporary directory that is cleaned up automatically.
    with TempDir() as tmp:
        logging.info("dataset processing..........")
        output_directory = tmp.path()
        logging.info("output_directory: %s", output_directory)
        dataset_dir = os.path.join(output_directory, "dataset")
        waveletai.download_dataset_artifacts(training_data, dataset_dir, unzip=True)
        logging.info(" %s ", os.listdir(dataset_dir))

        train_csv = os.path.join(dataset_dir, "train.csv")
        val_csv = os.path.join(dataset_dir, "val.csv")
        if not os.path.exists(train_csv) or not os.path.exists(val_csv):
            logging.info("找不到数据集.")
            return

        logging.info("获取训练数据")
        # Columns 0-12 are features, column 13 is the label
        # (assumes the platform's fixed CSV layout — TODO confirm).
        data = pd.read_csv(train_csv).values
        train_x, train_y = data[:, 0:13], data[:, 13]

        data = pd.read_csv(val_csv).values
        test_x, test_y = data[:, 0:13], data[:, 13]

        logging.info("建模训练")
        parameters = {'kernel': kernel,
                      'C': c}
        # Exhaustive grid search, 5-fold CV, all CPU cores.
        clf = GridSearchCV(estimator=svm.SVC(),
                           param_grid=parameters,
                           scoring='accuracy',
                           n_jobs=-1,
                           cv=5)
        clf.fit(train_x, train_y)

        logging.info("在平台中记录运行的参数")
        waveletai.log_param("kernel_param", clf.best_params_["kernel"])
        waveletai.log_param("C_param", clf.best_params_["C"])

        # Keep the refit best estimator for evaluation and export.
        best_model = clf.best_estimator_

        logging.info("指标度量")
        pred_test_y = best_model.predict(test_x)
        accuracy_score = sm.accuracy_score(test_y, pred_test_y, normalize=True)
        logging.info("在平台中记录指标")
        waveletai.log_metric("accuracy_score", float(accuracy_score))

        # Only persist models that clear the accuracy bar.
        if float(accuracy_score) >= accuracy_threshold:
            logging.info("保存当前模型文件")
            mlflow.sklearn.log_model(best_model, "model", pip_requirements="requirements.txt")


if __name__ == "__main__":
    # Delegate to the click-generated CLI entry point.
    train()
