# -*- coding:utf8 -*-
# @Time : 2023/3/30 16:05
# @Author : WanJie Wu
"""
机器学习训练分类模型
"""

import pickle
import os
import numpy as np
import lightgbm as lgb
from sklearn.ensemble import IsolationForest
from lightgbm import early_stopping
from sklearn import tree
import fasttext
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from loguru import logger
from sklearn.metrics import precision_score, recall_score, f1_score, classification_report


def dt_train(train_x, train_y, model_save_path):
    """Train a CART decision tree classifier and persist it with pickle.

    Args:
        train_x: training feature matrix.
        train_y: training labels aligned with train_x.
        model_save_path: destination path for the pickled model.
    """
    clf = tree.DecisionTreeClassifier(criterion="gini")
    clf.fit(train_x, train_y)
    with open(model_save_path, "wb") as fout:
        pickle.dump(clf, fout)


def dt_test(test_x, test_y, model_save_path):
    """Load a pickled decision tree and log macro precision/recall/F1."""
    with open(model_save_path, "rb") as fin:
        clf = pickle.load(fin)

    predictions = clf.predict(test_x)
    # Macro averaging weights every class equally regardless of support.
    precision = round(precision_score(test_y, predictions, average="macro"), 3)
    recall = round(recall_score(test_y, predictions, average="macro"), 3)
    f1 = round(f1_score(test_y, predictions, average="macro"), 3)
    logger.info(classification_report(test_y, predictions))
    logger.info(f"模型Precision: {precision}\t Recall: {recall} \t F1: {f1}")


def ft_train(train_x, train_y, model_save_path):
    """Train a fastText supervised classifier and save it.

    Docs: https://fasttext.cc/docs/en/python-module.html

    Args:
        train_x: list of (pre-tokenized) text samples.
        train_y: list of labels aligned with train_x.
        model_save_path: destination path for the trained model.
    """
    # fastText only trains from a file, so dump the samples in its
    # "__label__<y> <text>" line format first.
    tmp_file = "fasttext_clf.txt"
    try:
        # fastText assumes UTF-8 input; write it explicitly so the platform's
        # default locale encoding cannot corrupt non-ASCII (e.g. Chinese) text.
        with open(tmp_file, "w", encoding="utf-8") as f:
            for text, label in zip(train_x, train_y):
                f.write(f"__label__{label} {text}\n")

        model = fasttext.train_supervised(
            input=tmp_file,
            lr=0.1,
            epoch=1
        )
        model.save_model(model_save_path)
    finally:
        # Remove the intermediate file even if training raises,
        # instead of leaking it on failure.
        if os.path.exists(tmp_file):
            os.remove(tmp_file)


def ft_test(test_x, test_y, model_save_path):
    """Evaluate a fastText classifier and log binary precision/recall/F1.

    Args:
        test_x: list of text samples.
        test_y: integer labels aligned with test_x.
        model_save_path: path of a model saved by ft_train.
    """
    model = fasttext.load_model(model_save_path)
    labels, _probs = model.predict(test_x)
    # BUG FIX: str.strip("__label__") removes any of the characters
    # {_, l, a, b, e} from BOTH ends, so it could also eat characters
    # belonging to the label itself. Slice off the exact prefix instead.
    prefix_len = len("__label__")
    pred = [int(label[0][prefix_len:]) for label in labels]
    precision = round(precision_score(test_y, pred), 3)
    recall = round(recall_score(test_y, pred), 3)
    f1 = round(f1_score(test_y, pred), 3)
    logger.info(f"模型Precision: {precision}\t Recall: {recall} \t F1: {f1}")


def lgb_train(train_x, train_y, dev_x, dev_y, model_save_path):
    """Train a LightGBM binary classifier with early stopping and save it.

    Chinese docs: https://lightgbm.cn/docs/1/

    Args:
        train_x: training feature matrix.
        train_y: training labels.
        dev_x: validation feature matrix (used for early stopping).
        dev_y: validation labels.
        model_save_path: destination path for the pickled booster.
    """
    train_data = lgb.Dataset(train_x, label=train_y)
    dev_data = lgb.Dataset(dev_x, label=dev_y, reference=train_data)
    params = {
        'objective': 'binary',
        'metric': 'binary_logloss',
        'boosting_type': 'gbdt',
        'num_leaves': 31,
        'learning_rate': 0.05,
        # Heavy up-weighting of the positive class for imbalanced data.
        'scale_pos_weight': 120,
        'feature_fraction': 0.9,
    }
    # Train with early stopping on the dev set.
    gbm = lgb.train(
        params=params,
        train_set=train_data,
        valid_sets=[dev_data],
        num_boost_round=100,
        callbacks=[early_stopping(stopping_rounds=10)]
    )
    # predict() yields positive-class probabilities; round to hard 0/1 labels.
    y_pred = gbm.predict(dev_x, num_iteration=gbm.best_iteration)
    y_pred_classes = np.round(y_pred)
    logger.info(classification_report(dev_y, y_pred_classes))
    # BUG FIX: the save was commented out, so model_save_path was silently
    # ignored and lgb_test had nothing to load. Persist the booster.
    with open(model_save_path, "wb") as f:
        pickle.dump(gbm, f)


def lgb_test(test_x, test_y, model_save_path):
    """Evaluate a pickled LightGBM booster and log binary precision/recall/F1.

    Args:
        test_x: test feature matrix.
        test_y: binary test labels.
        model_save_path: path of a booster pickled by lgb_train.
    """
    with open(model_save_path, "rb") as f:
        model = pickle.load(f)
    # BUG FIX: Booster.predict returns positive-class probabilities, not
    # labels; sklearn's classification metrics require hard 0/1 classes,
    # so threshold at 0.5 (same as lgb_train's evaluation).
    prob = model.predict(test_x)
    pred = np.round(prob).astype(int)
    precision = round(precision_score(test_y, pred), 3)
    recall = round(recall_score(test_y, pred), 3)
    f1 = round(f1_score(test_y, pred), 3)
    logger.info(classification_report(test_y, pred))
    logger.info(f"模型Precision: {precision}\t Recall: {recall} \t F1: {f1}")


def lr_train(train_x, train_y, model_save_path):
    """Train a logistic-regression classifier and persist it with pickle.

    Args:
        train_x: training feature matrix.
        train_y: training labels.
        model_save_path: destination path for the pickled model.
    """
    clf = LogisticRegression(
        penalty="l2",             # regularization type, l2 by default
        C=1.0,                    # inverse regularization strength, default 1.0
        class_weight="balanced",  # auto-reweight classes by inverse frequency
        solver="lbfgs",           # optimizer; sag/saga are faster on large data
        max_iter=100,             # iteration budget, default 100
    )
    clf.fit(train_x, train_y)
    with open(model_save_path, "wb") as fout:
        pickle.dump(clf, fout)


def lr_test(test_x, test_y, model_save_path):
    """Load a pickled logistic-regression model and log binary P/R/F1."""
    with open(model_save_path, "rb") as fin:
        clf = pickle.load(fin)

    predictions = clf.predict(test_x)
    precision = round(precision_score(test_y, predictions), 3)
    recall = round(recall_score(test_y, predictions), 3)
    f1 = round(f1_score(test_y, predictions), 3)
    logger.info(f"模型Precision: {precision}\t Recall: {recall} \t F1: {f1}")


def svm_train(train_x, train_y, model_save_path):
    """Train a linear-kernel SVM classifier and persist it with pickle.

    Args:
        train_x: training feature matrix.
        train_y: training labels.
        model_save_path: destination path for the pickled model.
    """
    clf = SVC(
        C=1.0,
        kernel="linear",
        class_weight="balanced",  # reweight classes for imbalanced data
        verbose=True,
    )
    logger.info(f"开始SVM模型训练...")
    clf.fit(train_x, train_y)
    accuracy = clf.score(train_x, train_y)
    logger.info(f"训练集准确率为: {accuracy}")
    with open(model_save_path, "wb") as fout:
        pickle.dump(clf, fout)


def svm_test(test_x, test_y, model_save_path):
    """Load a pickled SVM model and log binary precision/recall/F1."""
    with open(model_save_path, "rb") as fin:
        clf = pickle.load(fin)

    predictions = clf.predict(test_x)
    precision = round(precision_score(test_y, predictions), 3)
    recall = round(recall_score(test_y, predictions), 3)
    f1 = round(f1_score(test_y, predictions), 3)
    logger.info(f"模型Precision: {precision}\t Recall: {recall} \t F1: {f1}")


def ml_dataset():
    """Build the classification dataset for the classic ML models.

    Performs word segmentation and derives sentence/word vectors from a
    pre-trained Word2Vec model.

    :return: a ReadClfDataSet exposing train/dev splits.
    """
    # Project-local imports kept at function scope to avoid import cost /
    # side effects when this module is imported elsewhere.
    from ml_dataset import ReadClfDataSet
    from app.src.embedding import nltk, predict

    return ReadClfDataSet(
        data_dir="/note/nlp_algo/app/data/competition/binary",
        seg_model=nltk.SegmentJB(),
        vector_model=predict.Word2VecPred("/data/output/word2vec.bin"),
    )


if __name__ == "__main__":
    dataset = ml_dataset()
    x1, y1 = dataset.train_data
    x2, y2 = dataset.dev_data
    # isolation_forest = IsolationForest(contamination=0.05, random_state=42)
    # isolation_forest.fit(x1, y1)
    # y_pred = isolation_forest.predict(x2)
    # print(classification_report(y2, y_pred))
    lgb_train(x1, y1, x2, y2, model_save_path="/data/output/lgb.pkl")
    # lgb_test(*dataset.dev_data, model_save_path="/data/output/lgb.pkl")

