import os
import pickle

from dotenv import load_dotenv

from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

from . feature_engineering import pre_training

from utils import getfile_abs_path, configUtils


def check_winning(ticket, winning_numbers, rules):
    """Return the prize-tier name a ticket wins, or "未中奖" (no prize).

    Args:
        ticket: sequence of numbers; all but the last are "red" balls,
            the last element is the "blue" ball.
        winning_numbers: the drawn numbers, same layout as ``ticket``.
        rules: dict with a ``'winning_conditions'`` list. Each rule has a
            ``'name'`` and either an explicit ``'conditions'`` list of
            ``{'red_matches': int, 'blue_matches': int}`` dicts, or
            top-level ``'red_matches'``/``'blue_matches'`` keys.

    Returns:
        The ``'name'`` of the first matching rule, else "未中奖".
    """
    red_ticket, blue_ticket = set(ticket[:-1]), ticket[-1]
    red_winning, blue_winning = set(winning_numbers[:-1]), winning_numbers[-1]

    red_matches = len(red_ticket & red_winning)
    blue_matches = 1 if blue_ticket == blue_winning else 0

    for rule in rules['winning_conditions']:
        # Bug fix: dict.get evaluates its default argument eagerly, so the
        # original crashed with KeyError on rules that provide 'conditions'
        # but no top-level 'red_matches'. Build the fallback lazily instead.
        conditions = rule.get('conditions')
        if conditions is None:
            conditions = [{'red_matches': rule['red_matches'],
                           'blue_matches': rule['blue_matches']}]
        for condition in conditions:
            if (red_matches == condition['red_matches']
                    and blue_matches == condition['blue_matches']):
                return rule['name']

    return "未中奖"


def train_model(config, model_name):
    """Train one classifier chosen by *model_name*, tune it with GridSearchCV,
    and pickle the best estimator to the configured path.

    Args:
        config: loaded YAML config; per-model settings live under
            ``config["models"][model_name]`` (model_path, grid values,
            cv/verbose/n_jobs, constructor params).
        model_name: one of ``'random_forest'``, ``'logisticRegression'``,
            ``'knn'``. Any other value is silently ignored (original behavior).

    Side effects:
        Creates the model's parent directory if missing, prints the best
        params, and writes the fitted model with pickle.
    """
    x_train_scaled, x_test_scaled, y_train, y_test = pre_training()
    # Fixed typo: the original named this variable 'moel_path'.
    model_cfg = config["models"][model_name]
    model_abs_path = getfile_abs_path.getFilePath(model_cfg["model_path"])

    # The three original branches were near-identical; only the estimator,
    # the grid keys, and the printed labels differ, so dispatch on those.
    if model_name == 'random_forest':
        estimator = RandomForestClassifier(n_estimators=model_cfg["n_estimators"],
                                           random_state=model_cfg["random_state"])
        param_keys = ('n_estimators', 'max_depth', 'min_samples_split',
                      'min_samples_leaf', 'max_features')
        best_params_prefix = "最优参数"
        saved_label = "Random Forest"
    elif model_name == 'logisticRegression':
        estimator = LogisticRegression(random_state=model_cfg["random_state"])
        param_keys = ('C', 'penalty', 'solver')
        best_params_prefix = "最优参数（逻辑回归）"
        saved_label = "logisticRegression"
    elif model_name == 'knn':
        estimator = KNeighborsClassifier(n_neighbors=model_cfg["n_neighbors"])
        param_keys = ('n_neighbors', 'weights', 'p')
        best_params_prefix = "最优参数"
        saved_label = "KNN"
    else:
        # Unknown model name: the original function did nothing here.
        return

    # Create the parent directory for the model file if it does not exist.
    # exist_ok=True avoids the exists()/makedirs() race of the original.
    os.makedirs(os.path.dirname(model_abs_path), exist_ok=True)

    # Hyper-parameter search over the grid values taken from the config.
    param_grid = {key: model_cfg[key] for key in param_keys}
    grid_search = GridSearchCV(estimator=estimator, param_grid=param_grid,
                               cv=model_cfg["cv"],
                               verbose=model_cfg["verbose"],
                               n_jobs=model_cfg["n_jobs"])
    grid_search.fit(x_train_scaled, y_train)

    # Report the best parameters found by the search.
    print(f"{best_params_prefix}: {grid_search.best_params_}")

    # Persist the refitted best estimator.
    best_model = grid_search.best_estimator_
    with open(model_abs_path, 'wb') as file:
        pickle.dump(best_model, file)
    print(f"{saved_label} 模型已保存到 {model_abs_path}")


def train_ssc_model():
    """Entry point: load the SSC YAML config and train the KNN model.

    Reads the config from ``$PROJECT_PATH/config/ssc_config.yaml``.

    Raises:
        RuntimeError: if the PROJECT_PATH environment variable is unset,
            instead of the opaque TypeError os.path.join(None, ...) raised
            in the original.
    """
    project_path = os.environ.get("PROJECT_PATH")
    if not project_path:
        raise RuntimeError("PROJECT_PATH environment variable is not set")
    config_path = os.path.join(project_path, 'config', 'ssc_config.yaml')
    config = configUtils.load_config(config_path)
    # Other models can be trained the same way:
    # train_model(config, 'random_forest') / train_model(config, 'logisticRegression')
    print("====训练knn模型开始====")
    train_model(config, 'knn')
    print("====训练knn模型完成====")

# if __name__ == '__main__':
#     load_dotenv()
#     train_ssc_model()
