# -*- coding: UTF-8 -*-
# PyCharm  tools
# 2024年 06月 03日
# 作者：小帅天一
import warnings

from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sqlalchemy.sql import text
import pandas as pd
from sqlalchemy import create_engine
import Config
import numpy as np
import joblib


def engine_of_sql():
    """Create a SQLAlchemy engine bound to the project's SQLite database.

    The database file location comes from ``Config.machine_datasets``.

    :return: a SQLAlchemy Engine instance
    """
    return create_engine(f'sqlite:///{Config.machine_datasets}')


def save_health_data_to_datasets(file):
    """Load the healthcare CSV and persist it to the 'healthcareData' table.

    Rows containing NaN are dropped and the 'id' column is removed before
    the frame is written; an existing table is replaced.

    :param file: path to the healthcare CSV file
    :return: None
    """
    frame = pd.read_csv(file).dropna()
    # The 'id' column is a row identifier, not a feature.
    frame = frame.drop(columns=['id'])

    # Overwrite any previous copy of the table in the SQLite database.
    frame.to_sql('healthcareData', con=engine_of_sql(), if_exists='replace', index=False)


def save_salary_data_to_datasets(file):
    """Load the salary CSV and persist it to the 'salaryData' table.

    The raw dataset marks missing values with the string " ?"; those are
    normalised to NaN and dropped before writing. An existing table is
    replaced.

    :param file: path to the salary CSV file
    :return: None
    """
    frame = pd.read_csv(file)
    frame = frame.replace(to_replace=" ?", value=np.nan).dropna()

    # Overwrite any previous copy of the table in the SQLite database.
    frame.to_sql('salaryData', con=engine_of_sql(), if_exists='replace', index=False)


def health_data_conversion(data):
    """Map a request object's attributes onto the health model's column layout.

    Builds a single-row DataFrame whose columns match the one-hot layout
    saved at training time (``Config.health_data_columns``), so the row can
    be fed straight into the trained model.

    :param data: object whose attributes hold the raw input values
    :return: a one-row DataFrame aligned to the training columns
    """
    warnings.filterwarnings('ignore')
    pd.set_option('display.max_columns', None)
    # Column order captured when the model was trained.
    health_columns = joblib.load(Config.health_data_columns)
    print(health_columns)
    print("===================================")
    # Start from an empty frame with the training layout.
    new_data = pd.DataFrame(columns=health_columns)
    # 'stroke' is the target label, so it is not part of the feature row.
    del new_data['stroke']

    print(new_data.columns)

    categorical_keys = ('gender', 'ever_married', 'work_type',
                        'Residence_type', 'smoking_status')
    for key, value in vars(data).items():
        if key in categorical_keys:
            # One-hot encoded: the attribute's value names the column to set.
            new_data[value] = [1]
        elif key != 'mode':
            # Numeric/boolean attributes map directly onto a column.
            new_data[key] = value
    # Columns never touched stay at 0 (the "not this category" state).
    new_data.fillna(0, inplace=True)

    print(new_data)

    return new_data


def salary_data_conversion(data):
    """Map a request object's attributes onto the salary model's column layout.

    Builds a single-row DataFrame whose columns match the one-hot layout
    saved at training time (``Config.salary_data_columns``). Attribute names
    use underscores, while the trained columns use hyphens, hence the
    ``replace("_", "-")`` translation.

    :param data: object whose attributes hold the raw input values
    :return: a one-row DataFrame aligned to the training columns
    """
    warnings.filterwarnings('ignore')
    pd.set_option('display.max_columns', None)
    # Column order captured when the model was trained.
    salary_columns = joblib.load(Config.salary_data_columns)
    print(salary_columns)
    print("===================================")
    # Start from an empty frame with the training layout.
    new_data = pd.DataFrame(columns=salary_columns)

    print(new_data.columns)

    numeric_keys = ('age', 'fnlwgt', 'education_num', 'capital_gain',
                    'capital_loss', 'hours_per_week')
    for key, value in vars(data).items():
        if key in numeric_keys:
            # Numeric attributes map directly onto a hyphenated column.
            new_data[key.replace("_", "-")] = value
        elif key != 'mode':
            # Categorical attributes select a '<column>_<value>' dummy column.
            new_data[key.replace("_", "-") + "_" + value] = [1]
    # Columns never touched stay at 0 (the "not this category" state).
    new_data.fillna(0, inplace=True)

    print(new_data)

    return new_data


def model_predict(model, x):
    """Run a fitted model on *x* and return the first prediction.

    :param model: a fitted estimator exposing ``predict()``
    :param x: feature row(s) in the shape the model expects
    :return: the prediction for the first (and typically only) sample
    """
    print("-------------------------------")
    prediction = model.predict(x)
    print(prediction)
    print(type(prediction))

    return prediction[0]


def _build_classifier(mission, mode):
    """Return an unfitted classifier configured for *mission* and *mode*.

    :param mission: "healthcare" selects the health hyper-parameters;
        anything else selects the salary ones (mirrors create_model).
    :param mode: model key — 'lg', 'dt', 'knn', 'by', 'svn' or 'rf'
    :return: an unfitted scikit-learn classifier
    :raises ValueError: if *mode* is not a recognised key
    """
    if mission == "healthcare":
        factories = {
            'lg': lambda: LogisticRegression(solver='liblinear'),
            'dt': lambda: DecisionTreeClassifier(max_depth=7, max_leaf_nodes=9, min_samples_split=7),
            'knn': lambda: KNeighborsClassifier(metric='manhattan', n_neighbors=3, weights='distance'),
            'by': lambda: GaussianNB(var_smoothing=0.1),
            'svn': lambda: SVC(C=100, gamma=0.1),
            'rf': lambda: RandomForestClassifier(max_depth=10, n_estimators=1),
        }
    else:
        factories = {
            'lg': lambda: LogisticRegression(solver='liblinear'),
            'dt': lambda: DecisionTreeClassifier(max_depth=6, max_leaf_nodes=9),
            'knn': lambda: KNeighborsClassifier(metric='manhattan', n_neighbors=9, weights='uniform'),
            'by': lambda: GaussianNB(var_smoothing=0.1),
            'svn': lambda: SVC(C=10),
            'rf': lambda: RandomForestClassifier(max_depth=10, n_estimators=1),
        }
    try:
        return factories[mode]()
    except KeyError:
        # Previously an unknown mode left model=None and crashed later with
        # an AttributeError on model.fit; fail fast with a clear message.
        raise ValueError(f"unknown model mode: {mode!r}") from None


def _load_healthcare_features():
    """Load the healthcare table and return (X, y) with one-hot categoricals.

    :return: tuple (X, y) — feature DataFrame and the 'stroke' target Series
    """
    data = pd.read_sql(text('select * from healthcareData'), engine_of_sql())
    data = data.dropna()
    categorical = ['gender', 'ever_married', 'work_type', 'Residence_type', 'smoking_status']
    # pd.get_dummies on a Series keeps the bare category names (e.g. 'Male'),
    # which is the column layout health_data_conversion reconstructs.
    dummies = [pd.get_dummies(data[column]) for column in categorical]
    data = data.drop(columns=categorical)
    data = pd.concat([data] + dummies, axis=1)
    return data.drop('stroke', axis=1), data['stroke']


def _load_salary_features():
    """Load the salary table and return (X, y) with one-hot categoricals.

    :return: tuple (X, y) — feature DataFrame and the binary 'salary_ >50K'
        target Series
    """
    data = pd.read_sql(text('select * from salaryData'), engine_of_sql())
    # Binary-encode the target first so a single 'salary_ >50K' column remains.
    data = pd.get_dummies(data, columns=['salary'], drop_first=True)
    # The raw dataset marks missing values with the string " ?".
    data = data.replace(to_replace=" ?", value=np.nan).dropna()
    for column in ['workclass', 'education', 'marital-status', 'occupation',
                   'relationship', 'race', 'sex', 'native-country']:
        # Column-wise get_dummies prefixes names ('workclass_ Private'),
        # matching what salary_data_conversion reconstructs.
        data = pd.get_dummies(data, columns=[column])
    return data.drop('salary_ >50K', axis=1), data['salary_ >50K']


def create_model(mission, mode):
    """Train a classifier for *mission*, persist it, and return its accuracy.

    :param mission: "healthcare" trains on the healthcareData table; any
        other value trains on the salaryData table
    :param mode: model key — 'lg', 'dt', 'knn', 'by', 'svn' or 'rf'
    :return: accuracy of the fitted model on the held-out test split
    :raises ValueError: if *mode* is not a recognised key
    """
    if mission == "healthcare":
        X, y = _load_healthcare_features()
        scaler_path = Config.health_scaler_model
        model_path = './models/re_model/health_model.pkl'
    else:
        X, y = _load_salary_features()
        scaler_path = Config.salary_scaler_model
        model_path = './models/re_model/salary_model.pkl'

    # Stratified 80/20 split with a fixed seed for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)

    # NOTE(review): fit_transform re-fits the loaded scaler on this split,
    # overwriting whatever statistics it was saved with; if the saved scaler
    # is meant to be reused as-is, transform() alone would be correct — confirm.
    scaler = joblib.load(scaler_path)
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    model = _build_classifier(mission, mode)
    model.fit(X_train, y_train)
    # Persist the freshly trained model for later prediction use.
    joblib.dump(model, model_path)

    return model.score(X_test, y_test)


if __name__ == '__main__':
    # One-time setup: ingest both raw CSV datasets into the SQLite database.
    save_health_data_to_datasets(Config.healthcare_data_path)
    save_salary_data_to_datasets(Config.salary_data_path)
