import random

from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, matthews_corrcoef, \
    balanced_accuracy_score, roc_auc_score
from sklearn.model_selection import train_test_split
import joblib
from utils.utils import get_default_model_name


class ClsModel:
    """Thin wrapper around a scikit-learn classifier.

    Bundles dataset splitting, training, metric computation, and model
    persistence (via joblib) behind one object, and keeps an ordered list
    of metrics for tabular export.

    Supported classifiers: ``"random_forest"`` and ``"naive_bayes"``.
    """

    def __init__(self, classifier_name, **kwargs):
        """
        :param classifier_name: one of ``"random_forest"`` / ``"naive_bayes"``.
        :param kwargs: hyper-parameters forwarded verbatim to the underlying
            sklearn estimator. The special key ``model_name`` is consumed here
            (it names the saved model file) and is NOT forwarded.
        :raises ValueError: if ``classifier_name`` is not supported.
        """
        self.classifier_name = classifier_name
        # BUG FIX: ``model_name`` is a wrapper-only option and must be removed
        # from kwargs BEFORE they reach the sklearn constructor — otherwise
        # e.g. RandomForestClassifier(model_name=...) raises TypeError.
        model_name = kwargs.pop("model_name", None)
        self.model = self._get_model(**kwargs)
        self.metric = {}            # metric name -> last computed value
        self.data = None
        self.x_train = None
        self.y_train = None
        self.x_test = None
        self.y_test = None
        self.ordered_metrics = []   # (display name, value) pairs, export order
        self.test_size = 0.2
        self.random_state = 42
        if model_name is None:
            # Default name derives from the classifier type, suffixed "-A".
            self.model_name = get_default_model_name(classifier_name) + "-A"
        else:
            self.model_name = model_name

    def train_test_split(self, data, target, test_size=0.2, random_state=0):
        """Split ``data``/``target`` into train and test partitions.

        NOTE: ``random_state=0`` is treated as "not specified" and triggers a
        random seed in [0, 100]; pass any non-zero value for reproducibility.

        :param data: feature matrix.
        :param target: label vector aligned with ``data``.
        :param test_size: fraction of samples held out for testing.
        :param random_state: seed; 0 means "pick one at random".
        """
        if random_state == 0:
            self.random_state = random.randint(0, 100)
        else:
            self.random_state = random_state
        self.test_size = test_size
        self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(
            data, target, test_size=self.test_size, random_state=self.random_state)

    def _get_model(self, **kwargs):
        """Instantiate the sklearn estimator named by ``self.classifier_name``.

        :param kwargs: estimator hyper-parameters.
        :return: a fresh, unfitted estimator.
        :raises ValueError: for an unrecognized classifier name.
        """
        if self.classifier_name == "random_forest":
            return RandomForestClassifier(**kwargs)
        elif self.classifier_name == "naive_bayes":
            return GaussianNB(**kwargs)
        else:
            raise ValueError("Invalid classifier name. Please choose from 'random_forest' or 'naive_bayes'.")

    def train(self):
        """Fit the model on the training split (call after train_test_split)."""
        self.model.fit(self.x_train, self.y_train)

    def evaluate_all(self):
        """Compute all supported metrics on the test split.

        Results are stored in ``self.metric`` and mirrored into
        ``self.ordered_metrics`` in a fixed display/export order.
        """
        y_pred = self.model.predict(self.x_test)
        self.metric['accuracy'] = accuracy_score(self.y_test, y_pred)
        self.metric['precision'] = precision_score(self.y_test, y_pred, average='macro', zero_division=0)
        self.metric['recall'] = recall_score(self.y_test, y_pred, average='macro')
        self.metric['f1'] = f1_score(self.y_test, y_pred, average='macro')
        self.metric['mcc'] = matthews_corrcoef(self.y_test, y_pred)
        self.metric['balance'] = balanced_accuracy_score(self.y_test, y_pred)
        # TODO: AUC retrieval is broken — roc_auc_score(multi_class='ovr')
        # needs predict_proba scores, not hard class predictions.
        # self.metric['auc'] = roc_auc_score(self.y_test,y_pred,multi_class='ovr')

        self.ordered_metrics = [
            ('Accuracy', self.metric['accuracy']),
            ('Precision', self.metric['precision']),
            ('Recall', self.metric['recall']),
            ('F1 Score', self.metric['f1']),
            ('MCC', self.metric['mcc']),
            ('Balance', self.metric['balance']),
            # ('AUC',self.metric['auc'])
        ]

    def get_accuracy(self):
        """Compute, cache, and return accuracy on the test split."""
        y_pred = self.model.predict(self.x_test)
        self.metric['accuracy'] = accuracy_score(self.y_test, y_pred)
        return self.metric['accuracy']

    def get_precision(self):
        """Compute, cache, and return macro precision on the test split."""
        y_pred = self.model.predict(self.x_test)
        # CONSISTENCY FIX: evaluate_all() uses zero_division=0 — match it so
        # both code paths produce identical values for degenerate classes.
        self.metric['precision'] = precision_score(self.y_test, y_pred, average="macro", zero_division=0)
        return self.metric['precision']

    def get_recall(self):
        """Compute, cache, and return macro recall on the test split."""
        y_pred = self.model.predict(self.x_test)
        self.metric['recall'] = recall_score(self.y_test, y_pred, average="macro")
        return self.metric['recall']

    def get_f1(self):
        """Compute, cache, and return macro F1 on the test split."""
        y_pred = self.model.predict(self.x_test)
        self.metric['f1'] = f1_score(self.y_test, y_pred, average="macro")
        return self.metric['f1']

    def get_mcc(self):
        """Compute, cache, and return Matthews correlation coefficient."""
        y_pred = self.model.predict(self.x_test)
        self.metric['mcc'] = matthews_corrcoef(self.y_test, y_pred)
        return self.metric['mcc']

    def get_balance(self):
        """Compute, cache, and return balanced accuracy on the test split."""
        y_pred = self.model.predict(self.x_test)
        self.metric['balance'] = balanced_accuracy_score(self.y_test, y_pred)
        return self.metric['balance']

    # TODO: AUC retrieval is broken (see evaluate_all) — needs probability
    # scores from predict_proba rather than predicted labels.
    # def get_auc(self):
    #     y_pred = self.model.predict(self.x_test)
    #     self.metric['auc'] = roc_auc_score(self.y_test,y_pred,multi_class='ovr')
    #     return self.metric['auc']

    def get_x_train(self):
        """Return the training feature matrix (None before splitting)."""
        return self.x_train

    def get_y_train(self):
        """Return the training labels (None before splitting)."""
        return self.y_train

    def get_x_test(self):
        """Return the test feature matrix (None before splitting)."""
        return self.x_test

    def get_y_test(self):
        """Return the test labels (None before splitting)."""
        return self.y_test

    def print_metrics(self):
        """Pretty-print the metrics last computed by evaluate_all()."""
        print("==============================")
        for metric_name, metric_value in self.ordered_metrics:
            print(f"{metric_name}:{metric_value}")
        print("==============================")

    def save_model(self, path=""):
        """Serialize the fitted estimator to ``./models/<name>.pkl``.

        :param path: optional file stem; defaults to ``self.model_name``.
        """
        if path != "":
            model_path = "./models/" + path
        else:
            model_path = "./models/" + self.model_name
        print(model_path)
        joblib.dump(self.model, model_path + ".pkl")

    def load_model(self, path):
        """Load an estimator from ``../models/<path>.pkl`` into ``self.model``.

        NOTE(review): save_model writes under "./models/" while this reads
        from "../models/" — looks asymmetric; confirm the intended working
        directory before relying on round-tripping.
        """
        model_path = "../models/" + path
        self.model = joblib.load(model_path + ".pkl")

    def get_xlsx_data(self):
        """Return one spreadsheet row: [model name, classifier, test size,
        random state, *metric values in ordered_metrics order]."""
        row = [self.model_name, self.classifier_name, self.test_size, self.random_state]
        row.extend(value for _, value in self.ordered_metrics)
        return row