from __future__ import annotations
from typing import TYPE_CHECKING, Any, NamedTuple, Callable

from sklearn.model_selection import learning_curve
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, RocCurveDisplay
from sklearn.preprocessing import LabelBinarizer

import matplotlib
import matplotlib.pyplot as plt
import numpy as np

import optuna

if TYPE_CHECKING:
    from dataset import DataSet, DataWrapper
    import pandas as pd
    from pathlib import Path

# Use a CJK-capable font so the Chinese plot labels used below render correctly.
matplotlib.rcParams['font.family'] = 'Microsoft YaHei'
# Silence optuna's default per-trial logging output.
optuna.logging.disable_default_handler()

class ModelRecord(NamedTuple):
    """One tuning result: a record name, the score achieved, and the
    hyper-parameters that produced it.
    """
    name: str
    score: float
    hyperparameters: dict[str, Any]

def data_split(
    data: pd.DataFrame,
) -> tuple[pd.DataFrame, pd.Series, pd.DataFrame, pd.Series]:
    """Split a combined frame into (X_train, y_train, X_test, y_test).

    Rows are routed by the boolean 'train' column, which is dropped from both
    halves; the 'y' and 'weight' columns (when present) are excluded from the
    feature matrices.
    """
    non_features = ['y', 'weight']
    mask = data['train']
    train_part = data.loc[mask].drop(columns='train')
    test_part = data.loc[~mask].drop(columns='train')
    return (
        train_part.drop(columns=non_features, errors='ignore'),
        train_part['y'],
        test_part.drop(columns=non_features, errors='ignore'),
        test_part['y'],
    )


class ModelManager:
    """Collects tuning results (`ModelRecord`) for one model class and selects
    the best one.

    `model` is a classifier *class* (not instance) supporting the sklearn
    protocol: constructed with hyper-parameters and fitted via ``fit(X, y)``.
    """

    def __init__(self, name: str, model: Any) -> None:
        self.name = name
        self.model = model
        # Bug fix: the annotation previously read `dict[name, ModelRecord]`,
        # referencing the `name` parameter instead of the type `str`.
        self.record: dict[str, ModelRecord] = {}

    def register_record(self, name: str, score: float, hyperparameters: dict) -> None:
        """Store a tuning result under *name*, overwriting any existing entry."""
        if name in self.record:
            # Fixed wording of the warning message ("overwrited" -> "overwritten").
            print(f'Warning: record {name} already exists and will be overwritten')
        self.record[name] = ModelRecord(name=name, score=score, hyperparameters=hyperparameters)

    def get_best_record(self) -> ModelRecord | None:
        """Return the record with the highest score, or None when empty.

        Bug fix: the previous implementation started from ``best_score = 0``
        and therefore returned None whenever every score was <= 0; using
        ``max(..., default=None)`` handles negative scores correctly.
        """
        return max(self.record.values(), key=lambda r: r.score, default=None)

    def get_best_model(self, dataset: DataSet) -> ModelWrapper:
        """Fit and wrap the model using the best record's dataset/hyper-parameters.

        Raises:
            ValueError: if no record has been registered yet (previously this
            crashed with an opaque AttributeError on ``None.name``).
        """
        best_record = self.get_best_record()
        if best_record is None:
            raise ValueError('no records registered; call register_record first')
        data = dataset.get_data_from_name(best_record.name)
        return ModelWrapper(self.model, data, best_record)

# TODO: ModelWrapper.labels is currently hard-coded to None; initialize the
# class labels (e.g. from the fitted classifier or the dataset) instead.

class ModelWrapper:
    """Fits `model` (a classifier class) with the hyper-parameters from
    `record` on `data`, and exposes evaluation helpers: learning curve,
    confusion matrix, custom score functions, and per-class ROC/AUC plots.
    """

    def __init__(self, model: Any, data: DataWrapper, record: ModelRecord) -> None:
        self.model = model
        self.data = data
        self.record = record
        # TODO: derive the class labels instead of leaving them as None
        # (get_confusion_matrix tolerates None, get_roc_and_auc does not).
        self.labels = None
        self.X_train, self.y_train, self.X_test, self.y_test = data_split(data.data)
        self.clf = self.model(**record.hyperparameters).fit(self.X_train, self.y_train)
        self.y_predict = self.clf.predict(self.X_test)
        self.y_predict_proba = self.clf.predict_proba(self.X_test)

    def get_learning_curve(self, save: Path | None = None) -> None:
        """Plot a macro-F1 learning curve (train vs. validation); save to
        *save* at 300 dpi when given, otherwise show interactively.
        """
        _clf = self.model(**self.record.hyperparameters)
        train_size_ticks, train_score, test_score = learning_curve(
            _clf, self.X_train, self.y_train,
            train_sizes=np.linspace(0.1, 1.0, 10), scoring='f1_macro',
        )
        fig, ax = plt.subplots()
        ax.plot(train_size_ticks, train_score.mean(axis=1), label='训练集')
        ax.plot(train_size_ticks, test_score.mean(axis=1), label='验证集')
        # Shaded bands show the min/max spread across CV folds.
        ax.fill_between(train_size_ticks, train_score.min(axis=1), train_score.max(axis=1), alpha=0.5, linewidth=0)
        ax.fill_between(train_size_ticks, test_score.min(axis=1), test_score.max(axis=1), alpha=0.5, linewidth=0)
        # Bug fix: Axes has no `ylabel`/`xlabel` methods (those are pyplot
        # functions) -- the old calls raised AttributeError at runtime.
        ax.set_ylabel('macro-F1')
        ax.set_xlabel('数据量')
        ax.legend(loc='lower right')
        if save:
            plt.savefig(save, dpi=300)
        else:
            plt.show()

    def get_confusion_matrix(self, save: Path | None = None) -> None:
        """Plot the confusion matrix on the test split; save or show as above."""
        matrix = confusion_matrix(self.y_test, self.y_predict)
        image = ConfusionMatrixDisplay(matrix, display_labels=self.labels)
        image.plot()
        if save:
            plt.savefig(save, dpi=300)
        else:
            plt.show()

    def get_scores(self, score_funs: dict[str, Callable]) -> dict[str, float]:
        """Evaluate each metric ``fun(y_true, y_pred)`` on the test split.

        Bug fix: results were previously written back into *score_funs*
        (clobbering the callables) while the always-empty `scores` dict was
        returned; now the input dict is left untouched and the results are
        returned.
        """
        scores = {}
        for name, fun in score_funs.items():
            scores[name] = fun(self.y_test, self.y_predict)
        return scores

    def get_roc_and_auc(self, save: Path | None = None) -> None:
        """Plot one-vs-rest ROC curves (one per class in ``self.labels``);
        save or show as above.

        NOTE(review): requires ``self.labels`` to be set; with the default
        None this raises TypeError -- confirm labels are assigned upstream.
        """
        # Binarize y_test so each class gets its own column to compare
        # against the corresponding predict_proba column.
        y_binarize = LabelBinarizer().fit_transform(self.y_test)
        fig, ax = plt.subplots()
        for index, label in enumerate(self.labels):
            RocCurveDisplay.from_predictions(
                y_binarize[:, index],
                self.y_predict_proba[:, index],
                name=label,
                ax=ax
            )

        ax.set(
            xlabel="False Positive Rate",
            ylabel="True Positive Rate",
        )

        if save:
            plt.savefig(save, dpi=300)
        else:
            plt.show()
        

