import matplotlib.pyplot as plt
import numpy as np


def match_accuracy(incorrect_result, total_results):
    """Return the prediction accuracy, in percent, from an error count."""
    error_rate = incorrect_result / total_results
    return (1 - error_rate) * 100


# Manual standardization (z-score) helper.
def manual_standardize(X):
    """Standardize each column of X to zero mean and unit variance.

    Parameters:
    X: array of shape (n_samples, n_features)

    Returns:
    (X_scaled, mean, std). Zero entries of std are replaced by 1 so that
    constant columns standardize to exactly 0 instead of dividing by zero
    and producing inf/nan that would poison every downstream model.
    """
    mean = np.mean(X, axis=0)
    std = np.std(X, axis=0)
    # Bug fix: guard against zero-variance (constant) features.
    std = np.where(std == 0, 1.0, std)
    return (X - mean) / std, mean, std


def Manual_accuracy(y_true, y_pred):
    """Return the fraction of positions where y_pred matches y_true."""
    n_correct = np.sum(y_true == y_pred)
    return n_correct / len(y_true)


# Data handling
# Manual train/test split
def manual_train_test_split(X, y, test_size, random_state=None):
    """Randomly split X/y into train and test subsets.

    Parameters:
    X: feature array of shape (n_samples, ...)
    y: label array of shape (n_samples,)
    test_size: fraction of samples placed in the test set
    random_state: optional seed for reproducible shuffling

    Returns:
    X_train, X_test, y_train, y_test
    """
    # Bug fix: `if random_state:` silently ignored seed 0; use an explicit
    # None check, consistent with manual_stratified_split below.
    if random_state is not None:
        np.random.seed(random_state)
    indices = np.arange(X.shape[0])
    np.random.shuffle(indices)
    # The first (1 - test_size) of the shuffled order becomes training data.
    split_idx = int(X.shape[0] * (1 - test_size))
    train_idx, test_idx = indices[:split_idx], indices[split_idx:]
    return X[train_idx], X[test_idx], y[train_idx], y[test_idx]


def manual_stratified_split(X, y, test_size, random_state=None):
    """
    手动实现分层抽样分割训练集和测试集
    保持每个类别的比例与原始数据集一致

    参数:
    X: 特征数组
    y: 标签数组
    test_size: 测试集比例
    random_state: 随机种子

    返回:
    X_train, X_test, y_train, y_test
    """
    if random_state is not None:
        np.random.seed(random_state)

    # 获取唯一类别和每个类别的索引
    classes = np.unique(y)
    class_indices = {c: np.where(y == c)[0] for c in classes}

    # 初始化训练集和测试集索引
    train_indices = []
    test_indices = []

    # 对每个类别进行分层抽样
    for c in classes:
        indices = class_indices[c]
        np.random.shuffle(indices)

        # 计算该类别的测试样本数
        n_test = int(len(indices) * test_size)

        # 分割
        test_indices.extend(indices[:n_test])
        train_indices.extend(indices[n_test:])

    # 打乱顺序(保持各类别内部比例但打乱顺序)
    np.random.shuffle(train_indices)
    np.random.shuffle(test_indices)

    return X[train_indices], X[test_indices], y[train_indices], y[test_indices]


def create_draw_features(X):
    """Append engineered features that help the model recognize draws.

    Assumes columns 0/1 hold the two teams' rankings, 4/5 their recent
    form, and 6/7 their win rates. Returns X with five columns appended:
    ranking gap, win-rate similarity, combined form, a draw-tendency
    product, and a log-compressed ranking gap.
    """
    ranking_gap = np.abs(X[:, 0] - X[:, 1]).reshape(-1, 1)
    similarity = 1 - np.abs(X[:, 6] - X[:, 7]).reshape(-1, 1)
    combined_form = (X[:, 4] + X[:, 5]).reshape(-1, 1)  # joint recent form
    draw_signal = (ranking_gap * similarity).reshape(-1, 1)
    log_gap = np.log(ranking_gap + 1e-6).reshape(-1, 1)  # compress gap scale

    return np.hstack([X, ranking_gap, similarity, combined_form, draw_signal, log_gap])


def smart_draw_oversampling(X, y, target_ratio=0.25, random_state=42):
    np.random.seed(random_state)

    # 分离平局和非平局样本
    draw_idx = np.where(y == 2)[0]
    non_draw_idx = np.where(y != 2)[0]

    X_draw = X[draw_idx]

    # 计算需要生成的平局样本数
    target_draw_count = int(len(non_draw_idx) * target_ratio / (1 - target_ratio))
    needed = max(0, target_draw_count - len(draw_idx))

    if needed == 0 or len(draw_idx) == 0:
        return X.copy(), y.copy()

    synthetic = []
    for _ in range(needed):
        # 随机选择一个平局样本
        idx = np.random.choice(len(X_draw))
        neighbor_idx = np.random.choice(len(X_draw))

        # 线性插值生成新样本
        alpha = np.random.uniform(0.3, 0.7)
        new_sample = X_draw[idx] * alpha + X_draw[neighbor_idx] * (1 - alpha)

        # 添加微小噪声
        noise = np.random.normal(0, 0.01 * np.std(X_draw, axis=0))
        synthetic.append(new_sample + noise)

    # 合并样本
    X_resampled = np.vstack([X, np.array(synthetic)])
    y_resampled = np.concatenate([y, np.full(needed, 2)])

    return X_resampled, y_resampled


def check_stratification(_y_train, _y_test, class_names):
    """Print per-class counts and ratios for the train and test splits.

    Classes are assumed to be encoded as 0..len(class_names)-1.
    """
    train_total = len(_y_train)
    test_total = len(_y_test)

    print("训练集类别分布:")
    for idx, name in enumerate(class_names):
        count = np.sum(_y_train == idx)
        print(f"{name}: {count} ({count / train_total:.1%})")

    print("\n测试集类别分布:")
    for idx, name in enumerate(class_names):
        count = np.sum(_y_test == idx)
        print(f"{name}: {count} ({count / test_total:.1%})")


def oversample_minority_class(X, y, target_count=None, random_state=None):
    """
    仅对平局样本(类别2)进行过采样
    其他类别样本保持不变

    参数:
    X: 特征矩阵
    y: 标签数组(平局应为类别2)
    target_count: 平局希望达到的样本数(默认使用最多类别的样本数)
    random_state: 随机种子
    """
    if random_state is not None:
        np.random.seed(random_state)

    # 分离平局和非平局样本
    draw_indices = np.where(y == 2)[0]  # 平局过采样
    non_draw_indices = np.where(y != 2)[0]

    X_non_draw = X[non_draw_indices]
    y_non_draw = y[non_draw_indices]
    X_draw = X[draw_indices]
    y_draw = y[draw_indices]

    # 如果没有指定target_count，使用最多类别的样本数
    if target_count is None:
        target_count = max(np.bincount(y_non_draw))

    # 计算需要复制的次数
    n_draw_samples = len(X_draw)
    if n_draw_samples == 0:
        return X, y  # 如果没有平局样本，直接返回

    n_repeats = target_count // n_draw_samples
    remainder = target_count % n_draw_samples

    # 复制平局样本
    X_draw_resampled = np.repeat(X_draw, n_repeats, axis=0)
    y_draw_resampled = np.repeat(y_draw, n_repeats)

    # 添加剩余样本
    if remainder > 0:
        idx = np.random.choice(n_draw_samples, remainder, replace=False)
        X_draw_resampled = np.vstack([X_draw_resampled, X_draw[idx]])
        y_draw_resampled = np.concatenate([y_draw_resampled, y_draw[idx]])

    # 合并所有样本
    X_resampled = np.vstack([X_non_draw, X_draw_resampled])
    y_resampled = np.concatenate([y_non_draw, y_draw_resampled])

    # 打乱顺序
    indices = np.arange(len(X_resampled))
    np.random.shuffle(indices)

    return X_resampled[indices], y_resampled[indices]


# Model and its optimization
class ManualGaussianNB:
    """Gaussian Naive Bayes implemented from scratch with NumPy.

    Stores per-class feature means/variances and class priors; prediction
    maximizes the log posterior under a per-feature normal likelihood.
    """

    def __init__(self):
        self.classes = None    # unique class labels seen in fit
        self.priors = None     # class prior probabilities
        self.means = None      # per-class feature means
        self.variances = None  # per-class feature variances

    def _manual_mean(self, X):
        # Column-wise mean computed explicitly instead of via np.mean.
        return np.sum(X, axis=0) / X.shape[0]

    def _manual_variance(self, X, mean):
        # Population (biased) variance, matching the mean above.
        return np.sum((X - mean) ** 2, axis=0) / X.shape[0]

    def _gaussian_pdf(self, x, mean, var):
        """Normal density of x under N(mean, var)."""
        eps = 1e-8  # keep the denominator away from zero
        coeff = 1.0 / np.sqrt(2 * np.pi * (var + eps))
        exponent = np.exp(-(x - mean) ** 2 / (2 * (var + eps)))
        return coeff * exponent

    def fit(self, X, y):
        """Estimate per-class means, variances and priors from (X, y)."""
        self.classes = np.unique(y)
        shape = (len(self.classes), X.shape[1])

        self.means = np.zeros(shape)
        self.variances = np.zeros(shape)
        self.priors = np.zeros(len(self.classes))

        for idx, label in enumerate(self.classes):
            subset = X[y == label]
            self.means[idx, :] = self._manual_mean(subset)
            self.variances[idx, :] = self._manual_variance(subset, self.means[idx, :])
            # Prior = fraction of training samples belonging to this class.
            self.priors[idx] = subset.shape[0] / X.shape[0]

    def predict(self, X):
        """Return the class with the highest log posterior for each row."""
        labels = []
        for sample in X:
            # Log posterior per class: log prior + sum of log likelihoods.
            scores = [
                np.log(self.priors[idx])
                + np.sum(np.log(self._gaussian_pdf(sample, self.means[idx, :], self.variances[idx, :])))
                for idx in range(len(self.classes))
            ]
            labels.append(self.classes[np.argmax(scores)])
        return np.array(labels)


# Cost-sensitive learning
class RobustCostSensitiveNB(ManualGaussianNB):
    """Gaussian NB that predicts the class minimizing expected misclassification cost.

    ``cost_matrix[i, j]`` is the cost of predicting class j when the truth
    is class i; the diagonal must be zero. By default, misclassifying a
    draw (true class 2) costs ``default_high_cost`` instead of 1.
    """

    def __init__(self, cost_matrix=None, default_high_cost=5):
        super().__init__()

        # Cost of missing a draw in the default matrix.
        self.default_high_cost = default_high_cost
        self._init_cost_matrix(cost_matrix)

        # Set True to print per-sample probabilities and expected costs.
        self.debug = False

    def _init_cost_matrix(self, cost_matrix):
        """Validate and store the cost matrix, falling back to the default."""
        if cost_matrix is None:
            self.cost_matrix = np.array([
                [0, 1, 1],  # true = away win
                [1, 0, 1],  # true = home win
                [self.default_high_cost, self.default_high_cost, 0]  # true = draw
            ])
        else:
            try:
                cost_matrix = np.asarray(cost_matrix, dtype=np.float64)
                assert cost_matrix.shape == (3, 3), "代价矩阵必须是3x3"
                assert (cost_matrix >= 0).all(), "代价必须非负"
                assert (np.diag(cost_matrix) == 0).all(), "正确分类代价必须为0"
                self.cost_matrix = cost_matrix
            except Exception as e:
                print(f"无效的代价矩阵: {str(e)}，使用默认矩阵")
                self._init_cost_matrix(None)

    def predict(self, X):
        """Cost-sensitive prediction with numerically-stable softmax.

        Returns the index of the minimum-expected-cost class for each row.
        """
        try:
            # Normalize input to 2D.
            X = np.asarray(X)
            if X.ndim == 1:
                X = X.reshape(1, -1)

            # Per-sample, per-class log posterior scores.
            # Bug fix: the likelihood must be the SUM of log per-feature
            # densities (as in ManualGaussianNB.predict); the previous code
            # added the raw pdf array to the log prior, which produced
            # per-feature arrays and broke the softmax/cost broadcasting.
            log_probs = []
            for x in X:
                class_probs = []
                for i in range(len(self.classes)):
                    prior = np.log(self.priors[i] + 1e-10)
                    conditional = np.sum(np.log(self._gaussian_pdf(x, self.means[i], self.variances[i])))
                    class_probs.append(prior + conditional)
                log_probs.append(class_probs)
            log_probs = np.array(log_probs)

            # Convert log scores to probabilities (stabilized softmax).
            probs = np.exp(log_probs - np.max(log_probs, axis=1, keepdims=True))
            probs = probs / probs.sum(axis=1, keepdims=True)

            # Decision: pick the class with the lowest expected cost.
            predictions = []
            for prob in probs:
                expected_costs = []
                for j in range(len(self.classes)):
                    cost = np.sum(prob * self.cost_matrix[:, j])
                    expected_costs.append(cost)

                if self.debug:
                    print(f"概率: {prob}, 期望代价: {expected_costs}")

                predictions.append(np.argmin(expected_costs))

            return np.array(predictions)

        except Exception as e:
            print(f"预测时出错: {str(e)}")
            # Fall back to plain max-probability prediction if available.
            return np.argmax(probs, axis=1) if 'probs' in locals() else np.zeros(len(X))


def create_draw_specific_features(X):
    """Append similarity features that help the model identify draws.

    Assumes columns 0/1 hold rankings, 2/3 important-match win rates and
    4/5 World-Cup win rates; appends three similarity columns.
    """
    # How close the two teams are in ranking.
    ranking_gap = np.abs(X[:, 0] - X[:, 1]).reshape(-1, 1)

    # Similarity of important-match win rates.
    im_similarity = 1 - np.abs(X[:, 2] - X[:, 3]).reshape(-1, 1)

    # Similarity of World-Cup win rates.
    wc_similarity = 1 - np.abs(X[:, 4] - X[:, 5]).reshape(-1, 1)

    return np.hstack([X, ranking_gap, im_similarity, wc_similarity])


# Evaluation metrics
def manual_accuracy(y_true, y_pred):
    """Return the fraction of predictions that match the true labels.

    Bug fix: the denominator was previously ``len(y_true) * 0.6``, which
    inflated the reported accuracy (values could exceed 1.0) and
    contradicted the identical Manual_accuracy helper; it now divides by
    the sample count.
    """
    correct = np.sum(y_true == y_pred)
    return correct / len(y_true)


def manual_confusion_matrix(y_true, y_pred, classes):
    """Build a confusion matrix indexed as [true_label, predicted_label].

    Labels are assumed to be integers in ``range(len(classes))``.
    """
    size = len(classes)
    cm = np.zeros((size, size), dtype=int)
    for actual, predicted in zip(y_true, y_pred):
        cm[actual, predicted] += 1
    return cm


def manual_classification_report(y_true, y_pred, classes, class_names):
    matrix = manual_confusion_matrix(y_true, y_pred, classes)
    report = {}
    for i, _class_name in enumerate(class_names):
        tp = matrix[i, i]
        fp = np.sum(matrix[:, i]) - tp
        fn = np.sum(matrix[i, :]) - tp
        tn = np.sum(matrix) - tp - fp - fn

        precision = tp / (tp + fp) if (tp + fp) != 0 else 0
        recall = tp / (tp + fn) if (tp + fn) != 0 else 0
        f1 = 2 * (precision * recall) / (precision + recall) if (precision + recall) != 0 else 0
        support = np.sum(matrix[i, :])

        report[_class_name] = {
            'precision': precision,
            'recall': recall,
            'f1-score': f1,
            'support': support
        }

    # 计算加权平均
    avg_precision = np.sum([v['precision'] * v['support'] for v in report.values()]) / len(y_true)
    avg_recall = np.sum([v['recall'] * v['support'] for v in report.values()]) / len(y_true)
    avg_f1 = np.sum([v['f1-score'] * v['support'] for v in report.values()]) / len(y_true)

    report['weighted avg'] = {
        'precision': avg_precision,
        'recall': avg_recall,
        'f1-score': avg_f1,
        'support': len(y_true)
    }
    return report


# Draw-specific evaluation
def draw_specific_metrics(y_true, y_pred):
    """Print and return precision/recall/F1 for the draw class (label 2).

    Also reports accuracy over the two non-draw classes (labels 0 and 1).

    Bug fix: the confusion matrix is computed with labels=[0, 1, 2] so it
    is always 3x3 with rows/columns in label order. The previous approach
    zero-padded a smaller matrix on the bottom/right, which misplaced
    counts whenever an intermediate label (e.g. 1) was absent from the
    data. Also guards the non-draw accuracy against a zero denominator.
    """
    from sklearn.metrics import confusion_matrix

    # Fixed label order guarantees a correctly-indexed 3x3 matrix.
    cm = confusion_matrix(y_true, y_pred, labels=[0, 1, 2])

    # Draw (class 2) metrics.
    draw_precision = cm[2, 2] / cm[:, 2].sum() if cm[:, 2].sum() > 0 else 0
    draw_recall = cm[2, 2] / cm[2, :].sum() if cm[2, :].sum() > 0 else 0
    draw_f1 = 2 * draw_precision * draw_recall / (draw_precision + draw_recall) \
        if (draw_precision + draw_recall) > 0 else 0

    # Accuracy restricted to non-draw samples.
    non_draw_total = cm[0, :].sum() + cm[1, :].sum()
    non_draw_acc = (cm[0, 0] + cm[1, 1]) / non_draw_total if non_draw_total > 0 else 0

    print(f"平局精确率: {draw_precision:.2f}")
    print(f"平局召回率: {draw_recall:.2f}")
    print(f"平局F1分数: {draw_f1:.2f}")
    print(f"非平局准确率: {non_draw_acc:.2f}")

    return {
        'draw_precision': draw_precision,
        'draw_recall': draw_recall,
        'draw_f1': draw_f1,
        'non_draw_acc': non_draw_acc
    }


# Threshold optimization
def optimize_draw_threshold(model, X_val, y_val, thresholds=np.linspace(0.1, 0.5, 9)):
    """Scan candidate draw thresholds and keep the one with the best draw F1.

    The model is expected to honor a ``draw_threshold`` attribute during
    ``predict``. Returns (best_threshold, metrics_at_best_threshold).
    """
    best_thresh = 0
    best_f1 = -1
    best_metrics = {}

    for candidate in thresholds:
        # Adjust the model's decision threshold (assumed supported).
        model.draw_threshold = candidate
        predictions = model.predict(X_val)

        metrics = draw_specific_metrics(y_val, predictions)

        # Keep whichever threshold yields the highest draw F1.
        if metrics['draw_f1'] > best_f1:
            best_f1 = metrics['draw_f1']
            best_thresh = candidate
            best_metrics = metrics

    print(f"\n最佳阈值: {best_thresh:.2f}")
    print(f"对应平局F1: {best_f1:.2f}")
    return best_thresh, best_metrics


# Plotting
def plot_confusion_matrix(cm, class_names, title):
    """Render a confusion matrix as an annotated Matplotlib heatmap.

    Parameters:
    cm: confusion matrix (square numpy array of counts)
    class_names: tick labels for both axes
    title: figure title
    """
    fig, ax = plt.subplots(figsize=(8, 6))

    # Heatmap of the counts plus a colorbar.
    image = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    ax.figure.colorbar(image, ax=ax)

    n_rows, n_cols = cm.shape
    ax.set(xticks=np.arange(n_cols),
           yticks=np.arange(n_rows),
           xticklabels=class_names,
           yticklabels=class_names,
           title=title,
           ylabel='真实类别',
           xlabel='预测类别')

    # Angle the tick labels so long names stay readable.
    for tick_labels in (ax.get_xticklabels(), ax.get_yticklabels()):
        plt.setp(tick_labels, rotation=45, ha="right", rotation_mode="anchor")

    # Annotate every cell, switching text color on dark backgrounds.
    cutoff = cm.max() / 2.
    for row in range(n_rows):
        for col in range(n_cols):
            ax.text(col, row, format(cm[row, col], 'd'),
                    ha="center", va="center",
                    color="white" if cm[row, col] > cutoff else "black")

    fig.tight_layout()
    plt.show()


# Prediction
def predict_match(home_team_stats, away_team_stats, mean, std, bayes_model):
    """Predict a match outcome with the hand-built Bayes classifier.

    Parameters:
    home_team_stats / away_team_stats: mappings with keys 'FIFA Ranking',
        'winrate_im', 'winrate_wcm' and 'qualify16'
    mean, std: standardization statistics from training
    bayes_model: fitted classifier exposing ``predict``

    Returns the raw predicted label (1 = home win, 0 = away win, 2 = draw
    — see the interpretation in KnockoutMatchPredict).
    """
    # Interleave home/away values in the training feature order:
    # rankings, important-match win rates, World-Cup win rates, qualify16.
    keys = ('FIFA Ranking', 'winrate_im', 'winrate_wcm', 'qualify16')
    features = np.array(
        [side[k] for k in keys for side in (home_team_stats, away_team_stats)]
    )

    # Apply the training standardization, then classify the single row.
    scaled = (features - mean) / std
    return bayes_model.predict(scaled.reshape(1, -1))[0]


def KnockoutMatchPredict(base, matches, qualified_team, round_name, mean, std, bayes_model):
    """Simulate one knockout round and return the winners by slot.

    Parameters:
    base: table with one row of stats per 'Team'
    matches: iterable of (home_code, away_code) slot pairs
    qualified_team: mapping from slot code to team name; winners are
        written back into it under "W<i>" codes
    round_name: label used only for printing
    mean, std, bayes_model: forwarded to predict_match

    Returns a dict {"W1": winner, "W2": winner, ...}.
    """
    winners = {}
    print(f"\n{round_name} 对阵及结果:")
    print("-" * 40)

    for slot, (home_code, away_code) in enumerate(matches, 1):
        home_team = qualified_team[home_code]
        away_team = qualified_team[away_code]

        # Look up each side's feature row.
        home_stats = base[base['Team'] == home_team].iloc[0]
        away_stats = base[base['Team'] == away_team].iloc[0]

        outcome = predict_match(home_stats, away_stats, mean, std, bayes_model)

        if outcome == 1:
            # Home win.
            winner = home_team
        elif outcome == 0:
            # Away win.
            winner = away_team
        else:
            # Draw: go to extra time / penalties. The (unused) level score
            # keeps the RNG call sequence stable; the coin flip leans on
            # np.random.random() > 0.5 to pick the home side.
            home_goals = away_goals = np.random.randint(0, 3)
            winner = home_team if np.random.random() > 0.5 else away_team

        winners[f"W{slot}"] = winner

    print("\n晋级队伍:")
    for code, team in winners.items():
        print(f"{code}: {team}")
        qualified_team[code] = team  # advance the winner into the bracket
    print(winners)
    return winners
