import numpy as np
from math import exp
import pandas as pd
from scipy.stats import norm
import matplotlib.pyplot as plt

# Preliminary data for the 32 qualified teams
group_match_analysis_file_path = (
    "C://Users//54114//OneDrive//Desktop//FIFAPrediction//KeyCode//group_match_analysis.csv")
total_team_situation = pd.read_csv(group_match_analysis_file_path)

# Build a lookup keyed by team name:
#   winrate_im  -> win rate in international matches
#   winrate_wcm -> win rate in World Cup matches
team = {
    row['Team']: {
        'winrate_im': row['winrate_im'],
        'winrate_wcm': row['winrate_wcm'],
    }
    for _, row in total_team_situation.iterrows()
}

history_world_cup_match_file_path = "C://Users//54114//OneDrive//Desktop//FIFAPrediction//KeyCode//fifa.csv"
match_history = pd.read_csv(history_world_cup_match_file_path)


# Bayesian Optimization: search for the optimum of a black-box function.
# The length_scale hyperparameter controls smoothness (larger = smoother).
def rbf_kernel(x1, x2, length_scale=1.0):
    """
    RBF (Gaussian) kernel.

    Formula: k(x1, x2) = exp(-0.5 * (x1 - x2)^2 / l^2), where l is
    ``length_scale``.

    Uses ``np.exp`` instead of ``math.exp`` so the kernel also works
    element-wise on NumPy array inputs; scalar inputs still return a
    scalar value as before.
    """
    return np.exp(-0.5 * ((x1 - x2) ** 2) / (length_scale ** 2))


def gp_predict(X_train, y_train, X_new, kernel=None, noise=1e-8):
    """
    Gaussian-process posterior prediction with a zero prior mean.

    Parameters
    ----------
    X_train : sequence of float -- observed input locations
    y_train : sequence of float -- observed objective values
    X_new   : sequence of float -- locations to predict at
    kernel  : callable(x1, x2) -> float; defaults to ``rbf_kernel``
              (resolved at call time, so this definition no longer depends
              on ``rbf_kernel`` existing when the module executes this def)
    noise   : float -- jitter added to the diagonal for numerical stability

    Returns
    -------
    (mu, sigma) : posterior mean and standard deviation arrays over X_new.
    """
    if kernel is None:
        kernel = rbf_kernel

    n_train = len(X_train)
    # Kernel matrix between training points, plus diagonal jitter.
    K = np.array([[kernel(xi, xj) for xj in X_train] for xi in X_train])
    K += noise * np.eye(n_train)

    # Cross-kernel between training points and new points: (n_train, n_new).
    K_new = np.array([[kernel(xi, xj) for xj in X_new] for xi in X_train])

    # Solve linear systems instead of forming K^{-1} explicitly:
    # np.linalg.solve is more numerically stable than np.linalg.inv.
    alpha = np.linalg.solve(K, np.asarray(y_train, dtype=float))
    mu = K_new.T.dot(alpha)

    # Posterior variance: k(x, x) - k_*^T K^{-1} k_*  (clipped at zero
    # before the sqrt to absorb tiny negative round-off).
    V = np.linalg.solve(K, K_new)
    prior_var = np.array([kernel(x, x) for x in X_new])
    var = prior_var - np.sum(K_new * V, axis=0)
    return mu, np.sqrt(np.maximum(var, 0))


def expected_improvement(X, X_train, y_train, current_best, xi=0.01):
    """
    Expected Improvement (EI) acquisition function for MINIMIZATION.

    The surrounding optimizer tracks ``current_best = min(y_train)`` and
    evaluates the candidate with the largest EI, so "improvement" means the
    posterior mean falling BELOW the current best.  The original code used
    the maximization form ``mu - current_best``, which rewarded candidates
    predicted to be WORSE; the sign is corrected here.

    Parameters
    ----------
    X                : candidate points to score
    X_train, y_train : observed data, forwarded to ``gp_predict``
    current_best     : lowest objective value observed so far
    xi               : exploration margin (larger -> more exploration)
    """
    mu, sigma = gp_predict(X_train, y_train, X)
    sigma = sigma + 1e-8  # avoid division by zero where variance collapses
    improvement = current_best - mu - xi
    Z = improvement / sigma
    ei = improvement * norm.cdf(Z) + sigma * norm.pdf(Z)
    return ei


def bayesian_optimization(objective_func, bounds, n_iter, n_init, plot_convergence=True):
    """
    Minimize a 1-D black-box function with Bayesian optimization.

    Draws ``n_init`` uniform random samples inside ``bounds[0]``, then for
    ``n_iter`` rounds scores 1000 evenly spaced candidates with Expected
    Improvement and evaluates the highest-scoring one.  Optionally plots a
    convergence curve and the sampled points.

    Returns (best_x, best_y): the sample with the lowest objective value.
    """
    X_train = []
    y_train = []
    history = {'x': [], 'y': [], 'best_y': []}  # full sampling trace

    def record(x_val, y_val):
        # Append one observation and track the running best value.
        X_train.append(x_val)
        y_train.append(y_val)
        history['x'].append(x_val)
        history['y'].append(y_val)
        history['best_y'].append(min(y_train))

    lo, hi = bounds[0]

    # Initial random design.
    for _ in range(n_init):
        x = np.random.uniform(lo, hi)
        record(x, objective_func(x))

    # Sequential model-based search.
    for _ in range(n_iter):
        current_best = min(y_train)
        candidates = np.linspace(lo, hi, 1000)
        acquisition = expected_improvement(candidates, X_train, y_train, current_best)
        x_next = candidates[np.argmax(acquisition)]
        record(x_next, objective_func(x_next))

    if plot_convergence:
        plt.figure(figsize=(10, 5))

        # Left panel: best objective value after each evaluation.
        plt.subplot(1, 2, 1)
        plt.plot(history['best_y'], 'b-o', markersize=4)
        plt.xlabel('Iteration')
        plt.ylabel('Best Objective Value')
        plt.title('Convergence Curve')
        plt.grid(True)

        # Right panel: objective curve with every sampled point marked.
        plt.subplot(1, 2, 2)
        xs = np.linspace(lo, hi, 100)
        plt.plot(xs, [objective_func(v) for v in xs], 'k-', label='Objective Function')
        # Initial random samples in red.
        plt.scatter(history['x'][:n_init], history['y'][:n_init],
                    c='r', s=50, label='Initial Samples')
        # Bayesian-optimization samples in blue.
        plt.scatter(history['x'][n_init:], history['y'][n_init:],
                    c='b', s=30, label='BO Samples')
        # Overall best sample as a green star.
        best_idx = np.argmin(history['y'])
        plt.scatter(history['x'][best_idx], history['y'][best_idx],
                    c='g', marker='*', s=200, label='Optimal Solution')
        plt.xlabel('x')
        plt.ylabel('y')
        plt.legend()
        plt.title('Sampling Points')
        plt.grid(True)

        plt.tight_layout()
        plt.show()

    # Return the best observation.
    winner = int(np.argmin(y_train))
    return X_train[winner], y_train[winner]


def best_weight_calculate(alpha, matches=None, teams=None):
    """
    Objective function: negated prediction accuracy of the weighted win
    rate (negated so the Bayesian optimizer can minimize it).

    Parameters
    ----------
    alpha   : weight for the international-match win rate; the World Cup
              win rate gets weight (1 - alpha)
    matches : DataFrame with 'home_team', 'away_team', 'result_n' columns;
              defaults to the module-level ``match_history``
    teams   : dict team -> {'winrate_im', 'winrate_wcm'}; defaults to the
              module-level ``team``

    Returns
    -------
    float -- ``-accuracy`` over matches where both teams are known;
             0.0 when no match is valid (avoids ZeroDivisionError).
    """
    if matches is None:
        matches = match_history
    if teams is None:
        teams = team

    correct = 0
    valid_total = 0  # only count matches where both teams qualified

    for _, match in matches.iterrows():
        home_team_name = match['home_team']
        away_team_name = match['away_team']

        # Skip matches involving a team outside the World Cup finals field.
        if home_team_name not in teams or away_team_name not in teams:
            continue

        valid_total += 1  # valid match count (prior data: 347 matches)
        home_team = teams[home_team_name]
        away_team = teams[away_team_name]
        # Weighted win rate: international weight alpha, World Cup weight 1-alpha.
        home_score = alpha * home_team['winrate_im'] + (1 - alpha) * home_team['winrate_wcm']
        away_score = alpha * away_team['winrate_im'] + (1 - alpha) * away_team['winrate_wcm']

        # Prediction with a draw threshold.
        score_diff = home_score - away_score
        if abs(score_diff) < 0.05:  # gap below 5% is treated as a draw
            pred = -1  # draw
        else:
            # BUG FIX: was ``score_diff > 0.05``, which mislabelled a gap of
            # exactly +0.05 as an away win; any positive gap past the draw
            # threshold is a home win.
            pred = 1 if score_diff > 0 else 0

        # Ground-truth encoding: 1 = home win, 0 = away win, -1 = draw.
        if pred == match['result_n']:
            correct += 1

    if valid_total == 0:
        return 0.0  # no scorable matches -> neutral objective value
    accuracy = correct / valid_total
    return -accuracy  # minimize negative accuracy


# Weight search range: alpha ∈ [0, 1]
weight_bound = [(0, 1)]

# Run Bayesian optimization  52 12
best_alpha, best_score = bayesian_optimization(best_weight_calculate, weight_bound, n_iter=10, n_init=20)
print(f"最优权重 α = {best_alpha:.4f}")
# print(f"最高验证准确率 = {-best_score:.4f}")

# Combined win rate using the Bayesian-optimized weight
# (the alpha that achieved the highest validation accuracy).
total_team_situation['qualify16'] = (
    total_team_situation['winrate_im'] * best_alpha
    + total_team_situation['winrate_wcm'] * (1 - best_alpha)
)

# Special-case teams with no World Cup history (World Cup newcomers):
# when games_wcm == 0, rely on winrate_im with its weight only.
newcomers = total_team_situation['games_wcm'] == 0
total_team_situation.loc[newcomers, 'qualify16'] = total_team_situation['winrate_im'] * best_alpha

# Persist back to the source file (original columns plus the new
# round-of-16 qualification probability).
total_team_situation.to_csv(group_match_analysis_file_path, index=False)

# Rank within each group and sort.
total_team_situation['group_rank'] = (
    total_team_situation.groupby('Group')['qualify16'].rank(ascending=False, method='min')
)
total_team_situation = total_team_situation.sort_values(['Group', 'group_rank'])
total_team_situation.to_csv(group_match_analysis_file_path, index=False)

# Top two of every group.
top2_per_group = (
    total_team_situation.sort_values(['Group', 'qualify16'], ascending=[True, False])
    .groupby('Group')
    .head(2)
)

# The round of 16 is born: keys like "A1"/"A2" per group.
top_teams = {}
for group in sorted(top2_per_group['Group'].unique()):
    group_rows = top2_per_group[top2_per_group['Group'] == group]
    for place in (1, 2):
        row = group_rows.iloc[place - 1]
        top_teams[f"{group}{place}"] = {
            'team': row['Team'],
            'qualify16': row['qualify16'],
            'fifa_rank': row['FIFA Ranking'],
        }

print(top_teams)
