import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import  RobustScaler
# Load the cleaned player dataset once at import time; the analysis
# functions below read from this module-level DataFrame by default.
df = pd.read_csv("Player_cleaned.csv", encoding="UTF-8")
def process_data(data=None):
    """Derive the plotting/analysis datasets from the player DataFrame.

    Parameters:
        data: optional DataFrame to analyse; defaults to the module-level
            ``df`` loaded from Player_cleaned.csv.  Must contain the columns
            height, weight, born, birth_state, BLK%, OWS, DWS, TRB%, PTS.

    Returns:
        (sorted_bmi, sorted_by, sorted_bs, height_BLK_data, OWS_DWS_data,
         weight_TRB_data, PTS_data, height_TRB_data, bmi_TRB_data)
    """
    if data is None:
        data = df
    dl = len(data)

    # --- BMI distribution -------------------------------------------------
    heights = data['height'].tolist()   # height in cm
    weights = data['weight'].tolist()   # weight in kg

    bmi = {}
    for h1, w1 in zip(heights, weights):
        key = round(w1 / pow(h1 / 100, 2))
        # BUG FIX: the original reset the count to 1 in the "already seen"
        # branch, so every BMI bucket was stuck at 1.
        bmi[key] = bmi.get(key, 0) + 1

    # Catch-all buckets for the distribution tails.
    bmi["<=20.0"] = 0
    bmi[">=27.0"] = 0
    keys_to_remove = []
    for key in bmi:
        if key in ("<=20.0", ">=27.0"):
            continue
        # Keys produced by round() are ints, so they compare directly.
        if key >= 27.0:
            bmi[">=27.0"] += bmi[key]
            keys_to_remove.append(key)
        elif key <= 20.0:
            bmi["<=20.0"] += bmi[key]
            keys_to_remove.append(key)
    for key in keys_to_remove:
        bmi.pop(key)

    # Convert counts to proportions of the whole dataset.
    for key in bmi:
        bmi[key] /= dl

    # Rebuild in display order: low tail, ascending middle keys, high tail.
    middle_keys = sorted(k for k in bmi if k not in ("<=20.0", ">=27.0"))
    sorted_bmi = {"<=20.0": bmi["<=20.0"]}
    for k in middle_keys:
        sorted_bmi[k] = bmi[k]
    sorted_bmi[">=27.0"] = bmi[">=27.0"]

    h_aver = sum(heights) / dl / 100   # average height in metres
    w_aver = sum(weights) / dl         # average weight in kg
    print("NBA运动员的平均身高为:{:.1f}m,平均体重为:{:.1f}kg".format(h_aver, w_aver))

    # --- birth year / birth state distributions ---------------------------
    sorted_by = dict(sorted(data['born'].value_counts().to_dict().items()))
    sorted_bs = dict(sorted(data['birth_state'].value_counts().to_dict().items()))
    for key in sorted_bs:
        sorted_bs[key] /= dl

    # Merge states that account for no more than 1% into an "others" bucket.
    sorted_bs["others"] = 0
    for key in list(sorted_bs.keys()):
        if key == "others":
            # BUG FIX: the original loop also visited "others" and, because
            # its value can be <= 0.01, popped the freshly merged bucket,
            # losing the merged probability mass.
            continue
        if sorted_bs[key] <= 0.01:
            sorted_bs["others"] += sorted_bs[key]
            sorted_bs.pop(key)
    if sorted_bs["others"] == 0:
        # Preserve the original behaviour of dropping an empty catch-all.
        sorted_bs.pop("others")

    # --- per-plot column slices -------------------------------------------
    height_BLK_data = data[['height', 'BLK%']].dropna()
    OWS_DWS_data = data[['OWS', 'DWS']]
    weight_TRB_data = data[['weight', 'TRB%']]
    PTS_data = data['PTS']
    height_TRB_data = data[['height', 'TRB%']]
    # .copy() avoids pandas' SettingWithCopyWarning (and an unreliable
    # write) when adding the derived 'bmi' column to a column slice.
    height_weight_TRB_data = data[['height', 'weight', 'TRB%']].copy()
    height_weight_TRB_data['bmi'] = (
        height_weight_TRB_data['weight']
        / ((height_weight_TRB_data['height'] / 100) ** 2)
    )
    bmi_TRB_data = height_weight_TRB_data[['bmi', 'TRB%']]

    return (sorted_bmi, sorted_by, sorted_bs, height_BLK_data, OWS_DWS_data,
            weight_TRB_data, PTS_data, height_TRB_data, bmi_TRB_data)

def perform_ows_dws_clustering(data, n_clusters=4):
    """Cluster players by offensive (OWS) and defensive (DWS) win shares.

    Parameters:
        data: DataFrame containing 'OWS' and 'DWS' columns.
        n_clusters: number of K-means clusters (default 4).

    Returns:
        (data_with_clusters, cluster_centers_df) — the input rows with an
        added 'cluster' label, and a DataFrame of cluster centers in the
        original OWS/DWS units with a textual 'description' per cluster.
    """
    feature_frame = data[['OWS', 'DWS']].copy()

    # Robust scaling limits the influence of outlier seasons on K-means.
    scaler = RobustScaler()
    scaled = scaler.fit_transform(feature_frame)

    model = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
    labels = model.fit_predict(scaled)

    # Attach the cluster label to a copy of the caller's data.
    labelled = data.copy()
    labelled['cluster'] = labels

    # Map the centers back from scaled space to original OWS/DWS units.
    centers = pd.DataFrame(
        scaler.inverse_transform(model.cluster_centers_),
        columns=['OWS', 'DWS'],
    )
    centers['cluster'] = range(n_clusters)

    def _describe(row):
        # The sign of each win-share component determines the archetype.
        if row['OWS'] > 0:
            return "攻守兼备型" if row['DWS'] > 0 else "进攻专精型"
        return "防守专精型" if row['DWS'] > 0 else "待发展型"

    centers['description'] = centers.apply(_describe, axis=1)

    return labelled, centers

def get_position_stats(df1=None):
    """Compute normalised (0-1) average stats for each playing position.

    Parameters:
        df1: optional player DataFrame with a 'position' column and the
            stat columns below; defaults to the module-level ``df``.

    Returns:
        (position_stats, stats_columns) — a dict mapping each position
        label to {stat: min-max-normalised mean}, and the list of stats.
    """
    if df1 is None:
        df1 = df

    # Metrics profiled for every position.
    stats_columns = ['PER', 'TS%', '3P%', '2P%', 'FT%', 'TRB%', 'AST%']

    # Hybrid positions are expanded to the pure positions they combine.
    position_mapping = {
        'G-F': ['F', 'G'],
        'C-F': ['F', 'C'],
        'F-C': ['F', 'C'],
        'F-G': ['F', 'G'],
        'C': ['C'],
        'G': ['G'],
        'F': ['F'],
    }

    position_stats = {}

    # League-wide ranges used for min-max normalisation.
    # BUG FIX: the original body read the module-level ``df`` everywhere,
    # silently ignoring the df1 argument.
    all_players_min = df1[stats_columns].min()
    all_players_max = df1[stats_columns].max()

    for position, pure_positions in position_mapping.items():
        # isin() covers both the single- and multi-position cases.
        mask = df1['position'].isin(pure_positions)
        avg_stats = df1[mask][stats_columns].mean().to_dict()

        normalized_stats = {}
        for stat, value in avg_stats.items():
            stat_range = all_players_max[stat] - all_players_min[stat]
            if pd.isna(value) or stat_range == 0:
                # No players at this position, or a constant column:
                # fall back to 0 instead of NaN / division by zero.
                normalized_stats[stat] = 0
            else:
                normalized_stats[stat] = (value - all_players_min[stat]) / stat_range

        position_stats[position] = normalized_stats

    return position_stats, stats_columns