import pandas as pd
import numpy as np
from scipy.stats import pearsonr, pointbiserialr  # 新增点二列相关系数

def load_detailed_data(file_path):
    """Load the per-record dataset containing match scores and satisfaction labels.

    Expects a CSV with columns user_id, movie_id, match_score, satisfaction.
    Returns the DataFrame on success, or None when the file is missing,
    unreadable, or lacks any required column (an error is printed in each case).
    """
    required = ['user_id', 'movie_id', 'match_score', 'satisfaction']
    try:
        frame = pd.read_csv(file_path)
    except FileNotFoundError:
        print(f"错误：未找到详细数据文件 {file_path}")
        return None
    except Exception as e:
        # Best-effort loader: report and signal failure rather than crash.
        print(f"加载详细数据出错：{str(e)}")
        return None

    absent = [name for name in required if name not in frame.columns]
    if absent:
        print(f"错误：详细数据文件缺少必要列：{', '.join(absent)}")
        return None

    print(f"成功加载详细数据：{len(frame)} 条记录")
    return frame

def load_ctr_data(file_path):
    """Load the per-user summary dataset containing click-through rate (CTR).

    Expects a CSV with at least user_id and ctr columns (CTR assumed to be
    aggregated per user). Returns the DataFrame on success, or None when the
    file is missing, unreadable, or lacks a required column.
    """
    required = ['user_id', 'ctr']
    try:
        frame = pd.read_csv(file_path)
    except FileNotFoundError:
        print(f"错误：未找到CTR数据文件 {file_path}")
        return None
    except Exception as e:
        # Best-effort loader: report and signal failure rather than crash.
        print(f"加载CTR数据出错：{str(e)}")
        return None

    absent = [name for name in required if name not in frame.columns]
    if absent:
        print(f"错误：CTR数据文件缺少必要列：{', '.join(absent)}")
        return None

    print(f"成功加载CTR数据：{len(frame)} 条记录")
    return frame

def merge_data(detailed_df, ctr_df):
    """Join per-record data with per-user CTR via an inner merge on user_id.

    CTR rows are de-duplicated first so each user contributes exactly one CTR
    value. Returns the merged DataFrame; rows whose user has no CTR record are
    dropped by the inner join, and a warning is printed when fewer than two
    rows survive (too few for a correlation).
    """
    # One CTR row per user (keeps the first occurrence).
    per_user_ctr = ctr_df.drop_duplicates(subset=['user_id']).copy()

    joined = detailed_df.merge(
        per_user_ctr[['user_id', 'ctr']],
        on='user_id',
        how='inner',
    )

    print(f"合并后数据量：{len(joined)} 条记录（成功关联CTR的样本）")
    if len(joined) < 2:
        print("警告：合并后数据量不足，可能影响相关性计算")

    return joined

def is_constant(arr):
    """Return True when every element of *arr* equals its first element.

    Accepts any 1-D sequence (numpy array, pandas Series, plain list). An
    empty input is treated as constant (vacuously true), which lets callers
    skip the correlation instead of crashing.

    Fixes two defects in the original:
    - ``arr[0]`` on a pandas Series is *label*-based indexing and raised
      KeyError whenever the index lacked the label 0 (e.g. after filtering);
      ``np.asarray`` makes the lookup positional.
    - An empty input raised IndexError.
    """
    values = np.asarray(arr)
    if values.size == 0:
        return True
    return bool(np.all(values == values[0]))

def calculate_correlations(merged_df):
    """Compute pairwise correlations between match score, satisfaction and CTR.

    Variable treatment:
    - match_score (continuous) vs ctr (continuous)  -> Pearson
    - satisfaction (discrete)  vs ctr (continuous)  -> point-biserial
    - match_score (continuous) vs satisfaction      -> point-biserial

    NOTE(review): ``pointbiserialr`` assumes a *dichotomous* variable, while
    satisfaction reportedly takes three levels (-1, 0, 1); the coefficient is
    then simply Pearson's r on the raw codes — confirm this treatment is
    intended (Spearman may be more appropriate for a 3-level ordinal).

    Returns a dict keyed by pair name; each entry has 'correlation' and
    'p_value' (rounded to 3 decimals, NaN when not computable) plus a
    'method' label. A coefficient is left NaN when either variable has no
    variation or fewer than two valid rows remain.

    Improvements over the original:
    - rows with NaN in any analysed column are dropped up front (scipy
      propagates NaN, which silently poisoned every coefficient);
    - the redundant ``len(np.unique(...)) > 1`` re-check (a duplicate of the
      constant check) was removed;
    - the constancy check is inlined and positional, so it is safe for any
      DataFrame index and for empty input.
    """
    # Drop incomplete rows once, for all three columns together.
    clean = merged_df[['match_score', 'satisfaction', 'ctr']].dropna()
    match_scores = clean['match_score']          # continuous
    satisfaction_scores = clean['satisfaction']  # discrete (-1, 0, 1)
    ctr_values = clean['ctr']                    # continuous

    def _varies(series):
        """True when the series has >= 2 rows and at least two distinct values."""
        values = series.to_numpy()
        return values.size >= 2 and not np.all(values == values[0])

    # Defaults: NaN signals "not computable" to the reporting layer.
    corr_match_ctr, p_match = np.nan, np.nan
    corr_sat_ctr, p_sat = np.nan, np.nan
    corr_match_sat, p_both = np.nan, np.nan

    # 1. Semantic match score (continuous) vs CTR (continuous) -> Pearson.
    if _varies(match_scores) and _varies(ctr_values):
        corr_match_ctr, p_match = pearsonr(match_scores, ctr_values)

    # 2. Satisfaction (discrete) vs CTR (continuous) -> point-biserial.
    if _varies(satisfaction_scores) and _varies(ctr_values):
        corr_sat_ctr, p_sat = pointbiserialr(satisfaction_scores, ctr_values)

    # 3. Match score (continuous) vs satisfaction (discrete) -> point-biserial.
    if _varies(match_scores) and _varies(satisfaction_scores):
        corr_match_sat, p_both = pointbiserialr(match_scores, satisfaction_scores)

    def _rounded(value):
        # round(nan) is nan anyway; the guard just makes the intent explicit.
        return np.nan if np.isnan(value) else round(value, 3)

    return {
        'match_ctr': {
            'correlation': _rounded(corr_match_ctr),
            'p_value': _rounded(p_match),
            'method': '皮尔逊相关'
        },
        'satisfaction_ctr': {
            'correlation': _rounded(corr_sat_ctr),
            'p_value': _rounded(p_sat),
            'method': '点二列相关'
        },
        'match_satisfaction': {
            'correlation': _rounded(corr_match_sat),
            'p_value': _rounded(p_both),
            'method': '点二列相关'
        }
    }

def print_correlation_results(results):
    """Print the three correlation results, labelling the method used for each.

    Does nothing when *results* is falsy (e.g. None or empty dict).
    """
    if not results:
        return
    print("\n=== 相关系数分析结果 ===")

    # (result key, section heading) — leading "\n" separates sections 2 and 3.
    sections = [
        ('match_ctr', "1. 语义匹配度与点击率(CTR)："),
        ('satisfaction_ctr', "\n2. 情感满意度与点击率(CTR)："),
        ('match_satisfaction', "\n3. 语义匹配度与情感满意度："),
    ]
    for key, heading in sections:
        entry = results[key]
        print(heading)
        print(f"   相关系数（{entry['method']}）：{entry['correlation']}（p值：{entry['p_value']}）")
        print(get_correlation_interpretation(entry['correlation'], entry['p_value']))

def get_correlation_interpretation(correlation, p_value):
    """Translate an (r, p) pair into a human-readable significance note.

    NaN in either value means the coefficient could not be computed (constant
    input); otherwise significance is judged at alpha = 0.05 and strength is
    bucketed by |r| at the conventional 0.3 / 0.7 cut-offs.
    """
    # Not computable: at least one variable had no variation.
    if np.isnan(correlation) or np.isnan(p_value):
        return "   解释：因至少一个变量为常数（无波动）或离散值唯一，无法计算相关系数"

    # Not significant at the 5% level.
    if p_value >= 0.05:
        return "   解释：无统计显著性（p值≥0.05）"

    # Significant: classify strength from strongest bucket downwards.
    strength = abs(correlation)
    if strength >= 0.7:
        return "   解释：显著强相关（|r|≥0.7，p<0.05）"
    if strength >= 0.3:
        return "   解释：显著中等相关（0.3≤|r|<0.7，p<0.05）"
    return "   解释：显著弱相关（|r|<0.3，p<0.05）"

def main():
    """End-to-end driver: load both data files, join them, run the analysis."""
    # Input paths: per-record details plus a per-user CTR summary.
    detailed_path = "results/lightgcn_detailed_results copy.csv"
    ctr_path = "recommendation_results/random50_users_lightgcn_summary.csv"

    detailed = load_detailed_data(detailed_path)
    ctr = load_ctr_data(ctr_path)
    if detailed is None or ctr is None:
        return  # abort on any load failure (errors already printed)

    merged = merge_data(detailed, ctr)
    if merged is None or len(merged) < 2:
        return  # too few joined rows for a meaningful correlation

    print_correlation_results(calculate_correlations(merged))

if __name__ == "__main__":
    main()