import os
import pandas as pd
import numpy as np
import json
import logging
from aif360.datasets import BinaryLabelDataset
from aif360.metrics import BinaryLabelDatasetMetric

class BiasDetector:
    """
    Bias detector built on the IBM AI Fairness 360 framework.

    Analyzes potential bias in video scoring results by comparing selection
    rates across protected attributes (gender and age group), applying the
    industry-standard "80% rule" for disparate impact.
    """
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        # Acceptable disparate-impact range under the 80% rule: a ratio
        # below 0.8 or above 1.25 (= 1/0.8) flags potential bias.
        self.acceptable_threshold_lower = 0.8  # minimum acceptable impact ratio
        self.acceptable_threshold_upper = 1.25  # maximum acceptable impact ratio
        
    def _prepare_candidate_data(self, scores_data, demographics_data):
        """
        Assemble a single-row DataFrame combining one candidate's scores,
        flattened feature values and demographic attributes.

        Args:
            scores_data (dict): Scoring payload with 'video_id',
                'overall_score', 'component_scores' and optional 'features'.
            demographics_data (dict): Demographic info; 'gender' and
                'age_group' fall back to 'unknown' when absent.

        Returns:
            pandas.DataFrame: One row with a binary 'selected' column;
            an empty DataFrame on any error.
        """
        try:
            # Build the row in one pass: identity/demographics first, then scores.
            row = {
                'candidate_id': [scores_data['video_id']],
                'gender': [demographics_data.get('gender', 'unknown')],
                'age_group': [demographics_data.get('age_group', 'unknown')],
                'overall_score': [scores_data['overall_score']],
                'visual_score': [scores_data['component_scores']['visual']],
                'audio_score': [scores_data['component_scores']['audio']],
                'content_score': [scores_data['component_scores']['content']],
            }

            # Flatten the per-modality feature dicts into prefixed columns.
            features = scores_data.get('features', {})
            for modality in ('visual', 'audio', 'content'):
                for feat_name, feat_value in features.get(modality, {}).items():
                    row[f'{modality}_{feat_name}'] = [feat_value]

            candidate_df = pd.DataFrame(row)

            # Binary selection flag: strictly above 60 counts as passing.
            threshold = 60
            candidate_df['selected'] = (candidate_df['overall_score'] > threshold).astype(int)

            return candidate_df

        except Exception as e:
            self.logger.error(f"准备候选人数据时出错: {str(e)}")
            return pd.DataFrame()
    
    def check_bias(self, scores_data_list, demographics_data_dict, protected_attributes=None):
        """
        Run a bias audit of the scoring system over a pool of candidates.

        Args:
            scores_data_list (list): Score payloads for every candidate.
            demographics_data_dict (dict): Demographic info keyed by candidate id.
            protected_attributes (list, optional): Attributes to audit;
                defaults to ['gender', 'age_group'].

        Returns:
            dict: Per-attribute bias metrics, or an error payload when no
            usable candidate data is available.
        """
        attributes = protected_attributes if protected_attributes is not None else ['gender', 'age_group']

        # Build one single-row frame per candidate, skipping failed preparations.
        frames = []
        for score_data in scores_data_list:
            demographics = demographics_data_dict.get(score_data['video_id'], {})
            frame = self._prepare_candidate_data(score_data, demographics)
            if not frame.empty:
                frames.append(frame)

        if not frames:
            self.logger.warning("没有有效的候选人数据用于偏见分析")
            return {"error": "无有效数据用于分析"}

        combined_df = pd.concat(frames, ignore_index=True)

        bias_results = {}
        for attribute in attributes:
            # An attribute needs at least two distinct values to compare groups.
            usable = attribute in combined_df.columns and combined_df[attribute].nunique() >= 2
            if usable:
                bias_results[attribute] = self._check_bias_for_attribute(combined_df, attribute)
            else:
                bias_results[attribute] = {
                    "error": f"属性 {attribute} 不存在或值不足以进行分析"
                }

        return bias_results
    
    def _check_bias_for_attribute(self, candidate_data, protected_attribute):
        """
        针对特定受保护属性检查偏见
        
        Args:
            candidate_data (pandas.DataFrame): 候选人数据
            protected_attribute (str): 受保护属性名称
            
        Returns:
            dict: 偏见分析结果
        """
        try:
            # 准备数据集
            label_name = 'selected'
            protected_attribute_names = [protected_attribute]
            
            # 去除包含未知值的行
            filtered_data = candidate_data[~candidate_data[protected_attribute].isin(['unknown', np.nan])]
            if filtered_data.empty:
                return {"error": f"过滤后无有效 {protected_attribute} 数据"}
            
            # 创建二元标签数据集
            dataset = BinaryLabelDataset(
                df=filtered_data,
                label_names=[label_name],
                protected_attribute_names=protected_attribute_names,
                favorable_label=1,
                unfavorable_label=0
            )
            
            # 确定特权和非特权群体
            # 我们假设数量最多的群体为特权群体
            privileged_value = filtered_data[protected_attribute].value_counts().idxmax()
            unprivileged_values = [value for value in filtered_data[protected_attribute].unique() if value != privileged_value]
            
            # 计算指标
            metrics = BinaryLabelDatasetMetric(
                dataset,
                unprivileged_groups=[{protected_attribute: value} for value in unprivileged_values],
                privileged_groups=[{protected_attribute: privileged_value}]
            )
            
            # 计算差异影响比率和统计平等差异
            disparate_impact = metrics.disparate_impact()
            statistical_parity_difference = metrics.statistical_parity_difference()
            
            # 分析各群体的选择率
            selection_rates = {}
            for value in filtered_data[protected_attribute].unique():
                group_data = filtered_data[filtered_data[protected_attribute] == value]
                if not group_data.empty:
                    selection_rate = group_data[label_name].mean()
                    selection_rates[value] = float(selection_rate)
            
            # 确定是否存在偏见
            has_bias = (disparate_impact < self.acceptable_threshold_lower or 
                       disparate_impact > self.acceptable_threshold_upper)
            
            # 准备建议
            recommendations = []
            if has_bias:
                if disparate_impact < self.acceptable_threshold_lower:
                    recommendations.append(f"{protected_attribute.capitalize()} 群体 {', '.join(unprivileged_values)} 的选择率显著低于 {privileged_value} 群体")
                    recommendations.append("考虑重新审视评分标准，确保它们不会系统性地对某些群体产生不利影响")
                elif disparate_impact > self.acceptable_threshold_upper:
                    recommendations.append(f"{protected_attribute.capitalize()} 群体 {', '.join(unprivileged_values)} 的选择率显著高于 {privileged_value} 群体")
                    recommendations.append("审查评分标准，确保它们能公平地评估所有群体")
                
                recommendations.append("考虑应用偏见缓解技术，如重采样或调整阈值")
            else:
                recommendations.append(f"评分系统在 {protected_attribute} 方面相对公平")
            
            return {
                "disparate_impact": float(disparate_impact),
                "statistical_parity_difference": float(statistical_parity_difference),
                "selection_rates": selection_rates,
                "privileged_group": privileged_value,
                "unprivileged_groups": unprivileged_values,
                "has_bias": has_bias,
                "bias_level": "高" if abs(1 - disparate_impact) > 0.4 else ("中" if abs(1 - disparate_impact) > 0.2 else "低"),
                "recommendations": recommendations
            }
        except Exception as e:
            self.logger.error(f"分析 {protected_attribute} 属性偏见时出错: {str(e)}")
            return {"error": str(e)}
    
    def save_bias_report(self, bias_results, output_dir):
        """
        保存偏见分析报告
        
        Args:
            bias_results (dict): 偏见分析结果
            output_dir (str): 输出目录
        """
        try:
            os.makedirs(output_dir, exist_ok=True)
            
            # 保存JSON报告
            report_path = os.path.join(output_dir, 'bias_report.json')
            with open(report_path, 'w', encoding='utf-8') as f:
                json.dump(bias_results, f, indent=2, ensure_ascii=False)
                
            # 生成HTML报告
            html_content = self._generate_html_report(bias_results)
            html_path = os.path.join(output_dir, 'bias_report.html')
            with open(html_path, 'w', encoding='utf-8') as f:
                f.write(html_content)
                
            self.logger.info(f"偏见分析报告已保存至 {output_dir}")
            return {"json_path": report_path, "html_path": html_path}
        except Exception as e:
            self.logger.error(f"保存偏见报告时出错: {str(e)}")
            return {"error": str(e)}
    
    def _generate_html_report(self, bias_results):
        """
        Render the bias analysis results as a standalone HTML document.

        Entries in bias_results may be metric dicts or {"error": ...}
        payloads; error entries get a minimal card with the message.

        Args:
            bias_results (dict): Per-attribute results from check_bias().

        Returns:
            str: Complete HTML report content (report text is in Chinese).
        """
        # Static document head with inline CSS for the report cards/tables.
        html = """
        <!DOCTYPE html>
        <html>
        <head>
            <meta charset="UTF-8">
            <title>偏见分析报告</title>
            <style>
                body { font-family: Arial, sans-serif; line-height: 1.6; margin: 0; padding: 20px; color: #333; }
                h1 { color: #2c3e50; border-bottom: 2px solid #eee; padding-bottom: 10px; }
                h2 { color: #3498db; margin-top: 20px; }
                .card { background: #f8f9fa; border-radius: 8px; padding: 15px; margin-bottom: 20px; box-shadow: 0 2px 4px rgba(0,0,0,0.1); }
                .bias-high { color: #e74c3c; }
                .bias-medium { color: #f39c12; }
                .bias-low { color: #27ae60; }
                .no-bias { color: #27ae60; }
                .metric { font-weight: bold; margin-right: 5px; }
                table { width: 100%; border-collapse: collapse; margin: 15px 0; }
                th, td { padding: 12px 15px; text-align: left; border-bottom: 1px solid #ddd; }
                th { background-color: #f2f2f2; }
                ul { padding-left: 20px; }
                .chart-container { height: 250px; margin: 20px 0; }
            </style>
        </head>
        <body>
            <h1>偏见分析报告</h1>
        """
        
        for attribute, results in bias_results.items():
            # Attributes that failed analysis render as a simple error card.
            if "error" in results:
                html += f"""
                <div class="card">
                    <h2>{attribute} 分析</h2>
                    <p>错误: {results['error']}</p>
                </div>
                """
                continue
                
            # Map the bias level ("高"/"中"/other) to a CSS color class.
            bias_class = ""
            if results.get("has_bias", False):
                bias_level = results.get("bias_level", "")
                if bias_level == "高":
                    bias_class = "bias-high"
                elif bias_level == "中":
                    bias_class = "bias-medium"
                else:
                    bias_class = "bias-low"
            else:
                bias_class = "no-bias"
                
            # Metric summary plus the per-group selection-rate table header.
            html += f"""
            <div class="card">
                <h2>{attribute.capitalize()} 分析</h2>
                <p><span class="metric">差异影响比率:</span> <span class="{bias_class}">{results['disparate_impact']:.4f}</span> 
                   (<span class="{bias_class}">{results.get('bias_level', '未知')}偏见</span>)</p>
                <p><span class="metric">统计平等差异:</span> {results['statistical_parity_difference']:.4f}</p>
                
                <h3>各群体选择率</h3>
                <table>
                    <tr>
                        <th>群体</th>
                        <th>选择率</th>
                        <th>状态</th>
                    </tr>
            """
            
            selection_rates = results.get('selection_rates', {})
            privileged_group = results.get('privileged_group', '')
            
            # One table row per group, flagging the privileged group.
            for group, rate in selection_rates.items():
                status = "特权群体" if group == privileged_group else "非特权群体"
                html += f"""
                    <tr>
                        <td>{group}</td>
                        <td>{rate:.2%}</td>
                        <td>{status}</td>
                    </tr>
                """
                
            html += """
                </table>
                
                <h3>建议</h3>
                <ul>
            """
            
            # Recommendation bullet list for this attribute.
            for recommendation in results.get('recommendations', []):
                html += f"<li>{recommendation}</li>"
                
            html += """
                </ul>
            </div>
            """
            
        html += """
        </body>
        </html>
        """
        
        return html

# Helper that fabricates demographic data for the demo/testing below
def create_sample_demographics_data(video_ids, random_seed=42):
    """
    Build example demographic records for testing.

    Args:
        video_ids (list): Video identifiers to generate records for.
        random_seed (int): Seed for reproducible sampling.

    Returns:
        dict: Maps each video id to {'gender', 'age_group'}.
    """
    np.random.seed(random_seed)

    # One gender draw then one age-group draw per id, in input order,
    # so the output is reproducible for a given seed.
    return {
        video_id: {
            'gender': np.random.choice(['male', 'female']),
            'age_group': np.random.choice(['20-30', '30-40', '40+']),
        }
        for video_id in video_ids
    }

# Example usage: generate biased synthetic data, audit it, save reports.
if __name__ == "__main__":
    # Generate simulated score data for 100 candidates.
    scores_list = []
    for i in range(1, 101):
        video_id = f"candidate_{i}"
        
        # Inject a gender-based bias for demonstration purposes.
        gender = "male" if i % 3 == 0 else "female"
        gender_bias = 1.2 if gender == "male" else 0.9
        
        # Inject an age-based bias as well.
        age_group = "20-30" if i % 5 == 0 else ("30-40" if i % 5 == 1 else "40+")
        age_bias = 1.1 if age_group == "30-40" else 0.95
        
        # Component scores: noisy draws scaled by the bias factors, clamped to [0, 100].
        visual_score = min(100, max(0, np.random.normal(70, 15) * gender_bias * 0.8))
        audio_score = min(100, max(0, np.random.normal(65, 20) * age_bias))
        content_score = min(100, max(0, np.random.normal(75, 10) * gender_bias * age_bias))
        
        # Weighted overall score (visual 30%, audio 30%, content 40%).
        overall_score = (visual_score * 0.3 + audio_score * 0.3 + content_score * 0.4)
        
        # Assemble the score payload in the shape _prepare_candidate_data expects.
        score_data = {
            "video_id": video_id,
            "overall_score": overall_score,
            "component_scores": {
                "visual": visual_score,
                "audio": audio_score,
                "content": content_score
            },
            "features": {
                "visual": {
                    "eye_contact": np.random.uniform(0.5, 1.0) * gender_bias,
                    "posture": np.random.uniform(0.4, 0.9),
                    "expression_variation": np.random.uniform(0.3, 0.8)
                },
                "audio": {
                    "speaking_rate": np.random.uniform(0.6, 0.9) * age_bias,
                    "pitch_variation": np.random.uniform(0.4, 0.8),
                    "volume_variation": np.random.uniform(0.5, 0.9),
                    "clarity": np.random.uniform(0.5, 0.9)
                },
                "content": {
                    "keyword_relevance": np.random.uniform(0.6, 0.9),
                    "confidence": np.random.uniform(0.5, 1.0) * gender_bias,
                    "clarity": np.random.uniform(0.6, 0.95) * age_bias
                }
            }
        }
        
        scores_list.append(score_data)
    
    # Build matching demographic records for every generated candidate.
    video_ids = [score["video_id"] for score in scores_list]
    demographics_data = create_sample_demographics_data(video_ids)
    
    # Run the bias audit.
    detector = BiasDetector()
    bias_results = detector.check_bias(scores_list, demographics_data)
    
    # Print a console summary of the results.
    print("\n===== 偏见分析结果 =====")
    for attribute, results in bias_results.items():
        print(f"\n{attribute.upper()} 分析:")
        if "error" in results:
            print(f"  错误: {results['error']}")
            continue
            
        print(f"  差异影响比率: {results['disparate_impact']:.4f}")
        print(f"  统计平等差异: {results['statistical_parity_difference']:.4f}")
        print("  各群体选择率:")
        for group, rate in results['selection_rates'].items():
            print(f"    {group}: {rate:.2%}")
        
        print("  建议:")
        for recommendation in results['recommendations']:
            print(f"    - {recommendation}")
    
    # Save the JSON/HTML reports to disk.
    output_dir = "bias_analysis_results"
    detector.save_bias_report(bias_results, output_dir)
    print(f"\n偏见分析报告已保存至 {output_dir}")
