import json
import pandas as pd
import numpy as np
from typing import List, Dict, Any

class CommentProcessor:
    """Converts raw comment rows from a CSV file into agent profile dicts."""

    def __init__(self, csv_path: str):
        """
        Initialize the processor.

        Args:
            csv_path: Path to the comment-data CSV file.
        """
        self.df = pd.read_csv(csv_path)

    @staticmethod
    def _num(value: Any, default: Any) -> Any:
        """Return *value*, or *default* when the cell is missing/NaN.

        `Series.get(col, default)` only falls back when the column is absent;
        a present-but-NaN cell slips through and would poison the arithmetic
        downstream, so guard explicitly.
        """
        return default if pd.isna(value) else value

    def _map_sentiment_to_opinion(self, sentiment_score: float) -> float:
        """Map a sentiment score in [0, 1] linearly onto an opinion in [-1, 1]."""
        return (sentiment_score * 2) - 1

    def _calculate_confidence(self, like_counts: int, reply_counts: int, word_count: int) -> float:
        """
        Compute a confidence score from engagement data.

        Each signal is normalized against its column-wide maximum, combined
        with fixed weights, and the result is clamped to [0.3, 0.9].
        """
        max_likes = self.df['like_counts'].max()
        max_replies = self.df['reply_counts'].max()
        max_words = self.df['word_count'].max()

        # Relative weights: engagement (likes/replies) dominates text length.
        likes_weight = 0.4
        replies_weight = 0.4
        words_weight = 0.2

        # Normalize so rows are comparable; empty/zero columns contribute 0.
        normalized_likes = like_counts / max_likes if max_likes > 0 else 0
        normalized_replies = reply_counts / max_replies if max_replies > 0 else 0
        normalized_words = word_count / max_words if max_words > 0 else 0

        confidence = (
            normalized_likes * likes_weight +
            normalized_replies * replies_weight +
            normalized_words * words_weight
        )

        # Keep confidence inside a sensible band.
        return max(0.3, min(0.9, confidence))

    def _calculate_self_censorship(self, word_count: int, segment_count: int) -> float:
        """
        Estimate self-censorship from text features.

        Heuristic: shorter average segments suggest more guarded expression.
        Result is clamped to [0.2, 0.8].
        """
        words_per_segment = word_count / segment_count if segment_count > 0 else 0

        # Bucket by average words per segment.
        if words_per_segment < 5:
            censorship = 0.8  # very guarded
        elif words_per_segment < 10:
            censorship = 0.6  # somewhat guarded
        else:
            censorship = 0.4  # relatively open

        return max(0.2, min(0.8, censorship))

    def _generate_learned_description(self, keywords: str, cluster: int) -> str:
        """Build a user-background description from cluster label and keywords."""
        # Per-cluster base descriptions (user-facing strings, kept verbatim).
        cluster_descriptions = {
            0: "支持华为的用户，关注智能驾驶和5G技术",
            1: "对华为持批评态度的用户，关注产品质量问题",
            2: "中立用户，关注行业动态",
            3: "小米支持者，关注品牌竞争"
        }

        base_description = cluster_descriptions.get(cluster, "普通用户")
        # keywords may be NaN (float) when the CSV cell is empty; only
        # str values carry usable tokens.
        if isinstance(keywords, str):
            keywords_list = keywords.split()
            if len(keywords_list) > 0:
                return f"{base_description}，关注{', '.join(keywords_list[:3])}"

        return base_description

    def process_comments(self, num_profiles: int = 100) -> List[Dict[str, Any]]:
        """
        Process comment data and generate agent profiles.

        Args:
            num_profiles: Number of profiles to generate.

        Returns:
            List[Dict]: The generated profile dicts.
        """
        profiles = []

        # If there are fewer rows than requested, tile the data to cover demand.
        if len(self.df) < num_profiles:
            repeat_times = (num_profiles // len(self.df)) + 1
            self.df = pd.concat([self.df] * repeat_times, ignore_index=True)

        # Draw the requested number of comments at random.
        selected_comments = self.df.sample(n=num_profiles, replace=True)

        # Enumerate to guarantee unique profile names: with replace=True the
        # same DataFrame index can be drawn twice, so naming by index would
        # produce colliding "User_<idx>" entries.
        for seq, (_, row) in enumerate(selected_comments.iterrows()):
            sentiment = self._num(row.get('sentiment_score', 0.5), 0.5)
            profile = {
                "name": f"User_{seq}",
                "age": int(np.random.randint(18, 65)),  # age stays random
                "learned": self._generate_learned_description(row.get('keywords', ''), row.get('new_cluster', 0)),
                "care": f"评论内容：{row.get('keywords', '')}",
                "initial_opinion": self._map_sentiment_to_opinion(sentiment),
                "confidence": self._calculate_confidence(
                    self._num(row.get('like_counts', 0), 0),
                    self._num(row.get('reply_counts', 0), 0),
                    self._num(row.get('word_count', 0), 0)
                ),
                "self_censorship": self._calculate_self_censorship(
                    self._num(row.get('word_count', 0), 0),
                    self._num(row.get('segment_count', 1), 1)
                )
            }
            profiles.append(profile)

        return profiles

    def save_profiles(self, profiles: List[Dict[str, Any]], output_path: str):
        """Write the profiles to *output_path* as pretty-printed UTF-8 JSON."""
        with open(output_path, 'w', encoding='utf-8') as f:
            json.dump(profiles, f, ensure_ascii=False, indent=4)
            
def process_comments_to_profiles(csv_path: str, output_path: str, num_profiles: int = 100):
    """
    Load comment data, build agent profiles, and write them to disk.

    Convenience wrapper around :class:`CommentProcessor`.

    Args:
        csv_path: Path to the comment-data CSV file.
        output_path: Destination path for the generated profile JSON.
        num_profiles: How many profiles to generate.
    """
    processor = CommentProcessor(csv_path)
    processor.save_profiles(processor.process_comments(num_profiles), output_path)