# 用Python脚本快速生成演示数据

from faker import Faker
import json
import random

fake = Faker('zh_CN')

class DemoDataGenerator:
    """Generate demo users, posts, and a clustered knowledge graph.

    The topic catalog is organized into predefined clusters so the
    resulting graph renders as visually distinct communities, bridged
    by a hand-picked set of cross-domain edges.
    """

    def __init__(self, num_users=30, num_posts=50, num_topics=15):
        """Store generation sizes and the static topic/relation catalogs.

        Args:
            num_users: how many demo users generate_users() produces.
            num_posts: how many demo posts generate_posts() produces.
            num_topics: NOTE(review): stored but never read anywhere in
                this class -- the knowledge graph always includes every
                topic in ``topic_clusters``. Confirm whether callers
                expect this to cap the topic count.
        """
        self.num_users = num_users
        self.num_posts = num_posts
        self.num_topics = num_topics

        # Predefined topic clusters (used to build a meaningful knowledge
        # graph) -- extended version. Each cluster carries a display color
        # applied to all of its topic nodes.
        self.topic_clusters = {
            "AI_ML": {
                "topics": ["机器学习", "深度学习", "神经网络", "强化学习", "迁移学习",
                          "模型训练", "特征工程", "集成学习", "监督学习", "无监督学习"],
                "color": "#FF6B6B"
            },
            "NLP_LLM": {
                "topics": ["自然语言处理", "大语言模型", "Transformer", "BERT", "GPT",
                          "文本生成", "情感分析", "机器翻译", "向量化", "embedding"],
                "color": "#4ECDC4"
            },
            "Computer_Vision": {
                "topics": ["计算机视觉", "图像识别", "目标检测", "图像分割", "CNN",
                          "GAN", "人脸识别", "OCR", "视频分析"],
                "color": "#95E1D3"
            },
            "Data_Engineering": {
                "topics": ["数据库", "SQL", "NoSQL", "数据仓库", "ETL",
                          "Spark", "Hadoop", "数据清洗", "数据管道"],
                "color": "#F7DC6F"
            },
            "System_Design": {
                "topics": ["系统设计", "分布式系统", "微服务", "负载均衡", "缓存",
                          "消息队列", "高可用", "性能优化", "容错设计"],
                "color": "#BB8FCE"
            },
            "Recommendation": {
                "topics": ["推荐系统", "协同过滤", "内容推荐", "召回策略", "排序算法",
                          "冷启动", "实时推荐", "A/B测试"],
                "color": "#00D9FF"
            },
            "Knowledge_Graph": {
                "topics": ["知识图谱", "图数据库", "Neo4j", "图算法", "实体识别",
                          "关系抽取", "知识推理", "本体建模"],
                "color": "#85C1E2"
            },
            "Math_Theory": {
                "topics": ["线性代数", "概率论", "统计学", "优化算法", "图论",
                          "信息论", "数值计算"],
                "color": "#FFA07A"
            },
            "Data_Science": {
                "topics": ["数据科学", "数据分析", "数据可视化", "特征选择", "降维",
                          "时间序列", "异常检测", "预测建模"],
                "color": "#98D8C8"
            }
        }

        # Relation labels assigned at random to intra-cluster edges.
        self.relation_types = [
            "基于", "应用于", "需要", "包含", "相关",
            "扩展", "优化", "实现", "依赖", "支持"
        ]

    def generate_users(self):
        """Generate demo users with random role, topics, and styling.

        Returns:
            List of user dicts with ids ``user_0`` .. ``user_{n-1}``.
        """
        users = []
        roles = ["student", "grad_student", "professor"]

        for i in range(self.num_users):
            user = {
                "id": f"user_{i}",
                "name": fake.name(),
                "role": random.choice(roles),
                # Expertise and interests are sampled independently from the
                # full topic catalog, so the two lists may overlap.
                "expertise": random.sample(
                    self._get_all_topics(),
                    k=random.randint(2, 5)
                ),
                "interests": random.sample(
                    self._get_all_topics(),
                    k=random.randint(1, 3)
                ),
                "personality_type": random.choice([
                    "patient_explainer",
                    "deep_researcher",
                    "practical_engineer",
                    "theorist"
                ]),
                "color": random.choice([
                    "#0066cc", "#FF6B6B", "#4ECDC4", "#95E1D3",
                    "#F7DC6F", "#BB8FCE", "#85C1E2"
                ])
            }
            users.append(user)

        return users

    def generate_posts(self, users):
        """Generate demo posts authored by random members of *users*.

        Args:
            users: list of user dicts as produced by generate_users().

        Returns:
            List of post dicts with ids ``post_0`` .. ``post_{n-1}``.
        """
        posts = []

        for i in range(self.num_posts):
            author = random.choice(users)
            topics = random.sample(
                self._get_all_topics(),
                k=random.randint(1, 3)
            )

            post = {
                "id": f"post_{i}",
                "author_id": author["id"],
                "author_name": author["name"],
                "title": self._generate_post_title(topics),
                "content": f"关于{', '.join(topics)}的讨论",
                "topics": topics,
                "timestamp": fake.date_time_this_year().isoformat(),
                # random.random() is uniform on [0, 1), so ~60% of posts
                # are flagged as questions.
                "is_question": random.random() > 0.4,
                "engagement": random.randint(0, 50)
            }
            posts.append(post)

        return posts

    def generate_knowledge_graph(self):
        """Generate the knowledge graph: enhanced version with more nodes
        and edges, and clearly visible clustering.

        Dense random edges inside each cluster form communities; a fixed,
        hand-curated list of cross-cluster edges bridges the domains.

        Returns:
            dict with ``"nodes"`` and ``"edges"`` lists.
        """
        nodes = []
        edges = []
        # Undirected dedup: each edge is recorded as a frozenset of its two
        # endpoint ids, giving O(1) duplicate checks instead of rescanning
        # the whole edge list for every candidate edge.
        seen_pairs = set()

        # 1. Create one node per topic, colored by its cluster.
        for topic in self._get_all_topics():
            cluster = self._get_topic_cluster(topic)
            nodes.append({
                "id": f"topic_{topic}",
                "label": topic,
                "type": "topic",
                "cluster": cluster,
                "color": self._get_cluster_color(cluster),
                "cross_domain": False
            })

        # 2. Dense intra-cluster connections (forms the visible communities).
        for cluster_data in self.topic_clusters.values():
            topics = cluster_data["topics"]

            # Within a cluster: connect each topic to 3-5 other topics.
            for topic in topics:
                num_connections = min(random.randint(3, 5), len(topics) - 1)
                other_topics = [t for t in topics if t != topic]

                for other_topic in random.sample(other_topics, num_connections):
                    source_id = f"topic_{topic}"
                    target_id = f"topic_{other_topic}"
                    pair = frozenset((source_id, target_id))
                    if pair in seen_pairs:
                        # Skip duplicates regardless of direction.
                        continue
                    seen_pairs.add(pair)
                    edges.append({
                        "source": source_id,
                        "target": target_id,
                        "type": random.choice(self.relation_types),
                        "cross_domain": False,
                        "importance": "medium"
                    })

        # 3. Cross-cluster connections (bridging different domains).
        cross_cluster_connections = [
            # AI/ML -> NLP/LLM
            ("机器学习", "自然语言处理", "应用于"),
            ("深度学习", "大语言模型", "基于"),
            ("神经网络", "Transformer", "基于"),
            ("特征工程", "embedding", "相关"),

            # AI/ML -> Computer Vision
            ("机器学习", "计算机视觉", "应用于"),
            ("深度学习", "CNN", "基于"),
            ("神经网络", "GAN", "基于"),

            # NLP/LLM -> Knowledge Graph
            ("自然语言处理", "实体识别", "需要"),
            ("文本生成", "知识推理", "相关"),
            ("embedding", "知识图谱", "应用于"),

            # Recommendation -> AI/ML
            ("推荐系统", "机器学习", "基于"),
            ("协同过滤", "特征工程", "需要"),
            ("排序算法", "模型训练", "需要"),

            # Recommendation -> Knowledge Graph
            ("推荐系统", "知识图谱", "应用于"),
            ("召回策略", "图算法", "相关"),

            # Data Engineering -> System Design
            ("数据库", "分布式系统", "需要"),
            ("数据仓库", "高可用", "需要"),
            ("消息队列", "数据管道", "支持"),

            # Math Theory -> AI/ML
            ("线性代数", "神经网络", "基础"),
            ("概率论", "机器学习", "基础"),
            ("优化算法", "模型训练", "应用于"),
            ("图论", "图算法", "基础"),

            # Math Theory -> Knowledge Graph
            ("图论", "知识图谱", "基础"),
            ("图论", "Neo4j", "基础"),

            # Data Science -> AI/ML
            ("数据科学", "机器学习", "包含"),
            ("数据分析", "特征工程", "支持"),
            ("数据可视化", "模型训练", "辅助"),
            ("特征选择", "特征工程", "相关"),

            # Data Science -> NLP
            ("数据分析", "情感分析", "应用于"),
            ("时间序列", "文本生成", "相关"),

            # System Design -> Recommendation
            ("分布式系统", "推荐系统", "支持"),
            ("缓存", "实时推荐", "优化"),
            ("负载均衡", "推荐系统", "支持"),

            # Computer Vision -> Data Science
            ("图像识别", "数据分析", "应用"),
            ("OCR", "数据清洗", "相关"),
        ]

        # Precompute node ids once instead of scanning the node list for
        # every candidate cross-cluster edge.
        node_ids = {n["id"] for n in nodes}

        for source, target, rel_type in cross_cluster_connections:
            source_id = f"topic_{source}"
            target_id = f"topic_{target}"

            # Only bridge nodes that actually exist in the catalog.
            if source_id in node_ids and target_id in node_ids:
                seen_pairs.add(frozenset((source_id, target_id)))
                edges.append({
                    "source": source_id,
                    "target": target_id,
                    "type": rel_type,
                    "cross_domain": True,
                    "importance": "high"
                })

        return {"nodes": nodes, "edges": edges}

    def _get_all_topics(self):
        """Return every topic across all clusters, flattened into one list."""
        return [
            topic
            for cluster_data in self.topic_clusters.values()
            for topic in cluster_data["topics"]
        ]

    def _get_topic_cluster(self, topic):
        """Return the name of the cluster containing *topic*, or None."""
        for cluster_name, cluster_data in self.topic_clusters.items():
            if topic in cluster_data["topics"]:
                return cluster_name
        return None

    def _get_cluster_color(self, cluster_name):
        """Return the cluster's display color, falling back to a default."""
        return self.topic_clusters.get(cluster_name, {}).get("color", "#4ECDC4")

    def _generate_post_title(self, topics):
        """Pick a random title template filled with the post's topics."""
        templates = [
            f"请问{topics[0]}应该怎么学？",
            f"我想用{topics[0]}做一个项目，但{topics[1] if len(topics) > 1 else '完全不会'}...",
            f"关于{topics[0]}的深入讨论",
            f"分享：我如何掌握{topics[0]}",
        ]
        return random.choice(templates)

# Generate the demo data set and persist it as UTF-8 JSON.
generator = DemoDataGenerator(num_users=20, num_posts=30, num_topics=10)
users = generator.generate_users()
posts = generator.generate_posts(users)
kg = generator.generate_knowledge_graph()

demo_data = {
    "users": users,
    "posts": posts,
    "knowledge_graph": kg
}

# encoding="utf-8" is required here: ensure_ascii=False emits the Chinese
# text as raw characters, and without an explicit encoding open() uses the
# platform locale encoding, which can raise UnicodeEncodeError (e.g. cp1252
# on Windows).
with open("demo_data.json", "w", encoding="utf-8") as f:
    json.dump(demo_data, f, ensure_ascii=False, indent=2)