# -*- coding: utf-8 -*-
"""
红楼梦人物共现网络分析
该脚本用于分析红楼梦中人物的共现关系，并生成可交互的HTML网络图
"""

import jieba
import re
from collections import defaultdict
from pyecharts import options as opts
from pyecharts.charts import Graph

# Main characters of "Dream of the Red Chamber" (partial list).
# Names are matched as literal substrings of the text, so full names
# (e.g. "贾宝玉") are used rather than short forms.
CHARACTERS = [
    "贾宝玉", "林黛玉", "薛宝钗", "王熙凤", "贾母", "贾政", "贾琏", 
    "袭人", "晴雯", "紫鹃", "香菱", "平儿", "鸳鸯", "司棋", "金钏",
    "尤二姐", "尤三姐", "妙玉", "李纨", "探春", "湘云", "宝琴",
    "邢夫人", "王夫人", "尤氏", "刘姥姥", "薛姨妈", "贾珍", "贾蓉",
    "秦可卿", "贾环", "贾兰", "贾芸", "红玉", "龄官", "芳官"
]

def read_text(file_path='红楼梦.txt'):
    """Read the novel text, falling back to temp.txt if the primary file is absent.

    Returns the full file contents as a string, or None (after printing a
    message) when neither candidate file exists.
    """
    for candidate in (file_path, 'temp.txt'):
        try:
            with open(candidate, 'r', encoding='utf-8') as f:
                return f.read()
        except FileNotFoundError:
            continue
    print("未找到红楼梦文本文件")
    return None

def split_into_chapters(text):
    """Split the novel text into chapters on '第...回' heading lines.

    The heading line itself (through the trailing newline) is consumed by
    the split, so each returned element is chapter body text only.
    """
    heading = r'第[一二三四五六七八九十百千]+回[^\n]*\n'
    parts = re.split(heading, text)
    # If nothing (or only whitespace) precedes the first heading, drop
    # that empty leading fragment.
    if parts and parts[0].strip() == '':
        parts = parts[1:]
    return parts

def extract_character_cooccurrence(chapters, window_size=100):
    """
    Extract character co-occurrence relations.

    Two characters co-occur when both names appear within a sliding window
    of ``window_size`` consecutive jieba tokens; every window position a
    pair shares adds 1, so names mentioned close together score high.

    Fixes over the previous version:
    - the pair loop no longer reuses the outer window index ``i``;
    - ``range(max(1, len(words) - window_size + 1))`` includes the final
      window position and no longer skips chapters shorter than the
      window (they previously contributed zero co-occurrences).

    :param chapters: list of chapter texts
    :param window_size: sliding-window size in tokens
    :return: (cooccurrence, character_count) — cooccurrence maps a
             lexicographically sorted (name1, name2) tuple to its windowed
             co-occurrence count; character_count maps a name to the
             number of chapters containing it (once per chapter).
    """
    cooccurrence = defaultdict(int)
    character_count = defaultdict(int)

    for chapter in chapters:
        words = jieba.lcut(chapter)

        # Per-chapter presence: one increment per chapter, not per mention.
        for name in CHARACTERS:
            if name in chapter:
                character_count[name] += 1

        # Slide the window over the token stream.
        for start in range(max(1, len(words) - window_size + 1)):
            window_text = ''.join(words[start:start + window_size])

            chars_in_window = [name for name in CHARACTERS if name in window_text]

            # Count each unordered pair once per window position; sorting
            # the key makes (A, B) and (B, A) the same relation.
            for a in range(len(chars_in_window)):
                for b in range(a + 1, len(chars_in_window)):
                    pair = tuple(sorted((chars_in_window[a], chars_in_window[b])))
                    cooccurrence[pair] += 1

    return cooccurrence, character_count

def build_network_data(cooccurrence, character_count, min_cooccurrence=1):
    """
    Build node and link lists for the pyecharts graph.

    :param cooccurrence: dict mapping (name1, name2) pairs to co-occurrence counts
    :param character_count: dict mapping names to appearance counts
    :param min_cooccurrence: pairs with fewer co-occurrences are dropped
    :return: (nodes, links) in pyecharts Graph data format
    """
    nodes = []
    links = []
    seen = set()

    def _add_node(name, color):
        # First mention wins: a node keeps the color of the pair slot
        # (first element blue, second red) under which it first appeared.
        if name not in seen:
            seen.add(name)
            nodes.append({
                "name": name,
                # Node size scales with how often the character appears.
                "symbolSize": character_count.get(name, 1) * 2,
                "itemStyle": {"color": color},
            })

    # Single pass: any pair that clears the threshold contributes both its
    # nodes (if new) and one link, in dict iteration order.
    for (char1, char2), count in cooccurrence.items():
        if count < min_cooccurrence:
            continue
        _add_node(char1, "#4285F4")
        _add_node(char2, "#EA4335")
        links.append({
            "source": char1,
            "target": char2,
            "value": count,  # edge weight = co-occurrence count
            "lineStyle": {
                "width": count * 0.5  # heavier edges for frequent pairs
            },
        })

    return nodes, links

def create_character_network(nodes, links):
    """
    Create the interactive character-relationship graph.

    :param nodes: node dicts in pyecharts Graph format
    :param links: link dicts in pyecharts Graph format
    :return: configured pyecharts Graph object (force layout, roamable)
    """
    graph = Graph(init_opts=opts.InitOpts(width="1200px", height="800px"))
    graph.add(
        "",
        nodes,
        links,
        repulsion=8000,
        edge_length=100,
        gravity=0.1,
        friction=0.6,
        is_focusnode=True,
        is_roam=True,
        linestyle_opts=opts.LineStyleOpts(curve=0.3),
        label_opts=opts.LabelOpts(is_show=True, position="right", font_size=12),
    )
    graph.set_global_opts(
        title_opts=opts.TitleOpts(title="《红楼梦》人物关系网络图"),
        tooltip_opts=opts.TooltipOpts(is_show=True),
        toolbox_opts=opts.ToolboxOpts(
            is_show=True,
            orient="vertical",
            pos_left="95%",
            pos_top="center",
        ),
    )
    return graph

def _write_report(chapters, cooccurrence, character_count, nodes, links):
    """Write the plain-text analysis report alongside the HTML graph."""
    with open("人物关系分析报告.txt", "w", encoding="utf-8") as f:
        f.write("《红楼梦》人物关系分析报告\n")
        f.write("=" * 40 + "\n\n")

        # Section 1: overall statistics.
        f.write("一、数据统计\n")
        f.write(f"章节总数: {len(chapters)}\n")
        f.write(f"检测到的人物共现关系对数: {len(cooccurrence)}\n")
        f.write(f"网络图节点数: {len(nodes)}\n")
        f.write(f"网络图边数: {len(links)}\n\n")

        # Section 2: top 10 co-occurring pairs.
        f.write("二、共现次数最多的10对人物\n")
        top_pairs = sorted(cooccurrence.items(), key=lambda item: item[1], reverse=True)
        for rank, ((char1, char2), count) in enumerate(top_pairs[:10]):
            f.write(f"{rank+1}. {char1} - {char2}: {count}次\n")

        # Section 3: top 20 characters by appearance count.
        f.write("\n三、人物出现次数统计\n")
        top_chars = sorted(character_count.items(), key=lambda item: item[1], reverse=True)
        for rank, (char, count) in enumerate(top_chars[:20]):
            f.write(f"{rank+1}. {char}: {count}次\n")


def main():
    """Run the full pipeline: read text, analyze, render graph, write report."""
    print("《红楼梦》人物关系网络分析")
    print("=" * 40)

    text = read_text()
    if text is None:
        return

    chapters = split_into_chapters(text)
    print(f"共分割出 {len(chapters)} 个章节")

    print("正在分析人物共现关系...")
    cooccurrence, character_count = extract_character_cooccurrence(chapters)
    print(f"检测到 {len(cooccurrence)} 对人物共现关系")

    print("正在构建网络数据...")
    nodes, links = build_network_data(cooccurrence, character_count, min_cooccurrence=3)
    print(f"构建了 {len(nodes)} 个节点和 {len(links)} 条边")

    print("正在生成网络图...")
    graph = create_character_network(nodes, links)

    # Render the interactive HTML graph.
    graph.render("红楼梦人物关系网络.html")
    print("人物关系网络图已保存为 红楼梦人物关系网络.html")

    _write_report(chapters, cooccurrence, character_count, nodes, links)
    print("分析报告已保存为 人物关系分析报告.txt")

# Script entry point: run the full analysis when executed directly.
if __name__ == "__main__":
    main()