#!/usr/bin/env python3
# -- coding: utf-8 --

import json
import string
import unicodedata
from collections import Counter

import jieba
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

# 1. Configure a CJK-capable font so Chinese labels/keys render in plots
matplotlib.rcParams['font.sans-serif'] = ['Hiragino Sans GB']  # macOS-bundled font; change on other platforms
matplotlib.rcParams['axes.unicode_minus'] = False  # keep minus signs rendering correctly with a CJK font

# 2. Load the history records.
# NOTE(review): downstream code assumes a list of dicts each carrying a
# 'content' string key — confirm against the producer of ai_history.json.
with open('ai_history.json', 'r', encoding='utf-8') as f:
    data = json.load(f)

# 3. Keywords associated with each AI development stage
# (stage name in Chinese -> list of keywords counted in the texts).
# NOTE(review): keywords containing a space (e.g. "AI 伦理") are unlikely to
# ever equal a single jieba token, so they may never match — verify.
development_stages = {
    "起步发展期": ["符号主义", "逻辑推理", "规则基础", "专家系统"],  # formative period: symbolism, logic, rule-based, expert systems
    "反思发展期": ["神经网络", "感知机", "机器学习", "深度学习", "反向传播"],  # reflection period: neural nets, perceptron, ML, DL, backprop
    "应用发展期": ["机器翻译", "语音识别", "图像识别", "自动驾驶", "自然语言处理"],  # application period: MT, ASR, vision, self-driving, NLP
    "平稳发展期": ["大数据", "云计算", "人工智能平台", "智能助手"],  # steady period: big data, cloud, AI platforms, assistants
    "蓬勃发展期": ["强化学习", "深度学习", "生成对抗网络", "AI 伦理", "AI 医疗", "智能制造"]  # booming period: RL, DL, GANs, AI ethics/medicine/manufacturing
}


# 4. Split the records into Chinese and English corpora.
# Heuristic: any non-ASCII character marks the text as Chinese.
englishtexts = []
chinesetexts = []

for entry in data:
    content = entry.get('content', '')
    bucket = chinesetexts if any(ord(ch) > 127 for ch in content) else englishtexts
    bucket.append(content)

# 5. Chinese tokenization
def chinesetokenizer(text):
    """Segment Chinese text with jieba and drop punctuation/noise tokens.

    Keeps only tokens longer than one character that contain at least one
    non-punctuation character, so multi-character punctuation runs emitted
    by jieba (e.g. '……', '——') are filtered out as well.
    """
    tokens = jieba.cut(text)
    # Bug fix: the previous `word not in string.punctuation` was a substring
    # check (never true for tokens of length > 1) and ignored CJK punctuation
    # entirely; classify characters by Unicode category ('P*') instead.
    return [
        word for word in tokens
        if len(word) > 1
        and not all(unicodedata.category(ch).startswith('P') for ch in word)
    ]

# 6. English tokenization
def englishtokenizer(text):
    """Strip ASCII punctuation, lowercase, and split on whitespace."""
    strip_punct = str.maketrans('', '', string.punctuation)
    return text.translate(strip_punct).lower().split()

# 7. Count stage-related keywords across a corpus
def extractstagekeywords(texts, stagekeywords):
    """Count how often each stage's keywords occur across the given texts.

    Args:
        texts: iterable of raw text strings (Chinese or English).
        stagekeywords: mapping of stage name -> list of keyword strings.

    Returns:
        dict mapping each stage name to the total number of keyword hits.
    """
    stagewordcount = {stage: 0 for stage in stagekeywords}

    for text in texts:
        # Same language heuristic as the corpus split: any non-ASCII char => Chinese.
        is_chinese = any(ord(char) > 127 for char in text)
        tokens = chinesetokenizer(text) if is_chinese else englishtokenizer(text)
        # Build a Counter once per text: O(1) lookup per keyword instead of
        # rescanning the whole token list with list.count() for every keyword.
        counts = Counter(tokens)
        for stage, keywords in stagekeywords.items():
            stagewordcount[stage] += sum(counts[keyword] for keyword in keywords)

    return stagewordcount

# 8. Tally how often each stage's keywords appear in each corpus.
chinesestagewordcount = extractstagekeywords(chinesetexts, development_stages)
englishstagewordcount = extractstagekeywords(englishtexts, development_stages)

# 9. Shape the tallies into DataFrames for plotting.
def _stageframe(counts):
    """Return a (Development Stage, Frequency) DataFrame from a stage->count dict."""
    return pd.DataFrame(list(counts.items()), columns=['Development Stage', 'Frequency'])

dfchinese = _stageframe(chinesestagewordcount)
dfenglish = _stageframe(englishstagewordcount)

# Rank stages by descending keyword frequency.
dfchinesesorted = dfchinese.sort_values('Frequency', ascending=False)
dfenglishsorted = dfenglish.sort_values('Frequency', ascending=False)

# 10.–11. Render one heatmap per language corpus. The two original copies of
# this plotting code were identical except for data and title, so they now
# share a single helper (DRY).
def plotstageheatmap(dfsorted, title, emptymessage):
    """Show a single-row heatmap of keyword frequency per development stage.

    Args:
        dfsorted: DataFrame with 'Development Stage' and 'Frequency' columns.
        title: title for the plot.
        emptymessage: message printed instead of plotting when dfsorted is empty.
    """
    if dfsorted.empty:
        print(emptymessage)
        return
    plt.figure(figsize=(10, 6))
    # Transpose so the stages run along the x-axis as a single heatmap row.
    heatmapdata = dfsorted.set_index('Development Stage').T
    sns.heatmap(heatmapdata, annot=True, cmap='YlGnBu', cbar=True, cbar_kws={'label': 'Frequency'})
    plt.title(title)
    plt.show()

plotstageheatmap(dfchinesesorted, 'AI Development Path (Chinese)', "No Chinese data available for heatmap.")
plotstageheatmap(dfenglishsorted, 'AI Development Path (English)', "No English data available for heatmap.")
