#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 16 22:21:30 2024

@author: hefeiyu
"""

import json
import jieba
import seaborn as sns
import matplotlib.pyplot as plt
from collections import Counter
import pandas as pd
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
import string
import matplotlib

# 1. Configure a CJK-capable font so Chinese labels render correctly in matplotlib
matplotlib.rcParams['font.sans-serif'] = ['Hiragino Sans GB']  # font bundled with macOS
matplotlib.rcParams['axes.unicode_minus'] = False  # keep minus signs rendering with a CJK font

# 2. Load the history records from the JSON file
# NOTE(review): assumes the file holds a list of dicts each carrying a 'content'
# key (see the extraction loop below) — confirm against the producer of this file.
with open('ai_history.json', 'r', encoding='utf-8') as f:
    data = json.load(f)

# 3. Extract each record's text and bucket it by language for separate tokenization.
english_texts = []
chinese_texts = []


def _contains_chinese(text):
    """Return True if *text* contains at least one CJK Unified Ideograph (U+4E00–U+9FFF)."""
    return any('\u4e00' <= ch <= '\u9fff' for ch in text)


for entry in data:
    text = entry.get('content', '')
    # Bug fix: the old test (any ord(char) > 127) classified ANY non-ASCII text —
    # accented Latin, emoji, curly quotes — as Chinese. Test for actual CJK
    # ideographs instead; everything else falls through to the English bucket.
    if _contains_chinese(text):
        chinese_texts.append(text)
    else:
        english_texts.append(text)

# 4. Chinese tokenization
# Punctuation to drop from Chinese tokens: ASCII marks plus common full-width CJK marks.
_CJK_PUNCTUATION = set(string.punctuation) | set('，。！？；：、“”‘’（）【】《》〈〉…—～·「」『』')


def chinese_tokenizer(text):
    """Segment Chinese *text* with jieba and return the meaningful tokens.

    Drops single-character tokens and any token made up entirely of
    punctuation and/or whitespace.
    """
    tokens = jieba.cut(text)
    # Bug fix: the original only checked ASCII string.punctuation (and that check
    # was dead code given len(word) > 1, since every ASCII mark is one char);
    # full-width sequences such as "……" and "——" leaked into the counts.
    return [
        word for word in tokens
        if len(word) > 1
        and not all(ch in _CJK_PUNCTUATION or ch.isspace() for ch in word)
    ]

# 5. English tokenization
def english_tokenizer(text):
    """Lowercase *text*, strip ASCII punctuation, and return its non-stopword tokens."""
    # Remove punctuation characters first, then split on whitespace.
    depunctuated = ''.join(ch for ch in text if ch not in string.punctuation)
    words = depunctuated.lower().split()
    # Filter out common English stopwords ("the", "and", ...).
    return [word for word in words if word not in ENGLISH_STOP_WORDS]

# 6. Tokenize every document, flattening the per-document token lists.
all_chinese_tokens = [
    token for text in chinese_texts for token in chinese_tokenizer(text)
]
all_english_tokens = [
    token for text in english_texts for token in english_tokenizer(text)
]

# 7. Tally how often each token occurs.
chinese_word_freq = Counter(all_chinese_tokens)
english_word_freq = Counter(all_english_tokens)

# 8. Convert each frequency table into a DataFrame for plotting.
def _frequency_frame(freq):
    """Turn a {word: count} mapping into a (Word, Frequency) DataFrame."""
    return pd.DataFrame(freq.items(), columns=['Word', 'Frequency'])


df_chinese = _frequency_frame(chinese_word_freq)
df_english = _frequency_frame(english_word_freq)

# Keep only the 20 most frequent words from each table.
df_chinese_sorted = df_chinese.sort_values(by='Frequency', ascending=False).head(20)
df_english_sorted = df_english.sort_values(by='Frequency', ascending=False).head(20)

# 9-10. Bar charts of the top-20 words for each language.
def _plot_top_words(df, title):
    """Draw a horizontal bar chart of (Word, Frequency) rows; no-op if *df* is empty."""
    if df.empty:
        return
    plt.figure(figsize=(10, 6))
    # hue='Word' + legend=False: seaborn >= 0.13 deprecates passing palette=
    # without hue=; this keeps the per-bar gradient without the FutureWarning.
    sns.barplot(x='Frequency', y='Word', hue='Word', data=df,
                palette='Blues_d', legend=False)
    plt.title(title)
    plt.xlabel('Frequency')
    plt.ylabel('Word')
    plt.show()


_plot_top_words(df_chinese_sorted, 'Top 20 Frequent Chinese Words in AI History')
_plot_top_words(df_english_sorted, 'Top 20 Frequent English Words in AI History')