#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 14 17:08:09 2024

@author: hefeiyu
"""

import json
import jieba
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from textblob import TextBlob
from collections import Counter
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
import string
from io import StringIO
from IPython.display import display, HTML

# 1. Read a JSON file and return its decoded contents
def load_json(file_path):
    """Parse the UTF-8 JSON file at *file_path* and return the decoded object."""
    with open(file_path, 'r', encoding='utf-8') as fh:
        return json.load(fh)

# 2. Data cleaning / text normalization helpers
def clean_text(text):
    """Return *text* with all ASCII punctuation characters removed."""
    # One C-level pass over the string; note this does NOT strip CJK punctuation.
    punctuation_table = str.maketrans('', '', string.punctuation)
    return text.translate(punctuation_table)

def chinese_word_cut(text):
    """Segment Chinese *text* with jieba and join the tokens with single spaces."""
    tokens = jieba.cut(text)
    return " ".join(tokens)

def english_word_cut(text):
    """Lower-case English *text*; actual tokenization is whitespace splitting downstream."""
    lowered = text.lower()
    return lowered

# 3. Word-frequency analysis
def get_word_frequency(texts, lang='zh'):
    """Count word occurrences across *texts*.

    Parameters
    ----------
    texts : iterable of str
        Raw documents to analyse.
    lang : str
        'zh' routes text through jieba segmentation; 'en' lower-cases it.

    Returns
    -------
    collections.Counter mapping word -> frequency.
    """
    # English uses sklearn's built-in stop-word list; Chinese uses none here.
    stop_words = ENGLISH_STOP_WORDS if lang == 'en' else set()

    word_count = Counter()
    for raw in texts:
        # NOTE: clean_text strips ASCII punctuation only, not CJK punctuation.
        cleaned = clean_text(raw)

        if lang == 'zh':
            cleaned = chinese_word_cut(cleaned)
        elif lang == 'en':
            cleaned = english_word_cut(cleaned)

        # Whitespace-tokenize and drop stop words before counting.
        word_count.update(w for w in cleaned.split() if w not in stop_words)

    return word_count

# 4. Render a word cloud from the frequency table
def generate_wordcloud(word_count):
    """Build a word cloud from a word->frequency mapping and display it."""
    cloud = WordCloud(
        font_path='STHeiti Medium.ttc',  # font file expected in the same folder
        width=800,
        height=400,
        background_color='white',
    )
    image = cloud.generate_from_frequencies(word_count)

    plt.figure(figsize=(10, 5))
    plt.imshow(image, interpolation='bilinear')
    plt.axis('off')
    plt.show()

# 5. Sentiment analysis
def sentiment_analysis(texts):
    """Return one TextBlob polarity score per text.

    Polarity is in [-1, 1]: positive values indicate positive sentiment,
    negative values indicate negative sentiment.
    """
    return [TextBlob(text).sentiment.polarity for text in texts]

# 6. Chart the sentiment-score distribution
def plot_sentiment(sentiment_scores):
    """Display a 50-bin histogram of sentiment polarity scores."""
    plt.figure(figsize=(10, 6))
    plt.hist(sentiment_scores, bins=50, color='skyblue', edgecolor='black')

    # Labels and cosmetics.
    plt.title('Sentiment Distribution')
    plt.xlabel('Sentiment Score')
    plt.ylabel('Frequency')
    plt.grid(True)

    plt.show()

# Automatically generate an HTML analysis report and save it to disk
def generate_report(data, word_count, sentiment_scores, output_path='analysis_report.html'):
    """Write a static HTML report summarizing the analysis.

    Parameters
    ----------
    data : the raw loaded dataset (currently unused; kept for interface stability)
    word_count : collections.Counter of word frequencies (top 10 are listed)
    sentiment_scores : list of polarity scores (currently unused; the report
        embeds the pre-rendered sentiment_distribution.png image instead)
    output_path : destination path for the HTML file

    NOTE: the report references wordcloud.png and sentiment_distribution.png,
    which must be saved next to the HTML file for the images to display.
    """
    # Fixed malformed HTML: the original emitted "</p >" and "< img ...>";
    # a space after "<" makes browsers treat the tag as plain text, so the
    # images never rendered.
    report = f"""
    <html>
    <head>
        <title>Data Analysis Report</title>
        <style>
            body {{ font-family: Arial, sans-serif; }}
            h1 {{ color: #4CAF50; }}
            .content {{ margin-bottom: 20px; }}
            .chart {{ margin-top: 20px; }}
        </style>
    </head>
    <body>
        <h1>Data Analysis Report</h1>
        <div class="content">
            <h2>Word Frequency Analysis</h2>
            <p>The most frequent words in the dataset are:</p>
            <pre>{str(word_count.most_common(10))}</pre>
        </div>

        <div class="content">
            <h2>Word Cloud</h2>
            <div class="chart">
                <img src="wordcloud.png" alt="Word Cloud" style="max-width:100%; height:auto;">
            </div>
        </div>

        <div class="content">
            <h2>Sentiment Analysis</h2>
            <p>Sentiment distribution based on the content:</p>
            <div class="chart">
                <img src="sentiment_distribution.png" alt="Sentiment Distribution" style="max-width:100%; height:auto;">
            </div>
        </div>
    </body>
    </html>
    """

    # Persist the report as UTF-8 HTML.
    with open(output_path, 'w', encoding='utf-8') as f:
        f.write(report)

    print(f"报告已保存到 {output_path}.")

# Entry point: run the full analysis pipeline end to end
def main():
    # Path of the JSON file to analyse (edit as needed).
    file_path = 'category_4.json'
    data = load_json(file_path)

    # Pull the "content" field out of every record.
    texts = [record['content'] for record in data]

    # Word-frequency analysis (assumes English data; use lang='zh' for Chinese).
    word_count = get_word_frequency(texts, lang='en')

    # Render the word cloud.
    generate_wordcloud(word_count)

    # Sentiment analysis and its distribution plot.
    sentiment_scores = sentiment_analysis(texts)
    plot_sentiment(sentiment_scores)

    # Persist the HTML report.
    generate_report(data, word_count, sentiment_scores, output_path='analysis_report.html')

# Run the pipeline only when executed as a script, not on import.
if __name__ == '__main__':
    main()
    
    
    
    