import json
import os

import matplotlib.pyplot as plt
import pandas as pd
from openai import OpenAI
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import TfidfVectorizer
from wordcloud import WordCloud

# Configure the OpenAI-compatible API client.
# SECURITY: an API key was hard-coded here. It is now read from the
# OPENAI_API_KEY environment variable; the inline fallback is kept only for
# backward compatibility — rotate this exposed key and delete the fallback.
client = OpenAI(
    base_url="https://api.gptsapi.net/v1",
    api_key=os.environ.get(
        "OPENAI_API_KEY",
        "sk-kghaf948e5e3bc901fdb866fa5f1ffe18d19d9961a8N5pAg",
    ),
)

# Load the legal documents to analyze (expected: a JSON array of text strings).
with open('extracted_data.json', 'r', encoding='utf-8') as file:
    legal_documents = json.load(file)

# 定义调用GPT-4 API生成文本总结的函数
def summarize_text(text):
    """Ask the GPT-4o chat API for a detailed summary of *text*.

    Returns the summary string on success, or None if the API call (or
    response handling) fails for any reason.
    """
    prompt_messages = [
        {"role": "system", "content": "你是一个文本分析助手，请为以下文档生成一个详细的总结。"},
        {"role": "user", "content": f"请总结以下文档的主要主题，并包含详细的案情、主要问题和解决方案：\n\n{text}"},
    ]
    try:
        reply = client.chat.completions.create(model="gpt-4o", messages=prompt_messages)
        return reply.choices[0].message.content.strip()
    except Exception as exc:
        # Best-effort: log and signal failure rather than abort the batch.
        print(f"Error summarizing text: {exc}")
        return None

# 定义调用GPT-4 API生成主题关键词的函数
def extract_keywords(summary):
    try:
        response = client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "system", "content": "你是一个文本分析助手，请从以下文档总结中提取出主要的关键词，并说明这些关键词的共同特征。"},
                {"role": "user", "content": f"请从以下主题总结中提取出主要的关键词，这些关键词是需要总结归纳出来的，而不仅仅是从文本分割得到的，不要太长的句子而需要简洁的词语，确保这些关键词能准确反映该主题的共同特征：\n\n{summary}"}
            ]
        )
        keywords = response.choices[0].message.content.strip()
        return keywords
    except Exception as e:
        print(f"Error extracting keywords: {e}")
        return None

# Generate a summary for every document.
# summarize_text returns None on API failure; coerce failures to "" so the
# list stays index-aligned with legal_documents and downstream consumers
# (TfidfVectorizer.fit_transform, ' '.join) do not crash on a None entry.
summaries = [summarize_text(doc) or "" for doc in legal_documents]

# Persist the generated summaries as JSON.
with open('summaries.json', 'w', encoding='utf-8') as file:
    json.dump(summaries, file, ensure_ascii=False, indent=4)

print("文档总结已保存到 'summaries.json' 文件中。")

# Vectorize the summaries with TF-IDF.
# NOTE(review): sklearn's default tokenizer splits on word boundaries and
# does not segment Chinese text; if the summaries are mostly Chinese, the
# vocabulary may be poor — confirm and consider a segmenter (e.g. jieba).
vectorizer = TfidfVectorizer(stop_words='english')
doc_term_matrix = vectorizer.fit_transform(summaries)

# Fit an LDA topic model over the TF-IDF matrix.
num_topics = 3  # number of latent topics to extract
lda = LatentDirichletAllocation(n_components=num_topics, random_state=0)
lda.fit(doc_term_matrix)

# Assign each document to its single most probable topic.
topic_distributions = lda.transform(doc_term_matrix)
document_topics = topic_distributions.argmax(axis=1)

for doc_idx in range(len(summaries)):
    print(f"Document {doc_idx} is in topic {document_topics[doc_idx]}")

# Persist the per-document topic assignments as a CSV table.
assignments = {
    'Document': range(len(summaries)),
    'Summary': summaries,
    'Topic': document_topics,
}
pd.DataFrame(assignments).to_csv('document_topics_lda.csv', index=False)

print("主题建模结果已保存到 'document_topics_lda.csv' 文件中。")

# For each topic, concatenate the summaries assigned to it and ask the LLM
# to distill the topic's keywords.
topic_keywords = []
for topic_num in range(num_topics):
    members = [s for s, t in zip(summaries, document_topics) if t == topic_num]
    topic_keywords.append(extract_keywords(' '.join(members)))

# Write every topic's keywords to a plain-text report.
keywords_file_path = 'topic_keywords.txt'
with open(keywords_file_path, 'w', encoding='utf-8') as file:
    for topic_num, keywords in enumerate(topic_keywords):
        file.write(f'Topic {topic_num} Keywords:\n')
        file.write(f'{keywords}\n\n')

print(f'每个主题的关键词已保存到 {keywords_file_path} 文件中。')

# Visualize each topic as a word cloud.
def generate_wordcloud_for_topic(topic_num, keywords,
                                 font_path='C:/Windows/Fonts/simsun.ttc'):
    """Render, save, and display a word cloud for one topic.

    Parameters
    ----------
    topic_num : int
        Topic index; used in the output filename and the plot title.
    keywords : str or None
        Keyword text for the topic. If None or empty (extract_keywords
        returns None when its API call fails), a notice is printed and no
        word cloud is rendered — WordCloud.generate would otherwise raise.
    font_path : str, optional
        Path to a CJK-capable font. Defaults to SimSun on Windows; pass a
        different path on other platforms.
    """
    # Guard: a failed keyword-extraction call yields None/"" — skip cleanly.
    if not keywords:
        print(f'Topic {topic_num} has no keywords; skipping word cloud.')
        return

    wordcloud = WordCloud(width=800, height=400, background_color='white',
                          font_path=font_path).generate(keywords)

    # Save the rendered cloud to disk.
    file_path = f'topic_{topic_num}_wordcloud.png'
    wordcloud.to_file(file_path)
    print(f'词云图已保存到 {file_path}')

    # Display the cloud on screen.
    plt.figure(figsize=(10, 5))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis('off')
    plt.title(f'Topic {topic_num} Word Cloud')
    plt.show()

# Render one word cloud per topic.
for idx, kw in enumerate(topic_keywords):
    generate_wordcloud_for_topic(idx, kw)