import re
import jieba
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']  # use the SimHei font so Chinese chart labels render correctly

# LLM score
def llm_evaluate(score_value, path=r'..\MetaGpt\score3.txt'):
    """Read an LLM-written score file and return a normalized score in [0, 1].

    The LLM is expected to have written a 0-100 score near the end of the
    file; only the last 9 characters are scanned for the first run of digits.

    Args:
        score_value: Numeric fallback (0-100 scale) used when no digits can
            be extracted from the file.
        path: Score file location; defaults to the original hard-coded path.

    Returns:
        float: extracted (or fallback) score divided by 100; 0.0 when the
        fallback itself is not numeric.
    """
    with open(path, 'r', encoding='utf-8') as file:
        score_result = file.read()
    # The score sits at the end of the file, so only scan the tail.
    tail = score_result[-9:]
    match = re.search(r'\d+', tail)
    if match:
        score_value = int(match.group())
    else:
        print("无法从 score_result 中提取数字")
    try:
        return float(score_value) / 100
    except (TypeError, ValueError):
        # The caller supplied a non-numeric fallback (e.g. the novel text
        # itself); the original code crashed here. Treat it as a zero score.
        return 0.0

# Coherence
def split_into_paragraphs(novel_text):
    """Split raw novel text into a list of stripped, non-empty paragraphs.

    Paragraph boundaries are blank lines: a newline, optional whitespace,
    then another newline.
    """
    chunks = re.split(r'\n\s*\n', novel_text)
    paragraphs = []
    for chunk in chunks:
        cleaned = chunk.strip()
        if cleaned:
            paragraphs.append(cleaned)
    return paragraphs

def simple_coherence(paragraphs):
    """Average Jaccard similarity of adjacent paragraphs' token sets.

    Each paragraph is tokenized with jieba (lower-cased first); for every
    pair of neighbouring paragraphs the ratio |intersection| / |union| of
    their word sets is computed, and the mean over all pairs is returned.

    Args:
        paragraphs: List of paragraph strings.

    Returns:
        float: mean adjacent-paragraph overlap in [0, 1]. Returns 0.0 when
        fewer than two paragraphs are given (the original produced NaN via
        np.mean([]) in that case).
    """
    if len(paragraphs) < 2:
        return 0.0
    overlaps = []
    for i in range(len(paragraphs) - 1):
        words1 = set(jieba.lcut(paragraphs[i].lower()))
        words2 = set(jieba.lcut(paragraphs[i + 1].lower()))
        union = words1 | words2
        # Guard against two empty token sets (e.g. whitespace-only input),
        # which would otherwise raise ZeroDivisionError.
        overlaps.append(len(words1 & words2) / len(union) if union else 0.0)
    return np.mean(overlaps)

# Lexical diversity
def lexical_diversity(text):
    """Type-token ratio of the text: unique tokens / total tokens.

    Tokenization is done with jieba on the lower-cased text.

    Returns:
        float: ratio in (0, 1]; 0.0 for text that yields no tokens (the
        original raised ZeroDivisionError on empty input).
    """
    words = jieba.lcut(text.lower())
    if not words:
        return 0.0
    return len(set(words)) / len(words)

# Topic consistency
def simple_topic_consistency(text, top_n=10):
    """Average fraction of the text's top-N frequent words present per paragraph.

    The whole text is tokenized with jieba (lower-cased) and the `top_n` most
    frequent tokens form the topic vocabulary; each paragraph is scored by
    how many of those words it contains.

    Args:
        text: Full novel text.
        top_n: Size of the high-frequency vocabulary.

    Returns:
        float: mean per-paragraph coverage in [0, 1]; 0.0 when the text
        yields no tokens or no paragraphs (the original divided by zero /
        averaged an empty list in those cases).
    """
    words = jieba.lcut(text.lower())
    word_freq = Counter(words)
    top_words = {word for word, _ in word_freq.most_common(top_n)}
    if not top_words:
        return 0.0
    # NOTE(review): paragraphs are split on a literal blank line here, while
    # split_into_paragraphs uses a regex — confirm the mismatch is intended.
    # Empty fragments (e.g. from consecutive blank lines) are skipped so they
    # no longer drag the average down with guaranteed-zero scores.
    paragraphs = [p for p in text.split('\n\n') if p.strip()]
    if not paragraphs:
        return 0.0
    consistency_scores = []
    for para in paragraphs:
        para_words = set(jieba.lcut(para.lower()))
        # Fraction of the high-frequency vocabulary present in this paragraph.
        consistency_scores.append(len(para_words & top_words) / len(top_words))
    return np.mean(consistency_scores)

# Sentence complexity
def sentence_complexity(text):
    """Mean jieba-token count per sentence, scaled by 1/20 and capped at 1.0.

    Sentences are split on the Chinese full stop '。'. Empty fragments — e.g.
    the trailing empty string produced when the text ends with '。' — are
    ignored so they no longer skew the average downwards.

    Returns:
        float: normalized complexity in [0, 1]; 0.0 for text with no sentences.
    """
    sentences = [s for s in text.split('。') if s.strip()]
    if not sentences:
        return 0.0
    avg_tokens = np.mean([len(jieba.lcut(sent)) for sent in sentences])
    return min(avg_tokens / 20, 1)

def evaluate_novel(novel_text):
    """Compute all evaluation metrics for a novel.

    Args:
        novel_text: The full novel text to score.

    Returns:
        dict: metric name (Chinese) -> score in [0, 1].
    """
    paragraphs = split_into_paragraphs(novel_text)

    # Pass a numeric fallback (0), not the novel text: the original passed
    # novel_text as the fallback, so llm_evaluate crashed on float(novel_text)
    # whenever no digits could be extracted from the score file.
    llm_score = llm_evaluate(0)
    coherence = simple_coherence(paragraphs)
    diversity = lexical_diversity(novel_text)
    topic_consistency = simple_topic_consistency(novel_text)
    complexity = sentence_complexity(novel_text)
    # "Structure balance" is just the mean of three other scores.
    structure_balance = (llm_score + coherence + topic_consistency) / 3

    return {
        "LLM评分": llm_score,
        "连贯性": coherence,
        "词汇多样性": diversity,
        "主题一致性": topic_consistency,
        "句子复杂度": complexity,
        "结构平衡": structure_balance
    }

def plot_radar_chart(data):
    """Render the metric dict as a closed radar (spider) chart on a [0, 1] axis."""
    labels = list(data.keys())
    scores = list(data.values())
    count = len(labels)
    # One evenly spaced angle per metric around the full circle.
    angles = [k / float(count) * 2 * np.pi for k in range(count)]
    # Repeat the first point at the end so the polygon closes.
    scores.append(scores[0])
    angles.append(angles[0])
    # Draw on a polar axis.
    fig, ax = plt.subplots(figsize=(6, 6), subplot_kw=dict(projection='polar'))
    ax.plot(angles, scores)
    ax.fill(angles, scores, alpha=0.1)
    # Label each spoke with its metric name.
    ax.set_xticks(angles[:-1])
    ax.set_xticklabels(labels)
    ax.set_ylim(0, 1)
    plt.title("AI小说评估雷达图")
    plt.show()

# Usage example: read the novel, score it, persist the results, then plot.
with open(r'..\MetaGpt\file3.txt', 'r', encoding='utf-8') as file:
    novel_text = file.read()
results = evaluate_novel(novel_text)
print(results)
# Use a context manager so the output file is flushed and closed even if the
# write raises (the original open/close pair leaked the handle on error).
with open(r"..\crew\output1.txt", "w", encoding="utf-8") as out_file:
    out_file.write(str(results))
plot_radar_chart(results)