# -*- coding: utf-8 -*-
"""
@Time: 2025/1/10 11:42
@Auth: Zhang Hongxing
@File: analysis_service.py
@Note:   
"""
#【接口2.1.3】GET /essay/history    # 获取作文详情  (NOTE(review): route is identical to 2.1.4's /essay/history — likely a copy-paste; confirm the intended path, e.g. /essay/detail)

def get_essay_detail(user_id: str, essay_id: str, version_id: int):
    """
    Look up one version of a user's essay in the local JSON store.

    :param user_id: user ID (selects <user_id>.json in the essay directory)
    :param essay_id: essay ID inside that file
    :param version_id: version to fetch
    :return: dict with essay_id/version_id/title/text/timestamp on success,
             or an (http_status, message) tuple on failure
    """
    storage_dir = '../data/raw/essay_data'  # root directory for per-user essay files
    user_file = os.path.join(storage_dir, f"{user_id}.json")

    try:
        # Missing file means the user has no stored essays.
        if not os.path.exists(user_file):
            print(f"[ERROR] 用户作文文件不存在: {user_file}")
            return (404, '用户作文数据不存在')

        # Load the user's essay data; a corrupt file is a server-side error.
        with open(user_file, 'r', encoding='utf-8') as fp:
            try:
                essays = json.load(fp)
            except json.JSONDecodeError:
                print(f"[ERROR] 用户作文文件 JSON 解析失败: {user_file}")
                return (500, '用户作文数据损坏，无法解析')

        if essay_id not in essays:
            print(f"[ERROR] 该用户未找到作文 ID: {essay_id}")
            return (404, '作文 ID 不存在')

        # Scan the version list for the requested version_id.
        target = None
        for candidate in essays[essay_id].get("versions", []):
            if candidate["version_id"] == version_id:
                target = candidate
                break

        if not target:
            print(f"[ERROR] 未找到作文版本 ID: {version_id}")
            return (404, '作文版本 ID 不存在')

        # Assemble the response payload.
        return {
            "essay_id": essay_id,
            "version_id": version_id,
            "title": essays[essay_id].get("title", ""),
            "text": target["text"],
            "timestamp": target["timestamp"]
        }

    except Exception as e:
        print(f"[ERROR] 获取作文详情失败: {str(e)}")
        return (500, '【接口2.1.3】获取作文详情失败')

#【接口2.1.4】GET /essay/history    # 获取历史作文列表
import os
import time
from typing import List, Dict,Optional
import re
def get_essay_history_from_files(user_id: str) -> List[Dict]:
    """
    Collect every stored version of every essay for a user, newest first.

    :param user_id: user ID (selects <user_id>.json in the essay directory)
    :return: list of dicts with essay_id/title/version_id/content/created_at;
             empty list when nothing is stored or on any error
    """
    try:
        base_dir = '../data/raw/essay_data'
        if not os.path.exists(base_dir):
            return []

        path = os.path.join(base_dir, f"{user_id}.json")
        if not os.path.exists(path):
            return []

        with open(path, 'r', encoding='utf-8') as fp:
            stored = json.load(fp)

        # Flatten every (essay, version) pair into one history entry.
        history = [
            {
                "essay_id": eid,
                "title": info.get("title", "无标题"),
                "version_id": ver.get("version_id"),
                "content": ver.get("text", ""),
                "created_at": ver.get("timestamp", ""),
            }
            for eid, info in stored.items()
            for ver in info.get("versions", [])
        ]

        # Newest entries first (timestamp strings sort lexicographically).
        return sorted(history, key=lambda item: item.get("created_at", ""), reverse=True)

    except Exception as e:
        print(f"【接口2.1.4】获取历史作文失败: {e}")
        return []

# 【接口2.1.5】GET /essay/analysis    # 获取指定作文版本的分析数据
def get_analysis_data(user_id: str, essay_id: str, version_id: str) -> Optional[Dict]:
    """
    Fetch the analysis record for one essay version from the local JSON store.

    :param user_id: user ID (selects <user_id>_analysis.json)
    :param essay_id: essay ID
    :param version_id: version ID; compared as a string, so int or str callers both match
    :return: the analysis dict, or None if not found / on error
    """
    try:
        directory = '../data/raw/analysis_data'
        if not os.path.exists(directory):
            return None
        json_file_path = os.path.join(directory, f"{user_id}_analysis.json")

        if not os.path.exists(json_file_path):
            return None
        with open(json_file_path, 'r', encoding='utf-8') as f:
            data = json.load(f)

        # version_id is persisted as a string (the writer stores str(version_id)),
        # so normalize both sides to str to avoid int-vs-str mismatches.
        for analysis in data.get(essay_id, {}).get("analysis", []):
            if str(analysis.get("version_id")) == str(version_id):
                return analysis
        return None
    except Exception as e:
        print(f"【接口2.1.5】获取分析数据失败: {e}")
        return None

# 【接口3.1.2】POST /model/preprocess   # 文本预处理
def preprocess_text(text: str) -> Optional[List[str]]:
    """
    Normalize English text into a lowercase token list.

    Steps: validate input type, collapse newlines, lowercase, strip
    punctuation (hyphens kept for compound words), then whitespace-split.

    :param text: raw English text
    :return: token list, or None when input is invalid or yields no tokens
    """
    try:
        if not isinstance(text, str):
            raise ValueError("输入必须是字符串类型")

        # Trim, fold newlines into spaces, lowercase — one cleaning pipeline.
        cleaned = text.strip().replace("\n", " ").lower()

        # Drop punctuation/special characters; keep word chars, whitespace and
        # hyphens (compound words like "well-known").
        cleaned = re.sub(r"[^\w\s-]", "", cleaned)

        # str.split() with no argument already discards empty tokens.
        words = cleaned.split()

        if not words:
            raise ValueError("预处理后没有有效单词")

        return words

    except Exception as e:
        print(f"【接口3.1.2】文本预处理报错：{str(e)}")
        return None


# 【接口3.3.1】POST /model/feedback   # 提交模型评分反馈
import json
from typing import Dict, Optional

def save_feedback(user_id: str, essay_id: str, version_id: int, feedback: str, suggestion: Optional[str] = None) -> Dict:
    """
    Persist a user's score-feedback record to a local JSON file.

    :param user_id: user ID
    :param essay_id: essay ID
    :param version_id: essay version the feedback refers to
    :param feedback: feedback text
    :param suggestion: optional improvement suggestion
    :return: the stored feedback record, or {} on failure
    """
    try:
        directory = '../data/raw/feedback_data/'
        # exist_ok=True avoids the check-then-create race when two requests
        # arrive together (and matches the makedirs usage elsewhere in this file).
        os.makedirs(directory, exist_ok=True)

        # Feedback ID is unique per essay/version/second.
        feedback_id = f"{essay_id}_{version_id}_{int(time.time())}"
        created_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

        feedback_data = {
            "user_id": user_id,
            "feedback_id": feedback_id,
            "essay_id": essay_id,
            "version_id": version_id,
            "feedback": feedback,
            "suggestion": suggestion,
            "created_at": created_at
        }

        # One file per feedback record.
        file_path = os.path.join(directory, f"{feedback_id}.json")
        with open(file_path, 'w', encoding='utf-8') as f:
            json.dump(feedback_data, f, ensure_ascii=False, indent=4)

        return feedback_data
    except Exception as e:
        print(f"【接口3.3.1】存储评分反馈失败: {e}")
        return {}


#【接口3.3.2】POST /system/feedback        #系统反馈
from typing import Dict, Optional

def system_feedback(
    user_id: str,
    overall_satisfaction: int,
    rating_accuracy: int,
    user_experience: int,
    feedback: str,
    name: Optional[str] = None,
    phone: Optional[str] = None,
    email: Optional[str] = None
) -> Dict:
    """
    Validate and assemble a system-feedback record.

    :param user_id: user ID
    :param overall_satisfaction: overall satisfaction rating (1-5)
    :param rating_accuracy: scoring-accuracy rating (1-5)
    :param user_experience: user-experience rating (1-5)
    :param feedback: free-text feedback
    :param name: optional contact name
    :param phone: optional contact phone
    :param email: optional contact email
    :return: the assembled feedback dict, or {} when validation/assembly fails
    """
    try:
        # Every rating must be an int in [1, 5]; reject the whole record otherwise.
        for score in (overall_satisfaction, rating_accuracy, user_experience):
            if not (isinstance(score, int) and 1 <= score <= 5):
                raise ValueError("评分参数必须是1到5之间的整数")

        # Feedback ID is simply the current epoch second.
        feedback_id = f"{int(time.time())}"
        created_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

        return {
            "user_id": user_id,
            "feedback_id": feedback_id,
            "overall_satisfaction": overall_satisfaction,
            "rating_accuracy": rating_accuracy,
            "user_experience": user_experience,
            "feedback": feedback,
            "contact_info": {
                "name": name,
                "phone": phone,
                "email": email
            },
            "created_at": created_at
        }

    except ValueError as ve:
        print(f"【接口3.3.2】参数校验失败: {ve}")
        return {}
    except Exception as e:
        print(f"【接口3.3.2】构造反馈信息失败: {e}")
        return {}

# 【接口5.1.1】POST /analysis/essay/{essay_id}/{version_id}   #获取单篇作文分析


def get_essay_analysis(essay_text: str, essay_knowledge: str, knowledge: str, modelName: str, user_id: str,
                       essay_id: str, version_id: int) -> Optional[Dict]:
    """
    Score one essay version with the scoring model matching its language, then
    persist the analysis into ../data/raw/analysis_data/<user_id>_analysis.json.

    :param essay_text: essay body to score
    :param essay_knowledge: language/mode selector ("中文", "标准", "英文", "日语", "法语", "俄语", "西班牙语")
    :param knowledge: knowledge/rubric text forwarded to the scoring model
    :param modelName: model identifier forwarded to the scoring functions
    :param user_id: owner of the essay (selects the storage file)
    :param essay_id: essay identifier inside the storage file
    :param version_id: version being scored (persisted as a string)
    :return: the stored analysis record; a {"message": ...} dict when scoring
             fails or the language is unsupported; None on unexpected errors
    """
    import os
    import json
    from datetime import datetime
    from typing import Optional, Dict
    from backend.model.scoring_model import score_essay_chatgpt_CHN
    from backend.model.scoring_model import score_essay_chatgpt_EN
    from backend.model.scoring_model import score_essay_chatgpt_JAPAN
    from backend.model.scoring_model import score_essay_chatgpt_FRANCE
    from backend.model.scoring_model import score_essay_chatgpt_RUSSIA
    from backend.model.scoring_model import score_essay_chatgpt_SPAIN

    try:
        # Dispatch table replaces the long if/elif chain. "标准" is special-cased
        # because score_essay_with_chatgpt (module-level import) takes no model name.
        scorers = {
            "中文": score_essay_chatgpt_CHN,
            "英文": score_essay_chatgpt_EN,
            "日语": score_essay_chatgpt_JAPAN,
            "法语": score_essay_chatgpt_FRANCE,
            "俄语": score_essay_chatgpt_RUSSIA,
            "西班牙语": score_essay_chatgpt_SPAIN,
        }
        if essay_knowledge == "标准":
            score_result = score_essay_with_chatgpt(essay_text, knowledge)
        elif essay_knowledge in scorers:
            score_result = scorers[essay_knowledge](essay_text, knowledge, modelName)
        else:
            return {"message": "不支持的作文知识类型"}

        # Scoring failed: empty result or missing total score.
        if not score_result or score_result.get("score") is None:
            return {"message": "评分失败", "details": score_result}

        analysis_data = {
            "version_id": str(version_id),
            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "overall_score": score_result.get("score"),
            "model_used": score_result.get("model_used", modelName)  # fall back to the requested model name
        }

        # Copy every detail entry verbatim — lists (e.g. "good_sentences") and
        # scalar scores (e.g. "vocabulary_score") alike. The previous code
        # branched on isinstance(value, list) but both branches were identical.
        analysis_data.update(score_result.get("details", {}))

        # Persist: merge into the user's analysis file, replacing any existing
        # record that has the same version_id.
        user_file_path = f'../data/raw/analysis_data/{user_id}_analysis.json'
        os.makedirs(os.path.dirname(user_file_path), exist_ok=True)  # ensure directory exists

        try:
            with open(user_file_path, 'r', encoding='utf-8') as f:
                existing_data = json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            existing_data = {}

        # Guard against a corrupt top-level structure.
        if not isinstance(existing_data, dict):
            existing_data = {}

        # Initialize the per-essay structure as needed.
        if essay_id not in existing_data:
            existing_data[essay_id] = {"analysis": []}
        elif "analysis" not in existing_data[essay_id]:
            existing_data[essay_id]["analysis"] = []

        # Replace the matching version in place, or append a new one.
        versions = existing_data[essay_id]["analysis"]
        for i, version in enumerate(versions):
            if str(version.get("version_id")) == str(version_id):
                versions[i] = analysis_data
                break
        else:
            versions.append(analysis_data)

        with open(user_file_path, 'w', encoding='utf-8') as f:
            json.dump(existing_data, f, ensure_ascii=False, indent=4)

        return analysis_data

    except Exception as e:
        print(f"【接口5.1.1】获取单篇作文分析失败: {str(e)}")
        return None

# 【接口5.1.1-2】POST /analysis/essay/{essay_id}/{version_id}   #获取单篇作文分析


def get_essay_analysis2(essay_text: str, essay_knowledge: str, knowledge: str, modelName: str, user_id: str,
                       essay_id: str, version_id: int) -> Optional[Dict]:
    """
    Variant of get_essay_analysis using the "*2" scoring models; scores one
    essay version and persists the analysis into
    ../data/raw/analysis_data/<user_id>_analysis.json.

    :param essay_text: essay body to score
    :param essay_knowledge: language/mode selector ("中文", "标准", "英文", "日语", "法语", "俄语", "西班牙语")
    :param knowledge: knowledge/rubric text forwarded to the scoring model
    :param modelName: model identifier forwarded to the scoring functions
    :param user_id: owner of the essay (selects the storage file)
    :param essay_id: essay identifier inside the storage file
    :param version_id: version being scored (persisted as a string)
    :return: the stored analysis record; a {"message": ...} dict when scoring
             fails or the language is unsupported; None on unexpected errors
    """
    import os
    import json
    from datetime import datetime
    from typing import Optional, Dict
    from backend.model.scoring_model import score_essay_chatgpt_CHN2
    from backend.model.scoring_model import score_essay_chatgpt_EN2
    from backend.model.scoring_model import score_essay_chatgpt_JAPAN2
    from backend.model.scoring_model import score_essay_chatgpt_FRANCE2
    from backend.model.scoring_model import score_essay_chatgpt_RUSSIA2
    from backend.model.scoring_model import score_essay_chatgpt_SPAIN2

    try:
        # Dispatch table replaces the long if/elif chain. "标准" is special-cased
        # because score_essay_with_chatgpt (module-level import) takes no model name.
        scorers = {
            "中文": score_essay_chatgpt_CHN2,
            "英文": score_essay_chatgpt_EN2,
            "日语": score_essay_chatgpt_JAPAN2,
            "法语": score_essay_chatgpt_FRANCE2,
            "俄语": score_essay_chatgpt_RUSSIA2,
            "西班牙语": score_essay_chatgpt_SPAIN2,
        }
        if essay_knowledge == "标准":
            score_result = score_essay_with_chatgpt(essay_text, knowledge)
        elif essay_knowledge in scorers:
            score_result = scorers[essay_knowledge](essay_text, knowledge, modelName)
        else:
            return {"message": "不支持的作文知识类型"}

        # Scoring failed: empty result or missing total score.
        if not score_result or score_result.get("score") is None:
            return {"message": "评分失败", "details": score_result}

        analysis_data = {
            "version_id": str(version_id),
            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "overall_score": score_result.get("score"),
            "model_used": score_result.get("model_used", modelName)  # fall back to the requested model name
        }

        # Copy every detail entry verbatim — lists (e.g. "good_sentences") and
        # scalar scores (e.g. "vocabulary_score") alike. The previous code
        # branched on isinstance(value, list) but both branches were identical.
        analysis_data.update(score_result.get("details", {}))

        # Persist: merge into the user's analysis file, replacing any existing
        # record that has the same version_id.
        user_file_path = f'../data/raw/analysis_data/{user_id}_analysis.json'
        os.makedirs(os.path.dirname(user_file_path), exist_ok=True)  # ensure directory exists

        try:
            with open(user_file_path, 'r', encoding='utf-8') as f:
                existing_data = json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            existing_data = {}

        # Guard against a corrupt top-level structure.
        if not isinstance(existing_data, dict):
            existing_data = {}

        # Initialize the per-essay structure as needed.
        if essay_id not in existing_data:
            existing_data[essay_id] = {"analysis": []}
        elif "analysis" not in existing_data[essay_id]:
            existing_data[essay_id]["analysis"] = []

        # Replace the matching version in place, or append a new one.
        versions = existing_data[essay_id]["analysis"]
        for i, version in enumerate(versions):
            if str(version.get("version_id")) == str(version_id):
                versions[i] = analysis_data
                break
        else:
            versions.append(analysis_data)

        with open(user_file_path, 'w', encoding='utf-8') as f:
            json.dump(existing_data, f, ensure_ascii=False, indent=4)

        return analysis_data

    except Exception as e:
        # Label fixed: this is the 5.1.1-2 variant (previous log said 5.1.1).
        print(f"【接口5.1.1-2】获取单篇作文分析失败: {str(e)}")
        return None
# """
# 优化前
# """
# #【5.1.2】GET  /analysis/progress   获取进步分析
# def get_user_progress(user_id: str) -> Optional[Dict]:
#     """
#     获取用户的作文评分进展分析（对比相同主题下最新的两个版本）并保存进步数据
#     :param user_id: 用户ID
#     :return: 进步分析数据
#     """
#     user_file_path = f'../data/raw/analysis_data/{user_id}_analysis.json'
#     progress_file_path = f'../data/raw/progress_data/{user_id}_progress.json'
#
#     try:
#         if not os.path.exists(user_file_path):
#             return None  # 无评分历史
#
#         with open(user_file_path, 'r', encoding='utf-8') as f:
#             analysis_data = json.load(f)
#
#         latest_essay_id = None
#         latest_timestamp = None
#
#         # 遍历所有essay_id，找到最新的version_id所属的essay_id
#         for essay_id, essay_data in analysis_data.items():
#             if "analysis" not in essay_data or len(essay_data["analysis"]) == 0:
#                 continue  # 忽略无数据的情况
#
#             # 按时间戳排序
#             sorted_versions = sorted(
#                 essay_data["analysis"], key=lambda x: datetime.strptime(x["timestamp"], "%Y-%m-%d %H:%M:%S"),
#                 reverse=True
#             )
#
#             if latest_timestamp is None or datetime.strptime(sorted_versions[0]["timestamp"],
#                                                              "%Y-%m-%d %H:%M:%S") > latest_timestamp:
#                 latest_timestamp = datetime.strptime(sorted_versions[0]["timestamp"], "%Y-%m-%d %H:%M:%S")
#                 latest_essay_id = essay_id
#
#         if latest_essay_id is None or len(analysis_data[latest_essay_id]["analysis"]) < 2:
#             return None  # 只有一篇作文，不返回进步数据
#
#         # 获取最新essay_id的评分数据并进行对比
#         sorted_versions = sorted(
#             analysis_data[latest_essay_id]["analysis"],
#             key=lambda x: datetime.strptime(x["timestamp"], "%Y-%m-%d %H:%M:%S"), reverse=True
#         )
#         latest, previous = sorted_versions[:2]  # 取最近的两个版本
#
#         def generate_analysis(change, aspect):
#             if change > 0:
#                 return f"{aspect}有所提升，建议继续保持和优化。"
#             elif change < 0:
#                 return f"{aspect}有所下降，建议加强该方面的练习。"
#             else:
#                 return f"{aspect}保持不变，建议进一步优化提升。"
#
#         progress_entry = {
#             "essay_id": latest_essay_id,
#             "version_compare": [previous["version_id"], latest["version_id"]],
#             "overall_score_change": latest["overall_score"] - previous["overall_score"],
#             "overall_analysis": generate_analysis(latest["overall_score"] - previous["overall_score"], "整体评分"),
#             "vocabulary_change": latest["vocabulary_score"]["total"] - previous["vocabulary_score"]["total"],
#             "vocabulary_analysis": generate_analysis(
#                 latest["vocabulary_score"]["total"] - previous["vocabulary_score"]["total"], "词汇丰富度"),
#             "sentence_change": latest["sentence_score"]["total"] - previous["sentence_score"]["total"],
#             "sentence_analysis": generate_analysis(
#                 latest["sentence_score"]["total"] - previous["sentence_score"]["total"], "句子结构"),
#             "structure_change": latest["structure_score"]["total"] - previous["structure_score"]["total"],
#             "structure_analysis": generate_analysis(
#                 latest["structure_score"]["total"] - previous["structure_score"]["total"], "文章结构"),
#             "content_change": latest["content_score"]["total"] - previous["content_score"]["total"],
#             "content_analysis": generate_analysis(latest["content_score"]["total"] - previous["content_score"]["total"],
#                                                   "内容质量"),
#             "timestamp": latest["timestamp"]
#         }
#
#         # 确保progress_data目录存在
#         os.makedirs(os.path.dirname(progress_file_path), exist_ok=True)
#
#         # 读取已有的进步数据
#         if os.path.exists(progress_file_path):
#             with open(progress_file_path, 'r', encoding='utf-8') as f:
#                 existing_progress = json.load(f)
#         else:
#             existing_progress = {"user_id": user_id, "progress": []}
#
#         # 追加新进步数据
#         existing_progress["progress"].append(progress_entry)
#
#         # 写入文件
#         with open(progress_file_path, 'w', encoding='utf-8') as f:
#             json.dump(existing_progress, f, ensure_ascii=False, indent=4)
#
#         return existing_progress if progress_entry else None
#
#     except Exception as e:
#         print(f"【接口5.1.2】获取用户进步分析失败: {e}")
#         return None
"""
优化后
"""
#【5.1.2】GET  /analysis/progress   获取进步分析
def get_user_progress(user_id: str) -> Optional[Dict]:
    """
    Build a progress report for the user's most recently scored essay by
    comparing the two newest analysis versions produced by the same model.

    Reads ../data/raw/analysis_data/<user_id>_analysis.json, writes the report
    into ../data/raw/progress_data/<user_id>_progress.json (keyed by essay id,
    overwriting any previous entry for that essay), and returns the report.

    :param user_id: user ID (selects both storage files)
    :return: the progress dict, or None when there is no data, fewer than two
             comparable versions, or any error occurs
    """
    user_file_path = f'../data/raw/analysis_data/{user_id}_analysis.json'
    progress_file_path = f'../data/raw/progress_data/{user_id}_progress.json'
    # Optional JSON file mapping score keys (e.g. "vocabulary_score") to Chinese labels.
    mapping_path = '../data/raw/score_dimension_mapping.json'

    try:
        if not os.path.exists(user_file_path):
            return None
        with open(user_file_path, 'r', encoding='utf-8') as f:
            analysis_data = json.load(f)
        if not os.path.exists(mapping_path):
            dimension_mapping = {}
        else:
            with open(mapping_path, 'r', encoding='utf-8') as f:
                dimension_mapping = json.load(f)

        latest_entry = None
        latest_time = None
        # Step 1: locate the single newest version across ALL essays (by timestamp).
        for essay_id, essay_data in analysis_data.items():
            for version in essay_data.get("analysis", []):
                ts = datetime.strptime(version["timestamp"], "%Y-%m-%d %H:%M:%S")
                if latest_time is None or ts > latest_time:
                    latest_time = ts
                    latest_entry = {
                        "essay_id": essay_id,
                        "version": version,
                        "timestamp": ts
                    }

        if not latest_entry:
            return None

        target_essay_id = latest_entry["essay_id"]
        model_used = latest_entry["version"].get("model_used")
        # Step 2: within that essay, keep only versions scored by the same model
        # (scores from different models are not directly comparable), newest first.
        versions = analysis_data[target_essay_id].get("analysis", [])
        same_model_versions = [v for v in versions if v.get("model_used") == model_used]
        same_model_versions.sort(key=lambda v: datetime.strptime(v["timestamp"], "%Y-%m-%d %H:%M:%S"), reverse=True)

        if len(same_model_versions) < 2:
            return None  # not enough versions to compare

        latest, previous = same_model_versions[:2]
        # Step 3: compute the change for every "*_score" dimension whose value
        # is a dict carrying a "total" field in both versions.
        dimension_changes = {}  # NOTE(review): populated but never used in the output — candidate for removal
        analyses = {}
        for key in latest.keys():
            if re.match(r".*_score$", key):
                if isinstance(latest[key], dict) and "total" in latest[key] and \
                   isinstance(previous.get(key), dict) and "total" in previous[key]:
                    change = latest[key]["total"] - previous[key]["total"]
                    dimension_changes[key] = change

                    # Chinese display name for the dimension (fallback derived from the key).
                    aspect_chinese = dimension_mapping.get(key, f"{key.replace('_score', '')}得分")

                    # Generate the per-dimension analysis sentence.
                    if change > 0:
                        analysis = f"{aspect_chinese}相比上次有所提高，体现出写作能力在该维度的进步，建议继续保持并深入优化。"
                    elif change < 0:
                        analysis = f"{aspect_chinese}相比上次有所下降，可能存在相关能力短板，建议针对性强化练习提升表现。"
                    else:
                        analysis = f"{aspect_chinese}与上次持平，说明能力保持稳定，可尝试在该维度寻求更进一步的突破。"
                    analyses[f"{key}_change"] = round(change, 2)
                    analyses[f"{key}_analysis"] = analysis
        # Overall-score analysis (same sentence pattern as the dimensions).
        overall_change = latest["overall_score"] - previous["overall_score"]
        overall_aspect = dimension_mapping.get("overall_score", "整体评分")
        if overall_change > 0:
            overall_analysis = f"{overall_aspect}相比上次有所提高，体现出写作能力在该维度的进步，建议继续保持并深入优化。"
        elif overall_change < 0:
            overall_analysis = f"{overall_aspect}相比上次有所下降，可能存在相关能力短板，建议针对性强化练习提升表现。"
        else:
            overall_analysis = f"{overall_aspect}与上次持平，说明能力保持稳定，可尝试在该维度寻求更进一步的突破。"

        # Step 4: assemble the progress report.
        progress_entry = {
            "essay_id": target_essay_id,
            "model_used": model_used,
            "version_compare": [previous["version_id"], latest["version_id"]],
            "overall_score_change": round(overall_change, 2),
            "overall_analysis": overall_analysis,
            "timestamp": latest["timestamp"]
        }
        progress_entry.update(analyses)

        # Step 5: persist, replacing any previous report for this essay.
        os.makedirs(os.path.dirname(progress_file_path), exist_ok=True)
        all_progress = {}
        if os.path.exists(progress_file_path):
            with open(progress_file_path, 'r', encoding='utf-8') as f:
                all_progress = json.load(f)
        all_progress[target_essay_id] = {
            "progress": progress_entry
        }
        with open(progress_file_path, 'w', encoding='utf-8') as f:
            json.dump(all_progress, f, ensure_ascii=False, indent=4)

        return progress_entry

    except Exception as e:
        print(f"【接口5.1.2】获取用户进步分析失败: {e}")
        return None

"""
优化前
"""
# #【5.1.3】作文对比分析 GET /analysis/compare
# def compare_essays(essay_1, essay_2):
#     """
#     对比两篇作文的评分结果，分析其得分差异。
#     :param essay_1: 第一篇作文的分析数据
#     :param essay_2: 第二篇作文的分析数据
#     :return: 对比分析报告，包括总评分差异、维度得分对比、具体差异分析
#     """
#     try:
#         # 计算总评分差异
#         score_diff = essay_2["overall_score"] - essay_1["overall_score"]
#
#         # 计算各个维度的得分差异
#         score_comparison = {
#             "total_score_diff": round(score_diff, 2),
#             "vocabulary_diff": round(essay_2["vocabulary_score"]["total"] - essay_1["vocabulary_score"]["total"], 2),
#             "sentence_diff": round(essay_2["sentence_score"]["total"] - essay_1["sentence_score"]["total"], 2),
#             "structure_diff": round(essay_2["structure_score"]["total"] - essay_1["structure_score"]["total"], 2),
#             "content_diff": round(essay_2["content_score"]["total"] - essay_1["content_score"]["total"], 2),
#         }
#
#         # 生成差异分析
#         analysis_report = []
#         if score_diff > 0:
#             analysis_report.append("第二个版本的总评分比第一个版本更高，说明写作质量有所提高。")
#         elif score_diff < 0:
#             analysis_report.append("第二个版本的总评分比第一个版本更低，可能需要注意写作质量的下降。")
#         else:
#             analysis_report.append("两个版本的总评分相同，整体写作水平保持稳定。")
#
#         if score_comparison["vocabulary_diff"] > 0:
#             analysis_report.append("第二个版本的词汇评分更高，说明用词更丰富、准确。")
#         elif score_comparison["vocabulary_diff"] < 0:
#             analysis_report.append("第二个版本的词汇评分降低，建议提升词汇的多样性和准确度。")
#
#         if score_comparison["sentence_diff"] > 0:
#             analysis_report.append("第二个版本的句子结构评分更高，句子表达更加流畅和多样化。")
#         elif score_comparison["sentence_diff"] < 0:
#             analysis_report.append("第二个版本的句子结构评分下降，建议优化句子结构，提高表达流畅度。")
#
#         if score_comparison["structure_diff"] > 0:
#             analysis_report.append("第二个版本的篇章结构评分更高，逻辑更加清晰，段落衔接更顺畅。")
#         elif score_comparison["structure_diff"] < 0:
#             analysis_report.append("第二个版本的篇章结构评分下降，建议增强文章的逻辑性和段落连接。")
#
#         if score_comparison["content_diff"] > 0:
#             analysis_report.append("第二个版本的内容评分更高，论点更丰富，内容更具深度。")
#         elif score_comparison["content_diff"] < 0:
#             analysis_report.append("第二个版本的内容评分下降，建议增强内容的深度和新颖性。")
#
#         return {
#             "essay_1": {
#                 "overall_score": essay_1["overall_score"],
#                 "vocabulary_score": essay_1["vocabulary_score"]["total"],
#                 "sentence_score": essay_1["sentence_score"]["total"],
#                 "structure_score": essay_1["structure_score"]["total"],
#                 "content_score": essay_1["content_score"]["total"],
#             },
#             "essay_2": {
#                 "overall_score": essay_2["overall_score"],
#                 "vocabulary_score": essay_2["vocabulary_score"]["total"],
#                 "sentence_score": essay_2["sentence_score"]["total"],
#                 "structure_score": essay_2["structure_score"]["total"],
#                 "content_score": essay_2["content_score"]["total"],
#             },
#             "score_comparison": score_comparison,
#             "analysis_report": analysis_report
#         }
#
#     except Exception as e:
#         print(f"作文对比分析失败：{str(e)}")
#         return None

"""
优化后
"""
#【5.1.3】作文对比分析 GET /analysis/compare
def compare_essays(essay_1, essay_2):
    """
    Compare the score breakdowns of two essays and explain the differences.

    :param essay_1: analysis data of the first essay
    :param essay_2: analysis data of the second essay
    :return: dict with per-essay scores, score differences, and a Chinese
             analysis report; on any error, the same shape with empty data
    """
    try:
        # Optional Chinese display names for score dimensions.
        mapping_path = "../data/raw/score_dimension_mapping.json"
        dim_names = {}
        if os.path.exists(mapping_path):
            with open(mapping_path, "r", encoding="utf-8") as f:
                dim_names = json.load(f)

        scores_a = {}
        scores_b = {}
        diffs = {}
        report = []

        # Overall score comes first in every output dict.
        total_delta = round(essay_2["overall_score"] - essay_1["overall_score"], 2)
        diffs["overall_score_diff"] = total_delta
        scores_a["overall_score"] = essay_1["overall_score"]
        scores_b["overall_score"] = essay_2["overall_score"]

        if total_delta > 0:
            report.append("第二篇作文的总评分比第一篇更高，说明写作整体水平有所提升。")
        elif total_delta < 0:
            report.append("第二篇作文的总评分比第一篇更低，说明写作质量可能有所下降。")
        else:
            report.append("两篇作文的总评分持平，整体写作水平变化不大。")

        # Dynamic dimensions: any "*_score" key whose value is {"total": ...}.
        for dim in essay_1:
            if not dim.endswith("_score"):
                continue
            first = essay_1[dim]
            if not (isinstance(first, dict) and "total" in first):
                continue

            a_total = first["total"]
            b_total = essay_2[dim]["total"]
            delta = round(b_total - a_total, 2)

            scores_a[dim] = a_total
            scores_b[dim] = b_total
            diffs[f"{dim}_diff"] = delta

            # Per-dimension sentence using the mapped (or derived) display name.
            label = dim_names.get(dim, dim.replace("_score", "").capitalize())
            if delta > 0:
                report.append(f"第二篇作文的{label}提升，说明该方面表现更优。")
            elif delta < 0:
                report.append(f"第二篇作文的{label}下降，建议加强该方面的训练。")
            else:
                report.append(f"第二篇作文的{label}与第一篇持平，保持稳定表现。")

        return {
            "essay_1": scores_a,
            "essay_2": scores_b,
            "score_comparison": diffs,
            "analysis_report": report
        }

    except Exception as e:
        print(f"【compare_essays_dynamic】对比分析失败：{str(e)}")
        return {
            "essay_1": {},
            "essay_2": {},
            "score_comparison": {},
            "analysis_report": ["对比分析过程中出现错误，无法完成评分维度的详细对比。"]
        }

#【5.1.4】 用户当前作文数量统计
import os
import json


def count_user_essays(user_id: str) -> int:
    """
    Count all scored essay versions for a user (sum of per-essay version lists).

    :param user_id: user ID (selects <user_id>_analysis.json)
    :return: total version count; 0 when the file is missing or unreadable
    """
    try:
        path = os.path.join("../data/raw/analysis_data", f"{user_id}_analysis.json")

        # No analysis file means no essays have been scored yet.
        if not os.path.exists(path):
            return 0

        with open(path, "r", encoding="utf-8") as fp:
            try:
                records = json.load(fp)

                # Sum the number of versions across every essay entry that
                # actually carries an "analysis" list.
                total = 0
                for entry in records.values():
                    if "analysis" in entry and isinstance(entry["analysis"], list):
                        total += len(entry["analysis"])
                return total

            except json.JSONDecodeError:
                return 0
    except Exception as e:
        print(f"【统计错误】无法统计用户{user_id}的作文数量: {e}")
        return 0

# 【5.1.5】POST /analysis/rank               #当前作文排名百分比
import os
import json
from datetime import datetime
from flask import jsonify
from backend.model.scoring_model import score_essay_with_chatgpt
from backend.services.analysis_service import get_essay_detail

# 评分存储路径
STORAGE_DIR = "../data/raw/analysis_data"


def get_score_percentile(user_id: str, essay_id: str, version_id: int):
    """
    Compute where one essay version's overall score ranks among all stored
    overall scores (all users, all essays, all versions) in STORAGE_DIR.

    The percentile is the share of collected scores strictly below the
    current score; the current score itself is included in the population.

    :param user_id: user whose essay is being ranked
    :param essay_id: essay identifier within that user's analysis file
    :param version_id: version of the essay to rank
    :return: Flask (response, status) pair — 200 with the percentile on
             success, 404 when user/essay/version data is missing, 500 on
             parse failure or when no scores exist to rank against
    """
    try:
        # 1. Load this user's analysis file and find the requested version's score.
        user_file = os.path.join(STORAGE_DIR, f"{user_id}_analysis.json")

        if not os.path.exists(user_file):
            return jsonify({"code": 404, "message": "未找到该用户的评分数据"}), 404

        with open(user_file, "r", encoding="utf-8") as f:
            try:
                user_scores = json.load(f)
            except json.JSONDecodeError:
                return jsonify({"code": 500, "message": "评分数据解析错误"}), 500

        # The file must be a dict keyed by essay id.
        if not isinstance(user_scores, dict) or str(essay_id) not in user_scores:
            return jsonify({"code": 404, "message": "该用户没有此作文的评分数据"}), 404

        analysis_list = user_scores[str(essay_id)].get("analysis", [])
        current_score = None
        for entry in analysis_list:
            # version_id may be stored as int or str; compare as strings.
            if str(entry.get("version_id")) == str(version_id):
                current_score = entry.get("overall_score")
                break

        if current_score is None:
            return jsonify({"code": 404, "message": "未找到该作文指定版本的评分"}), 404

        # 2. Scan every user's analysis file and collect all overall scores.
        all_scores = []

        for filename in os.listdir(STORAGE_DIR):
            if not filename.endswith("_analysis.json"):
                continue  # skip non-analysis files

            # Distinct name: the original rebound `file_path`, shadowing the
            # current user's path computed above.
            peer_path = os.path.join(STORAGE_DIR, filename)

            try:
                with open(peer_path, "r", encoding="utf-8") as pf:
                    scores = json.load(pf)
            except (OSError, json.JSONDecodeError):
                # One unreadable or corrupt peer file should not abort the
                # whole ranking — skip it, as corrupt JSON already was.
                continue

            if not isinstance(scores, dict):
                continue  # malformed top-level structure

            for essay_data in scores.values():
                if isinstance(essay_data, dict) and "analysis" in essay_data:
                    for entry in essay_data["analysis"]:
                        if "overall_score" in entry:
                            all_scores.append(entry["overall_score"])

        # 3. Percentile = fraction of scores strictly below ours.
        if not all_scores:
            return jsonify({"code": 500, "message": "无历史作文评分数据，无法计算排名"}), 500

        lower_scores_count = sum(1 for s in all_scores if s < current_score)
        percentile = (lower_scores_count / len(all_scores)) * 100

        # 4. Success payload.
        return jsonify({
            "code": 200,
            "message": "计算排名成功",
            "data": {
                "user_id": user_id,
                "essay_id": essay_id,
                "version_id": version_id,
                "score": current_score,
                "percentile_rank": round(percentile, 2)
            }
        }), 200

    except Exception as e:
        print(f"【接口5.1.5】作文排名计算失败: {e}")
        return jsonify({"code": 500, "message": "服务器内部错误"}), 500

# 【接口5.1.6】GET /analysis/vocabulary-diversity 获取作文词汇多样性数据
def get_vocabulary_diversity_analysis(user_id: str, essay_id: str, version_id: str) -> Optional[Dict]:
    """
    Build the vocabulary-diversity breakdown for one essay version.

    Reads the user's analysis file, locates the requested version, and maps
    its diversity score onto a band ("high"/"medium"/"low") with a fixed
    word-tier distribution for that band.

    :return: dict with score, max_score, level, distribution and timestamp,
             or None when the file/essay/version is missing or unreadable
    """
    path = f'../data/raw/analysis_data/{user_id}_analysis.json'
    try:
        if not os.path.exists(path):
            return None

        with open(path, 'r', encoding='utf-8') as fh:
            records = json.load(fh)

        # Locate the requested version; ids may be int or str, so compare as str.
        matched = None
        for version in records.get(essay_id, {}).get("analysis", []):
            if str(version.get("version_id")) == str(version_id):
                matched = version
                break
        if matched is None:
            return None

        score = matched.get("vocabulary_grammar_score", {}).get("diversity", 0)

        # Band thresholds split the 0–6.25 scale; each band carries a
        # fixed advanced/common/basic word distribution.
        if score >= 4.25:
            level, distribution = "high", {"advanced": 35, "common": 45, "basic": 20}
        elif score >= 2.25:
            level, distribution = "medium", {"advanced": 20, "common": 50, "basic": 30}
        else:
            level, distribution = "low", {"advanced": 10, "common": 40, "basic": 50}

        return {
            "score": score,
            "max_score": 6.25,
            "level": level,
            "distribution": distribution,
            "timestamp": matched.get("timestamp")
        }

    except Exception as e:
        print(f"获取词汇多样性分析失败: {str(e)}")
        return None

# 【接口5.1.7】GET /analysis/article-structure 获取作文文章结构数据
def get_article_structure_analysis(user_id: str, essay_id: str, version_id: str) -> Optional[Dict]:
    """
    Build the article-structure breakdown for one essay version.

    Reads the user's analysis file, locates the requested version, and maps
    its organization score onto a band ("high"/"medium"/"low") with a fixed
    sentence-type distribution for that band.

    :return: dict with score, max_score, level, distribution and timestamp,
             or None when the file/essay/version is missing or unreadable
    """
    path = f'../data/raw/analysis_data/{user_id}_analysis.json'
    try:
        if not os.path.exists(path):
            return None

        with open(path, 'r', encoding='utf-8') as fh:
            records = json.load(fh)

        # Locate the requested version; ids may be int or str, so compare as str.
        matched = None
        for version in records.get(essay_id, {}).get("analysis", []):
            if str(version.get("version_id")) == str(version_id):
                matched = version
                break
        if matched is None:
            return None

        score = matched.get("structure_score", {}).get("organization", 0)

        # Band thresholds split the 0–6.25 scale; each band carries a
        # fixed complex/compound/simple sentence distribution.
        if score >= 4.25:
            level, distribution = "high", {"complex": 40, "compound": 40, "simple": 20}
        elif score >= 2.25:
            level, distribution = "medium", {"complex": 25, "compound": 45, "simple": 30}
        else:
            level, distribution = "low", {"complex": 15, "compound": 35, "simple": 50}

        return {
            "score": score,
            "max_score": 6.25,
            "level": level,
            "distribution": distribution,
            "timestamp": matched.get("timestamp")
        }

    except Exception as e:
        print(f"获取文章结构分析失败: {str(e)}")
        return None

# 【接口5.1.8】GET /analysis/purpose-fit 获取作文目的契合度数据
def get_purpose_fit_analysis(user_id: str, essay_id: str, version_id: str) -> Optional[Dict]:
    """
    Build the purpose-fit breakdown for one essay version.

    Reads the user's analysis file, locates the requested version, and maps
    its purpose-fit score onto a band ("high"/"medium"/"low") with a fixed
    target-hit distribution for that band.

    :return: dict with score, max_score, level, distribution and timestamp,
             or None when the file/essay/version is missing or unreadable
    """
    path = f'../data/raw/analysis_data/{user_id}_analysis.json'
    try:
        if not os.path.exists(path):
            return None

        with open(path, 'r', encoding='utf-8') as fh:
            records = json.load(fh)

        # Locate the requested version; ids may be int or str, so compare as str.
        matched = None
        for version in records.get(essay_id, {}).get("analysis", []):
            if str(version.get("version_id")) == str(version_id):
                matched = version
                break
        if matched is None:
            return None

        score = matched.get("content_score", {}).get("purpose_fit", 0)

        # Band thresholds split the 0–6.25 scale; each band carries a
        # fixed target_hit/partial_hit/off_target distribution.
        if score >= 4.25:
            level, distribution = "high", {"target_hit": 80, "partial_hit": 15, "off_target": 5}
        elif score >= 2.25:
            level, distribution = "medium", {"target_hit": 60, "partial_hit": 30, "off_target": 10}
        else:
            level, distribution = "low", {"target_hit": 30, "partial_hit": 40, "off_target": 30}

        return {
            "score": score,
            "max_score": 6.25,
            "level": level,
            "distribution": distribution,
            "timestamp": matched.get("timestamp")
        }

    except Exception as e:
        print(f"获取目的契合度分析失败: {str(e)}")
        return None