"""
Utility function to assess risk level and compliance status appropriateness.
"""
from .call_llm import call_llm
import json
from .tools import format_json_response
def _evaluate_with_llm(prompt, title, inspector):
    """Send *prompt* to the LLM and parse its JSON verdict.

    Args:
        prompt (str): Fully-formatted assessment prompt (Chinese template).
        title (str): Item title attached to any reported issue.
        inspector (str): Inspector content, attached as ``originalContent``.

    Returns:
        dict | None: The issue dict (augmented with ``title`` and
        ``originalContent``) when the LLM marks the value as inappropriate,
        otherwise None (appropriate value, or unparseable LLM response).
    """
    print("risk_assessment prompt: ", prompt)
    response = call_llm(prompt)
    try:
        # Normalize the raw LLM text (e.g. strip code fences) before parsing.
        response = format_json_response(response)
        print("risk_response: ", response)
        parsed = json.loads(response)
    except (json.JSONDecodeError, KeyError):
        # Best-effort: an unparseable response is logged and treated as "no issue".
        print("response: ", response)
        print("LLM response is not valid JSON")
        return None

    # Missing "is_appropriate" defaults to True so only explicit negatives count.
    if not parsed.get("is_appropriate", True) and parsed.get("issue"):
        issue = parsed["issue"]
        issue["title"] = title
        issue["originalContent"] = inspector
        return issue
    return None


def risk_assessment(description, guide, inspector, risk_level=None, compliance_status=None, title=""):
    """
    Evaluate if the risk level or compliance status is appropriate based on content.

    Each supplied value (risk_level, compliance_status) is assessed
    independently, so providing both yields two LLM evaluations and each
    issue lands in its matching result list.

    Args:
        description (str): Description content
        guide (str): Guide content
        inspector (str): Inspector content
        risk_level (str, optional): Current risk level
        compliance_status (str, optional): Current compliance status
        title (str, optional): Item title propagated into reported issues

    Returns:
        dict: {"risk_level_issues": [...], "compliance_status_issues": [...]}
    """
    result = {
        "risk_level_issues": [],
        "compliance_status_issues": []
    }

    # Skip assessment if required fields are missing
    if not all([description, guide, inspector]):
        print("required fields are missing")
        return result

    # Assess the risk level, when one was provided.
    if risk_level:
        risk_prompt = f"""
请评估以下风险项的风险等级是否合理：

描述内容：{description}
指引内容：{guide}
检查人员描述：{inspector}
当前风险等级：{risk_level}

请以JSON格式返回评估结果，格式如下：
{{
  "is_appropriate": true/false,
  "issue": {{
    "type": "risk-level",
    "severity": "high/medium/low",
    "description": "问题描述",
    "suggestion": "改进建议"
  }}
}}

如果风险等级合理，请将is_appropriate设为true，并将issue设为null。
请确保返回的是有效的JSON格式。
"""
        issue = _evaluate_with_llm(risk_prompt, title, inspector)
        if issue:
            result["risk_level_issues"].append(issue)

    # Assess the compliance status, when one was provided.
    if compliance_status:
        compliance_prompt = f"""
请评估以下保障项的满足情况是否合理：

描述内容：{description}
指引内容：{guide}
检查人员描述：{inspector}
当前满足情况：{compliance_status}

请以JSON格式返回评估结果，格式如下：
{{
  "is_appropriate": true/false,
  "issue": {{
    "type": "compliance-status",
    "severity": "high/medium/low",
    "description": "问题描述",
    "suggestion": "改进建议"
  }}
}}

如果满足情况合理，请将is_appropriate设为true，并将issue设为null。
请确保返回的是有效的JSON格式。
"""
        issue = _evaluate_with_llm(compliance_prompt, title, inspector)
        if issue:
            result["compliance_status_issues"].append(issue)

    return result

if __name__ == "__main__":
    # Demo run: the recorded risk level and compliance status deliberately
    # contradict the evidence (no password-complexity policy configured),
    # so the assessor is expected to flag them.
    sample = dict(
        description="系统未配置密码复杂度要求",
        guide="系统应配置密码复杂度要求，包括长度、字符类型等",
        inspector="检查发现系统未配置任何密码复杂度要求",
        risk_level="低",  # unreasonable — should be high
        compliance_status="满足",  # unreasonable — should be "not satisfied"
        title="密码复杂度检查",
    )

    assessment = risk_assessment(**sample)
    print(json.dumps(assessment, ensure_ascii=False, indent=2))
