# process_results.py (Revised and Robust Version)
import json
from tqdm import tqdm

# Import all settings from the config file
import config

def load_datasets(file_paths):
    """Read every JSON file in *file_paths* and merge their list payloads.

    Files that cannot be opened or parsed are skipped with a printed
    warning (best-effort merge); payloads that are not JSON arrays are
    silently ignored.
    """
    merged = []
    for file_path in file_paths:
        try:
            with open(file_path, 'r', encoding='utf-8') as handle:
                payload = json.load(handle)
        except Exception as e:
            print(f"Warning: Could not process file {file_path}. Error: {e}")
            continue
        if isinstance(payload, list):
            merged.extend(payload)
    return merged

def calculate_cci_indicators_from_checks(checks):
    """Calculates CCI metrics directly from the context_coherence_checks dictionary."""
    # Map each A-check name to (penalty key, weight applied on failure).
    # Includes A5 to align with the prompt's A1-A5 checks.
    weight_table = {
        "A1_semantic_flow": ("Penalty_A1", 0.50),
        "A2_discourse_role": ("Penalty_A2", 0.40),
        "A3_co_reference": ("Penalty_A3", 0.15),
        "A4_term_stability": ("Penalty_A4", 0.25),
        "A5_argumentative_strain": ("Penalty_A5", 0.30),
    }
    penalties = {key: 0.0 for key, _ in weight_table.values()}
    if not checks:
        return penalties

    # The 'passed' key from the prompt uses a boolean, so we check `is False`
    # (a missing check or non-boolean value applies no penalty).
    for check_name, (key, weight) in weight_table.items():
        if checks.get(check_name, {}).get("passed") is False:
            penalties[key] = weight

    return penalties

def calculate_cri_indicators(analysis_details):
    """Calculates CRI metrics from the analysis_details dictionary.

    Returns a dict with keys S_topic, S_evidence, S_scope and S_intent.
    All values are floats; S_evidence/S_intent are clamped at 0.0, while
    S_topic may go negative when contradictions dominate.
    """
    if not analysis_details:
        return {'S_topic': 0.0, 'S_evidence': 0.0, 'S_scope': 0.0, 'S_intent': 0.0}

    # S_topic: (support*1.0 + absent*0.3 - contradict*1.0) / total claims.
    # `or []` guards against an explicit JSON null for the key.
    alignments = analysis_details.get("assertion_alignments", []) or []
    s_topic = 0.0
    if alignments:
        counts = {"support": 0, "absent": 0, "contradict": 0}
        for align in alignments:
            status = align.get("status")
            if status in counts:
                counts[status] += 1
        numerator = counts["support"] * 1.0 + counts["absent"] * 0.3 - counts["contradict"] * 1.0
        s_topic = numerator / len(alignments)

    # S_evidence: start at 1.0 and subtract a fixed penalty per violation.
    # `or {}` guards against an explicit JSON null for violation_summary.
    violations = analysis_details.get("violation_summary", {}) or {}
    s_evidence = 1.0
    if violations.get("C1_factual_contradiction", {}).get("is_present") is True:
        s_evidence -= 1.0
    if violations.get("C4_total_evidence_gap", {}).get("is_present") is True:
        s_evidence -= 0.8
    if violations.get("D1_strength_mismatch", {}).get("is_present") is True:
        s_evidence -= 0.4
    s_evidence = max(0.0, s_evidence)  # clamp; 0.0 keeps the type a float

    # S_scope: scope mismatch zeroes the score; otherwise source misuse
    # caps it at 0.2.
    s_scope = 1.0
    if violations.get("C2_scope_mismatch", {}).get("is_present") is True:
        s_scope = 0.0
    elif violations.get("C3_source_misuse", {}).get("is_present") is True:
        s_scope = 0.2

    # S_intent: penalize function/source mismatch and perfunctory citation.
    s_intent = 1.0
    if analysis_details.get("function_source_fit", {}).get("is_match") is False:
        s_intent -= 0.5
    if violations.get("D2_perfunctory_citation", {}).get("is_present") is True:
        s_intent -= 0.8
    s_intent = max(0.0, s_intent)

    return {'S_topic': s_topic, 'S_evidence': s_evidence, 'S_scope': s_scope, 'S_intent': s_intent}

def main():
    """Main function to process raw model responses and generate final output.

    Reads the JSONL raw-response file produced by main.py, merges the
    original input items back in by id, recomputes the CCI/CRI metrics
    from each response's diagnostics, emits consistency warnings where the
    model's verdict contradicts its own diagnostics, and writes the final
    JSON results to config.OUTPUT_FILE_PATH.
    """
    print("Starting post-processing of raw model responses...")

    try:
        with open(config.RAW_OUTPUT_FILE_PATH, 'r', encoding='utf-8') as f:
            # FIX: skip blank lines so a trailing newline (or padding line)
            # in the JSONL file does not crash json.loads.
            raw_responses = [json.loads(line) for line in f if line.strip()]
    except FileNotFoundError:
        print(f"Error: Raw response file not found at {config.RAW_OUTPUT_FILE_PATH}")
        print("Please run the main.py script first to generate responses.")
        return

    all_items_list = load_datasets(config.INPUT_FILE_PATHS)
    # FIX: an item without an 'id' key cannot be matched to any response;
    # skip it instead of raising KeyError and aborting the whole run.
    original_items = {item['id']: item for item in all_items_list if 'id' in item}

    processed_results = []
    error_count = 0

    for response_entry in tqdm(raw_responses, desc="Calculating Metrics and Verifying Consistency"):
        item_id = response_entry.get("input_item_id")
        model_output = response_entry.get("model_response", {})

        # Entries that errored upstream (or lack an id) cannot be processed.
        if "error" in model_output or not item_id:
            error_count += 1
            continue

        # Start from the original input item so its fields carry through.
        final_item = original_items.get(item_id, {"id": item_id})

        try:
            # According to the prompt, 'diagnostics' is the single source of
            # truth for all checks; no merging logic is needed.
            issue_info = model_output.get("papers", [{}])[0].get("paper_level_issues", {})
            diagnostics = issue_info.get("diagnostics", {}) or {}

            # Get the final verdict directly from the definitive source.
            label = diagnostics.get("final_verdict", "unknown_error")
            final_item["label"] = label

            # Extract details directly from the single source of truth.
            context_checks = diagnostics.get("context_coherence_checks", {}) or {}
            analysis_details = diagnostics.get("analysis_details", {}) or {}

            # Calculate metrics based on the correctly parsed analysis details.
            if label == "irrelevant":
                final_item["metrics"] = calculate_cci_indicators_from_checks(context_checks)
            elif label in ["improper", "correct"]:
                final_item["metrics"] = calculate_cri_indicators(analysis_details)
            else:
                final_item["metrics"] = {}

            # Check 1: 'irrelevant' verdict consistency (Prompt Mandate 1) —
            # an 'irrelevant' verdict requires at least one failed A-check.
            if label == "irrelevant":
                has_a_failure = any(check.get("passed") is False for check in context_checks.values())
                if not has_a_failure:
                    print(f"\n[CONSISTENCY WARNING] ID={item_id}: Label is 'irrelevant' but all A-checks passed. This violates the verdict hierarchy.")

            elif label in ["improper", "correct"]:
                # Weighted CRI score: topic 0.40, evidence 0.30, scope 0.20,
                # intent 0.10.
                cri_score = (0.40 * final_item["metrics"].get('S_topic', 0) +
                             0.30 * final_item["metrics"].get('S_evidence', 0) +
                             0.20 * final_item["metrics"].get('S_scope', 0) +
                             0.10 * final_item["metrics"].get('S_intent', 0))

                # Check 2: 'improper' verdict consistency (Prompt Mandate 3) —
                # requires a C/D violation or fully unsupported assertions.
                if label == "improper":
                    violations = analysis_details.get("violation_summary", {})
                    has_cd_violation = any(v.get("is_present") for k, v in violations.items() if k.startswith(('C', 'D')))

                    alignments = analysis_details.get("assertion_alignments", [])
                    is_fully_unsupported = all(a.get("status") in ["absent", "contradict"] for a in alignments) if alignments else False

                    if not has_cd_violation and not is_fully_unsupported:
                        print(f"\n[CONSISTENCY WARNING] ID={item_id}: Label is 'improper' but no C/D violations or full non-support was found in diagnostics. The model may have made an inconsistent verdict.")

                # Check 3: 'correct' verdict consistency (heuristic check) —
                # a 'correct' label should score at or above 0.60.
                elif label == "correct":
                    if cri_score < 0.60:
                        print(f"\n[CONSISTENCY WARNING] ID={item_id}: Label is 'correct' but calculated CRI score is {cri_score:.2f}, which is below the 0.60 passing threshold. The model may have been too lenient.")

            processed_results.append(final_item)

        except (KeyError, IndexError, TypeError) as e:
            # Malformed response shape: count it and continue with the rest.
            print(f"\nWarning: Failed to parse response for ID {item_id}. Error: {e}")
            error_count += 1

    print(f"\nProcessing complete. Successfully processed: {len(processed_results)}. Failed or errored: {error_count}.")

    with open(config.OUTPUT_FILE_PATH, 'w', encoding='utf-8') as f:
        json.dump(processed_results, f, ensure_ascii=False, indent=4)
    print(f"Final results with metrics saved to {config.OUTPUT_FILE_PATH}")

if __name__ == "__main__":
    main()