"""
执行准确率
"""
import pandas as pd
import json
from typing import Tuple, List, Optional


def calculate_row_match(predicted_row: pd.Series, ground_truth_row: pd.Series) -> Tuple[float, float, float]:
    """
    Calculate the matching percentage for a single row between predicted and ground truth.

    Values are stringified and compared as *sets*, so column order is ignored
    and duplicate values within a row collapse to a single entry. All three
    percentages are relative to the number of ground-truth columns.

    Args:
        predicted_row (pd.Series): The predicted row values.
        ground_truth_row (pd.Series): The actual row values from ground truth.

    Returns:
        Tuple[float, float, float]: Match percentage, pred-only percentage,
        truth-only percentage (0 to 1 scale).
    """
    total_columns = len(ground_truth_row)
    # Guard: empty ground-truth row yields all-zero percentages.
    if total_columns == 0:
        return 0.0, 0.0, 0.0

    # Convert to string so mixed types (e.g. int vs. str) compare consistently.
    pred_values = set(predicted_row.astype(str).values)
    truth_values = set(ground_truth_row.astype(str).values)

    # Set algebra replaces the manual membership-test loops:
    # matches = |pred ∩ truth|, pred-only = |pred − truth|, truth-only = |truth − pred|.
    matches = len(pred_values & truth_values)
    element_in_pred_only = len(pred_values - truth_values)
    element_in_truth_only = len(truth_values - pred_values)

    return (
        matches / total_columns,
        element_in_pred_only / total_columns,
        element_in_truth_only / total_columns,
    )


def calculate_f1_score(predicted: pd.DataFrame, ground_truth: pd.DataFrame) -> float:
    """
    Calculate the F1 score based on predicted and ground truth DataFrames.

    Rows are deduplicated, then compared pairwise by position. Surplus rows on
    either side count fully against precision (extra predictions) or recall
    (missed ground-truth rows).

    Args:
        predicted (pd.DataFrame): Predicted results.
        ground_truth (pd.DataFrame): Actual results expected (ground truth).

    Returns:
        float: The calculated F1 score.
    """
    # Two empty result sets are considered a perfect match.
    if predicted.empty and ground_truth.empty:
        return 1.0

    # Drop duplicate rows before positional comparison.
    predicted = predicted.drop_duplicates().reset_index(drop=True)
    ground_truth = ground_truth.drop_duplicates().reset_index(drop=True)

    paired = min(len(predicted), len(ground_truth))

    tp = 0.0  # soft true positives: accumulated per-row match fractions
    fp = 0.0  # soft false positives: accumulated pred-only fractions
    fn = 0.0  # soft false negatives: accumulated truth-only fractions

    for idx in range(paired):
        match_frac, pred_only_frac, truth_only_frac = calculate_row_match(
            predicted.iloc[idx], ground_truth.iloc[idx]
        )
        tp += match_frac
        fp += pred_only_frac
        fn += truth_only_frac

    # Unpaired ground-truth rows are pure false negatives;
    # unpaired predicted rows are pure false positives.
    fn += len(ground_truth) - paired
    fp += len(predicted) - paired

    precision = tp / (tp + fp) if tp + fp > 0 else 0
    recall = tp / (tp + fn) if tp + fn > 0 else 0
    if precision + recall == 0:
        return 0
    return 2 * precision * recall / (precision + recall)

def compute_soft_f1(
        predicted_df: pd.DataFrame,
        ground_truth_df: pd.DataFrame,
        diff_json_path: Optional[str] = None
) -> Tuple[float, float, float, float, List[int]]:
    """
    Compute Soft F1 scores for predicted and ground truth DataFrames, optionally by difficulty.

    Args:
        predicted_df (pd.DataFrame): DataFrame containing predicted results.
        ground_truth_df (pd.DataFrame): DataFrame containing ground truth results.
        diff_json_path (Optional[str]): Path to JSONL file with difficulty labels (optional).

    Returns:
        Tuple[float, float, float, float, List[int]]: Simple F1, Moderate F1, Challenging F1,
        Overall F1 (as percentages), and counts [simple, moderate, challenging, total].

    Raises:
        ValueError: If the DataFrames (or the difficulty file) differ in length.
    """
    # Rows are paired positionally, so the frames must align one-to-one.
    if len(predicted_df) != len(ground_truth_df):
        raise ValueError("Predicted and ground truth DataFrames must have the same number of rows")

    # Score each aligned row pair as a one-row DataFrame comparison.
    f1_scores = [
        {
            "sql_idx": idx,
            "res": calculate_f1_score(
                predicted_df.iloc[idx].to_frame().T,
                ground_truth_df.iloc[idx].to_frame().T,
            ),
        }
        for idx in range(len(predicted_df))
    ]

    # Per-difficulty buckets; entries with unknown/missing difficulty are skipped.
    buckets = {"simple": [], "moderate": [], "challenging": []}
    simple_f1 = moderate_f1 = challenging_f1 = 0.0

    if diff_json_path:
        # Load one difficulty label per line (JSONL), aligned with the row pairs.
        with open(diff_json_path, 'r') as f:
            contents = [json.loads(line) for line in f]

        if len(contents) != len(f1_scores):
            raise ValueError("Difficulty JSONL file must have the same number of entries as DataFrames")

        for idx, content in enumerate(contents):
            bucket = buckets.get(content.get("difficulty"))
            if bucket is not None:
                bucket.append(f1_scores[idx])

        def _avg_pct(entries):
            # Mean per-row F1 as a percentage; 0 when the bucket is empty.
            return sum(e["res"] for e in entries) / len(entries) * 100 if entries else 0

        simple_f1 = _avg_pct(buckets["simple"])
        moderate_f1 = _avg_pct(buckets["moderate"])
        challenging_f1 = _avg_pct(buckets["challenging"])

    all_f1 = sum(e["res"] for e in f1_scores) / len(f1_scores) * 100 if f1_scores else 0
    count_lists = [
        len(buckets["simple"]),
        len(buckets["moderate"]),
        len(buckets["challenging"]),
        len(f1_scores),
    ]

    return simple_f1, moderate_f1, challenging_f1, all_f1, count_lists


if __name__ == "__main__":

    predicted_df = pd.DataFrame({
        'col1': [1, 2, 3, 4],
        'col2': ['a', 'b', 'c', 'd']
    })
    ground_truth_df = pd.DataFrame({
        'col1': [1, 2, 3, 5],
        'col2': ['a', 'b', 'c', 'e']
    })

    simple_f1, moderate_f1, challenging_f1, all_f1, counts = compute_soft_f1(
        predicted_df, ground_truth_df
    )
    print(f"Simple F1: {simple_f1:.2f}%")
    print(f"Moderate F1: {moderate_f1:.2f}%")
    print(f"Challenging F1: {challenging_f1:.2f}%")
    print(f"Overall F1: {all_f1:.2f}%")
    print(f"Counts: {counts}")
