# Import the json module for working with JSON data.
import json
# Import the logging module for logging events and debugging.
import logging

# Import jieba for Chinese text segmentation.
import jieba
# Import enumeration classes for evaluation dataset and task statuses from the application's constants.
from myapp.const.evaluation import EnumEvaluationDatasetStatus, EnumEvaluationTaskStatus
# Import the EvaluationDataset model for interacting with evaluation dataset records in the database.
from myapp.models.model_evaluation_task import EvaluationDataset
# Import log_exception utility for logging exceptions gracefully.
from myapp.utils.exception import log_exception
# Import scan_table utility for iterating through database table records.
from myapp.utils.sess import scan_table
# Import SmoothingFunction and sentence_bleu from nltk for calculating BLEU scores in natural language processing.
from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu
# Import Rouge for calculating ROUGE scores, another metric for evaluating text summarization and machine translation.
from rouge import Rouge
# Import accuracy_score and f1_score from sklearn.metrics for calculating classification accuracy and F1-score.
from sklearn.metrics import accuracy_score, f1_score


def calculate_evaluation_task_result(task, dbsession):
    """
    Calculates and aggregates the evaluation results for a given task.

    This function iterates through all evaluated datasets associated with a task,
    processes the detailed evaluation scores for each model and dimension,
    and then computes overall evaluation metrics and dimension-specific details.
    Side effects: writes the aggregated result back to ``task.result`` as a JSON
    string, sets the task status to ``resulted``, and commits the session.

    Args:
        task: The evaluation task object containing task details.
        dbsession: The database session object for querying evaluation datasets.
    """
    # Fetch all evaluated dataset results for this task, page by page.
    # Pagination offset into the evaluated-dataset result set.
    offset = 0
    # Number of records to fetch per page during database queries.
    page_size = 100
    # Per-model aggregated totals; keys '平均分' (average score) and '较好占比'
    # (good ratio) hold running sums that are normalized after the loop.
    model_map = {}
    # Per-dimension counts of how often each model landed at each level.
    dimension_map = {}
    # Counter for the number of processed datasets.
    dataset_num = 0
    # Loop through evaluation datasets in batches until all are processed.
    while True:
        # Query datasets of this task that have already been evaluated.
        # NOTE(review): uses the literal 'evaluated' rather than
        # EnumEvaluationDatasetStatus — presumably equivalent; verify against the enum.
        query = dbsession.query(EvaluationDataset).filter_by(task_id=task.id, status='evaluated')
        # Fetch a page of datasets, ordered by creation time so pagination is stable.
        datasets = (
            query.order_by(EvaluationDataset.created_on.asc())
            .offset(offset)
            .limit(page_size)
            .all()
        )
        # Iterate over each dataset retrieved in the current batch.
        for dataset in datasets:
            # Parse the JSON detail field of the dataset, which contains model evaluation results.
            detail = json.loads(dataset.detail)
            # Skip if the detail is empty.
            if not detail:
                continue
            # Iterate over each model's evaluation results within the dataset detail.
            for model in detail:
                # Extract the model name.
                model_name = model.get('model_name')
                # If the model is not yet in model_map, initialize its entry.
                if model_name not in model_map:
                    model_map[model_name] = {'平均分': 0, '较好占比': 0}
                # Get the scores for the current model.
                scores = model.get('scores')
                # Skip if no scores are available for the model.
                if not scores:
                    continue
                # Iterate over each dimension's score for the current model.
                for dimension in scores:
                    # Extract the dimension name.
                    dimension_name = dimension.get('name')
                    # If the dimension is not yet in dimension_map, initialize its entry.
                    if dimension_name not in dimension_map:
                        dimension_map[dimension_name] = {}
                    # Get the score for the current dimension.
                    score = dimension.get('score')
                    # Add the score to the model's running total for the average.
                    model_map[model_name]['平均分'] += score
                    # Map the numeric score to a level label ('较差', '一般', '较好').
                    level = get_level(score)
                    # Count another '较好' (good) rating for this model.
                    if level == '较好':
                        model_map[model_name]['较好占比'] += 1
                    # Keyed by model name: accumulate the total score per
                    # evaluation dimension.
                    if dimension_name not in model_map[model_name]:
                        model_map[model_name][dimension_name] = score
                    else:
                        model_map[model_name][dimension_name] += score

                    # Keyed by dimension: count how many times each model was
                    # rated at each level.
                    if level not in dimension_map[dimension_name]:
                        dimension_map[dimension_name][level] = {}

                    if model_name not in dimension_map[dimension_name][level]:
                        dimension_map[dimension_name][level][model_name] = 1
                    else:
                        dimension_map[dimension_name][level][model_name] += 1
            # Increment the counter for processed datasets.
            dataset_num += 1

        # Move to the next page of results.
        offset += len(datasets)
        # If fewer datasets than page_size were returned, all datasets have been processed.
        if len(datasets) < page_size:
            break
    # Parse the evaluation dimensions from the task object.
    evaluation_dimension = json.loads(task.evaluation_dimension)
    # Total number of scores given per model: datasets x dimensions.
    score_num = dataset_num * len(evaluation_dimension)  # total scores per model
    # Log the aggregated statistics for debugging and monitoring.
    logging.info(
        f'dataset_num:{dataset_num}, score_num:{score_num},'
        f' model_map:{model_map}, dimension_map:{dimension_map}'
    )
    # Build the overall evaluation data per model.
    # NOTE(review): assumes score_num > 0 whenever model_map is non-empty; a task
    # whose evaluation_dimension list is empty but whose datasets carry scores
    # would divide by zero below — confirm upstream invariants.
    overall_evaluation = {}
    # Iterate through each model and its aggregated scores.
    for k, v in model_map.items():
        # Calculated (normalized) metrics for the current model.
        case_map = {}
        # Iterate through each metric (e.g., '平均分', '较好占比', dimension scores) for the current model.
        for kk, vv in v.items():
            # Average score and good ratio are normalized over the total score count.
            if kk == '平均分' or kk == '较好占比':
                case_map[kk] = round(vv / score_num, 2)
            # Dimension-specific totals are averaged over the number of datasets.
            else:
                case_map[kk] = round(vv / dataset_num, 2)
        # Assign the calculated metrics to the current model in the overall_evaluation dictionary.
        overall_evaluation[k] = case_map
    # Build the per-dimension detail data.
    dimension_detail = {}
    # Iterate through each dimension and its aggregated level-based model counts.
    for k, v in dimension_map.items():
        # Details for the current dimension, keyed by level.
        case_map1 = {}
        # Iterate through each evaluation level (e.g., '较差', '一般', '较好') within the dimension.
        for kk, vv in v.items():
            # Model -> proportion mapping for the current level.
            case_map2 = {}
            # Iterate through each model and its count for the current level.
            for kkk, vvv in vv.items():
                # Proportion of datasets where this model was rated at this level.
                case_map2[kkk] = round(vvv / dataset_num, 2)
            # Assign the model proportions to the current level.
            case_map1[kk] = case_map2
        # Assign the level-based model proportions to the current dimension.
        dimension_detail[k] = case_map1
    # Reshape the dimension details into a display-friendly list structure.
    dimension_display = []
    # Iterate through each dimension and its detailed evaluation data.
    for k, v in dimension_detail.items():
        # One display item per dimension.
        item = {'name': k, 'data': []}
        # Gather each model's proportions across the three levels, in fixed order.
        for key in model_map.keys():
            # Per-model entry with proportions ordered [较差, 一般, 较好].
            model_info = {'model_name': key, 'data': []}
            # Proportion of '较差' (poor) ratings; 0 if the model never hit this level.
            model_info['data'].append(v.get('较差', {}).get(key, 0))
            # Proportion of '一般' (average) ratings.
            model_info['data'].append(v.get('一般', {}).get(key, 0))
            # Proportion of '较好' (good) ratings.
            model_info['data'].append(v.get('较好', {}).get(key, 0))
            # Add the model's performance data to the current dimension's item.
            item['data'].append(model_info)
        # Add the fully populated dimension item to the display list.
        dimension_display.append(item)
    # Load the existing result from the task object.
    result = json.loads(task.result)
    # Update the overall evaluation data in the result.
    result['overall_evaluation'] = overall_evaluation
    # Update the dimension detail data in the result.
    result['dimension_detail'] = dimension_display
    # Serialize back without ASCII-escaping so Chinese keys stay readable.
    task.result = json.dumps(result, ensure_ascii=False)
    # Mark the task as having its results computed.
    task.status = EnumEvaluationTaskStatus.resulted.value
    # Commit the changes to the database session.
    dbsession.commit()


def get_level(score):
    """
    Map a numeric score to its evaluation level label.

    Args:
        score (int): The numerical score to evaluate.

    Returns:
        str: '较差' (poor) for a score of 1, '一般' (average) for 2,
            and '较好' (good) for any other value.
    """
    # Any score other than 1 or 2 (typically 3) falls through to '较好'.
    return {1: '较差', 2: '一般'}.get(score, '较好')


def tokenize_text(texts):
    """
    Tokenize a list of Chinese texts with jieba.

    Args:
        texts (list of str): Sentences to tokenize.

    Returns:
        list of str: Each input sentence with its tokens joined by single spaces.
    """
    # jieba.lcut segments one sentence into a word list; map applies it to each text.
    return [' '.join(words) for words in map(jieba.lcut, texts)]


def evaluate_predictions(predictions, references):
    """
    Evaluate predictions using Accuracy, F1, ROUGE, and BLEU metrics.

    Args:
        predictions (list of str): Model predictions.
        references (list of str): Ground truth references.

    Returns:
        dict: Evaluation scores keyed by '准确率', 'F1', 'ROUGE-1', 'ROUGE-2',
            'ROUGE-L', and 'BLEU-4'.

    Raises:
        ValueError: If the two lists differ in length, or if both are empty.
    """
    # Ensure that the number of predictions matches the number of references.
    if len(predictions) != len(references):
        raise ValueError('Predictions and references must have the same length.')
    # Guard against empty input: the BLEU average below would divide by zero,
    # and Rouge.get_scores rejects an empty hypothesis list. Fail with a clear
    # message instead of a ZeroDivisionError.
    if not predictions:
        raise ValueError('Predictions and references must not be empty.')

    # Tokenize the Chinese texts on both sides using jieba.
    tokenized_predictions = tokenize_text(predictions)
    tokenized_references = tokenize_text(references)

    # Accuracy and F1 Score (exact match of the tokenized strings, treating each
    # sentence pair as a classification-like sample).
    accuracy = accuracy_score(tokenized_references, tokenized_predictions)
    # 'weighted' averaging accounts for the uneven frequency of distinct strings.
    f1 = f1_score(tokenized_references, tokenized_predictions, average='weighted')

    # ROUGE-1, ROUGE-2, and ROUGE-L F-scores, averaged across all pairs.
    rouge = Rouge()
    rouge_scores = rouge.get_scores(tokenized_predictions, tokenized_references, avg=True)

    # BLEU-4, sentence by sentence, then averaged.
    bleu_scores = []
    # method4 smoothing handles short sentences with missing higher-order n-grams.
    smoothie = SmoothingFunction().method4
    for pred, ref in zip(tokenized_predictions, tokenized_references):
        # sentence_bleu expects the references as a list of token lists.
        ref_tokens = [ref.split()]
        pred_tokens = pred.split()
        bleu_scores.append(sentence_bleu(ref_tokens, pred_tokens, smoothing_function=smoothie))
    # Average BLEU-4 across all sentences (bleu_scores is non-empty per the guard above).
    bleu_4 = sum(bleu_scores) / len(bleu_scores)

    # Combine all calculated evaluation results into a single dictionary.
    evaluation_results = {
        '准确率': accuracy,
        'F1': f1,
        'ROUGE-1': rouge_scores['rouge-1']['f'],
        'ROUGE-2': rouge_scores['rouge-2']['f'],
        'ROUGE-L': rouge_scores['rouge-l']['f'],
        'BLEU-4': bleu_4,
    }

    # Return the dictionary of evaluation results.
    return evaluation_results


def calc_result_when_auto_mode(task_id):
    """
    Calculates evaluation results for tasks running in auto mode.

    This function retrieves evaluation datasets for a given task ID,
    collects predictions and references for each model, and then uses
    `evaluate_predictions` to compute various NLP metrics.

    Args:
        task_id (int): The ID of the evaluation task.

    Returns:
        dict: A dictionary where keys are model names and values are their evaluation scores.
    """
    # Predictions and references accumulated per model name.
    collected = {}
    # Final metrics per model name.
    results = {}
    # Walk the task's evaluation datasets in batches, skipping failed ones.
    for batch, _ in scan_table(
        EvaluationDataset,
        filters=[
            EvaluationDataset.task_id == task_id,
            EvaluationDataset.status != EnumEvaluationDatasetStatus.failed.value,
        ],
    ):
        for record in batch:
            # Log (without raising) any exception hit while processing a record.
            with log_exception:
                # The detail field holds one entry per model output for this record.
                for entry in json.loads(record.detail):
                    name = entry['model_name']
                    # Create the model's accumulator on first sight.
                    bucket = collected.setdefault(
                        name, {'predictions': [], 'references': []}
                    )
                    # Inference output produced by the model.
                    bucket['predictions'].append(entry['output'])
                    # Ground-truth answer stored on the dataset record.
                    bucket['references'].append(record.output)

    # Score each model's outputs against the references.
    for name, bucket in collected.items():
        scores = evaluate_predictions(bucket['predictions'], bucket['references'])
        # Round every metric to four decimal places for consistency.
        results[name] = {metric: round(value, 4) for metric, value in scores.items()}

    # Return the dictionary containing evaluation results for all models.
    return results
