# worker/app/tasks.py (v3 - with uv)

import json
import os
import shutil
import zipfile
from datetime import datetime

import docker
from bert_score import score as bert_scorer
from celery import Celery
from sqlalchemy.orm import Session

from . import models

# Broker/result-backend URL; defaults to the docker-compose `redis` service.
CELERY_BROKER_URL = os.getenv("CELERY_BROKER_URL", "redis://redis:6379/0")
# Single Celery app for this worker; the same Redis instance doubles as the
# result backend.
celery_app = Celery("worker", broker=CELERY_BROKER_URL, backend=CELERY_BROKER_URL)
# Route every task named "worker.*" onto a dedicated queue so this GPU worker
# only consumes its own jobs.
celery_app.conf.task_routes = {'worker.*': {'queue': 'worker_queue'}}


def get_db() -> Session:
    """Open and return a fresh SQLAlchemy session from the session factory.

    The caller is responsible for closing the session when done.
    """
    session: Session = models.SessionLocal()
    return session


def calculate_score(predictions: list, ground_truth_path: str) -> dict:
    with open(ground_truth_path, 'r', encoding='utf-8') as f:
        gt_data = {item['id']: item for item in json.load(f)}

    category_scores = {"单选": [], "多选": [], "简答": [], "多模态": []}

    for pred_item in predictions:
        pred_id = pred_item.get("id")
        if pred_id not in gt_data: continue

        gt_item = gt_data[pred_id]
        question_type, pred_answer, gt_answer = gt_item["type"], pred_item.get("answer", ""), gt_item.get("answer")

        score = 0.0
        if question_type in ["单选", "多模态"]:
            score = 1.0 if pred_answer.strip() == gt_answer.strip() else 0.0
        elif question_type == "多选":
            pred_set, gt_set = set(pred_answer.strip()), set(gt_answer.strip())
            if pred_set and pred_set.issubset(gt_set):
                score = len(pred_set) / len(gt_set) if gt_set else 0
        elif question_type == "简答":
            try:
                _, _, f1 = bert_scorer([pred_answer], [gt_answer], lang="zh", verbose=False, rescale_with_baseline=True)
                score = f1.item()
            except Exception:
                score = 0.0

        if question_type in category_scores:
            category_scores[question_type].append(score)

    results = {}
    total_score = 0
    for category, scores in category_scores.items():
        avg_score = (sum(scores) / len(scores)) if scores else 0
        # 字段名和 model 里的要对应
        if category == "单选": results['single_choice_score'] = avg_score
        if category == "多选": results['multi_choice_score'] = avg_score
        if category == "简答": results['short_answer_score'] = avg_score
        if category == "多模态": results['multimodal_score'] = avg_score
        total_score += avg_score * 0.25

    results['total_score'] = total_score
    return results

@celery_app.task(name="worker.run_evaluation")
def run_evaluation(submission_id: str):
    """Run a sandboxed evaluation for one submission and persist its scores.

    Workflow: mark the submission RUNNING, unpack its zip into a per-task
    workspace, run the user's code inside an isolated GPU container (uv-based
    venv, no network), score the produced predictions.json against the ground
    truth, and write the results back to the database. Any failure marks the
    submission FAILED with a truncated error log; the workspace is always
    cleaned up.
    """
    print(f"[{submission_id}] 开始处理评测任务 (uv-based)...")
    db = get_db()
    submission = db.query(models.Submission).filter(models.Submission.id == submission_id).first()
    if submission is None:
        # Without a row there is nothing to update; bail out before touching
        # the filesystem (otherwise the except/finally blocks would themselves
        # raise on a None submission).
        print(f"[{submission_id}] submission not found, aborting")
        db.close()
        return

    workspace_path = f"/submissions_temp/{submission_id}_workspace"
    output_path = os.path.join(workspace_path, "output")
    user_code_path = os.path.join(workspace_path, "code")
    user_model_path = os.path.join(workspace_path, "model")
    EVAL_DATASET_PATH_HOST = "/root/nul-comp-benchmark/evaluation_dataset"
    GROUND_TRUTH_PATH = os.path.join(EVAL_DATASET_PATH_HOST, "ground_truth.json")

    try:
        submission.status = models.SubmissionStatus.RUNNING
        db.commit()

        os.makedirs(workspace_path, exist_ok=True)
        os.makedirs(output_path, exist_ok=True)

        # Unpack the user's submission (expected layout: code/ and model/).
        with zipfile.ZipFile(submission.zip_file_path, 'r') as zip_ref:
            zip_ref.extractall(workspace_path)

        container_command = """
        bash -c "
        echo '--- [Container Shell] Creating virtual environment with uv... ---' && \
        uv venv /venv -p python3.10 && \
        echo '--- [Container Shell] Installing requirements with uv... ---' && \
        uv pip install --no-cache -r /user_code/requirements.txt --timeout=600 && \
        echo '--- [Container Shell] Running evaluation script... ---' && \
        /venv/bin/python /app/run.py
        "
        """

        client = docker.from_env()
        print(f"[{submission_id}] 准备启动评测容器 (uv-based)...")

        # Run to completion (detach=False) with networking disabled so user
        # code cannot exfiltrate the evaluation dataset; mounts are read-only
        # except for /output.
        container_logs = client.containers.run(
            image="agri-eval-container:latest",
            command=container_command,
            volumes={
                user_code_path: {'bind': '/user_code', 'mode': 'ro'},
                user_model_path: {'bind': '/user_model', 'mode': 'ro'},
                EVAL_DATASET_PATH_HOST: {'bind': '/eval_dataset', 'mode': 'ro'},
                output_path: {'bind': '/output', 'mode': 'rw'},
            },
            device_requests=[
                docker.types.DeviceRequest(count=1, capabilities=[['gpu']])
            ],
            network_disabled=True,
            remove=True,
            detach=False,
            stdout=True, stderr=True
        )
        print(f"[{submission_id}] 容器日志:\n{container_logs.decode('utf-8')}")

        with open(os.path.join(output_path, "predictions.json"), 'r', encoding='utf-8') as f:
            predictions = json.load(f)
        # The in-container runner reports its own failures as {"error": ...}.
        if "error" in predictions:
            raise RuntimeError(f"评测容器内部错误: {predictions['error']}")

        print(f"[{submission_id}] 开始计算分数...")
        score_results = calculate_score(predictions, GROUND_TRUTH_PATH)

        # Keys of score_results match Submission column names.
        for key, value in score_results.items():
            setattr(submission, key, value)

        submission.status = models.SubmissionStatus.COMPLETED
        # Fix: the module does `from datetime import datetime`, so the old
        # `datetime.datetime.utcnow()` raised AttributeError right before
        # completion, flipping every successful run to FAILED.
        submission.completed_at = datetime.utcnow()
        print(f"[{submission_id}] 评测完成，最终得分: {score_results.get('total_score', 0):.4f}")

    except Exception as e:
        print(f"[{submission_id}] 任务失败: {e}")
        submission.status = models.SubmissionStatus.FAILED
        submission.error_log = str(e)[:500]
    finally:
        db.commit()
        db.close()
        # ignore_errors: the workspace may not exist if we failed before
        # makedirs (e.g. the initial commit raised).
        shutil.rmtree(workspace_path, ignore_errors=True)
        print(f"[{submission_id}] 任务处理结束，已清理工作空间。")