import sys
import os
import json
import re
import yaml
import time
import argparse
import base64
from pathlib import Path
from tqdm import tqdm
from multiprocessing import Pool, cpu_count
from datetime import datetime

# Add the project root to the Python path so the `src` package imports
# below resolve when this script is run directly from its own directory.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from src.utils import (
    CONFIG_PATH, DATABASE_PATH, TEST_DATA_PATH, BATCH_RESULTS_PATH, BATCH_HISTORY_PATH
)

# Per-process holder for the loaded config and pipeline objects.
# Initialized exactly once per worker process by init_worker().
worker_modules = None

def init_worker(custom_config_json=None):
    """Initialize this worker process; runs at most once per process.

    Lazily imports the project pipeline modules and builds the shared
    `worker_modules` dict (config, database handle, processor, hasher,
    scorer). An optional JSON string may override the 'scoring' section
    of the on-disk YAML configuration.

    Args:
        custom_config_json: JSON-encoded dict; only its 'scoring' key
            (if present) replaces the loaded config's 'scoring' section.
    """
    global worker_modules
    if worker_modules is not None:
        return  # already initialized in this process

    # Deferred imports: keep heavy project modules out of the parent
    # process and load them once per worker instead.
    from src.database import Database
    from src.fingerprinting.processor import Processor
    from src.fingerprinting.hashing import Hasher
    from src.matching.scorer import Scorer

    with open(CONFIG_PATH, 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)

    if custom_config_json:
        overrides = json.loads(custom_config_json)
        if 'scoring' in overrides:
            config['scoring'] = overrides['scoring']

    worker_modules = {
        'config': config,
        'db': Database(str(DATABASE_PATH)),
        'processor': Processor(config),
        'hasher': Hasher(config['hashing']),
        'scorer': Scorer(config['scoring']),
    }

def evaluate_match(true_song_name, true_start_time, result, eval_config):
    """Score a recognition result against the filename-derived ground truth.

    Args:
        true_song_name: Substring expected to appear in the matched song's name.
        true_start_time: True offset in seconds of the sample within the song.
        result: Recognition result dict containing 'song_info' (with 'name')
            and 'offset', or None/empty when nothing was recognized.
        eval_config: 'batch_test' config section; reads 'accuracy_weights'
            (keys 'song_match' and 'offset') and 'offset_tolerance_seconds'.

    Returns:
        A (accuracy, status) tuple: accuracy in [0, 1] combining song
        correctness and offset closeness; status is "Correct", "Wrong Song",
        or "No match found".
    """
    weights = eval_config.get('accuracy_weights', {})
    weight_song = weights.get('song_match', 0.8)
    weight_offset = weights.get('offset', 0.2)
    tolerance_sec = eval_config.get('offset_tolerance_seconds', 2)

    if not result or 'song_info' not in result:
        return 0, "No match found"

    is_song_correct = true_song_name in result['song_info']['name']
    offset_error = abs(result['offset'] - true_start_time)
    # Guard against a zero (or negative) tolerance in the config, which
    # previously raised ZeroDivisionError: with no tolerance, only an
    # exact offset earns the offset score.
    if tolerance_sec > 0:
        offset_score = max(0, 1 - (offset_error / tolerance_sec))
    else:
        offset_score = 1.0 if offset_error == 0 else 0.0
    accuracy = (is_song_correct * weight_song) + (offset_score * weight_offset)
    status = "Correct" if is_song_correct else "Wrong Song"
    return accuracy, status

def process_sample_worker(sample_path: Path) -> dict:
    """Recognize one test sample and score it against its filename ground truth.

    Expects filenames of the form "<song name>---<start>s---<duration>s.wav";
    the embedded song name and start offset are the ground truth. Requires
    init_worker() to have populated `worker_modules` in this process.

    Returns a result dict with keys 'sample', 'status', 'accuracy', and on a
    successful parse also 'ground_truth' / 'recognized'. Any exception is
    converted into a 'Processing Error' result row rather than propagated,
    so one bad sample cannot kill the worker pool.
    """
    global worker_modules
    db = worker_modules['db']
    processor = worker_modules['processor']
    hasher = worker_modules['hasher']
    scorer = worker_modules['scorer']
    eval_config = worker_modules['config'].get('batch_test', {})
    # Parse ground truth out of the filename: "<name>---<start>s---<dur>s.wav".
    match = re.match(r'(.+?)---(\d+)s---(\d+)s\.wav', sample_path.name)
    if not match: return {'sample': sample_path.name, 'status': 'Filename Error', 'accuracy': 0}
    true_song_name, true_start_time_str, _ = match.groups()
    true_start_time = int(true_start_time_str)
    try:
        # NOTE(review): db is used as a context manager — presumably this
        # opens/closes the connection per sample; verify in src.database.
        with db:
            sample_peaks = processor.audio_to_peaks(str(sample_path))
            # audio_to_peaks appears to return a NumPy array; .any() is False
            # when no spectral peaks were extracted from the audio.
            if not sample_peaks.any(): return {'sample': sample_path.name, 'status': 'No Peaks Found', 'accuracy': 0}
            sample_fingerprints = hasher.peaks_to_fingerprints(sample_peaks)
            # Query the database with the hash values only (fp[0] of each pair).
            query_hashes = [fp[0] for fp in sample_fingerprints]
            db_matches = db.find_matches(query_hashes)
            recognition_result, _ = scorer.find_best_match(sample_fingerprints, db_matches)
            accuracy, status = 0, "No match found"
            if recognition_result:
                song_info = db.get_song_by_id(recognition_result['song_id'])
                if song_info:
                    recognition_result['song_info'] = song_info
                    accuracy, status = evaluate_match(true_song_name, true_start_time, recognition_result, eval_config)
                else: status = "DB Error (Song ID not found)"
            # Accuracy is reported as a percentage in the result row.
            test_case = {'sample': sample_path.name, 'ground_truth': {'name': true_song_name, 'offset': true_start_time}, 'recognized': None, 'accuracy': round(accuracy * 100, 2), 'status': status}
            if recognition_result and 'song_info' in recognition_result:
                test_case['recognized'] = {'name': recognition_result['song_info'].get('name', 'Unknown'), 'song_id': recognition_result['song_id'], 'offset': recognition_result['offset'], 'confidence': recognition_result['confidence'], 'votes': recognition_result['metrics']['votes']}
            return test_case
    except Exception as e:
        # Best-effort: surface the failure as a result row so the batch
        # continues; the summary counts these via the 'Error' substring.
        return {'sample': sample_path.name, 'status': f'Processing Error: {e}', 'accuracy': 0}

def main(parallel: bool, custom_config_b64: str = None):
    print(f"Starting batch test (Parallel: {parallel})...")
    start_time = time.time()

    custom_config_json = None
    if custom_config_b64:
        try:
            custom_config_json = base64.b64decode(custom_config_b64).decode('utf-8')
            print("Successfully decoded custom configuration.")
        except Exception as e:
            print(f"Error decoding custom config: {e}. Falling back to default.")

    with open(CONFIG_PATH, 'r', encoding='utf-8') as f:
        final_config_for_report = yaml.safe_load(f)

    if custom_config_json:
        custom_config = json.loads(custom_config_json)
        if 'scoring' in custom_config:
            final_config_for_report['scoring'] = custom_config['scoring']

    if not TEST_DATA_PATH.exists() or not any(TEST_DATA_PATH.glob("*.wav")):
        print("Test data directory not found or is empty.")
        return

    test_files = list(TEST_DATA_PATH.glob("*.wav"))
    print(f"Found {len(test_files)} samples to test.")

    results = []
    init_args = (custom_config_json,) if custom_config_json else ()

    if parallel:
        num_processes = max(1, cpu_count() - 1)
        print(f"Using {num_processes} worker processes.")
        with Pool(processes=num_processes, initializer=init_worker, initargs=init_args) as pool:
            progress_bar = tqdm(pool.imap_unordered(process_sample_worker, test_files), total=len(test_files), desc="Running tests")
            for result in progress_bar:
                results.append(result)
    else:
        init_worker(*init_args)
        for sample_path in tqdm(test_files, desc="Running tests (serial)"):
            results.append(process_sample_worker(sample_path))

    total_tests = len(results)
    summary = {
        'run_timestamp': datetime.now().isoformat(),
        'total_samples': total_tests,
        'correct_matches': sum(1 for r in results if r['status'] == 'Correct'),
        'wrong_song_matches': sum(1 for r in results if r['status'] == 'Wrong Song'),
        'no_matches_found': sum(1 for r in results if r['status'] == 'No match found'),
        'processing_errors': sum(1 for r in results if 'Error' in r['status']),
        'overall_accuracy_score': round(sum(r['accuracy'] for r in results) / total_tests, 2) if total_tests > 0 else 0,
        'test_duration_seconds': round(time.time() - start_time, 2),
        'config_used': final_config_for_report
    }

    final_report = {'summary': summary, 'test_cases': results}

    # 确保历史记录目录存在
    BATCH_HISTORY_PATH.mkdir(exist_ok=True)

    # 定义历史文件名
    timestamp_str = datetime.now().strftime("%Y%m%d_%H%M%S")
    history_filename = f"results_{timestamp_str}.json"
    history_filepath = BATCH_HISTORY_PATH / history_filename

    # 保存报告到两个位置
    # 1. 保存到历史文件
    with open(history_filepath, 'w', encoding='utf-8') as f:
        json.dump(final_report, f, indent=4, ensure_ascii=False)
    # 2. 保存到主结果文件 (用于实时刷新)
    with open(BATCH_RESULTS_PATH, 'w', encoding='utf-8') as f:
        json.dump(final_report, f, indent=4, ensure_ascii=False)

    print("\nBatch test complete!")
    print(f"Results saved to: {BATCH_RESULTS_PATH} and {history_filepath}")
    print(json.dumps(summary, indent=4, ensure_ascii=False))

if __name__ == '__main__':
    # CLI entry point: parse the flags and kick off the batch run.
    cli = argparse.ArgumentParser(description="Run batch recognition tests on sample files.")
    cli.add_argument('--serial', action='store_true', help="Run tests serially for debugging.")
    cli.add_argument('--custom-config-b64', type=str, default=None, help="Base64 encoded JSON string of a custom config.")
    parsed = cli.parse_args()
    main(parallel=not parsed.serial, custom_config_b64=parsed.custom_config_b64)
