#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import sys
import json
import importlib
import traceback
from datetime import datetime

# Project root: one level above the directory containing this file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# All runner output (plain-text log + JSON summary) lives under <root>/logs.
LOG_DIR = os.path.join(BASE_DIR, 'logs')
LOG_FILE = os.path.join(LOG_DIR, 'test_run.log')


def ensure_log_dir():
    """Create the log directory if it does not already exist.

    ``os.makedirs(..., exist_ok=True)`` already tolerates an existing
    directory, so the former ``os.path.exists`` pre-check was redundant
    and introduced a check-then-act (TOCTOU) race; a single call suffices.
    """
    os.makedirs(LOG_DIR, exist_ok=True)


def log(message: str):
    """Echo *message* to stdout and append it to the run log.

    Every entry is prefixed with a ``YYYY-mm-dd HH:MM:SS`` timestamp.
    """
    ensure_log_dir()
    stamped = f"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] {message}"
    print(stamped)
    with open(LOG_FILE, 'a', encoding='utf-8') as handle:
        handle.write(stamped + '\n')


def discover_test_modules():
    """Return the module names (``test_*.py``, extension stripped) found
    next to this file.

    The list is sorted so that test execution order is deterministic:
    ``os.listdir`` returns entries in arbitrary, platform-dependent order,
    which previously made run-to-run log output unstable.
    """
    tests_dir = os.path.dirname(os.path.abspath(__file__))
    return sorted(
        fname[:-3]
        for fname in os.listdir(tests_dir)
        if fname.startswith('test_') and fname.endswith('.py')
    )


def run_test_module(module_name: str) -> dict:
    """Run a single test module that exposes a run_tests() -> list[dict] API.

    Each case dict should include: {"name": str, "passed": bool, "details": str}
    No assertions should be raised from this runner.
    """
    result = {"module": module_name, "total": 0, "passed": 0, "failed": 0, "cases": []}
    try:
        mod = importlib.import_module(f'tests.{module_name}')
        if not hasattr(mod, 'run_tests'):
            log(f"[SKIP] {module_name}: 未找到 run_tests()，跳过")
            return result
        cases = mod.run_tests()
        if not isinstance(cases, list):
            log(f"[ERROR] {module_name}: run_tests() 未返回列表，跳过")
            return result
        for case in cases:
            # Missing keys fall back to harmless defaults; a case without
            # an explicit "passed" flag counts as a failure.
            entry = {
                "name": case.get('name', 'unnamed'),
                "passed": bool(case.get('passed', False)),
                "details": case.get('details', ''),
            }
            result['total'] += 1
            if entry['passed']:
                result['passed'] += 1
                log(f"[PASS] {module_name} :: {entry['name']}")
            else:
                result['failed'] += 1
                log(f"[FAIL] {module_name} :: {entry['name']} :: {entry['details']}")
            result['cases'].append(entry)
    except Exception:
        # Any import error or misbehaving test module is logged with a
        # full traceback; the partial result gathered so far is returned.
        log(f"[EXCEPTION] {module_name}: {traceback.format_exc()}")
    return result


def main():
    """Entry point: run every discovered test module and summarize results.

    Side effects:
      * prepends BASE_DIR to ``sys.path`` so ``tests.<module>`` imports work,
      * truncates and rewrites LOG_FILE,
      * writes ``logs/test_summary.json`` with per-module results and the
        overall pass rate.

    Returns:
        int: 1 if no test modules were discovered (a setup problem worth
        surfacing), otherwise 0 — deliberately even when cases failed, so
        the CI pipeline is not blocked by test failures.
    """
    # Make the project root importable (run_test_module imports tests.<m>).
    if BASE_DIR not in sys.path:
        sys.path.insert(0, BASE_DIR)

    ensure_log_dir()
    # Truncate the log left over from the previous run.
    with open(LOG_FILE, 'w', encoding='utf-8') as f:
        f.write('')

    log('启动测试运行器...')
    modules = discover_test_modules()
    if not modules:
        log('未发现测试模块。')
        return 1

    summary = {"total": 0, "passed": 0, "failed": 0, "modules": []}
    for m in modules:
        log(f"运行模块: {m}")
        res = run_test_module(m)
        summary['total'] += res['total']
        summary['passed'] += res['passed']
        summary['failed'] += res['failed']
        summary['modules'].append(res)

    # Guard against division by zero when every module yielded no cases.
    pass_rate = (summary['passed'] / summary['total'] * 100.0) if summary['total'] else 0.0
    log(f"总用例: {summary['total']}  通过: {summary['passed']}  失败: {summary['failed']}  通过率: {pass_rate:.2f}%")

    # Persist a machine-readable JSON summary next to the plain-text log.
    summary_path = os.path.join(LOG_DIR, 'test_summary.json')
    with open(summary_path, 'w', encoding='utf-8') as jf:
        json.dump({**summary, "pass_rate": pass_rate}, jf, ensure_ascii=False, indent=2)
    log(f"已写入测试摘要: {summary_path}")

    # The previous `0 if summary['failed'] == 0 else 0` was a dead
    # conditional that always yielded 0; keep the documented intent
    # (never block the pipeline on test failures) but state it plainly.
    return 0


# Script entry point: propagate main()'s return code as the process exit
# status (1 only when no test modules were discovered, otherwise 0).
if __name__ == '__main__':
    sys.exit(main())


