import os
import requests
import json
import time
import subprocess
import re
import logging
from typing import Dict, List, Optional

# Reuse the same logging setup as the bisect tooling
logger = logging.getLogger(__name__)

class SchedulerBackend:
    """Client for submitting jobs via the LKP `submit` CLI and tracking them
    through a Manticore-backed job database.

    Connection settings come from the same environment variables used by the
    bisect tooling: MANTICORE_HOST, MANTICORE_PORT, MANTICORE_DB.
    """

    def __init__(self):
        # Use the same database configuration as bisect.
        self.db_host = os.environ.get('MANTICORE_HOST', 'localhost')
        self.db_port = os.environ.get('MANTICORE_PORT', '9308')
        self.db_name = os.environ.get('MANTICORE_DB', 'jobs')
        self.base_url = f"http://{self.db_host}:{self.db_port}"

    def search(self,
               index: str,
               query: dict,
               limit: int = 100,
               options: Optional[dict] = None,
               sort: Optional[list] = None) -> Optional[List[Dict]]:
        """Run a Manticore JSON-API search and return the hit sources.

        Args:
            index: Index name to query.
            query: Manticore query DSL dict.
            limit: Maximum number of hits to request.
            options: Optional Manticore search options.
            sort: Optional sort specification.

        Returns:
            List of `_source` dicts (possibly empty), or None on any HTTP
            error status or network failure.
        """
        try:
            request_body = {
                "index": index,
                "limit": limit,
                "query": query
            }
            if options:
                request_body["options"] = options
            if sort:
                request_body["sort"] = sort

            resp = requests.post(
                f"{self.base_url}/search",
                json=request_body,
                timeout=5
            )

            if resp.status_code != 200:
                logger.error(f"Search request failed with status code {resp.status_code}: {resp.text}")
                return None

            result = resp.json()
            return [
                hit.get('_source', {})
                for hit in result.get('hits', {}).get('hits', [])
                if '_source' in hit
            ]

        except requests.exceptions.RequestException as e:
            # Fix: previously the exception was swallowed silently, making
            # connectivity problems undiagnosable. Log it, but still return
            # None so callers' error handling is unchanged.
            logger.error(f"Search request error: {e}")
            return None

    def get_job_info(self, job_id: str) -> Optional[Dict]:
        """Look up a single job record by id via search().

        Returns:
            The job document stored under the record's 'j' field, or None
            when the job is not found or the record lacks a 'j' field.
        """
        query = {
            "bool": {
                "must": [
                    {"equals": {"id": int(job_id)}}
                ]
            }
        }
        sort = [{"submit_time": "desc"}]
        results = self.search(index=self.db_name, query=query, limit=1, sort=sort)
        if results:
            # Fix: .get() instead of ["j"] so a malformed record yields None
            # (which callers already handle) rather than raising KeyError.
            return results[0].get("j")
        return None

    def submit_job(self, job_config_path: str) -> str:
        """Submit one job via $LKP_SRC/sbin/submit and return its job id.

        Raises:
            RuntimeError: if LKP_SRC is unset, the command fails, or no job
                id can be parsed from the command output.
        """
        # LKP_SRC must point at the LKP source tree containing sbin/submit.
        lkp_src = os.environ.get('LKP_SRC')
        if not lkp_src:
            raise RuntimeError("Environment variable LKP_SRC is not set")

        # Build the submit command (list form: no shell involved).
        submit_cmd = [os.path.join(lkp_src, 'sbin', 'submit'), job_config_path]
        print(f"[DEBUG] 即将执行 submit 命令: {' '.join(submit_cmd)}")

        try:
            print("[DEBUG] 开始调用 subprocess.run 提交任务")
            # Run submit and capture both output streams.
            result = subprocess.run(
                submit_cmd,
                capture_output=True,
                text=True,
                check=True
            )
            print("[DEBUG] submit 命令执行完成")
            print(f"[DEBUG] submit stdout:\n{result.stdout}")
            print(f"[DEBUG] submit stderr:\n{result.stderr}")

            # Fix: removed redundant function-scope `import re` (already
            # imported at module level).

            # Parse stdout for the job id; fall back to the result_root path.
            for line in result.stdout.splitlines():
                match = re.search(r'got job id=(\d+)', line)
                if match:
                    job_id = match.group(1)
                    print(f"Submitted job successfully: {job_id}")
                    return job_id
                match = re.search(r'result_root .*/(\d+)$', line)
                if match:
                    job_id = match.group(1)
                    print(f"Submitted job (from result_root): {job_id}")
                    return job_id

            # As a last resort, some submit versions print the id on stderr.
            if "job id=" in result.stderr:
                match = re.search(r"job id=(\d+)", result.stderr)
                if match:
                    job_id = match.group(1)
                    logger.warning(f"Extracted job ID from stderr: {job_id}")
                    return job_id

            raise RuntimeError("Failed to extract job ID from submit output")

        except subprocess.CalledProcessError as e:
            error_msg = (
                f"Job submission failed with return code {e.returncode}\n"
                f"Command: {' '.join(submit_cmd)}\n"
                f"Error output:\n{e.stderr}"
            )
            print(error_msg)
            raise RuntimeError("Job submission failed") from e

    def track_job(self, job_id: str, poll_interval: int = 30) -> str:
        """Poll the job's status until it finishes; return its result_root.

        Args:
            job_id: Id of the job to track.
            poll_interval: Seconds between polls (also the backoff base).

        Returns:
            The job's result_root path on success.

        Raises:
            RuntimeError: on job failure, repeated lookup errors, a missing
                result_root, or overall timeout.
        """
        print(f"[DEBUG] 开始跟踪 job_id={job_id}")
        start_time = time.time()
        last_stage = ""
        retry_count = 0
        max_retries = 5
        max_wait_hours = 6

        print(f"Starting to track job: {job_id}")

        while time.time() - start_time < 3600 * max_wait_hours:
            try:
                job_info = self.get_job_info(job_id)

                if not job_info:
                    retry_count += 1
                    print(f"Job not found (attempt {retry_count}/{max_retries})")
                    if retry_count >= max_retries:
                        raise RuntimeError(f"Job {job_id} not found in database")

                    # Exponential backoff before retrying the lookup.
                    time.sleep(poll_interval * (2 ** retry_count))
                    continue

                health = job_info.get('job_health', 'unknown')
                stage = job_info.get('job_stage', 'unknown')
                result_root = job_info.get('result_root')

                # Fix: the original compared against the undefined names
                # `last_status`/`status` (NameError on every poll); use the
                # variables that are actually defined.
                if stage != last_stage:
                    print(f"Job {job_id} status changed: {last_stage} -> {stage}")
                    last_stage = stage

                # Job finished successfully.
                if stage == 'finish' and health == "success":
                    if result_root:
                        print(f"Job completed successfully: {job_id}")
                        return result_root
                    raise RuntimeError(f"Job {job_id} completed but result_root is missing")

                # Job failed or was canceled.
                # NOTE(review): this raise is caught by the except below and
                # retried up to max_retries before propagating — presumably
                # intentional best-effort behavior; confirm before changing.
                if health in ['failed', 'canceled']:
                    raise RuntimeError(f"Job {job_id} failed with status: {health}")

                # Healthy poll: reset the retry counter.
                retry_count = 0

            except Exception as e:
                retry_count += 1
                logger.warning(f"Job tracking error (attempt {retry_count}/{max_retries}): {str(e)}")
                if retry_count >= max_retries:
                    raise RuntimeError(f"Job tracking failed: {str(e)}") from e
                time.sleep(poll_interval * (2 ** retry_count))

            # Regular wait between polls.
            time.sleep(poll_interval)

        raise RuntimeError(f"Job {job_id} timed out after {max_wait_hours} hours")

    def run_multiple(self, job_config_path: str, runs: int = 3) -> List[str]:
        """Submit the same job config `runs` times; return all job ids.

        Submission failures are logged and skipped, so fewer than `runs`
        ids may be returned.
        """
        print(f"[DEBUG] run_multiple: job_config_path={job_config_path}, runs={runs}")
        job_ids = []
        logger.info(f"Submitting {runs} jobs for configuration: {job_config_path}")
        for i in range(runs):
            try:
                job_id = self.submit_job(job_config_path)
                job_ids.append(job_id)
                logger.info(f"Submitted job {i+1}/{runs}: {job_id}")
                time.sleep(5)  # brief delay between submissions
            except Exception as e:
                logger.error(f"Failed to submit job {i+1}/{runs}: {str(e)}")
        return job_ids

    def wait_jobs(self, job_ids: List[str], min_successful: int = 3) -> List[str]:
        """Poll until every job leaves the pending set; return successes.

        Args:
            job_ids: Ids of the jobs to wait for.
            min_successful: Minimum number of successful jobs required.

        Returns:
            The list of job ids that finished successfully.

        Raises:
            RuntimeError: if fewer than `min_successful` jobs succeed.

        NOTE(review): a job that is never found keeps the loop pending with
        no timeout — consider bounding total wait time; confirm with callers.
        """
        pending_jobs = list(job_ids)
        success_jobs = []
        while pending_jobs:
            # Iterate over a copy so we can remove finished jobs in place.
            for job_id in pending_jobs[:]:
                try:
                    job_info = self.get_job_info(job_id)
                    if not job_info:
                        print(f"Job not found: {job_id}")
                        continue

                    health = job_info.get('job_health', 'unknown')
                    stage = job_info.get('job_stage', 'unknown')
                    stats = job_info.get('stats', 'unknown')
                    # Job finished successfully.
                    if stage == 'finish' and health == "success":
                        if stats:
                            print(f"Job completed successfully: {job_id}")
                            pending_jobs.remove(job_id)
                            success_jobs.append(job_id)

                    # Job failed or was canceled; the raise is caught below
                    # and printed, the job stays removed from the pending set.
                    if health in ['failed', 'canceled']:
                        pending_jobs.remove(job_id)
                        # Fix: original used undefined `status` (NameError).
                        raise RuntimeError(f"Job {job_id} failed with status: {health}")
                except Exception as e:
                    print(f"Error tracking job {job_id}: {str(e)}")
            if pending_jobs:
                print(f"Pending jobs: {pending_jobs}")
                time.sleep(30)
        if len(success_jobs) < min_successful:
            raise RuntimeError(
                f"Only {len(success_jobs)} jobs succeeded, need at least {min_successful}"
            )
        # Fix: original returned True, contradicting both the List[str]
        # annotation and the docstring; return the successful job ids
        # (truthy whenever the min_successful check passed).
        return success_jobs

    def run(self, job_config_path: str, runs: int = 1) -> List[str]:
        """Submit `runs` copies of the job config and return their ids."""
        print(f"[DEBUG] run: job_config_path={job_config_path}, runs={runs}")
        job_ids = self.run_multiple(job_config_path, runs=runs)
        return job_ids
