#!/usr/bin/env python3
# SPDX-License-Identifier: MulanPSL-2.0+

from bisect_log_config import logger
import json
import time
from typing import Optional, Dict, Any, Tuple, List
from functools import wraps
from manticore_simple import ManticoreClient

class BisectDBHttp:
    """HTTP-based database operations for bisect process using ManticoreClient"""

    def __init__(self, host: str, port: str, database: str):
        """Initialize HTTP client.

        Args:
            host: Manticore server host.
            port: Port as a string; the MySQL port '9306' is transparently
                remapped to the HTTP port 9308.
            database: Index/table name queried by the search methods.
        """
        self.host = host
        # The HTTP API usually listens on 9308, not the MySQL port 9306.
        self.port = int(port) if port != '9306' else 9308
        self.database = database
        self.client = ManticoreClient(host=self.host, port=self.port)
        # Preserve a cache that may already exist (e.g. set by a subclass
        # before calling super().__init__()); otherwise start empty.
        if not hasattr(self, '_cache'):
            self._cache = {}

    def cache_query(ttl: int = 300):
        """Cache decorator for database queries.

        NOTE: this is a plain function used as a decorator inside the class
        body at class-definition time; it is not meant to be called on
        instances.

        Args:
            ttl: Lifetime of a cache entry in seconds.
        """
        def decorator(func):
            @wraps(func)
            def wrapper(self, *args, **kwargs):
                # Serialize the arguments with JSON so unhashable values
                # (e.g. dicts) can still form a key. The full JSON text is
                # used as the key directly: unlike hash(), it cannot collide
                # for distinct argument sets, so a collision can never serve
                # a wrong cached result.
                try:
                    cache_key = (
                        f"{func.__module__}.{func.__name__}:"
                        f"{json.dumps(args, sort_keys=True, default=str)}:"
                        f"{json.dumps(kwargs, sort_keys=True, default=str)}"
                    )
                except (TypeError, ValueError):
                    # Serialization failed - skip caching for this call.
                    return func(self, *args, **kwargs)

                if cache_key in self._cache:
                    timestamp, result = self._cache[cache_key]
                    if time.time() - timestamp < ttl:
                        return result
                result = func(self, *args, **kwargs)
                self._cache[cache_key] = (time.time(), result)
                return result
            return wrapper
        return decorator

    @cache_query(ttl=600)
    def get_job_info(self, job_id: str) -> Optional[Dict[str, Any]]:
        """
        Fetch job information using HTTP search API

        Args:
            job_id: The job ID to fetch (must be a string of digits)

        Returns:
            Dict containing job information or None if not found
        """
        if not job_id or not isinstance(job_id, str) or not job_id.isdigit():
            logger.error(f"无效任务ID格式 - 值: '{job_id}' - 类型: {type(job_id)}")
            return None

        try:
            # Match the document whose id equals the numeric job id.
            query = {
                "bool": {
                    "must": [
                        {"equals": {"id": int(job_id)}}
                    ]
                }
            }

            results = self.client.search(
                index=self.database,
                query=query,
                limit=1
            )

            if not results:
                return None

            job_data = results[0]

            # The main job payload usually lives in the 'j' field, either as
            # a JSON string or as an already-decoded dict.
            if 'j' in job_data:
                if isinstance(job_data['j'], str):
                    job_content = json.loads(job_data['j'])
                else:
                    job_content = job_data['j']

                # Propagate full_text_kv into the payload when present.
                if job_data.get('full_text_kv'):
                    job_content['full_text_kv'] = job_data['full_text_kv']

                return job_content
            else:
                # No 'j' field: return the raw search hit as-is.
                return job_data

        except Exception as e:
            logger.error(f"HTTP database error: {e}")
            return None

    @cache_query(ttl=600)
    def check_existing_job(self, job: Dict[str, Any], limit: int = 1, time_range_days: int = 7, my_account: Optional[str] = None) -> List[Tuple[str, str]]:
        """
        Check if jobs with same configuration exist using HTTP API

        Args:
            job: Job configuration to check
            limit: Maximum number of results to return (default: 1 for basic deduplication)
            time_range_days: Number of days to look back (default: 7, set to 0 to disable time filter)
            my_account: Filter by account name (default: None, no account filter)

        Returns:
            List of (job_id, result_root) tuples, empty list if none found

        Note:
            - For basic deduplication: use limit=1 (default)
            - For finding multiple candidates: use limit=10-20
            - For performance sampling: use limit=3-5
        """
        try:
            # Build the list of query conditions.
            must_conditions = []

            # Optional time filter (time_range_days == 0 disables it).
            if time_range_days > 0:
                cutoff = int(time.time()) - time_range_days * 24 * 3600
                must_conditions.append({
                    "range": {
                        "submit_time": {"gte": cutoff}
                    }
                })

            # Optional account filter. For the HTTP API this is a top-level
            # field, so no 'j.' prefix is needed.
            if my_account is not None:
                must_conditions.append({
                    "equals": {"my_account": my_account}
                })

            # 'program' fields: match on every field (including commit) for
            # deduplication. These live inside 'j', hence the 'j.' prefix.
            if 'program' in job:
                for prog_name, prog_config in job['program'].items():
                    if isinstance(prog_config, dict):
                        for key, value in prog_config.items():
                            field_name = f"j.program.{prog_name}.{key}"
                            must_conditions.append({
                                "equals": {field_name: str(value)}
                            })

            # 'ss' fields: same treatment as 'program'.
            if 'ss' in job:
                for ss_name, ss_config in job['ss'].items():
                    if isinstance(ss_config, dict):
                        for key, value in ss_config.items():
                            field_name = f"j.ss.{ss_name}.{key}"
                            must_conditions.append({
                                "equals": {field_name: str(value)}
                            })

            # Per compass-ci's constants-job.rb: suite, my_account, testbox,
            # arch and osv are MANTI_STRING_FIELDS, i.e. top-level fields
            # that need no 'j.' prefix.
            if job.get('suite'):
                must_conditions.append({
                    "equals": {"suite": str(job['suite'])}
                })

            if job.get('testbox'):
                must_conditions.append({
                    "equals": {"testbox": str(job['testbox'])}
                })

            # NOTE(review): an "exists j.stats" condition was deliberately
            # omitted because not every job carries stats.

            # Assemble the complete boolean query.
            query = {
                "bool": {
                    "must": must_conditions
                }
            }

            # Newest submissions first.
            sort = [{"submit_time": {"order": "desc"}}]

            # Execute the search.
            results = self.client.search(
                index=self.database,
                query=query,
                limit=limit,
                sort=sort
            )

            if not results:
                return []

            # Convert raw hits into (job_id, result_root) tuples.
            job_list = []
            for result in results:
                try:
                    # The id may appear under several field names.
                    job_id = None
                    for id_field in ['id', '_id', 'doc_id']:
                        if id_field in result:
                            job_id = str(result[id_field])
                            break

                    if not job_id:
                        # Keep the warning readable: log only the key
                        # identifying fields; the full dump goes to debug.
                        key_info = []
                        if 'pp.makepkg.project' in result:
                            key_info.append(f"project={result.get('pp.makepkg.project', 'unknown')}")
                        if 'pp.makepkg.commit' in result:
                            # str() guards against non-string commit values.
                            commit = str(result.get('pp.makepkg.commit', 'unknown'))
                            if len(commit) > 12:
                                commit = commit[:12] + "..."
                            key_info.append(f"commit={commit}")
                        if 'suite' in result:
                            key_info.append(f"suite={result.get('suite', 'unknown')}")
                        if 'testbox' in result:
                            key_info.append(f"testbox={result.get('testbox', 'unknown')}")

                        key_info_str = ", ".join(key_info) if key_info else "no key info"
                        logger.warning(f"No ID found in result: {key_info_str}")
                        logger.debug(f"Problematic result dump: {json.dumps(result, default=str)}")
                        continue

                    # result_root may live inside the 'j' payload (possibly
                    # as a JSON string) or directly on the hit.
                    result_root = ''
                    if 'j' in result:
                        j_data = result['j']
                        if isinstance(j_data, str):
                            j_data = json.loads(j_data)
                        result_root = j_data.get('result_root', '')
                    else:
                        result_root = result.get('result_root', '')

                    job_list.append((job_id, result_root))

                except Exception as e:
                    logger.warning(f"Error processing search result: {str(e)}")
                    continue

            return job_list

        except Exception as e:
            logger.error(f"HTTP search error in check_existing_job: {e}")
            logger.error(f"Query that failed: {json.dumps(query, indent=2) if 'query' in locals() else 'N/A'}")
            return []

    @cache_query(ttl=600)
    def check_existing_job_by_md5(self, job: Dict[str, Any], limit: int = 1, time_range_days: int = 7) -> List[Tuple[str, str]]:
        """
        Simplified duplicate job detection using all_params_md5

        This method uses the existing all_params_md5 field in the job structure
        to detect duplicate jobs, following compass-ci's approach.

        Args:
            job: Job configuration to check (must contain all_params_md5 field)
            limit: Maximum number of results to return
            time_range_days: Number of days to look back (default: 7)

        Returns:
            List of (job_id, result_root) tuples, empty list if none found
        """
        try:
            # Extract all_params_md5 from job configuration
            all_params_md5 = job.get('all_params_md5')
            if not all_params_md5:
                logger.warning("Job configuration does not contain all_params_md5 field")
                return []

            logger.debug(f"Checking for duplicate jobs with all_params_md5: {all_params_md5}")

            # Build query using all_params_md5
            must_conditions = []

            # Optional time filter (time_range_days == 0 disables it).
            if time_range_days > 0:
                cutoff = int(time.time()) - time_range_days * 24 * 3600
                must_conditions.append({
                    "range": {
                        "submit_time": {"gte": cutoff}
                    }
                })

            # Match the md5 via the full_text_kv field.
            must_conditions.append({
                "match": {"full_text_kv": f"all_params_md5={all_params_md5}"}
            })

            # Assemble the complete boolean query.
            query = {
                "bool": {
                    "must": must_conditions
                }
            }

            # Newest submissions first.
            sort = [{"submit_time": {"order": "desc"}}]

            # Execute the search.
            results = self.client.search(
                index=self.database,
                query=query,
                limit=limit,
                sort=sort
            )

            if not results:
                return []

            # Convert raw hits into (job_id, result_root) tuples.
            job_list = []
            for result in results:
                try:
                    # The id may appear under several field names.
                    job_id = None
                    for id_field in ['id', '_id', 'doc_id']:
                        if id_field in result:
                            job_id = str(result[id_field])
                            break

                    if not job_id:
                        continue

                    # result_root may live inside the 'j' payload (possibly
                    # as a JSON string) or directly on the hit.
                    result_root = ''
                    if 'j' in result:
                        j_data = result['j']
                        if isinstance(j_data, str):
                            j_data = json.loads(j_data)
                        result_root = j_data.get('result_root', '')
                    else:
                        result_root = result.get('result_root', '')

                    job_list.append((job_id, result_root))

                except Exception as e:
                    logger.warning(f"Error processing MD5 search result: {str(e)}")
                    continue

            return job_list

        except Exception as e:
            logger.error(f"HTTP search error in check_existing_job_by_md5: {e}")
            return []

    def close(self):
        """Close HTTP client (no-op for HTTP client)"""
        # The HTTP client needs no explicit connection teardown.
        pass
