#!/usr/bin/env python3

import argparse
import hashlib
import logging
import os
import random
import re
import shutil
import subprocess
import sys
import time
from copy import deepcopy
from datetime import datetime

import yaml

# Add bisect module path
sys.path.append(os.path.join(os.environ['LKP_SRC'], 'programs/bisect-py'))
from py_bisect import GitBisect
from bisect_log_config import logger

# Define repository base directory - consistent with daily_kernel_test.sh
REPO_BASE_DIR = os.environ.get('REPO_BASE_DIR', '/srv/git/auto_test_repos')
# Fallback to user directory if environment variable is not set
# NOTE(review): os.access() also returns False when the path does not exist,
# so the fallback to ~/auto_test_repos is taken both when the default dir is
# read-only AND when it is simply absent — confirm this is intended.
if not os.access(REPO_BASE_DIR, os.W_OK):
    REPO_BASE_DIR = os.path.expanduser("~/auto_test_repos")
# Ensure the base directory exists at import time (module-level side effect).
os.makedirs(REPO_BASE_DIR, exist_ok=True)

def get_head_commit(repo_dir):
    """Return the HEAD commit SHA of the git repository at *repo_dir*.

    Returns None (after logging the error) when `git rev-parse` fails,
    e.g. because *repo_dir* is not a valid repository.
    """
    cmd = ['git', '-C', repo_dir, 'rev-parse', 'HEAD']
    try:
        return subprocess.check_output(cmd, text=True).strip()
    except subprocess.CalledProcessError as e:
        logger.error(f"Failed to get HEAD commit: {str(e)}")
        return None

def get_base_commit(repo_dir, head_commit, branch=None):
    """Resolve a stable baseline commit for *head_commit* via git-base-rc-tag.sh.

    Falls back to *head_commit* itself when the helper script is missing,
    exits non-zero, or prints something that is not a single commit-ish token.
    The *branch* argument is accepted but only logged: the script has no
    branch option.
    """
    # Locate the helper script relative to LKP_SRC.
    script_path = os.path.join(os.environ.get('LKP_SRC', ''), 'lib/git-base-rc-tag.sh')

    # git-base-rc-tag.sh only accepts -C and commit parameters
    cmd = ['bash', script_path, '-C', repo_dir, head_commit]

    if branch:
        # Script doesn't support -b parameter, but we can log it for debugging
        logger.info(f"Note: git-base-rc-tag.sh doesn't support -b parameter, branch '{branch}' will be ignored.")

    try:
        base_commit = subprocess.check_output(cmd, text=True).strip()
    except (subprocess.CalledProcessError, FileNotFoundError) as e:
        logger.warning(f"Unable to find stable tag, using HEAD commit: {str(e)}")
        return head_commit

    # A usable result is a single non-empty token; anything else (empty
    # output, error text with spaces) falls back to HEAD.
    if base_commit and ' ' not in base_commit:
        return base_commit

    logger.warning(f"Invalid stable tag obtained: {base_commit}")
    return head_commit

def get_repo_dir(repo_url):
    """Generate unique repository directory path (deprecated - use --repo-dir instead).

    NOTE(review): depends on the module-global ``gb`` that is only created
    inside main(); calling this helper earlier raises NameError.
    """
    deprecation_msg = "get_repo_dir() is deprecated. Repository cloning should be handled by daily_kernel_test.sh"
    logger.warning(deprecation_msg)
    return gb.clone_repo(repo_url, persistent=True)

def clone_repo(repo_url):
    """Unified clone interface (deprecated - use --repo-dir instead).

    Thin shim kept for backward compatibility; delegates to get_repo_dir().
    """
    deprecation_msg = "clone_repo() is deprecated. Repository cloning should be handled by daily_kernel_test.sh"
    logger.warning(deprecation_msg)
    return get_repo_dir(repo_url)

def cleanup_repos(max_age_days=30, base_dir=None):
    """Delete repository directories older than *max_age_days* days.

    Args:
        max_age_days: age threshold in days, judged by directory mtime.
        base_dir: directory that holds the cloned repositories; defaults
            to the module-level REPO_BASE_DIR (backward compatible).

    Cleanup is best-effort: a repository that cannot be removed (e.g.
    permission denied) is logged and skipped instead of aborting the
    whole sweep — previously the first rmtree failure raised and stopped
    the caller.
    """
    if base_dir is None:
        base_dir = REPO_BASE_DIR
    now = time.time()
    for entry in os.listdir(base_dir):
        repo_path = os.path.join(base_dir, entry)
        if not os.path.isdir(repo_path):
            continue
        # Age is measured from the directory's last modification time.
        if now - os.path.getmtime(repo_path) <= max_age_days * 86400:
            continue
        try:
            shutil.rmtree(repo_path)
        except OSError as e:
            logger.warning(f"Failed to clean up repository {entry}: {e}")
            continue
        logger.info(f"Cleaned up old repository: {entry}")



def parse_arguments():
    """Build the CLI parser and return the parsed arguments namespace."""
    arg_parser = argparse.ArgumentParser(
        description='Linux Kernel Automated Testing Tool',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )

    # --- required arguments ---
    arg_parser.add_argument('--repo', required=True,
                            help='Git repository URL to test')
    arg_parser.add_argument('--template', required=True,
                            help='Job template file (YAML format)')
    arg_parser.add_argument('--test-type', choices=['functional', 'performance'],
                            required=True,
                            help='Test type: functional (regular testing) or performance (performance testing)')

    # --- optional arguments ---
    arg_parser.add_argument('--override', action='append', default=[],
                            help='Override template fields (format: path.to.field=value)')
    arg_parser.add_argument('--var', action='append', default=[],
                            help='Template variables (format: name=value)')
    arg_parser.add_argument('--result-root', default='auto_test_results',
                            help='Directory to store results')
    arg_parser.add_argument('--append-yaml',
                            help='Additional YAML file to merge (for ss, testbox, etc.)')
    arg_parser.add_argument('--branch',
                            help='Specific branch to test (for OpenEuler multi-branch testing)')
    # Required as well: the repository must already be cloned by the caller.
    arg_parser.add_argument('--repo-dir', required=True,
                            help='Local repository path (required - repository should be cloned by daily_kernel_test.sh)')

    return arg_parser.parse_args()

def load_template(template_path):
    """Load a YAML job template, supporting multi-document files.

    Documents are merged top-level-key by top-level-key; a later document
    overrides earlier ones for the same field names.  Any failure is
    logged and re-raised as RuntimeError.
    """
    try:
        with open(template_path, 'r') as f:
            merged = {}
            for doc in yaml.safe_load_all(f):
                if doc:
                    merged.update(doc)
            return merged
    except Exception as e:
        logger.error(f"Failed to load template: {str(e)}")
        raise RuntimeError(f"Template loading failed: {str(e)}")

def apply_variables(template, variables):
    """Substitute ``${name}`` template variables throughout *template*.

    Args:
        template: arbitrarily nested dict/list structure loaded from YAML.
        variables: list of "name=value" strings from the CLI; malformed
            entries are logged and skipped.

    Returns:
        A new structure with every ``${name}`` occurrence in string values
        replaced by its value.  Unknown variables are left untouched.

    Generalization over the previous version: ``${name}`` is now also
    substituted when embedded inside a longer string (e.g. "v5-${tag}"),
    not only when the string is exactly "${name}".  Exact-match behavior,
    including passthrough of unknown variables, is unchanged.
    """
    var_dict = {}
    for var in variables:
        if '=' not in var:
            logger.warning(f"Invalid variable format: {var}")
            continue
        name, value = var.split('=', 1)
        var_dict[name] = value

    # ${...} with a non-empty, brace-free name.
    pattern = re.compile(r'\$\{([^}]+)\}')

    def substitute(match):
        # Unknown variables keep their literal ${name} text (old behavior).
        return var_dict.get(match.group(1), match.group(0))

    # Recursively rebuild the template, substituting in string leaves.
    def process_item(item):
        if isinstance(item, dict):
            return {k: process_item(v) for k, v in item.items()}
        elif isinstance(item, list):
            return [process_item(i) for i in item]
        elif isinstance(item, str):
            return pattern.sub(substitute, item)
        return item

    return process_item(template)

def merge_dicts(base, extra):
    """Recursively merge *extra* into *base* in place.

    When both sides hold a dict for the same key the merge recurses;
    any other value from *extra* overwrites the one in *base*.
    """
    for key, value in extra.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            merge_dicts(base[key], value)
        else:
            base[key] = value

def apply_overrides(template, overrides):
    """Apply "path.to.field=value" CLI overrides to *template* in place.

    Each override value is parsed with yaml.safe_load when possible (so
    "5" becomes int 5, "true" a bool), falling back to the raw string.
    Two special cases are handled first: program.makepkg.kconfig_seed
    (structure is created on demand) and randconfig numbering validation
    for program.makepkg.config.
    """
    for override in overrides:
        if '=' not in override:
            logger.warning(f"Invalid override format: {override}")
            continue

        # Only the first '=' splits path from value, so values may contain '='.
        path, value = override.split('=', 1)

        # Special handling for kconfig_seed
        if path == "program.makepkg.kconfig_seed":
            # Ensure program.makepkg structure exists
            if "program" not in template:
                template["program"] = {}
            if "makepkg" not in template["program"]:
                template["program"]["makepkg"] = {}

            # Directly add seed field
            template["program"]["makepkg"]["kconfig_seed"] = value
            continue

        # Validate randconfig format: only "randconfig-<1..20>" is accepted;
        # anything else is logged and the override dropped.
        if path == "program.makepkg.config" and value.startswith("randconfig"):
            config_num = value.split("-")[-1]
            if not config_num.isdigit() or int(config_num) < 1 or int(config_num) > 20:
                logger.warning(f"Invalid randconfig number: {value}")
                continue

        # Try nested structure first: walk path components, creating dicts
        # for any missing/non-dict intermediate, then set the leaf.
        # NOTE(review): this branch appears to always succeed and `continue`,
        # since dict creation/assignment does not raise for these inputs —
        # the dot-notation fallback below looks unreachable in practice.
        # Confirm before relying on the fallback behavior.
        try:
            current = template
            keys = path.split('.')
            for key in keys[:-1]:
                if key not in current or not isinstance(current[key], dict):
                    current[key] = {}
                current = current[key]
            last_key = keys[-1]
            try:
                # Prefer typed value (int/bool/list...) over raw string.
                parsed_value = yaml.safe_load(value)
                current[last_key] = parsed_value
            except Exception:
                current[last_key] = value
            logger.debug(f"Overridden field: {path} = {current[last_key]}")
            continue
        except Exception:
            pass  # Nested structure failed, try dot-notation key

        # Try dot-notation key (e.g., program.unixbench as a whole key)
        try:
            keys = path.split('.')
            dot_key = '.'.join(keys)
            parent = template
            # If it's a top-level key
            if dot_key in parent:
                try:
                    parsed_value = yaml.safe_load(value)
                    parent[dot_key] = parsed_value
                except Exception:
                    parent[dot_key] = value
                logger.debug(f"Overridden dot-key field: {dot_key} = {parent[dot_key]}")
                continue
            # If it's a dot-notation key in nested structure
            # Search layer by layer, use dot-notation key for the last layer:
            # descend keys[:i] normally, then treat keys[i:] joined by '.'
            # as one literal key at that level.
            for i in range(len(keys) - 1, 0, -1):
                parent = template
                for key in keys[:i]:
                    if key in parent and isinstance(parent[key], dict):
                        parent = parent[key]
                    else:
                        break
                else:
                    # for/else: only reached when descent did not break.
                    dot_key2 = '.'.join(keys[i:])
                    if dot_key2 in parent:
                        try:
                            parsed_value = yaml.safe_load(value)
                            parent[dot_key2] = parsed_value
                        except Exception:
                            parent[dot_key2] = value
                        logger.debug(f"Overridden nested dot-key field: {dot_key2} = {parent[dot_key2]}")
                        break
        except Exception as e:
            logger.warning(f"Failed to override field {path}: {str(e)}")

def find_commit_field(job):
    """Return the dotted path of the first commit field found in *job*.

    Checks the two known locations (ss.linux.commit, program.makepkg.commit)
    first, then scans every program.<name> entry for a 'commit' key.
    Returns None when no commit field exists.

    Fix over the previous version: traversal is now type-safe.  A non-dict
    intermediate value (e.g. job['program'] being a string) previously
    raised TypeError when indexed with a string key; it is now skipped.
    The redundant duplicate ss.linux.commit re-check is also removed —
    it could only repeat a lookup the first loop had already performed.
    """
    commit_paths = [
        'ss.linux.commit',
        'program.makepkg.commit',
    ]

    for path in commit_paths:
        current = job
        for key in path.split('.'):
            if not isinstance(current, dict) or key not in current:
                break
            current = current[key]
        else:
            # for/else: every component resolved, so the path exists.
            return path

    # Fallback: any program.<name> entry carrying a 'commit' key.
    programs = job.get('program') if isinstance(job, dict) else None
    if isinstance(programs, dict):
        for prog, prog_cfg in programs.items():
            if isinstance(prog_cfg, dict) and 'commit' in prog_cfg:
                return f'program.{prog}.commit'

    return None

def has_commit_override(job):
    """Check whether a commit field is already set in *job* (via override).

    Returns (True, commit) when one of the known commit paths holds a
    truthy value, otherwise (False, None).

    Fix over the previous version (and for consistency with
    find_commit_field): traversal is type-safe — a non-dict intermediate
    value such as job['ss']['linux'] being a list previously raised
    TypeError when indexed with a string key; it is now skipped.
    """
    # Check possible commit field paths
    commit_paths = [
        'ss.linux.commit',
        'program.makepkg.commit',
    ]

    for path in commit_paths:
        current = job
        for key in path.split('.'):
            if not isinstance(current, dict) or key not in current:
                break
            current = current[key]
        else:
            # Path fully resolved; only a truthy commit counts as "set".
            if current:
                logger.info(f"Detected already-set commit field {path}: {current}")
                return True, current
    return False, None

def update_job_commit(job, commit):
    """Set the commit field of *job* (located via find_commit_field) to *commit*.

    Returns *job* unchanged (with a warning) when no commit field exists.
    """
    commit_path = find_commit_field(job)
    if commit_path is None:
        logger.warning("No commit field found in job template")
        return job

    # Walk to the parent of the leaf key, then assign the new commit.
    *parents, leaf = commit_path.split('.')
    target = job
    for key in parents:
        target = target[key]
    target[leaf] = commit

    logger.debug(f"Updated commit at {commit_path}: {commit[:7]}")
    return job

def update_job_branch(job, branch):
    """Record *branch* in the job where the job type supports it.

    A falsy branch is a no-op.  makepkg jobs get program.makepkg.branch
    set; ss jobs currently only log that branch handling is unimplemented.
    Always returns *job*.
    """
    if not branch:
        return job

    if 'program' in job and 'makepkg' in job['program']:
        # makepkg jobs carry the branch directly.
        job['program']['makepkg']['branch'] = branch
        logger.info(f"Set makepkg branch: {branch}")
    elif 'ss' in job and 'linux' in job['ss']:
        # ss jobs have no branch field yet; log the request for visibility.
        logger.info(f"SS job branch setting requires specific logic, current branch parameter: {branch}")

    return job

def main():
    """Entry point: submit functional or performance test jobs for a kernel repo.

    Workflow: parse args -> prune old repo clones -> load/merge/override the
    YAML job template -> validate the local repository -> submit job(s) via
    GitBisect.  Returns a process exit code: 0 on success, 1 on any failure.
    """
    args = parse_arguments()

    # Clean up old repositories
    cleanup_repos(30)

    # Create result directory
    os.makedirs(args.result_root, exist_ok=True)

    # Initialize GitBisect
    # NOTE(review): gb is deliberately module-global so the deprecated
    # get_repo_dir()/clone_repo() helpers can reach it.
    global gb
    gb = GitBisect()
    gb.set_log(args.result_root)

    # Create simplified log: TSV summary, one row per submitted job.
    summary_log_path = os.path.join(args.result_root, 'summary.log')
    if not os.path.exists(summary_log_path):
        with open(summary_log_path, 'w') as f:
            f.write("Timestamp\tJobID\tTestType\tRepo\tCommit\n")

    # Function to record simplified log (commit truncated to 8 chars).
    def log_summary(job_id, test_type, commit):
        timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        repo_name = os.path.basename(args.repo)
        with open(summary_log_path, 'a') as f:
            f.write(f"{timestamp}\t{job_id}\t{test_type}\t{repo_name}\t{commit[:8]}\n")

    # Load template
    try:
        template = load_template(args.template)
        # If there's additional yaml, merge it first
        if args.append_yaml:
            extra = load_template(args.append_yaml)
            merge_dicts(template, extra)
        # Variable substitution before overrides, so overrides win.
        template = apply_variables(template, args.var)
        apply_overrides(template, args.override)

        # Record original URL for protocol prefix detection
        original_url = ""
        if 'program' in template and 'makepkg' in template['program']:
            original_url = template['program']['makepkg'].get('_url', '')
        elif 'ss' in template and 'linux' in template['ss']:
            original_url = template['ss']['linux'].get('_url', '')

        # Update URL field, directly use --repo parameter
        updated_url = args.repo

        # Apply update (makepkg jobs take precedence over ss jobs).
        if 'program' in template and 'makepkg' in template['program']:
            template['program']['makepkg']['_url'] = updated_url
        elif 'ss' in template and 'linux' in template['ss']:
            template['ss']['linux']['_url'] = updated_url

        logger.info(f"Updated repository URL: {original_url} → {updated_url}")

        # Apply branch parameter (if provided)
        if args.branch:
            template = update_job_branch(template, args.branch)
            logger.info(f"Applied branch parameter: {args.branch}")

    except Exception as e:
        logger.error(f"Template processing failed: {str(e)}")
        return 1

    try:
        # Use repository path passed from daily_kernel_test.sh
        repo_dir = args.repo_dir
        logger.info(f"Using repository path: {repo_dir}")

        # Validate that the path exists and is a valid git repository
        # (presence of a .git entry; worktrees have a .git file, which passes).
        if not os.path.exists(os.path.join(repo_dir, '.git')):
            logger.error(f"Specified path is not a valid git repository: {repo_dir}")
            return 1

        # Get HEAD commit
        head_commit = get_head_commit(repo_dir)
        if not head_commit:
            logger.error("Unable to get HEAD commit")
            return 1

        # Execute different logic based on test type
        if args.test_type == 'functional':
            # Functional test: prioritize override commit, otherwise use HEAD commit
            functional_job = deepcopy(template)

            # Check if commit was set via override
            has_override, override_commit = has_commit_override(functional_job)
            if has_override:
                logger.info(f"Using commit set via override: {override_commit}")
                commit_to_use = override_commit
            else:
                logger.info(f"No override commit detected, using HEAD: {head_commit[:7]}")
                functional_job = update_job_commit(functional_job, head_commit)
                commit_to_use = head_commit

            try:
                # submit_job returns (job_id, ...); only the id is needed here.
                job_id, _ = gb.submit_job(functional_job)
                logger.info(f"Regular test job submitted: JobID={job_id} ({commit_to_use[:7] if isinstance(commit_to_use, str) else str(commit_to_use)})")
                log_summary(job_id, 'functional', commit_to_use if isinstance(commit_to_use, str) else head_commit)
                return 0
            except Exception as e:
                logger.error(f"Failed to submit job: {str(e)}")
                return 1

        elif args.test_type == 'performance':
            # Performance test: prioritize override commit, otherwise compare HEAD with stable tag
            performance_job = deepcopy(template)

            # Check if commit was set via override
            has_override, override_commit = has_commit_override(performance_job)
            if has_override:
                # If commit is set via override, submit a single performance test job
                # This allows the caller (e.g., daily_kernel_test.sh) to control
                # baseline and current jobs separately
                logger.info(f"Performance test using commit set via override: {override_commit}")
                try:
                    job_id, _ = gb.submit_job(performance_job)
                    logger.info(f"Performance test job submitted: JobID={job_id} (commit: {override_commit[:7] if isinstance(override_commit, str) else str(override_commit)})")
                    # Print JobID in a parseable format for shell script
                    print(f"JobID={job_id}")
                    log_summary(job_id, 'performance', override_commit if isinstance(override_commit, str) else str(override_commit))
                    return 0
                except Exception as e:
                    logger.error(f"Failed to submit performance job: {str(e)}")
                    return 1
            else:
                # Original comparison logic: compare HEAD with stable tag
                # Prioritize branch parameter from command line
                branch = args.branch
                if not branch:
                    # If no command line parameter, extract branch info from template
                    if (template.get('program', {}).get('makepkg', {}).get('branch')):
                        branch = template['program']['makepkg']['branch']
                        logger.info(f"Detected makepkg branch: {branch}")

                base_commit = get_base_commit(repo_dir, head_commit, branch)
                logger.info(f"Performance test comparison: HEAD={head_commit}, Baseline={base_commit}, Branch={branch if branch else 'default'}")

                # Submit baseline job
                baseline_job = deepcopy(template)
                baseline_job = update_job_commit(baseline_job, base_commit)
                base_info = gb.submit_job(baseline_job)

                # Submit test job
                test_job = deepcopy(template)
                test_job = update_job_commit(test_job, head_commit)
                test_info = gb.submit_job(test_job)

                # NOTE(review): submit_job exceptions here are caught by the
                # outer except below; this branch only handles falsy returns.
                if base_info and test_info:
                    base_job_id, _ = base_info
                    test_job_id, _ = test_info
                    logger.info(
                        f"Performance comparison jobs submitted: "
                        f"Baseline JobID={base_job_id} ({base_commit}), "
                        f"Test JobID={test_job_id} ({head_commit})"
                    )
                    # Record in summary log
                    log_summary(f"{base_job_id},{test_job_id}", 'performance',
                               f"HEAD:{head_commit[:7]},BASE:{base_commit[:7]}")
                    return 0
                else:
                    logger.error("Performance test job submission failed")
                    return 1
        else:
            # Unreachable in practice: argparse restricts --test-type choices.
            logger.error(f"Unsupported test type: {args.test_type}")
            return 1
    except Exception as e:
        logger.exception("Auto test failed")
        return 1
    finally:
        # NOTE(review): no cleanup performed; this finally is redundant.
        pass

if __name__ == "__main__":
    try:
        exit_code = main()
        sys.exit(exit_code)
    except Exception as e:
        logger.exception("Auto test failed")
        sys.exit(1)
    finally:
        pass
