# Copyright 2025 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

"""
IR utilities for custom pass tests.

This module provides helpers for cleaning graph dump directories, locating
before/after IR files generated by MindSpore's custom optimization phases,
and basic text utilities for IR analysis.
"""

import os
import re
import shutil
import logging
from typing import Optional, Tuple

logger = logging.getLogger(__name__)


def clean_graph_dir(graph_dir: str) -> None:
    """Remove previous graph dump to ensure IR verification uses fresh files.

    This avoids accidental assertions against stale IR files from previous runs.

    Args:
        graph_dir: Directory holding IR dump files. If it exists, all of its
            contents (files, symlinks, subdirectories) are removed; if it does
            not exist, it is created.

    Notes:
        Failures are logged as warnings rather than raised, so a permission
        problem here does not abort the caller's test run (best-effort cleanup).
    """
    try:
        if os.path.isdir(graph_dir):
            for entry in os.listdir(graph_dir):
                full_path = os.path.join(graph_dir, entry)
                # Remove symlinks via os.remove so we never follow them into
                # a directory tree we do not own.
                if os.path.isfile(full_path) or os.path.islink(full_path):
                    os.remove(full_path)
                else:
                    shutil.rmtree(full_path, ignore_errors=True)
        else:
            os.makedirs(graph_dir, exist_ok=True)
    except OSError as exc:
        # PermissionError is a subclass of OSError, so a single clause covers
        # both; keep cleanup best-effort and merely warn.
        logger.warning("Failed to clean graphs directory '%s': %s", graph_dir, exc)


def find_latest_ir(graph_dir: str, prefix: str) -> Optional[str]:
    """Find the most recent IR file with the given prefix in graph_dir.

    MindSpore writes multiple IR files; select the newest by modification time.
    Returns ``None`` when the directory is missing or holds no matching file.
    """
    try:
        entries = os.listdir(graph_dir)
    except FileNotFoundError:
        return None
    matches = [
        path
        for path in (
            os.path.join(graph_dir, name)
            for name in entries
            if name.startswith(prefix)
        )
        if os.path.isfile(path)
    ]
    # Newest-by-mtime wins; default=None covers the no-match case.
    return max(matches, key=os.path.getmtime, default=None)


def read_text(path: str) -> str:
    """Return the full contents of *path* decoded as UTF-8.

    Undecodable bytes are silently dropped (``errors="ignore"``) because IR
    dumps may contain stray binary data.
    """
    with open(path, encoding="utf-8", errors="ignore") as handle:
        return handle.read()


def get_ir_paths(
        graph_dir: str,
        device: str,
        before_prefix: Optional[str] = None,
        after_prefix: Optional[str] = None,
) -> Tuple[str, str]:
    """Return paths to before/after IR files for a given device and graph dir.

    The default prefixes match the custom optimization graph naming in MindSpore:
    - hwopt_{device}_custom_optimization_before_graph*
    - hwopt_{device}_custom_optimization_after_graph*
    """
    # Resolve defaults, then look up both files with one shared code path.
    lookups = (
        ("Before", before_prefix
         if before_prefix is not None
         else f"hwopt_{device}_custom_optimization_before_graph"),
        ("After", after_prefix
         if after_prefix is not None
         else f"hwopt_{device}_custom_optimization_after_graph"),
    )
    resolved = []
    for label, prefix in lookups:
        ir_path = find_latest_ir(graph_dir, prefix)
        assert ir_path is not None, (
            f"{label}-graph IR not found with prefix '{prefix}' in {graph_dir}"
        )
        resolved.append(ir_path)
    return resolved[0], resolved[1]


def get_ir_texts(
        graph_dir: str,
        device: str,
        before_prefix: Optional[str] = None,
        after_prefix: Optional[str] = None,
) -> Tuple[str, str]:
    """Load and return the contents of before/after IR files as strings."""
    path_before, path_after = get_ir_paths(
        graph_dir, device, before_prefix, after_prefix
    )
    logger.info("Found before IR: %s", path_before)
    logger.info("Found after IR: %s", path_after)
    return read_text(path_before), read_text(path_after)


def count_occurrences(text: str, token: str, use_word_boundary: bool = True) -> int:
    """Count occurrences of a token in text using regex with optional word boundary.

    Word boundaries help avoid matching a token inside longer identifiers.
    """
    escaped = re.escape(token)
    pattern = rf"\b{escaped}\b" if use_word_boundary else escaped
    return sum(1 for _ in re.finditer(pattern, text))


def verify_op_removed_after_pass(
        graph_dir: str,
        device: str,
        op_name: str,
        replacement_op: Optional[str] = None
) -> None:
    """Verify an op count decreased after-pass, optionally checking for replacement op.

    This is a convenience helper for simple replacement passes (e.g., AddN -> Add).
    Since IR may contain multiple instances, we check for reduction rather than zero.
    """
    ir_before, ir_after = get_ir_texts(graph_dir, device)
    count_pre = count_occurrences(ir_before, op_name)
    count_post = count_occurrences(ir_after, op_name)

    logger.info("%s occurrences - before: %s, after: %s", op_name, count_pre, count_post)
    # The op must exist before the pass, otherwise the check is vacuous.
    assert count_pre >= 1, f"Expected at least one {op_name} op in before-graph IR"
    assert count_post < count_pre, f"Expected {op_name} count to decrease after pass"

    if replacement_op:
        n_replacement = count_occurrences(ir_after, replacement_op)
        logger.info("%s occurrences in after IR: %s", replacement_op, n_replacement)
        assert n_replacement > 0, f"Expected at least one {replacement_op} op in after-graph IR"
