from collections import defaultdict
from datetime import datetime
import re
import sys
from typing import Dict, List, Set, Tuple

# Maximum chain length explored before a path is force-stopped as truncated.
MAX_DEPTH = 24
# Intended minimum cost (ms) for an unused file to appear in the report.
MIN_COST_TO_REPORT = 1.0  # ms
# Chain enumeration aborts (reports cut_num == -1) once more than this many chains exist.
MAX_RESULT = 20

class FileInfo:
    """One file entry parsed from the redundant-file report."""

    def __init__(self, name: str, file_type: str, cost: float, parent_modules: List[str]):
        # file_type is the section the entry came from: 'used' | 'unused'
        self.name = name
        self.file_type = file_type
        self.cost = cost  # load cost in milliseconds
        self.parent_modules = parent_modules  # identifiers of files that import this one


def parse_file_map(path: str) -> Tuple[Dict[str, FileInfo], List[str]]:
    """
    Parse the redundant file and return:
    - files_map: mapping from file identifier -> FileInfo
    - unused_order: list of unused file identifiers in the original order
    """
    with open(path, 'r', encoding='utf-8') as fh:
        text = fh.read()

    file_map: Dict[str, FileInfo] = {}
    unused_ids: List[str] = []

    # Used files come first in the dump; we keep them in the map so that
    # chain enumeration can stop when it reaches a used node.
    used_part = text.split('<----------used file end---------->')[0]
    used_pattern = r"used file (\d+): ([^,]+), cost time: ([\d.]+)ms\s*((?:\s*parentModule \d+: [^\n]*\n?)*)"
    for match in re.finditer(used_pattern, used_part):
        ident = match.group(2).strip()
        parents_blob = match.group(4) or ""
        parents = [tok.strip() for tok in re.findall(r"parentModule \d+: ([^\s\n]+)", parents_blob)]
        file_map[ident] = FileInfo(ident, 'used', float(match.group(3)), parents)

    # Unused files live in their own delimited section (may be absent).
    start_marker = '<----unused file start---->'
    if start_marker in text:
        unused_part = text.split(start_marker)[1]
        unused_pattern = r"unused file (\d+): ([^,]+), cost time: ([\d.]+)ms\s*((?:\s*parentModule \d+: [^\n]*\n?)*)"
        for match in re.finditer(unused_pattern, unused_part):
            ident = match.group(2).strip()
            parents_blob = match.group(4) or ""
            # Keep only the module token per parent line (lazy match stops at
            # the first whitespace, dropping trailing symbols).
            parents = [
                tok.strip()
                for tok in re.findall(r"parentModule \d+: ([^\n]+?)(?:\s|$)", parents_blob)
                if tok.strip()
            ]
            file_map[ident] = FileInfo(ident, 'unused', float(match.group(3)), parents)
            unused_ids.append(ident)

    return file_map, unused_ids


def sanitize_identifier(s: str) -> str:
    """Trim whitespace and wrapping parentheses, then drop a trailing
    '&<version>' tag (or a bare trailing '&') if present."""
    cleaned = s.strip().strip('()')
    return re.sub(r"&[\w\-.]*$", "", cleaned)


def extract_harmony_module(identifier: str) -> str:
    """
    Extract the Harmony module name from a file/module identifier.
    Rules:
    - '@ohos:' / '@hms:' / '@normalized:' prefixes are kept as-is (normalized to the
      token immediately following the prefix, e.g. '@normalized:Y').
    - '@scope/name/...' -> '@scope/name'
    - '@name/src/...' -> 'name'
    - 'name/...' -> 'name'
    - Otherwise, return the sanitized base token.
    """
    # Sanitize inline: strip wrapping, then drop a trailing '&<version>' tag.
    token = re.sub(r"&[\w\-.]*$", "", identifier.strip().strip('()'))

    # Special system namespaces have no '/src' pattern; keep their first token.
    if token.startswith(('@ohos:', '@hms:')):
        return token.split()[0]

    if token.startswith('@normalized:'):
        matched = re.match(r"(@normalized:[^/&\s]+)", token)
        return matched.group(1) if matched else '@normalized'

    # Preferred form: everything before the '/src' segment is the module name.
    if '/src' in token:
        head = token.split('/src', 1)[0].lstrip('&')
        return head.strip()

    # No '/src' present: fall back to the leading path component.
    return token.lstrip('&').split('/')[0].strip()

def _output_module_info(lines_out: List[str], simplified_modules: Set[str],
                       parent_to_chains: Dict[str, List[List[str]]],
                       file_name: str, info: "FileInfo", files: Dict):
    """Append the module names and pretty-printed import chains for one
    unused file to ``lines_out``.

    Args:
        lines_out: output buffer the report lines are appended to.
        simplified_modules: Harmony module names resolved from the chains.
        parent_to_chains: parent id -> list of chains (each chain is a list
            of node ids ordered from the unused file outward).
        file_name: identifier of the unused file being reported.
        info: FileInfo of the unused file (only ``cost`` is read here).
        files: full id -> FileInfo map, used to annotate chain nodes.
    """
    def _fmt_node(node: str) -> str:
        # Render one chain node as "name (type cost)"; nodes absent from the
        # map are labelled unknown with a 0.000ms cost.
        finfo = files.get(node)
        if finfo is None:
            return f"{sanitize_identifier(node)} (unknown 0.000ms)"
        return f"{sanitize_identifier(node)} ({finfo.file_type} {finfo.cost:.3f}ms)"

    if simplified_modules:
        for m in sorted(simplified_modules):
            lines_out.append(f"   module: {m}")
    else:
        lines_out.append("   module: (none)")

    # Emit import chains (reversed order: from entry point to unused file)
    if parent_to_chains:
        lines_out.append("   chains:")
        for parent, chains in parent_to_chains.items():
            for chain in chains:
                # Chains are stored unused->entry; reverse so the entry point
                # comes first, then append the unused file itself last.
                pretty_parts = [_fmt_node(node) for node in reversed(chain)]
                pretty_parts.append(
                    f"{sanitize_identifier(file_name)} (unused {info.cost:.3f}ms)"
                )

                # The chain's last node is its entry point; derive its module.
                entry_mod = extract_harmony_module(chain[-1]) if chain else ''

                # Use -> to show the flow from entry to unused file
                lines_out.append(
                    "      - " + " -> ".join(pretty_parts) + (f"  (module: {entry_mod})" if entry_mod else "")
                )

    lines_out.append("")



def simplify_unused_parents(input_path: str, output_path: str, trace_target: str | None = None, trace_output: str | None = None) -> None:
    """Parse a redundant-file dump, collapse each unused file's parent chains
    into Harmony module names, group files with identical chain structure,
    and write the simplified report to ``output_path``.

    Args:
        input_path: path of the raw redundant-file dump to parse.
        output_path: path the simplified report is written to.
        trace_target: optional exact file id; when given, all of its import
            chains are appended to the report.
        trace_output: currently unused; kept for interface compatibility.
    """
    files, unused_order = parse_file_map(input_path)

    lines_out: List[str] = []
    lines_out.append('=== Simplified Unused Parents (by Harmony module) ===')
    lines_out.append('')

    def enumerate_chains(start_parent: str) -> Tuple[List[List[str]], int]:
        """Return list of chains (each chain is a list of node ids from parent to stop).

        The second value counts chains truncated by MAX_DEPTH, or is -1 when
        more than MAX_RESULT chains were found and enumeration was aborted.
        """
        results: List[List[str]] = []
        cut_num = 0
        stack: List[Tuple[str, List[str]]] = [(start_parent, [])]

        while stack:
            cur, path = stack.pop()
            if len(results) > MAX_RESULT:
                # Too many chains: abort; caller treats this result as incomplete.
                cut_num = -1
                break
            if len(path) >= MAX_DEPTH:
                # depth limit reached, treat current as stop
                results.append(path + [cur])
                cut_num += 1
                continue

            if cur in path:
                # cycle detected, stop this path
                results.append(path + [cur])
                continue

            # Plain local file (no '@' namespace, no '/Index'): in current module, stop.
            if '@' not in cur and '/Index' not in cur:
                results.append(path + [cur])
                continue

            info = files.get(cur)
            if info is None:
                # unknown -> stop
                results.append(path + [cur])
                continue

            # Stop at first used node to match requested chain semantics
            if info.file_type == 'used':
                results.append(path + [cur])
                continue

            if not info.parent_modules:
                results.append(path + [cur])
                continue

            # Expand upwards through unused parents
            next_path = path + [cur]
            for p in info.parent_modules:
                stack.append((p, next_path))

        return results, cut_num

    groups: Dict[Tuple, List[Tuple[str, object]]] = defaultdict(list)
    individual_modules: List[Tuple[str, object]] = []
    cut_negative_modules: List[Tuple[str, object]] = []  # entries whose enumeration was aborted (cut_num_all == -1)
    for file_name in unused_order:
        info = files[file_name]
        # NOTE: 'break' (not 'continue') — assumes the dump lists unused files
        # in descending cost order, so every later entry is cheaper too.
        if info.cost < MIN_COST_TO_REPORT:
            break

        # Compute simplified parents set and full chains
        simplified_modules: Set[str] = set()
        parent_to_chains: Dict[str, List[List[str]]] = {}
        cut_num_all = 0
        for parent in info.parent_modules:
            chains, cut_num = enumerate_chains(parent)
            if cut_num == -1:
                cut_num_all = cut_num
                break
            cut_num_all += cut_num
            parent_to_chains[parent] = chains
            for chain in chains:
                stop = chain[-1]
                mod = extract_harmony_module(stop)
                if mod:
                    simplified_modules.add(mod)

        # Incomplete enumerations are collected and reported in their own
        # section at the end instead of aborting the whole run.
        if cut_num_all == -1:
            cut_negative_modules.append((file_name, info, simplified_modules, parent_to_chains, cut_num_all))
            continue
        if cut_num_all == 0:
            # Convert parent_to_chains to a hashable key so files sharing the
            # exact same chain structure collapse into one group.
            chains_key = tuple(
                (parent, tuple(tuple(chain) for chain in chains))
                for parent, chains in sorted(parent_to_chains.items())
            )
            group_key = (cut_num_all, chains_key)
            groups[group_key].append((file_name, info, simplified_modules, parent_to_chains))
        else:
            individual_modules.append((file_name, info, simplified_modules, parent_to_chains, cut_num_all))

    # Process grouped modules (cut_num_all == 0 with same parent_to_chains)
    group_idx = 1
    for group_key, group_items in groups.items():
        cut_num_all, _ = group_key

        if len(group_items) == 1:
            # Single item, treat as individual
            file_name, info, simplified_modules, parent_to_chains = group_items[0]
            lines_out.append(f"{group_idx}. {sanitize_identifier(file_name)}")
            lines_out.append(f"   cost: {info.cost:.3f}ms  type: {info.file_type} cut_num: {cut_num_all}")
            _output_module_info(lines_out, simplified_modules, parent_to_chains, file_name, info, files)
            group_idx += 1
        else:
            # Multiple items with same characteristics, group them
            total_cost = sum(info.cost for _, info, _, _ in group_items)
            file_names = [file_name for file_name, _, _, _ in group_items]

            # Use the first item's data for shared characteristics
            _, first_info, simplified_modules, parent_to_chains = group_items[0]

            lines_out.append(f"{group_idx}. [GROUPED {len(group_items)} files]")
            lines_out.append(f"   total_cost: {total_cost:.3f}ms  type: {first_info.file_type} cut_num: {cut_num_all}")
            lines_out.append("   files:")
            for file_name, info, _, _ in group_items:
                lines_out.append(f"     - {sanitize_identifier(file_name)} ({info.cost:.3f}ms)")

            _output_module_info(lines_out, simplified_modules, parent_to_chains, file_names[0], first_info, files)
            group_idx += 1

    # Process individual modules (cut_num_all != 0 or unique)
    for file_name, info, simplified_modules, parent_to_chains, cut_num_all in individual_modules:
        lines_out.append(f"{group_idx}. {sanitize_identifier(file_name)}")
        lines_out.append(f"   cost: {info.cost:.3f}ms  type: {info.file_type} cut_num: {cut_num_all}")
        _output_module_info(lines_out, simplified_modules, parent_to_chains, file_name, info, files)
        group_idx += 1

    # Process cut_num_all == -1 modules (output at the end)
    if cut_negative_modules:
        lines_out.append("=" * 50)
        lines_out.append("FILES WITH CUT_NUM == -1 (Chain enumeration incomplete):")
        lines_out.append("=" * 50)

        for file_name, info, simplified_modules, parent_to_chains, cut_num_all in cut_negative_modules:
            lines_out.append(f"{group_idx}. {sanitize_identifier(file_name)}")
            lines_out.append(f"   cost: {info.cost:.3f}ms  type: {info.file_type} cut_num: {cut_num_all}")
            _output_module_info(lines_out, simplified_modules, parent_to_chains, file_name, info, files)
            group_idx += 1

    # Optional tracing of a specific file (used or unused).
    # BUGFIX: this block used to be nested inside `if cut_negative_modules:`,
    # so tracing silently ran only when incomplete enumerations existed.
    if trace_target:
        tgt = trace_target
        # Produce chains from the exact target
        lines_out.append('=== Trace Target ===')
        if tgt not in files:
            lines_out.append(f"Target not found: {tgt}")
        else:
            finfo = files[tgt]
            lines_out.append(f"target: {sanitize_identifier(tgt)}  type: {finfo.file_type}  cost: {finfo.cost:.3f}ms")
            lines_out.append("   chains:")
            cut_num_all = 0
            for parent in finfo.parent_modules:
                chains, cut_num = enumerate_chains(parent)
                cut_num_all += cut_num
                for chain in chains:
                    pretty_parts: List[str] = []
                    pretty_parts.append(
                        f"{sanitize_identifier(tgt)} ({finfo.file_type} {finfo.cost:.3f}ms)"
                    )
                    for node in chain:
                        ninfo = files.get(node)
                        if ninfo is None:
                            pretty_parts.append(
                                f"{sanitize_identifier(node)} (unknown 0.000ms)"
                            )
                        else:
                            pretty_parts.append(
                                f"{sanitize_identifier(node)} ({ninfo.file_type} {ninfo.cost:.3f}ms)"
                            )
                    stop_mod = extract_harmony_module(chain[-1]) if chain else ''
                    lines_out.append(
                        "      - " + " <- ".join(pretty_parts) + (f"  => module: {stop_mod}" if stop_mod else "")
                    )
            lines_out.append(f"   total cut_num: {cut_num_all}")
            lines_out.append("")

    with open(output_path, 'w', encoding='utf-8') as f:
        f.write('\n'.join(lines_out))


def main(argv: List[str]) -> None:
    """CLI entry point.

    Usage: python simplify_parents.py <input_file> [output_file] [--trace <file_id>]
    """
    input_path = 'com.tencent.videohm_redundant_file.txt'
    datetimestr = datetime.now().strftime('%Y%m%d_%H%M%S')
    output_path = f'simplified_{input_path}_{datetimestr}.txt'
    trace_target = None

    # BUGFIX: a single pass now lets options consume their value, so the
    # argument of '--trace X' can no longer be mistaken for the positional
    # output path (the old two-pass parse read it as argv[2]).
    positionals: List[str] = []
    i = 1
    while i < len(argv):
        a = argv[i]
        if a in ('--trace', '-t'):
            if i + 1 < len(argv):
                trace_target = argv[i + 1]
                i += 2
                continue
        elif not a.startswith('-'):
            positionals.append(a)
        i += 1

    if len(positionals) >= 1:
        input_path = positionals[0]
    if len(positionals) >= 2:
        output_path = positionals[1]

    if len(argv) < 2:
        print('Usage: python simplify_parents.py <input_file> [output_file] [--trace <file_id>]')
        # BUGFIX: the usage text previously advertised defaults that did not
        # match the ones actually used above.
        print(f'Default input_file: {input_path}')
        print(f'Default output_file: {output_path}')

    simplify_unused_parents(input_path, output_path, trace_target=trace_target)
    print(f'Wrote simplified results to: {output_path}')


# Script entry point: forward the raw command line to main().
if __name__ == '__main__':
    main(sys.argv)
