# src/code_knowledge_extractor/main.py

import argparse
import pathlib
import json
import logging
import sys
import ast
import re # Import re

# Use relative imports within the package now
from .config import AppConfig, DEFAULT_EXCLUDE_PATTERNS, DEFAULT_OUTPUT_FILE
from .discovery import find_python_files
from .parser.resolver import Resolver
from .parser.visitor import AstVisitor
from .parser.models import CodeKnowledgeNode # Keep this if you want typed results list
from .linker import link_nodes # Import linker later when implemented
from .utils.path_utils import file_path_to_module_path
from typing import List, Dict

# Configure logging (can be more sophisticated later)
# logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def analyze_project(config: AppConfig) -> List[Dict]:
    """
    Analyze the Python project described by *config*.

    Pass 1 parses every discovered file into knowledge nodes; Pass 2 links
    the nodes (populating backlinks) via :func:`link_nodes`.

    Args:
        config: Validated application configuration (project root,
            compiled exclude patterns, output file).

    Returns:
        A list of node dictionaries with linking applied, or an empty list
        when no Python files were found.
    """
    logger.info(f"Starting analysis for project: {config.project_root}")

    # 1. Discover files.
    python_files = find_python_files(config.project_root, config.exclude_patterns)
    if not python_files:
        logger.warning("No Python files found to analyze.")
        return []

    # 2. Pre-scan: build the module-path map the resolver needs.
    logger.info("Starting pre-scan to build module path map...")
    known_module_paths: Dict[str, pathlib.Path] = {}
    for py_file in python_files:
        module_p = file_path_to_module_path(config.project_root, py_file)
        if module_p is not None:
            known_module_paths[module_p] = py_file
        else:
            # Log the relative path when possible for easier identification.
            try:
                rel_path = py_file.relative_to(config.project_root)
                logger.warning(f"Could not determine module path for: {rel_path}. Skipping.")
            except ValueError:
                logger.warning(f"Could not determine module path for file outside project root: {py_file}. Skipping.")
    logger.info(f"Module path map built. Total known paths: {len(known_module_paths)}")

    # 3. One resolver instance shared by all visitors.
    resolver = Resolver(project_root=config.project_root, known_module_paths=known_module_paths)

    # 4. Pass 1 - extraction: parse each file and collect its nodes.
    logger.info(f"Starting Pass 1: Parsing {len(python_files)} files...")
    all_nodes_pass1: List[CodeKnowledgeNode] = []
    for py_file in python_files:
        module_p = file_path_to_module_path(config.project_root, py_file)
        if module_p is None:
            # Already warned about during the pre-scan above.
            logger.warning(f"Skipping file as module path could not be determined: {py_file}")
            continue

        logger.debug(f"Parsing file: {py_file} (Module: {module_p})")
        try:
            with open(py_file, 'r', encoding='utf-8') as f:
                source_code = f.read()
            tree = ast.parse(source_code, filename=str(py_file))

            visitor = AstVisitor(
                project_root=config.project_root,
                file_path=py_file,
                resolver=resolver,  # pass the single shared resolver
            )
            visitor.visit(tree)
            all_nodes_pass1.extend(visitor.nodes)
        except FileNotFoundError:
            logger.error(f"File not found during parsing: {py_file}")
        except SyntaxError as e:
            logger.error(f"Syntax error in file: {py_file} - {e}")
        except Exception as e:
            # Log with stack trace; a single bad file must not abort the run.
            logger.exception(f"Unexpected error parsing file: {py_file} - {e}")

    # Convert Pydantic models to plain dicts: link_nodes operates on the
    # dictionary structure, not on the model objects.
    nodes_for_linking = [node.model_dump(exclude_none=True) for node in all_nodes_pass1]
    logger.info(f"Pass 1 completed. Extracted {len(nodes_for_linking)} initial nodes.")

    # Pass 2 - linking: populate backlinks between nodes.
    logger.info("Starting Pass 2: Linking nodes...")
    linked_nodes_data = link_nodes(nodes_for_linking)
    logger.info("Pass 2 completed.")

    return linked_nodes_data

# --- Command Line Interface ---
def main_cli():
    """Command-line entry point: parse args, configure logging, run analysis,
    and write the resulting knowledge graph as JSON.

    Exits with status 1 on configuration, analysis, or output errors.
    """
    parser = argparse.ArgumentParser(description="Extract code knowledge graph from a Python project.")
    parser.add_argument(
        "project_dir",
        type=str,
        help="Path to the root directory of the Python project to analyze."
    )
    parser.add_argument(
        "-o", "--output",
        type=str,
        default=DEFAULT_OUTPUT_FILE,  # default comes from config module
        help=f"Path to the output JSON file (default: {DEFAULT_OUTPUT_FILE})."
    )
    parser.add_argument(
        "-e", "--exclude",
        action='append',  # allow the flag to be repeated
        help="Regex pattern for paths to exclude. Can be used multiple times."
    )
    parser.add_argument(
        "-v", "--verbose",
        action="store_true",
        help="Enable verbose debug logging."
    )

    args = parser.parse_args()

    # --- Configure logging ---
    # basicConfig configures the root logger; our module logger inherits its
    # level. force=True ensures this config wins even if another library
    # configured logging first - safe for an application entry point.
    log_level = logging.DEBUG if args.verbose else logging.INFO
    log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=log_level, format=log_format, force=True)
    logger.debug("Logging configured. Level: %s", logging.getLevelName(log_level))

    project_path = pathlib.Path(args.project_dir)
    output_path = pathlib.Path(args.output)

    # --- Build the exclude-pattern list ---
    # Start from the defaults (already compiled) and append CLI patterns.
    current_exclude_patterns = list(DEFAULT_EXCLUDE_PATTERNS)  # copy, don't mutate the default
    if args.exclude:
        for pattern_str in args.exclude:
            try:
                compiled_pattern = re.compile(pattern_str)
                # Avoid adding the exact same compiled object twice.
                if compiled_pattern not in current_exclude_patterns:
                    current_exclude_patterns.append(compiled_pattern)
            except re.error as e:
                logger.warning(f"Invalid regex pattern provided via --exclude, skipping: '{pattern_str}'. Error: {e}")

    logger.debug(f"Final compiled exclude patterns: {[p.pattern for p in current_exclude_patterns]}")

    # --- Build configuration ---
    try:
        config = AppConfig(
            project_root=project_path,
            exclude_patterns=current_exclude_patterns,
            output_file=str(output_path)
        )
        logger.info(f"Configuration loaded for project: {config.project_root}")
        # Log the pattern strings for readability.
        logger.info(f"Excluding patterns: {[p.pattern for p in config.exclude_patterns]}")
        logger.info(f"Output file set to: {config.output_file}")
    except ValueError as e:
        logger.error(f"Configuration error: {e}")
        sys.exit(1)
    except Exception as e:
        logger.exception(f"Unexpected error during configuration: {e}")
        sys.exit(1)

    # --- Run analysis and save output ---
    try:
        code_knowledge_data = analyze_project(config)

        if code_knowledge_data:
            logger.info(f"Saving {len(code_knowledge_data)} knowledge nodes to {config.output_file}...")
            try:
                # Ensure the output directory exists.
                # NOTE(review): assumes AppConfig coerces output_file to a
                # pathlib.Path (a plain str has no .parent) - confirm in config.
                config.output_file.parent.mkdir(parents=True, exist_ok=True)
                with open(config.output_file, 'w', encoding='utf-8') as f:
                    json.dump(code_knowledge_data, f, indent=2, ensure_ascii=False)
                logger.info("Successfully saved knowledge graph.")
            except IOError as e:
                logger.error(f"Error writing output file {config.output_file}: {e}")
                sys.exit(1)
            except Exception as e:
                logger.exception(f"Unexpected error during JSON serialization or writing: {e}")
                sys.exit(1)
        else:
            logger.info("No knowledge data generated.")
    except Exception as e:
        logger.exception(f"An unexpected error occurred during analysis: {e}")
        sys.exit(1)

    logger.info("Process finished.")


# Entry guard: allows running the module directly via
# 'python -m code_knowledge_extractor.main ...' (the console-script entry
# point calls main_cli as well).
if __name__ == '__main__':
    main_cli()