import re
import os
from typing import Dict, Any, List, Optional, Union
from collections import defaultdict
from Tools.build_tree import build_tree
from Tools.print_tree import locate_subroutines_in_files
from Tools.filter_def_names import filter_func_names
import json
# Get the current working directory
# cwd = os.getcwd()
# # Set PYTHONPATH to the current working directory
# os.environ['PYTHONPATH'] = cwd

def tree_to_string(node: dict, prefix: str = '', is_last: bool = True, 
                  subroutine_locations: Dict[str, Dict] = None, 
                  include_steps: bool = False, label_list: List[str] = None, target_subroutine: str = None):
    """Render a call tree as a list of text lines.

    Optionally annotates each node with its extracted calculation steps, and
    can prune the walk once ``target_subroutine`` is found.

    Returns:
        Tuple ``(lines, found)`` where ``found`` tells the caller whether the
        target subroutine was located in this branch.
    """
    branch = '└─ ' if is_last else '├─ '
    child_indent = prefix + ('    ' if is_last else '│   ')

    # Recover the bare routine name by stripping the tree-decoration markers.
    real_name = node['name']
    for marker in ('repeat## ', '** '):
        if real_name.startswith(marker):
            real_name = real_name[len(marker):]
            break

    # Header line for this node, with the argument list when one is present.
    if node.get('args'):
        lines = [f"{prefix}{branch}{node['name']}({node['args']})"]
    else:
        lines = [f"{prefix}{branch}{node['name']}"]

    # Did we just land on the routine the caller is searching for?
    found_here = bool(target_subroutine) and real_name == target_subroutine

    # Optionally inline the routine's calculation steps under its header.
    if include_steps and subroutine_locations and real_name in subroutine_locations:
        loc = subroutine_locations[real_name]
        steps = extract_computation_steps(
            loc['file_path'], loc['start_line'], loc['end_line']
        )

        if steps:
            step_prefix = child_indent + '   '
            lines.append(child_indent + '   [Calculation Steps]:')

            # Compile one assignment-detecting regex per label, once.
            compiled = {}
            if label_list:
                compiled = {var: re.compile(r'\b' + re.escape(var) + r'(\s*\([^)]*\))?\s*=') for var in label_list}

            for step in steps:
                rendered = step_prefix + step
                if label_list:
                    bare = step.strip()
                    # Tag steps that assign one of the tracked labels.
                    if any(rx.search(bare) for rx in compiled.values()):
                        rendered += " ###Initialization###"
                lines.append(rendered)
            lines.append(step_prefix + "======================================")

    # Stop descending once the target is hit, or at a leaf.
    if found_here or not node['children']:
        return lines, found_here

    hit_below = False
    last_idx = len(node['children']) - 1
    for idx, child in enumerate(node['children']):
        sub_lines, sub_hit = tree_to_string(
            child, child_indent, idx == last_idx,
            subroutine_locations, include_steps=include_steps,
            label_list=label_list, target_subroutine=target_subroutine
        )
        lines.extend(sub_lines)
        # Once one branch contains the target, skip the remaining siblings.
        if sub_hit:
            hit_below = True
            break

    return lines, found_here or hit_below


def tree_to_string_orig(node: dict, prefix: str = '', is_last: bool = True, 
                  subroutine_locations: Dict[str, Dict] = None, 
                  include_steps: bool = False, label_list: List[str] = None, target_subroutine: str = None):
    """Legacy variant of ``tree_to_string`` kept for the caller-tree report.

    Unlike ``tree_to_string`` this returns a plain list of lines (no
    ``found`` flag), and when ``target_subroutine`` is given it returns an
    empty list for branches that do not contain the target.

    Parameters mirror ``tree_to_string``; ``label_list`` entries are tagged
    with ``###Initialization###`` when a calculation step assigns them.
    """
    result = []
    connector = '└─ ' if is_last else '├─ '
    
    # Extract the real function name (remove tree-decoration prefix)
    real_name = node['name']
    if real_name.startswith('repeat## '):
        real_name = real_name[9:]
    elif real_name.startswith('** '):
        real_name = real_name[3:]
    
    # Check if the target subroutine is found
    found_target = False
    if target_subroutine and real_name == target_subroutine:
        found_target = True
    
    # If no target subroutine or current node is target, add node information
    if not target_subroutine or found_target:
        if 'args' in node and node['args']:
            result.append(f"{prefix}{connector}{node['name']}({node['args']})")
        else:
            result.append(f"{prefix}{connector}{node['name']}")
    
    # If we need to include calculation steps and have location information, add calculation steps
    if (not target_subroutine or found_target) and include_steps and subroutine_locations and real_name in subroutine_locations:
        loc = subroutine_locations[real_name]
        computation_steps = extract_computation_steps(
            loc['file_path'], loc['start_line'], loc['end_line']
        )
        
        if computation_steps:
            step_prefix = prefix + ('    ' if is_last else '│   ') + '   '
            # Bug fix: append the steps header instead of replacing the whole
            # result list — the old code discarded the node line added above.
            result.append(f"{prefix + ('    ' if is_last else '│   ')}   [Calculation Steps]:")
            
            # Optimization: Pre-compile all regex patterns
            patterns = {}
            if label_list:
                patterns = {var: re.compile(r'\b' + re.escape(var) + r'(\s*\([^)]*\))?\s*=') for var in label_list}
                
            for step in computation_steps:
                step_line = f"{step_prefix}{step}"
                # Use the pre-compiled regexes to tag tracked assignments
                if label_list:
                    stripped_step = step.strip()
                    for var, pattern in patterns.items():
                        if pattern.search(stripped_step):
                            step_line += " ###Initialization###"
                            break
                result.append(step_line)
            result.append(step_prefix + "======================================")
    
    # If found target, don't continue searching
    if found_target:
        return result
    
    # Recursively search children
    if node['children']:
        new_prefix = prefix + ('    ' if is_last else '│   ')
        
        # Store child search results
        children_results = []
        
        for i, child in enumerate(node['children']):
            is_last_child = i == len(node['children']) - 1
            child_result = tree_to_string_orig(child, new_prefix, is_last_child, 
                                         subroutine_locations, include_steps=include_steps,
                                         label_list=label_list, target_subroutine=target_subroutine)
            
            # If a child branch contained the target, emit this node's header
            # followed by that branch only, and stop.
            if target_subroutine and child_result:
                if 'args' in node and node['args']:
                    result = [f"{prefix}{connector}{node['name']}({node['args']})"]
                else:
                    result = [f"{prefix}{connector}{node['name']}"]
                result.extend(child_result)
                return result
            
            if not target_subroutine:
                children_results.extend(child_result)
        
        # If no target subroutine, add all child results
        if not target_subroutine:
            result.extend(children_results)
    
    # If there's a target subroutine but not found, return empty list
    if target_subroutine and not found_target:
        return []
    
    return result

from Tools.build_tree import build_caller_tree

def _format_global_var_names(var_names, subroutine_name, relationships, compiler_type):
    """Sort *var_names* and decorate each with its compiler-mangled symbol
    name when the owning module can be determined via get_variable_module."""
    formatted_vars = []
    for var in sorted(var_names):
        # Find which module this variable belongs to
        module_name = get_variable_module(var, subroutine_name, relationships)
        if module_name:
            if compiler_type == "intel":
                formatted_vars.append(f"{var} ({module_name}_mp_{var}_)")
            else:  # gfortran
                formatted_vars.append(f"{var} (__{module_name}_MOD_{var})")
        else:
            formatted_vars.append(var)  # No module found
    return formatted_vars


def analyze_subroutine_to_string(subroutine_name: str, relationships: Dict[str, List[Dict]], 
                             relationship_labels: Dict[str, List[Dict]] = None, 
                             aliases: Dict[str, str] = None,
                             filter_depth: Union[int, Dict[str, int]] = 2,
                             filter_mode: str = "down",
                             src_directory: str = None,
                             include_steps: bool = False,
                             include_patterns: List[str] = None,
                             exclude_patterns: List[str] = None,
                             root_name: str = None,
                             up_max_search_depth: int = 5,
                             analyze_globals: bool = True,
                             gloabal_vars_label: List[str] = None,
                             target_subroutine: str = None,
                             compiler_type: str = "intel"):
    """Analyze the specified subroutine and render its caller tree, callee
    tree, and (optionally) global-variable usage as a text report.

    Returns:
        Tuple ``(report_text, written_vars, read_vars)``. The variable
        collections are the empty defaults when the subroutine is unknown or
        global analysis is skipped; otherwise they come from
        analyze_global_variables (sets).

    Side effects:
        Writes ``relationships.json`` to the current directory (debug dump)
        and prints global-variable counts to stdout.

    NOTE(review): include_patterns / exclude_patterns are currently unused;
    ``gloabal_vars_label`` keeps its misspelled name for caller compatibility.
    """
    lines = []
    # Initialize global_vars with default values
    global_vars = {'written': [], 'read': []}
    
    lines.append(f"\n===== Analyzing Subroutine: {subroutine_name} =====")
    
    # Check if the subroutine exists either as a caller or as a callee
    if subroutine_name not in relationships and not any(call['func'] == subroutine_name for calls in relationships.values() for call in calls):
        lines.append(f"Error: Subroutine '{subroutine_name}' not found in the codebase")
        # Bug fix: keep the return shape consistent with the success path —
        # previously this returned a bare string, breaking 3-way unpacking.
        return "\n".join(lines), global_vars['written'], global_vars['read']
    
    # Locate all subroutines in the source code files (obtain in advance to pass to tree generation functions)
    subroutine_locations = {}
    if src_directory:
        if isinstance(src_directory, str):
            src_directories = [src_directory]
        else:
            src_directories = src_directory
        
        subroutine_locations = locate_subroutines_in_files(src_directories, recursive=True)
    
    # Get functions that call this subroutine (upward tree)
    lines.append(f"\n----- Functions calling {subroutine_name} -----")
    # Save relationships to file (debug dump)
    with open('relationships.json', 'w', encoding='utf-8') as f:
        json.dump(relationships, f, ensure_ascii=False, indent=4)
    # Exclude self-call cases
    callers_exist = any(caller != subroutine_name and call['func'] == subroutine_name 
                     for caller, calls in relationships.items() 
                     for call in calls)
    if callers_exist:
        caller_tree = build_caller_tree(subroutine_name, relationships, relationship_labels, 
                                        filter_depth=1, root_name=root_name, max_search_depth=up_max_search_depth)
        caller_result = tree_to_string_orig(caller_tree, include_steps=False, 
                                  subroutine_locations=subroutine_locations)
        # Handle the result properly
        if isinstance(caller_result, tuple):
            caller_result = caller_result[0]  # Get the first item if it's a tuple
        
        # Ensure all items in caller_result are strings
        for item in caller_result:
            if isinstance(item, list):
                lines.extend([str(subitem) for subitem in item])
            else:
                lines.append(str(item))
    else:
        lines.append(f"No functions call {subroutine_name}")

    # Get functions called by this subroutine (downward tree)
    lines.append(f"\n----- Functions called by {subroutine_name} -----")
    if subroutine_name in relationships and relationships[subroutine_name]:
        tree_main = build_tree(subroutine_name, relationships, relationship_labels, aliases, 
                         filter_depth=filter_depth, filter_mode=filter_mode)
        tree_result, _ = tree_to_string(tree_main, include_steps=include_steps, 
                                  subroutine_locations=subroutine_locations, 
                                  label_list=gloabal_vars_label,
                                  target_subroutine=target_subroutine)
        # Ensure all items in tree_result are strings before extending lines
        for item in tree_result:
            if isinstance(item, list):
                lines.extend([str(subitem) for subitem in item])
            else:
                lines.append(str(item))
    else:
        lines.append(f"{subroutine_name} does not call other functions")
    
    # Analyze global variable dependencies across the whole call chain
    if analyze_globals and subroutine_locations:
        lines.append(f"\n----- Global Variable Data for {subroutine_name} -----")
        global_vars = analyze_global_variables(subroutine_name, relationships, subroutine_locations)
        # Print the counts of written/read globals to screen
        print(f"Global variable count for {subroutine_name}: {len(global_vars['written'])}")
        print(f"Global variable read count for {subroutine_name}: {len(global_vars['read'])}")

        # Report written variables
        if include_steps and global_vars['written']:
            lines.append("\nVariables written in the subroutine and its call chain:")
            formatted_vars = _format_global_var_names(
                global_vars['written'], subroutine_name, relationships, compiler_type)
            lines.append("  - " + ", ".join(formatted_vars))
            
        # Report read variables
        if include_steps and global_vars['read']:
            lines.append("\nVariables read in the subroutine and its call chain:")
            formatted_vars = _format_global_var_names(
                global_vars['read'], subroutine_name, relationships, compiler_type)
            lines.append("  - " + ", ".join(formatted_vars))
    
    # Return both the analysis string and the collections of global variables
    return "\n".join(lines), global_vars['written'], global_vars['read']


def extract_computation_steps(file_path: str, start_line: int, end_line: int) -> List[str]:
    """
    Extract computation statements from subroutine source code,
    including relevant comments preceding these statements.

    Declarations before the first computation are skipped, control-flow
    statements are dropped, Fortran continuation lines ('&') are joined,
    and comments that look like commented-out code are filtered out.

    Parameters:
        file_path: Source file path
        start_line: Subroutine start line (0-based index used to slice the file)
        end_line: Subroutine end line (inclusive)
        
    Returns:
        List of computation statements and their related comments
    """
    with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
        lines = f.readlines()[start_line:end_line+1]
    
    computation_steps = []
    continuation_line = ""   # accumulates '&'-continued statement fragments
    found_first_computation = False  # Flag if first computation statement has been found
    
    from Tools.filter_def_names import control_keywords
    # Track comments that might be related to the next computation statement
    pending_comments = []
    
    # Patterns to detect variable declarations (skipped until the first
    # computation statement is seen)
    declaration_patterns = [
        r'\bimplicit\b', r'\binteger\b', r'\breal\b', r'\bdouble\s+precision\b',
        r'\bcharacter\b', r'\bcomplex\b', r'\btype\b', r'\bparameter\b',
        r'\bdimension\b', r'\bcommon\b', r'\blocal\b', r'\bexternal\b'
    ]
    
    # Patterns to detect code-like comments (commented-out code is discarded)
    code_comment_patterns = [
        r'\bdo\b', r'\bif\b', r'\bcall\b', r'\bend\b', r'\benddo\b', r'\bendif\b',
        r'=', r'\ballocate\b', r'\bdeallocate\b', r'\bthen\b'
    ]
    
    def is_code_comment(comment):
        """Check if a comment looks like commented-out code"""
        comment_text = comment.lstrip('! ').strip().lower()
        return any(re.search(pattern, comment_text) for pattern in code_comment_patterns)
    
    for line in lines:
        original_line = line.strip()
        
        # Skip subroutine declaration and ending lines
        if original_line.lower().startswith(('subroutine ', 'function ', 'end subroutine', 'end function')):
            pending_comments = []  # Reset comments at subroutine boundaries
            continue
            
        # Handle full-line comments (OpenMP '!$omp' directives are NOT comments)
        if line.strip().startswith('!') and '$omp' not in line:
            # Only collect comments after finding the first computation statement
            if found_first_computation:
                comment_text = "! " + line.strip()[1:].strip()
                if comment_text.strip() and not is_code_comment(comment_text):  # Only keep non-empty and non-code comments
                    pending_comments.append(comment_text)
            continue
            
        # Handle inline comments: split code from trailing '!' comment
        comment_text = ""
        if '!' in line and '$omp' not in line:
            parts = line.split('!', 1)
            line = parts[0]
            comment_text = parts[1].strip()
        
        # Skip empty lines
        line = line.strip()
        if not line:
            continue
            
        # Handle line continuation: prepend the saved fragment
        if continuation_line:
            line = continuation_line + line
            continuation_line = ""
            
        if line.endswith('&'):
            continuation_line = line[:-1]
            continue
        
        # Check if it's a variable declaration (only skipped before the first
        # computation statement — later matches may be executable code)
        line_lower = line.lower()
        is_declaration = any(re.search(pattern, line_lower) for pattern in declaration_patterns)
        if is_declaration and not found_first_computation:
            continue
            
        # Check if it's a control flow statement
        is_control_flow = False
        for keyword in control_keywords:
            if re.match(rf'\b{keyword}\b', line_lower):
                is_control_flow = True
                break
                
        # Include assignment statements and function calls, but not control flow.
        # NOTE(review): 'and' binds tighter than 'or', so this reads as
        # '=' in line OR ('(' in line AND ')' in line) — assignments or calls.
        if not is_control_flow and ('=' in line or '(' in line and ')' in line):
            # Mark first computation statement as found
            found_first_computation = True
            
            # Clean up statement for readability (collapse runs of whitespace)
            clean_line = re.sub(r'\s+', ' ', line).strip()
            
            # Add inline comment if present
            if comment_text:
                clean_line += "  ! " + comment_text
            
            # Add pending comments before computation line, but filter out code comments
            if pending_comments:
                filtered_comments = [c for c in pending_comments if not is_code_comment(c)]
                computation_steps.extend(filtered_comments)
                pending_comments = []  # Reset after use
            
            computation_steps.append(clean_line)
        elif found_first_computation:
            # For non-computation lines, keep their pending comments only when
            # they look like documentation (param/input/output/purpose/...)
            if pending_comments and any(("param" in c.lower() or "input" in c.lower() or 
                                       "output" in c.lower() or "return" in c.lower() or
                                       "purpose" in c.lower() or "description" in c.lower())
                                      for c in pending_comments):
                filtered_comments = [c for c in pending_comments if not is_code_comment(c)]
                computation_steps.extend(filtered_comments)
            pending_comments = []
    
    return computation_steps

def _balanced_call_args(line, func_name):
    """Return the argument text of the first ``func_name( ... )`` call in
    *line*, honouring nested parentheses, or None when no call is present."""
    match = re.search(r'\b' + func_name + r'\b\s*\(', line, re.IGNORECASE)
    if match is None:
        return None
    depth = 1
    start = match.end()
    for pos in range(start, len(line)):
        ch = line[pos]
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
            if depth == 0:
                return line[start:pos]
    # Parentheses never closed (likely a continued line): best-effort tail.
    return line[start:]


def _split_top_level(text):
    """Split *text* on commas that are not nested inside parentheses."""
    parts, buf, depth = [], [], 0
    for ch in text:
        if ch == ',' and depth == 0:
            parts.append(''.join(buf).strip())
            buf = []
            continue
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth = max(0, depth - 1)
        buf.append(ch)
    if buf:
        parts.append(''.join(buf).strip())
    return parts


def extract_global_variables(file_path, start_line, end_line):
    """
    Extract global variables from a subroutine source code.
    
    Parameters:
        file_path: Source file path
        start_line: Subroutine start line (0-based index used to slice the file)
        end_line: Subroutine end line (inclusive)
        
    Returns:
        Dictionary with 'read' and 'write' sets of global variables.
        COMMON/USE variables are reported as 'scope::name'; bare names are
        kept only when they also appear in an ALLOCATE statement.
    """
    try:
        with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
            lines = f.readlines()[start_line:end_line+1]
    except Exception as e:
        print(f"Debug: Error reading file: {e}")
        return {'read': set(), 'write': set()}
    
    # Temporary storage for all potential global variables
    potential_globals = {'read': set(), 'write': set()}
    # Set to store variables found in allocate statements
    allocatable_vars = set()
    
    # COMMON /name/ var-list
    common_pattern = re.compile(r'\bcommon\s*/([^/]*)/\s*(.*)', re.IGNORECASE)
    # USE module [, only: ...]
    use_pattern = re.compile(r'\buse\s+(\w+)(?:\s*,\s*only\s*:\s*(.+))?', re.IGNORECASE)
    # Cheap pre-check for ALLOCATE statements (word boundary so that e.g.
    # the ALLOCATED(...) intrinsic is not mistaken for an allocation)
    allocate_pattern = re.compile(r'\ballocate\b\s*\(', re.IGNORECASE)
    
    for line in lines:
        line = line.strip()
        # Skip empty lines and full-line comments
        if not line or line.startswith('!'):
            continue
            
        # Remove inline comments (cannot start with '!' at this point)
        if '!' in line:
            line = line.split('!')[0].strip()
        
        # Check for COMMON blocks
        common_match = common_pattern.search(line)
        if common_match:
            common_block_name = common_match.group(1).strip()
            variables = common_match.group(2).strip()
            for var in re.split(r'\s*,\s*', variables):
                if var:  # Skip empty strings
                    # Add to both read and write as we can't determine usage here
                    potential_globals['read'].add(f"{common_block_name}::{var}")
                    potential_globals['write'].add(f"{common_block_name}::{var}")
        
        # Check for USE statements
        use_match = use_pattern.search(line)
        if use_match:
            module_name = use_match.group(1)
            if use_match.group(2):  # ONLY clause
                only_vars = re.split(r'\s*,\s*', use_match.group(2))
                for var in only_vars:
                    var = var.strip()
                    if var and not var.startswith('operator') and not var.startswith('procedure'):
                        # Clean up any rename syntax (var => local_name)
                        if '=>' in var:
                            var = var.split('=>')[0].strip()
                        potential_globals['read'].add(f"{module_name}::{var}")
            else:
                # Using entire module, can't determine specific variables here
                potential_globals['read'].add(f"{module_name}::*")
                
        # Check for allocate statements
        if allocate_pattern.search(line):
            # Bug fix: extract the argument list with balanced parentheses and
            # split only on top-level commas, so allocate(a(n,m), b(k)) yields
            # 'a' and 'b' instead of 'a' and the bogus 'm' (the old non-greedy
            # regex stopped at the first ')' and split naively on every comma).
            allocate_args = _balanced_call_args(line, 'allocate')
            if allocate_args:
                for alloc_item in _split_top_level(allocate_args):
                    # Extract just the variable name (before any parentheses)
                    var_match = re.match(r'([a-zA-Z]\w*)(?:\(|$)', alloc_item)
                    if var_match:
                        var_name = var_match.group(1)
                        allocatable_vars.add(var_name)
                        potential_globals['write'].add(var_name)
        
        # Check for variable access (simplified)
        if '=' in line:
            # Left side of assignment is a write
            left_side = line.split('=')[0].strip()
            left_var_match = re.match(r'([a-zA-Z]\w*)(?:\([^)]*\)|\[[^\]]*\])?$', left_side)
            if left_var_match:
                var_name = left_var_match.group(1)
                # Only consider it global if it's not a local variable
                # This is a simplification - would need full scope analysis for accuracy
                potential_globals['write'].add(var_name)
            
            # Right side of assignment contains reads
            right_side = line.split('=', 1)[1].strip()
            for var_match in re.finditer(r'\b([a-zA-Z]\w*)\b', right_side):
                var_name = var_match.group(1)
                if var_name.lower() not in ['if', 'then', 'else', 'endif', 'do', 'enddo', 'while', 'call']:
                    potential_globals['read'].add(var_name)
    
    # Filter global variables to only include those found in allocate statements
    # or carrying an explicit 'scope::' qualifier
    global_vars = {
        'read': {var for var in potential_globals['read'] if var in allocatable_vars or '::' in var},
        'write': {var for var in potential_globals['write'] if var in allocatable_vars or '::' in var}
    }
    
    return global_vars

def analyze_global_variables(subroutine_name, relationships, subroutine_locations):
    """
    Recursively analyze global variables required by a subroutine and every
    routine reachable from it through the call graph.
    
    Parameters:
        subroutine_name: Target subroutine name
        relationships: Dict mapping caller name -> list of {'func': callee} dicts
        subroutine_locations: Dict mapping subroutine names to
            {'file_path', 'start_line', 'end_line'} locations
        
    Returns:
        Dictionary {'written': set, 'read': set} aggregated over the call chain
    """
    visited = set()  # guards against call-graph cycles and repeated work
    required_globals = {'written': set(), 'read': set()}
    
    def analyze_recursive(func_name):
        # Each routine is scanned at most once, even in recursive call graphs
        if func_name in visited:
            return
        visited.add(func_name)
        
        # Extract global variables from this function's own source, if located
        if func_name in subroutine_locations:
            loc = subroutine_locations[func_name]
            globals_info = extract_global_variables(loc['file_path'], loc['start_line'], loc['end_line'])
            
            # Track both read and written variables
            required_globals['written'].update(globals_info['write'])
            required_globals['read'].update(globals_info['read'])
            
        # Recursively analyze every callee
        if func_name in relationships:
            for call in relationships[func_name]:
                analyze_recursive(call['func'])
    
    analyze_recursive(subroutine_name)
    # Leftover debug print removed: it wrote to stdout on every call.
    return required_globals

def get_variable_module(variable_name, subroutine_name, relationships):
    """
    Determine which module a global variable belongs to by examining the
    program structure.

    Args:
        variable_name: Name of the variable to find
        subroutine_name: Current subroutine being analyzed (currently unused)
        relationships: Data structure containing program relationships

    Returns:
        Module name or None if not found
    """
    for module_name, entry in relationships.items():
        # Only module entries (dicts) are inspected; call lists are skipped.
        if not isinstance(entry, dict) or entry.get('type') != 'module':
            continue

        # Simple form: the module lists its variables directly.
        if 'variables' in entry and variable_name in entry['variables']:
            return module_name

        # Detailed form: declarations carry per-variable records.
        for decl in entry.get('declarations', ()):
            if decl.get('name') == variable_name:
                return module_name

    # Not declared by any known module. Tracing USE statements up the call
    # hierarchy would be needed to resolve imported variables.
    return None
