"""
Fortran lexical analyzer for CodeMCP framework.

This module provides lexical analysis capabilities for Fortran source code,
supporting both free-form and fixed-form formats.
"""

import re
from typing import List, Dict, Any, Optional, Tuple
from enum import Enum


class FortranFormat(Enum):
    """Fortran source format types."""
    FREE_FORM = "free"      # Modern free-form layout (Fortran 90 and later)
    FIXED_FORM = "fixed"    # Legacy column-sensitive layout (FORTRAN 77 style)
    UNKNOWN = "unknown"     # Format not yet determined / not detectable


class TokenType(Enum):
    """Fortran token types.

    The enum value is either the literal source spelling of the token
    (operators, delimiters) or a short descriptive tag (identifiers,
    literals, structural markers).
    """
    # Identifiers and literals
    IDENTIFIER = "identifier"
    INTEGER = "integer"
    REAL = "real"
    COMPLEX = "complex"
    STRING = "string"
    LOGICAL = "logical"     # .true. / .false.
    
    # Keywords (only the subset the lexer distinguishes; other keywords
    # are emitted as IDENTIFIER)
    PROGRAM = "program"
    MODULE = "module"
    SUBMODULE = "submodule"
    FUNCTION = "function"
    SUBROUTINE = "subroutine"
    END = "end"
    USE = "use"
    CALL = "call"
    IF = "if"
    THEN = "then"
    ELSE = "else"
    DO = "do"
    WHILE = "while"
    TYPE = "type"
    INTERFACE = "interface"
    COMMON = "common"
    EQUIVALENCE = "equivalence"
    GOTO = "goto"
    RETURN = "return"
    STOP = "stop"
    
    # Operators
    ASSIGN = "="
    PLUS = "+"
    MINUS = "-"
    MULTIPLY = "*"
    DIVIDE = "/"
    POWER = "**"
    EQ = "=="
    NE = "/="               # the lexer also recognizes the legacy '<>' spelling
    LT = "<"
    LE = "<="
    GT = ">"
    GE = ">="
    AND = ".and."
    OR = ".or."
    NOT = ".not."
    
    # Delimiters
    LPAREN = "("
    RPAREN = ")"
    COMMA = ","
    COLON = ":"
    SEMICOLON = ";"
    PERCENT = "%"           # derived-type component access
    
    # Special / structural markers
    NEWLINE = "newline"
    COMMENT = "comment"
    CONTINUATION = "continuation"
    LABEL = "label"         # numeric statement label (fixed-form columns 1-5)
    PREPROCESSOR = "preprocessor"
    EOF = "eof"


class Token:
    """A single lexical token: its type, source text, and position.

    Position is the 1-based line number plus the 0-based column at which
    the token's text begins.
    """

    def __init__(self, type_: TokenType, value: str, line: int, column: int):
        self.type, self.value = type_, value
        self.line, self.column = line, column

    def __repr__(self):
        return "Token({0}, '{1}', {2}:{3})".format(
            self.type, self.value, self.line, self.column)


class FortranLexer:
    """
    Lexical analyzer for Fortran source code.

    Supports both free-form and fixed-form Fortran formats, with handling
    of statement labels, continuation lines, comments, and preprocessor
    directives.

    Typical usage::

        lexer = FortranLexer()
        tokens = lexer.tokenize(source_text)
    """

    # Fortran keywords (matched case-insensitively).
    KEYWORDS = {
        'program', 'end', 'module', 'submodule', 'use', 'only', 'implicit', 'none',
        'integer', 'real', 'complex', 'character', 'logical', 'double', 'precision',
        'parameter', 'dimension', 'allocatable', 'pointer', 'target', 'intent',
        'in', 'out', 'inout', 'optional', 'save', 'public', 'private',
        'function', 'subroutine', 'call', 'return', 'contains', 'result',
        'interface', 'procedure', 'abstract', 'generic', 'operator', 'assignment',
        'type', 'class', 'extends', 'bind', 'sequence', 'protected',
        'if', 'then', 'else', 'elseif', 'endif', 'select', 'case', 'default',
        'do', 'while', 'forall', 'where', 'elsewhere', 'enddo', 'cycle', 'exit',
        'goto', 'stop', 'pause', 'continue',
        'common', 'equivalence', 'external', 'intrinsic', 'data', 'block',
        'format', 'entry', 'include'
    }

    # Dot-delimited operators/literals that can also appear as identifiers.
    OPERATOR_KEYWORDS = {
        '.and.', '.or.', '.not.', '.eq.', '.ne.', '.lt.', '.le.', '.gt.', '.ge.',
        '.eqv.', '.neqv.', '.true.', '.false.'
    }

    # Single-character operators and delimiters (built once at class
    # creation instead of on every _match_token call).
    _SINGLE_CHAR_TOKENS = {
        '=': TokenType.ASSIGN,
        '+': TokenType.PLUS,
        '-': TokenType.MINUS,
        '*': TokenType.MULTIPLY,
        '/': TokenType.DIVIDE,
        '<': TokenType.LT,
        '>': TokenType.GT,
        '(': TokenType.LPAREN,
        ')': TokenType.RPAREN,
        ',': TokenType.COMMA,
        ':': TokenType.COLON,
        ';': TokenType.SEMICOLON,
        '%': TokenType.PERCENT,
    }

    # Keywords that get a dedicated TokenType; every other keyword is
    # emitted as IDENTIFIER (see _keyword_to_token_type).
    _KEYWORD_TOKENS = {
        'program': TokenType.PROGRAM,
        'module': TokenType.MODULE,
        'submodule': TokenType.SUBMODULE,
        'function': TokenType.FUNCTION,
        'subroutine': TokenType.SUBROUTINE,
        'end': TokenType.END,
        'use': TokenType.USE,
        'call': TokenType.CALL,
        'if': TokenType.IF,
        'then': TokenType.THEN,
        'else': TokenType.ELSE,
        'do': TokenType.DO,
        'while': TokenType.WHILE,
        'type': TokenType.TYPE,
        'interface': TokenType.INTERFACE,
        'common': TokenType.COMMON,
        'equivalence': TokenType.EQUIVALENCE,
        'goto': TokenType.GOTO,
        'return': TokenType.RETURN,
        'stop': TokenType.STOP,
    }

    def __init__(self):
        self.source = ""
        self.lines = []
        self.current_line = 0
        self.current_column = 0
        self.format = FortranFormat.UNKNOWN

        # Regex patterns, compiled once per lexer instance.
        self._compile_patterns()

    def _compile_patterns(self):
        """Compile the regex patterns used during tokenization."""
        # A Fortran "kind" suffix is an underscore followed by a kind name
        # or digit string (e.g. 1.0_dp, 42_8).  The previous patterns used
        # [_\w]* here, which greedily swallowed ANY trailing word characters
        # and made "1.eq.2" lex as the bogus REAL token "1.eq".
        kind = r'(?:_\w+)?'
        self.patterns = {
            'comment_c': re.compile(r'^[Cc*].*$'),        # fixed-form: C/c/* in column 1
            'comment_excl': re.compile(r'!.*$'),          # trailing '!' comment
            'preprocessor': re.compile(r'^\s*#.*$'),      # cpp-style directive
            'label': re.compile(r'^\s*(\d{1,5})\s+'),     # numeric statement label
            'continuation_fixed': re.compile(r'^\s{5}[^\s0]'),  # non-blank column 6
            'continuation_free': re.compile(r'&\s*$'),    # trailing '&'
            'string_single': re.compile(r"'([^']*(?:''[^']*)*)'"),  # '' escapes a quote
            'string_double': re.compile(r'"([^"]*(?:""[^"]*)*)"'),  # "" escapes a quote
            'real': re.compile(
                r'\d+\.\d*(?:[eE][+-]?\d+)?' + kind
                + r'|\d+[eE][+-]?\d+' + kind
                + r'|\.\d+(?:[eE][+-]?\d+)?' + kind),
            'integer': re.compile(r'\d+' + kind),
            'identifier': re.compile(r'[a-zA-Z_]\w*'),
            'operator_power': re.compile(r'\*\*'),
            'operator_eq': re.compile(r'=='),
            'operator_ne': re.compile(r'/=|<>'),
            'operator_le': re.compile(r'<='),
            'operator_ge': re.compile(r'>='),
            'operator_dot': re.compile(r'\.[a-zA-Z_]\w*\.'),
        }

    def detect_format(self, source: str) -> FortranFormat:
        """
        Detect whether the source is free-form or fixed-form Fortran.

        Heuristic only: at most the first 20 lines are inspected, and the
        result defaults to free form when no indicator is found.

        Args:
            source: Fortran source code

        Returns:
            Detected Fortran format
        """
        for line in source.split('\n')[:20]:
            if not line.strip():
                continue

            # Free-form indicators: a trailing '&' continuation, an indented
            # line longer than 72 columns, or the '::' declaration separator.
            if (('&' in line and line.strip().endswith('&'))
                    or (line.startswith('    ') and len(line) > 72)
                    or '::' in line):
                return FortranFormat.FREE_FORM

            # Fixed-form indicators: a comment marker in column 1, or a
            # numeric label in columns 1-5 with a non-blank, non-zero
            # character in column 6.
            if ((len(line) > 0 and line[0] in 'Cc*')
                    or (len(line) >= 6 and line[5] != ' ' and line[5] != '0'
                        and line[:5].strip().isdigit())):
                return FortranFormat.FIXED_FORM

        # No strong signal either way: assume modern free form.
        return FortranFormat.FREE_FORM

    def tokenize(self, source: str) -> List[Token]:
        """
        Tokenize Fortran source code.

        Args:
            source: Fortran source code to tokenize

        Returns:
            List of tokens, terminated by a single EOF token.
        """
        self.source = source
        self.lines = source.split('\n')
        self.format = self.detect_format(source)

        tokens = []
        for line_num, line in enumerate(self.lines, 1):
            tokens.extend(self._tokenize_line(line, line_num))

        # str.split('\n') always yields at least one (possibly empty) line,
        # so placing EOF at the end of the last line is always well defined.
        tokens.append(Token(TokenType.EOF, '', len(self.lines), len(self.lines[-1])))
        return tokens

    def _tokenize_line(self, line: str, line_num: int) -> List[Token]:
        """
        Tokenize a single line of Fortran code.

        Args:
            line: Line of source code
            line_num: Line number (1-based)

        Returns:
            List of tokens for this line, always ending with a NEWLINE token.
        """
        tokens = []

        # Blank line: just a NEWLINE marker.
        if not line.strip():
            tokens.append(Token(TokenType.NEWLINE, '\n', line_num, len(line)))
            return tokens

        # Preprocessor directives occupy the whole line.
        if self.patterns['preprocessor'].match(line):
            tokens.append(Token(TokenType.PREPROCESSOR, line.strip(), line_num, 0))
            tokens.append(Token(TokenType.NEWLINE, '\n', line_num, len(line)))
            return tokens

        # Fixed-form full-line comments (C, c, or * in column 1).
        if self.format == FortranFormat.FIXED_FORM and self.patterns['comment_c'].match(line):
            tokens.append(Token(TokenType.COMMENT, line.strip(), line_num, 0))
            tokens.append(Token(TokenType.NEWLINE, '\n', line_num, len(line)))
            return tokens

        # Decide continuation on the ORIGINAL line.  The old code stripped
        # the statement label first, which shifted the text so that an
        # ordinary labeled statement (e.g. "  100 continue") could present a
        # non-blank 6th character and be mis-flagged as a continuation.
        if self._is_continuation_line(line):
            tokens.append(Token(TokenType.CONTINUATION, '&', line_num, 0))
            if self.format == FortranFormat.FIXED_FORM:
                # Drop columns 1-6 so the continuation marker (which may be
                # any non-blank character, e.g. a digit) is not tokenized
                # as statement content.
                line = line[6:]
            else:
                # Drop the trailing '&' so it is not re-scanned below.
                line = line.rstrip()[:-1]
        elif self.format == FortranFormat.FIXED_FORM:
            # Extract a numeric statement label from columns 1-5; labels and
            # continuation markers are mutually exclusive in fixed form.
            label_match = self.patterns['label'].match(line)
            if label_match:
                tokens.append(Token(TokenType.LABEL, label_match.group(1), line_num, 0))
                line = line[label_match.end():]

        # Split off a trailing '!' comment, but only when the '!' sits
        # outside a character literal (the old line.find('!') broke on
        # strings such as 'hello!').
        comment_pos = self._find_comment_start(line)
        if comment_pos >= 0:
            tokens.append(Token(TokenType.COMMENT, line[comment_pos:], line_num, comment_pos))
            line = line[:comment_pos]

        # Scan the remaining statement text left to right.
        col = 0
        while col < len(line):
            if line[col].isspace():
                col += 1
                continue
            token, consumed = self._match_token(line[col:], line_num, col)
            if token:
                tokens.append(token)
                col += consumed
            else:
                col += 1  # skip characters the lexer does not recognize

        tokens.append(Token(TokenType.NEWLINE, '\n', line_num, len(line)))
        return tokens

    @staticmethod
    def _find_comment_start(line: str) -> int:
        """Return the index of the first '!' outside quotes, or -1."""
        quote = None  # active quote character while inside a literal
        for idx, ch in enumerate(line):
            if quote is not None:
                if ch == quote:
                    # Closing quote.  A doubled quote ('' or "") closes and
                    # immediately re-opens, which is equivalent for the
                    # purpose of locating an unquoted '!'.
                    quote = None
            elif ch in ('"', "'"):
                quote = ch
            elif ch == '!':
                return idx
        return -1

    def _is_continuation_line(self, line: str) -> bool:
        """Check whether *line* carries a continuation marker."""
        if self.format == FortranFormat.FREE_FORM:
            # Free form: the statement continues when the line ends with '&'.
            return line.rstrip().endswith('&')
        # Fixed form: any character other than blank or '0' in column 6.
        return len(line) >= 6 and line[5] not in ' 0'

    def _match_token(self, text: str, line_num: int, col: int) -> Tuple[Optional[Token], int]:
        """
        Try to match a single token at the beginning of *text*.

        Args:
            text: Remaining (non-empty) statement text.
            line_num: 1-based line number for the emitted token.
            col: Column of *text* within its original line.

        Returns:
            Tuple of (token, characters_consumed), or (None, 0) if nothing
            matched.
        """
        # Character literals first, so quoted text can never be mistaken
        # for numbers, operators, or identifiers.
        for pattern_name in ('string_single', 'string_double'):
            match = self.patterns[pattern_name].match(text)
            if match:
                return Token(TokenType.STRING, match.group(0), line_num, col), match.end()

        # Numeric literals: REAL before INTEGER so "1.5" is not split into
        # INTEGER(1) '.' INTEGER(5).
        match = self.patterns['real'].match(text)
        if match:
            return Token(TokenType.REAL, match.group(0), line_num, col), match.end()

        match = self.patterns['integer'].match(text)
        if match:
            return Token(TokenType.INTEGER, match.group(0), line_num, col), match.end()

        # Multi-character operators before their single-character prefixes
        # ('**' before '*', '<=' before '<', ...).
        for op_pattern, token_type in (
            ('operator_power', TokenType.POWER),
            ('operator_eq', TokenType.EQ),
            ('operator_ne', TokenType.NE),
            ('operator_le', TokenType.LE),
            ('operator_ge', TokenType.GE),
        ):
            match = self.patterns[op_pattern].match(text)
            if match:
                return Token(token_type, match.group(0), line_num, col), match.end()

        # Dot-delimited operators and logical literals (.and., .true., ...).
        match = self.patterns['operator_dot'].match(text)
        if match:
            op_text = match.group(0).lower()
            if op_text == '.and.':
                return Token(TokenType.AND, op_text, line_num, col), match.end()
            if op_text == '.or.':
                return Token(TokenType.OR, op_text, line_num, col), match.end()
            if op_text == '.not.':
                return Token(TokenType.NOT, op_text, line_num, col), match.end()
            if op_text in ('.true.', '.false.'):
                return Token(TokenType.LOGICAL, op_text, line_num, col), match.end()
            # Other dot operators (.eq., user-defined .myop., ...) are
            # reported as identifiers, preserving the original case.
            return Token(TokenType.IDENTIFIER, match.group(0), line_num, col), match.end()

        # Single-character operators and delimiters.
        single = self._SINGLE_CHAR_TOKENS.get(text[0])
        if single is not None:
            return Token(single, text[0], line_num, col), 1

        # Identifiers and keywords (Fortran is case-insensitive; keywords
        # are emitted lower-cased, plain identifiers keep their case).
        match = self.patterns['identifier'].match(text)
        if match:
            word = match.group(0).lower()
            if word in self.KEYWORDS:
                return Token(self._keyword_to_token_type(word), word, line_num, col), match.end()
            return Token(TokenType.IDENTIFIER, match.group(0), line_num, col), match.end()

        return None, 0

    def _keyword_to_token_type(self, keyword: str) -> TokenType:
        """Map a keyword to its dedicated TokenType (IDENTIFIER otherwise)."""
        return self._KEYWORD_TOKENS.get(keyword, TokenType.IDENTIFIER)