/* This is a generated file! */
use once_cell::sync::Lazy;
use sqlfluffrs_types::LexMatcher;
use sqlfluffrs_types::{Token, TokenConfig, RegexModeGroup};

/// Reserved/keyword list for the Impala dialect, materialised once on first
/// access as owned `String`s.
///
/// NOTE: generated data — the spellings (including the upstream oddity
/// "UTC_TMESTAMP", which appears verbatim in the Hive/Impala reserved-word
/// lists this was derived from — do not "fix" it here) must stay in sync
/// with the generator.
pub static IMPALA_KEYWORDS: Lazy<Vec<String>> = Lazy::new(|| {
    // Borrowed table kept compact; converted to owned strings below.
    const KEYWORDS: &[&str] = &[
        "ADD", "AGGREGATE", "ALL", "ALLOCATE", "ALTER", "ANALYTIC",
        "AND", "ANTI", "ANY", "API_VERSION", "ARE", "ARRAY",
        "ARRAY_AGG", "ARRAY_MAX_CARDINALITY", "AS", "ASC", "ASENSITIVE", "ASYMMETRIC",
        "AT", "ATOMIC", "AUTHORIZATION", "AVRO", "BEGIN_FRAME", "BEGIN_PARTITION",
        "BETWEEN", "BIGINT", "BINARY", "BLOB", "BLOCK_SIZE", "BOOLEAN",
        "BOTH", "BUCKETS", "BY", "CACHE", "CACHED", "CALLED",
        "CARDINALITY", "CASCADE", "CASCADED", "CASE", "CAST", "CHANGE",
        "CHAR", "CHARACTER", "CLASS", "CLOB", "CLOSE_FN", "COLLATE",
        "COLLECT", "COLUMN", "COLUMNS", "COMMENT", "COMMIT", "COMPRESSION",
        "COMPUTE", "CONDITION", "CONF", "CONNECT", "CONSTRAINT", "CONTAINS",
        "CONVERT", "COPY", "CORR", "CORRESPONDING", "COVAR_POP", "COVAR_SAMP",
        "CREATE", "CROSS", "CUBE", "CURRENT", "CURRENT_DATE", "CURRENT_DEFAULT_TRANSFORM_GROUP",
        "CURRENT_PATH", "CURRENT_ROLE", "CURRENT_ROW", "CURRENT_SCHEMA", "CURRENT_TIME", "CURRENT_TIMESTAMP",
        "CURRENT_TRANSFORM_GROUP_FOR_TYPE", "CURSOR", "CYCLE", "DATA", "DATABASE", "DATABASES",
        "DATE", "DATETIME", "DAYOFWEEK", "DEALLOCATE", "DEC", "DECFLOAT",
        "DECIMAL", "DECLARE", "DEFAULT", "DEFINE", "DELETE", "DELIMITED",
        "DEREF", "DESC", "DESCRIBE", "DETERMINISTIC", "DISABLE", "DISCONNECT",
        "DISTINCT", "DIV", "DOUBLE", "DROP", "DYNAMIC", "EACH",
        "ELEMENT", "ELSE", "EMPTY", "ENABLE", "ENCODING", "END",
        "END-EXEC", "END_FRAME", "END_PARTITION", "EQUALS", "ESCAPE", "ESCAPED",
        "EVERY", "EXCEPT", "EXCHANGE", "EXEC", "EXECUTE", "EXISTS",
        "EXPLAIN", "EXTENDED", "EXTERNAL", "EXTRACT", "FALSE", "FETCH",
        "FIELDS", "FILEFORMAT", "FILES", "FILTER", "FINALIZE_FN", "FIRST",
        "FLOAT", "FLOOR", "FOLLOWING", "FOR", "FOREIGN", "FORMAT",
        "FORMATTED", "FRAME_ROW", "FREE", "FROM", "FULL", "FUNCTION",
        "FUNCTIONS", "FUSION", "GET", "GLOBAL", "GRANT", "GROUP",
        "GROUPING", "GROUPS", "HASH", "HAVING", "HOLD", "HUDIPARQUET",
        "ICEBERG", "IF", "IGNORE", "ILIKE", "IMPORT", "IN",
        "INCREMENTAL", "INDICATOR", "INITIAL", "INIT_FN", "INNER", "INOUT",
        "INPATH", "INSENSITIVE", "INSERT", "INT", "INTEGER", "INTERMEDIATE",
        "INTERSECT", "INTERSECTION", "INTERVAL", "INTO", "INVALIDATE", "IREGEXP",
        "IS", "JOIN", "JSONFILE", "JSON_ARRAY", "JSON_ARRAYAGG", "JSON_EXISTS",
        "JSON_OBJECT", "JSON_OBJECTAGG", "JSON_QUERY", "JSON_TABLE", "JSON_TABLE_PRIMITIVE", "JSON_VALUE",
        "KUDU", "LARGE", "LAST", "LATERAL", "LEADING", "LEFT",
        "LESS", "LEXICAL", "LIKE", "LIKE_REGEX", "LIMIT", "LINES",
        "LISTAGG", "LOAD", "LOCAL", "LOCALTIMESTAMP", "LOCATION", "LOG10",
        "MACRO", "MANAGEDLOCATION", "MAP", "MATCH", "MATCHES", "MATCH_NUMBER",
        "MATCH_RECOGNIZE", "MERGE", "MERGE_FN", "METADATA", "METHOD", "MINUS",
        "MODIFIES", "MORE", "MULTISET", "NATIONAL", "NATURAL", "NCHAR",
        "NCLOB", "NO", "NON", "NONE", "NORELY", "NORMALIZE",
        "NOT", "NOVALIDATE", "NTH_VALUE", "NULL", "NULLS", "NUMERIC",
        "OCCURRENCES_REGEX", "OCTET_LENGTH", "OF", "OFFSET", "OMIT", "ON",
        "ONE", "ONLY", "OPTIMIZE", "OR", "ORC", "ORDER",
        "OUT", "OUTER", "OVER", "OVERLAPS", "OVERLAY", "OVERWRITE",
        "PARQUET", "PARQUETFILE", "PARTIALSCAN", "PARTITION", "PARTITIONED", "PARTITIONS",
        "PATTERN", "PER", "PERCENT", "PERCENTILE_CONT", "PERCENTILE_DISC", "PORTION",
        "POSITION", "POSITION_REGEX", "PRECEDES", "PRECEDING", "PRECISION", "PREPARE",
        "PREPARE_FN", "PRESERVE", "PRIMARY", "PROCEDURE", "PRODUCED", "PTF",
        "PURGE", "RANGE", "RCFILE", "READS", "REAL", "RECOVER",
        "RECURSIVE", "REDUCE", "REF", "REFERENCES", "REFERENCING", "REFRESH",
        "REGEXP", "REGR_AVGX", "REGR_AVGY", "REGR_COUNT", "REGR_INTERCEPT", "REGR_R2",
        "REGR_SLOPE", "REGR_SXX", "REGR_SXY", "REGR_SYY", "RELEASE", "RELY",
        "RENAME", "REPEATABLE", "REPLACE", "REPLICATION", "RESPECT", "RESTRICT",
        "RETURNS", "REVOKE", "RIGHT", "RLIKE", "ROLE", "ROLES",
        "ROLLBACK", "ROLLUP", "ROW", "ROWS", "RUNNING", "RWSTORAGE",
        "SAVEPOINT", "SCHEMA", "SCHEMAS", "SCOPE", "SCROLL", "SEARCH",
        "SEEK", "SELECT", "SELECTIVITY", "SEMI", "SENSITIVE", "SEQUENCEFILE",
        "SERDEPROPERTIES", "SERIALIZE_FN", "SET", "SETS", "SHOW", "SIMILAR",
        "SKIP", "SMALLINT", "SOME", "SORT", "SPEC", "SPECIFIC",
        "SPECIFICTYPE", "SQLEXCEPTION", "SQLSTATE", "SQLWARNING", "START", "STATIC",
        "STATS", "STORAGEHANDLER_URI", "STORED", "STRAIGHT_JOIN", "STRING", "STRUCT",
        "SUBMULTISET", "SUBSET", "SUBSTRING_REGEX", "SUCCEEDS", "SYMBOL", "SYMMETRIC",
        "SYNC", "SYSTEM_TIME", "SYSTEM_USER", "SYSTEM_VERSION", "TABLE", "TABLES",
        "TABLESAMPLE", "TBLPROPERTIES", "TERMINATED", "TEXTFILE", "THEN", "TIME",
        "TIMESTAMP", "TIMEZONE_HOUR", "TIMEZONE_MINUTE", "TINYINT", "TO", "TRAILING",
        "TRANSFORM", "TRANSLATE_REGEX", "TRANSLATION", "TREAT", "TRIGGER", "TRIM_ARRAY",
        "TRUE", "TRUNCATE", "UESCAPE", "UNBOUNDED", "UNCACHED", "UNION",
        "UNIQUE", "UNIQUEJOIN", "UNKNOWN", "UNNEST", "UNSET", "UPDATE",
        "UPDATE_FN", "UPSERT", "USE", "USER", "USER_DEFINED_FN", "USING",
        "UTC_TMESTAMP", "VALIDATE", "VALUES", "VALUE_OF", "VARBINARY", "VARCHAR",
        "VARYING", "VERSIONING", "VIEW", "VIEWS", "WHEN", "WHENEVER",
        "WHERE", "WIDTH_BUCKET", "WINDOW", "WITH", "WITHIN", "WITHOUT",
        "ZORDER",
    ];
    KEYWORDS.iter().map(|kw| (*kw).to_string()).collect()
});

/// Ordered lexer matcher table for the Impala dialect.
///
/// NOTE(review): generated code. Matchers appear to be tried in this order,
/// so the more specific patterns (comments, quoted strings, numbers,
/// operators) precede the catch-all `word` matcher at the end — confirm
/// against the lexer driver. The long runs of positional `None` arguments
/// are optional `LexMatcher` settings (subdividers, trim hints, quoted-value
/// extraction, escape replacement, etc.); their exact meaning is defined by
/// `LexMatcher::regex_lexer` / `LexMatcher::string_lexer` — consult that API
/// before changing any of them. The trailing `|input| ...` closure on each
/// regex lexer is presumably a cheap first-character pre-check used to skip
/// the regex when it cannot match — TODO confirm.
pub static IMPALA_LEXERS: Lazy<Vec<LexMatcher>> = Lazy::new(|| { vec![

    // Runs of horizontal whitespace: `[^\S\r\n]+` is any whitespace except
    // CR/LF (newlines are handled by the dedicated "newline" matcher below).
    LexMatcher::regex_lexer(
        "whitespace",
        r#"[^\S\r\n]+"#,
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::whitespace_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        |_| true,
        None,
    ),

    // Line comments introduced by `--` or `#`, up to (not including) the
    // newline. The `Some(vec!["--", "#"])` argument supplies those prefixes
    // to the matcher (presumably as trim characters — confirm against
    // `regex_lexer`'s signature).
    LexMatcher::regex_lexer(
        "inline_comment",
        r#"(--|#)[^\n]*"#,
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::comment_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        Some(vec![String::from("--"), String::from("#")]),
        None,
        None,
        None,
        None,
        None,
        // Pre-check covers '#', '-' and also '/', even though the regex only
        // starts with '--' or '#' — harmless (regex still decides).
        |input| input.starts_with(['#','-','/']),
        None,
    ),

    // C-style `/* ... */` block comments. The regex itself is non-nesting;
    // `extract_nested_block_comment` (passed near the end) handles nesting.
    // Two subdividers are provided so the comment body can be split on
    // newlines and whitespace.
    LexMatcher::regex_lexer(
        "block_comment",
        r#"\/\*([^\*]|\*(?!\/))*\*\/"#,
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::comment_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        // Subdivider: split comment contents on CRLF/LF newlines.
        Some(Box::new(
    LexMatcher::regex_subdivider(
        "newline",
        r#"\r\n|\n"#,
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::newline_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        |_| true,
        None,
    ))),
        // Subdivider: split remaining comment contents on horizontal whitespace.
        Some(Box::new(
    LexMatcher::regex_subdivider(
        "whitespace",
        r#"[^\S\r\n]+"#,
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::whitespace_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        |_| true,
        None,
    ))),
        None,
        None,
        None,
        None,
        None,
        // Dialect-aware helper (defined at the bottom of this file) for
        // nested /* /* ... */ */ comments.
        Some(extract_nested_block_comment),
        |input| input.starts_with("/"),
        None,
    ),

    // Single-quoted string literals; backslash escapes and doubled '' both
    // allowed. The `Some((regex, Index(1)))` pair extracts the unquoted
    // value (capture group 1); the following pair rewrites `\'` / `''` to a
    // literal quote.
    LexMatcher::regex_lexer(
        "single_quote",
        r#"'([^'\\]|\\.|'')*'"#,
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        Some((r#"'((?:[^'\\]|\\.|'')*)'"#.to_string(), RegexModeGroup::Index(1))),
        Some((r#"\\'|''"#.to_string(), r#"'"#.to_string())),
        None,
        None,
        // Fast pre-check: a quote, optionally preceded by r/b prefixes in
        // either order/case (raw / binary string literals).
        |input| match input.as_bytes() {
        [b'\'', ..] => true,                     // Single quote case
        [b'R' | b'r', b'\'', ..] => true,        // r' or R'
        [b'B' | b'b', b'\'', ..] => true,        // b' or B'
        [b'R' | b'r', b'B' | b'b', b'\'', ..] => true, // rb', RB', etc.
        [b'B' | b'b', b'R' | b'r', b'\'', ..] => true, // br', Br', etc.
        _ => false,
    },
        None,
    ),

    // Double-quoted string literals; mirrors the single-quote matcher
    // (doubled "" and backslash escapes, value extracted from group 1).
    LexMatcher::regex_lexer(
        "double_quote",
        r#""(""|[^"\\]|\\.)*""#,
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        Some((r#""((?:[^"\\]|\\.)*)""#.to_string(), RegexModeGroup::Index(1))),
        Some((r#"\\"|"""#.to_string(), r#"""#.to_string())),
        None,
        None,
        |input| match input.as_bytes() {
        [b'"', ..] => true,                     // Just a double quote
        [b'R' | b'r', b'"', ..] => true,        // r" or R"
        [b'B' | b'b', b'"', ..] => true,        // b" or B"
        [b'R' | b'r', b'B' | b'b', b'"', ..] => true, // rb", RB", etc.
        [b'B' | b'b', b'R' | b'r', b'"', ..] => true, // br", Br", etc.
        _ => false,
    },
        None,
    ),

    // Back-quoted (identifier-quoting) literals with backslash escapes;
    // `\\\`` is rewritten to a literal backtick.
    LexMatcher::regex_lexer(
        "back_quote",
        r#"`(?:[^`\\]|\\.)*`"#,
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        Some((r#"`((?:[^`\\]|\\.)*)`"#.to_string(), RegexModeGroup::Index(1))),
        Some((r#"\\`"#.to_string(), r#"`"#.to_string())),
        None,
        None,
        |_| true,
        None,
    ),

    // Dollar-quoted strings ($tag$...$tag$). Uses a backreference (\1) and
    // lazy matching — NOTE(review): not supported by the plain `regex`
    // crate, so this presumably relies on a fancy-regex-style backend.
    LexMatcher::regex_lexer(
        "dollar_quote",
        r#"\$(\w*)\$(.*?)\$\1\$"#,
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        Some((r#"\$(\w*)\$(.*?)\$\1\$"#.to_string(), RegexModeGroup::Index(2))),
        None,
        None,
        None,
        |input| input.starts_with("$"),
        None,
    ),

    // Numeric literals: integers, decimals with either side omitted, and
    // scientific notation. Uses an atomic group `(?>...)` and lookbehind —
    // NOTE(review): also requires a fancy-regex-style backend.
    LexMatcher::regex_lexer(
        "numeric_literal",
        r#"(?>\d+\.\d+|\d+\.(?![\.\w])|\.\d+|\d+)(\.?[eE][+-]?\d+)?((?<=\.)|(?=\b))"#,
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::literal_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        // Pre-check also admits x/X (hex-style prefixes) even though the
        // regex starts with a digit or dot — the regex still decides.
        |input| input.starts_with(['x','X','.','0','1','2','3','4','5','6','7','8','9']),
        None,
    ),

    // Obevo deployment-tool annotations: `//// CHANGE|BODY|METADATA ...`
    // to end of line, lexed as comments.
    LexMatcher::regex_lexer(
        "obevo_annotation",
        r#"////\s*(CHANGE|BODY|METADATA)[^\n]*"#,
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::comment_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        |_| true,
        None,
    ),

    // Exact-string match for the `~~~` glob operator. Must precede the
    // regex-based `like_operator` below, which would otherwise consume `~~`.
    LexMatcher::string_lexer(
        "glob_operator",
        "~~~",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::comparison_operator_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // LIKE-style operators: ~, ~~, !~, !~~ with optional trailing *
    // (case-insensitive variants).
    LexMatcher::regex_lexer(
        "like_operator",
        r#"!?~~?\*?"#,
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::comparison_operator_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        |_| true,
        None,
    ),

    // Newlines (CRLF or LF) as dedicated tokens.
    LexMatcher::regex_lexer(
        "newline",
        r#"\r\n|\n"#,
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::newline_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        |_| true,
        None,
    ),

    // `::` cast operator. Ordered before the single-char `colon` matcher.
    LexMatcher::string_lexer(
        "casting_operator",
        "::",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `=` equality/assignment.
    LexMatcher::string_lexer(
        "equals",
        "=",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `>` comparison.
    LexMatcher::string_lexer(
        "greater_than",
        ">",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `<` comparison.
    LexMatcher::string_lexer(
        "less_than",
        "<",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `!` negation (e.g. `!=`).
    LexMatcher::string_lexer(
        "not",
        "!",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `.` — object/column reference separator. Numeric literals with dots
    // are caught earlier by `numeric_literal`.
    LexMatcher::string_lexer(
        "dot",
        ".",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `,` list separator.
    LexMatcher::string_lexer(
        "comma",
        ",",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `+` arithmetic operator.
    LexMatcher::string_lexer(
        "plus",
        "+",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `-` arithmetic operator (a `--` comment is caught earlier by
    // `inline_comment`).
    LexMatcher::string_lexer(
        "minus",
        "-",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `/` division (a `/*` comment is caught earlier by `block_comment`).
    LexMatcher::string_lexer(
        "divide",
        "/",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `%` modulo.
    LexMatcher::string_lexer(
        "percent",
        "%",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `?` — presumably a bind/placeholder marker.
    LexMatcher::string_lexer(
        "question",
        "?",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `&` bitwise-and.
    LexMatcher::string_lexer(
        "ampersand",
        "&",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `|` bitwise-or / concatenation component.
    LexMatcher::string_lexer(
        "vertical_bar",
        "|",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `^` bitwise-xor.
    LexMatcher::string_lexer(
        "caret",
        "^",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `*` multiplication / wildcard.
    LexMatcher::string_lexer(
        "star",
        "*",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `(` opening parenthesis.
    LexMatcher::string_lexer(
        "start_bracket",
        "(",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `)` closing parenthesis.
    LexMatcher::string_lexer(
        "end_bracket",
        ")",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `[` opening square bracket.
    LexMatcher::string_lexer(
        "start_square_bracket",
        "[",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `]` closing square bracket.
    LexMatcher::string_lexer(
        "end_square_bracket",
        "]",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `{` opening curly bracket.
    LexMatcher::string_lexer(
        "start_curly_bracket",
        "{",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `}` closing curly bracket.
    LexMatcher::string_lexer(
        "end_curly_bracket",
        "}",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `:` — single colon (the `::` cast operator is matched earlier).
    LexMatcher::string_lexer(
        "colon",
        ":",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // `;` statement terminator.
    LexMatcher::string_lexer(
        "semicolon",
        ";",
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::code_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
    ),

    // Catch-all: identifiers/keywords as runs of alphanumerics and
    // underscores. Must remain last so every specific matcher above wins.
    LexMatcher::regex_lexer(
        "word",
        r#"[0-9a-zA-Z_]+"#,
        |raw, pos_marker, class_types, instance_types, trim_start, trim_chars,
         quoted_value, escape_replacement, casefold| {
            Token::word_token(raw, pos_marker, TokenConfig {
                class_types, instance_types, trim_start, trim_chars,
                quoted_value, escape_replacement, casefold,
            })
        },
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        None,
        |_| true,
        None,
    ),
]});


// Thin dialect-specific shim: forwards to the shared crate-level
// implementation, supplying this file's dialect name so matcher tables can
// reference a plain `fn(&str) -> Option<&str>`.
fn extract_nested_block_comment(input: &str) -> Option<&str> {
    const DIALECT: &str = "impala";
    crate::extract_nested_block_comment(input, DIALECT)
}
