from __future__ import annotations

from decimal import Decimal
from itertools import groupby
import re
import typing as t

from sqlglot import exp, generator, parser, tokens, transforms

from sqlglot.dialects.dialect import (
    Dialect,
    JSON_EXTRACT_TYPE,
    NormalizationStrategy,
    approx_count_distinct_sql,
    arrow_json_extract_sql,
    binary_from_function,
    bool_xor_sql,
    build_default_decimal_type,
    count_if_to_sum,
    date_delta_to_binary_interval_op,
    date_trunc_to_time,
    datestrtodate_sql,
    no_datetime_sql,
    encode_decode_sql,
    build_formatted_time,
    no_comment_column_constraint_sql,
    no_time_sql,
    no_timestamp_sql,
    pivot_column_names,
    rename_func,
    remove_from_array_using_filter,
    strposition_sql,
    str_to_time_sql,
    timestrtotime_sql,
    unit_to_str,
    sha256_sql,
    build_regexp_extract,
    explode_to_unnest_sql,
    no_make_interval_sql,
    groupconcat_sql,
    inline_array_unless_query,
    regexp_replace_global_modifier,
    sha2_digest_sql,
)
from sqlglot.generator import unsupported_args
from sqlglot.helper import is_date_unit, seq_get
from sqlglot.tokens import TokenType
from sqlglot.parser import binary_range_parser

# Regex to detect time zones in timestamps of the form [+|-]TT[:tt]
# The pattern matches timezone offsets that appear after the time portion
# (i.e. after a ":MM" component), so plain dates are never matched.
TIMEZONE_PATTERN = re.compile(r":\d{2}.*?[+\-]\d{2}(?::\d{2})?")

# Characters that must be escaped when building regex expressions in INITCAP
# (all of them are metacharacters inside a regex character class)
REGEX_ESCAPE_REPLACEMENTS = {
    "\\": "\\\\",
    "-": r"\-",
    "^": r"\^",
    "[": r"\[",
    "]": r"\]",
}

# Whitespace control characters that DuckDB must process with `CHR({val})` calls
# (maps each character to its decimal code point)
WS_CONTROL_CHARS_TO_DUCK = {
    "\u000b": 11,
    "\u001c": 28,
    "\u001d": 29,
    "\u001e": 30,
    "\u001f": 31,
}


# BigQuery -> DuckDB conversion for the DATE function
def _date_sql(self: DuckDB.Generator, expression: exp.Date) -> str:
    """Render BigQuery's DATE(x[, zone]) in DuckDB syntax."""
    zone_sql = self.sql(expression, "zone")
    date_cast = f"CAST({self.sql(expression, 'this')} AS DATE)"

    if not zone_sql:
        return date_cast

    # Format the date, append the zone name and parse the combined string back,
    # which yields a value carrying the requested time zone information
    formatted = self.func("STRFTIME", date_cast, "'%d/%m/%Y'")
    return self.func("STRPTIME", f"{formatted} || ' ' || {zone_sql}", "'%d/%m/%Y %Z'")


# BigQuery -> DuckDB conversion for the TIME_DIFF function
def _timediff_sql(self: DuckDB.Generator, expression: exp.TimeDiff) -> str:
    """Render BigQuery's TIME_DIFF as DuckDB's DATE_DIFF over TIME casts."""
    # Although the two dialects share similar signatures, BQ seems to inverse
    # the sign of the result, so the start/end time operands are flipped here
    end = exp.cast(expression.expression, exp.DataType.Type.TIME)
    start = exp.cast(expression.this, exp.DataType.Type.TIME)

    return self.func("DATE_DIFF", unit_to_str(expression), end, start)


@unsupported_args(("expression", "DuckDB's ARRAY_SORT does not support a comparator."))
def _array_sort_sql(self: DuckDB.Generator, expression: exp.ArraySort) -> str:
    # Only the array operand is emitted; a comparator arg is flagged by the decorator.
    return self.func("ARRAY_SORT", expression.this)


def _sort_array_sql(self: DuckDB.Generator, expression: exp.SortArray) -> str:
    """Render SORT_ARRAY, using the descending variant when `asc` is explicitly false."""
    if expression.args.get("asc") == exp.false():
        return self.func("ARRAY_REVERSE_SORT", expression.this)

    return self.func("ARRAY_SORT", expression.this)


def _build_sort_array_desc(args: t.List) -> exp.Expression:
    # ARRAY_REVERSE_SORT / LIST_REVERSE_SORT parse into a descending SortArray.
    return exp.SortArray(this=seq_get(args, 0), asc=exp.false())


def _build_date_diff(args: t.List) -> exp.Expression:
    # DuckDB's DATE_DIFF(part, start, end): the last arg becomes `this` (the end).
    return exp.DateDiff(this=seq_get(args, 2), expression=seq_get(args, 1), unit=seq_get(args, 0))


def _build_generate_series(end_exclusive: bool = False) -> t.Callable[[t.List], exp.GenerateSeries]:
    """Return a builder for GENERATE_SERIES / RANGE (the latter excludes the end bound)."""

    def _builder(args: t.List) -> exp.GenerateSeries:
        # Check https://duckdb.org/docs/sql/functions/nested.html#range-functions
        # With a single argument, DuckDB defaults the series' start to 0
        if len(args) == 1:
            args.insert(0, exp.Literal.number("0"))

        series = exp.GenerateSeries.from_arg_list(args)
        series.set("is_end_exclusive", end_exclusive)
        return series

    return _builder


def _build_make_timestamp(args: t.List) -> exp.Expression:
    """Parse MAKE_TIMESTAMP, which takes either epoch microseconds or date parts."""
    if len(args) == 1:
        # Single-argument form: microseconds since the epoch
        return exp.UnixToTime(this=seq_get(args, 0), scale=exp.UnixToTime.MICROS)

    part_names = ("year", "month", "day", "hour", "min", "sec")
    return exp.TimestampFromParts(
        **{name: seq_get(args, i) for i, name in enumerate(part_names)}
    )


def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[DuckDB.Parser], exp.Show]:
    # Factory so SHOW_PARSERS entries can bind their arguments up front.
    def _parse(self: DuckDB.Parser) -> exp.Show:
        return self._parse_show_duckdb(*args, **kwargs)

    return _parse


def _struct_sql(self: DuckDB.Generator, expression: exp.Struct) -> str:
    """Render a STRUCT literal, emulating BigQuery's inline-cast construction when needed."""
    args: t.List[str] = []

    # BigQuery allows inline construction such as "STRUCT<a STRING, b INTEGER>('str', 1)" which is
    # canonicalized to "ROW('str', 1) AS STRUCT(a TEXT, b INT)" in DuckDB
    # The transformation to ROW will take place if:
    #  1. The STRUCT itself does not have proper fields (key := value) as a "proper" STRUCT would
    #  2. A cast to STRUCT / ARRAY of STRUCTs is found
    ancestor_cast = expression.find_ancestor(exp.Cast)
    is_bq_inline_struct = (
        (expression.find(exp.PropertyEQ) is None)
        and ancestor_cast
        and any(
            casted_type.is_type(exp.DataType.Type.STRUCT)
            for casted_type in ancestor_cast.find_all(exp.DataType)
        )
    )

    for i, expr in enumerate(expression.expressions):
        is_property_eq = isinstance(expr, exp.PropertyEQ)
        value = expr.expression if is_property_eq else expr

        if is_bq_inline_struct:
            # ROW(...) carries values only; field names come from the enclosing cast
            args.append(self.sql(value))
        else:
            if is_property_eq:
                if isinstance(expr.this, exp.Identifier):
                    # Identifier keys are rendered as quoted string literals
                    key = self.sql(exp.Literal.string(expr.name))
                else:
                    key = self.sql(expr.this)
            else:
                # Positional (unnamed) fields get synthetic names: _0, _1, ...
                key = self.sql(exp.Literal.string(f"_{i}"))

            args.append(f"{key}: {self.sql(value)}")

    csv_args = ", ".join(args)

    return f"ROW({csv_args})" if is_bq_inline_struct else f"{{{csv_args}}}"


def _datatype_sql(self: DuckDB.Generator, expression: exp.DataType) -> str:
    """Render a DataType, special-casing arrays and zone-aware time types."""
    if expression.is_type("array"):
        inner_type = self.expressions(expression, flat=True)
        size = self.expressions(expression, key="values", flat=True)
        return f"{inner_type}[{size}]"

    # Modifiers are not supported for TIME, [TIME | TIMESTAMP] WITH TIME ZONE
    if expression.is_type(
        exp.DataType.Type.TIME, exp.DataType.Type.TIMETZ, exp.DataType.Type.TIMESTAMPTZ
    ):
        return expression.this.value

    return self.datatype_sql(expression)


def _json_format_sql(self: DuckDB.Generator, expression: exp.JSONFormat) -> str:
    # TO_JSON yields a JSON value; cast it to TEXT so the result is a plain string.
    sql = self.func("TO_JSON", expression.this, expression.args.get("options"))
    return f"CAST({sql} AS TEXT)"


def _unix_to_time_sql(self: DuckDB.Generator, expression: exp.UnixToTime) -> str:
    """Pick the DuckDB epoch-conversion function matching the requested scale."""
    value = expression.this
    scale = expression.args.get("scale")

    if scale == exp.UnixToTime.MICROS:
        return self.func("MAKE_TIMESTAMP", value)
    if scale == exp.UnixToTime.MILLIS:
        return self.func("EPOCH_MS", value)
    if scale is None or scale == exp.UnixToTime.SECONDS:
        return self.func("TO_TIMESTAMP", value)

    # Arbitrary scale: normalize the value down to seconds before converting
    return self.func("TO_TIMESTAMP", exp.Div(this=value, expression=exp.func("POW", 10, scale)))


WRAPPED_JSON_EXTRACT_EXPRESSIONS = (exp.Binary, exp.Bracket, exp.In)


def _arrow_json_extract_sql(self: DuckDB.Generator, expression: JSON_EXTRACT_TYPE) -> str:
    """Render arrow-style JSON extraction, adding parens when the parent requires them."""
    sql = arrow_json_extract_sql(self, expression)

    needs_wrap = not expression.same_parent and isinstance(
        expression.parent, WRAPPED_JSON_EXTRACT_EXPRESSIONS
    )
    return self.wrap(sql) if needs_wrap else sql


def _implicit_datetime_cast(
    arg: t.Optional[exp.Expression], type: exp.DataType.Type = exp.DataType.Type.DATE
) -> t.Optional[exp.Expression]:
    """Wrap string literals in a cast to `type`, upgrading DATE to a timestamp
    type when the literal contains a time (and possibly a zone) component."""
    if not (isinstance(arg, exp.Literal) and arg.is_string):
        return arg

    text = arg.name
    if type == exp.DataType.Type.DATE and ":" in text:
        # A ":" implies a time portion; a zone offset upgrades further to TIMESTAMPTZ
        if TIMEZONE_PATTERN.search(text):
            type = exp.DataType.Type.TIMESTAMPTZ
        else:
            type = exp.DataType.Type.TIMESTAMP

    return exp.cast(arg, type)


def _date_diff_sql(self: DuckDB.Generator, expression: exp.DateDiff) -> str:
    """Render DATE_DIFF, implicitly casting string-literal operands to date/timestamp."""
    start = _implicit_datetime_cast(expression.expression)
    end = _implicit_datetime_cast(expression.this)

    return self.func("DATE_DIFF", unit_to_str(expression), start, end)


def _generate_datetime_array_sql(
    self: DuckDB.Generator, expression: t.Union[exp.GenerateDateArray, exp.GenerateTimestampArray]
) -> str:
    """Transpile BQ's GENERATE_DATE_ARRAY / GENERATE_TIMESTAMP_ARRAY into GENERATE_SERIES."""
    wants_dates = isinstance(expression, exp.GenerateDateArray)
    bound_type = exp.DataType.Type.DATE if wants_dates else exp.DataType.Type.TIMESTAMP

    series: t.Union[exp.GenerateSeries, exp.Cast] = exp.GenerateSeries(
        start=_implicit_datetime_cast(expression.args.get("start"), type=bound_type),
        end=_implicit_datetime_cast(expression.args.get("end"), type=bound_type),
        step=expression.args.get("step"),
    )

    if wants_dates:
        # The GENERATE_SERIES result type is TIMESTAMP array, so to match BQ's semantics
        # for GENERATE_DATE_ARRAY we must cast it back to DATE array
        series = exp.cast(series, exp.DataType.build("ARRAY<DATE>"))

    return self.sql(series)


def _json_extract_value_array_sql(
    self: DuckDB.Generator, expression: exp.JSONValueArray | exp.JSONExtractArray
) -> str:
    """Render JSON value/extract-array as a typed cast over a plain JSONExtract."""
    if isinstance(expression, exp.JSONValueArray):
        target_type = "ARRAY<STRING>"
    else:
        target_type = "ARRAY<JSON>"

    extracted = exp.JSONExtract(this=expression.this, expression=expression.expression)
    return self.sql(exp.cast(extracted, to=exp.DataType.build(target_type)))


def _cast_to_varchar(arg: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
    """Cast typed, non-VARCHAR expressions to VARCHAR; pass everything else through."""
    if not arg or not arg.type:
        return arg
    if arg.is_type(exp.DataType.Type.VARCHAR, exp.DataType.Type.UNKNOWN):
        return arg

    return exp.cast(arg, exp.DataType.Type.VARCHAR)


def _cast_to_blob(self: DuckDB.Generator, expression: exp.Expression, result_sql: str) -> str:
    """Wrap `result_sql` in a BLOB cast when `expression` is BINARY-typed."""
    if not expression.is_type(exp.DataType.Type.BINARY):
        return result_sql

    blob_type = exp.DataType.build("BLOB", dialect="duckdb")
    return self.sql(exp.Cast(this=result_sql, to=blob_type))


def _anyvalue_sql(self: DuckDB.Generator, expression: exp.AnyValue) -> str:
    """Transform ANY_VALUE(expr HAVING MAX/MIN having_expr) into ARG_MAX_NULL / ARG_MIN_NULL."""
    having = expression.this
    if not isinstance(having, exp.HavingMax):
        return self.function_fallback_sql(expression)

    name = "ARG_MAX_NULL" if having.args.get("max") else "ARG_MIN_NULL"
    return self.func(name, having.this, having.expression)


def _literal_sql_with_ws_chr(self: DuckDB.Generator, literal: str) -> str:
    """Render `literal` as SQL, expressing whitespace control chars via CHR() calls."""
    # DuckDB does not support \uXXXX escapes, so we must use CHR() instead of replacing them directly
    if not any(ch in WS_CONTROL_CHARS_TO_DUCK for ch in literal):
        return self.sql(exp.Literal.string(literal))

    # Split the literal into alternating runs of control / non-control characters:
    # each control char becomes a CHR(code) call, each other run a plain string literal
    sql_segments: t.List[str] = []
    for is_ws_control, group in groupby(literal, key=lambda ch: ch in WS_CONTROL_CHARS_TO_DUCK):
        if is_ws_control:
            for ch in group:
                duckdb_char_code = WS_CONTROL_CHARS_TO_DUCK[ch]
                sql_segments.append(self.func("CHR", exp.Literal.number(str(duckdb_char_code))))
        else:
            sql_segments.append(self.sql(exp.Literal.string("".join(group))))

    # Parenthesize concatenations so the result embeds safely in larger expressions
    sql = " || ".join(sql_segments)
    return sql if len(sql_segments) == 1 else f"({sql})"


def _escape_regex_metachars(
    self: DuckDB.Generator, delimiters: t.Optional[exp.Expression], delimiters_sql: str
) -> str:
    r"""
    Escapes regex metacharacters \ - ^ [ ] for use in character classes regex expressions.

    Literal strings are escaped at transpile time, expressions handled with REPLACE() calls.
    """
    if not delimiters:
        return delimiters_sql

    if delimiters.is_string:
        # Known literal: escape directly at transpile time
        literal_value = delimiters.this
        escaped_literal = "".join(REGEX_ESCAPE_REPLACEMENTS.get(ch, ch) for ch in literal_value)
        return _literal_sql_with_ws_chr(self, escaped_literal)

    # Arbitrary expression: escape at runtime by nesting one REPLACE() per metachar.
    # The backslash entry comes first in REGEX_ESCAPE_REPLACEMENTS, so later
    # replacements do not double-escape the backslashes it introduces.
    escaped_sql = delimiters_sql
    for raw, escaped in REGEX_ESCAPE_REPLACEMENTS.items():
        escaped_sql = self.func(
            "REPLACE",
            escaped_sql,
            self.sql(exp.Literal.string(raw)),
            self.sql(exp.Literal.string(escaped)),
        )

    return escaped_sql


def _build_capitalization_sql(
    self: DuckDB.Generator,
    value_to_split: str,
    delimiters_sql: str,
) -> str:
    """Build the INITCAP emulation: split on delimiters and capitalize each word.

    Both arguments are SQL fragments: `value_to_split` is the string expression to
    capitalize; `delimiters_sql` is an (escaped) string of delimiter characters.
    """
    # empty string delimiter --> treat value as one word, no need to split
    if delimiters_sql == "''":
        return f"UPPER(LEFT({value_to_split}, 1)) || LOWER(SUBSTRING({value_to_split}, 2))"

    # Character class matching any delimiter, and a pattern matching maximal runs
    # of delimiter or non-delimiter characters respectively
    delim_regex_sql = f"CONCAT('[', {delimiters_sql}, ']')"
    split_regex_sql = f"CONCAT('([', {delimiters_sql}, ']+|[^', {delimiters_sql}, ']+)')"

    # REGEXP_EXTRACT_ALL produces a list of string segments, alternating between delimiter and non-delimiter segments.
    # We do not know whether the first segment is a delimiter or not, so we check the first character of the string
    # with REGEXP_MATCHES. If the first char is a delimiter, we capitalize even list indexes, otherwise capitalize odd.
    return self.func(
        "ARRAY_TO_STRING",
        exp.case()
        .when(
            f"REGEXP_MATCHES(LEFT({value_to_split}, 1), {delim_regex_sql})",
            self.func(
                "LIST_TRANSFORM",
                self.func("REGEXP_EXTRACT_ALL", value_to_split, split_regex_sql),
                "(seg, idx) -> CASE WHEN idx % 2 = 0 THEN UPPER(LEFT(seg, 1)) || LOWER(SUBSTRING(seg, 2)) ELSE seg END",
            ),
        )
        .else_(
            self.func(
                "LIST_TRANSFORM",
                self.func("REGEXP_EXTRACT_ALL", value_to_split, split_regex_sql),
                "(seg, idx) -> CASE WHEN idx % 2 = 1 THEN UPPER(LEFT(seg, 1)) || LOWER(SUBSTRING(seg, 2)) ELSE seg END",
            ),
        ),
        "''",
    )


def _initcap_sql(self: DuckDB.Generator, expression: exp.Initcap) -> str:
    """Render INITCAP by splitting on delimiter chars and capitalizing each segment."""
    delimiters = expression.args.get("expression")
    if delimiters is None:
        # Fallback for manually created exp.Initcap nodes without a delimiters arg
        delimiters = exp.Literal.string(self.dialect.INITCAP_DEFAULT_DELIMITER_CHARS)

    escaped_sql = _escape_regex_metachars(self, delimiters, self.sql(delimiters))
    return _build_capitalization_sql(self, self.sql(expression, "this"), escaped_sql)


class DuckDB(Dialect):
    NULL_ORDERING = "nulls_are_last"
    SUPPORTS_USER_DEFINED_TYPES = True
    SAFE_DIVISION = True
    # DuckDB arrays/lists are 1-indexed
    INDEX_OFFSET = 1
    CONCAT_COALESCE = True
    SUPPORTS_ORDER_BY_ALL = True
    SUPPORTS_FIXED_SIZE_ARRAYS = True
    STRICT_JSON_PATH_SYNTAX = False
    # e.g. 1_000_000 is a valid numeric literal
    NUMBERS_CAN_BE_UNDERSCORE_SEPARATED = True

    # https://duckdb.org/docs/sql/introduction.html#creating-a-new-table
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    DATE_PART_MAPPING = {
        **Dialect.DATE_PART_MAPPING,
        "DAYOFWEEKISO": "ISODOW",
    }

    # Remove WEEKDAY from the inherited mapping (no DuckDB equivalent)
    DATE_PART_MAPPING.pop("WEEKDAY")

    INVERSE_TIME_MAPPING = {
        "%e": "%-d",  # BigQuery's space-padded day (%e) -> DuckDB's no-padding day (%-d)
    }

    def to_json_path(self, path: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
        """Convert `path` to a JSON path, keeping DuckDB-specific path syntaxes raw."""
        if isinstance(path, exp.Literal):
            # DuckDB also supports the JSON pointer syntax, where every path starts with
            # a `/`, and allows accessing the back of lists via `[#-i]`. Parsing either
            # as a JSON path can produce a noisy warning or an invalid representation,
            # so such literals are returned untouched.
            text = path.name
            if text.startswith("/") or "[#" in text:
                return path

        return super().to_json_path(path)

    class Tokenizer(tokens.Tokenizer):
        # e'...' / E'...' strings carry escape sequences
        BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
        HEREDOC_STRINGS = ["$"]

        HEREDOC_TAG_IS_IDENTIFIER = True
        # A lone `$` that doesn't start a heredoc is tokenized as a parameter
        HEREDOC_STRING_ALTERNATIVE = TokenType.PARAMETER

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "//": TokenType.DIV,
            "**": TokenType.DSTAR,
            "^@": TokenType.CARET_AT,
            "@>": TokenType.AT_GT,
            "<@": TokenType.LT_AT,
            "ATTACH": TokenType.ATTACH,
            "BINARY": TokenType.VARBINARY,
            "BITSTRING": TokenType.BIT,
            "BPCHAR": TokenType.TEXT,
            "CHAR": TokenType.TEXT,
            "DATETIME": TokenType.TIMESTAMPNTZ,
            "DETACH": TokenType.DETACH,
            "FORCE": TokenType.FORCE,
            "INSTALL": TokenType.INSTALL,
            "INT8": TokenType.BIGINT,
            "LOGICAL": TokenType.BOOLEAN,
            "MACRO": TokenType.FUNCTION,
            "ONLY": TokenType.ONLY,
            "PIVOT_WIDER": TokenType.PIVOT,
            "POSITIONAL": TokenType.POSITIONAL,
            "RESET": TokenType.COMMAND,
            "ROW": TokenType.STRUCT,
            "SIGNED": TokenType.INT,
            "STRING": TokenType.TEXT,
            "SUMMARIZE": TokenType.SUMMARIZE,
            "TIMESTAMP": TokenType.TIMESTAMPNTZ,
            "TIMESTAMP_S": TokenType.TIMESTAMP_S,
            "TIMESTAMP_MS": TokenType.TIMESTAMP_MS,
            "TIMESTAMP_NS": TokenType.TIMESTAMP_NS,
            "TIMESTAMP_US": TokenType.TIMESTAMP,
            "UBIGINT": TokenType.UBIGINT,
            "UINTEGER": TokenType.UINT,
            "USMALLINT": TokenType.USMALLINT,
            "UTINYINT": TokenType.UTINYINT,
            "VARCHAR": TokenType.TEXT,
        }
        # `/*+ ... */` hint comments are not recognized
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        # SHOW is parsed as a proper statement rather than an opaque command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}

    class Parser(parser.Parser):
        # MAP {...} keys may be arbitrary expressions, not just literals
        MAP_KEYS_ARE_ARBITRARY_EXPRESSIONS = True

        # `^` is not bitwise XOR in DuckDB (it parses as exponentiation, see EXPONENT)
        BITWISE = parser.Parser.BITWISE.copy()
        BITWISE.pop(TokenType.CARET)

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
            TokenType.CARET_AT: binary_range_parser(exp.StartsWith),
            TokenType.TILDA: binary_range_parser(exp.RegexpFullMatch),
        }

        # Both `^` and `**` parse as power operators
        EXPONENT = {
            **parser.Parser.EXPONENT,
            TokenType.CARET: exp.Pow,
            TokenType.DSTAR: exp.Pow,
        }

        FUNCTIONS_WITH_ALIASED_ARGS = {*parser.Parser.FUNCTIONS_WITH_ALIASED_ARGS, "STRUCT_PACK"}

        SHOW_PARSERS = {
            "TABLES": _show_parser("TABLES"),
            "ALL TABLES": _show_parser("ALL TABLES"),
        }

        # DuckDB function names mapped onto canonical sqlglot expressions
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ANY_VALUE": lambda args: exp.IgnoreNulls(this=exp.AnyValue.from_arg_list(args)),
            "ARRAY_REVERSE_SORT": _build_sort_array_desc,
            "ARRAY_SORT": exp.SortArray.from_arg_list,
            "BIT_AND": exp.BitwiseAndAgg.from_arg_list,
            "BIT_OR": exp.BitwiseOrAgg.from_arg_list,
            "BIT_XOR": exp.BitwiseXorAgg.from_arg_list,
            "DATEDIFF": _build_date_diff,
            "DATE_DIFF": _build_date_diff,
            "DATE_TRUNC": date_trunc_to_time,
            "DATETRUNC": date_trunc_to_time,
            "DECODE": lambda args: exp.Decode(
                this=seq_get(args, 0), charset=exp.Literal.string("utf-8")
            ),
            "EDITDIST3": exp.Levenshtein.from_arg_list,
            "ENCODE": lambda args: exp.Encode(
                this=seq_get(args, 0), charset=exp.Literal.string("utf-8")
            ),
            "EPOCH": exp.TimeToUnix.from_arg_list,
            "EPOCH_MS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS
            ),
            "GENERATE_SERIES": _build_generate_series(),
            "JSON": exp.ParseJSON.from_arg_list,
            "JSON_EXTRACT_PATH": parser.build_extract_json_with_path(exp.JSONExtract),
            "JSON_EXTRACT_STRING": parser.build_extract_json_with_path(exp.JSONExtractScalar),
            "LIST_CONTAINS": exp.ArrayContains.from_arg_list,
            "LIST_COSINE_DISTANCE": exp.CosineDistance.from_arg_list,
            "LIST_DISTANCE": exp.EuclideanDistance.from_arg_list,
            "LIST_FILTER": exp.ArrayFilter.from_arg_list,
            "LIST_HAS": exp.ArrayContains.from_arg_list,
            "LIST_HAS_ANY": exp.ArrayOverlaps.from_arg_list,
            "LIST_REVERSE_SORT": _build_sort_array_desc,
            "LIST_SORT": exp.SortArray.from_arg_list,
            "LIST_TRANSFORM": exp.Transform.from_arg_list,
            "LIST_VALUE": lambda args: exp.Array(expressions=args),
            "MAKE_DATE": exp.DateFromParts.from_arg_list,
            "MAKE_TIME": exp.TimeFromParts.from_arg_list,
            "MAKE_TIMESTAMP": _build_make_timestamp,
            "QUANTILE_CONT": exp.PercentileCont.from_arg_list,
            "QUANTILE_DISC": exp.PercentileDisc.from_arg_list,
            "RANGE": _build_generate_series(end_exclusive=True),
            "REGEXP_EXTRACT": build_regexp_extract(exp.RegexpExtract),
            "REGEXP_EXTRACT_ALL": build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_MATCHES": exp.RegexpLike.from_arg_list,
            "REGEXP_REPLACE": lambda args: exp.RegexpReplace(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                replacement=seq_get(args, 2),
                modifiers=seq_get(args, 3),
                single_replace=True,
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "STRFTIME": build_formatted_time(exp.TimeToStr, "duckdb"),
            "STRING_SPLIT": exp.Split.from_arg_list,
            "STRING_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
            "STRING_TO_ARRAY": exp.Split.from_arg_list,
            "STRPTIME": build_formatted_time(exp.StrToTime, "duckdb"),
            "STRUCT_PACK": exp.Struct.from_arg_list,
            "STR_SPLIT": exp.Split.from_arg_list,
            "STR_SPLIT_REGEX": exp.RegexpSplit.from_arg_list,
            "TIME_BUCKET": exp.DateBin.from_arg_list,
            "TO_TIMESTAMP": exp.UnixToTime.from_arg_list,
            "UNNEST": exp.Explode.from_arg_list,
            "XOR": binary_from_function(exp.BitwiseXor),
        }

        # Inherited entries with no DuckDB counterpart
        FUNCTIONS.pop("DATE_SUB")
        FUNCTIONS.pop("GLOB")

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            **dict.fromkeys(
                ("GROUP_CONCAT", "LISTAGG", "STRINGAGG"), lambda self: self._parse_string_agg()
            ),
        }
        # DECODE is parsed as a plain function here, not with the special-case parser
        FUNCTION_PARSERS.pop("DECODE")

        NO_PAREN_FUNCTION_PARSERS = {
            **parser.Parser.NO_PAREN_FUNCTION_PARSERS,
            "MAP": lambda self: self._parse_map(),
            "@": lambda self: exp.Abs(this=self._parse_bitwise()),
        }

        # SEMI / ANTI introduce join variants, so they cannot serve as table aliases
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS - {
            TokenType.SEMI,
            TokenType.ANTI,
        }

        PLACEHOLDER_PARSERS = {
            **parser.Parser.PLACEHOLDER_PARSERS,
            # $1, $name style placeholders
            TokenType.PARAMETER: lambda self: (
                self.expression(exp.Placeholder, this=self._prev.text)
                if self._match(TokenType.NUMBER) or self._match_set(self.ID_VAR_TOKENS)
                else None
            ),
        }

        TYPE_CONVERTERS = {
            # https://duckdb.org/docs/sql/data_types/numeric
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=18, scale=3),
            # https://duckdb.org/docs/sql/data_types/text
            exp.DataType.Type.TEXT: lambda dtype: exp.DataType.build("TEXT"),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.ATTACH: lambda self: self._parse_attach_detach(),
            TokenType.DETACH: lambda self: self._parse_attach_detach(is_attach=False),
            TokenType.FORCE: lambda self: self._parse_force(),
            TokenType.INSTALL: lambda self: self._parse_install(),
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        SET_PARSERS = {
            **parser.Parser.SET_PARSERS,
            "VARIABLE": lambda self: self._parse_set_item_assignment("VARIABLE"),
        }

        def _parse_lambda(self, alias: bool = False) -> t.Optional[exp.Expression]:
            """Parse DuckDB's `lambda x, y: ...` syntax, falling back to the default form."""
            index = self._index
            if not self._match_text_seq("LAMBDA"):
                return super()._parse_lambda(alias=alias)

            expressions = self._parse_csv(self._parse_lambda_arg)
            if not self._match(TokenType.COLON):
                # Not a lambda after all: rewind and bail out
                self._retreat(index)
                return None

            this = self._replace_lambda(self._parse_assignment(), expressions)
            return self.expression(exp.Lambda, this=this, expressions=expressions, colon=True)

        def _parse_expression(self) -> t.Optional[exp.Expression]:
            # DuckDB supports prefix aliases, e.g. foo: 1
            if self._next and self._next.token_type == TokenType.COLON:
                alias = self._parse_id_var(tokens=self.ALIAS_TOKENS)
                self._match(TokenType.COLON)
                comments = self._prev_comments or []

                this = self._parse_assignment()
                if isinstance(this, exp.Expression):
                    # Moves the comment next to the alias in `alias: expr /* comment */`
                    comments += this.pop_comments() or []

                return self.expression(exp.Alias, comments=comments, this=this, alias=alias)

            return super()._parse_expression()

        def _parse_table(
            self,
            schema: bool = False,
            joins: bool = False,
            alias_tokens: t.Optional[t.Collection[TokenType]] = None,
            parse_bracket: bool = False,
            is_db_reference: bool = False,
            parse_partition: bool = False,
            consume_pipe: bool = False,
        ) -> t.Optional[exp.Expression]:
            # DuckDB supports prefix aliases, e.g. FROM foo: bar
            if self._next and self._next.token_type == TokenType.COLON:
                alias = self._parse_table_alias(
                    alias_tokens=alias_tokens or self.TABLE_ALIAS_TOKENS
                )
                self._match(TokenType.COLON)
                comments = self._prev_comments or []
            else:
                alias = None
                comments = []

            table = super()._parse_table(
                schema=schema,
                joins=joins,
                alias_tokens=alias_tokens,
                parse_bracket=parse_bracket,
                is_db_reference=is_db_reference,
                parse_partition=parse_partition,
            )
            if isinstance(table, exp.Expression) and isinstance(alias, exp.TableAlias):
                # Moves the comment next to the alias in `alias: table /* comment */`
                comments += table.pop_comments() or []
                alias.comments = alias.pop_comments() + comments
                table.set("alias", alias)

            return table

        def _parse_table_sample(self, as_modifier: bool = False) -> t.Optional[exp.TableSample]:
            # https://duckdb.org/docs/sql/samples.html
            sample = super()._parse_table_sample(as_modifier=as_modifier)
            if sample and not sample.args.get("method"):
                # Fill in the default sampling method when it was omitted
                if sample.args.get("size"):
                    sample.set("method", exp.var("RESERVOIR"))
                else:
                    sample.set("method", exp.var("SYSTEM"))

            return sample

        def _parse_bracket(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            bracket = super()._parse_bracket(this)

            if self.dialect.version < (1, 2) and isinstance(bracket, exp.Bracket):
                # https://duckdb.org/2025/02/05/announcing-duckdb-120.html#breaking-changes
                bracket.set("returns_list_for_maps", True)

            return bracket

        def _parse_map(self) -> exp.ToMap | exp.Map:
            """Parse MAP {…} literals and MAP(keys, values) calls."""
            if self._match(TokenType.L_BRACE, advance=False):
                return self.expression(exp.ToMap, this=self._parse_bracket())

            args = self._parse_wrapped_csv(self._parse_assignment)
            return self.expression(exp.Map, keys=seq_get(args, 0), values=seq_get(args, 1))

        def _parse_struct_types(self, type_required: bool = False) -> t.Optional[exp.Expression]:
            # Struct fields are parsed like column definitions (name + type)
            return self._parse_field_def()

        def _pivot_column_names(self, aggregations: t.List[exp.Expression]) -> t.List[str]:
            # A single aggregation keeps the default naming; multiple need explicit names
            if len(aggregations) == 1:
                return super()._pivot_column_names(aggregations)
            return pivot_column_names(aggregations, dialect="duckdb")

        def _parse_attach_detach(self, is_attach: bool = True) -> exp.Attach | exp.Detach:
            """Parse ATTACH / DETACH [DATABASE] [IF (NOT) EXISTS] db [AS alias] [(options)]."""

            def _parse_attach_option() -> exp.AttachOption:
                return self.expression(
                    exp.AttachOption,
                    this=self._parse_var(any_token=True),
                    expression=self._parse_field(any_token=True),
                )

            self._match(TokenType.DATABASE)
            exists = self._parse_exists(not_=is_attach)
            this = self._parse_alias(self._parse_primary_or_var(), explicit=True)

            if self._match(TokenType.L_PAREN, advance=False):
                expressions = self._parse_wrapped_csv(_parse_attach_option)
            else:
                expressions = None

            return (
                self.expression(exp.Attach, this=this, exists=exists, expressions=expressions)
                if is_attach
                else self.expression(exp.Detach, this=this, exists=exists)
            )

        def _parse_show_duckdb(self, this: str) -> exp.Show:
            return self.expression(exp.Show, this=this)

        def _parse_force(self) -> exp.Install | exp.Command:
            # FORCE can only be followed by INSTALL or CHECKPOINT
            # In the case of CHECKPOINT, we fallback
            if not self._match(TokenType.INSTALL):
                return self._parse_as_command(self._prev)

            return self._parse_install(force=True)

        def _parse_install(self, force: bool = False) -> exp.Install:
            """Parse [FORCE] INSTALL extension [FROM source]."""
            return self.expression(
                exp.Install,
                this=self._parse_id_var(),
                from_=self._parse_var_or_string() if self._match(TokenType.FROM) else None,
                force=force,
            )

        def _parse_primary(self) -> t.Optional[exp.Expression]:
            # #N references a column by position, e.g. SELECT #1
            if self._match_pair(TokenType.HASH, TokenType.NUMBER):
                return exp.PositionalColumn(this=exp.Literal.number(self._prev.text))

            return super()._parse_primary()

    class Generator(generator.Generator):
        PARAMETER_TOKEN = "$"
        NAMED_PLACEHOLDER_TOKEN = "$"
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        LIMIT_FETCH = "LIMIT"
        # Struct types render as STRUCT(...) rather than STRUCT<...>
        STRUCT_DELIMITER = ("(", ")")
        RENAME_TABLE_WITH_DB = False
        NVL2_SUPPORTED = False
        SEMI_ANTI_JOIN_WITH_SIDE = False
        TABLESAMPLE_KEYWORDS = "USING SAMPLE"
        TABLESAMPLE_SEED_KEYWORD = "REPEATABLE"
        LAST_DAY_SUPPORTS_DATE_PART = False
        JSON_KEY_VALUE_PAIR_SEP = ","
        IGNORE_NULLS_IN_FUNC = True
        JSON_PATH_BRACKETED_KEY_SUPPORTED = False
        SUPPORTS_CREATE_TABLE_LIKE = False
        MULTI_ARG_DISTINCT = False
        CAN_IMPLEMENT_ARRAY_ANY = True
        SUPPORTS_TO_NUMBER = False
        SUPPORTS_WINDOW_EXCLUDE = True
        COPY_HAS_INTO_KEYWORD = False
        # SELECT * EXCEPT (...) is spelled EXCLUDE
        STAR_EXCEPT = "EXCLUDE"
        PAD_FILL_PATTERN_IS_REQUIRED = True
        ARRAY_CONCAT_IS_VAR_LEN = False
        ARRAY_SIZE_DIM_REQUIRED = False
        NORMALIZE_EXTRACT_DATE_PARTS = True
        SUPPORTS_LIKE_QUANTIFIERS = False
        SET_ASSIGNMENT_REQUIRES_VARIABLE_KEYWORD = True

        # Per-node rendering overrides: maps sqlglot expression classes to the
        # callables that generate their DuckDB SQL representation
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.AnyValue: _anyvalue_sql,
            exp.ApproxDistinct: approx_count_distinct_sql,
            exp.Array: transforms.preprocess(
                [transforms.inherit_struct_field_names],
                generator=inline_array_unless_query,
            ),
            exp.ArrayFilter: rename_func("LIST_FILTER"),
            exp.ArrayRemove: remove_from_array_using_filter,
            exp.ArraySort: _array_sort_sql,
            exp.ArraySum: rename_func("LIST_SUM"),
            exp.ArrayUniqueAgg: lambda self, e: self.func(
                "LIST", exp.Distinct(expressions=[e.this])
            ),
            exp.BitwiseAndAgg: rename_func("BIT_AND"),
            exp.BitwiseOrAgg: rename_func("BIT_OR"),
            exp.BitwiseXor: rename_func("XOR"),
            exp.BitwiseXorAgg: rename_func("BIT_XOR"),
            exp.CommentColumnConstraint: no_comment_column_constraint_sql,
            exp.CosineDistance: rename_func("LIST_COSINE_DISTANCE"),
            exp.CurrentTime: lambda *_: "CURRENT_TIME",
            exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfWeekIso: rename_func("ISODOW"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.DataType: _datatype_sql,
            exp.Date: _date_sql,
            exp.DateAdd: date_delta_to_binary_interval_op(),
            exp.DateFromParts: rename_func("MAKE_DATE"),
            exp.DateSub: date_delta_to_binary_interval_op(),
            exp.DateDiff: _date_diff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.Datetime: no_datetime_sql,
            exp.DatetimeDiff: _date_diff_sql,
            exp.DatetimeSub: date_delta_to_binary_interval_op(),
            exp.DatetimeAdd: date_delta_to_binary_interval_op(),
            exp.DateToDi: lambda self,
            e: f"CAST(STRFTIME({self.sql(e, 'this')}, {DuckDB.DATEINT_FORMAT}) AS INT)",
            exp.Decode: lambda self, e: encode_decode_sql(self, e, "DECODE", replace=False),
            exp.DiToDate: lambda self,
            e: f"CAST(STRPTIME(CAST({self.sql(e, 'this')} AS TEXT), {DuckDB.DATEINT_FORMAT}) AS DATE)",
            exp.Encode: lambda self, e: encode_decode_sql(self, e, "ENCODE", replace=False),
            exp.EuclideanDistance: rename_func("LIST_DISTANCE"),
            exp.GenerateDateArray: _generate_datetime_array_sql,
            exp.GenerateTimestampArray: _generate_datetime_array_sql,
            exp.GroupConcat: lambda self, e: groupconcat_sql(self, e, within_group=False),
            exp.Explode: rename_func("UNNEST"),
            exp.IntDiv: lambda self, e: self.binary(e, "//"),
            exp.IsInf: rename_func("ISINF"),
            exp.IsNan: rename_func("ISNAN"),
            exp.JSONBExists: rename_func("JSON_EXISTS"),
            exp.JSONExtract: _arrow_json_extract_sql,
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONFormat: _json_format_sql,
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.Lateral: explode_to_unnest_sql,
            exp.LogicalOr: rename_func("BOOL_OR"),
            exp.LogicalAnd: rename_func("BOOL_AND"),
            exp.MakeInterval: lambda self, e: no_make_interval_sql(self, e, sep=" "),
            exp.Initcap: _initcap_sql,
            exp.MD5Digest: lambda self, e: self.func("UNHEX", self.func("MD5", e.this)),
            exp.SHA1Digest: lambda self, e: self.func("UNHEX", self.func("SHA1", e.this)),
            exp.SHA2Digest: lambda self, e: self.func("UNHEX", sha2_digest_sql(self, e)),
            exp.MonthsBetween: lambda self, e: self.func(
                "DATEDIFF",
                "'month'",
                exp.cast(e.expression, exp.DataType.Type.TIMESTAMP, copy=True),
                exp.cast(e.this, exp.DataType.Type.TIMESTAMP, copy=True),
            ),
            exp.PercentileCont: rename_func("QUANTILE_CONT"),
            exp.PercentileDisc: rename_func("QUANTILE_DISC"),
            # DuckDB doesn't allow qualified columns inside of PIVOT expressions.
            # See: https://github.com/duckdb/duckdb/blob/671faf92411182f81dce42ac43de8bfb05d9909e/src/planner/binder/tableref/bind_pivot.cpp#L61-L62
            exp.Pivot: transforms.preprocess([transforms.unqualify_columns]),
            exp.RegexpReplace: lambda self, e: self.func(
                "REGEXP_REPLACE",
                e.this,
                e.expression,
                e.args.get("replacement"),
                regexp_replace_global_modifier(e),
            ),
            exp.RegexpLike: rename_func("REGEXP_MATCHES"),
            exp.RegexpILike: lambda self, e: self.func(
                "REGEXP_MATCHES", e.this, e.expression, exp.Literal.string("i")
            ),
            exp.RegexpSplit: rename_func("STR_SPLIT_REGEX"),
            exp.Return: lambda self, e: self.sql(e, "this"),
            exp.ReturnsProperty: lambda self, e: "TABLE" if isinstance(e.this, exp.Schema) else "",
            exp.Rand: rename_func("RANDOM"),
            exp.SHA: rename_func("SHA1"),
            exp.SHA2: sha256_sql,
            exp.Split: rename_func("STR_SPLIT"),
            exp.SortArray: _sort_array_sql,
            exp.StrPosition: strposition_sql,
            exp.StrToUnix: lambda self, e: self.func(
                "EPOCH", self.func("STRPTIME", e.this, self.format_time(e))
            ),
            exp.Struct: _struct_sql,
            exp.Transform: rename_func("LIST_TRANSFORM"),
            exp.TimeAdd: date_delta_to_binary_interval_op(),
            exp.TimeSub: date_delta_to_binary_interval_op(),
            exp.Time: no_time_sql,
            exp.TimeDiff: _timediff_sql,
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_to_binary_interval_op(),
            exp.TimestampDiff: lambda self, e: self.func(
                "DATE_DIFF", exp.Literal.string(e.unit), e.expression, e.this
            ),
            exp.TimestampSub: date_delta_to_binary_interval_op(),
            exp.TimeStrToDate: lambda self, e: self.sql(exp.cast(e.this, exp.DataType.Type.DATE)),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeStrToUnix: lambda self, e: self.func(
                "EPOCH", exp.cast(e.this, exp.DataType.Type.TIMESTAMP)
            ),
            exp.TimeToStr: lambda self, e: self.func("STRFTIME", e.this, self.format_time(e)),
            exp.TimeToUnix: rename_func("EPOCH"),
            exp.TsOrDiToDi: lambda self,
            e: f"CAST(SUBSTR(REPLACE(CAST({self.sql(e, 'this')} AS TEXT), '-', ''), 1, 8) AS INT)",
            exp.TsOrDsAdd: date_delta_to_binary_interval_op(),
            exp.TsOrDsDiff: lambda self, e: self.func(
                "DATE_DIFF",
                f"'{e.args.get('unit') or 'DAY'}'",
                exp.cast(e.expression, exp.DataType.Type.TIMESTAMP),
                exp.cast(e.this, exp.DataType.Type.TIMESTAMP),
            ),
            exp.UnixMicros: lambda self, e: self.func("EPOCH_US", _implicit_datetime_cast(e.this)),
            exp.UnixMillis: lambda self, e: self.func("EPOCH_MS", _implicit_datetime_cast(e.this)),
            exp.UnixSeconds: lambda self, e: self.sql(
                exp.cast(
                    self.func("EPOCH", _implicit_datetime_cast(e.this)), exp.DataType.Type.BIGINT
                )
            ),
            exp.UnixToStr: lambda self, e: self.func(
                "STRFTIME", self.func("TO_TIMESTAMP", e.this), self.format_time(e)
            ),
            exp.DatetimeTrunc: lambda self, e: self.func(
                "DATE_TRUNC", unit_to_str(e), exp.cast(e.this, exp.DataType.Type.DATETIME)
            ),
            exp.UnixToTime: _unix_to_time_sql,
            exp.UnixToTimeStr: lambda self, e: f"CAST(TO_TIMESTAMP({self.sql(e, 'this')}) AS TEXT)",
            exp.VariancePop: rename_func("VAR_POP"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: bool_xor_sql,
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost", "max_dist")(
                rename_func("LEVENSHTEIN")
            ),
            exp.JSONObjectAgg: rename_func("JSON_GROUP_OBJECT"),
            exp.JSONBObjectAgg: rename_func("JSON_GROUP_OBJECT"),
            exp.DateBin: rename_func("TIME_BUCKET"),
        }

        # JSON path syntax elements DuckDB can represent natively
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
            exp.JSONPathWildcard,
        }

        # Spellings DuckDB uses for sqlglot's abstract data types
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BINARY: "BLOB",
            exp.DataType.Type.BPCHAR: "TEXT",
            exp.DataType.Type.CHAR: "TEXT",
            exp.DataType.Type.DATETIME: "TIMESTAMP",
            exp.DataType.Type.DECFLOAT: "DECIMAL(38, 5)",
            exp.DataType.Type.FLOAT: "REAL",
            exp.DataType.Type.JSONB: "JSON",
            exp.DataType.Type.NCHAR: "TEXT",
            exp.DataType.Type.NVARCHAR: "TEXT",
            exp.DataType.Type.UINT: "UINTEGER",
            exp.DataType.Type.VARBINARY: "BLOB",
            exp.DataType.Type.ROWVERSION: "BLOB",
            exp.DataType.Type.VARCHAR: "TEXT",
            exp.DataType.Type.TIMESTAMPNTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMP_S: "TIMESTAMP_S",
            exp.DataType.Type.TIMESTAMP_MS: "TIMESTAMP_MS",
            exp.DataType.Type.TIMESTAMP_NS: "TIMESTAMP_NS",
            exp.DataType.Type.BIGDECIMAL: "DECIMAL(38, 5)",
        }

        # https://github.com/duckdb/duckdb/blob/ff7f24fd8e3128d94371827523dae85ebaf58713/third_party/libpg_query/grammar/keywords/reserved_keywords.list#L1-L77
        # NOTE: entries such as "in_p"/"group_p" carry the `_p` suffix verbatim
        # from the libpg_query token names in the list referenced above
        RESERVED_KEYWORDS = {
            "array",
            "analyse",
            "union",
            "all",
            "when",
            "in_p",
            "default",
            "create_p",
            "window",
            "asymmetric",
            "to",
            "else",
            "localtime",
            "from",
            "end_p",
            "select",
            "current_date",
            "foreign",
            "with",
            "grant",
            "session_user",
            "or",
            "except",
            "references",
            "fetch",
            "limit",
            "group_p",
            "leading",
            "into",
            "collate",
            "offset",
            "do",
            "then",
            "localtimestamp",
            "check_p",
            "lateral_p",
            "current_role",
            "where",
            "asc_p",
            "placing",
            "desc_p",
            "user",
            "unique",
            "initially",
            "column",
            "both",
            "some",
            "as",
            "any",
            "only",
            "deferrable",
            "null_p",
            "current_time",
            "true_p",
            "table",
            "case",
            "trailing",
            "variadic",
            "for",
            "on",
            "distinct",
            "false_p",
            "not",
            "constraint",
            "current_timestamp",
            "returning",
            "primary",
            "intersect",
            "having",
            "analyze",
            "current_user",
            "and",
            "cast",
            "symmetric",
            "using",
            "order",
            "current_catalog",
        }

        # Interval operands that can be rendered without wrapping parentheses
        UNWRAPPED_INTERVAL_VALUES = (exp.Literal, exp.Paren)

        # DuckDB doesn't generally support CREATE TABLE .. properties
        # https://duckdb.org/docs/sql/statements/create_table.html
        PROPERTIES_LOCATION = {
            prop: exp.Properties.Location.UNSUPPORTED
            for prop in generator.Generator.PROPERTIES_LOCATION
        }

        # There are a few exceptions (e.g. temporary tables) which are supported or
        # can be transpiled to DuckDB, so we explicitly override them accordingly
        PROPERTIES_LOCATION[exp.LikeProperty] = exp.Properties.Location.POST_SCHEMA
        PROPERTIES_LOCATION[exp.TemporaryProperty] = exp.Properties.Location.POST_CREATE
        PROPERTIES_LOCATION[exp.ReturnsProperty] = exp.Properties.Location.POST_ALIAS
        PROPERTIES_LOCATION[exp.SequenceProperties] = exp.Properties.Location.POST_EXPRESSION

        # Window functions for which DuckDB accepts IGNORE/RESPECT NULLS modifiers
        IGNORE_RESPECT_NULLS_WINDOW_FUNCTIONS = (
            exp.FirstValue,
            exp.Lag,
            exp.LastValue,
            exp.Lead,
            exp.NthValue,
        )

        def _greatest_least_sql(
            self: DuckDB.Generator, expression: exp.Greatest | exp.Least
        ) -> str:
            """
            Handle GREATEST/LEAST functions with dialect-aware NULL behavior.

            - null_if_any_null=True (BigQuery-style): NULL if any argument is NULL
            - null_if_any_null=False (DuckDB/PostgreSQL-style): NULLs are ignored
            """
            args = [expression.this, *expression.expressions]
            native_sql = self.function_fallback_sql(expression)

            if not expression.args.get("null_if_any_null"):
                # DuckDB/PostgreSQL semantics: the native function ignores NULLs
                return self.sql(native_sql)

            # BigQuery semantics: short-circuit to NULL when any argument is NULL
            any_is_null = exp.or_(*[arg.is_(exp.null()) for arg in args], copy=False)
            case_expr = exp.case().when(any_is_null, exp.null(), copy=False)
            case_expr.set("default", native_sql)
            return self.sql(case_expr)

        def greatest_sql(self: DuckDB.Generator, expression: exp.Greatest) -> str:
            # Delegate to the shared helper that implements NULL-handling semantics
            return self._greatest_least_sql(expression)

        def least_sql(self: DuckDB.Generator, expression: exp.Least) -> str:
            # Delegate to the shared helper that implements NULL-handling semantics
            return self._greatest_least_sql(expression)

        def lambda_sql(
            self, expression: exp.Lambda, arrow_sep: str = "->", wrap: bool = True
        ) -> str:
            """Render lambdas, using DuckDB's `LAMBDA args: body` form when flagged."""
            colon_style = bool(expression.args.get("colon"))
            if colon_style:
                arrow_sep, wrap = ":", False

            rendered = super().lambda_sql(expression, arrow_sep=arrow_sep, wrap=wrap)
            return f"LAMBDA {rendered}" if colon_style else rendered

        def show_sql(self, expression: exp.Show) -> str:
            """Render SHOW commands; DuckDB takes no additional arguments."""
            return "SHOW " + expression.name

        def install_sql(self, expression: exp.Install) -> str:
            """Render `[FORCE] INSTALL extension [FROM source]` statements."""
            parts = []
            if expression.args.get("force"):
                parts.append("FORCE ")

            parts.append(f"INSTALL {self.sql(expression, 'this')}")

            source = expression.args.get("from_")
            if source:
                parts.append(f" FROM {source}")

            return "".join(parts)

        def fromiso8601timestamp_sql(self, expression: exp.FromISO8601Timestamp) -> str:
            # Casting the ISO-8601 string to TIMESTAMPTZ performs the parse
            cast = exp.cast(expression.this, exp.DataType.Type.TIMESTAMPTZ)
            return self.sql(cast)

        def strtotime_sql(self, expression: exp.StrToTime) -> str:
            """Render STR_TO_TIME, using TRY_STRPTIME for the error-safe variant."""
            if not expression.args.get("safe"):
                return str_to_time_sql(self, expression)

            fmt = self.format_time(expression)
            parsed = self.func("TRY_STRPTIME", expression.this, fmt)
            return f"CAST({parsed} AS TIMESTAMP)"

        def strtodate_sql(self, expression: exp.StrToDate) -> str:
            """Render STR_TO_DATE by parsing to a timestamp and casting to DATE."""
            if expression.args.get("safe"):
                fmt = self.format_time(expression)
                parsed = self.func("TRY_STRPTIME", expression.this, fmt)
            else:
                parsed = str_to_time_sql(self, expression)

            return f"CAST({parsed} AS DATE)"

        def currentdate_sql(self, expression: exp.CurrentDate) -> str:
            """Render CURRENT_DATE, localizing via AT TIME ZONE when a zone is given."""
            zone = expression.this
            if not zone:
                return "CURRENT_DATE"

            localized = exp.Cast(
                this=exp.AtTimeZone(this=exp.CurrentTimestamp(), zone=zone),
                to=exp.DataType(this=exp.DataType.Type.DATE),
            )
            return self.sql(localized)

        def parsejson_sql(self, expression: exp.ParseJSON) -> str:
            """Render PARSE_JSON; the safe variant yields NULL for invalid input."""
            value = expression.this
            if not expression.args.get("safe"):
                return self.func("JSON", value)

            # TRY-style parsing: only pass through values that json_valid accepts
            guarded = exp.case().when(exp.func("json_valid", value), value).else_(exp.null())
            return self.sql(guarded)

        def timefromparts_sql(self, expression: exp.TimeFromParts) -> str:
            """Render TIME_FROM_PARTS as MAKE_TIME, folding nanoseconds into seconds."""
            nano = expression.args.get("nano")
            if nano is not None:
                # MAKE_TIME has no nanosecond slot, so add it as a second fraction
                fraction = nano.pop() / exp.Literal.number(1000000000.0)
                expression.set("sec", expression.args["sec"] + fraction)

            return rename_func("MAKE_TIME")(self, expression)

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS as MAKE_TIMESTAMP, folding sub-second parts.

            MAKE_TIMESTAMP accepts fractional seconds but no dedicated milli- or
            nanosecond arguments, so those are converted into second fractions.
            """
            seconds = expression.args["sec"]

            milli = expression.args.get("milli")
            if milli is not None:
                seconds += milli.pop() / exp.Literal.number(1000.0)

            nano = expression.args.get("nano")
            if nano is not None:
                seconds += nano.pop() / exp.Literal.number(1000000000.0)

            if milli or nano:
                expression.set("sec", seconds)

            return rename_func("MAKE_TIMESTAMP")(self, expression)

        def tablesample_sql(
            self,
            expression: exp.TableSample,
            tablesample_keyword: t.Optional[str] = None,
        ) -> str:
            """Render sampling clauses, normalizing them to DuckDB's rules."""
            if not isinstance(expression.parent, exp.Select):
                # A per-source sample must use TABLESAMPLE; the default keyword
                # (USING SAMPLE) applies to the entire resulting relation instead
                tablesample_keyword = "TABLESAMPLE"

            if expression.args.get("size"):
                method = expression.args.get("method")
                if method and method.name.upper() != "RESERVOIR":
                    # Discrete row counts only work with reservoir sampling
                    self.unsupported(
                        f"Sampling method {method} is not supported with a discrete sample count, "
                        "defaulting to reservoir sampling"
                    )
                    expression.set("method", exp.var("RESERVOIR"))

            return super().tablesample_sql(expression, tablesample_keyword=tablesample_keyword)

        def columndef_sql(self, expression: exp.ColumnDef, sep: str = " ") -> str:
            """Render column definitions; UDF parameters are rendered as bare names."""
            if isinstance(expression.parent, exp.UserDefinedFunction):
                return self.sql(expression, "this")

            return super().columndef_sql(expression, sep)

        def join_sql(self, expression: exp.Join) -> str:
            """Render joins, normalizing condition-less joins for DuckDB."""
            has_condition = expression.args.get("using") or expression.args.get("on")

            if (
                not has_condition
                and not expression.method
                and expression.kind in ("", "INNER", "OUTER")
            ):
                if isinstance(expression.this, exp.Unnest):
                    # Some dialects allow `LEFT/INNER JOIN UNNEST(...)` without an ON
                    # clause; DuckDB requires one, so attach an always-true condition
                    return super().join_sql(expression.on(exp.true()))

                # Without a condition, drop the side/kind modifiers
                expression.set("side", None)
                expression.set("kind", None)

            return super().join_sql(expression)

        def generateseries_sql(self, expression: exp.GenerateSeries) -> str:
            """Render series generation with the correct bound semantics.

            DuckDB's GENERATE_SERIES(a, b) spans [a, b] while RANGE(a, b) spans [a, b).
            """
            if not expression.args.get("is_end_exclusive"):
                return self.function_fallback_sql(expression)

            # An exclusive upper bound maps to DuckDB's RANGE function
            return rename_func("RANGE")(self, expression)

        def countif_sql(self, expression: exp.CountIf) -> str:
            """Render COUNT_IF natively on DuckDB >= 1.2, else as SUM of a CASE."""
            if self.dialect.version < (1, 2):
                # Older DuckDB lacks COUNT_IF: https://github.com/tobymao/sqlglot/pull/4749
                return count_if_to_sum(self, expression)

            return self.function_fallback_sql(expression)

        def bracket_sql(self, expression: exp.Bracket) -> str:
            """Render bracket subscripting, emulating pre-1.2 DuckDB semantics.

            DuckDB 1.2 changed list/map indexing behavior, so older target
            versions use the compatibility path below.
            """
            if self.dialect.version >= (1, 2):
                return super().bracket_sql(expression)

            # https://duckdb.org/2025/02/05/announcing-duckdb-120.html#breaking-changes
            this = expression.this
            if isinstance(this, exp.Array):
                # Parenthesize array literals so the subscript attaches correctly
                this.replace(exp.paren(this))

            bracket = super().bracket_sql(expression)

            if not expression.args.get("returns_list_for_maps"):
                if not this.type:
                    from sqlglot.optimizer.annotate_types import annotate_types

                    this = annotate_types(this, dialect=self.dialect)

                if this.is_type(exp.DataType.Type.MAP):
                    # Pre-1.2 MAP subscripts yield a single-element list, so pull
                    # out that element to keep scalar semantics
                    bracket = f"({bracket})[1]"

            return bracket

        def withingroup_sql(self, expression: exp.WithinGroup) -> str:
            """Render WITHIN GROUP, rewriting percentiles to DuckDB's argument order."""
            order_sql = self.sql(expression, "expression")

            func = expression.this
            if isinstance(func, exp.PERCENTILES):
                # DuckDB's ordered-set aggregates take the order key first and
                # slide the fraction to the right:
                # https://duckdb.org/docs/sql/aggregates#ordered-set-aggregate-functions
                ordered = expression.find(exp.Ordered)
                if ordered:
                    func.set("expression", func.this)
                    func.set("this", ordered.this)

            func_sql = self.sql(expression, "this").rstrip(")")
            return f"{func_sql}{order_sql})"

        def length_sql(self, expression: exp.Length) -> str:
            """Render LENGTH, resolving binary vs. text semantics for DuckDB."""
            arg = expression.this

            # Dialects like BigQuery and Snowflake also accept binary arguments;
            # plain strings (or non-binary semantics) map straight to LENGTH
            if not expression.args.get("binary") or arg.is_string:
                return self.func("LENGTH", arg)

            if not arg.type:
                from sqlglot.optimizer.annotate_types import annotate_types

                arg = annotate_types(arg, dialect=self.dialect)

            if arg.is_type(*exp.DataType.TEXT_TYPES):
                return self.func("LENGTH", arg)

            # The type is unknown at transpile time, so dispatch on TYPEOF at
            # runtime. The casts keep DuckDB's static type checker happy, and the
            # ELSE branch uses an anonymous call to avoid recursing into length_sql
            as_blob = exp.cast(arg, exp.DataType.Type.VARBINARY)
            as_text = exp.cast(arg, exp.DataType.Type.VARCHAR)

            dispatch = (
                exp.case(self.func("TYPEOF", arg))
                .when("'BLOB'", self.func("OCTET_LENGTH", as_blob))
                .else_(exp.Anonymous(this="LENGTH", expressions=[as_text]))
            )
            return self.sql(dispatch)

        def lower_sql(self, expression: exp.Lower) -> str:
            # Route the argument through VARCHAR and restore a BLOB result when
            # appropriate (see _cast_to_varchar / _cast_to_blob)
            lowered = self.func("LOWER", _cast_to_varchar(expression.this))
            return _cast_to_blob(self, expression, lowered)

        def upper_sql(self, expression: exp.Upper) -> str:
            # Route the argument through VARCHAR and restore a BLOB result when
            # appropriate (see _cast_to_varchar / _cast_to_blob)
            uppered = self.func("UPPER", _cast_to_varchar(expression.this))
            return _cast_to_blob(self, expression, uppered)

        def replace_sql(self, expression: exp.Replace) -> str:
            """Render REPLACE, coercing all three arguments through VARCHAR."""
            replaced = self.func(
                "REPLACE",
                _cast_to_varchar(expression.this),
                _cast_to_varchar(expression.expression),
                _cast_to_varchar(expression.args.get("replacement")),
            )
            return _cast_to_blob(self, expression, replaced)

        def objectinsert_sql(self, expression: exp.ObjectInsert) -> str:
            """Render OBJECT_INSERT as STRUCT_INSERT (or STRUCT_PACK for empty structs)."""
            target = expression.this
            key = expression.args.get("key")
            key_sql = key.name if isinstance(key, exp.Expression) else ""
            value_sql = self.sql(expression, "value")
            pair_sql = f"{key_sql} := {value_sql}"

            # STRUCT_INSERT({}, key := value) is invalid DuckDB, so inserting into an
            # empty struct (e.g. Snowflake's OBJECT_INSERT(OBJECT_CONSTRUCT(), k, v))
            # is emitted as STRUCT_PACK, which builds the struct from scratch
            if isinstance(target, exp.Struct) and not target.expressions:
                return self.func("STRUCT_PACK", pair_sql)

            return self.func("STRUCT_INSERT", target, pair_sql)

        def startswith_sql(self, expression: exp.StartsWith) -> str:
            """Render STARTS_WITH, coercing both arguments through VARCHAR."""
            haystack = _cast_to_varchar(expression.this)
            prefix = _cast_to_varchar(expression.expression)
            return self.func("STARTS_WITH", haystack, prefix)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST, emulating BigQuery's nested-array explosion when flagged."""
            explode_array = expression.args.get("explode_array")
            if explode_array:
                # In BigQuery, UNNESTing a nested array leads to explosion of the top-level array & struct
                # This is transpiled to DDB by transforming "FROM UNNEST(...)" to "FROM (SELECT UNNEST(..., max_depth => 2))"
                expression.expressions.append(
                    exp.Kwarg(this=exp.var("max_depth"), expression=exp.Literal.number(2))
                )

                # If BQ's UNNEST is aliased, we transform it from a column alias to a table alias in DDB
                alias = expression.args.get("alias")
                if isinstance(alias, exp.TableAlias):
                    expression.set("alias", None)
                    if alias.columns:
                        # Keep only the first column alias as the table alias
                        alias = exp.TableAlias(this=seq_get(alias.columns, 0))

                unnest_sql = super().unnest_sql(expression)
                select = exp.Select(expressions=[unnest_sql]).subquery(alias)
                return self.sql(select)

            return super().unnest_sql(expression)

        def ignorenulls_sql(self, expression: exp.IgnoreNulls) -> str:
            """Render IGNORE NULLS only where DuckDB accepts it."""
            inner = expression.this

            # Only the general-purpose window functions accept the modifier,
            # e.g. FIRST_VALUE(... IGNORE NULLS) OVER (...)
            if isinstance(inner, self.IGNORE_RESPECT_NULLS_WINDOW_FUNCTIONS):
                return super().ignorenulls_sql(expression)

            # Rewrite FIRST to ANY_VALUE, which tolerates the dropped modifier
            if isinstance(inner, exp.First):
                inner = exp.AnyValue(this=inner.this)

            if not isinstance(inner, (exp.AnyValue, exp.ApproxQuantiles)):
                self.unsupported("IGNORE NULLS is not supported for non-window functions.")

            return self.sql(inner)

        def respectnulls_sql(self, expression: exp.RespectNulls) -> str:
            """Render RESPECT NULLS only where DuckDB accepts it."""
            # Only the general-purpose window functions accept the modifier,
            # e.g. FIRST_VALUE(... RESPECT NULLS) OVER (...)
            if isinstance(expression.this, self.IGNORE_RESPECT_NULLS_WINDOW_FUNCTIONS):
                return super().respectnulls_sql(expression)

            self.unsupported("RESPECT NULLS is not supported for non-window functions.")
            return self.sql(expression, "this")

        def arraytostring_sql(self, expression: exp.ArrayToString) -> str:
            """Render ARRAY_TO_STRING, substituting NULL elements when requested."""
            array_sql = self.sql(expression, "this")
            null_replacement = self.sql(expression, "null")

            if null_replacement:
                # Map NULL elements to the replacement value before joining
                array_sql = f"LIST_TRANSFORM({array_sql}, x -> COALESCE(x, {null_replacement}))"

            return self.func("ARRAY_TO_STRING", array_sql, expression.expression)

        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render REGEXP_EXTRACT, emulating position/occurrence arguments.

            DuckDB's REGEXP_EXTRACT has no position or occurrence parameters, so a
            non-default position is emulated with SUBSTRING and a non-default
            occurrence with REGEXP_EXTRACT_ALL + ARRAY_EXTRACT.
            """
            this = expression.this
            group = expression.args.get("group")
            params = expression.args.get("parameters")
            position = expression.args.get("position")
            occurrence = expression.args.get("occurrence")
            null_if_pos_overflow = expression.args.get("null_if_pos_overflow")

            if position and (not position.is_int or position.to_py() > 1):
                # Start matching from `position` by slicing the input first
                this = exp.Substring(this=this, start=position)

                if null_if_pos_overflow:
                    # An out-of-range position yields '' from SUBSTRING; map it to NULL
                    this = exp.Nullif(this=this, expression=exp.Literal.string(""))

            # Do not render group if there is no following argument,
            # and it's the default value for this dialect
            if (
                not params
                and group
                and group.name == str(self.dialect.REGEXP_EXTRACT_DEFAULT_GROUP)
            ):
                group = None

            if occurrence and (not occurrence.is_int or occurrence.to_py() > 1):
                # Pick the n-th match out of all matches
                return self.func(
                    "ARRAY_EXTRACT",
                    self.func("REGEXP_EXTRACT_ALL", this, expression.expression, group, params),
                    exp.Literal.number(occurrence),
                )

            return self.func("REGEXP_EXTRACT", this, expression.expression, group, params)

        @unsupported_args("culture")
        def numbertostr_sql(self, expression: exp.NumberToStr) -> str:
            """Render numeric formatting via DuckDB's FORMAT (integer formats only)."""
            fmt = expression.args.get("format")
            if not fmt or not fmt.is_int:
                self.unsupported("Only integer formats are supported by NumberToStr")
                return self.function_fallback_sql(expression)

            # An integer format means "N decimal places with thousands separators"
            return self.func("FORMAT", f"'{{:,.{fmt.name}f}}'", expression.this)

        def autoincrementcolumnconstraint_sql(self, _) -> str:
            # Warn and emit nothing; DuckDB has no AUTOINCREMENT constraint
            self.unsupported("The AUTOINCREMENT column constraint is not supported by DuckDB")
            return ""

        def aliases_sql(self, expression: exp.Aliases) -> str:
            # POSEXPLODE performs its own alias handling, so bypass the generic path
            target = expression.this
            if isinstance(target, exp.Posexplode):
                return self.posexplode_sql(target)

            return super().aliases_sql(expression)

        def posexplode_sql(self, expression: exp.Posexplode) -> str:
            """Render POSEXPLODE as UNNEST + GENERATE_SUBSCRIPTS.

            Alias handling depends on where POSEXPLODE appears: a column-level
            `AS (a, b)` comes from an exp.Aliases parent, while a table-level
            alias comes from an exp.Table parent.
            """
            this = expression.this
            parent = expression.parent

            # The default Spark aliases are "pos" and "col", unless specified otherwise
            pos, col = exp.to_identifier("pos"), exp.to_identifier("col")

            if isinstance(parent, exp.Aliases):
                # Column case: SELECT POSEXPLODE(col) [AS (a, b)]
                pos, col = parent.expressions
            elif isinstance(parent, exp.Table):
                # Table case: SELECT * FROM POSEXPLODE(col) [AS (a, b)]
                alias = parent.args.get("alias")
                if alias:
                    pos, col = alias.columns or [pos, col]
                    alias.pop()

            # Translate POSEXPLODE to UNNEST + GENERATE_SUBSCRIPTS
            # Note: In Spark pos is 0-indexed, but in DuckDB it's 1-indexed, so we subtract 1 from GENERATE_SUBSCRIPTS
            unnest_sql = self.sql(exp.Unnest(expressions=[this], alias=col))
            gen_subscripts = self.sql(
                exp.Alias(
                    this=exp.Anonymous(
                        this="GENERATE_SUBSCRIPTS", expressions=[this, exp.Literal.number(1)]
                    )
                    - exp.Literal.number(1),
                    alias=pos,
                )
            )

            posexplode_sql = self.format_args(gen_subscripts, unnest_sql)

            if isinstance(parent, exp.From) or (parent and isinstance(parent.parent, exp.From)):
                # SELECT * FROM POSEXPLODE(col) -> SELECT * FROM (SELECT GENERATE_SUBSCRIPTS(...), UNNEST(...))
                return self.sql(exp.Subquery(this=exp.Select(expressions=[posexplode_sql])))

            return posexplode_sql

        def addmonths_sql(self, expression: exp.AddMonths) -> str:
            """Render ADD_MONTHS as DATE_ADD with a MONTH interval, preserving type."""
            this = expression.this

            # Resolve the argument's type if it hasn't been annotated yet
            if not this.type:
                from sqlglot.optimizer.annotate_types import annotate_types

                this = annotate_types(this, dialect=self.dialect)

            # String inputs must be parsed as timestamps before interval arithmetic
            if this.is_type(*exp.DataType.TEXT_TYPES):
                this = exp.Cast(this=this, to=exp.DataType(this=exp.DataType.Type.TIMESTAMP))

            func = self.func(
                "DATE_ADD", this, exp.Interval(this=expression.expression, unit=exp.var("MONTH"))
            )

            # DuckDB's DATE_ADD function returns TIMESTAMP/DATETIME by default, even when the input is DATE
            # To match for example Snowflake's ADD_MONTHS behavior (which preserves the input type)
            # We need to cast the result back to the original type when the input is DATE or TIMESTAMPTZ
            # Example: ADD_MONTHS('2023-01-31'::date, 1) should return DATE, not TIMESTAMP
            if this.is_type(exp.DataType.Type.DATE, exp.DataType.Type.TIMESTAMPTZ):
                return self.sql(exp.Cast(this=func, to=this.type))

            return self.sql(func)

        def format_sql(self, expression: exp.Format) -> str:
            """Render FORMAT, mapping the single-argument '%s' pattern to DuckDB's '{}'."""
            args = expression.expressions
            if len(args) == 1 and expression.name.lower() == "%s":
                return self.func("FORMAT", "'{}'", args[0])

            return self.function_fallback_sql(expression)

        def hexstring_sql(
            self, expression: exp.HexString, binary_function_repr: t.Optional[str] = None
        ) -> str:
            """Render a hex string literal, normalizing binary hex to DuckDB's representation."""
            base_sql = super().hexstring_sql(expression, binary_function_repr="FROM_HEX")

            # Integer hex literals need no further massaging
            if expression.args.get("is_integer"):
                return base_sql

            # `base_sql` has transpiled x'ABCD' (BINARY) to DuckDB's '\xAB\xCD' (BINARY);
            # TO_HEX plus a BLOB cast turns it into "ABCD" (BINARY) to match representation
            hex_blob = exp.cast(self.func("TO_HEX", base_sql), exp.DataType.Type.BLOB)

            return self.sql(hex_blob)

        def timestamptrunc_sql(self, expression: exp.TimestampTrunc) -> str:
            """Render TIMESTAMP_TRUNC as DATE_TRUNC, handling timezone-aware date units."""
            unit = unit_to_str(expression)
            zone = expression.args.get("zone")
            ts = expression.this

            # Plain truncation when there is no timezone or the unit is sub-date
            if not (is_date_unit(unit) and zone):
                return self.func("DATE_TRUNC", unit, ts)

            # BigQuery's TIMESTAMP_TRUNC with timezone truncates in the target timezone and
            # returns as UTC. Double AT TIME ZONE needed for BigQuery compatibility:
            # 1. First AT TIME ZONE: ensures truncation happens in the target timezone
            # 2. Second AT TIME ZONE: converts the DATE result back to TIMESTAMPTZ (preserving time component)
            localized = exp.AtTimeZone(this=ts, zone=zone)
            truncated = self.func("DATE_TRUNC", unit, localized)
            return self.sql(exp.AtTimeZone(this=truncated, zone=zone))

        def trim_sql(self, expression: exp.Trim) -> str:
            """Render TRIM with both operands coerced to VARCHAR, re-casting the result to BLOB when needed."""
            target = _cast_to_varchar(expression.this)
            trim_chars = _cast_to_varchar(expression.expression)
            trimmed = self.func("TRIM", target, trim_chars)
            return _cast_to_blob(self, expression, trimmed)

        def round_sql(self, expression: exp.Round) -> str:
            """Render ROUND, mapping half-to-even rounding modes onto DuckDB's ROUND_EVEN."""
            value = expression.this
            decimals = expression.args.get("decimals")
            mode = expression.args.get("truncate")

            name = "ROUND"
            if mode:
                # Banker's rounding: BigQuery's ROUND_HALF_EVEN, Snowflake's HALF_TO_EVEN
                if mode.this in ("ROUND_HALF_EVEN", "HALF_TO_EVEN"):
                    name = "ROUND_EVEN"
                    mode = None
                # BigQuery's ROUND_HALF_AWAY_FROM_ZERO, Snowflake's HALF_AWAY_FROM_ZERO:
                # drop the mode argument in this case
                elif mode.this in ("ROUND_HALF_AWAY_FROM_ZERO", "HALF_AWAY_FROM_ZERO"):
                    mode = None

            return self.func(name, value, decimals, mode)

        def approxquantiles_sql(self, expression: exp.ApproxQuantiles) -> str:
            """
            Translate BigQuery's APPROX_QUANTILES(expr, n), which returns an array of n+1
            approximate quantile values dividing the input distribution into n equal-sized
            buckets.

            Both BigQuery and DuckDB use approximate algorithms for quantile estimation,
            but BigQuery does not document the specific algorithm used so results may
            differ. DuckDB does not support RESPECT NULLS.
            """
            arg = expression.this

            if isinstance(arg, exp.Distinct):
                # With DISTINCT, the parser attaches both arguments to the Distinct node,
                # so the bucket count must be detached from it before generating
                if len(arg.expressions) < 2:
                    self.unsupported("APPROX_QUANTILES requires a bucket count argument")
                    return self.function_fallback_sql(expression)
                bucket_expr = arg.expressions[1].pop()
            else:
                bucket_expr = expression.expression

            if not (isinstance(bucket_expr, exp.Literal) and bucket_expr.is_int):
                self.unsupported("APPROX_QUANTILES bucket count must be a positive integer")
                return self.function_fallback_sql(expression)

            buckets = t.cast(int, bucket_expr.to_py())
            if buckets <= 0:
                self.unsupported("APPROX_QUANTILES bucket count must be a positive integer")
                return self.function_fallback_sql(expression)

            # n buckets -> n + 1 evenly spaced quantile points over [0, 1]
            points = exp.Array(
                expressions=[
                    exp.Literal.number(Decimal(i) / Decimal(buckets))
                    for i in range(buckets + 1)
                ]
            )

            return self.sql(exp.ApproxQuantile(this=arg, quantile=points))

        def jsonextractscalar_sql(self, expression: exp.JSONExtractScalar) -> str:
            """Render JSON scalar extraction; route through JSON_VALUE when only scalars may match."""
            if expression.args.get("scalar_only"):
                json_value_sql = rename_func("JSON_VALUE")(self, expression)
                expression = exp.JSONExtractScalar(this=json_value_sql, expression="'$'")

            return _arrow_json_extract_sql(self, expression)

        def bitwisenot_sql(self, expression: exp.BitwiseNot) -> str:
            """Render bitwise NOT, parenthesizing negated operands so output like "~-1" stays parseable."""
            operand = expression.this

            # Wrap in parentheses to prevent parsing issues such as "SELECT ~-1"
            if isinstance(operand, exp.Neg):
                operand = exp.Paren(this=operand)

            return f"~{self.sql(operand)}"
