# uncompyle6 version 3.2.3
# Python bytecode 3.6 (3379)
# Decompiled from: Python 3.6.8 |Anaconda custom (64-bit)| (default, Feb 21 2019, 18:30:04) [MSC v.1916 64 bit (AMD64)]
# Embedded file name: site-packages\sqlalchemy\dialects\firebird\base.py
r"""

.. dialect:: firebird
    :name: Firebird

Firebird Dialects
-----------------

Firebird offers two distinct dialects_ (not to be confused with a
SQLAlchemy ``Dialect``):

dialect 1
  This is the old syntax and behaviour, inherited from Interbase pre-6.0.

dialect 3
  This is the newer and supported syntax, introduced in Interbase 6.0.

The SQLAlchemy Firebird dialect detects these versions and
adjusts its representation of SQL accordingly.  However,
support for dialect 1 is not well tested and probably has
incompatibilities.

Locking Behavior
----------------

Firebird locks tables aggressively.  For this reason, a DROP TABLE may
hang until other transactions are released.  SQLAlchemy does its best
to release transactions as quickly as possible.  The most common cause
of hanging transactions is a non-fully consumed result set, i.e.::

    result = engine.execute("select * from table")
    row = result.fetchone()
    return

Where above, the ``ResultProxy`` has not been fully consumed.  The
connection will be returned to the pool and the transactional state
rolled back once the Python garbage collector reclaims the objects
which hold onto the connection, which often occurs asynchronously.
The above use case can be alleviated by calling ``first()`` on the
``ResultProxy`` which will fetch the first row and immediately close
all remaining cursor/connection resources.

RETURNING support
-----------------

Firebird 2.0 supports returning a result set from inserts, and 2.1
extends that to deletes and updates. This is generically exposed by
the SQLAlchemy ``returning()`` method, such as::

    # INSERT..RETURNING
    result = table.insert().returning(table.c.col1, table.c.col2).\
                   values(name='foo')
    print(result.fetchall())

    # UPDATE..RETURNING
    raises = empl.update().returning(empl.c.id, empl.c.salary).\
                  where(empl.c.sales>100).\
                  values(dict(salary=empl.c.salary * 1.1))
    print(raises.fetchall())


.. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html

"""
import datetime
from sqlalchemy import schema as sa_schema
from sqlalchemy import exc, types as sqltypes, sql, util
from sqlalchemy.sql import expression
from sqlalchemy.engine import base, default, reflection
from sqlalchemy.sql import compiler
from sqlalchemy.sql.elements import quoted_name
from sqlalchemy.types import (
    BIGINT,
    BLOB,
    DATE,
    FLOAT,
    INTEGER,
    NUMERIC,
    SMALLINT,
    TEXT,
    TIME,
    TIMESTAMP,
    Integer,
)

# Firebird reserved words: identifiers matching any of these must be quoted.
# Kept as a plain ``set`` of lowercase words; built from a whitespace-split
# string for compactness.
RESERVED_WORDS = set(
    """
    active add admin after all alter and any as asc ascending at auto avg
    before begin between bigint bit_length blob both by
    case cast char character character_length char_length check close
    collate column commit committed computed conditional connect constraint
    containing count create cross cstring current current_connection
    current_date current_role current_time current_timestamp
    current_transaction current_user cursor
    database date day dec decimal declare default delete desc descending
    disconnect distinct do domain double drop
    else end entry_point escape exception execute exists exit external
    extract
    fetch file filter float for foreign from full function
    gdscode generator gen_id global grant group
    having hour
    if in inactive index inner input_type insensitive insert int integer
    into is isolation
    join
    key
    leading left length level like long lower
    manual max maximum_segment merge min minute module_name month
    names national natural nchar no not null numeric
    octet_length of on only open option or order outer output_type overflow
    page pages page_size parameter password plan position post_event
    precision primary privileges procedure protected
    rdb$db_key read real record_version recreate recursive references
    release reserv reserving retain returning_values returns revoke right
    rollback rows row_count
    savepoint schema second segment select sensitive set shadow shared
    singular size smallint snapshot some sort sqlcode stability start
    starting starts statistics sub_type sum suspend
    table then time timestamp to trailing transaction trigger trim
    uncommitted union unique update upper user using
    value values varchar variable varying view
    wait when where while with work write
    year
    """.split()
)


class _StringType(sqltypes.String):
    """Common base for Firebird string types, adding a ``charset`` option."""

    def __init__(self, charset=None, **kw):
        # Let the generic String handle length/collation, then record the
        # Firebird-specific character set (rendered by the type compiler).
        super(_StringType, self).__init__(**kw)
        self.charset = charset


class VARCHAR(_StringType, sqltypes.VARCHAR):
    """Firebird VARCHAR type."""

    __visit_name__ = "VARCHAR"

    def __init__(self, length=None, **kwargs):
        # Equivalent to super(VARCHAR, self).__init__: _StringType is the
        # next class in the MRO and absorbs ``charset`` plus String kwargs.
        _StringType.__init__(self, length=length, **kwargs)


class CHAR(_StringType, sqltypes.CHAR):
    """Firebird CHAR type."""

    __visit_name__ = "CHAR"

    def __init__(self, length=None, **kwargs):
        # Equivalent to super(CHAR, self).__init__: _StringType is the
        # next class in the MRO and absorbs ``charset`` plus String kwargs.
        _StringType.__init__(self, length=length, **kwargs)


class _FBDateTime(sqltypes.DateTime):
    """DateTime variant that promotes plain dates to datetimes at bind time."""

    def bind_processor(self, dialect):
        def process(value):
            # Exact-type identity check on purpose: datetime.datetime is a
            # subclass of datetime.date and must pass through untouched.
            if type(value) is datetime.date:
                return datetime.datetime(value.year, value.month, value.day)
            return value

        return process


# Override generic SQLAlchemy types with Firebird-specific implementations.
colspecs = {sqltypes.DateTime: _FBDateTime}
# Map Firebird type names (as reported by RDB$TYPES during reflection)
# to SQLAlchemy types.
ischema_names = {
    "SHORT": SMALLINT,
    "LONG": INTEGER,
    "QUAD": FLOAT,
    "FLOAT": FLOAT,
    "DATE": DATE,
    "TIME": TIME,
    "TEXT": TEXT,
    "INT64": BIGINT,
    "DOUBLE": FLOAT,
    "TIMESTAMP": TIMESTAMP,
    "VARYING": VARCHAR,
    "CSTRING": CHAR,
    "BLOB": BLOB,
}


class FBTypeCompiler(compiler.GenericTypeCompiler):
    """Firebird DDL type rendering: booleans emulated with SMALLINT,
    datetimes as TIMESTAMP, TEXT/BLOB via BLOB sub-types, and an optional
    CHARACTER SET suffix on string types."""

    def visit_boolean(self, type_, **kw):
        # No native boolean type; use SMALLINT.
        return self.visit_SMALLINT(type_, **kw)

    def visit_datetime(self, type_, **kw):
        return self.visit_TIMESTAMP(type_, **kw)

    def visit_TEXT(self, type_, **kw):
        return "BLOB SUB_TYPE 1"

    def visit_BLOB(self, type_, **kw):
        return "BLOB SUB_TYPE 0"

    def _extend_string(self, type_, basic):
        # Append an explicit CHARACTER SET clause when one was requested.
        charset = getattr(type_, "charset", None)
        if charset is not None:
            return "%s CHARACTER SET %s" % (basic, charset)
        return basic

    def visit_CHAR(self, type_, **kw):
        rendered = super(FBTypeCompiler, self).visit_CHAR(type_, **kw)
        return self._extend_string(type_, rendered)

    def visit_VARCHAR(self, type_, **kw):
        # Firebird requires an explicit length on VARCHAR columns.
        if not type_.length:
            raise exc.CompileError(
                "VARCHAR requires a length on dialect %s" % self.dialect.name
            )
        rendered = super(FBTypeCompiler, self).visit_VARCHAR(type_, **kw)
        return self._extend_string(type_, rendered)


class FBCompiler(sql.compiler.SQLCompiler):
    """Firebird specific idiosyncrasies"""

    ansi_bind_rules = True

    def visit_now_func(self, fn, **kw):
        return "CURRENT_TIMESTAMP"

    def visit_startswith_op_binary(self, binary, operator, **kw):
        # Firebird's STARTING WITH operator instead of LIKE 'x%'.
        return "%s STARTING WITH %s" % (
            binary.left._compiler_dispatch(self, **kw),
            binary.right._compiler_dispatch(self, **kw),
        )

    def visit_notstartswith_op_binary(self, binary, operator, **kw):
        return "%s NOT STARTING WITH %s" % (
            binary.left._compiler_dispatch(self, **kw),
            binary.right._compiler_dispatch(self, **kw),
        )

    def visit_mod_binary(self, binary, operator, **kw):
        # Firebird has no % operator; use the mod() function.
        return "mod(%s, %s)" % (
            self.process(binary.left, **kw),
            self.process(binary.right, **kw),
        )

    def visit_alias(self, alias, asfrom=False, **kwargs):
        if self.dialect._version_two:
            # BUGFIX: the alias element was dropped from the call
            # (``visit_alias(asfrom=...)``), which raises a TypeError on
            # the base implementation.  Forward it.
            return super(FBCompiler, self).visit_alias(
                alias, asfrom=asfrom, **kwargs
            )
        elif asfrom:
            # Firebird 1.5 does not accept the AS keyword for aliases.
            alias_name = (
                isinstance(alias.name, expression._truncated_label)
                and self._truncated_identifier("alias", alias.name)
                or alias.name
            )
            # BUGFIX: the aliased element itself must be rendered;
            # ``self.process(asfrom=..., **kwargs)`` had lost the
            # ``alias.original`` positional argument.
            return (
                self.process(alias.original, asfrom=asfrom, **kwargs)
                + " "
                + self.preparer.format_alias(alias, alias_name)
            )
        else:
            return self.process(alias.original, **kwargs)

    def visit_substring_func(self, func, **kw):
        """Render SUBSTRING(x FROM start [FOR length])."""
        s = self.process(func.clauses.clauses[0])
        start = self.process(func.clauses.clauses[1])
        if len(func.clauses.clauses) > 2:
            length = self.process(func.clauses.clauses[2])
            return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
        else:
            return "SUBSTRING(%s FROM %s)" % (s, start)

    def visit_length_func(self, function, **kw):
        # char_length is only available from Firebird 2.0 on; older servers
        # use the strlen UDF.
        if self.dialect._version_two:
            return "char_length" + self.function_argspec(function)
        else:
            return "strlen" + self.function_argspec(function)

    visit_char_length_func = visit_length_func

    def function_argspec(self, func, **kw):
        # BUGFIX: previously fell through and returned None when
        # ``func.clauses`` was None; always return a string.
        if func.clauses is not None and len(func.clauses):
            return self.process(func.clause_expr, **kw)
        return ""

    def default_from(self):
        # Firebird has no standalone SELECT; use the system table.
        return " FROM rdb$database"

    def visit_sequence(self, seq):
        return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)

    def get_select_precolumns(self, select, **kw):
        """Called when building a ``SELECT`` statement, position is just
        before column list Firebird puts the limit and offset right
        after the ``SELECT``...
        """
        result = ""
        if select._limit_clause is not None:
            result += "FIRST %s " % (self.process(select._limit_clause, **kw))
        if select._offset_clause is not None:
            result += "SKIP %s " % (self.process(select._offset_clause, **kw))
        if select._distinct:
            result += "DISTINCT "
        return result

    def limit_clause(self, select, **kw):
        """Already taken care of in the `get_select_precolumns` method."""
        return ""

    def returning_clause(self, stmt, returning_cols):
        """Render the RETURNING clause for INSERT/UPDATE/DELETE."""
        columns = [
            self._label_select_column(None, c, True, False, {})
            for c in expression._select_iterables(returning_cols)
        ]
        return "RETURNING " + ", ".join(columns)


class FBDDLCompiler(sql.compiler.DDLCompiler):
    """Firebird syntactic idiosyncrasies"""

    def visit_create_sequence(self, create):
        """Generate a ``CREATE GENERATOR`` statement for the sequence.

        Raises ``NotImplementedError`` for START WITH / INCREMENT BY,
        which Firebird generators do not support.
        """
        # BUGFIX: ``raise NotImplemented(...)`` is invalid — NotImplemented
        # is a non-callable, non-exception constant; raising it produces a
        # TypeError.  NotImplementedError is the correct exception.
        if create.element.start is not None:
            raise NotImplementedError(
                "Firebird SEQUENCE doesn't support START WITH"
            )
        if create.element.increment is not None:
            raise NotImplementedError(
                "Firebird SEQUENCE doesn't support INCREMENT BY"
            )
        if self.dialect._version_two:
            return "CREATE SEQUENCE %s" % self.preparer.format_sequence(create.element)
        else:
            return "CREATE GENERATOR %s" % self.preparer.format_sequence(create.element)

    def visit_drop_sequence(self, drop):
        """Generate a ``DROP GENERATOR`` statement for the sequence."""
        if self.dialect._version_two:
            return "DROP SEQUENCE %s" % self.preparer.format_sequence(drop.element)
        else:
            return "DROP GENERATOR %s" % self.preparer.format_sequence(drop.element)


class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
    """Install Firebird specific reserved words."""

    # Firebird's reserved-word list (see RESERVED_WORDS above); identifiers
    # colliding with these are quoted.
    reserved_words = RESERVED_WORDS
    # Identifiers may not begin with an underscore in Firebird.
    illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union(["_"])

    def __init__(self, dialect):
        # Firebird has no schema concept, so schema names are never rendered.
        super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)


class FBExecutionContext(default.DefaultExecutionContext):
    def fire_sequence(self, seq, type_):
        """Get the next value from the sequence using ``gen_id()``."""
        seq_name = self.dialect.identifier_preparer.format_sequence(seq)
        return self._execute_scalar(
            "SELECT gen_id(%s, 1) FROM rdb$database" % seq_name, type_
        )


class FBDialect(default.DefaultDialect):
    """Firebird dialect"""

    name = "firebird"
    max_identifier_length = 31
    supports_sequences = True
    sequences_optional = False
    supports_default_values = True
    postfetch_lastrowid = False
    supports_native_boolean = False
    requires_name_normalize = True
    supports_empty_insert = False
    statement_compiler = FBCompiler
    ddl_compiler = FBDDLCompiler
    preparer = FBIdentifierPreparer
    type_compiler = FBTypeCompiler
    execution_ctx_cls = FBExecutionContext
    colspecs = colspecs
    ischema_names = ischema_names
    construct_arguments = []
    # Defaults to SQL dialect 3; re-detected in initialize() on connect.
    _version_two = True

    def initialize(self, connection):
        """Detect the server flavor/version and adjust dialect behavior."""
        super(FBDialect, self).initialize(connection)
        # Firebird >= 2.0 and Interbase >= 6.0 speak "dialect 3".
        # (Parentheses added for clarity only; `and` already binds tighter
        # than `or`.)
        self._version_two = (
            "firebird" in self.server_version_info
            and self.server_version_info >= (2,)
        ) or (
            "interbase" in self.server_version_info
            and self.server_version_info >= (6,)
        )
        if not self._version_two:
            # Pre-2.0 servers reflect TIMESTAMP as DATE and bind DateTime
            # values as DATE.  Copy the map so the class-level one is
            # untouched.
            self.ischema_names = ischema_names.copy()
            self.ischema_names["TIMESTAMP"] = sqltypes.DATE
            self.colspecs = {sqltypes.DateTime: sqltypes.DATE}
        # implicit RETURNING only on dialect-3 servers, and only if the
        # user did not explicitly disable it on the instance.
        self.implicit_returning = self._version_two and self.__dict__.get(
            "implicit_returning", True
        )

    def normalize_name(self, name):
        """Convert a name from the database's case convention to SQLAlchemy's.

        BUGFIX: the mixed-case and lowercase branches were nested under the
        all-uppercase test, so any name that was not entirely uppercase
        silently returned None.  Restored the elif chain.
        """
        # Remove trailing spaces: Firebird uses a CHAR() type that is
        # padded with spaces.
        name = name and name.rstrip()
        if name is None:
            return None
        elif name.upper() == name and not self.identifier_preparer._requires_quotes(
            name.lower()
        ):
            # Case-insensitive name stored uppercase: present it lowercase.
            return name.lower()
        elif name.lower() == name:
            # All-lowercase in the DB means it was quoted; preserve that.
            return quoted_name(name, quote=True)
        else:
            return name

    def denormalize_name(self, name):
        """Convert a name from SQLAlchemy's convention to the database's."""
        if name is None:
            return None
        elif name.lower() == name and not self.identifier_preparer._requires_quotes(
            name.lower()
        ):
            # Case-insensitive name: Firebird stores it uppercase.
            return name.upper()
        else:
            return name

    def has_table(self, connection, table_name, schema=None):
        """Return ``True`` if the given table exists, ignoring
        the `schema`."""
        tblqry = "\n        SELECT 1 AS has_table FROM rdb$database\n        WHERE EXISTS (SELECT rdb$relation_name\n                      FROM rdb$relations\n                      WHERE rdb$relation_name=?)\n        "
        c = connection.execute(tblqry, [self.denormalize_name(table_name)])
        return c.first() is not None

    def has_sequence(self, connection, sequence_name, schema=None):
        """Return ``True`` if the given sequence (generator) exists."""
        genqry = "\n        SELECT 1 AS has_sequence FROM rdb$database\n        WHERE EXISTS (SELECT rdb$generator_name\n                      FROM rdb$generators\n                      WHERE rdb$generator_name=?)\n        "
        c = connection.execute(genqry, [self.denormalize_name(sequence_name)])
        return c.first() is not None

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        """Return the names of all user tables (views excluded)."""
        s = "\n        select rdb$relation_name\n        from rdb$relations\n        where rdb$view_blr is null\n        and (rdb$system_flag is null or rdb$system_flag = 0);\n        "
        return [self.normalize_name(row[0]) for row in connection.execute(s)]

    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        """Return the names of all user views."""
        s = "\n        select rdb$relation_name\n        from rdb$relations\n        where rdb$view_blr is not null\n        and (rdb$system_flag is null or rdb$system_flag = 0);\n        "
        return [self.normalize_name(row[0]) for row in connection.execute(s)]

    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        """Return the source text of the named view, or None if absent."""
        qry = "\n        SELECT rdb$view_source AS view_source\n        FROM rdb$relations\n        WHERE rdb$relation_name=?\n        "
        rp = connection.execute(qry, [self.denormalize_name(view_name)])
        row = rp.first()
        if row:
            return row["view_source"]
        else:
            return None

    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """Return primary-key info as {'constrained_columns': [...], 'name': None}."""
        keyqry = "\n        SELECT se.rdb$field_name AS fname\n        FROM rdb$relation_constraints rc\n             JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name\n        WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?\n        "
        tablename = self.denormalize_name(table_name)
        c = connection.execute(keyqry, ["PRIMARY KEY", tablename])
        pkfields = [self.normalize_name(r["fname"]) for r in c.fetchall()]
        return {"constrained_columns": pkfields, "name": None}

    @reflection.cache
    def get_column_sequence(
        self, connection, table_name, column_name, schema=None, **kw
    ):
        """Find the generator feeding the column via a BEFORE INSERT trigger.

        Returns ``{'name': <generator name>}`` or None when no such
        trigger/generator pair exists.
        """
        tablename = self.denormalize_name(table_name)
        colname = self.denormalize_name(column_name)
        genqry = "\n        SELECT trigdep.rdb$depended_on_name AS fgenerator\n        FROM rdb$dependencies tabdep\n             JOIN rdb$dependencies trigdep\n                  ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name\n                     AND trigdep.rdb$depended_on_type=14\n                     AND trigdep.rdb$dependent_type=2\n             JOIN rdb$triggers trig ON\n                    trig.rdb$trigger_name=tabdep.rdb$dependent_name\n        WHERE tabdep.rdb$depended_on_name=?\n          AND tabdep.rdb$depended_on_type=0\n          AND trig.rdb$trigger_type=1\n          AND tabdep.rdb$field_name=?\n          AND (SELECT count(*)\n           FROM rdb$dependencies trigdep2\n           WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2\n        "
        genr = connection.execute(genqry, [tablename, colname]).first()
        if genr is not None:
            return dict(name=self.normalize_name(genr["fgenerator"]))

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """Return column reflection dicts for the given table.

        BUGFIX: the type-dispatch chain had been mangled so that
        ``cols.append(col_d)`` lived inside the ``else`` of the TEXT
        branch — TEXT columns were parsed but never returned — and the
        Integer / VARYING / CSTRING cases were wrongly nested.  Restored
        the flat elif chain with a single append per row.
        """
        tblqry = "\n        SELECT r.rdb$field_name AS fname,\n                        r.rdb$null_flag AS null_flag,\n                        t.rdb$type_name AS ftype,\n                        f.rdb$field_sub_type AS stype,\n                        f.rdb$field_length/\n                            COALESCE(cs.rdb$bytes_per_character,1) AS flen,\n                        f.rdb$field_precision AS fprec,\n                        f.rdb$field_scale AS fscale,\n                        COALESCE(r.rdb$default_source,\n                                f.rdb$default_source) AS fdefault\n        FROM rdb$relation_fields r\n             JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name\n             JOIN rdb$types t\n              ON t.rdb$type=f.rdb$field_type AND\n                    t.rdb$field_name='RDB$FIELD_TYPE'\n             LEFT JOIN rdb$character_sets cs ON\n                    f.rdb$character_set_id=cs.rdb$character_set_id\n        WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?\n        ORDER BY r.rdb$field_position\n        "
        pk_constraint = self.get_pk_constraint(connection, table_name)
        pkey_cols = pk_constraint["constrained_columns"]
        tablename = self.denormalize_name(table_name)
        c = connection.execute(tblqry, [tablename])
        cols = []
        for row in c:
            name = self.normalize_name(row["fname"])
            orig_colname = row["fname"]

            # Determine the column type.
            colspec = row["ftype"].rstrip()
            coltype = self.ischema_names.get(colspec)
            if coltype is None:
                util.warn(
                    "Did not recognize type '%s' of column '%s'" % (colspec, name)
                )
                coltype = sqltypes.NULLTYPE
            elif issubclass(coltype, Integer) and row["fprec"] != 0:
                # Integer storage with nonzero precision is a NUMERIC;
                # Firebird reports scale as a negative number.
                coltype = NUMERIC(precision=row["fprec"], scale=row["fscale"] * -1)
            elif colspec in ("VARYING", "CSTRING"):
                coltype = coltype(row["flen"])
            elif colspec == "TEXT":
                coltype = TEXT(row["flen"])
            elif colspec == "BLOB":
                # Sub-type 1 is a text blob; sub-type 0 is binary.
                if row["stype"] == 1:
                    coltype = TEXT()
                else:
                    coltype = BLOB()
            else:
                coltype = coltype()

            # Extract the default value, if any.  It comes down as
            # "DEFAULT 'value'"; the keyword may be lower case and may be
            # surrounded by extra whitespace.
            defvalue = None
            if row["fdefault"] is not None:
                defexpr = row["fdefault"].lstrip()
                if not defexpr[:8].rstrip().upper() == "DEFAULT":
                    raise AssertionError("Unrecognized default value: %s" % defexpr)
                defvalue = defexpr[8:].strip()
                if defvalue == "NULL":
                    # Redundant with nullability.
                    defvalue = None

            col_d = {
                "name": name,
                "type": coltype,
                "nullable": not bool(row["null_flag"]),
                "default": defvalue,
                "autoincrement": "auto",
            }
            if orig_colname.lower() == orig_colname:
                col_d["quote"] = True

            # If the PK is a single field, check whether it is linked to a
            # generator through a trigger.
            if len(pkey_cols) == 1 and name == pkey_cols[0]:
                seq_d = self.get_column_sequence(connection, tablename, name)
                if seq_d is not None:
                    col_d["sequence"] = seq_d

            cols.append(col_d)

        return cols

    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        """Return foreign-key reflection dicts, grouped by constraint name."""
        fkqry = "\n        SELECT rc.rdb$constraint_name AS cname,\n               cse.rdb$field_name AS fname,\n               ix2.rdb$relation_name AS targetrname,\n               se.rdb$field_name AS targetfname\n        FROM rdb$relation_constraints rc\n             JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name\n             JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key\n             JOIN rdb$index_segments cse ON\n                        cse.rdb$index_name=ix1.rdb$index_name\n             JOIN rdb$index_segments se\n                  ON se.rdb$index_name=ix2.rdb$index_name\n                     AND se.rdb$field_position=cse.rdb$field_position\n        WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?\n        ORDER BY se.rdb$index_name, se.rdb$field_position\n        "
        tablename = self.denormalize_name(table_name)
        c = connection.execute(fkqry, ["FOREIGN KEY", tablename])
        fks = util.defaultdict(
            lambda: {
                "name": None,
                "constrained_columns": [],
                "referred_schema": None,
                "referred_table": None,
                "referred_columns": [],
            }
        )
        for row in c:
            cname = self.normalize_name(row["cname"])
            fk = fks[cname]
            if not fk["name"]:
                fk["name"] = cname
                fk["referred_table"] = self.normalize_name(row["targetrname"])
            fk["constrained_columns"].append(self.normalize_name(row["fname"]))
            fk["referred_columns"].append(self.normalize_name(row["targetfname"]))

        return list(fks.values())

    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        """Return index reflection dicts (constraint-backed indexes excluded)."""
        qry = "\n        SELECT ix.rdb$index_name AS index_name,\n               ix.rdb$unique_flag AS unique_flag,\n               ic.rdb$field_name AS field_name\n        FROM rdb$indices ix\n             JOIN rdb$index_segments ic\n                  ON ix.rdb$index_name=ic.rdb$index_name\n             LEFT OUTER JOIN rdb$relation_constraints\n                  ON rdb$relation_constraints.rdb$index_name =\n                        ic.rdb$index_name\n        WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL\n          AND rdb$relation_constraints.rdb$constraint_type IS NULL\n        ORDER BY index_name, ic.rdb$field_position\n        "
        c = connection.execute(qry, [self.denormalize_name(table_name)])
        indexes = util.defaultdict(dict)
        for row in c:
            indexrec = indexes[row["index_name"]]
            if "name" not in indexrec:
                indexrec["name"] = self.normalize_name(row["index_name"])
                indexrec["column_names"] = []
                indexrec["unique"] = bool(row["unique_flag"])
            indexrec["column_names"].append(self.normalize_name(row["field_name"]))

        return list(indexes.values())
