#
#  Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import contextlib
import logging
import os
import re
import json
import time
import copy
import psycopg2
from psycopg2 import pool, sql
from psycopg2.extras import execute_values
from rag import settings
from rag.settings import PAGERANK_FLD
from rag.utils import singleton
import pandas as pd
from api.utils.file_utils import get_project_base_directory

from rag.utils.doc_store_conn import (
    DocStoreConnection,
    MatchExpr,
    MatchTextExpr,
    MatchDenseExpr,
    FusionExpr,
    OrderByExpr,
)

logger = logging.getLogger('ragflow.vastbase_conn')



def get_table_exists(conn: psycopg2.extensions.connection, table_name: str) -> bool:
    """Return True when a table named *table_name* is visible in information_schema."""
    probe_sql = """
        SELECT EXISTS (
            SELECT 1 FROM information_schema.tables 
            WHERE table_name = %s
        )
        """
    with conn.cursor() as cur:
        cur.execute(probe_sql, (table_name,))
        row = cur.fetchone()
    return row[0]


def get_table_instance(conn: psycopg2.extensions.connection, table_name: str):
    """Return the column metadata rows (name, type, default, nullable) for
    *table_name*, or None when the table does not exist."""
    check_table_exists_sql = """
        SELECT EXISTS (
            SELECT 1 FROM information_schema.tables 
            WHERE table_name = %s
        )
        """
    table_columns_sql = """
            SELECT column_name, data_type, column_default, is_nullable
            FROM information_schema.columns
            WHERE table_name=%s
            """
    with conn.cursor() as cur:
        logger.debug(f"Checking if table {table_name} exists with SQL: {check_table_exists_sql}")
        cur.execute(check_table_exists_sql, (table_name,))
        if not cur.fetchone()[0]:
            # Missing table: signal with None rather than an empty list.
            return None
        logger.debug(f"Fetching columns for table {table_name} with SQL: {table_columns_sql}")
        cur.execute(table_columns_sql, (table_name,))
        return cur.fetchall()
        
def field_keyword(field_name: str) -> bool:
    """Tell whether *field_name* holds keyword data stored as a '###'-joined string.

    "docnm_kwd" and "knowledge_graph_kwd" are always plain strings, not lists,
    so they are excluded even though they carry the "_kwd" suffix.
    """
    if field_name == "source_id":
        return True
    return field_name.endswith("_kwd") and field_name not in ("docnm_kwd", "knowledge_graph_kwd")

def equivalent_condition_to_str(condition: dict, table_instance=None) -> str | None:
    assert "_id" not in condition
    clmns = {}
    if table_instance:
        for n, ty, de, _ in table_instance:
            clmns[n] = (ty, de)

    def exists(cln):
        nonlocal clmns
        assert cln in clmns, f"'{cln}' should be in '{clmns}'."
        ty, de = clmns[cln]
        if ty.lower().find("cha"):
            if not de:
                de = ""
            return f" {cln}!='{de}' "
        return f"{cln}!={de}"

    cond = list()
    for k, v in condition.items():
        if not isinstance(k, str) or k in ["kb_id"] or not v:
            continue
        if field_keyword(k):
            # if isinstance(v, list):
            #     inCond = list()
            #     pattern = r'[~^]\d+(?:\.\d+)?'
            #     for item in v:
            #         if isinstance(item, str):
            #             item = item.replace("'","''")
            #         # inCond.append(f"filter_fulltext('{k}', '{item}')")
            #         matching_text = re.sub(pattern, '', item)
            #         inCond.append(f"{k} @~@ '{matching_text}'")
            #     if inCond:
            #         strInCond = " and ".join(inCond)
            #         strInCond = f"({strInCond})"
            #         cond.append(strInCond)
            # else:
            #     # cond.append(f"filter_fulltext('{k}', '{v}')")
            #     matching_text = re.sub(pattern, '', item)
            #     cond.append(f"{k} @~@ '{matching_text}'")
            pass
        elif isinstance(v, list):
            inCond = list()
            for item in v:
                if isinstance(item, str):
                    item = item.replace("'","''")
                    inCond.append(f"'{item}'")
                else:
                    inCond.append(str(item))
            if inCond:
                strInCond = ", ".join(inCond)
                strInCond = f"{k} IN ({strInCond})"
                cond.append(strInCond)
        elif k == "must_not":
            if isinstance(v, dict):
                for kk, vv in v.items():
                    if kk == "exists":
                        cond.append("NOT (%s)" % exists(vv))
        elif isinstance(v, str):
            cond.append(f"{k}='{v}'")
        elif k == "exists":
            cond.append(exists(v))
        else:
            cond.append(f"{k}={str(v)}")
    return " AND ".join(cond) if cond else "1=1"


def concat_dataframes(df_list: list[pd.DataFrame], selectFields: list[str]) -> pd.DataFrame:
    """Concatenate the non-empty frames in *df_list* with a fresh index.

    When every frame is empty, return an empty frame whose columns follow
    *selectFields*, mapping the pseudo-fields 'score()' / 'similarity()' to
    the 'SCORE' / 'SIMILARITY' column names the search SQL produces.
    """
    non_empty = [frame for frame in df_list if not frame.empty]
    if non_empty:
        return pd.concat(non_empty, axis=0).reset_index(drop=True)

    # Workaround: the result schema renames the score pseudo-fields.
    pseudo_to_column = {'score()': 'SCORE', 'similarity()': 'SIMILARITY'}
    schema = [pseudo_to_column.get(field_name, field_name) for field_name in selectFields]
    return pd.DataFrame(columns=schema)


@singleton
class VBConnection(DocStoreConnection):
    def __init__(self):
        """Connect to Vastbase and keep a thread-safe connection pool.

        Connection settings (host, port, user, password, db_name) come from
        settings.VB. Connection is retried 24 times with a 5s sleep (~120s
        budget); failure to connect within that window raises Exception.
        """
        self.dbName = settings.VB.get("db_name", "rag_flow")
        vb_host = settings.VB.get("host", "vastbase")
        vb_port = settings.VB.get("port", 5432)
        vb_user = settings.VB.get("user", "rag_flow")
        vb_password = settings.VB.get("password", "infini_rag_flow")

        self.connPool = None

        logger.info(f"Use Vastbase with floatvector at {vb_host}:{vb_port} as the doc engine.")

        # Try to connect to Vastbase: 24 attempts x 5s sleep ~= the 120s
        # budget mentioned in the error message below.
        for _ in range(24):
            try:
                # Create ThreadedConnectionPool
                connPool = pool.ThreadedConnectionPool(
                    minconn=5,
                    maxconn=20,
                    host=vb_host,
                    port=vb_port,
                    user=vb_user,
                    password=vb_password,
                    database=self.dbName
                )

                # Smoke-test one pooled connection before accepting the pool.
                conn = connPool.getconn()
                try:
                    with conn.cursor() as cur:
                        cur.execute("SELECT 1")
                        cur.close()
                finally:
                    if conn:
                        connPool.putconn(conn)
                self.connPool = connPool
                break
            except Exception as e:
                logger.warning(f"{str(e)}. Waiting Vastbase {vb_host}:{vb_port} to be healthy.")
                time.sleep(5)

        if self.connPool is None:
            msg = f"Vastbase {vb_host}:{vb_port} is unhealthy in 120s."
            logger.error(msg)
            raise Exception(msg)

        logger.info(f"Vastbase {vb_host}:{vb_port} is healthy.")

    @contextlib.contextmanager
    def get_conn(self):
        """
        Get a connection from the pool.

        Yields a pooled connection. On any exception the transaction is
        rolled back, the error logged, and the exception re-raised; in all
        cases the connection is returned to the pool.
        """
        conn = self.connPool.getconn()
        try:
            yield conn
        except Exception as e:
            logger.error(f"Error in Vastbase connection: {str(e)}")
            conn.rollback()
            raise
        finally:
            self.connPool.putconn(conn)

    """
    Database operations
    """

    def dbType(self) -> str:
        """Name of the underlying document-store engine."""
        engine_name = "vastbase"
        return engine_name

    def health(self) -> dict:
        """
        Return the health status of the database.

        Probes the server with ``SELECT vb_version()``; "green" on success,
        "red" with the error text otherwise.
        """
        with self.get_conn() as vb_conn:
            status = {
                "type": "vastbase",
                "status": "green",
                "error": ""
            }
            try:
                with vb_conn.cursor() as cur:
                    cur.execute("SELECT vb_version()")
                    cur.fetchone()
            except Exception as e:
                status = {
                    "type": "vastbase",
                    "status": "red",
                    "error": str(e)
                }
            return status

    """
    Table operations
    """

    def createIdx(self, indexName: str, knowledgebaseId: str, vectorSize: int):
        """Create the chunk table plus its vector and full-text indexes.

        The scalar columns come from conf/vastbase_mapping.json; a
        ``q_{vectorSize}_vec floatvector`` column is appended with an HNSW
        cosine index, and a single fulltext index covers the tokenized text
        fields.

        Args:
            indexName: index prefix of the table name.
            knowledgebaseId: knowledge-base id, suffix of the table name.
            vectorSize: dimensionality of the embedding column.

        Raises:
            Exception: when the mapping file is missing.
        """
        table_name = f"{indexName}_{knowledgebaseId}"
        with self.get_conn() as vb_conn:
            fp_mapping = os.path.join(
                get_project_base_directory(), "conf", "vastbase_mapping.json"
            )
            if not os.path.exists(fp_mapping):
                raise Exception(f"Mapping file not found at {fp_mapping}")
            # Close the mapping file deterministically; the previous
            # json.load(open(...)) leaked the file handle.
            with open(fp_mapping) as mapping_file:
                schema = json.load(mapping_file)
            vector_name = f"q_{vectorSize}_vec"

            columns = []
            # Process field definitions from mapping
            for field_name, field_info in schema.items():
                field_type = field_info["type"]
                field_default = field_info['default']
                columns.append(sql.SQL("{field_name} {field_type} DEFAULT {field_default}").format(
                    field_name=sql.Identifier(field_name),
                    field_type=sql.SQL(field_type),
                    field_default=sql.Literal(field_default)
                ))

            # Add vector field
            columns.append(sql.SQL("{vector_name} floatvector({vectorSize})").format(
                vector_name=sql.Identifier(vector_name),
                vectorSize=sql.SQL(str(vectorSize))
            ))

            # Create table
            create_table_sql = sql.SQL("""
            CREATE TABLE IF NOT EXISTS {table_name} (
                {columns}
            )
            """).format(
                table_name=sql.Identifier(table_name),
                columns=sql.SQL(", ").join(columns)
            )

            with vb_conn.cursor() as cur:
                # Use the module logger (was the root `logging` logger) and log
                # before executing, consistent with the statements below.
                logger.debug(f"VASTBASE create table SQL: {create_table_sql.as_string(vb_conn)}")
                cur.execute(create_table_sql)
                # Create vector index using HNSW (Hierarchical Navigable Small World) index
                create_q_vex_idx_sql = sql.SQL("""
                CREATE INDEX IF NOT EXISTS {index_name} 
                ON {table_name} USING hnsw ({vector_name} floatvector_cosine_ops)
                WITH (m=16, ef_construction=50)
                """).format(
                    index_name=sql.Identifier(f'q_vec_idx_{table_name}'),
                    table_name=sql.Identifier(table_name),
                    vector_name=sql.Identifier(vector_name)
                )
                logger.debug(f"VASTBASE create vector index SQL: {create_q_vex_idx_sql.as_string(vb_conn)}")
                cur.execute(create_q_vex_idx_sql)

                # Create full-text indexes for text fields.
                # NOTE(review): the list is hard-coded; the commented-out loop
                # below would derive it from the mapping instead — confirm why
                # it was disabled before re-enabling.
                text_idx_fields = [
                    ("title_tks", "cn_tokenizer"),
                    ("title_sm_tks", "cn_tokenizer"),
                    ("important_kwd", "cn_tokenizer"),
                    ("important_tks", "cn_tokenizer"),
                    ("question_tks", "cn_tokenizer"),
                    ("content_ltks", "cn_tokenizer"),
                    ("content_sm_ltks", "cn_tokenizer")
                ]
                # for field_name, field_info in schema.items():
                #     if field_info["type"] != "text" or "analyzer" not in field_info:
                #         continue
                #     text_idx_fields.append((field_name, field_info["analyzer"]))
                if text_idx_fields:
                    create_idx_sql = sql.SQL("""
                        CREATE INDEX IF NOT EXISTS {}
                        ON {} USING fulltext({});
                        """).format(
                            sql.Identifier(f'text_idx_{table_name}'),
                            sql.Identifier(table_name),
                            sql.SQL(', ').join([sql.Identifier(f) for f, _ in text_idx_fields])
                        )
                    logger.debug(f"VASTBASE create text index SQL: {create_idx_sql.as_string(vb_conn)}")
                    cur.execute(create_idx_sql)
                vb_conn.commit()
        logger.info(
            f"VASTBASE created table {table_name}, vector size {vectorSize}"
        )

    def deleteIdx(self, indexName: str, knowledgebaseId: str):
        """Drop the table backing the given index / knowledge-base pair."""
        table_name = f"{indexName}_{knowledgebaseId}"
        drop_sql = sql.SQL("DROP TABLE IF EXISTS {table_name}").format(
            table_name=sql.Identifier(table_name)
        )
        with self.get_conn() as vb_conn:
            logger.debug(f"VASTBASE drop table SQL: {drop_sql.as_string(vb_conn)}")
            with vb_conn.cursor() as cur:
                cur.execute(drop_sql)
                vb_conn.commit()
        logger.info(f"VASTBASE dropped table {table_name}")

    def indexExist(self, indexName: str, knowledgebaseId: str) -> bool:
        """Report whether the table for this index / knowledge-base pair exists."""
        table_name = f"{indexName}_{knowledgebaseId}"
        with self.get_conn() as vb_conn:
            return get_table_exists(vb_conn, table_name)

    """
    CRUD operations
    """

    def search(
            self, selectFields: list[str],
            highlightFields: list[str],
            condition: dict,
            matchExprs: list[MatchExpr],
            orderBy: OrderByExpr,
            offset: int,
            limit: int,
            indexNames: str | list[str],
            knowledgebaseIds: list[str],
            aggFields: list[str] = [],
            rank_feature: dict | None = None
    ) -> tuple[pd.DataFrame, int]:
        """
        Search every table named f"{indexName}_{knowledgebaseId}" and gather
        the per-table results into one DataFrame.

        Three query shapes are supported, driven by matchExprs:
          * MatchTextExpr only  -> bm25 full-text query, "SCORE" column;
          * MatchDenseExpr only -> cosine vector query, "SIMILARITY" column;
          * text + dense + FusionExpr -> weighted full-outer-join of both.
        With no match expressions, a plain filtered SELECT is issued.

        Returns:
            (result DataFrame, total hit count summed over all tables).

        TODO: Vastbase doesn't provide highlight; highlightFields, aggFields
        and rank_feature are currently ignored.
        """
        if isinstance(indexNames, str):
            indexNames = indexNames.split(",")
        assert isinstance(indexNames, list) and len(indexNames) > 0
        with self.get_conn() as vb_conn:
            df_list = list()
            table_list = list()
            # "id" is always selected so fused results can be joined on it.
            output = selectFields.copy()
            for essential_field in ["id"]:
                if essential_field not in selectFields:
                    output.append(essential_field)
            # Pick the pseudo score column: full-text takes precedence over dense.
            score_func = ""
            score_column = ""
            for matchExpr in matchExprs:
                if isinstance(matchExpr, MatchTextExpr):
                    score_func = "score()"
                    score_column = "SCORE"
                    break
            if not score_func:
                for matchExpr in matchExprs:
                    if isinstance(matchExpr, MatchDenseExpr):
                        score_func = "similarity()"
                        score_column = "SIMILARITY"
                        break
            if matchExprs:
                # if score_func not in output:
                #     output.append(score_func)
                # Pagerank is needed later to boost the final ranking.
                if PAGERANK_FLD not in output:
                    output.append(PAGERANK_FLD)
            output = [f for f in output if f != "_score"]

            # Prepare expressions common to all tables
            filter_cond = None
            filter_fulltext = None
            filter_vector = None
            if condition:
                # Render the filter against the first existing table's schema;
                # assumes all searched tables share the same schema.
                for indexName in indexNames:
                    table_name = f"{indexName}_{knowledgebaseIds[0]}"
                    table_instance = get_table_instance(vb_conn, table_name)
                    if table_instance:
                        filter_cond = equivalent_condition_to_str(condition, table_instance)
                        break

            vector_similarity_weight = 0.5
            for matchExpr in matchExprs:
                if isinstance(matchExpr, MatchTextExpr):
                    if filter_cond and "filter" not in matchExpr.extra_options:
                        matchExpr.extra_options.update({"filter": filter_cond})
                    # Strip boost/fuzziness suffixes like "^10" or "~2" that
                    # the Vastbase full-text operator does not understand.
                    pattern = r'[~^]\d+(?:\.\d+)?'
                    matching_text = re.sub(pattern, '', matchExpr.matching_text)
                    fields = [re.sub(pattern, '', f) for f in matchExpr.fields]
                    filter_fulltext = sql.SQL(' AND ').join([sql.SQL("{field} @~@ {matching_text}").format(
                        field=sql.Identifier(f),
                        matching_text=sql.Literal(matching_text)
                    ) for f in fields])
                    if filter_cond:
                        filter_fulltext = sql.SQL("({filter_cond}) AND ({filter_fulltext})").format(
                            filter_cond=sql.SQL(filter_cond),
                            filter_fulltext=filter_fulltext
                        )
                    # minimum_should_match is not supported yet
                    # minimum_should_match = matchExpr.extra_options.get("minimum_should_match", 0.0)
                    # if isinstance(minimum_should_match, float):
                    #     str_minimum_should_match = str(int(minimum_should_match * 100)) + "%"
                    #     matchExpr.extra_options["minimum_should_match"] = str_minimum_should_match
                    logger.debug(f"VASTBASE search MatchTextExpr: {json.dumps(matchExpr.__dict__)}")
                elif isinstance(matchExpr, MatchDenseExpr):
                    # Optional similarity floor: keep rows with
                    # cosine similarity (1 - distance) >= threshold.
                    # NOTE(review): when "similarity" is absent, filter_vector
                    # stays None and the WHERE template below is formatted with
                    # None — confirm callers always supply it.
                    similarity = matchExpr.extra_options.get("similarity")
                    if similarity:
                        filter_vector = sql.SQL("1 - ({vec_col} <=> {vec}) >= {similarity}").format(
                            vec_col=sql.Identifier(matchExpr.vector_column_name),
                            vec=sql.Literal([float(v) for v in matchExpr.embedding_data]),
                            similarity=sql.Literal(similarity),
                        )
                    logger.debug(f"VASTBASE search MatchDenseExpr: {json.dumps(matchExpr.__dict__)}")
                elif isinstance(matchExpr, FusionExpr):
                    if isinstance(matchExpr, FusionExpr) and matchExpr.method == "weighted_sum" and "weights" in matchExpr.fusion_params:
                        # Fusion requires exactly [text, dense, fusion] order.
                        assert len(matchExprs) == 3 and isinstance(matchExprs[0], MatchTextExpr) and isinstance(
                            matchExprs[1],
                            MatchDenseExpr) and isinstance(
                            matchExprs[2], FusionExpr)
                        # "weights" is "text_weight,vector_weight"; only the
                        # vector part is kept, text weight = 1 - vector weight.
                        weights = matchExpr.fusion_params["weights"]
                        vector_similarity_weight = float(weights.split(",")[1])
                    logger.debug(f"VASTBASE search FusionExpr: {json.dumps(matchExpr.__dict__)}")

            order_by_expr_list = list()
            if orderBy.fields:
                for order_field in orderBy.fields:
                    # 0 means ascending, anything else descending.
                    if order_field[1] == 0:
                        order_by_expr_list.append((order_field[0], "ASC"))
                    else:
                        order_by_expr_list.append((order_field[0], "DESC"))

            total_hits_count = 0
            # Scatter search tables and gather the results
            for indexName in indexNames:
                for knowledgebaseId in knowledgebaseIds:
                    table_name = f"{indexName}_{knowledgebaseId}"
                    try:
                        table_exists = get_table_exists(vb_conn, table_name)
                        if not table_exists:
                            logger.warning(f"Table {table_name} not found, skipping...")
                            continue
                    except Exception:
                        logger.warning(f"Error checking table {table_name}, skipping...")
                        continue
                    table_list.append(table_name)
                    select_fields = sql.SQL(', ').join([sql.Identifier(field) for field in output])
                    sql_expr = None
                    filter_fulltext_expr = None
                    filter_vector_expr = None
                    if len(matchExprs) > 0:
                        for matchExpr in matchExprs:
                            if isinstance(matchExpr, MatchTextExpr):
                                # bm25 scores are normalised by the window max so
                                # they can later be blended with cosine similarity.
                                filter_fulltext_expr = sql.SQL("""
                                SELECT {select_fields}, (bm25_score/MAX(bm25_score) OVER()) as "SCORE"
                                FROM (SELECT {select_fields}, bm25_score() as bm25_score
                                FROM {table_name}
                                WHERE {filter_fulltext})
                                WHERE bm25_score > 0
                                ORDER BY bm25_score DESC
                                LIMIT {limit}
                                """).format(
                                    select_fields=select_fields,
                                    table_name=sql.Identifier(table_name),
                                    filter_fulltext=filter_fulltext,
                                    limit=sql.Literal(matchExpr.topn)
                                )
                                sql_expr = filter_fulltext_expr
                            elif isinstance(matchExpr, MatchDenseExpr):
                                # <=> is cosine distance; similarity = 1 - distance.
                                filter_vector_expr = sql.SQL("""
                                SELECT {select_fields}, (1-({vec_col}<=>{vec})) AS "SIMILARITY"
                                FROM {table_name}
                                WHERE {filter_vector}
                                ORDER BY {vec_col}<=>{vec}
                                LIMIT {limit}
                                """).format(
                                    select_fields=select_fields,
                                    vec_col=sql.Identifier(matchExpr.vector_column_name),
                                    vec=sql.Literal([float(v) for v in matchExpr.embedding_data]),
                                    table_name=sql.Identifier(table_name),
                                    filter_vector=filter_vector,
                                    limit=sql.Literal(matchExpr.topn)
                                )
                                # Keep the text query as the main expression if
                                # one was already built; fusion overrides both.
                                if not sql_expr:
                                    sql_expr = filter_vector_expr
                            elif isinstance(matchExpr, FusionExpr):
                                # Weighted sum over a FULL OUTER JOIN on id so
                                # rows hit by only one sub-query still score.
                                # NOTE(review): the score_column kwarg below has
                                # no matching placeholder in this template.
                                sql_expr = sql.SQL("""
                                WITH filter_fulltext AS ({filter_fulltext_expr}),
                                     filter_vector AS ({filter_vector_expr})
                                SELECT {select_fields}, (COALESCE(a."SCORE", 0) * {fulltext_weight} + COALESCE(b."SIMILARITY", 0) * {vector_similarity_weight}) AS "SCORE"
                                FROM filter_fulltext a
                                FULL OUTER JOIN filter_vector b
                                ON a.id = b.id
                                ORDER BY (COALESCE(a."SCORE", 0) * {fulltext_weight} + COALESCE(b."SIMILARITY", 0) * {vector_similarity_weight}) DESC
                                LIMIT {limit}
                                """).format(
                                    filter_fulltext_expr=filter_fulltext_expr,
                                    filter_vector_expr=filter_vector_expr,
                                    select_fields=sql.SQL(', ').join([sql.SQL("COALESCE(a.{field},b.{field}) AS {field}").format(
                                        field=sql.Identifier(field)
                                        ) for field in output]),
                                    fulltext_weight=sql.Literal(1-vector_similarity_weight),
                                    vector_similarity_weight=sql.Literal(vector_similarity_weight),
                                    score_column=sql.Identifier(score_column),
                                    limit=sql.Literal(matchExpr.topn)
                                )
                    else:
                        # NOTE(review): filter_cond may still be None here (no
                        # condition, or no table matched above), in which case
                        # len(None) raises TypeError and sql_expr stays None —
                        # confirm callers always pass a condition when
                        # matchExprs is empty.
                        if len(filter_cond) > 0:
                            sql_expr = sql.SQL("""
                            SELECT {select_fields}
                            FROM {table_name}
                            WHERE {filter_clause}
                            """).format(
                                select_fields=select_fields,
                                table_name=sql.Identifier(table_name),
                                filter_clause=sql.SQL(filter_cond)
                            )
                    sql_query = sql.SQL("""
                    SELECT *
                    FROM ({sub_query})
                    """).format(
                        sub_query=sql_expr
                    )
                    if orderBy.fields:
                        sql_query = sql.SQL("""
                        {sql_query}
                        ORDER BY {order_clause}
                        """).format(
                            sql_query=sql_query,
                            order_clause=sql.SQL(', ').join([sql.SQL('{field} {sort}').format(
                                field=sql.Identifier(field),
                                sort=sql.SQL(sort)
                            ) for field, sort in order_by_expr_list])
                        )
                    sql_query = sql.SQL("""
                    {sql_query}
                    LIMIT {limit} OFFSET {offset}""").format(
                        sql_query=sql_query,
                        limit=sql.Literal(limit),
                        offset=sql.Literal(offset)
                    )
                    with vb_conn.cursor() as cur:
                        logger.debug(f"Executing SQL query: {sql_query.as_string(vb_conn)}")
                        cur.execute(sql_query)
                        column_names = [desc[0] for desc in cur.description]
                        rows = cur.fetchall()
                        # NOTE(review): rowcount here counts the rows of the
                        # already LIMIT-ed sub-query per table, not the true
                        # total number of matches.
                        if rows:
                            total_hits_count += cur.rowcount
                        kb_res = pd.DataFrame(rows, columns=column_names)
                        logger.debug(f"VASTBASE search table: {str(table_list)}, result: {str(kb_res)}")
                        df_list.append(kb_res)

        res = concat_dataframes(df_list, output)
        if matchExprs:
            # Boost ranking by pagerank, then trim to the requested limit.
            # NOTE(review): if every table returned zero rows, `res` is built
            # from `output` and lacks the score column, so this raises
            # KeyError — confirm upstream guarantees at least one hit.
            res['Sum'] = res[score_column] + res[PAGERANK_FLD]
            res = res.sort_values(by='Sum', ascending=False).reset_index(drop=True).drop(columns=['Sum'])
            res = res.head(limit)
        logger.debug(f"VASTBASE search final result: {str(res)}")
        return res, total_hits_count

    def get(
            self, chunkId: str, indexName: str, knowledgebaseIds: list[str]
    ) -> dict | None:
        """Fetch one chunk by id, scanning each knowledge base's table in turn.

        Returns the chunk's field dict, or None when no table holds the id.
        """
        with self.get_conn() as vb_conn:
            assert isinstance(knowledgebaseIds, list)
            frames = list()
            visited_tables = list()
            for kb_id in knowledgebaseIds:
                table_name = f"{indexName}_{kb_id}"
                visited_tables.append(table_name)
                if not get_table_exists(vb_conn, table_name):
                    logger.warning(
                        f"Table not found: {table_name}, this knowledge base isn't created in Vastbase. Maybe it is created in other document engine.")
                    continue
                query = sql.SQL("SELECT * FROM {table_name} WHERE id = %s").format(
                    table_name=sql.Identifier(table_name)
                )
                with vb_conn.cursor() as cur:
                    cur.execute(query, (chunkId,))
                    column_names = [desc[0] for desc in cur.description]
                    rows = cur.fetchall()
                kb_res = pd.DataFrame(rows, columns=column_names)
                logger.debug(f"VASTBASE get table: {str(visited_tables)}, result: {str(kb_res)}")
                frames.append(kb_res)
            res = concat_dataframes(frames, ["id"])
            res_fields = self.getFields(res, res.columns.tolist())
            return res_fields.get(chunkId, None)

    def insert(
            self, documents: list[dict], indexName: str, knowledgebaseId: str = None
    ) -> list[str]:
        """Upsert chunk documents into the (indexName, knowledgebaseId) table.

        Creates the table lazily, inferring the vector size from the first
        document's ``q_{n}_vec`` key. Rows with the same ids are deleted
        before the batch insert (delete-then-insert upsert).

        Returns:
            An empty list on success, matching the other DocStoreConnection
            backends' "list of failed ids" contract.

        Raises:
            ValueError: when the vector size cannot be inferred or the table
                still does not exist after creation.
        """
        if not documents:
            # Nothing to insert; avoid IndexError on documents[0] below.
            return []
        with self.get_conn() as vb_conn:
            table_name = f"{indexName}_{knowledgebaseId}"
            table_instance = get_table_instance(vb_conn, table_name)
            if not table_instance:
                # Need to create the table: infer the embedding dimensionality
                # from the first document's q_{n}_vec key.
                vector_size = 0
                patt = re.compile(r"q_(?P<vector_size>\d+)_vec")
                for k in documents[0].keys():
                    m = patt.match(k)
                    if m:
                        vector_size = int(m.group("vector_size"))
                        break
                if vector_size == 0:
                    raise ValueError("Cannot infer vector size from documents")
                self.createIdx(indexName, knowledgebaseId, vector_size)
                table_instance = get_table_instance(vb_conn, table_name)

            if not table_instance:
                raise ValueError(f"Table {table_name} does not exist in Vastbase.")

            # Embedding fields can't have a default value, so missing ones are
            # filled with zero vectors below.
            # NOTE(review): this regex targets "Embedding(type,dim)" style type
            # strings; information_schema data_type is unlikely to ever match —
            # confirm whether this is dead code carried over from Infinity.
            embedding_clmns = []
            for n, ty, _, _ in table_instance:
                r = re.search(r"Embedding\([a-z]+,([0-9]+)\)", ty)
                if not r:
                    continue
                embedding_clmns.append((n, int(r.group(1))))

            docs = copy.deepcopy(documents)
            for d in docs:
                assert "_id" not in d
                assert "id" in d
                for k, v in d.items():
                    if field_keyword(k):
                        # Keyword lists are stored as one '###'-joined string.
                        if isinstance(v, list):
                            d[k] = "###".join(v)
                    elif re.search(r"_feas$", k):
                        d[k] = json.dumps(v)
                    elif k == 'kb_id':
                        if isinstance(d[k], list):
                            d[k] = d[k][0]  # since d[k] is a list, but we need a str
                    elif k == "position_int":
                        # Flatten [[a, b, ...], ...] into '_'-joined 8-digit hex.
                        assert isinstance(v, list)
                        arr = [num for row in v for num in row]
                        d[k] = "_".join(f"{num:08x}" for num in arr)
                    elif k in ["page_num_int", "top_int"]:
                        assert isinstance(v, list)
                        d[k] = "_".join(f"{num:08x}" for num in v)
                # Fill absent embedding columns with zero vectors. This loop
                # previously sat inside the items() iteration above, which both
                # repeated the work for every key and mutated the dict while it
                # was being iterated (RuntimeError whenever a column was added).
                for n, vs in embedding_clmns:
                    if n not in d:
                        d[n] = [0] * vs
            ids = [d["id"] for d in docs]
            with vb_conn.cursor() as cur:
                # Delete-then-insert emulates an upsert for the whole batch.
                cur.execute(sql.SQL("DELETE FROM {} WHERE id IN %s").format(
                    sql.Identifier(table_name)
                ), (tuple(ids),))
                column_names = list(docs[0].keys())
                values = [tuple(doc.get(col, None) for col in column_names) for doc in docs]
                insert_sql = sql.SQL("INSERT INTO {table_name} ({column_names}) VALUES %s").format(
                    table_name=sql.Identifier(table_name),
                    column_names=sql.SQL(', ').join([sql.Identifier(col) for col in column_names])
                )
                execute_values(cur, insert_sql, values)
                vb_conn.commit()
            logger.debug(f"VASTBASE inserted into {table_name} {ids}.")
            return []

    def update(
            self, condition: dict, newValue: dict, indexName: str, knowledgebaseId: str
    ) -> bool:
        """
        Update rows matching `condition` in table `{indexName}_{knowledgebaseId}`.

        Special keys in `newValue` (mirrors the encoding used by insert):
          - keyword fields (per field_keyword): list values joined with "###".
          - "*_feas" fields: serialized to JSON.
          - "kb_id": a list is collapsed to its first element (column stores a str).
          - "position_int": list of int rows, flattened and stored as "_"-joined 8-digit hex.
          - "page_num_int"/"top_int": list of ints, stored as "_"-joined 8-digit hex.
          - "remove": either a column name to reset to its default value, or a
            {column: value} dict whose values are removed per-row from the
            "###"-joined list columns.

        Returns True on success.
        """
        with self.get_conn() as vb_conn:
            table_name = f"{indexName}_{knowledgebaseId}"
            table_instance = get_table_instance(vb_conn, table_name)

            # column name -> (type, default) for the existing table
            clmns = {}
            if table_instance:
                for n, ty, de, _ in table_instance:
                    clmns[n] = (ty, de)
            filter = equivalent_condition_to_str(condition, table_instance)

            removeValue = {}
            for k, v in list(newValue.items()):
                if field_keyword(k):
                    if isinstance(v, list):
                        newValue[k] = "###".join(v)
                    else:
                        newValue[k] = v
                elif re.search(r"_feas$", k):
                    newValue[k] = json.dumps(v)
                elif k == 'kb_id':
                    if isinstance(newValue[k], list):
                        newValue[k] = newValue[k][0]  # column stores a str; take the first kb id
                elif k == "position_int":
                    assert isinstance(v, list)
                    arr = [num for row in v for num in row]
                    newValue[k] = "_".join(f"{num:08x}" for num in arr)
                elif k in ["page_num_int", "top_int"]:
                    assert isinstance(v, list)
                    newValue[k] = "_".join(f"{num:08x}" for num in v)
                elif k == "remove":
                    if isinstance(v, str):
                        # Reset column `v` to its declared default.
                        assert v in clmns, f"'{v}' should be in '{clmns}'."
                        ty, de = clmns[v]
                        # BUGFIX: str.find() returns -1 (truthy) when "cha" is absent
                        # and 0 (falsy) when the type *starts* with "cha" (e.g.
                        # "character varying"), inverting the intent; use containment.
                        if "cha" in ty.lower():
                            if not de:
                                de = ""
                        newValue[v] = de
                    else:
                        # Defer per-row list-element removal to the SELECT below.
                        for kk, vv in v.items():
                            removeValue[kk] = vv
                        del newValue[k]
                else:
                    newValue[k] = v

            remove_opt = {}     # json.dumps([k, new_value]): [id_to_update, ...]
            with vb_conn.cursor() as cur:
                if removeValue:
                    # Fetch current values so we can drop the requested entries
                    # from the "###"-joined list columns, row by row.
                    col_to_remove = list(removeValue.keys())
                    col_to_remove.append('id')
                    cur.execute(sql.SQL("SELECT {columns} FROM {table_name} WHERE {filter_clause}").format(
                        columns=sql.SQL(', ').join([sql.Identifier(col) for col in col_to_remove]),
                        table_name=sql.Identifier(table_name),
                        filter_clause=sql.SQL(filter)
                    ))
                    column_names = [desc[0] for desc in cur.description]
                    rows = cur.fetchall()
                    row_to_opt = pd.DataFrame(rows, columns=column_names)
                    # BUGFIX: row_to_opt[0] raised KeyError (columns are named,
                    # not positional); log the first row instead.
                    logger.debug(f"VASTBASE search table {str(table_name)}, filter {filter}, result: {str(row_to_opt.head(1))}")
                    row_to_opt = self.getFields(row_to_opt, col_to_remove)
                    for id, old_v in row_to_opt.items():
                        for k, remove_v in removeValue.items():
                            if remove_v in old_v[k]:
                                new_v = old_v[k].copy()
                                new_v.remove(remove_v)
                                # Group ids by identical (column, new_value) so each
                                # distinct rewrite needs only one UPDATE statement.
                                kv_key = json.dumps([k, new_v])
                                if kv_key not in remove_opt:
                                    remove_opt[kv_key] = [id]
                                else:
                                    remove_opt[kv_key].append(id)

                logger.debug(f"VASTBASE update table {table_name}, filter {filter}, newValue {newValue}.")
                for update_kv, ids in remove_opt.items():
                    k, v = json.loads(update_kv)
                    # BUGFIX: the template referenced {k} while the .format() kwarg
                    # was named set_clause, raising KeyError; the names now match.
                    cur.execute(sql.SQL("UPDATE {table_name} SET {set_clause}=%s WHERE {filter_clause} AND id in %s").format(
                        table_name=sql.Identifier(table_name),
                        set_clause=sql.Identifier(k),
                        filter_clause=sql.SQL(filter)
                    ), ("###".join(v), tuple(ids)))

                # BUGFIX: skip the bulk UPDATE when newValue ended up empty (e.g.
                # a pure "remove"-dict update) — "SET" with no assignments is a
                # SQL syntax error.
                if newValue:
                    cur.execute(sql.SQL("UPDATE {table_name} SET {set_clause} WHERE {filter_clause}").format(
                        table_name=sql.Identifier(table_name),
                        set_clause=sql.SQL(', ').join([sql.SQL("{k}={v}").format(
                            k=sql.Identifier(k),
                            v=sql.Literal(v)
                        ) for k, v in newValue.items()]),
                        filter_clause=sql.SQL(filter)
                    ))
                vb_conn.commit()
            return True

    def delete(self, condition: dict, indexName: str, knowledgebaseId: str) -> int:
        """
        Delete every row of table `{indexName}_{knowledgebaseId}` matching `condition`.

        Returns the number of rows removed; 0 when the table does not exist.
        """
        with self.get_conn() as vb_conn:
            table_name = f"{indexName}_{knowledgebaseId}"
            table_instance = get_table_instance(vb_conn, table_name)
            if not table_instance:
                logger.warning(f"Skipped deleting from table {table_name} since the table doesn't exist.")
                return 0
            filter = equivalent_condition_to_str(condition, table_instance)
            logger.debug(f"VASTBASE delete table {table_name}, filter {filter}.")
            delete_stmt = sql.SQL("DELETE FROM {table_name} WHERE {filter_clause}").format(
                table_name=sql.Identifier(table_name),
                filter_clause=sql.SQL(filter),
            )
            with vb_conn.cursor() as cur:
                cur.execute(delete_stmt)
                affected = cur.rowcount
                vb_conn.commit()
            return affected

    """
    Helper functions for search result
    """

    def getTotal(self, res: tuple[pd.DataFrame, int] | pd.DataFrame) -> int:
        if isinstance(res, tuple):
            return res[1]
        return len(res)

    def getChunkIds(self, res: tuple[pd.DataFrame, int] | pd.DataFrame) -> list[str]:
        if isinstance(res, tuple):
            res = res[0]
        return list(res["id"])

    def getFields(self, res: tuple[pd.DataFrame, int] | pd.DataFrame, fields: list[str]) -> dict[str, dict]:
        """
        Project the result frame onto `fields` (plus "id") and decode stored
        column values back into Python objects.

        Decoding rules (inverse of the encoding done at insert/update time):
          - keyword columns (per field_keyword): "###"-joined string -> list of keywords.
          - "position_int": "_"-joined 8-digit hex -> list of 5-int rows.
          - "page_num_int"/"top_int": "_"-joined 8-digit hex -> list of ints.
          - requested fields absent from the frame are returned as None.

        Returns {id: {field: value}}; {} when `fields` is empty.
        """
        if isinstance(res, tuple):
            res = res[0]
        if not fields:
            return {}
        fieldsAll = fields.copy()
        fieldsAll.append('id')
        # DB column names may differ in case from the requested field names.
        column_map = {col.lower(): col for col in res.columns}
        matched_columns = {column_map[col.lower()]: col for col in set(fieldsAll) if col.lower() in column_map}
        none_columns = [col for col in set(fieldsAll) if col.lower() not in column_map]

        res2 = res[matched_columns.keys()]
        res2 = res2.rename(columns=matched_columns)
        res2.drop_duplicates(subset=['id'], inplace=True)

        for column in res2.columns:
            # BUGFIX: the original tested `res2[column] is None`, which is never
            # true for a pandas Series, so fully-null columns fell through to the
            # decoders below and crashed on .split(); check for null content.
            if res2[column].isnull().all():
                res2[column] = ""
                continue
            k = column.lower()
            if field_keyword(k):
                res2[column] = res2[column].apply(lambda v: [kwd for kwd in (v or '').split("###") if kwd])
            elif k == "position_int":
                def to_position_int(v):
                    # Stored flat; every 5 consecutive numbers form one position row.
                    if v:
                        arr = [int(hex_val, 16) for hex_val in v.split('_')]
                        v = [arr[i:i + 5] for i in range(0, len(arr), 5)]
                    else:
                        v = []
                    return v
                res2[column] = res2[column].apply(to_position_int)
            elif k in ["page_num_int", "top_int"]:
                res2[column] = res2[column].apply(lambda v: [int(hex_val, 16) for hex_val in v.split('_')] if v else [])
        for column in none_columns:
            res2[column] = None

        return res2.set_index("id").to_dict(orient="index")

    def getHighlight(self, res: tuple[pd.DataFrame, int] | pd.DataFrame, keywords: list[str], fieldnm: str):
        if isinstance(res, tuple):
            res = res[0]
        ans = {}
        num_rows = len(res)
        column_id = res["id"]
        if fieldnm not in res:
            return {}
        for i in range(num_rows):
            id = column_id[i]
            txt = res[fieldnm][i]
            txt = re.sub(r"[\r\n]", " ", txt, flags=re.IGNORECASE | re.MULTILINE)
            txts = []
            for t in re.split(r"[.?!;\n]", txt):
                for w in keywords:
                    t = re.sub(
                        r"(^|[ .?/'\"\(\)!,:;-])(%s)([ .?/'\"\(\)!,:;-])"
                        % re.escape(w),
                        r"\1<em>\2</em>\3",
                        t,
                        flags=re.IGNORECASE | re.MULTILINE,
                    )
                if not re.search(
                        r"<em>[^<>]+</em>", t, flags=re.IGNORECASE | re.MULTILINE
                ):
                    continue
                txts.append(t)
            ans[id] = "...".join(txts)
        return ans

    def getAggregation(self, res: tuple[pd.DataFrame, int] | pd.DataFrame, fieldnm: str):
        """
        TODO: Vastbase doesn't provide aggregation
        """
        return list()

    """
    SQL
    """

    def sql(self, sql: str, fetch_size: int, format: str):
        """
        Execute a raw SQL query (unsupported for Vastbase).

        Raises:
            NotImplementedError: always.
        """
        # BUGFIX: the original signature omitted `self`, so an instance call
        # (conn.sql(query, n, fmt)) failed with a TypeError about argument
        # count before ever reaching the NotImplementedError below.
        raise NotImplementedError("Not implemented")