import asyncio
import psycopg
from psycopg_pool import AsyncConnectionPool
import re
import time
from functools import wraps

# Detect whether pandas is importable; the 'df' return type in
# _process_result() requires it, everything else works without it.
try:
    import pandas as pd
    PANDAS_AVAILABLE = True
except ImportError:
    PANDAS_AVAILABLE = False
    pd = None

class AsyncPgLaDb:
    def __init__(self, dbconfig=None):
        self.dbconfig = None
        self._load_config(dbconfig)
        
        # 验证配置是否包含必要字段
        required_fields = ['host', 'port', 'username', 'password', 'dbname']
        if not all(field in self.dbconfig for field in required_fields):
            raise ValueError("数据库配置缺少必要字段。需要包含: host, port, username, password, dbname")
            
        self._pool = None
        self._pool_lock = asyncio.Lock()
        self._local = {}  # 使用字典代替 threading.local()
        
        # 获取模式参数，默认为mysa
        self.schema = self.dbconfig.get('schema', 'mysa')
        
        # 实例级缓存
        self._metadata_cache = {
            'constraints': {},  # 表约束缓存 {table_name: constraints}
            'structure': {},    # 表结构缓存 {table_name: columns}
            'query': {}         # 新增查询结果缓存 {sql: (result, timestamp)}
        }
        self._cache_ttl = 3600  # 默认缓存过期时间(秒)

    def _load_config(self, dbconfig):
        """加载数据库配置"""
        # 如果dbconfig是字典类型，直接使用
        if isinstance(dbconfig, dict):
            self.dbconfig = dbconfig
            return
            
        # 从conf__g中获取数据库配置
        from . import conf__g
        
        # 从 conf__g.databases 改为 conf__g.postgres
        if hasattr(conf__g, 'postgres') and isinstance(conf__g.postgres, list):
            pg_configs = conf__g.postgres
        else:
            raise Exception("未找到有效的数据库配置")
        
        # 如果没有提供dbconfig，则使用第一个配置
        if dbconfig is None:
            if pg_configs:
                self.dbconfig = pg_configs[0]
            else:
                raise Exception("未找到任何数据库配置")
            return
            
        # 如果dbconfig是数字索引
        if isinstance(dbconfig, int):
            if 0 <= dbconfig < len(pg_configs):
                self.dbconfig = pg_configs[dbconfig]
            else:
                raise Exception(f"数据库配置索引 {dbconfig} 超出范围，有效范围为 0-{len(pg_configs)-1}")
            return
            
        # 如果dbconfig是字符串，查找匹配的配置名称
        if isinstance(dbconfig, str):
            for config in pg_configs:
                if config.get('name') == dbconfig:
                    self.dbconfig = config
                    return
            raise Exception(f"未找到名为 {dbconfig} 的数据库配置")
            
        # 默认使用第一个配置
        if pg_configs:
            self.dbconfig = pg_configs[0]
        else:
            raise Exception("未找到任何数据库配置")

    def _init_pool(self):
        """初始化连接池 - 实际的连接将在第一次使用时创建"""
        pass  # 实际连接池将在需要时创建

    async def _get_pool(self):
        """Lazily create and return the shared AsyncConnectionPool.

        Uses double-checked locking so concurrent tasks create the pool only
        once. The pool is constructed with ``open=False`` and opened with an
        explicit ``await open()``, per psycopg_pool's recommended lifecycle.

        Bug fix: the username and password are now percent-encoded, so
        credentials containing ``@``, ``:`` or ``/`` no longer corrupt the
        connection URI.
        """
        if self._pool is None:
            async with self._pool_lock:
                if self._pool is None:
                    from urllib.parse import quote
                    cfg = self.dbconfig
                    # Percent-encode credentials for safe embedding in the URI.
                    user = quote(str(cfg['username']), safe='')
                    password = quote(str(cfg['password']), safe='')
                    conninfo = (
                        f"postgresql://{user}:{password}"
                        f"@{cfg['host']}:{cfg['port']}/{cfg['dbname']}"
                        f"?options=-c%20search_path%3D{self.schema},public"
                    )
                    self._pool = AsyncConnectionPool(
                        conninfo,
                        min_size=cfg.get('minsize', 1),
                        max_size=cfg.get('maxsize', 5),
                        # Never auto-open in the constructor; open explicitly below.
                        open=False
                    )
                    await self._pool.open()
        return self._pool

    async def close(self):
        """安全关闭连接池"""
        async with self._pool_lock:
            if self._pool:
                await self._pool.close()
                self._pool = None

    def __del__(self):
        """析构时自动关闭"""
        # 注意：在异步环境中，不建议依赖 __del__ 来关闭资源
        pass

    @property
    def _state(self):
        """获取当前上下文的查询状态"""
        task_id = id(asyncio.current_task())
        if task_id not in self._local:
            self._local[task_id] = {
                'table': '',
                'where_keys': '',
                'where_vals': [],
                'fields': '*',
                'limit': '',
                '_order_by': '',
                '_group_by': '',
                'debug_level': False
            }
        return self._local[task_id]

    def _clear(self):
        """重置当前上下文的查询状态"""
        task_id = id(asyncio.current_task())
        if task_id in self._local:
            self._local[task_id].update({
                'table': '',
                'where_keys': '',
                'where_vals': [],
                'fields': '*',
                'limit': '',
                '_order_by': '',
                '_group_by': '',
            })

    @staticmethod
    def is_query(sql):
        """判断是否为查询操作"""
        sql = sql.strip().lower()
        query_keywords = r'^(select|with|show|desc|describe|explain|call)\b'
        return re.search(query_keywords, sql, re.IGNORECASE) is not None

    @staticmethod
    def needs_commit(sql):
        """判断是否为写操作"""
        sql = sql.strip().lower()
        write_keywords = r'^(insert|update|delete|replace|merge|create|alter|drop|truncate)\b'
        return re.search(write_keywords, sql, re.IGNORECASE) is not None

    async def query(self, sql=None, tuple_values=None, r_type='dict'):
        if not sql:
            raise Exception('sql语句不能为空')
        if tuple_values and not isinstance(tuple_values, (tuple, list)):
            raise Exception('参数类型必须是元组或列表')

        if not self.is_query(sql):
            raise PermissionError("query()方法仅允许查询操作(SELECT/WITH/SHOW/DESC/EXPLAIN/CALL)")

        return await self._execute(sql, tuple_values, fetch="all", r_type=r_type)

    async def _execute(self, sql, params=None, fetch=None, r_type='dict'):
        """Core async SQL executor: run on a pooled connection and shape the result.

        :param sql: statement to execute
        :param params: optional bind-parameter sequence
        :param fetch: "all" / "one" / "value" / None (see _process_result)
        :param r_type: 'dict' (default), 'tuple' or 'df'
        :raises Exception: wraps any psycopg.Error after rolling back
        """
        pool = await self._get_pool()
        async with pool.connection() as conn:
            try:
                if self._state['debug_level']:
                    # psycopg3 has no cursor.mogrify, so echo the statement
                    # and its parameters separately instead.
                    print("Executing:", sql)
                    if params:
                        print("Params:", params)
                result = await self._process_result(conn, sql, params, fetch, r_type)
                # Write statements are committed immediately; there is no
                # multi-statement transaction support in this wrapper.
                if self.needs_commit(sql):
                    await conn.commit()
                return result
            except psycopg.Error as e:
                await conn.rollback()
                error_msg = f"PostgreSQL数据库操作错误: {type(e).__name__}: {e}"
                print(error_msg)
                raise Exception(error_msg) from e

    async def _process_result(self, conn, sql, params, fetch, r_type='dict'):
        """Execute *sql* on a fresh cursor and convert the result.

        :param fetch: "all" | "one" | "value" | None. None on a query statement
            is promoted to "all"; on anything else the affected row count is
            returned instead of rows.
        :param r_type: 'tuple' (raw rows), 'dict' (column-keyed dicts) or
            'df' (pandas DataFrame, requires pandas).
        """
        async with conn.cursor() as cur:
            await cur.execute(sql, params)
            
            if fetch is None and self.is_query(sql):
                fetch = "all"

            # Fetch raw rows via psycopg3's async cursor API.
            if fetch == "all":
                # All result rows.
                raw_data = await cur.fetchall()
            elif fetch == "one":
                # A single row (or None).
                raw_data = await cur.fetchone()
            elif fetch == "value":
                # A single scalar: first column of the first row.
                record = await cur.fetchone()
                return record[0] if record else None
            else:
                # Non-query statements report the affected row count.
                return cur.rowcount if hasattr(cur, 'rowcount') else None

            # Convert according to the requested result type.
            if r_type == 'tuple':
                return raw_data
            elif r_type == 'dict':
                # Zip column names onto each row tuple.
                columns = [desc[0] for desc in cur.description] if cur.description else []
                if fetch == "all":
                    return [dict(zip(columns, row)) for row in raw_data]
                elif fetch == "one":
                    return dict(zip(columns, raw_data)) if raw_data else None
            elif r_type == 'df':
                # Build a pandas DataFrame from the rows.
                columns = [desc[0] for desc in cur.description] if cur.description else []
                if not PANDAS_AVAILABLE:
                    raise Exception("需要安装pandas库才能使用dataframe返回类型")
                
                if fetch == "all":
                    # list() for compatibility with older pandas versions.
                    data_list = list(raw_data) if raw_data else []
                    return pd.DataFrame(data_list, columns=columns)
                elif fetch == "one":
                    data_list = [raw_data] if raw_data else []
                    return pd.DataFrame(data_list, columns=columns) if raw_data else pd.DataFrame(columns=columns)
            
            # Fallback: raw tuple rows.
            return raw_data

    def table(self, name):
        """设置表名"""
        prefix = self.dbconfig.get('pre', '')
        self._state['table'] = f"{prefix}{name}" if prefix else name
        return self

    # Comparison operators accepted by _build_where_conditions, grouped by kind.
    COMPARE_SIGNS = [
        # basic comparisons
        '=', '<>', '!=', '>', '<', '>=', '<=',
        # pattern matching
        'like', 'not like',
        # set membership
        'in', 'not in',
        # NULL tests
        'is', 'is not',
        # range tests
        'between', 'not between',
    ]

    @staticmethod
    def _build_where_conditions(where, where_keys='', where_vals=None):
        '''构建WHERE条件'''
        if where_vals is None:
            where_vals = []
        conditions = []
        condition_operators = []  # 存储每个条件之间的逻辑运算符
        for key, item in where.items():
            # 默认逻辑运算符为 AND
            cond_operator = 'and'
            
            # 判断是否为函数调用（包含括号的字段名）
            is_function_call = '(' in key and ')' in key
            
            if isinstance(item, (list, tuple)) and len(item) >= 2:
                # 如果条件中有第三个参数，则使用指定的逻辑运算符
                if len(item) >= 3:
                    cond_operator = item[2].lower() if item[2].lower() in ('and', 'or') else 'and'
                # 处理复杂条件 [操作符, 值, 逻辑符?]
                sign = item[0] if item[0] in AsyncPgLaDb.COMPARE_SIGNS else '='

                # 特殊处理 in/not in 操作符
                if sign in ['in', 'not in']:
                    if not isinstance(item[1], (list, tuple)):
                        raise ValueError(f"{sign} 操作符的值必须是列表或元组")
                    placeholders = ','.join(['%s'] * len(item[1]))
                    # 函数调用不加引号，普通字段加引号
                    field_name = key if is_function_call else f'"{key}"'
                    conditions.append(f'{field_name} {sign} ({placeholders})')
                    where_vals.extend(item[1])
                # 特殊处理 is/is not 操作符
                elif sign in ['is', 'is not']:
                    if item[1] is None or str(item[1]).lower() == 'null':
                        # 函数调用不加引号，普通字段加引号
                        field_name = key if is_function_call else f'"{key}"'
                        conditions.append(f'{field_name} {sign} NULL')
                    else:
                        raise ValueError(f"{sign} 操作符的值必须是 None 或 'null'")
                # 特殊处理 between/not between 操作符
                elif sign in ['between', 'not between']:
                    if not isinstance(item[1], (list, tuple)) or len(item[1]) != 2:
                        raise ValueError(f"{sign} 操作符的值必须是包含两个值的列表或元组")
                    # 函数调用不加引号，普通字段加引号
                    field_name = key if is_function_call else f'"{key}"'
                    conditions.append(f'{field_name} {sign} %s AND %s')
                    where_vals.extend(item[1])
                else:
                    # 处理其他操作符
                    sign_str = f' {sign} ' if sign in ['like', 'not like'] else sign
                    # 函数调用不加引号，普通字段加引号
                    field_name = key if is_function_call else f'"{key}"'
                    conditions.append(f'{field_name}{sign_str}%s')
                    where_vals.append(item[1])
            else:
                # 简单条件 - 函数调用不加引号，普通字段加引号
                field_name = key if is_function_call else f'"{key}"'
                conditions.append(f'{field_name}=%s')
                where_vals.append(item)
            condition_operators.append(cond_operator)

        # 第一个条件
        where_keys = conditions[0]
        # 从第二个条件开始拼接
        for i in range(1, len(conditions)):
            where_keys += f" {condition_operators[i]} {conditions[i]}"

        return {'keys': where_keys, 'vals': tuple(where_vals)}

    def where(self, where=None, value=None, method='and', wrap=None):
        """条件设置"""
        if value is not None:
            where = {where: value}
        # 判断参数
        if where is not None:
            if not isinstance(where, dict):
                raise Exception('查询条件必须是字典类型')
            where_keys, where_vals = '', []
            # 查询字段
            _select = self._build_where_conditions(where, where_keys, where_vals)
            where_keys = _select['keys']
            where_vals = _select['vals']

            # 直接处理括号逻辑
            if wrap == '(':
                where_keys = f"({where_keys}"
            elif wrap == ')':
                where_keys = f"{where_keys})"
            elif wrap == '()':  # 针对多参数的情况，把where_keys用()包裹
                where_keys = f"({where_keys})"

        # 更新状态
        if self._state.get('where_keys'):  # 检查 _state['where_keys'] 是否存在
            self._state['where_keys'] += f' {method} {where_keys}'
            self._state['where_vals'] += where_vals
        else:
            self._state['where_keys'] = where_keys
            self._state['where_vals'] = where_vals

        return self

    def fields(self, params=None):
        """字段设置"""
        if not params:
            self._state['fields'] = '*'
        else:
            if isinstance(params, list):
                # 为每个字段名添加引号保证大小写敏感
                params = ','.join([f'"{field}"' for field in params])
            self._state['fields'] = params
        return self

    def page(self, page=1, rows=10):
        self._state['limit'] = f"{(page - 1) * rows},{rows}"
        return self

    def limit(self, a: int, b: int = None):
        self._state['limit'] = f"{a},{b}" if b else str(a)
        return self

    def groupBy(self, param=None):
        if param and isinstance(param, str):
            self._state['_group_by'] = param
        return self

    def orderBy(self, order_by=None):
        if order_by:
            self._state['_order_by'] = order_by
        return self

    async def format_sql(self):
        """Assemble the final SELECT statement from the builder state.

        Handles: schema qualification, MySQL-style 'offset,count' limits
        translated to LIMIT/OFFSET, and GROUP BY compatibility by wrapping
        non-grouped, non-aggregate columns in ANY_VALUE(...).

        NOTE(review): ANY_VALUE() only exists in PostgreSQL 16+ — confirm
        the minimum supported server version.
        """
        # With GROUP BY and fields == '*' the concrete column list is needed
        # so each column can be individually wrapped below.
        if self._state['_group_by'] and self._state['fields'] == '*':
            # Fetch the table structure asynchronously.
            structure = await self._get_table_structure()
            # Build an explicit, quoted column list.
            all_fields = ', '.join([f'"{col}"' for col in structure]) if structure else '*'
            sql = "SELECT {} FROM \"{}\"".format(all_fields, self._state['table'])
        else:
            sql = "SELECT {} FROM \"{}\"".format(self._state['fields'], self._state['table'])
            
        # Qualify the table with the configured schema (unless 'public').
        if self.schema and self.schema != 'public':
            sql = sql.replace(f'FROM "{self._state["table"]}"', f'FROM "{self.schema}"."{self._state["table"]}"')
            
        if self._state['where_keys']:
            sql += f" WHERE {self._state['where_keys']}"
        if self._state['_group_by']:
            sql += f" GROUP BY {self._state['_group_by']}"
        if self._state['_order_by']:
            sql += f" ORDER BY {self._state['_order_by']}"
        if self._state['limit']:
            # Translate MySQL-style 'offset,count' into LIMIT/OFFSET.
            parts = self._state['limit'].split(',')
            if len(parts) == 2:
                offset, limit = parts[0], parts[1]
                sql += f" LIMIT {limit} OFFSET {offset}"
            else:
                sql += f" LIMIT {self._state['limit']}"
        
        # GROUP BY with an explicit field list: wrap non-grouped, non-aggregate
        # columns in ANY_VALUE so the statement satisfies GROUP BY rules.
        if self._state['_group_by'] and self._state['fields'] != '*':
            # Column names listed in GROUP BY, unquoted.
            group_fields = [f.strip().strip('"') for f in self._state['_group_by'].split(',')]
            # The SELECT list exactly as the caller wrote it.
            select_fields = self._state['fields']
            
            # Plain COUNT(...) queries need no rewriting.
            if not (select_fields.strip().lower().startswith('count(*)') or 
                   select_fields.strip().lower().startswith('count(')):
                # Columns outside GROUP BY must be wrapped.
                import re
                
                # Split on top-level commas only (function arguments keep theirs).
                field_parts = self._split_sql_fields(select_fields)
                
                processed_fields = []
                aggregate_patterns = [
                    r'\bcount\s*\(', r'\bsum\s*\(', r'\bavg\s*\(', 
                    r'\bmax\s*\(', r'\bmin\s*\(', r'\bany_value\s*\('
                ]
                
                for field_part in field_parts:
                    # Aggregate expressions pass through untouched.
                    is_aggregate = any(re.search(pattern, field_part.lower()) for pattern in aggregate_patterns)
                    
                    if not is_aggregate:
                        # Separate the expression from an optional "as alias" suffix.
                        field_name_match = re.match(r'^([^,]*?)(?:\s+(as\s+.+))?$', field_part.strip(), re.IGNORECASE)
                        if field_name_match:
                            field_expr = field_name_match.group(1).strip()
                            alias_part = field_name_match.group(2) if field_name_match.group(2) else ""
                            
                            # Unquoted name for the GROUP BY membership test.
                            field_name = field_expr.strip().strip('"')
                            
                            # Wrap columns that are not grouped on.
                            if field_name not in group_fields:
                                if alias_part:
                                    processed_fields.append(f'ANY_VALUE({field_expr}) {alias_part}')
                                else:
                                    # No alias given: reuse the column name.
                                    processed_fields.append(f'ANY_VALUE({field_expr}) as {field_name}')
                            else:
                                processed_fields.append(field_part)
                        else:
                            processed_fields.append(field_part)
                    else:
                        processed_fields.append(field_part)
                
                # Splice the rewritten SELECT list back into the statement.
                sql = sql.replace(f"SELECT {select_fields} FROM", f"SELECT {', '.join(processed_fields)} FROM")
        # GROUP BY with fields == '*': wrap every non-grouped column.
        elif self._state['_group_by'] and self._state['fields'] == '*':
            # Column names listed in GROUP BY, unquoted.
            group_fields = [f.strip().strip('"') for f in self._state['_group_by'].split(',')]
            # All columns of the table, quoted.
            structure = await self._get_table_structure()
            all_fields = [f'"{col}"' for col in structure]
            
            processed_fields = []
            for field in all_fields:
                field_name = field.strip().strip('"')
                # Wrap columns that are not grouped on.
                if field_name not in group_fields:
                    processed_fields.append(f'ANY_VALUE({field}) as {field_name}')
                else:
                    processed_fields.append(field)
            
            # Splice the rewritten SELECT list back into the statement.
            sql = sql.replace(f"SELECT {', '.join(all_fields)} FROM", f"SELECT {', '.join(processed_fields)} FROM")
        
        return sql

    def _split_sql_fields(self, field_string):
        """正确分割SQL字段列表，考虑函数和括号"""
        fields = []
        current_field = ""
        paren_level = 0
        
        for char in field_string:
            if char == ',' and paren_level == 0:
                fields.append(current_field.strip())
                current_field = ""
            else:
                if char == '(':
                    paren_level += 1
                elif char == ')':
                    paren_level -= 1
                current_field += char
        
        if current_field:
            fields.append(current_field.strip())
            
        return fields

    @staticmethod
    def auto_clear(func):
        """Decorator: always reset the per-task builder state after *func* runs.

        Works for both sync and async methods (the wrapper is async either way).
        NOTE(review): it is applied bare (``@auto_clear``) inside the class
        body, which relies on staticmethod objects being directly callable —
        Python 3.10+ only; confirm the supported interpreter range.
        """
        @wraps(func)
        async def wrapper(self, *args, **kwargs):
            try:
                if asyncio.iscoroutinefunction(func):
                    return await func(self, *args, **kwargs)
                else:
                    return func(self, *args, **kwargs)
            finally:
                # State is cleared even when func raises.
                self._clear()
        return wrapper

    @auto_clear
    async def get(self, use_cache=False, cache_ttl=None, r_type='dict'):
        """Fetch all rows matching the built query.

        :param use_cache: serve from / update the instance-level query cache
        :param cache_ttl: cache lifetime in seconds (None → default TTL)
        :param r_type: 'dict' (default), 'tuple' or 'df'

        NOTE(review): the query cache is never evicted, so with
        use_cache=True and many distinct queries it grows without bound —
        confirm this is acceptable.
        """
        sql = await self.format_sql()
        params = self._state['where_vals']
        if not use_cache:
            return await self._execute(
                sql=sql,
                params=params,
                fetch="all",
                r_type=r_type
            )

        # Cache key covers statement, parameters and result shape.
        cache_key = f"{sql}:{params}:{r_type}"

        # Serve from cache when present and not expired.
        if cache_key in self._metadata_cache['query']:
            cached_time = self._metadata_cache['query'][cache_key][1]
            ttl = cache_ttl if cache_ttl is not None else self._cache_ttl
            if (time.time() - cached_time) < ttl:
                return self._metadata_cache['query'][cache_key][0]

        # Miss or expired: run the query and store the result with a timestamp.
        result = await self._execute(
            sql=sql,
            params=params,
            fetch="all",
            r_type=r_type
        )
        self._metadata_cache['query'][cache_key] = (result, time.time())
        return result

    @auto_clear
    async def first(self, r_type='dict'):
        """Fetch only the first matching row (forces LIMIT 1).

        :param r_type: 'dict' (default), 'tuple' or 'df'
        """
        # Temporarily force a single-row limit, restoring the caller's value.
        original_limit = self._state['limit']
        self._state['limit'] = '0,1'
        try:
            sql = await self.format_sql()
            params = self._state['where_vals']
            return await self._execute(
                sql=sql,
                params=params,
                fetch="one",
                r_type=r_type
            )
        finally:
            # Restore whatever limit was set before, even on failure.
            self._state['limit'] = original_limit

    @auto_clear
    async def value(self, field=None):
        """Fetch a single column value from the first matching row.

        :param field: column name (or expression) to select
        :return: the value, or None when no row matches
        :raises Exception: when *field* is empty
        """
        if not field:
            raise Exception('查询字段不能为空')

        # Restrict the SELECT list to just this field.
        self.fields(field)
        # Tuple rows skip the dict-conversion overhead.
        result = await self.first(r_type='tuple')

        if result is None:
            return None

        # The row tuple has exactly one element: the requested value.
        return result[0]

    async def _get_table_structure(self):
        """Return the ordered column names of the current table (cached).

        Results are cached per table in ``self._metadata_cache['structure']``.

        Bug fix: the debug level is now restored in a ``finally`` block, so a
        failing metadata query no longer leaves debug output disabled.
        """
        table_name = self._state['table']
        
        # Serve from the instance cache when available.
        if table_name in self._metadata_cache['structure']:
            return self._metadata_cache['structure'][table_name]

        # information_schema lookup, scoped to the configured schema.
        sql = """
        SELECT column_name
        FROM information_schema.columns 
        WHERE table_schema = %s AND table_name = %s
        ORDER BY ordinal_position
        """
        params = [self.schema, table_name]

        # Temporarily silence debug output for this internal query.
        debug_level = self._state['debug_level']
        if debug_level <= 1:
            self.debug_level(0)  # no debug echo for metadata lookups
        try:
            result = await self._execute(sql, params, fetch="all")
        finally:
            self.debug_level(debug_level)
        
        # Extract the column names in ordinal order.
        columns = [row['column_name'] for row in result]
        
        # Store in the instance cache.
        self._metadata_cache['structure'][table_name] = columns
        return columns

    async def _get_unique_constraints(self):
        """Return PRIMARY KEY / UNIQUE constraints of the current table (cached).

        :return: {'primary': [tuple_of_columns, ...], 'unique': [...]}

        Bug fixes: a stray duplicate docstring literal was removed, and the
        debug level is now restored in a ``finally`` block so a failing
        metadata query no longer leaves debug output disabled.
        """
        table_name = self._state['table']

        # Serve from the instance cache when available.
        if table_name in self._metadata_cache['constraints']:
            return self._metadata_cache['constraints'][table_name]

        # Aggregate each constraint's columns in ordinal order, scoped to
        # the configured schema.
        sql = """
        SELECT 
            tc.constraint_type,
            STRING_AGG(kcu.column_name, ',' ORDER BY kcu.ordinal_position) AS columns
        FROM 
            information_schema.table_constraints tc
        JOIN 
            information_schema.key_column_usage kcu 
            ON tc.constraint_name = kcu.constraint_name 
            AND tc.table_schema = kcu.table_schema
            AND tc.table_name = kcu.table_name
        WHERE 
            tc.table_schema = %s
            AND tc.table_name = %s
            AND tc.constraint_type IN ('PRIMARY KEY', 'UNIQUE')
        GROUP BY 
            tc.constraint_name, tc.constraint_type;
        """
        params = [self.schema, table_name]

        # Temporarily silence debug output for this internal query.
        debug_level = self._state['debug_level']
        if debug_level <= 1:
            self.debug_level(0)  # no debug echo for metadata lookups
        try:
            result = await self._execute(sql, params, fetch="all")
        finally:
            self.debug_level(debug_level)

        # Normalize rows into primary/unique buckets of column tuples.
        constraints = {
            'primary': [],
            'unique': []
        }
        for row in result:
            key = 'primary' if row['constraint_type'] == 'PRIMARY KEY' else 'unique'
            # Column names from information_schema already have correct casing.
            columns = tuple(row['columns'].split(','))  # tuple: hashable, order-stable
            if columns not in constraints[key]:        # de-duplicate
                constraints[key].append(columns)

        # Store in the instance cache.
        self._metadata_cache['constraints'][table_name] = constraints
        return constraints

    async def _get_table_columns_info(self):
        """Return per-column metadata for the current table (cached).

        Each entry maps column name → {data_type, is_nullable, column_default,
        is_serial}, where is_serial flags sequence-backed or identity columns.
        """
        table_name = self._state['table']
        
        # Cached under a distinct key so it coexists with the plain column list.
        cache_key = f"{table_name}_columns"
        if cache_key in self._metadata_cache['structure']:
            return self._metadata_cache['structure'][cache_key]
        
        # information_schema joined with pg_attribute for identity/generated
        # flags, scoped to the configured schema.
        # NOTE(review): the ::regclass cast on the concatenated schema.table
        # name will fail for identifiers that need quoting (mixed case,
        # special characters) — confirm table naming conventions.
        sql = """
        SELECT 
            c.column_name,
            c.data_type,
            c.is_nullable,
            c.column_default,
            a.attgenerated,
            a.attidentity
        FROM 
            information_schema.columns c
        LEFT JOIN pg_attribute a 
            ON a.attname = c.column_name 
            AND a.attrelid = (c.table_schema || '.' || c.table_name)::regclass
        WHERE 
            c.table_schema = %s 
            AND c.table_name = %s
        ORDER BY 
            c.ordinal_position;
        """
        params = [self.schema, table_name]
        result = await self._execute(sql, params, fetch="all")
        
        columns_info = {}
        for row in result:
            column_name = row['column_name']
            columns_info[column_name] = {
                'data_type': row['data_type'],
                'is_nullable': row['is_nullable'],
                'column_default': row['column_default'],
                'is_serial': self._is_serial_column(row)
            }
            
        # Store in the instance cache.
        self._metadata_cache['structure'][cache_key] = columns_info
        return columns_info
        
    def _is_serial_column(self, column_info):
        """判断列是否为SERIAL类型（实际上是integer+sequence）"""
        # 检查默认值中是否包含nextval（传统SERIAL）或者是否为IDENTITY列
        default = column_info['column_default']
        # 检查是否为GENERATED BY DEFAULT AS IDENTITY或GENERATED ALWAYS AS IDENTITY
        is_identity = column_info.get('attgenerated') == 's' or column_info.get('attidentity') in ('a', 'd')
        return (default and 'nextval(' in default) or is_identity

    async def insert(self, data, on_conflict='ignore'):
        """
        支持单条/批量插入及自动错误跳过的增强方法：

        :param data: 单条字典或批量列表（列表元素为字典）
        :param on_conflict: 插入时遇到重复键的处理策略 
            'ignore' - 忽略重复（默认）
            'update' - 覆盖更新现有记录，这个模式可以替代updateOrInsert
            None 或其他值 - 普通插入（可能因重复键失败）
        """
        # 参数类型校验
        if not data:
            raise Exception('新增内容不能为空')
        if not isinstance(data, (dict, list)):
            raise TypeError("参数必须为字典或字典列表")

        # 统一处理数据格式
        if isinstance(data, list):
            if len(data) == 0:
                raise ValueError("数据列表不能为空")
            # 强制所有数据项的键完全一致
            keys = list(data[0].keys())
            for item in data:
                if list(item.keys()) != keys:
                    raise ValueError("批量操作要求所有数据项字段完全一致")
            data_list = data
        else:
            keys = list(data.keys())
            data_list = [data]

        # 构造SQL语句
        placeholders = ', '.join(['%s'] * len(keys))
        # PostgreSQL字段转义逻辑
        escaped_keys = [f'"{k}"' for k in keys]

        # 只有update或ignore模式才需要检查约束
        required_cols = None
        if on_conflict in ('update', 'ignore'):  # 需要判断重复的模式，最好的情况是有唯一键。主键适合获取id后更新
            # 新增约束检查逻辑
            constraints = await self._get_unique_constraints()
            columns_info = await self._get_table_columns_info()
            
            # 查找自增主键字段
            serial_primary_keys = []
            if constraints['primary']:
                primary_cols = constraints['primary'][0]  # 取第一个主键约束
                for col in primary_cols:
                    if col in columns_info and columns_info[col]['is_serial']:
                        serial_primary_keys.append(col)
            
            required_cols = None
            
            # 情况1：有唯一联合key - 检查插入数据是否包含唯一联合key的所有字段
            if constraints['unique']:
                for unique_set in constraints['unique']:
                    if all(col in keys for col in unique_set):  # 检查是否包含该唯一键的所有字段
                        required_cols = unique_set
                        break
            
            # 情况2：没有匹配的唯一联合key，处理主键
            if not required_cols and constraints['primary']:
                primary_set = constraints['primary'][0]
                # 检查是否是自增主键
                if len(primary_set) == 1 and primary_set[0] in serial_primary_keys:
                    # 单个自增主键的情况
                    if primary_set[0] in keys:
                        # 如果插入数据包含自增主键，检查是否与现有数据冲突
                        required_cols = primary_set
                    else:
                        # 插入数据不包含自增主键，降级为普通插入
                        on_conflict = None
                elif all(col in keys for col in primary_set):
                    # 数据包含所有联合主键字段（非自增主键）
                    required_cols = primary_set
                # 如果不包含所有主键字段，且不是自增主键，则降级为普通插入
            
            # 情况3：没有唯一联合key，也没有联合pk，只有自增pk
            # 这个情况已经在情况2中统一处理了，不需要重复检查
            
            # 最终校验 - 如果表有约束（唯一键或主键）但数据中不包含任何约束字段，则降级为普通插入
            # 对于只有单个自增主键的情况，已在前面逻辑中处理
            if not required_cols and (constraints['unique'] or 
                                     (constraints['primary'] and not (
                                         len(constraints['primary'][0]) == 1 and 
                                         constraints['primary'][0][0] in serial_primary_keys and
                                         constraints['primary'][0][0] not in keys))):
                # 表中有约束，但当前数据不包含这些约束字段，降级为普通插入
                # 特殊处理：如果只有一个自增主键字段，且数据中不包含该字段，则不降级，因为这种情况是正常的
                on_conflict = None

        if on_conflict == 'update' and required_cols:
            conflict_cols_str = ','.join([f'"{col}"' for col in required_cols])
            update_fields = [f'"{k}" = EXCLUDED."{k}"' for k in keys]
            sql = f"""
            INSERT INTO "{self.schema}"."{self._state['table']}" ({', '.join(escaped_keys)})
            VALUES ({placeholders})
            ON CONFLICT ({conflict_cols_str}) DO UPDATE SET {', '.join(update_fields)}
            """
        elif on_conflict == 'ignore' and required_cols:
            conflict_cols_str = ','.join([f'"{col}"' for col in required_cols])
            sql = f"""
            INSERT INTO "{self.schema}"."{self._state['table']}" ({', '.join(escaped_keys)})
            VALUES ({placeholders})
            ON CONFLICT ({conflict_cols_str}) DO NOTHING
            """
        else:
            sql = f"INSERT INTO \"{self.schema}\".\"{self._state['table']}\" ({', '.join(escaped_keys)}) VALUES ({placeholders})"

        # 准备参数列表
        params_list = [tuple(item.get(k) for k in keys) for item in data_list]  # 使用get避免KeyError

        result = {
            'inserted': 0,
            'updated': 0,
            'skipped': 0,
            'errors': 0,
            'error_details': [],
            'batch_success': False
        }
        try:
            pool = await self._get_pool()
            async with pool.connection() as conn:
                async with conn.cursor() as cs:
                    if self._state['debug_level']:
                        print(f"批量执行 SQL: {sql}")

                    # 使用executemany进行批量插入
                    # 先尝试批量执行
                    try:
                        # 获取初始行数
                        count_sql = f"SELECT COUNT(*) AS num FROM \"{self._state['table']}\""
                        await cs.execute(count_sql)
                        start_row = await cs.fetchone()
                        startnum = start_row[0] if start_row else 0
                        
                        # 执行批量插入
                        await cs.executemany(sql, params_list)
                        await conn.commit()
                        
                        # 获取最终行数
                        await cs.execute(count_sql)
                        end_row = await cs.fetchone()
                        endnum = end_row[0] if end_row else 0
                        
                        # 计算结果
                        total = len(params_list)
                        result['batch_success'] = True
                        
                        # 精确插入数 = 表行数差值
                        inserted = endnum - startnum
                        # 冲突数 = 总操作数 - 插入数
                        conflict_count = total - inserted
                        
                        # 注意：在异步版本中，我们无法直接获取row_count
                        # 所以我们采用与同步版本类似的方法计算
                        result['inserted'] = inserted
                        result['skipped'] = conflict_count
                        
                    except psycopg.Error as e:
                        # 如果批量执行失败，回退到逐条执行
                        await conn.rollback()
                        print(f"批量执行失败({e})，开始逐条处理...")
                        result = await self._insert_one_by_one(data_list, keys, on_conflict, sql, params_list)

        except psycopg.Error as e:
            error_msg = f"PostgreSQL插入操作错误: {type(e).__name__}: {e}"
            print(error_msg)
            raise Exception(error_msg) from e
            
        return result

    async def _insert_one_by_one(self, data_list, keys, on_conflict, sql, params_list):
        """
        Insert rows one at a time as a fallback after a failed batch insert.

        Each parameter tuple is executed individually so a single bad row
        cannot abort the whole batch; per-row errors are collected in the
        result instead of being re-raised.

        Args:
            data_list: original list of row dicts (kept for signature
                symmetry with the batch path; only params_list is executed).
            keys: column names being inserted, aligned with params_list.
            on_conflict: conflict strategy ('update', 'ignore' or None);
                already baked into the prepared sql string.
            sql: the fully built INSERT statement.
            params_list: one parameter tuple per row.

        Returns:
            dict with 'inserted', 'updated', 'skipped', 'errors',
            'error_details' and 'batch_success' (always False here).

        Raises:
            Exception: wrapping any connection-level psycopg.Error.
        """
        result = {
            'inserted': 0,
            'updated': 0,
            'skipped': 0,
            'errors': 0,
            'error_details': [],
            'batch_success': False
        }

        try:
            pool = await self._get_pool()
            async with pool.connection() as conn:
                async with conn.cursor() as cs:
                    inserted_total = 0
                    # NOTE(review): never incremented — rowcount alone cannot
                    # distinguish an insert from an ON CONFLICT DO UPDATE.
                    updated_total = 0
                    skipped_total = 0
                    error_count = 0

                    for idx, params in enumerate(params_list):
                        try:
                            await cs.execute(sql, params)
                            # Fix: commit per row. Otherwise the first failed row
                            # aborts the transaction and every later execute on
                            # this connection raises InFailedSqlTransaction,
                            # defeating the purpose of the row-by-row fallback.
                            await conn.commit()
                            if self._state['debug_level']:
                                # psycopg3 cursors may lack mogrify(); fall back to raw SQL
                                print(f"执行 SQL: {cs.mogrify(sql, params) if hasattr(cs, 'mogrify') else sql}")

                            if cs.rowcount > 0:
                                inserted_total += cs.rowcount
                            else:
                                # Fix: rowcount == 0 means the row hit
                                # ON CONFLICT DO NOTHING — count it as skipped
                                # (the batch path reports skipped; this path
                                # previously always left it at 0).
                                skipped_total += 1

                        except psycopg.Error as e:
                            # Reset the aborted transaction so the next row can run.
                            await conn.rollback()
                            error_count += 1
                            result['error_details'].append({
                                'index': idx,
                                'params': params,
                                'error': str(e)
                            })
                            # Deliberately not re-raised: remaining rows still get a chance.

                    result['inserted'] = inserted_total
                    result['updated'] = updated_total
                    result['skipped'] = skipped_total
                    result['errors'] = error_count

        except psycopg.Error as e:
            error_msg = f"PostgreSQL逐条插入操作错误: {type(e).__name__}: {e}"
            print(error_msg)
            raise Exception(error_msg) from e

        return result

    @auto_clear
    async def update(self, data=None):
        """Run an UPDATE on the current table using the stored WHERE clause.

        Args:
            data: dict mapping column names to their new values.

        Returns:
            The result of self._execute() for the built statement.

        Raises:
            Exception: if data is empty, not a dict, or where() was not
                called beforehand.
        """
        # Guard clauses: validate the payload and the accumulated state.
        if not data:
            raise Exception('更新内容不能为空')
        if not isinstance(data, dict):
            raise Exception('参数必须是字典类型')
        if not self._state['where_keys'] or not self._state['where_vals']:
            raise Exception('必须先调用where()方法设置查询条件')

        # Quote every column so identifiers stay case-sensitive.
        assignments = ','.join(f'"{col}"=%s' for col in data)
        # SET values first, then the WHERE parameters, as one tuple.
        params = tuple(list(data.values()) + list(self._state['where_vals']))

        sql = (
            f"UPDATE \"{self.schema}\".\"{self._state['table']}\" "
            f"SET {assignments} WHERE {self._state['where_keys']}"
        )
        if self._state['debug_level']:
            print(f"执行 SQL: {sql}")
            print(f"参数: {params}")

        return await self._execute(sql, params)

    @auto_clear
    async def delete(self):
        """Delete rows from the current table matching the stored WHERE clause.

        Returns:
            The result of self._execute() for the built statement.

        Raises:
            ValueError: if table() or where() has not been called yet.
        """
        state = self._state
        if not state['table']:
            raise ValueError("在执行DELETE操作前，必须先调用table()方法设置表名")
        if not state['where_keys'] or not state['where_vals']:
            raise ValueError("在执行DELETE操作前，必须先调用where()方法设置WHERE条件")

        sql = (
            f"DELETE FROM \"{self.schema}\".\"{state['table']}\" "
            f"WHERE {state['where_keys']}"
        )
        return await self._execute(sql, state['where_vals'])

    def debug_level(self, debug_level=0):
        """Set the SQL debug verbosity and return self for chaining.

        NOTE(review): the default of 0 means a bare debug_level() call turns
        debug output OFF — confirm callers expect that.
        """
        self._state.update(debug_level=debug_level)
        return self
    def showsql(self, debug_level=0):
        """Alias with the same effect as debug_level(); returns self."""
        state = self._state
        state['debug_level'] = debug_level
        return self
        
    def clear_metadata_cache(self, table_name=None):
        """Evict cached table metadata, for one table or for all tables.

        Args:
            table_name: when truthy, drop only that table's entries;
                otherwise wipe both metadata caches completely.

        Returns:
            self, to allow method chaining.

        NOTE(review): the 'query' result cache is left untouched in both
        branches — confirm that is intentional.
        """
        for bucket in ('constraints', 'structure'):
            cache = self._metadata_cache[bucket]
            if table_name:
                cache.pop(table_name, None)
            else:
                cache.clear()
        return self