import psycopg2
import threading
import re
import time
from psycopg2 import pool

# 检查pandas是否可用
try:
    import pandas as pd
    PANDAS_AVAILABLE = True
except ImportError:
    PANDAS_AVAILABLE = False
    pd = None

# 使用统一的auto_clear装饰器
from .function import auto_clear

class PgLaDb(object):
    def __init__(self, dbconfig=None):
        """Initialize the wrapper: resolve config, set schema, create the pool.

        :param dbconfig: None (use first configured entry), an int index into
                         conf__g.postgres, a config name string, or a ready
                         config dict.
        """
        self._load_config(dbconfig)
        # Schema must be resolved BEFORE the pool is created: _init_pool()
        # embeds it in the connection search_path option. (Previously the
        # pool was built first, which raised AttributeError on self.schema.)
        self.schema = self.dbconfig.get('schema', 'mysa')
        self._pool = None
        self._pool_lock = threading.Lock()
        self._init_pool()
        self._local = threading.local()
        # Instance-level metadata caches.
        self._metadata_cache = {
            'constraints': {},  # table constraint cache {table_name: constraints}
            'structure': {},    # table structure cache {table_name: columns}
            'query': {}         # query result cache {cache_key: (result, timestamp)}
        }
        self._cache_ttl = 3600  # default cache expiry (seconds)

    def _load_config(self, dbconfig):
        """Resolve the database configuration and store it on self.dbconfig.

        Accepts a dict (used verbatim), None (first entry), an int index, or a
        name string matched against each entry's 'name' key.
        """
        # A ready-made dict wins immediately.
        if isinstance(dbconfig, dict):
            self.dbconfig = dbconfig
            return

        # Everything else is resolved against conf__g.postgres.
        from . import conf__g

        if not (hasattr(conf__g, 'postgres') and isinstance(conf__g.postgres, list)):
            raise Exception("未找到有效的数据库配置")
        pg_configs = conf__g.postgres

        # No selector: take the first configured entry.
        if dbconfig is None:
            if not pg_configs:
                raise Exception("未找到任何数据库配置")
            self.dbconfig = pg_configs[0]
            return

        # Numeric selector: index into the list with bounds checking.
        if isinstance(dbconfig, int):
            if not (0 <= dbconfig < len(pg_configs)):
                raise Exception(f"数据库配置索引 {dbconfig} 超出范围，有效范围为 0-{len(pg_configs)-1}")
            self.dbconfig = pg_configs[dbconfig]
            return

        # String selector: match against the 'name' key of each entry.
        if isinstance(dbconfig, str):
            for candidate in pg_configs:
                if candidate.get('name') == dbconfig:
                    self.dbconfig = candidate
                    return
            raise Exception(f"未找到名为 {dbconfig} 的数据库配置")

        # Any other selector type falls back to the first entry.
        if not pg_configs:
            raise Exception("未找到任何数据库配置")
        self.dbconfig = pg_configs[0]

    def _init_pool(self):
        """Create the instance-level connection pool (idempotent).

        Pool bounds may be tuned per config entry via optional 'minconn' /
        'maxconn' keys; defaults keep the previous 1/10 behavior.
        """
        if self._pool is None:
            with self._pool_lock:
                if self._pool is None:  # double-checked locking
                    # Be robust if self.schema has not been assigned yet
                    # (it is normally set in __init__ before this runs).
                    schema = getattr(self, 'schema', None) or self.dbconfig.get('schema', 'mysa')
                    # Pin the session search_path to the target schema.
                    options = f"-c search_path={schema},public"
                    self._pool = pool.ThreadedConnectionPool(
                        minconn=self.dbconfig.get('minconn', 1),   # minimum pooled connections
                        maxconn=self.dbconfig.get('maxconn', 10),  # maximum pooled connections
                        host=self.dbconfig['host'],
                        port=self.dbconfig['port'],
                        user=self.dbconfig['username'],
                        password=self.dbconfig['password'],
                        database=self.dbconfig['dbname'],
                        options=options
                    )

    def close(self):
        """Shut down the connection pool safely (no-op if already closed)."""
        with self._pool_lock:
            active_pool = self._pool
            if active_pool:
                active_pool.closeall()
                self._pool = None

    def __del__(self):
        """Best-effort pool shutdown when the instance is garbage-collected."""
        # __init__ may have failed before _pool_lock/_pool existed; never let
        # an AttributeError (or any interpreter-shutdown error) escape a
        # destructor.
        try:
            self.close()
        except Exception:
            pass

    @property
    def _state(self):
        """Per-thread query-builder state, created lazily (includes an RLock)."""
        state = getattr(self._local, 'state', None)
        if state is None:
            state = {
                'table': '',
                'schema': self.schema,  # snapshot of the instance schema
                'where_keys': '',
                'where_vals': [],
                'fields': '*',
                'limit': '',
                '_order_by': '',
                '_group_by': '',
                'showsql': False,
                '_lock': threading.RLock()
            }
            self._local.state = state
        return state

    def _clear(self):
        """Reset this thread's query builder (schema/showsql/_lock are kept)."""
        fresh = {
            'table': '',
            'where_keys': '',
            'where_vals': [],
            'fields': '*',
            'limit': '',
            '_order_by': '',
            '_group_by': '',
        }
        self._state.update(fresh)

    @staticmethod
    def is_query(sql):
        """判断是否为查询操作"""
        sql = sql.strip().lower()
        query_keywords = r'^(select|with|show|desc|describe|explain|call)\b'
        return re.search(query_keywords, sql, re.IGNORECASE) is not None

    @staticmethod
    def needs_commit(sql):
        """判断是否为写操作"""
        sql = sql.strip().lower()
        write_keywords = r'^(insert|update|delete|replace|merge|create|alter|drop|truncate)\b'
        return re.search(write_keywords, sql, re.IGNORECASE) is not None

    def query(self, sql=None, tuple_values=None, r_type='dict'):
        """Run a read-only SQL statement and return all rows.

        :param sql: statement text (must start with a query keyword)
        :param tuple_values: optional bound parameters (tuple or list)
        :param r_type: result format -- 'dict', 'tuple' or 'df'
        :raises PermissionError: when *sql* is not a read-only statement
        """
        if not sql:
            raise Exception('sql语句不能为空')
        bad_params = tuple_values and not isinstance(tuple_values, (tuple, list))
        if bad_params:
            raise Exception('参数类型必须是元组或列表')
        if not self.is_query(sql):
            raise PermissionError("query()方法仅允许查询操作(SELECT/WITH/SHOW/DESC/EXPLAIN/CALL)")
        return self._execute(sql, tuple_values, fetch="all", r_type=r_type)

    def _execute(self, sql, params=None, fetch=None, r_type='dict'):
        """Core execution: borrow a pooled connection, run *sql*, convert rows.

        :param sql: statement text
        :param params: bound parameters (tuple/list) or None
        :param fetch: "all", "one", or None (row count for write statements)
        :param r_type: result format -- 'dict', 'tuple' or 'df'
        :raises Exception: wraps any psycopg2.Error with context
        """
        conn = None
        try:
            conn = self._pool.getconn()
            try:
                with conn.cursor() as cursor:
                    if self._state['showsql']:
                        print("Executing:", cursor.mogrify(sql, params))
                    cursor.execute(sql, params)
                    result = self._process_result(cursor, fetch, sql, r_type)
                    # Writes commit immediately (pooled connections are shared).
                    if self.needs_commit(sql):
                        conn.commit()
                    return result
            except psycopg2.Error:
                # Roll back so the connection is not handed back to the pool
                # stuck in an aborted transaction, which would break the next
                # borrower with "current transaction is aborted" errors.
                try:
                    conn.rollback()
                except psycopg2.Error:
                    pass  # connection may already be unusable
                raise
            finally:
                if conn:
                    self._pool.putconn(conn)
        except psycopg2.Error as e:
            # PostgreSQL-specific error wrapping with original type preserved.
            error_msg = f"PostgreSQL数据库操作错误: {type(e).__name__}: {e}"
            print(error_msg)
            raise Exception(error_msg) from e

    def _process_result(self, cursor, fetch, sql, r_type='dict'):
        """Convert the cursor's pending result into the requested format.

        :param cursor: psycopg2 cursor that has just executed *sql*
        :param fetch: "all", "one", or None (None means: rows for read
                      statements, affected-row count for writes)
        :param sql: the executed statement (used to detect read statements)
        :param r_type: 'tuple' (raw rows), 'dict' (per-row dicts), 'df'
                       (pandas DataFrame; requires pandas)
        """
        if fetch is None and self.is_query(sql):
            fetch = "all"

        # Fetch raw rows, or short-circuit with write-statement counters.
        if fetch == "all":
            raw_data = cursor.fetchall()
        elif fetch == "one":
            raw_data = cursor.fetchone()
        else:
            # NOTE(review): psycopg2's cursor.lastrowid relies on OIDs and is
            # rarely meaningful on modern PostgreSQL -- confirm callers use it.
            return cursor.rowcount if cursor.rowcount >= 0 else cursor.lastrowid

        # Convert according to r_type.
        if r_type == 'tuple':
            return raw_data
        elif r_type == 'dict':
            # Zip column names from the cursor description with each tuple row.
            columns = [desc[0] for desc in cursor.description] if cursor.description else []
            if fetch == "all":
                return [dict(zip(columns, row)) for row in raw_data]
            elif fetch == "one":
                return dict(zip(columns, raw_data)) if raw_data else None
        elif r_type == 'df':
            # Convert to a pandas DataFrame (empty frame keeps the columns).
            columns = [desc[0] for desc in cursor.description] if cursor.description else []
            if not PANDAS_AVAILABLE:
                raise Exception("需要安装pandas库才能使用dataframe返回类型")

            if fetch == "all":
                # list() keeps compatibility with older pandas versions.
                data_list = list(raw_data) if raw_data else []
                return pd.DataFrame(data_list, columns=columns)
            elif fetch == "one":
                data_list = [raw_data] if raw_data else []
                return pd.DataFrame(data_list, columns=columns) if raw_data else pd.DataFrame(columns=columns)

        # Unknown r_type: fall back to raw tuples (matches LaDb.py behavior).
        return raw_data

    def table(self, name):
        """Set the active table name (thread-safe), applying any configured prefix."""
        prefix = self.dbconfig.get('pre') or ''
        with self._state['_lock']:
            self._state['table'] = (prefix + name) if prefix else name
        return self

    # Supported comparison operators, grouped by category.
    COMPARE_SIGNS = [
        # basic comparisons
        '=', '<>', '!=', '>', '<', '>=', '<=',
        # pattern matching
        'like', 'not like',
        # set membership
        'in', 'not in',
        # NULL tests
        'is', 'is not',
        # range tests
        'between', 'not between',
    ]

    @staticmethod
    def _build_where_conditions(where, where_keys='', where_vals=None):
        """Build a parameterized WHERE clause from a condition dict.

        :param where: {column: value} for equality, or
                      {column: [operator, value, ('and'|'or')?]} for complex tests
        :param where_keys: unused seed value (kept for backward compatibility)
        :param where_vals: optional list to append bound values into
        :return: {'keys': '<sql fragment>', 'vals': (bound values...)}
        :raises ValueError: on malformed in / is / between operands
        """
        # The default used to be a shared mutable list ([]), which leaks
        # values between calls; use None and create a fresh list instead.
        if where_vals is None:
            where_vals = []
        conditions = []
        condition_operators = []  # logical joiner between each condition and the previous one
        for key, item in where.items():
            cond_operator = 'and'  # default joiner
            if isinstance(item, (list, tuple)) and len(item) >= 2:
                # Optional third element selects the joiner ('and'/'or').
                if len(item) >= 3:
                    cond_operator = item[2].lower() if item[2].lower() in ('and', 'or') else 'and'
                # Complex condition: [operator, value, joiner?]; unknown
                # operators degrade to '='.
                sign = item[0] if item[0] in PgLaDb.COMPARE_SIGNS else '='

                # in / not in: expand one placeholder per element.
                if sign in ['in', 'not in']:
                    if not isinstance(item[1], (list, tuple)):
                        raise ValueError(f"{sign} 操作符的值必须是列表或元组")
                    placeholders = ','.join(['%s'] * len(item[1]))
                    conditions.append(f'"{key}" {sign} ({placeholders})')
                    where_vals.extend(item[1])
                # is / is not: only NULL comparison is allowed.
                elif sign in ['is', 'is not']:
                    if item[1] is None or str(item[1]).lower() == 'null':
                        conditions.append(f'"{key}" {sign} NULL')
                    else:
                        raise ValueError(f"{sign} 操作符的值必须是 None 或 'null'")
                # between / not between: exactly two bound values.
                elif sign in ['between', 'not between']:
                    if not isinstance(item[1], (list, tuple)) or len(item[1]) != 2:
                        raise ValueError(f"{sign} 操作符的值必须是包含两个值的列表或元组")
                    conditions.append(f'"{key}" {sign} %s AND %s')
                    where_vals.extend(item[1])
                else:
                    # like/not like need surrounding spaces; symbols do not.
                    sign_str = f' {sign} ' if sign in ['like', 'not like'] else sign
                    conditions.append(f'"{key}"{sign_str}%s')
                    where_vals.append(item[1])
            else:
                # Simple equality; quote the identifier for case sensitivity.
                conditions.append(f'"{key}"=%s')
                where_vals.append(item)
            condition_operators.append(cond_operator)

        # An empty dict used to raise IndexError here; return an empty
        # clause instead so callers can treat it as "no conditions".
        if not conditions:
            return {'keys': '', 'vals': tuple(where_vals)}

        # First condition stands alone; the rest are prefixed by their joiner.
        where_keys = conditions[0]
        for i in range(1, len(conditions)):
            where_keys += f" {condition_operators[i]} {conditions[i]}"

        return {'keys': where_keys, 'vals': tuple(where_vals)}

    def where(self, where=None, value=None, method='and', wrap=None):
        """Append query conditions to the thread state (thread-safe).

        :param where: condition dict, or a single column name when *value* is given
        :param value: value for the single-column shorthand
        :param method: joiner ('and'/'or') against previously set conditions
        :param wrap: '(' / ')' / '()' to add grouping parentheses
        :return: self (chainable)
        """
        with self._state['_lock']:
            if value is not None:
                where = {where: value}
            # Calling where() with no arguments used to fall through and hit
            # an UnboundLocalError on where_keys; treat it as a no-op instead.
            if where is None:
                return self
            if not isinstance(where, dict):
                raise Exception('查询条件必须是字典类型')

            # Build the parameterized fragment and its bound values.
            built = self._build_where_conditions(where, '', [])
            where_keys = built['keys']
            where_vals = built['vals']

            # Optional parenthesis wrapping for grouped OR/AND logic.
            if wrap == '(':
                where_keys = f"({where_keys}"
            elif wrap == ')':
                where_keys = f"{where_keys})"
            elif wrap == '()':
                where_keys = f"({where_keys})"

            # Merge into existing conditions, or start fresh.
            if self._state.get('where_keys'):
                self._state['where_keys'] += f' {method} {where_keys}'
                self._state['where_vals'] += where_vals
            else:
                self._state['where_keys'] = where_keys
                self._state['where_vals'] = where_vals
        return self

    def fields(self, params=None):
        """Set the SELECT column list (thread-safe).

        :param params: list of column names (each quoted automatically) or a
                       raw string; falsy input resets to '*'.
        :return: self (chainable)
        """
        with self._state['_lock']:
            if not params:
                self._state['fields'] = '*'
                return self
            if isinstance(params, list):
                # Quote identifiers to preserve case sensitivity.
                params = ','.join(f'"{col}"' for col in params)
            self._state['fields'] = params
        return self

    def page(self, page=1, rows=10):
        """Set pagination (thread-safe).

        :param page: 1-based page number; values below 1 are clamped to 1
        :param rows: rows per page
        :return: self (chainable)
        """
        with self._state['_lock']:
            # Clamp so page=0 or a negative page cannot produce a negative
            # OFFSET, which PostgreSQL rejects at execution time.
            offset = max(page - 1, 0) * rows
            self._state['limit'] = f"{offset},{rows}"
        return self

    def limit(self, a: int, b: int = None):
        """Set the row window, MySQL style (thread-safe).

        limit(n) -> first n rows; limit(offset, n) -> n rows starting at offset.
        Stored as "offset,count"; format_sql() renders LIMIT/OFFSET for PG.
        """
        with self._state['_lock']:
            window = f"{a},{b}" if b is not None else f"0,{a}"
            self._state['limit'] = window
        return self

    def groupBy(self, param=None):
        """Set the GROUP BY expression (thread-safe); non-string or empty input is ignored."""
        with self._state['_lock']:
            if isinstance(param, str) and param:
                self._state['_group_by'] = param
        return self

    def orderBy(self, param=None):
        """Set the ORDER BY expression (thread-safe); falsy input is ignored."""
        with self._state['_lock']:
            if param:
                self._state['_order_by'] = param
        return self

    def format_sql(self):
        """Assemble the final SELECT statement from the thread-local state.

        Handles schema qualification, WHERE/GROUP BY/ORDER BY/LIMIT rendering,
        and wraps non-grouped select columns in ANY_VALUE() when a GROUP BY is
        present.

        NOTE(review): ANY_VALUE() is only available from PostgreSQL 16 --
        confirm the target server version supports it.
        """
        # With GROUP BY and '*', expand to the explicit column list so each
        # column can be examined (and wrapped) individually further below.
        if self._state['_group_by'] and self._state['fields'] == '*':
            # Table structure from information_schema (instance-cached).
            structure = self._get_table_structure()
            # Build the quoted column list.
            all_fields = ', '.join([f'"{col}"' for col in structure])
            sql = "SELECT {} FROM \"{}\"".format(all_fields, self._state['table'])
        else:
            sql = "SELECT {} FROM \"{}\"".format(self._state['fields'], self._state['table'])

        # Qualify the table with the schema (default 'public' needs none).
        if self.schema and self.schema != 'public':
            # Replace only the exact FROM clause to avoid double-qualifying.
            from_pattern = f'FROM "{self._state["table"]}"'
            if from_pattern in sql:
                sql = sql.replace(from_pattern, f'FROM "{self.schema}"."{self._state["table"]}"')

        if self._state['where_keys']:
            sql += f" WHERE {self._state['where_keys']}"
        if self._state['_group_by']:
            sql += f" GROUP BY {self._state['_group_by']}"
        if self._state['_order_by']:
            sql += f" ORDER BY {self._state['_order_by']}"
        if self._state['limit']:
            # State stores "offset,count"; render PostgreSQL LIMIT/OFFSET.
            parts = self._state['limit'].split(',')
            if len(parts) == 2:
                offset, limit = parts[0], parts[1]
                sql += f" LIMIT {limit} OFFSET {offset}"
            else:
                sql += f" LIMIT {self._state['limit']}"

        # GROUP BY with an explicit column list: wrap non-aggregate columns
        # that are missing from GROUP BY in ANY_VALUE() so PG accepts them.
        if self._state['_group_by'] and self._state['fields'] != '*':
            # GROUP BY column names, unquoted, for membership tests.
            group_fields = [f.strip().strip('"') for f in self._state['_group_by'].split(',')]
            # Raw SELECT list as configured via fields().
            select_fields = self._state['fields']

            # Pure COUNT(...) queries need no wrapping.
            if not (select_fields.strip().lower().startswith('count(*)') or 
                   select_fields.strip().lower().startswith('count(')):
                # NOTE: 're' is already imported at module level; this local
                # import is redundant but harmless.
                import re

                # Split only on top-level commas (function args keep theirs).
                field_parts = self._split_sql_fields(select_fields)

                processed_fields = []
                aggregate_patterns = [
                    r'\bcount\s*\(', r'\bsum\s*\(', r'\bavg\s*\(', 
                    r'\bmax\s*\(', r'\bmin\s*\(', r'\bany_value\s*\('
                ]

                for field_part in field_parts:
                    # Skip parts that are already aggregate function calls.
                    is_aggregate = any(re.search(pattern, field_part.lower()) for pattern in aggregate_patterns)

                    if not is_aggregate:
                        # Separate '<expr> [as alias]' ("field" as alias or field as alias).
                        field_name_match = re.match(r'^([^,]*?)(?:\s+(as\s+.+))?$', field_part.strip(), re.IGNORECASE)
                        if field_name_match:
                            field_expr = field_name_match.group(1).strip()
                            alias_part = field_name_match.group(2) if field_name_match.group(2) else ""

                            # Bare column name (quotes removed) for lookup.
                            field_name = field_expr.strip().strip('"')

                            # Wrap columns missing from GROUP BY in ANY_VALUE().
                            if field_name not in group_fields:
                                if alias_part:
                                    processed_fields.append(f'ANY_VALUE({field_expr}) {alias_part}')
                                else:
                                    # No alias given: reuse the column name.
                                    processed_fields.append(f'ANY_VALUE({field_expr}) as {field_name}')
                            else:
                                processed_fields.append(field_part)
                        else:
                            processed_fields.append(field_part)
                    else:
                        processed_fields.append(field_part)

                # Swap the rewritten column list into the statement.
                # NOTE(review): textual replace assumes the SELECT list occurs
                # verbatim exactly once in the statement -- confirm.
                sql = sql.replace(f"SELECT {select_fields} FROM", f"SELECT {', '.join(processed_fields)} FROM")
        # GROUP BY with '*': same wrapping over the expanded column list.
        elif self._state['_group_by'] and self._state['fields'] == '*':
            # GROUP BY column names, unquoted, for membership tests.
            group_fields = [f.strip().strip('"') for f in self._state['_group_by'].split(',')]
            # All table columns, quoted (instance-cached lookup).
            structure = self._get_table_structure()
            all_fields = [f'"{col}"' for col in structure]

            processed_fields = []
            for field in all_fields:
                field_name = field.strip().strip('"')
                # Wrap columns missing from GROUP BY in ANY_VALUE().
                if field_name not in group_fields:
                    processed_fields.append(f'ANY_VALUE({field}) as {field_name}')
                else:
                    processed_fields.append(field)

            # Swap the rewritten column list into the statement.
            sql = sql.replace(f"SELECT {', '.join(all_fields)} FROM", f"SELECT {', '.join(processed_fields)} FROM")

        return sql

    def _split_sql_fields(self, field_string):
        """Split a SELECT field list on top-level commas, respecting parentheses.

        Commas inside function calls such as count(a, b) are preserved.
        """
        fields = []
        buf = []
        depth = 0

        for ch in field_string:
            if ch == ',' and depth == 0:
                # Top-level comma: flush the accumulated field.
                fields.append(''.join(buf).strip())
                buf = []
                continue
            if ch == '(':
                depth += 1
            elif ch == ')':
                depth -= 1
            buf.append(ch)

        # Flush the trailing field, if any characters were accumulated.
        if buf:
            fields.append(''.join(buf).strip())

        return fields

    @auto_clear
    def get(self, use_cache=False, cache_ttl=None, r_type='dict'):
        """Execute the built query and return all matching rows.

        :param use_cache: when True, serve from / populate the instance query cache
        :param cache_ttl: per-call TTL override (seconds); defaults to self._cache_ttl
        :param r_type: 'dict', 'tuple' or 'df'
        """
        sql = self.format_sql()
        if not use_cache:
            return self._execute(
                sql=sql,
                params=self._state['where_vals'],
                fetch="all",
                r_type=r_type
            )

        now = time.time()
        query_cache = self._metadata_cache['query']

        # Evict entries older than the instance default TTL: previously
        # expired entries were never removed, so the cache grew without bound.
        stale_keys = [k for k, (_, ts) in query_cache.items() if (now - ts) >= self._cache_ttl]
        for k in stale_keys:
            del query_cache[k]

        # Cache key combines the SQL text with its bound parameters.
        cache_key = f"{sql}:{self._state['where_vals']}"

        # Serve a cached result when it is still fresh for this call's TTL.
        if cache_key in query_cache:
            cached_result, cached_time = query_cache[cache_key]
            ttl = cache_ttl if cache_ttl is not None else self._cache_ttl
            if (now - cached_time) < ttl:
                return cached_result

        # Miss (or expired): run the query and cache the result.
        result = self._execute(
            sql=sql,
            params=self._state['where_vals'],
            fetch="all",
            r_type=r_type
        )
        query_cache[cache_key] = (result, time.time())
        return result

    @auto_clear
    def first(self, r_type='dict'):
        """Return the first matching row (forces LIMIT 1), or None."""
        # Force a one-row window, remembering what the caller configured.
        saved_limit = self._state['limit']
        self._state['limit'] = '0,1'
        try:
            return self._execute(
                sql=self.format_sql(),
                params=self._state['where_vals'],
                fetch="one",
                r_type=r_type
            )
        finally:
            # Restore the caller's limit setting.
            self._state['limit'] = saved_limit

    @auto_clear
    def value(self, field=None):
        """Return a single column value from the first matching row, or None."""
        if not field:
            raise Exception('查询字段不能为空')

        # Restrict the query to the requested column only.
        self.fields(field)
        # Tuple format avoids the per-row dict-building overhead.
        row = self.first(r_type='tuple')

        # The tuple holds exactly one element: the requested column.
        return None if row is None else row[0]

    def _get_unique_constraints(self):
        """Return PRIMARY KEY / UNIQUE constraints of the active table.

        Results are cached per instance under the table name.

        :return: {'primary': [tuple_of_columns, ...],
                  'unique':  [tuple_of_columns, ...]}
        """
        table_name = self._state['table']

        # Serve from the instance cache when available.
        if table_name in self._metadata_cache['constraints']:
            return self._metadata_cache['constraints'][table_name]

        """获取表的所有唯一约束（格式优化为字典）"""
        # information_schema query aggregating each constraint's columns in
        # ordinal order.
        schema = self.dbconfig.get('schema', 'mysa') if self.dbconfig else 'mysa'
        sql = f"""
        SELECT 
            tc.constraint_type,
            STRING_AGG(kcu.column_name, ',' ORDER BY kcu.ordinal_position) AS columns
        FROM 
            information_schema.table_constraints tc
        JOIN 
            information_schema.key_column_usage kcu 
            ON tc.constraint_name = kcu.constraint_name 
            AND tc.table_schema = kcu.table_schema
            AND tc.table_name = kcu.table_name
        WHERE 
            tc.table_schema = %s
            AND tc.table_name = %s
            AND tc.constraint_type IN ('PRIMARY KEY', 'UNIQUE')
        GROUP BY 
            tc.constraint_name, tc.constraint_type;
        """
        # Temporarily silence SQL echoing for this internal metadata query.
        # NOTE(review): 'showsql' is initialized as a bool but compared with
        # <= 1 here, and self.showsql() is defined outside this chunk --
        # verify the intended semantics.
        showsql = self._state['showsql']
        if showsql <= 1:
            self.showsql(0)  # no debug output wanted for metadata queries
        result = self._execute(sql, params=(schema, table_name,), fetch="all")
        self.showsql(showsql)

        # Reshape rows into {'primary': [...], 'unique': [...]}.
        constraints = {
            'primary': [],
            'unique': []
        }
        for row in result:
            key = 'primary' if row['constraint_type'] == 'PRIMARY KEY' else 'unique'
            # Column names from information_schema keep their original case.
            columns = tuple(row['columns'].split(','))  # tuple: hashable, easy to compare
            if columns not in constraints[key]:        # de-duplicate
                constraints[key].append(columns)

        # Store in the instance cache.
        self._metadata_cache['constraints'][table_name] = constraints
        return constraints

    def _get_table_structure(self):
        """Return the active table's column names (instance-cached).

        :return: list[str] of column names in ordinal position order
        """
        table_name = self._state['table']

        # Serve from the instance cache when available.
        if table_name in self._metadata_cache['structure']:
            return self._metadata_cache['structure'][table_name]

        # information_schema query for the ordered column list.
        schema = self.dbconfig.get('schema', 'mysa') if self.dbconfig else 'mysa'
        sql = """
        SELECT column_name
        FROM information_schema.columns 
        WHERE table_schema = %s AND table_name = %s
        ORDER BY ordinal_position
        """
        # Temporarily silence SQL echoing for this internal metadata query.
        # NOTE(review): 'showsql' is a bool but compared with <= 1, and
        # self.showsql() is defined outside this chunk -- verify semantics.
        showsql = self._state['showsql']
        if showsql <= 1:
            self.showsql(0)  # no debug output wanted for metadata queries
        result = self._execute(sql, params=(schema, table_name,), fetch="all")
        self.showsql(showsql)

        # Extract plain column names.
        columns = [row['column_name'] for row in result]

        # Store in the instance cache.
        self._metadata_cache['structure'][table_name] = columns
        return columns

    def _get_table_columns_info(self):
        """Return per-column metadata for the active table (instance-cached).

        :return: {column_name: {'data_type', 'is_nullable', 'column_default',
                  'is_serial'}}
        """
        table_name = self._state['table']

        # Cached under a distinct key so it does not clash with the plain
        # column-list cache for the same table.
        cache_key = f"{table_name}_columns"
        cached = self._metadata_cache['structure'].get(cache_key)
        if cached is not None:
            return cached

        # information_schema query for column metadata.
        schema = self.dbconfig.get('schema', 'mysa') if self.dbconfig else 'mysa'
        sql = """
        SELECT 
            column_name,
            data_type,
            is_nullable,
            column_default
        FROM 
            information_schema.columns 
        WHERE 
            table_schema = %s 
            AND table_name = %s
        ORDER BY 
            ordinal_position;
        """
        rows = self._execute(sql, params=(schema, table_name,), fetch="all")

        # Build the metadata map, flagging SERIAL-style columns.
        columns_info = {
            row['column_name']: {
                'data_type': row['data_type'],
                'is_nullable': row['is_nullable'],
                'column_default': row['column_default'],
                'is_serial': self._is_serial_column(row),
            }
            for row in rows
        }

        # Store in the instance cache.
        self._metadata_cache['structure'][cache_key] = columns_info
        return columns_info
        
    def _is_serial_column(self, column_info):
        """Return True when the column default draws from a sequence (SERIAL).

        :param column_info: mapping with a 'column_default' entry
        :return: bool
        """
        # A SERIAL column is really integer + sequence; its default contains
        # nextval(...). bool() normalizes the result: the old expression
        # leaked None/'' instead of False when no default exists.
        default = column_info['column_default']
        return bool(default and 'nextval(' in default)

    def insert(self, data, on_conflict='ignore'):
        """
        Insert a single row or a batch, with configurable duplicate handling.

        :param data: one dict, or a list of dicts (all items must share keys)
        :param on_conflict: duplicate-key strategy
            'ignore' - skip duplicates (default)
            'update' - upsert existing rows (can replace updateOrInsert)
            other    - plain insert (may fail on duplicate keys)
        :return: counters dict with inserted/updated/skipped/errors/
                 error_details/batch_success
        """
        # Validate argument types.
        if not data:
            raise Exception('新增内容不能为空')
        if not isinstance(data, (dict, list)):
            raise TypeError("参数必须为字典或字典列表")

        # Normalize to a list of dicts with a fixed key order.
        if isinstance(data, list):
            if len(data) == 0:
                raise ValueError("数据列表不能为空")
            # Enforce identical keys (including order) across all items.
            keys = list(data[0].keys())
            for item in data:
                if list(item.keys()) != keys:
                    raise ValueError("批量操作要求所有数据项字段完全一致")
            data_list = data
        else:
            keys = list(data.keys())
            data_list = [data]

        # Build the SQL skeleton.
        placeholders = ', '.join(['%s'] * len(keys))
        # Quote identifiers for PostgreSQL case sensitivity.
        escaped_keys = [f'"{k}"' for k in keys]

        # Constraint discovery is only needed for the conflict-aware modes.
        required_cols = None
        if on_conflict in ('update', 'ignore'):  # conflict-aware modes; a unique key is ideal, a PK suits fetch-then-update
            # Look up unique/primary constraints and column metadata.
            constraints = self._get_unique_constraints()
            columns_info = self._get_table_columns_info()

            # Collect serial (auto-increment) primary-key columns.
            serial_primary_keys = []
            if constraints['primary']:
                primary_cols = constraints['primary'][0]  # first primary-key constraint
                for col in primary_cols:
                    if col in columns_info and columns_info[col]['is_serial']:
                        serial_primary_keys.append(col)

            required_cols = None

            # Case 1: a unique (possibly composite) key exists -- the payload
            # must carry every column of that key.
            # NOTE(review): this loop raises on the FIRST unique constraint
            # that is not fully covered instead of trying the remaining
            # constraints -- confirm that is intended.
            if constraints['unique']:
                for unique_set in constraints['unique']:
                    if all(col in keys for col in unique_set):  # payload covers this unique key?
                        required_cols = unique_set
                        break
                    else:
                        # Unique key exists but payload lacks some columns.
                        missing_cols = [col for col in unique_set if col not in keys]
                        if missing_cols:
                            raise ValueError(f"表'{self._state['table']}'有唯一联合键{unique_set}，但插入数据缺少字段：{missing_cols}")

            # Case 2: no unique key, but a (composite) primary key -- the
            # payload must carry every PK column.
            if not required_cols and constraints['primary']:
                primary_set = constraints['primary'][0]
                # Single-column serial primary key?
                if serial_primary_keys and len(primary_set) == 1:
                    # Serial-only PK: the payload may legitimately omit it.
                    if primary_set[0] in keys:
                        # Payload carries the serial PK: keep conflict handling.
                        required_cols = primary_set
                    else:
                        # Payload omits the serial PK: plain insert instead.
                        on_conflict = None
                elif all(col in keys for col in primary_set):
                    # Payload covers the whole composite primary key.
                    required_cols = primary_set
                else:
                    # Composite PK present but payload lacks some columns.
                    missing_cols = [col for col in primary_set if col not in keys]
                    if missing_cols:
                        raise ValueError(f"表'{self._state['table']}'有联合主键{primary_set}，但插入数据缺少字段：{missing_cols}")

            # Case 3 (serial PK only, no other constraints) is already
            # covered by case 2 above; no separate check needed.

            # Final check: if the table has constraints but the payload
            # carries none of their columns, fall back to a plain insert.
            if not required_cols and (constraints['unique'] or constraints['primary']):
                # Constraints exist but this payload cannot trigger them.
                on_conflict = None

        # Build VALUES parameters; .get() tolerates missing keys (-> None).
        # Date/time conversion is deliberately left to the business layer.
        params_list = [tuple(item.get(k) for k in keys) for item in data_list]

        result = {
            'inserted': 0, 
            'updated': 0, 
            'skipped': 0,
            'errors': 0,
            'error_details': [],
            'batch_success': False
        }

        try:
            # Plain INSERT statements, mirroring LaDb.py.
            # CTE-based variants were avoided deliberately: CTEs can break
            # column type inference (e.g. numeric treated as text); simple
            # VALUES keeps the types correct.
            if on_conflict == 'update' and required_cols:
                conflict_cols_str = ','.join([f'"{col}"' for col in required_cols])
                update_fields = [f'"{k}" = EXCLUDED."{k}"' for k in keys]
                sql = f"""
                INSERT INTO "{self._state['table']}" ({', '.join(escaped_keys)})
                VALUES ({placeholders})
                ON CONFLICT ({conflict_cols_str}) DO UPDATE SET {', '.join(update_fields)}
                """
            elif on_conflict == 'ignore' and required_cols:
                conflict_cols_str = ','.join([f'"{col}"' for col in required_cols])
                sql = f"""
                INSERT INTO "{self._state['table']}" ({', '.join(escaped_keys)})
                VALUES ({placeholders})
                ON CONFLICT ({conflict_cols_str}) DO NOTHING
                """
            else:
                sql = f"INSERT INTO \"{self._state['table']}\" ({', '.join(escaped_keys)}) VALUES ({placeholders})"

            # Batch execution via executemany.
            conn = None
            try:
                conn = self._pool.getconn()
                with conn.cursor() as cs:
                    if self._state['showsql']:
                        print(f"批量执行 SQL: {cs.mogrify(sql, params_list[0])}")

                    # Row count before the batch (used to derive insert count).
                    # NOTE(review): this reads schema from the thread state,
                    # unlike format_sql() which uses self.schema -- confirm
                    # both always agree.
                    count_sql = f"SELECT COUNT(*) AS num FROM \"{self._state['schema']}\".\"{self._state['table']}\""
                    cs.execute(count_sql)
                    startnum = cs.fetchone()[0]

                    # Run the batch insert.
                    cs.executemany(sql, params_list)
                    conn.commit()
                    row_count = cs.rowcount

                    # Row count after the batch.
                    cs.execute(count_sql)
                    endnum = cs.fetchone()[0]

                    # Derive counters (same logic as LaDb.py).
                    # NOTE(review): after executemany, cursor.rowcount may
                    # reflect only the last statement, so these counters can
                    # be off for multi-row batches -- verify.
                    total = len(params_list)
                    result['batch_success'] = True

                    # Exact inserts = table row-count growth.
                    inserted = endnum - startnum
                    # Conflicts = attempted rows - inserted rows.
                    conflict_count = total - inserted
                    # Updates. NOTE(review): an earlier comment described
                    # '(ROW_COUNT - inserted) // 2' but the code does not
                    # divide -- confirm which is intended.
                    updated = (row_count - inserted) 
                    # Skipped = conflicts - updates.
                    skipped = conflict_count - updated

                    result['inserted'] = inserted
                    result['updated'] = updated
                    result['skipped'] = skipped

            finally:
                if conn:
                    self._pool.putconn(conn)

        except (psycopg2.IntegrityError, psycopg2.OperationalError) as e:
            print(f"批量执行失败({e})，开始逐条处理...")
            # Batch failed: fall back to row-by-row processing.
            result = self._insert_one_by_one(data_list, keys, on_conflict, required_cols)

        except Exception as e:
            error_msg = f"PostgreSQL插入操作错误: {type(e).__name__}: {e}"
            print(error_msg)
            raise Exception(error_msg) from e

        return result
        
    def _insert_one_by_one(self, data_list, keys, on_conflict, required_cols):
        """
        Insert rows one at a time; fallback path used when the batch insert fails.

        Args:
            data_list: list of dicts, one per row; values looked up via ``keys``.
            keys: ordered column names to insert.
            on_conflict: 'update' (upsert), 'ignore' (skip duplicates), or any
                other value for a plain INSERT.
            required_cols: conflict-target columns for the ON CONFLICT clause;
                when falsy the plain INSERT form is used.

        Returns:
            dict with counters 'inserted' / 'updated' / 'skipped' / 'errors',
            a per-row 'error_details' list, and 'batch_success': False.
        """
        result = {
            'inserted': 0,
            'updated': 0,
            'skipped': 0,
            'errors': 0,
            'error_details': [],
            'batch_success': False
        }

        # Build the SQL once; every row reuses the same prepared text.
        escaped_keys = [f'"{k}"' for k in keys]
        values_placeholders = ', '.join(['%s'] * len(keys))

        # NOTE(review): unlike update()/delete(), the INSERT target is not
        # schema-qualified here (this matches the batch-insert path) — it
        # relies on the connection's search_path. Confirm this is intentional.
        if on_conflict == 'update' and required_cols:
            conflict_cols_str = ','.join([f'"{col}"' for col in required_cols])
            update_fields = [f'"{k}" = EXCLUDED."{k}"' for k in keys]
            # xmax = 0 on the returned row means a fresh insert; non-zero
            # means an existing row was updated by the ON CONFLICT clause.
            sql = f"""
            INSERT INTO "{self._state['table']}" ({', '.join(escaped_keys)})
            VALUES ({values_placeholders})
            ON CONFLICT ({conflict_cols_str}) DO UPDATE SET {', '.join(update_fields)}
            RETURNING CASE WHEN xmax::text = '0' THEN 1 ELSE 0 END as inserted, CASE WHEN xmax::text > '0' THEN 1 ELSE 0 END as updated
            """
        elif on_conflict == 'ignore' and required_cols:
            conflict_cols_str = ','.join([f'"{col}"' for col in required_cols])
            sql = f"""
            INSERT INTO "{self._state['table']}" ({', '.join(escaped_keys)})
            VALUES ({values_placeholders})
            ON CONFLICT ({conflict_cols_str}) DO NOTHING
            RETURNING 1 as inserted
            """
        else:
            sql = f"INSERT INTO \"{self._state['table']}\" ({', '.join(escaped_keys)}) VALUES ({values_placeholders})"

        # Process rows one by one so a single bad row does not abort the rest.
        conn = None
        try:
            conn = self._pool.getconn()
            with conn.cursor() as cs:
                for idx, item in enumerate(data_list):
                    # Reset per row so the except handlers never report the
                    # previous row's params (or raise NameError) when building
                    # this row's tuple itself fails.
                    params = None
                    try:
                        params = tuple(item.get(k) for k in keys)

                        # Ensure the connection is clean before each statement
                        # (a prior failure leaves the transaction aborted).
                        conn.rollback()

                        if self._state['showsql']:
                            print(f"执行 SQL: {cs.mogrify(sql, params)}")

                        cs.execute(sql, params)

                        if on_conflict in ('update', 'ignore'):
                            # Classify the operation from the RETURNING row.
                            try:
                                results = cs.fetchall()
                                if results:
                                    if on_conflict == 'update':
                                        result['inserted'] += results[0][0]
                                        result['updated'] += results[0][1]
                                    else:  # ignore
                                        result['inserted'] += results[0][0]
                                else:
                                    # No row returned: ON CONFLICT DO NOTHING
                                    # skipped this record.
                                    result['skipped'] += 1
                            except psycopg2.ProgrammingError as fetch_error:
                                # "no results to fetch" also means the record
                                # was skipped by ON CONFLICT DO NOTHING.
                                if "no results to fetch" in str(fetch_error).lower():
                                    result['skipped'] += 1
                                else:
                                    # Other programming errors are real bugs.
                                    raise
                        else:
                            # Plain INSERT: rowcount is 1 on success.
                            result['inserted'] += cs.rowcount

                        conn.commit()

                    except psycopg2.IntegrityError as e:
                        conn.rollback()
                        # Duplicate keys are expected in this fallback path;
                        # count them as skipped rather than as errors.
                        if isinstance(e, psycopg2.errors.UniqueViolation):
                            result['skipped'] += 1
                        else:
                            result['errors'] += 1
                            result['error_details'].append({
                                'index': idx,
                                'params': params,
                                'error': str(e)
                            })
                        # Swallowed deliberately so remaining rows still run.
                    except Exception as e:
                        conn.rollback()
                        result['errors'] += 1
                        result['error_details'].append({
                            'index': idx,
                            'params': params,
                            'error': str(e)
                        })
                        # Swallowed deliberately so remaining rows still run.
        finally:
            if conn:
                self._pool.putconn(conn)

        return result

    @auto_clear
    def update(self, data=None):
        """
        Execute an UPDATE on the current table using the WHERE conditions
        previously set via where().

        Args:
            data: dict mapping column name -> new value; must be non-empty.

        Returns:
            The result of self._execute for the UPDATE statement.

        Raises:
            Exception: if data is empty or not a dict, or if table()/where()
                were not called beforehand.
        """
        if not data:
            raise Exception('更新内容不能为空')
        if not isinstance(data, dict):
            raise Exception('参数必须是字典类型')
        # Consistency fix: delete() requires table() to have been called;
        # update() previously did not, and would build SQL targeting an
        # empty table name. Validate the same precondition here.
        if not self._state['table']:
            raise Exception('必须先调用table()方法设置表名')
        if not self._state['where_keys'] or not self._state['where_vals']:
            raise Exception('必须先调用where()方法设置查询条件')
        # Quote column names to keep identifiers case-sensitive.
        set_keys = ','.join([f'"{key}"=%s' for key in data.keys()])
        set_vals = list(data.values())
        # SET values come first, then WHERE values — matching the order of
        # the %s placeholders in the statement below.
        val_list = tuple(set_vals + list(self._state['where_vals']))

        sql = f"UPDATE \"{self.schema}\".\"{self._state['table']}\" SET {set_keys} WHERE {self._state['where_keys']}"
        if self._state['showsql']:
            print(f"执行 SQL: {sql}")
            print(f"参数: {val_list}")

        return self._execute(sql, val_list)
            
    @auto_clear
    def delete(self):
        """Run a DELETE on the current table, restricted by where() conditions.

        Raises:
            ValueError: if table() or where() has not been called first.
        """
        state = self._state
        # Guard clauses: both the target table and the WHERE filter are
        # mandatory before any row deletion is attempted.
        if not state['table']:
            raise ValueError("在执行DELETE操作前，必须先调用table()方法设置表名")
        if not (state['where_keys'] and state['where_vals']):
            raise ValueError("在执行DELETE操作前，必须先调用where()方法设置WHERE条件")

        statement = (
            f"DELETE FROM \"{self.schema}\".\"{state['table']}\" "
            f"WHERE {state['where_keys']}"
        )
        return self._execute(statement, state['where_vals'])

    def showsql(self, showsql=True):
        with self._state['_lock']:
            self._state['showsql'] = showsql
        return self

    def clear_metadata_cache(self, table_name=None):
        """清理实例级元数据缓存"""
        if table_name:
            self._metadata_cache['constraints'].pop(table_name, None)
            self._metadata_cache['structure'].pop(table_name, None)
        else:
            self._metadata_cache['constraints'].clear()
            self._metadata_cache['structure'].clear()
        return self