import numpy as np
import sys
import io
import struct
import logging
from psycopg2.extensions import ISQLQuote, new_type, register_type

# Module logging setup.
# NOTE(review): basicConfig at import time mutates the global logging config;
# fine for scripts, but libraries usually leave configuration to the caller.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("VectorAdapter")


class VectorAdapter:
    """
    Adapter for moving vector data in and out of the database efficiently,
    supporting both quoted-text transfer and binary COPY.

    Vectors are always held internally as big-endian float32 ('>f4') so the
    on-the-wire representation is identical across platforms.
    """

    def __init__(self, value):
        """
        Normalize *value* into a 1-D big-endian float32 numpy array.

        :param value: one of
            - numpy.ndarray (any dtype castable to float32)
            - Python list/tuple of numbers
            - another VectorAdapter instance (its array is reused)
        :raises TypeError: for any other input type
        :raises ValueError: if the resulting array is not 1-dimensional
        """
        if isinstance(value, VectorAdapter):
            # Reuse the already-normalized array from another adapter.
            value = value.value
        elif isinstance(value, np.ndarray):
            # Force big-endian float32 for cross-platform consistency.
            if value.dtype != '>f4':
                value = value.astype('>f4')
        elif isinstance(value, (list, tuple)):
            value = np.array(value, dtype='>f4')
        else:
            raise TypeError(f"Unsupported type for VectorAdapter: {type(value)}")

        # Only flat vectors are supported.
        if value.ndim != 1:
            raise ValueError("Vector must be 1-dimensional")

        # NOTE: the astype('>f4') normalization above already guarantees a
        # big-endian buffer on every platform ('>f4' compares equal to the
        # native dtype on big-endian machines), so no extra byteswap is needed.
        self.value = value

    def __conform__(self, protocol):
        """psycopg2 adaptation hook (ISQLQuote protocol)."""
        if protocol is ISQLQuote:
            return self
        return None

    def getquoted(self):
        """Return the quoted text literal (used for non-binary transfer).

        Format: '[v1,v2,...]'::FLOATVECTOR
        """
        elements = ','.join(str(float(v)) for v in self.value)
        return b"'" + f"[{elements}]".encode('utf-8') + b"'::FLOATVECTOR"

    def get_buffer(self):
        """Return the binary representation (used for COPY).

        Layout: [dimension (uint16) | reserved (uint16)] + big-endian
        float32 payload.
        """
        header = struct.pack('>HH', len(self.value), 0)
        return header + self.value.tobytes()

    @property
    def binary_size(self):
        """Size in bytes of the binary representation (4-byte header + 4 bytes/element)."""
        return 4 + 4 * len(self.value)

    @classmethod
    def from_binary(cls, binary_data):
        """Build a VectorAdapter from the binary layout produced by get_buffer()."""
        dim, _reserved = struct.unpack_from('>HH', binary_data)
        array_data = np.frombuffer(binary_data, dtype='>f4', count=dim, offset=4)
        return cls(array_data)

    @classmethod
    def register_types(cls, connection, oid=5036):
        """
        Register a psycopg2 typecaster for the FLOATVECTOR type.

        :param connection: psycopg2 connection object
        :param oid: OID of the FLOATVECTOR type in the target database
        """

        def cast_vector(value, cursor):
            # Value may arrive as binary bytes, text, or None depending on
            # the transfer mode; always return a float32 ndarray (or None).
            if value is None:
                return None

            # Binary wire format: [dim uint16 | reserved uint16 | >f4 payload].
            if isinstance(value, (bytes, bytearray, memoryview)):
                try:
                    dim, _ = struct.unpack_from('>HH', value)
                    return np.frombuffer(value, dtype='>f4', count=dim, offset=4).astype(np.float32)
                except Exception as e:
                    logger.error(f"Error casting binary vector: {e}")
                    return None

            # Text format: "[1.0,2.0]" or "{1.0,2.0}" or bare "1.0,2.0".
            if isinstance(value, str):
                try:
                    if (value.startswith('[') and value.endswith(']')) or \
                            (value.startswith('{') and value.endswith('}')):
                        text_data = value[1:-1]
                    else:
                        text_data = value

                    return np.array([float(x) for x in text_data.split(',')], dtype=np.float32)
                except Exception as e:
                    logger.error(f"Error casting text vector: {e}")
                    return None

            # Last resort: try to reinterpret the value as a raw float32 buffer.
            try:
                return np.frombuffer(value, dtype=np.float32)
            except Exception as e:
                logger.error(f"Error casting vector: {e}")
                return None

        try:
            # new_type expects a tuple of OIDs.
            vector_type = new_type((oid,), "FLOATVECTOR", cast_vector)
            register_type(vector_type, connection)
        except Exception as e:
            logger.error(f"Failed to register FLOATVECTOR type: {e}")

    @classmethod
    def copy_binary_data(cls, cursor, table, data_rows, columns=None, batch_size=10000):
        """
        Bulk-load multi-column rows (including vector columns) via
        COPY ... FROM STDIN WITH (FORMAT BINARY).

        :param cursor: database cursor (must support copy_expert)
        :param table: target table name (may include a schema prefix)
        :param data_rows: iterable of rows; each row is either
            - dict: {column_name: value, ...}
            - tuple/list: values ordered according to *columns*
        :param columns: optional list of column names; may be omitted when
            rows are dicts (the keys of the first row are used)
        :param batch_size: rows per COPY batch (bounds memory usage)
        :return: total number of rows written

        Example:
            # dict rows
            data = [
                {"id": 1, "name": "test1", "embedding": vector1},
                {"id": 2, "name": "test2", "embedding": vector2},
            ]
            copy_binary_data(cursor, "my_table", data)

            # tuple rows + explicit column list
            data = [(1, "test1", vector1), (2, "test2", vector2)]
            copy_binary_data(cursor, "my_table", data, ["id", "name", "embedding"])
        """
        total_rows = 0
        batch_count = 0

        # In-memory staging buffer for the current batch.
        buffer = io.BytesIO()

        # Column metadata, resolved from the first row.
        column_names = None
        column_count = 0

        def _write_pgcopy_header(buf):
            """Write the PGCOPY binary-format file header."""
            buf.write(b"PGCOPY\n\xff\r\n\00")  # signature
            buf.write(b"\x00\x00\x00\x00")  # flags field
            buf.write(b"\x00\x00\x00\x00")  # header extension length

        def _encode_field_value(value):
            """Encode one field as [int32 length | payload] (or the NULL marker)."""
            if value is None:
                return b"\xff\xff\xff\xff"  # NULL marker

            # bool must be tested BEFORE int: isinstance(True, int) is True,
            # so the int branch would otherwise shadow this one and emit an
            # int4 instead of a 1-byte boolean.
            if isinstance(value, bool):
                bin_data = b'\x01' if value else b'\x00'
                return struct.pack('>i', len(bin_data)) + bin_data

            # Vector-like values (adapter, ndarray, list, tuple).
            if isinstance(value, (cls, np.ndarray, list, tuple)):
                if not isinstance(value, cls):
                    value = cls(value)
                bin_data = value.get_buffer()
                return struct.pack('>i', len(bin_data)) + bin_data

            # Strings.
            if isinstance(value, str):
                encoded = value.encode('utf-8')
                return struct.pack('>i', len(encoded)) + encoded

            # Integers: int4 when possible, then int8, else a decimal string.
            if isinstance(value, int):
                if -2147483648 <= value <= 2147483647:  # fits int4
                    bin_data = struct.pack('>i', value)
                elif -9223372036854775808 <= value <= 9223372036854775807:  # fits int8
                    bin_data = struct.pack('>q', value)
                else:
                    # Outside int8 range: fall back to a decimal string.
                    encoded = str(value).encode('utf-8')
                    return struct.pack('>i', len(encoded)) + encoded
                return struct.pack('>i', len(bin_data)) + bin_data

            # Floats, encoded as double precision.
            if isinstance(value, float):
                bin_data = struct.pack('>d', value)
                return struct.pack('>i', len(bin_data)) + bin_data

            # Anything else: stringify.
            encoded = str(value).encode('utf-8')
            return struct.pack('>i', len(encoded)) + encoded

        def _process_batch(buf, count):
            """Flush one staged batch to the server via COPY."""
            if count == 0:
                return

            # End-of-data trailer, then rewind for copy_expert to read.
            buf.write(b"\xff\xff")
            buf.seek(0)

            try:
                if column_names:
                    columns_str = ', '.join(column_names)
                    copy_query = f"COPY {table} ({columns_str}) FROM STDIN WITH (FORMAT BINARY)"
                else:
                    copy_query = f"COPY {table} FROM STDIN WITH (FORMAT BINARY)"

                cursor.copy_expert(copy_query, buf)
            except Exception as e:
                logger.error(f"批次导入失败: {e}")
                raise

        _write_pgcopy_header(buffer)

        for row_data in data_rows:
            # The first row decides the column list (dict keys take priority
            # over an explicit *columns* argument, as before).
            if column_names is None:
                if isinstance(row_data, dict):
                    column_names = list(row_data.keys())
                elif columns:
                    column_names = columns
                else:
                    raise ValueError("必须提供columns参数或使用字典格式的data_rows")

                column_count = len(column_names)

            # Per-row field count (int16).
            buffer.write(struct.pack('>h', column_count))

            if isinstance(row_data, dict):
                for col_name in column_names:
                    buffer.write(_encode_field_value(row_data.get(col_name)))
            else:
                # tuple/list row: extra values are dropped, missing ones NULL-padded.
                for i, value in enumerate(row_data):
                    if i >= column_count:
                        break
                    buffer.write(_encode_field_value(value))

                for i in range(len(row_data), column_count):
                    buffer.write(b"\xff\xff\xff\xff")

            total_rows += 1
            batch_count += 1

            # Flush in batches to bound peak memory.
            if batch_count >= batch_size:
                _process_batch(buffer, batch_count)

                buffer = io.BytesIO()
                _write_pgcopy_header(buffer)
                batch_count = 0

        # Flush the final (possibly partial) batch.
        _process_batch(buffer, batch_count)

        return total_rows

    @classmethod
    def copy_vectors_only(cls, cursor, table, vectors, column="embedding", batch_size=10000):
        """
        Backward-compatible helper: bulk-import vector data only.

        :param cursor: database cursor
        :param table: target table name
        :param vectors: iterable of vectors
        :param column: vector column name
        :param batch_size: rows per COPY batch
        """
        vector_data = [{column: vector} for vector in vectors]
        return cls.copy_binary_data(cursor, table, vector_data, batch_size=batch_size)