#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import sqlite3
import json
import logging
import numpy as np
import hashlib
import time
from typing import Dict, List, Any, Optional, Union, Tuple

from vector_store.vector_store_interface import VectorStoreInterface

# Module-level logger for this vector-store backend.
logger = logging.getLogger(__name__)

class SQLiteVectorStore(VectorStoreInterface):
    """SQLite-backed vector store for knowledge-graph node embeddings.

    Each node's embedding is stored as raw little-endian float32 bytes in a
    BLOB column, keyed by a 32-character MD5 VID that matches the ID scheme
    used by the graph database. Similarity search is a brute-force cosine
    scan over all (optionally type-filtered) rows, which is adequate for
    small/medium collections.
    """

    # Node types accepted by store_node_vector().
    _SUPPORTED_NODE_TYPES = ('class', 'function', 'annotation')

    def __init__(self, db_path: str = "vector_store.db", api_key: Optional[str] = None):
        """Initialize the SQLite vector store.

        Args:
            db_path: Path to the SQLite database file.
            api_key: Embedding-model API key (kept for backward
                compatibility; no longer used).
        """
        self.db_path = db_path
        self.conn: Optional[sqlite3.Connection] = None
        # Cache of original_id -> MD5 VID, avoids re-hashing repeated IDs.
        self.vid_cache: Dict[str, str] = {}
        self.initialize_db()

    def __enter__(self) -> "SQLiteVectorStore":
        """Support `with SQLiteVectorStore(...) as store:` usage."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """Close the connection when leaving a `with` block."""
        self.close()

    def initialize_db(self) -> None:
        """Open the database and create the table/indexes if missing.

        Raises:
            Exception: Re-raises any sqlite3 error after logging it.
        """
        try:
            self.conn = sqlite3.connect(self.db_path)
            cursor = self.conn.cursor()

            # Main vector table: one row per node, embedding stored as BLOB.
            cursor.execute('''
            CREATE TABLE IF NOT EXISTS node_vectors (
                vid TEXT PRIMARY KEY,
                original_id TEXT NOT NULL,
                node_type TEXT NOT NULL,
                node_text TEXT NOT NULL,
                vector BLOB NOT NULL,
                metadata TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL
            )
            ''')

            # Secondary indexes for type filtering and original-ID lookup.
            cursor.execute('CREATE INDEX IF NOT EXISTS idx_node_type ON node_vectors (node_type)')
            cursor.execute('CREATE INDEX IF NOT EXISTS idx_original_id ON node_vectors (original_id)')

            self.conn.commit()
            logger.info(f"SQLite向量数据库初始化成功: {self.db_path}")
        except Exception as e:
            logger.error(f"SQLite向量数据库初始化失败: {str(e)}")
            raise

    def close(self) -> None:
        """Close the database connection (idempotent)."""
        if self.conn:
            self.conn.close()
            self.conn = None

    def _generate_vid(self, original_id: str) -> str:
        """Derive the 32-char VID the graph database uses for an ID.

        Args:
            original_id: Original ID string.

        Returns:
            Fixed-length 32-character hex string (MD5 of the UTF-8 ID).
        """
        # Serve repeated IDs from the cache instead of re-hashing.
        cached = self.vid_cache.get(original_id)
        if cached is not None:
            return cached

        # MD5 yields exactly 32 hex chars, matching the graph database's VID
        # length requirement. (Used as an identifier, not for security.)
        vid = hashlib.md5(original_id.encode('utf-8')).hexdigest()
        self.vid_cache[original_id] = vid
        return vid

    def _vector_to_blob(self, vector: List[float]) -> bytes:
        """Serialize an embedding to raw float32 bytes for BLOB storage."""
        return np.array(vector, dtype=np.float32).tobytes()

    def _blob_to_vector(self, blob: bytes) -> List[float]:
        """Deserialize raw float32 BLOB bytes back into a list of floats."""
        return np.frombuffer(blob, dtype=np.float32).tolist()

    def _row_to_dict(self, row: Tuple) -> Dict[str, Any]:
        """Convert a full node_vectors row into the public result dict."""
        vid, original_id, node_type, node_text, vector_blob, metadata_json, created_at, updated_at = row
        return {
            'vid': vid,
            'original_id': original_id,
            'node_type': node_type,
            'node_text': node_text,
            'vector': self._blob_to_vector(vector_blob),
            # The metadata column is nullable; treat NULL as an empty dict
            # instead of letting json.loads(None) raise.
            'metadata': json.loads(metadata_json) if metadata_json else {},
            'created_at': created_at,
            'updated_at': updated_at
        }

    def store_node_vector(self, vid: str, node_type: str, node_text: str, vector: List[float], metadata: Optional[Dict[str, Any]] = None) -> bool:
        """Store (or update) a node embedding.

        Args:
            vid: Node ID; converted to the graph database's VID format.
            node_type: Node type ('class', 'function' or 'annotation').
            node_text: Text content of the node.
            vector: Embedding vector of the node.
            metadata: Optional JSON-serializable node metadata.

        Returns:
            True on success, False otherwise.
        """
        try:
            if node_type not in self._SUPPORTED_NODE_TYPES:
                logger.warning(f"不支持的节点类型: {node_type}，仅支持class、function和annotation")
                return False

            # Convert to the same VID format the graph database uses.
            nebula_vid = self._generate_vid(vid)
            logger.debug(f"原始ID: {vid} -> 向量存储VID: {nebula_vid}")

            meta_json = json.dumps(metadata) if metadata else "{}"
            current_time = int(time.time())
            vector_blob = self._vector_to_blob(vector)

            cursor = self.conn.cursor()
            # Upsert instead of REPLACE: REPLACE deletes the old row, which
            # would reset created_at on every update. ON CONFLICT keeps the
            # original created_at and only refreshes the mutable columns.
            # (Requires SQLite >= 3.24, bundled with all supported Pythons.)
            cursor.execute('''
            INSERT INTO node_vectors
            (vid, original_id, node_type, node_text, vector, metadata, created_at, updated_at)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?)
            ON CONFLICT(vid) DO UPDATE SET
                original_id = excluded.original_id,
                node_type = excluded.node_type,
                node_text = excluded.node_text,
                vector = excluded.vector,
                metadata = excluded.metadata,
                updated_at = excluded.updated_at
            ''', (nebula_vid, vid, node_type, node_text, vector_blob, meta_json, current_time, current_time))

            self.conn.commit()
            logger.info(f"节点向量已存储/更新: {vid} ({node_type})")
            return True
        except Exception as e:
            logger.error(f"存储节点向量失败: {str(e)}")
            return False

    def get_node_vector(self, vid: str) -> Optional[Dict[str, Any]]:
        """Fetch a node's stored data by its (pre-hash) ID.

        Args:
            vid: Node ID; converted to the graph database's VID format.

        Returns:
            Node data dict, or None when missing or on error.
        """
        try:
            nebula_vid = self._generate_vid(vid)

            cursor = self.conn.cursor()
            cursor.execute('''
            SELECT vid, original_id, node_type, node_text, vector, metadata, created_at, updated_at
            FROM node_vectors
            WHERE vid = ?
            ''', (nebula_vid,))

            row = cursor.fetchone()
            return self._row_to_dict(row) if row else None
        except Exception as e:
            logger.error(f"获取节点向量失败: {str(e)}")
            return None

    def get_node_by_original_id(self, original_id: str) -> Optional[Dict[str, Any]]:
        """Fetch a node's stored data by its original (unhashed) ID.

        Args:
            original_id: Original node ID.

        Returns:
            Node data dict, or None when missing or on error.
        """
        try:
            cursor = self.conn.cursor()
            cursor.execute('''
            SELECT vid, original_id, node_type, node_text, vector, metadata, created_at, updated_at
            FROM node_vectors
            WHERE original_id = ?
            ''', (original_id,))

            row = cursor.fetchone()
            return self._row_to_dict(row) if row else None
        except Exception as e:
            logger.error(f"通过原始ID获取节点向量失败: {str(e)}")
            return None

    def search_similar_vectors(self, query_vector: List[float], node_type: Optional[str] = None, top_k: int = 10) -> List[Dict[str, Any]]:
        """Brute-force cosine-similarity search over stored node vectors.

        Args:
            query_vector: Query embedding.
            node_type: Optional node-type filter ('class', 'function' or
                'annotation'); None means no filtering.
            top_k: Maximum number of results to return.

        Returns:
            Up to top_k node dicts sorted by similarity, highest first;
            empty list on error.
        """
        try:
            query_np = np.array(query_vector, dtype=np.float32)
            # Hoist the loop-invariant query norm out of the per-row loop.
            query_norm = float(np.linalg.norm(query_np))

            cursor = self.conn.cursor()
            if node_type:
                cursor.execute('''
                SELECT vid, original_id, node_type, node_text, vector, metadata
                FROM node_vectors
                WHERE node_type = ?
                ''', (node_type,))
            else:
                cursor.execute('''
                SELECT vid, original_id, node_type, node_text, vector, metadata
                FROM node_vectors
                ''')

            results = []
            # Distinct loop variable (row_type) so the node_type parameter
            # is not shadowed inside the loop.
            for vid, original_id, row_type, node_text, vector_blob, metadata_json in cursor.fetchall():
                node_vector = np.frombuffer(vector_blob, dtype=np.float32)

                # Cosine similarity, defined as 0.0 when either vector has
                # zero norm — dividing would yield NaN and corrupt the sort.
                denom = query_norm * float(np.linalg.norm(node_vector))
                similarity = float(np.dot(query_np, node_vector) / denom) if denom > 0.0 else 0.0

                results.append({
                    'vid': vid,
                    'original_id': original_id,
                    'node_type': row_type,
                    'node_text': node_text,
                    'similarity': similarity,
                    'metadata': json.loads(metadata_json) if metadata_json else {}
                })

            # Highest similarity first.
            results.sort(key=lambda item: item['similarity'], reverse=True)
            return results[:top_k]
        except Exception as e:
            logger.error(f"搜索相似节点失败: {str(e)}")
            return []

    def delete_node_vector(self, vid: str) -> bool:
        """Delete a node's vector row.

        Args:
            vid: Node ID; converted to the graph database's VID format.

        Returns:
            True if a row was deleted, False otherwise (including errors).
        """
        try:
            nebula_vid = self._generate_vid(vid)

            cursor = self.conn.cursor()
            cursor.execute('DELETE FROM node_vectors WHERE vid = ?', (nebula_vid,))
            self.conn.commit()

            affected = cursor.rowcount > 0
            if affected:
                logger.info(f"节点向量已删除: {vid}")
            else:
                logger.warning(f"未找到要删除的节点向量: {vid}")
            return affected
        except Exception as e:
            logger.error(f"删除节点向量失败: {str(e)}")
            return False

    def get_stats(self) -> Dict[str, Any]:
        """Summarize the store's contents.

        Returns:
            Dict with 'total_count', per-type 'type_counts' and
            'last_updated' (None when the table is empty). On error a
            zeroed-out dict is returned instead of raising.
        """
        try:
            cursor = self.conn.cursor()

            # Total row count.
            cursor.execute('SELECT COUNT(*) FROM node_vectors')
            total_count = cursor.fetchone()[0]

            # Per-type breakdown.
            cursor.execute('SELECT node_type, COUNT(*) FROM node_vectors GROUP BY node_type')
            type_counts = {row_type: count for row_type, count in cursor.fetchall()}

            # MAX(updated_at) is NULL (-> None) on an empty table.
            cursor.execute('SELECT MAX(updated_at) FROM node_vectors')
            last_updated = cursor.fetchone()[0]

            return {
                'total_count': total_count,
                'type_counts': type_counts,
                'last_updated': last_updated
            }
        except Exception as e:
            logger.error(f"获取统计信息失败: {str(e)}")
            return {
                'total_count': 0,
                'type_counts': {},
                'last_updated': None
            }