import requests
import psycopg2
from psycopg2.extras import execute_values
import os
import logging
import time
import sys

# 配置日志
# 创建日志格式器
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

# 创建文件处理器
file_handler = logging.FileHandler('search_results.log')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)

# 创建控制台处理器
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(formatter)

# 获取根日志记录器
logger = logging.getLogger()
logger.setLevel(logging.INFO)

# 清除所有现有的处理器
for handler in logger.handlers[:]:
    logger.removeHandler(handler)

# 添加新的处理器
logger.addHandler(file_handler)
logger.addHandler(console_handler)

class OllamaVectorSearch:
    """Vector similarity search backed by PostgreSQL/pgvector.

    Embeddings are produced by an Ollama server and stored in a
    ``halfvec(3584)`` column indexed with HNSW for L2 nearest-neighbour
    search.
    """

    def __init__(self, db_params, ollama_base_url="http://10.48.0.81:11434"):
        """Open the database connection and remember the Ollama endpoint.

        Args:
            db_params: keyword arguments forwarded to ``psycopg2.connect``.
            ollama_base_url: base URL of the Ollama HTTP API.
        """
        self.conn = psycopg2.connect(**db_params)
        self.ollama_url = ollama_base_url

    def init_db(self):
        """Create the vector extension, the documents table and its HNSW index."""
        with self.conn.cursor() as cur:
            # The vector extension provides the halfvec type and operators.
            cur.execute("CREATE EXTENSION IF NOT EXISTS vector")

            # Document store: raw text plus its half-precision embedding.
            # 3584 must match the embedding model's output dimension
            # (see get_embedding) -- TODO confirm if the model changes.
            cur.execute("""
                CREATE TABLE IF NOT EXISTS hnsw_embedding_documents (
                    id SERIAL PRIMARY KEY,
                    content TEXT,
                    embedding halfvec(3584)
                )
            """)

            # HNSW index using L2 distance (the <-> operator in queries).
            cur.execute("""
                CREATE INDEX IF NOT EXISTS content_hnsw_idx
                ON hnsw_embedding_documents USING hnsw ((embedding::halfvec(3584)) halfvec_l2_ops);
            """)
        self.conn.commit()

    def get_embedding(self, text, timeout=60):
        """Return the embedding vector for *text* from the Ollama API.

        Args:
            text: the text to embed.
            timeout: seconds to wait for the HTTP response (new keyword,
                default keeps callers working).

        Returns:
            The embedding list produced by the model.

        Raises:
            requests.Timeout: if no response arrives within *timeout*.
            requests.HTTPError: if the server answers with an error status.
            KeyError: if the response body has no 'embedding' field.
        """
        response = requests.post(
            f"{self.ollama_url}/api/embeddings",
            json={
                "model": "bsahane/Qwen2.5-VL:7b",
                "prompt": text
            },
            timeout=timeout,  # fix: request previously had no timeout and could hang forever
        )
        # fix: surface HTTP failures explicitly instead of letting a 4xx/5xx
        # body raise an opaque KeyError below.
        response.raise_for_status()
        embedding = response.json()['embedding']
        logger.info(f"[Embedding Retrieved] Text: {text[:50]}... | Embedding Length: {len(embedding)}")
        return embedding

    def insert_documents(self, texts):
        """Embed each text and bulk-insert (content, embedding) rows.

        Texts whose embedding is anything other than a non-empty list are
        skipped with a warning instead of aborting the whole batch.
        """
        embeddings = [self.get_embedding(text) for text in texts]

        # Keep only pairs with a usable embedding.
        valid_values = []
        for text, embedding in zip(texts, embeddings):
            if isinstance(embedding, list) and embedding:
                valid_values.append((text, embedding))
            else:
                logger.warning(f"无效的 embedding 或空值，跳过插入: {text[:50]}...")

        if valid_values:
            with self.conn.cursor() as cur:
                # execute_values batches every row into a single INSERT; the
                # ::halfvec cast converts the adapted array server-side.
                execute_values(
                    cur,
                    "INSERT INTO hnsw_embedding_documents (content, embedding) VALUES %s",
                    valid_values,
                    template="(%s, %s::halfvec)"
                )
            self.conn.commit()
            logger.info(f"[Documents Inserted] 成功插入文档数量: {len(valid_values)}")
        else:
            logger.warning("[Insertion Skipped] 没有有效的文档可以插入")

    def search_similar(self, query_text, limit=5):
        """Return the *limit* documents most similar to *query_text*.

        Returns:
            list of (content, L2 distance) tuples, closest first.
        """
        start_time = time.time()
        logger.info(f"\n{'='*50}\n[开始检索] 查询文本: {query_text}\n{'='*50}")

        query_embedding = self.get_embedding(query_text)

        with self.conn.cursor() as cur:
            # fix: filter on the column itself rather than on the distance
            # expression -- "embedding <-> x IS NOT NULL" is NULL only when
            # embedding is NULL, so this is equivalent, clearer, and keeps
            # the ORDER BY expression free to use the HNSW index.
            cur.execute("""
                SELECT content, embedding <-> %s::halfvec AS distance
                FROM hnsw_embedding_documents
                WHERE embedding IS NOT NULL
                ORDER BY embedding <-> %s::halfvec
                LIMIT %s
            """, (query_embedding, query_embedding, limit))

            results = [(content, float(distance)) for content, distance in cur.fetchall()]
            elapsed_time = time.time() - start_time

            logger.info("\n[检索结果]:")
            logger.info(f"{'='*50}")
            for idx, (content, distance) in enumerate(results, 1):
                logger.info(f"\n[结果 {idx}]")
                # NOTE(review): 1 - distance is not a bounded similarity for
                # L2 distance (it can go negative); kept for log compatibility.
                logger.info(f"相似度得分: {1 - distance:.4f}")
                logger.info(f"内容: {content}")
                logger.info(f"{'-'*30}")

            logger.info(f"\n[检索完成] 耗时: {elapsed_time:.4f} 秒\n{'='*50}\n")
            return results

    def close(self):
        """Close the PostgreSQL connection."""
        self.conn.close()


# Example usage
if __name__ == "__main__":
    # Connection settings for the pgvector-enabled PostgreSQL instance.
    db_params = {
        "dbname": "pgvector",
        "user": "pgvector",
        "password": "pgvector",
        "host": "10.48.0.81",
        "port": 54333
    }

    searcher = OllamaVectorSearch(db_params)

    try:
        # Schema setup and document ingestion (init_db / insert_documents
        # over a Markdown file) were run previously and are intentionally
        # disabled here; only the similarity search is exercised.
        query = "水文预报fadgaga数据"
        results = searcher.search_similar(query)
    finally:
        # Release the database connection even if the search fails.
        searcher.close()