import psycopg2
import torch

from modelscope import snapshot_download, AutoModelForCausalLM, AutoTokenizer, GenerationConfig
from pgvector.psycopg2 import register_vector
from transformers import AutoTokenizer, AutoModel

# Download (cached after first run) and load the Chinese GTE sentence-embedding
# model used for query vectorization; shared by all VectorSearchEngine instances.
model_dir = snapshot_download('iic/nlp_gte_sentence-embedding_chinese-base')
tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
model = AutoModel.from_pretrained(model_dir)
# Only the snapshot path is resolved here; the Qwen generation model itself is
# loaded lazily inside VectorSearchEngine.__init__.
gen_model_dir = snapshot_download('Qwen/Qwen2.5-1.5B-Instruct')

class VectorSearchEngine:
    """Retrieval-augmented QA engine backed by a pgvector HNSW index.

    Embeds queries with the module-level GTE sentence-embedding model,
    retrieves similar rows from the ``vba_docs`` table via cosine
    similarity, and optionally generates a grounded answer with a
    Qwen2.5 instruct model.
    """

    def __init__(self, db_config):
        """
        Initialize the vector search engine and the generation model.
        :param db_config: dict of psycopg2 connection keyword arguments
        """
        # Embedding model/tokenizer are loaded once at module import and shared.
        self.tokenizer = tokenizer
        self.model = model
        self.db_config = db_config

        # Generation model; device_map="auto" places it on GPU when available.
        self.gen_tokenizer = AutoTokenizer.from_pretrained(gen_model_dir, trust_remote_code=True)
        self.gen_model = AutoModelForCausalLM.from_pretrained(gen_model_dir, trust_remote_code=True, device_map="auto")
        self.gen_model.generation_config = GenerationConfig.from_pretrained(gen_model_dir, trust_remote_code=True)

        self._initialize_database()

    def _initialize_database(self):
        """Open the DB connection, register the pgvector type, ensure the index."""
        self.conn = psycopg2.connect(**self.db_config)
        register_vector(self.conn)
        self._create_index_if_not_exists()

    def _create_index_if_not_exists(self):
        """Create the HNSW cosine index on vba_docs.vector if it does not exist.

        NOTE(review): the index is literally named "collection_name", which
        looks like an unfilled template placeholder. It is kept as-is because
        IF NOT EXISTS matches on the index name; renaming it would create a
        duplicate index on existing databases.
        """
        create_index_query = """
            CREATE INDEX IF NOT EXISTS collection_name
            ON vba_docs
            USING hnsw (vector vector_cosine_ops)
            WITH (m = 16, ef_construction = 64);
        """
        with self.conn.cursor() as cursor:
            cursor.execute(create_index_query)
        self.conn.commit()

    def get_embedding(self, text):
        """Return the sentence embedding of ``text`` as a list of floats.

        Uses attention-mask-weighted mean pooling so that padding tokens
        (present when a list of texts is passed) do not dilute the average.
        For a single unpadded text this is identical to a plain mean over
        the token dimension, which the previous implementation computed.
        """
        inputs = self.tokenizer(text, padding=True, truncation=True, return_tensors="pt")
        with torch.no_grad():
            outputs = self.model(**inputs)
            # (batch, seq, 1) mask broadcast over the hidden dimension.
            mask = inputs["attention_mask"].unsqueeze(-1).type_as(outputs.last_hidden_state)
            summed = (outputs.last_hidden_state * mask).sum(dim=1)
            counts = mask.sum(dim=1).clamp(min=1e-9)  # avoid division by zero
            sentence_embedding = (summed / counts).squeeze().tolist()
        return sentence_embedding

    def similarity_search(self, query_text, top_n=5, ef_search=100, generate_answer=True, similarity_threshold=0.5):
        """
        Retrieve similar documents and optionally generate an answer.
        :param query_text: query text
        :param top_n: maximum number of results to return
        :param ef_search: HNSW search-time candidate-list size
        :param generate_answer: whether to generate an answer from the hits
        :param similarity_threshold: minimum cosine similarity (1 - cosine distance)
        :return: dict with answer/context/sources/similarity_scores when
                 generate_answer is True, otherwise a list of result dicts
        :raises RuntimeError: when the database query fails (the transaction
                 is rolled back first)
        """
        embedding = self.get_embedding(query_text)
        try:
            with self.conn.cursor() as cursor:
                # SET is a utility statement and cannot take bind parameters;
                # coerce to int so a caller-controlled ef_search cannot be
                # used for SQL injection (int() raises on non-numeric input).
                cursor.execute(f"SET hnsw.ef_search = {int(ef_search)};")
                # NOTE(review): ordering by the computed alias prevents the
                # HNSW index from driving the scan; "ORDER BY vector <=> %s::vector"
                # would allow an index scan with the same result order.
                query = """
                    SELECT id, content, metadata, 1 - (vector <=> %s::vector) AS similarity
                    FROM vba_docs
                    WHERE 1 - (vector <=> %s::vector) > %s
                    ORDER BY similarity DESC
                    LIMIT %s;
                """
                cursor.execute(query, (embedding, embedding, similarity_threshold, top_n))
                results = cursor.fetchall()

                if not results:
                    # No hit above the threshold: return a fixed apology answer.
                    return {"answer": "抱歉，我找不到相关的信息来回答这个问题。", "context": "", "sources": []}

                # Row layout: (id, content, metadata, similarity).
                contexts = [row[1] for row in results]
                sources = [row[2] for row in results]

                if generate_answer:
                    context_text = "\n".join(contexts)
                    answer = self.generate_with_context(query_text, context_text)
                    return {
                        "answer": answer,
                        "context": context_text,
                        "sources": sources,
                        "similarity_scores": [float(row[3]) for row in results]
                    }
                else:
                    return [{
                        "content": row[1],
                        "source": row[2],
                        "similarity": float(row[3])
                    } for row in results]

        except Exception as e:
            # Roll back so the connection is reusable, then re-raise with the
            # original exception chained for debugging.
            self.conn.rollback()
            raise RuntimeError(f"Database query failed: {str(e)}") from e

    def generate_with_context(self, query_text, context_text):
        """
        Generate an answer grounded in the retrieved context.
        :param query_text: user query text
        :param context_text: concatenated retrieved passages
        :return: generated answer string, or an error message string on failure
        """
        try:
            # Prompt template (Chinese): answer from the reference info only,
            # and say so explicitly when the answer is not in the references.
            prompt = f"""请基于以下参考信息回答用户的问题。如果无法从参考信息中找到相关内容，请明确说明。
            
                    参考信息：
                    {context_text}

                    用户问题：{query_text}

                    请提供准确、简洁的回答："""

            # Sampling configuration for the answer.
            gen_config = {
                "max_new_tokens": 512,
                "temperature": 0.7,
                "top_p": 0.9,
                "repetition_penalty": 1.1,
                "do_sample": True
            }

            inputs = self.gen_tokenizer(prompt, return_tensors="pt").to(self.gen_model.device)
            outputs = self.gen_model.generate(
                **inputs,
                **gen_config
            )
            # Decode only the newly generated tokens. The previous approach —
            # decoding the whole sequence and splitting on the prompt's last
            # line — is fragile: tokenization does not always round-trip the
            # prompt text exactly, in which case the entire prompt leaked into
            # the returned answer.
            prompt_len = inputs["input_ids"].shape[1]
            answer = self.gen_tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True).strip()

            return answer

        except Exception as e:
            return f"生成回答时出错: {str(e)}"

    def close(self):
        """Close the database connection; safe to call more than once."""
        conn = getattr(self, 'conn', None)
        if conn is not None and not conn.closed:
            conn.close()

    def __del__(self):
        """Best-effort cleanup; prefer calling close() explicitly, since
        finalizer timing is not guaranteed."""
        try:
            self.close()
        except Exception:
            # Never raise from a finalizer.
            pass