# 步骤4：存入ChromaDB
import re


def _infer_tags_from_content(text: str) -> list:
    tags = set()
    if not text:
        return ["general"]
    lower_text = text.lower()

    if any(keyword in lower_text for keyword in ["境界", "练气", "筑基", "金丹", "元婴"]):
        tags.update(["修仙玄幻", "境界体系"])
    if any(keyword in lower_text for keyword in ["修炼资源", "灵气", "仙草", "法宝", "丹药"]):
        tags.update(["修仙玄幻", "修炼资源"])
    if any(keyword in lower_text for keyword in ["势力门派", "魔道", "正道", "宗", "门派"]):
        tags.update(["修仙玄幻", "势力体系"])
    if any(keyword in lower_text for keyword in ["都市", "霸总", "甜宠", "言情"]):
        tags.add("都市言情")
    if any(keyword in lower_text for keyword in ["科幻", "末世", "机甲", "基因", "丧尸"]):
        tags.add("科幻末世")
    if any(keyword in lower_text for keyword in ["穿越", "朝代", "宫斗"]):
        tags.add("历史/古言")
    if any(keyword in lower_text for keyword in ["案件", "推理", "犯罪", "侦探"]):
        tags.add("悬疑推理")
    if any(keyword in lower_text for keyword in ["种田", "经商", "商铺", "农家"]):
        tags.add("种田经商")

    if re.match(r"^#{1,3}\s", text.strip()):
        tags.add("目录/标题")
    if len(text.strip()) < 40:
        tags.add("短文本")

    if not tags:
        tags.add("general")
    return list(tags)


def save_to_chroma(vectors, chunks1=None, grouped=None):
    """
    Persist embedding vectors plus their documents/metadata into ChromaDB.

    Documents are assembled in the order: database novels (``grouped``)
    first, then PDF chunks (``chunks1``). ``vectors`` must be aligned with
    that same order, one vector per document.

    :param vectors: list of embedding vectors
    :param chunks1: optional list of PDF text chunks
    :param grouped: optional list of novel records from the database; each
                    item is a dict expected to carry a "meta" sub-dict
    :raises ValueError: when ``len(vectors)`` does not equal the number of
                        documents built from ``grouped`` + ``chunks1``
    """
    import sys
    import os
    import time
    sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

    from chromadb import PersistentClient

    # Initialize ChromaDB client and collection (same config as RAG_lx_jd.py).
    chroma_client = PersistentClient(path="./chroma_db3")
    chroma_collection = chroma_client.get_or_create_collection(name="simple_rag3")

    db_count = len(grouped) if grouped else 0
    pdf_count = len(chunks1) if chunks1 else 0
    # Fail fast on a vector/document count mismatch instead of letting
    # chroma_collection.add() reject the misaligned batch later.
    if len(vectors) != db_count + pdf_count:
        raise ValueError(
            f"向量数量({len(vectors)})与文档数量({db_count + pdf_count})不匹配"
        )

    ids = [f"doc_{i}" for i in range(len(vectors))]
    metadatas = []
    documents = []
    # One timestamp for the whole batch (was recomputed per item).
    timestamp = time.strftime('%Y-%m-%d %H:%M:%S')

    # Database novels: build a human-readable summary document per record.
    if grouped:
        for item in grouped:
            meta = item.get("meta", {})

            metadata = {
                "source": "database",
                "timestamp": timestamp,
                "title": meta.get("title", ""),
                "author": meta.get("author", ""),
                "category": meta.get("category", ""),
                "word_count": str(meta.get("word_count", 0)),
                "rating": str(meta.get("rating", 0)),
                # ChromaDB metadata values must be scalars (str/int/float/bool);
                # a Python list is rejected at add() time, so tags are stored
                # as a comma-separated string. Sorted for determinism.
                "tags": ", ".join(
                    sorted(set(meta.get("tags", [])) | {"小说简介", "非设定内容"})
                ),
            }

            # Only non-empty fields go into the document text.
            content_parts = []
            if meta.get("title"):
                content_parts.append(f"小说标题: {meta.get('title')}")
            if meta.get("author"):
                content_parts.append(f"作者: {meta.get('author')}")
            if meta.get("category"):
                content_parts.append(f"分类: {meta.get('category')}")
            if meta.get("status"):
                content_parts.append(f"状态: {meta.get('status')}")
            if meta.get("description"):
                content_parts.append(f"简介: {meta.get('description')}")
            if meta.get("tags"):
                tags_str = ", ".join(meta.get("tags", []))
                content_parts.append(f"标签: {tags_str}")

            metadatas.append(metadata)
            documents.append(" ".join(content_parts))

    # PDF chunks: the chunk text itself is the document.
    if chunks1:
        for i, chunk in enumerate(chunks1):
            metadatas.append({
                "source": "pdf",
                "timestamp": timestamp,
                "title": f"PDF段落_{i}",
                "author": "",
                "category": "",
                # Scalar (comma-joined) for the same ChromaDB constraint.
                "tags": ", ".join(_infer_tags_from_content(chunk)),
            })
            documents.append(chunk)

    chroma_collection.add(
        ids=ids,
        embeddings=vectors,
        metadatas=metadatas,
        documents=documents
    )


# 步骤5：存入Elasticsearch
def save_to_es(chunks1, grouped):
    """
    Index text data into Elasticsearch for keyword (BM25) retrieval.

    Database novels (``grouped``) are indexed first, then PDF chunks
    (``chunks1``), with ids ``doc_0 .. doc_{n-1}`` in that order.

    :param chunks1: list of PDF text chunks
    :param grouped: list of novel records from the database; each item is a
                    dict expected to carry a "meta" sub-dict
    """
    import sys
    import os
    sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

    from elasticsearch import Elasticsearch

    # Initialize Elasticsearch client (same config as RAG_lx_jd.py).
    es_client = Elasticsearch(["http://localhost:9200"])

    index_name = "simple_rag_es2"

    # Preferred mapping: IK Chinese analyzers (requires the IK plugin).
    mapping = {
        "mappings": {
            "properties": {
                "content": {
                    "type": "text",
                    "analyzer": "ik_max_word",
                    "search_analyzer": "ik_smart"
                },
                "source": {"type": "keyword"},
                "title": {
                    "type": "text",
                    "analyzer": "ik_max_word",
                    "search_analyzer": "ik_smart"
                },
                "author": {"type": "text"},
                "category": {"type": "keyword"}
            }
        }
    }

    # Fallback mapping: default analyzers, used when IK is not installed.
    simple_mapping = {
        "mappings": {
            "properties": {
                "content": {"type": "text"},
                "source": {"type": "keyword"},
                "title": {"type": "text"},
                "author": {"type": "text"},
                "category": {"type": "keyword"}
            }
        }
    }

    # Create the index on first use; try the IK mapping first and fall back
    # to the plain one. Narrow except (a bare `except:` would also swallow
    # KeyboardInterrupt/SystemExit).
    if not es_client.indices.exists(index=index_name):
        try:
            es_client.indices.create(index=index_name, body=mapping)
        except Exception:
            es_client.indices.create(index=index_name, body=simple_mapping)

    documents = []

    # Database novels: concatenate the non-empty metadata fields into one
    # searchable content string.
    if grouped:
        for item in grouped:
            meta = item.get("meta", {})
            content_parts = []

            if meta.get("title"):
                content_parts.append(f"小说标题: {meta.get('title')}")
            if meta.get("author"):
                content_parts.append(f"作者: {meta.get('author')}")
            if meta.get("category"):
                content_parts.append(f"分类: {meta.get('category')}")
            if meta.get("status"):
                content_parts.append(f"状态: {meta.get('status')}")
            if meta.get("description"):
                content_parts.append(f"简介: {meta.get('description')}")
            if meta.get("tags"):
                tags_str = ", ".join(meta.get("tags", []))
                content_parts.append(f"标签: {tags_str}")

            documents.append({
                "content": " ".join(content_parts),
                "source": "database",
                "title": meta.get("title", ""),
                "author": meta.get("author", ""),
                "category": meta.get("category", "")
            })

    # PDF chunks: the chunk text itself is the content.
    if chunks1:
        for i, chunk in enumerate(chunks1):
            documents.append({
                "content": chunk,
                "source": "pdf",
                "title": f"PDF段落_{i}",
                "author": "",
                "category": ""
            })

    # Index documents one by one; a failure on one document is logged and
    # does not abort the rest (best-effort semantics preserved).
    for i, doc in enumerate(documents):
        try:
            es_client.index(index=index_name, id=f"doc_{i}", document=doc)
        except Exception as e:
            print(f"存储文档到Elasticsearch时出错: {e}")

    # Refresh so the new documents are immediately searchable.
    es_client.indices.refresh(index=index_name)


# 步骤6：用户问题向量化（复用步骤3的向量化函数）
def vectorize_question(question: str):
    """
    Embed a user question with DashScope's OpenAI-compatible API.

    :param question: the user's question text
    :return: a 1024-dim embedding (list of floats); a zero vector of the
             same dimension on failure (best-effort, errors are printed)
    :raises ValueError: when the DASHSCOPE_API_KEY env var is not set
    """
    import os
    from openai import OpenAI

    # Single source of truth for the embedding size — must match the
    # dimension the ChromaDB collection was built with.
    embed_dim = 1024

    api_key = os.getenv("DASHSCOPE_API_KEY")
    if not api_key:
        raise ValueError("未设置环境变量 DASHSCOPE_API_KEY")

    # OpenAI-compatible client pointed at Aliyun Bailian (DashScope).
    client = OpenAI(
        api_key=api_key,
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
    )

    try:
        completion = client.embeddings.create(
            model="text-embedding-v4",
            input=[question],
            dimensions=embed_dim,
            encoding_format="float"
        )

        if completion:
            return completion.data[0].embedding
        print("问题向量化失败，无返回结果")
        return [0.0] * embed_dim  # zero-vector placeholder on empty response
    except Exception as e:
        print(f"问题向量化过程中发生异常: {e}")
        return [0.0] * embed_dim  # zero-vector placeholder on failure
