import logging
import os
import re
import sqlite3
from datetime import datetime
from typing import List, Dict, Any
from zoneinfo import ZoneInfo

import pytz
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.messages import HumanMessage, AIMessage, BaseMessage

# Configure logging: write to rag.log next to this file and echo to the console.
logging.basicConfig(level=logging.INFO,
                   format='%(asctime)s - %(levelname)s - %(message)s',
                   handlers=[
                       logging.FileHandler(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'rag.log')),
                       logging.StreamHandler()
                   ])
logger = logging.getLogger(__name__)

# Load environment variables from a .env file, if present.
load_dotenv()

# Fail fast at import time if the required API key is missing.
DASHSCOPE_API_KEY = os.getenv('DASHSCOPE_API_KEY')
if not DASHSCOPE_API_KEY:
    logger.error("DASHSCOPE_API_KEY not found in environment variables")
    raise ValueError("DASHSCOPE_API_KEY not found in environment variables. Please set it in .env file")

# Initialize the embedding and chat models once at module load to avoid
# repeated (expensive) re-initialization per request.
try:
    embedding = DashScopeEmbeddings(
        model="text-embedding-v2",
        dashscope_api_key=DASHSCOPE_API_KEY
    )
    model = ChatTongyi(
        model="deepseek-r1",
        dashscope_api_key=DASHSCOPE_API_KEY
    )
    logger.info("Successfully initialized embedding and chat models")
except Exception as e:
    logger.error(f"Error initializing models: {str(e)}")
    raise

# Filesystem locations for the persisted FAISS vectorstore
# (index.faiss holds the vectors; index.pkl holds the docstore metadata).
BACKEND_DIR = os.path.dirname(os.path.abspath(__file__))
VECTORSTORE_DIR = os.path.join(BACKEND_DIR, "vectorstore")
VECTORSTORE_PATH = os.path.join(VECTORSTORE_DIR, "index.faiss")
VECTORSTORE_PKL = os.path.join(VECTORSTORE_DIR, "index.pkl")

def get_current_time() -> str:
    """
    Return the current Beijing time formatted as "YYYY-MM-DD HH:MM:SS".

    Uses the stdlib ``zoneinfo`` module instead of the third-party ``pytz``
    (pytz is in maintenance mode; zoneinfo is the recommended replacement
    since Python 3.9). Asia/Shanghai is the canonical IANA zone for Beijing.
    """
    beijing_now = datetime.now(ZoneInfo("Asia/Shanghai"))
    return beijing_now.strftime("%Y-%m-%d %H:%M:%S")

def process_document(document_text: str):
    """
    Split document text into chunks, embed each chunk, and return a FAISS
    vectorstore containing the embeddings.

    Args:
        document_text: The raw text content of the document.

    Returns:
        A FAISS vectorstore built from all valid chunks.

    Raises:
        ValueError: If the text is empty or splitting produces no chunks.
        Exception: If vectorstore creation fails; the original error is
            chained as ``__cause__`` so the full traceback is preserved.
    """
    try:
        if not document_text or not document_text.strip():
            logger.error("Empty document text provided")
            raise ValueError("Document text cannot be empty")

        logger.info("Starting document processing")
        logger.info(f"Document length: {len(document_text)} characters")

        # Split into paragraphs first; each paragraph is then chunked on its own.
        paragraphs = [p.strip() for p in document_text.split('\n') if p.strip()]
        logger.info(f"Document split into {len(paragraphs)} paragraphs")

        # Small chunk size: Chinese text typically yields more tokens per
        # character, so keep chunks well under the embedding API limit.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=200,
            chunk_overlap=20,
            length_function=len,
            is_separator_regex=False,
            # Include Chinese sentence punctuation as split points.
            separators=["\n\n", "\n", "。", "！", "？", "；", "，", " ", ""]
        )

        all_chunks = []
        for i, para in enumerate(paragraphs):
            chunks = text_splitter.split_text(para)
            all_chunks.extend(chunks)
            logger.info(f"Paragraph {i+1} split into {len(chunks)} chunks")

        logger.info(f"Total chunks after splitting: {len(all_chunks)}")

        if not all_chunks:
            logger.error("No text chunks generated after splitting")
            raise ValueError("Document splitting resulted in no chunks")

        # Log per-chunk lengths for debugging embedding-size issues.
        for i, text in enumerate(all_chunks):
            logger.info(f"Chunk {i} length: {len(text)} characters")

        try:
            # Embed in small batches to stay within the embedding API's
            # per-request limits.
            vectorstore = None
            batch_size = 5

            for i in range(0, len(all_chunks), batch_size):
                batch_texts = all_chunks[i:i + batch_size]
                logger.info(f"Processing batch {i//batch_size + 1} of {(len(all_chunks) + batch_size - 1)//batch_size}")

                # Drop empty chunks and any chunk above a conservative cap.
                valid_texts = []
                for text in batch_texts:
                    if 0 < len(text) <= 800:
                        valid_texts.append(text)
                    else:
                        logger.warning(f"Skipping text chunk of length {len(text)}")

                if valid_texts:
                    if vectorstore is None:
                        vectorstore = FAISS.from_texts(valid_texts, embedding)
                    else:
                        vectorstore.add_texts(valid_texts)

                logger.info(f"Successfully processed batch {i//batch_size + 1}")

            if vectorstore is None:
                raise Exception("Failed to create vectorstore: no vectors were created")

            logger.info("Vectorstore created successfully")
            return vectorstore
        except Exception as e:
            logger.error(f"Error creating vectorstore: {str(e)}")
            # Chain the original exception so its traceback is not lost.
            raise Exception(f"Error creating vectorstore: {str(e)}") from e
    except Exception as e:
        logger.error(f"Error in process_document: {str(e)}")
        raise

class RagChatBot:
    """Chatbot that answers queries via RAG over a persisted FAISS vectorstore,
    falling back to pure chat when no vectorstore is available."""

    def __init__(self):
        # Share the module-level models rather than re-initializing per instance.
        self.embedding = embedding
        self.model = model
        self.vectorstore = self._load_vectorstore()
        logger.info(f"RagChatBot initialized. Vectorstore loaded: {self.vectorstore is not None}")

    def _load_vectorstore(self):
        """Load the persisted FAISS index from disk; return None if absent or unreadable."""
        index_on_disk = os.path.exists(VECTORSTORE_PATH) and os.path.exists(VECTORSTORE_PKL)
        if not index_on_disk:
            logger.info("No existing vectorstore found")
            return None
        try:
            logger.info(f"Loading vectorstore from {VECTORSTORE_DIR}")
            # NOTE(review): load_local unpickles index.pkl — only load files this
            # app wrote itself; unpickling untrusted data is unsafe.
            return FAISS.load_local(VECTORSTORE_DIR, self.embedding)
        except Exception as e:
            logger.error(f"Error loading vectorstore: {e}")
            return None

    def _save_vectorstore(self, vectorstore):
        """Persist the given vectorstore to VECTORSTORE_DIR; return True on success."""
        try:
            os.makedirs(VECTORSTORE_DIR, exist_ok=True)  # create target dir if needed
            vectorstore.save_local(VECTORSTORE_DIR)
            logger.info(f"Vectorstore saved to {VECTORSTORE_DIR}")
        except Exception as e:
            logger.error(f"Error saving vectorstore: {e}")
            return False
        return True

    def chat(self, query: str, deep_search: bool = False, reasoning: bool = False, vectorstore = None) -> str:
        """
        Answer a user query in RAG or pure-chat mode.

        Args:
            query (str): The user's question.
            deep_search (bool): Ask the model for a deeper, more detailed answer.
            reasoning (bool): Ask the model to explain its reasoning.
            vectorstore: Optional vectorstore that overrides the instance's own.

        Returns:
            str: The generated answer.
        """
        # Time questions are answered locally without calling the LLM.
        lowered = query.lower()
        time_keywords = ["现在时间", "当前时间", "几点了", "what time", "current time"]
        for keyword in time_keywords:
            if keyword in lowered:
                return f"当前北京时间是：{get_current_time()}"

        # An explicitly supplied vectorstore takes precedence over the stored one.
        active_store = self.vectorstore if vectorstore is None else vectorstore
        return rag_tool_func(query, active_store, deep_search, reasoning)

    def process_document(self, document_text: str):
        """
        Embed a new document, replace the instance's vectorstore with the
        result, and persist it to disk.

        Args:
            document_text: The document's text content.
        """
        logger.info("Processing new document")
        try:
            new_vectorstore = process_document(document_text)
            if not new_vectorstore:
                logger.error("Failed to create vectorstore from document")
                raise Exception("Failed to create vectorstore")
            self.vectorstore = new_vectorstore
            success = self._save_vectorstore(self.vectorstore)
            logger.info(f"Document processed and vectorstore saved: {success}")
            if not success:
                raise Exception("Failed to save vectorstore")
        except Exception as e:
            logger.error(f"Error in RagChatBot.process_document: {str(e)}")
            raise

def rag_tool_func(query: str, vectorstore=None, deep_search: bool = False, reasoning: bool = False) -> str:
    """
    Answer a query in pure-chat mode (no vectorstore) or RAG mode.

    Args:
        query (str): The user's question.
        vectorstore: Optional FAISS vectorstore instance; supplying it enables RAG mode.
        deep_search (bool): Request a deeper, more detailed answer.
        reasoning (bool): Request explicit step-by-step reasoning.

    Returns:
        str: The model-generated answer.
    """
    if vectorstore is None:
        # Pure chat: augment the query with the requested instructions and
        # call the model directly.
        parts = [query]
        if deep_search:
            parts.append("请进行深度思考并提供详细信息。")
        if reasoning:
            parts.append("请进行逻辑推理并解释你的思考过程。")
        answer = model.invoke(" ".join(parts).strip())
        return answer.content

    # RAG mode: retrieve supporting chunks and answer under a strict
    # grounding constraint so the model sticks to the document.
    qa_chain = RetrievalQA.from_chain_type(llm=model, retriever=vectorstore.as_retriever())

    # NOTE(review): the constraint text below becomes part of the query that
    # is also embedded for similarity search, which may skew retrieval —
    # confirm this is intended.
    rag_constraint = "请只根据提供的文档内容作答，如果文档没有相关内容，请直接回复'文档中未找到相关信息'。不要使用你自己的知识，只能引用文档内容。"
    parts = [rag_constraint, query]
    if deep_search:
        parts.append("请参考提供的文档，进行深度分析并回答问题。")
    if reasoning:
        parts.append("请参考提供的文档，进行逻辑推理并解释你的思考过程。")
    return qa_chain.run(" ".join(parts).strip())

def extract_city(query: str,
                 filler_words=("天气", "怎么样", "怎样", "如何", "吗", "？",
                               "今天", "明天", "后天", " ")) -> str:
    """
    Extract a city name from a weather query by stripping filler words.

    Args:
        query: The user's weather query, e.g. "北京天气怎么样？".
        filler_words: Words and punctuation removed sequentially, in order.
            The default covers common Chinese weather-query phrasing; pass a
            custom sequence to handle other query styles.

    Returns:
        The remaining text with surrounding whitespace stripped — assumed to
        be the city name.
    """
    city = query
    # Removal is sequential and order-sensitive: longer phrases are listed
    # before shorter overlapping ones so no partial leftovers remain.
    for word in filler_words:
        city = city.replace(word, "")
    return city.strip()