#!/usr/bin/python
# -*- coding: UTF-8 -*-

import os
import sys
import warnings
import time
from typing import List, Optional

# Silence DeprecationWarning/UserWarning noise emitted by the imported ML libraries
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)

try:
    import chromadb
    import jieba as jb
    from langchain.chains import RetrievalQA
    from langchain_community.chat_models import ChatOpenAI
    from langchain_community.document_loaders import DirectoryLoader, TextLoader
    from langchain_community.embeddings import HuggingFaceEmbeddings
    from langchain_community.vectorstores import Chroma
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain.schema import Document
except ImportError as e:
    print(f"导入模块失败: {e}")
    print("请确保已安装所有必需的依赖包:")
    print("pip install chromadb langchain langchain-community jieba sentence-transformers")
    sys.exit(1)

# DeepSeek API key: prefer the DEEPSEEK_API_KEY environment variable so the
# secret does not have to live in source control; fall back to the legacy
# hard-coded value for backward compatibility.
# NOTE(review): the fallback key below is committed to the repo and should be
# rotated/revoked.
DEEPSEEK_API_KEY = os.environ.get('DEEPSEEK_API_KEY', 'sk-980ecc7694e14620a29b5d679a3d1c05')

class LocalQASystem:
    """Local knowledge-base question answering system.

    Pipeline: load .txt files from ./data, tokenize them with jieba, split
    them into overlapping chunks, embed the chunks into an in-memory Chroma
    vector store, and answer questions through a RetrievalQA chain backed by
    DeepSeek's OpenAI-compatible chat API.
    """

    def __init__(self):
        self.chain = None        # RetrievalQA chain (built by create_qa_chain)
        self.embeddings = None   # HuggingFaceEmbeddings (lazily built once)
        self.vectordb = None     # Chroma vector store (built by create_vector_db)
        # Cache of tokenized/split documents so create_vector_db() does not
        # reload and re-tokenize everything process_documents() already did.
        self._processed_docs = None

    def init_system(self) -> bool:
        """Initialize the whole system: documents -> vector DB -> QA chain.

        Returns:
            True on success, False if any stage raised (traceback is printed).
        """
        try:
            print("正在初始化问答系统...")
            start_time = time.time()

            # 1. Load and preprocess documents; the result is cached on the
            #    instance so create_vector_db() reuses it instead of redoing
            #    the load + jieba tokenization + splitting a second time.
            print("1. 处理文档...")
            self.process_documents()

            # 2. Build the vector database from the cached chunks.
            print("2. 创建向量数据库...")
            self.create_vector_db()

            # 3. Build the retrieval QA chain on top of the vector database.
            print("3. 创建问答链...")
            self.create_qa_chain()

            end_time = time.time()
            print(f"系统初始化完成，耗时: {end_time - start_time:.2f}秒")
            return True

        except Exception as e:
            print(f"系统初始化失败: {e}")
            import traceback
            traceback.print_exc()
            return False

    def process_documents(self) -> "List[Document]":
        """Load, tokenize and split all documents under ./data.

        Side effect: caches the processed chunks in ``self._processed_docs``
        for reuse by :meth:`create_vector_db`.

        Returns:
            The list of split Document chunks ([] when nothing was loaded).
        """
        try:
            cur_dir = os.path.dirname(os.path.abspath(__file__))
            data_dir = os.path.join(cur_dir, 'data')

            # First run: create the data directory and tell the user to fill it.
            if not os.path.exists(data_dir):
                os.makedirs(data_dir, exist_ok=True)
                print(f"警告: {data_dir} 目录为空，请添加txt文档")
                return []

            documents = self.load_documents(data_dir)
            if not documents:
                print("警告: 没有找到任何文档")
                return []

            processed_docs = self.preprocess_documents(documents)
            self._processed_docs = processed_docs  # cache for create_vector_db()
            return processed_docs

        except Exception as e:
            print(f"处理文档时出错: {e}")
            return []

    def load_documents(self, data_dir: str) -> "List[Document]":
        """Recursively load every .txt file under *data_dir*.

        Files whose name starts with ``cut_`` are skipped (artifacts of
        earlier tokenization runs). A file that fails to load is reported
        and skipped rather than aborting the whole load.
        """
        try:
            # Collect candidate .txt files from the whole subtree.
            txt_files = []
            for root, dirs, files in os.walk(data_dir):
                for file in files:
                    if file.endswith('.txt') and not file.startswith('cut_'):
                        txt_files.append(os.path.join(root, file))

            if not txt_files:
                print("警告: 没有找到txt文档")
                return []

            documents = []
            for file_path in txt_files:
                try:
                    loader = TextLoader(file_path, encoding='utf-8')
                    docs = loader.load()
                    documents.extend(docs)
                    print(f"  已加载: {file_path}")
                except Exception as e:
                    # Best effort: report and continue with the other files.
                    print(f"  加载 {file_path} 时出错: {e}")

            print(f"  共加载 {len(documents)} 个文档")
            return documents

        except Exception as e:
            print(f"加载文档时出错: {e}")
            return []

    def preprocess_documents(self, documents: "List[Document]") -> "List[Document]":
        """Tokenize each document with jieba, then split into overlapping chunks.

        Tokenization inserts spaces between Chinese words so the splitter and
        the embedding model see word boundaries. On tokenization failure the
        original content is kept; on splitter failure the (tokenized) input
        documents are returned unchanged.
        """
        try:
            processed_docs = []

            for doc in documents:
                try:
                    # Replace the content with its space-joined jieba tokens.
                    doc.page_content = " ".join(jb.cut(doc.page_content))
                except Exception as e:
                    print(f"  分词处理出错: {e}，使用原始内容")

                processed_docs.append(doc)

            # Prefer paragraph/sentence punctuation as split points, falling
            # back to smaller separators; the 100-char overlap preserves
            # context across adjacent chunks.
            text_splitter = RecursiveCharacterTextSplitter(
                chunk_size=800,
                chunk_overlap=100,
                separators=["\n\n", "\n", "。", "！", "？", "；", "，", " "]
            )

            split_docs = text_splitter.split_documents(processed_docs)
            print(f"  文档分割完成，共 {len(split_docs)} 个片段")
            return split_docs

        except Exception as e:
            print(f"预处理文档时出错: {e}")
            return documents

    def create_embeddings(self):
        """Create (once) and return the HuggingFace embeddings model.

        The model is cached on the instance, so repeated calls are cheap.

        Raises:
            Exception: re-raised after logging if model loading fails.
        """
        try:
            if self.embeddings is not None:
                return self.embeddings

            print("正在加载Embeddings模型...")
            start_time = time.time()

            # Lightweight CPU-only model; normalized embeddings suit cosine
            # similarity search.
            self.embeddings = HuggingFaceEmbeddings(
                model_name="sentence-transformers/all-MiniLM-L6-v2",
                model_kwargs={'device': 'cpu'},
                encode_kwargs={'normalize_embeddings': True}
            )

            end_time = time.time()
            print(f"Embeddings模型加载完成，耗时: {end_time - start_time:.2f}秒")
            return self.embeddings

        except Exception as e:
            print(f"创建Embeddings时出错: {e}")
            raise

    def create_vector_db(self):
        """Build an in-memory (ephemeral) Chroma vector store.

        Reuses the document chunks cached by :meth:`process_documents` when
        available; otherwise loads and preprocesses them from ./data.

        Returns:
            The Chroma store, or None when there are no documents or on error.
        """
        try:
            print("正在处理文档并创建向量数据库...")

            processed_docs = self._processed_docs
            if processed_docs is None:
                # No cache (create_vector_db called standalone): load and
                # preprocess from the data directory.
                cur_dir = os.path.dirname(os.path.abspath(__file__))
                data_dir = os.path.join(cur_dir, 'data')
                documents = self.load_documents(data_dir)

                if not documents:
                    print("警告: 没有文档可处理")
                    return None

                processed_docs = self.preprocess_documents(documents)

            if not processed_docs:
                print("警告: 文档处理后没有内容")
                return None

            embeddings = self.create_embeddings()

            print("正在创建向量数据库...")
            start_time = time.time()

            # EphemeralClient keeps the collection purely in memory — nothing
            # is persisted between runs.
            client = chromadb.EphemeralClient()
            self.vectordb = Chroma.from_documents(
                documents=processed_docs,
                embedding=embeddings,
                client=client,
                collection_name="local_qa_collection"
            )

            end_time = time.time()
            print(f"向量数据库创建完成，共 {len(processed_docs)} 个文档片段，耗时: {end_time - start_time:.2f}秒")
            return self.vectordb

        except Exception as e:
            print(f"创建向量数据库时出错: {e}")
            import traceback
            traceback.print_exc()
            return None

    def create_qa_chain(self):
        """Build the RetrievalQA chain over the vector store.

        Requires :meth:`create_vector_db` to have succeeded first.

        Returns:
            The chain, or None when the vector DB is missing or on error.
        """
        try:
            if self.vectordb is None:
                print("错误: 向量数据库未初始化")
                return None

            print("正在创建问答链...")

            # DeepSeek exposes an OpenAI-compatible endpoint, so ChatOpenAI
            # works by pointing its base URL at the DeepSeek API.
            llm = ChatOpenAI(
                temperature=0.1,
                model_name="deepseek-chat",
                openai_api_base="https://api.deepseek.com/v1",
                openai_api_key=DEEPSEEK_API_KEY
            )

            # Top-4 similarity retrieval feeds the "stuff" chain below.
            retriever = self.vectordb.as_retriever(
                search_type="similarity",
                search_kwargs={"k": 4}
            )

            self.chain = RetrievalQA.from_chain_type(
                llm=llm,
                chain_type="stuff",
                retriever=retriever,
                return_source_documents=True
            )

            print("问答链创建完成")
            return self.chain

        except Exception as e:
            print(f"创建问答链时出错: {e}")
            import traceback
            traceback.print_exc()
            return None

    def ask_question(self, question: str) -> str:
        """Answer *question* from the knowledge base.

        Returns the model's answer, a "not found" message when retrieval
        produced no source documents, or an error/uninitialized message.
        """
        if self.chain is None:
            return "系统未初始化，请先初始化系统。"

        try:
            print(f"正在思考问题: {question}")
            start_time = time.time()

            result = self.chain.invoke({"query": question})

            end_time = time.time()
            print(f"回答完成，耗时: {end_time - start_time:.2f}秒")

            # Only trust the answer when retrieval actually found sources.
            if 'source_documents' in result and result['source_documents']:
                return result['result']
            else:
                return "抱歉，我在知识库中没有找到相关信息来回答您的问题。"

        except Exception as e:
            import traceback
            traceback.print_exc()
            return f"回答问题时出现错误: {str(e)}"

    def get_system_status(self):
        """Return a small status dict: initialization state and chunk count."""
        if self.vectordb is None:
            return {"status": "未初始化", "document_count": 0}

        try:
            # NOTE: reaches into Chroma's private collection handle.
            count = self.vectordb._collection.count()
            return {"status": "已初始化", "document_count": count}
        except Exception:
            return {"status": "已初始化", "document_count": "未知"}

    def get_database_info(self):
        """Return the chunk count plus up to 10 truncated document previews."""
        if self.vectordb is None:
            return {"error": "数据库未初始化"}

        try:
            count = self.vectordb._collection.count()

            # Fetch a small sample of stored documents for preview.
            docs = self.vectordb._collection.get(limit=10)

            doc_previews = []
            for i in range(len(docs['ids'])):
                preview = {
                    'id': docs['ids'][i],
                    # Truncate long contents to 200 chars for display.
                    'content': docs['documents'][i][:200] + '...' if len(docs['documents'][i]) > 200 else docs['documents'][i],
                    'metadata': docs['metadatas'][i] if i < len(docs['metadatas']) else {}
                }
                doc_previews.append(preview)

            return {
                'document_count': count,
                'document_previews': doc_previews
            }
        except Exception as e:
            return {"error": str(e)}

def main():
    """Entry point: build the QA system, then run an interactive prompt loop."""
    banner = "=" * 60
    print(banner)
    print("📚 本地知识库问答系统")
    print(banner)

    # Build and initialize the QA system; bail out early on failure.
    qa_system = LocalQASystem()
    print("正在启动系统...")
    if not qa_system.init_system():
        print("系统启动失败！")
        return

    # Usage banner.
    print("\n" + banner)
    print("✅ 系统启动成功！")
    print("📝 使用说明:")
    print("   • 输入您的问题进行问答")
    print("   • 输入 'exit'、'quit' 或 'q' 退出程序")
    print("   • 输入 'help' 查看帮助信息")
    print(banner)

    exit_commands = {'exit', 'quit', 'q'}

    # Read-eval-print loop: commands are matched case-insensitively,
    # anything else is treated as a question.
    while True:
        try:
            question = input("\n请输入您的问题: ").strip()

            if not question:
                continue

            command = question.lower()
            if command in exit_commands:
                print("👋 程序已退出，再见！")
                break

            if command == 'help':
                print("\n📝 帮助信息:")
                print("   • 系统会根据您data目录下的txt文档回答问题")
                print("   • 请确保文档内容与您的问题相关")
                print("   • 如果没有相关文档，系统会提示找不到信息")
                continue

            print(f"\n🤖 答案: {qa_system.ask_question(question)}")

        except KeyboardInterrupt:
            print("\n\n👋 程序被用户中断，正在退出...")
            break
        except EOFError:
            print("\n\n👋 程序结束，再见！")
            break
        except Exception as e:
            print(f"\n❌ 程序运行出错: {e}")
            import traceback
            traceback.print_exc()

if __name__ == '__main__':
    main()