#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
简历RAG系统 - 基于Word2Vec和ChromaDB
目标：构建专业的简历知识库问答系统

技术栈：
- 文档处理：python-docx (Word文档解析)
- 文本嵌入：Word2Vec (gensim)
- 向量数据库：ChromaDB
- 分词：jieba

设计理念：
- 专注简历和HR相关场景
- 使用Word2Vec进行语义理解
- ChromaDB提供持久化存储
- 支持实时查询和更新
"""

import os
import re
import json
import numpy as np
from typing import List, Dict, Tuple, Any, Optional
import jieba
import sys
import logging
from pathlib import Path
import time

# Third-party library imports. Each dependency is mandatory: fail fast with
# an install hint instead of raising a bare ImportError later.
try:
    import chromadb
    from chromadb.config import Settings
    from chromadb.utils import embedding_functions
    print("✅ ChromaDB导入成功")
except ImportError:
    print("❌ ChromaDB未安装，请运行: pip install chromadb")
    sys.exit(1)

try:
    from gensim.models import Word2Vec
    from gensim.models.keyedvectors import KeyedVectors
    print("✅ Gensim导入成功")
except ImportError:
    print("❌ Gensim未安装，请运行: pip install gensim")
    sys.exit(1)

try:
    from docx import Document
    print("✅ python-docx导入成功")
except ImportError:
    print("❌ python-docx未安装，请运行: pip install python-docx")
    sys.exit(1)

# Module-level logger configuration.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class Word2VecEmbeddingFunction:
    """
    Word2Vec-backed embedding function, pluggable into ChromaDB.

    Each text is segmented with jieba and embedded as the mean of the
    vectors of its in-vocabulary words; out-of-vocabulary words are skipped.
    """

    def __init__(self, model_path: Optional[str] = None, vector_size: int = 100):
        """
        Set up the embedding function, optionally loading a saved model.

        Args:
            model_path: path of a previously saved Word2Vec model (optional)
            vector_size: dimensionality of the word vectors
        """
        self.model = None
        self.model_path = model_path
        self.vector_size = vector_size
        self.is_trained = False
        # ChromaDB expects embedding functions to expose a ``name`` attribute.
        self.name = "Word2VecEmbedding"

        # Eagerly load an existing model when one is available on disk.
        if model_path and os.path.exists(model_path):
            self.load_model(model_path)

    def train_model(self, sentences: List[List[str]]):
        """
        Train a Word2Vec model from pre-tokenized sentences.

        Args:
            sentences: corpus as a list of word lists
        """
        logger.info(f"🔢 开始训练Word2Vec模型，语料库大小: {len(sentences)}")

        # Skip-gram configuration; kept identical to the project defaults.
        hyperparams = dict(
            vector_size=self.vector_size,  # embedding dimensionality
            window=5,                      # context window size
            min_count=2,                   # ignore words rarer than this
            workers=4,                     # parallel worker threads
            sg=1,                          # 1 = skip-gram architecture
            epochs=10,                     # training passes over the corpus
        )
        self.model = Word2Vec(sentences=sentences, **hyperparams)
        self.is_trained = True
        logger.info("✅ Word2Vec模型训练完成")

        # Persist the freshly trained model when a target path was given.
        if self.model_path:
            self.model.save(self.model_path)
            logger.info(f"💾 模型已保存到: {self.model_path}")

    def load_model(self, model_path: str):
        """Load a previously saved Word2Vec model; failure is logged, not raised."""
        try:
            self.model = Word2Vec.load(model_path)
        except Exception as e:
            logger.error(f"❌ 加载模型失败: {str(e)}")
        else:
            self.is_trained = True
            self.vector_size = self.model.vector_size
            logger.info(f"✅ 加载Word2Vec模型成功: {model_path}")

    def get_sentence_vector(self, words: List[str]) -> np.ndarray:
        """
        Embed a tokenized sentence as the mean of its word vectors.

        Args:
            words: tokens produced by the segmenter

        Returns:
            The averaged vector, or a zero vector when no token is known.

        Raises:
            ValueError: if no model has been trained or loaded yet.
        """
        if self.model is None or not self.is_trained:
            raise ValueError("模型未训练或加载")

        known = [self.model.wv[w] for w in words if w in self.model.wv]
        if not known:
            # No token is in the vocabulary: fall back to the zero vector.
            return np.zeros(self.vector_size)
        return np.mean(known, axis=0)

    def __call__(self, input: List[str]) -> List[List[float]]:
        """
        ChromaDB embedding-function entry point.

        Args:
            input: list of raw texts (ChromaDB's new API requires the
                parameter to be named ``input``)

        Returns:
            One embedding (list of floats) per input text.
        """
        return [
            self.get_sentence_vector(list(jieba.cut(text))).tolist()
            for text in input
        ]

class ResumeRAGSystem:
    """
    Resume/HR RAG (retrieval-augmented generation) system.

    Pipeline:
    1. Parse Word (.docx) documents (paragraphs and tables).
    2. Train or load a Word2Vec model for text embeddings.
    3. Store chunk vectors in a persistent ChromaDB collection.
    4. Answer questions via semantic retrieval over the stored chunks.

    Possible future improvements:
    1. More document formats (PDF, Excel, ...)
    2. Structure-aware document parsing
    3. Pretrained Word2Vec model integration
    4. Multi-turn dialogue support
    5. Answer quality evaluation
    """

    def __init__(self,
                 persist_directory: str = "./resume_chroma_db",
                 collection_name: str = "resume_collection",
                 word2vec_model_path: str = "./word2vec_resume.model"):
        """
        Initialize the resume RAG system.

        Args:
            persist_directory: directory where ChromaDB persists its data
            word2vec_model_path: path used to save/load the Word2Vec model
            collection_name: ChromaDB collection name
        """
        logger.info("🚀 初始化简历RAG系统")

        self.persist_directory = persist_directory
        self.collection_name = collection_name
        self.word2vec_model_path = word2vec_model_path

        # Components; the collection itself is created/attached lazily in
        # create_or_get_collection() (called from build_knowledge_base()).
        self.chroma_client = None
        self.collection = None
        self.word2vec_embedding = Word2VecEmbeddingFunction(word2vec_model_path)

        # Tunables.
        self.chunk_size = 200    # characters per document chunk
        self.chunk_overlap = 50  # character overlap between adjacent chunks
        self.top_k = 5           # default number of retrieval results

        # Connect to ChromaDB immediately so configuration errors surface early.
        self.init_chromadb()

        logger.info("✅ 简历RAG系统初始化完成")

    def init_chromadb(self):
        """Create the persistent ChromaDB client (telemetry disabled).

        Raises:
            Exception: re-raises any client construction failure after logging.
        """
        try:
            self.chroma_client = chromadb.PersistentClient(
                path=self.persist_directory,
                settings=Settings(
                    anonymized_telemetry=False
                )
            )

            logger.info(f"✅ ChromaDB客户端初始化成功: {self.persist_directory}")
        except Exception as e:
            logger.error(f"❌ ChromaDB初始化失败: {str(e)}")
            raise

    def create_or_get_collection(self):
        """Attach to the ChromaDB collection, creating it when missing.

        BUGFIX: the Word2Vec embedding function is now also passed to
        get_collection(). Previously an existing collection was fetched
        without it, so ChromaDB silently fell back to its default embedding
        model for .add(), producing vectors of a different dimension than
        the Word2Vec query vectors and breaking retrieval.

        Robustness: ChromaDB versions differ in the exception type/message
        raised for a missing collection, so any get failure now falls
        through to a create attempt instead of string-matching the message.
        """
        try:
            self.collection = self.chroma_client.get_collection(
                name=self.collection_name,
                embedding_function=self.word2vec_embedding
            )
            logger.info(f"✅ 获取现有集合: {self.collection_name}")

            # Report how much data the existing collection already holds.
            count = self.collection.count()
            logger.info(f"📊 现有集合包含 {count} 个文档")

        except Exception as e:
            # Most likely the collection does not exist yet — try to create it.
            try:
                self.collection = self.chroma_client.create_collection(
                    name=self.collection_name,
                    embedding_function=self.word2vec_embedding,
                    metadata={"description": "简历和HR文档知识库"}
                )
                logger.info(f"✅ 创建新集合: {self.collection_name}")
            except Exception as create_error:
                # Both get and create failed: surface both causes.
                logger.error(f"❌ 集合操作失败: {str(e)}")
                logger.error(f"❌ 创建集合失败: {str(create_error)}")
                raise

    def load_word_document(self, file_path: str) -> Dict[str, Any]:
        """
        Load a Word document, extracting paragraph and table text.

        Args:
            file_path: path to the .docx file

        Returns:
            Dict with 'content' (full extracted text) and 'metadata'
            (filename, size, paragraph/table counts, content length).

        Raises:
            Exception: re-raises any parsing/IO failure after logging.
        """
        logger.info(f"📄 读取Word文档: {file_path}")

        try:
            doc = Document(file_path)

            content_parts = []

            # Non-empty paragraphs, in document order.
            for paragraph in doc.paragraphs:
                if paragraph.text.strip():
                    content_parts.append(paragraph.text.strip())

            # Tables: one line per row, cells joined with " | ".
            for table in doc.tables:
                table_text = []
                for row in table.rows:
                    row_text = []
                    for cell in row.cells:
                        if cell.text.strip():
                            row_text.append(cell.text.strip())
                    if row_text:
                        table_text.append(" | ".join(row_text))

                if table_text:
                    content_parts.append("表格内容：\n" + "\n".join(table_text))

            full_content = "\n\n".join(content_parts)

            metadata = {
                'filename': os.path.basename(file_path),
                'file_path': file_path,
                'file_size': os.path.getsize(file_path),
                'paragraphs_count': len([p for p in doc.paragraphs if p.text.strip()]),
                'tables_count': len(doc.tables),
                'content_length': len(full_content)
            }

            logger.info(f"✅ 文档读取成功: {len(full_content)} 个字符")

            return {
                'content': full_content,
                'metadata': metadata
            }

        except Exception as e:
            logger.error(f"❌ 读取Word文档失败: {str(e)}")
            raise

    def clean_text(self, text: str) -> str:
        """
        Normalize whitespace and strip unwanted symbols from text.

        Keeps word characters, whitespace, CJK characters, and common
        Chinese punctuation; everything else is removed.

        Possible refinements:
        - Finer-grained cleaning rules
        - Preserving important formatting information
        - Handling special symbols and encodings
        """
        # Collapse all whitespace runs to a single space.
        text = re.sub(r'\s+', ' ', text)
        text = text.strip()

        # Drop special characters while keeping Chinese punctuation.
        text = re.sub(r'[^\w\s\u4e00-\u9fff，。！？；：""''（）【】、·]', '', text)

        return text

    def chunk_text(self, text: str, metadata: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Split a document into overlapping fixed-size chunks.

        Each chunk carries a copy of the document metadata augmented with
        its position (chunk_id, chunk_start, chunk_end, chunk_length).

        Possible refinements:
        - Semantics-aware splitting
        - Preserving paragraph integrity
        - Taking document structure into account
        """
        chunks = []
        text_length = len(text)
        # BUGFIX: guard against a non-positive step (infinite loop) should
        # chunk_overlap ever be configured >= chunk_size.
        step = max(1, self.chunk_size - self.chunk_overlap)

        for i in range(0, text_length, step):
            # NOTE: local renamed from 'chunk_text' — it shadowed this method.
            piece = text[i:i + self.chunk_size]

            # Boundary handling: prefer to end a chunk at sentence-ending
            # punctuation within the last 50 characters.
            if i + self.chunk_size < text_length:
                for j in range(len(piece) - 1, max(0, len(piece) - 50), -1):
                    if piece[j] in '。！？\n':
                        piece = piece[:j + 1]
                        break

            if piece.strip():
                chunk_metadata = metadata.copy()
                chunk_metadata.update({
                    'chunk_id': len(chunks),
                    'chunk_start': i,
                    'chunk_end': i + len(piece),
                    'chunk_length': len(piece)
                })

                chunks.append({
                    'content': piece.strip(),
                    'metadata': chunk_metadata
                })

        logger.info(f"📄 文档切分完成: {len(chunks)} 个块")
        return chunks

    def prepare_training_data(self, documents: List[Dict[str, Any]]) -> List[List[str]]:
        """
        Build the Word2Vec training corpus from loaded documents.

        Args:
            documents: list of dicts with a 'content' key

        Returns:
            List of tokenized sentences (each a list of words), keeping only
            sentences longer than 5 characters that yield more than 3 tokens
            of length > 1.
        """
        logger.info("🔤 准备Word2Vec训练数据...")

        sentences = []

        for doc in documents:
            content = doc['content']
            cleaned_content = self.clean_text(content)

            # Split on sentence-ending punctuation / newlines.
            sentence_parts = re.split(r'[。！？\n]', cleaned_content)

            for sentence in sentence_parts:
                if sentence.strip() and len(sentence.strip()) > 5:
                    words = list(jieba.cut(sentence.strip()))
                    # Filter out single-character tokens (mostly stopwords).
                    words = [word for word in words if len(word) > 1]
                    if len(words) > 3:  # require more than 3 tokens
                        sentences.append(words)

        logger.info(f"📊 训练数据准备完成: {len(sentences)} 个句子")
        return sentences

    def build_knowledge_base(self, document_paths: List[str], reset: bool = False):
        """
        Build (or reuse) the knowledge base from Word documents.

        Args:
            document_paths: paths of the .docx files to ingest
            reset: when True, wipe any existing documents and re-ingest

        Raises:
            ValueError: if no document could be loaded at all.
        """
        logger.info("🏗️ 开始构建知识库...")

        # 1. Load and chunk every document; a single failure is logged and
        #    skipped rather than aborting the whole build.
        all_documents = []
        all_chunks = []

        for doc_path in document_paths:
            try:
                doc_data = self.load_word_document(doc_path)
                all_documents.append(doc_data)

                chunks = self.chunk_text(doc_data['content'], doc_data['metadata'])
                all_chunks.extend(chunks)

            except Exception as e:
                logger.error(f"❌ 处理文档失败 {doc_path}: {str(e)}")
                continue

        if not all_documents:
            raise ValueError("没有成功加载任何文档")

        # 2. Train the Word2Vec model (skipped if one was already loaded).
        training_sentences = self.prepare_training_data(all_documents)

        if not self.word2vec_embedding.is_trained:
            self.word2vec_embedding.train_model(training_sentences)

        # 3. Attach to (or create) the ChromaDB collection.
        self.create_or_get_collection()

        # 4. Decide whether existing data is reused, wiped, or extended.
        #    NOTE(review): when reusing an existing collection the stored
        #    vectors must come from the same Word2Vec model — confirm the
        #    saved model file matches the persisted collection.
        current_count = self.collection.count()
        if current_count > 0 and not reset:
            logger.info(f"📚 集合已包含 {current_count} 个文档，跳过重新添加")
            logger.info("🎉 知识库已存在，直接使用！")
            logger.info("💡 如需重新构建，请使用 reset=True 参数")
            return
        elif current_count > 0 and reset:
            logger.info(f"🔄 重置模式：清空现有 {current_count} 个文档")
            # Delete every existing document by id.
            all_ids = self.collection.get()['ids']
            if all_ids:
                self.collection.delete(ids=all_ids)
                logger.info("✅ 现有文档已清空")

        # 5. Add the chunks to ChromaDB in batches.
        logger.info("💾 将文档添加到ChromaDB...")

        documents = []
        metadatas = []
        ids = []

        # Timestamp is hoisted out of the loop; ids stay unique via the index.
        stamp = int(time.time())
        for i, chunk in enumerate(all_chunks):
            documents.append(chunk['content'])
            metadatas.append(chunk['metadata'])
            ids.append(f"chunk_{i}_{stamp}")

        batch_size = 100
        for i in range(0, len(documents), batch_size):
            batch_docs = documents[i:i + batch_size]
            batch_metas = metadatas[i:i + batch_size]
            batch_ids = ids[i:i + batch_size]

            try:
                self.collection.add(
                    documents=batch_docs,
                    metadatas=batch_metas,
                    ids=batch_ids
                )
                logger.info(f"✅ 添加批次 {i//batch_size + 1}: {len(batch_docs)} 个文档块")
            except Exception as e:
                logger.error(f"❌ 添加批次失败: {str(e)}")

        logger.info("🎉 知识库构建完成！")

        # 6. Log final statistics.
        stats = self.get_knowledge_base_stats()
        logger.info("📊 知识库统计:")
        for key, value in stats.items():
            logger.info(f"  {key}: {value}")

    def search_knowledge_base(self, query: str, n_results: Optional[int] = None) -> List[Dict[str, Any]]:
        """
        Semantic search over the knowledge base.

        Args:
            query: the question text
            n_results: number of results to return (defaults to self.top_k)

        Returns:
            Ranked result dicts with id/content/metadata/distance/similarity/
            rank; empty list on failure.

        Raises:
            ValueError: if the collection has not been initialized yet.
        """
        if n_results is None:
            n_results = self.top_k

        if not self.collection:
            raise ValueError("知识库未初始化，请先构建知识库")

        logger.info(f"🔍 搜索知识库: {query}")

        try:
            # Vectorize the query with our own Word2Vec embedding function
            # so it matches the stored chunk vectors.
            query_embeddings = self.word2vec_embedding([query])

            results = self.collection.query(
                query_embeddings=query_embeddings,
                n_results=n_results
            )

            formatted_results = []

            if results['ids'] and len(results['ids'][0]) > 0:
                for i in range(len(results['ids'][0])):
                    result = {
                        'id': results['ids'][0][i],
                        'content': results['documents'][0][i],
                        'metadata': results['metadatas'][0][i],
                        'distance': results['distances'][0][i],
                        # NOTE(review): 1 - distance is only a true similarity
                        # for cosine space; ChromaDB's default space is L2 —
                        # confirm the collection's distance metric.
                        'similarity': 1 - results['distances'][0][i],
                        'rank': i + 1
                    }
                    formatted_results.append(result)

            logger.info(f"📋 找到 {len(formatted_results)} 个相关结果")

            return formatted_results

        except Exception as e:
            logger.error(f"❌ 搜索失败: {str(e)}")
            return []

    def generate_answer(self, query: str, search_results: List[Dict[str, Any]]) -> str:
        """
        Render a template-based answer from the retrieved chunks.

        Possible refinements:
        - Integrate an LLM API
        - Better context construction
        - Answer quality evaluation
        """
        if not search_results:
            return "抱歉，没有找到相关信息来回答您的问题。"

        # Build the context section, one entry per retrieved chunk.
        context_parts = []
        for result in search_results:
            context_parts.append(f"""
来源：{result['metadata']['filename']}
相似度：{result['similarity']:.3f}
内容：{result['content'][:300]}...
""")

        context = "\n".join(context_parts)

        # Simple template answer (no LLM integration yet).
        answer = f"""基于简历和HR文档知识库，我找到了以下相关信息：

{context}

针对您的问题 "{query}"，根据上述文档内容：

[注意：这是基础版本的回答。在实际应用中，这里应该集成LLM API来生成更智能的回答]

📚 参考文档：
"""

        for result in search_results:
            answer += f"- {result['metadata']['filename']} (相似度: {result['similarity']:.3f})\n"

        return answer

    def chat(self, query: str) -> Dict[str, Any]:
        """
        Full RAG round-trip: retrieve, answer, and package the result.

        Args:
            query: the user's question

        Returns:
            Dict with query/answer/search_results/result_count/timestamp.
        """
        logger.info(f"\n{'='*60}")
        logger.info(f"用户问题: {query}")
        logger.info('='*60)

        # 1. Retrieve relevant chunks.
        search_results = self.search_knowledge_base(query)

        # 2. Build the answer text.
        answer = self.generate_answer(query, search_results)

        # 3. Package everything for the caller.
        result = {
            'query': query,
            'answer': answer,
            'search_results': search_results,
            'result_count': len(search_results),
            'timestamp': time.time()
        }

        logger.info("🎯 RAG回答:")
        logger.info(answer)
        logger.info('='*60)

        return result

    def get_knowledge_base_stats(self) -> Dict[str, Any]:
        """Return knowledge-base statistics, or a status/error dict."""
        if not self.collection:
            return {"status": "未初始化"}

        try:
            count = self.collection.count()

            return {
                'collection_name': self.collection_name,
                'total_chunks': count,
                'persist_directory': self.persist_directory,
                'word2vec_trained': self.word2vec_embedding.is_trained,
                'vector_size': self.word2vec_embedding.vector_size,
                'chunk_size': self.chunk_size,
                'chunk_overlap': self.chunk_overlap
            }
        except Exception as e:
            return {"error": str(e)}

    def export_knowledge_base(self, output_file: str):
        """Export the full collection contents plus stats to a JSON file."""
        if not self.collection:
            logger.error("知识库未初始化")
            return

        try:
            # Fetch everything stored in the collection.
            all_data = self.collection.get()

            export_data = {
                'collection_name': self.collection_name,
                'export_time': time.time(),
                'stats': self.get_knowledge_base_stats(),
                'data': all_data
            }

            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump(export_data, f, ensure_ascii=False, indent=2)

            logger.info(f"✅ 知识库已导出到: {output_file}")

        except Exception as e:
            logger.error(f"❌ 导出失败: {str(e)}")

def demo_resume_rag(reset: bool = False):
    """Run a scripted demo of the resume RAG system.

    Builds (or reuses) the knowledge base from the .docx files under
    ``test_documents/word`` next to this script, then runs a fixed set of
    sample HR questions and prints summary statistics.

    Args:
        reset: when True, wipe and rebuild any existing knowledge base.
    """
    print("🎯 简历RAG系统演示")
    print("="*60)

    # 1. Instantiate the RAG system with demo-specific storage locations.
    rag_system = ResumeRAGSystem(
        persist_directory="./resume_chroma_db",
        collection_name="resume_knowledge_base",
        word2vec_model_path="./resume_word2vec.model"
    )

    # 2. Collect the Word documents to ingest.
    word_dir = Path(__file__).parent / "test_documents" / "word"
    document_paths = []
    if word_dir.exists():
        for file_path in word_dir.glob("*.docx"):
            document_paths.append(str(file_path))
            print(f"📄 发现Word文档: {file_path.name}")

    if not document_paths:
        print("❌ 没有找到Word文档，请检查test_documents/word/目录")
        return

    # 3. Build (or attach to) the knowledge base.
    try:
        rag_system.build_knowledge_base(document_paths, reset=reset)
    except Exception as e:
        print(f"❌ 构建知识库失败: {str(e)}")
        return

    # 4. Exercise the QA pipeline with canned questions.
    test_questions = [
        "员工手册包含哪些内容？",
        "公司的考勤制度是什么？",
        "员工福利有哪些？",
        "如何申请年假？",
        "绩效考核的标准是什么？",
        "新员工入职流程？"
    ]

    print(f"\n🧪 开始测试问答...")

    for question in test_questions:
        try:
            outcome = rag_system.chat(question)

            print(f"\n问题: {outcome['query']}")
            print(f"找到结果: {outcome['result_count']} 个")

            hits = outcome['search_results']
            if hits:
                print("最相关的结果:")
                best = hits[0]
                print(f"  - 文档: {best['metadata']['filename']}")
                print(f"  - 相似度: {best['similarity']:.3f}")
                print(f"  - 内容片段: {best['content'][:100]}...")

            print("-" * 40)

        except Exception as e:
            print(f"❌ 问答失败: {str(e)}")

    # 5. Show final statistics.
    stats = rag_system.get_knowledge_base_stats()
    print(f"\n📊 系统统计:")
    for name, val in stats.items():
        print(f"  {name}: {val}")

    print(f"\n🎉 简历RAG演示完成!")

def interactive_resume_chat(reset: bool = False):
    """Interactive question-answering loop over the resume knowledge base.

    Loads the .docx files under ``test_documents/word``, builds (or reuses)
    the knowledge base, then reads questions from stdin until the user quits.

    Args:
        reset: when True, wipe and rebuild any existing knowledge base.
    """
    print("🎯 交互式简历RAG系统")
    print("="*60)

    # System setup with default storage paths.
    rag_system = ResumeRAGSystem()

    # Discover the documents to ingest.
    docx_dir = Path(__file__).parent / "test_documents" / "word"
    document_paths = [str(docx_path) for docx_path in docx_dir.glob("*.docx")]

    if not document_paths:
        print("❌ 没有找到Word文档")
        return

    # Build (or attach to) the knowledge base.
    try:
        rag_system.build_knowledge_base(document_paths, reset=reset)
    except Exception as e:
        print(f"❌ 系统初始化失败: {str(e)}")
        return

    print(f"\n✅ 简历RAG系统准备就绪！")
    print(f"📄 已加载 {len(document_paths)} 个Word文档")
    print(f"💬 开始对话（输入 'quit' 退出）:")
    print("-" * 60)

    # Read-eval loop; Ctrl+C or a quit word exits cleanly.
    while True:
        try:
            user_input = input("\n🤔 您的问题: ").strip()

            if user_input.lower() in {'quit', 'exit', '退出', 'q'}:
                print("👋 再见！")
                break

            if not user_input:
                continue

            # Run the RAG round-trip (answers are logged inside chat()).
            rag_system.chat(user_input)

        except KeyboardInterrupt:
            print("\n\n👋 用户中断，再见！")
            break
        except Exception as e:
            print(f"\n❌ 出现错误: {str(e)}")

if __name__ == "__main__":
    import argparse

    # Command-line entry point: pick demo vs. interactive mode, optional reset.
    cli = argparse.ArgumentParser(description='简历RAG系统 - Word2Vec + ChromaDB')
    cli.add_argument('--mode', choices=['demo', 'chat'], default='demo',
                     help='运行模式：demo=演示模式，chat=交互模式')
    cli.add_argument('--reset', action='store_true',
                     help='重置知识库（清空现有数据重新构建）')
    args = cli.parse_args()

    print("🚀 简历RAG系统启动")
    print("技术栈: Word2Vec + ChromaDB + python-docx")
    if args.reset:
        print("🔄 重置模式：将清空现有知识库")
    print("="*60)

    # Dispatch to the selected entry point (choices above guarantee one of two).
    entry = demo_resume_rag if args.mode == 'demo' else interactive_resume_chat
    entry(reset=args.reset)