"""
银河麒麟OS智能运维助手系统
天津理工大学  修改于2025年10月7日

本系统基于RAG架构，为银河麒麟操作系统提供智能运维支持。
支持本地知识库检索、流式输出和多轮对话功能。
"""

import torch
import faiss
import numpy as np
from llama_index.core.node_parser import SentenceSplitter
import re
from typing import List, Optional, Dict
from concurrent.futures import ThreadPoolExecutor, as_completed
import json
from recall import q_searching, TextRecallRank, search_bing
from openai import OpenAI
import os
import fitz  # PyMuPDF
from config import Config
from flask import Flask, request, jsonify, send_from_directory, session
import tempfile
import shutil
import uuid
from datetime import datetime
# 添加docx支持
import docx
# 导入modelscope模块用于本地模型
from modelscope import AutoModelForCausalLM, AutoTokenizer, AutoModel
import torch.nn.functional as F
# 导入BM25和混合检索器
from search import BM25Retriever, HybridRetriever
from flask import Response, stream_with_context
import requests

# Configure the PyTorch CUDA allocator to use expandable segments, which
# reduces memory fragmentation when models are loaded/unloaded repeatedly.
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'

# Release any cached CUDA memory left over from a previous run before loading models.
if torch.cuda.is_available():
    torch.cuda.empty_cache()  # hand cached blocks back to the driver

def check_gpu_memory():
    """Print GPU memory usage and report whether enough memory remains.

    Returns:
        bool: True when at least 100 MB of GPU memory is still available;
        False when CUDA is unavailable or memory is exhausted.
    """
    if torch.cuda.is_available():
        total_memory = torch.cuda.get_device_properties(0).total_memory
        allocated_memory = torch.cuda.memory_allocated(0)
        cached_memory = torch.cuda.memory_reserved(0)
        # Bug fix: the caching allocator keeps reserved (cached) blocks for
        # itself, so "total - allocated" overstates what is actually usable.
        # Subtract the reserved amount, the upper bound PyTorch has claimed.
        free_memory = total_memory - cached_memory

        print(f"GPU内存状态:")
        print(f"  总内存: {total_memory / 1024**3:.2f} GB")
        print(f"  已分配: {allocated_memory / 1024**3:.2f} GB")
        print(f"  已缓存: {cached_memory / 1024**3:.2f} GB")
        print(f"  可用内存: {free_memory / 1024**3:.2f} GB")

        return free_memory > 100 * 1024 * 1024  # require at least 100 MB free
    return False

def clear_gpu_memory():
    """Release cached CUDA memory and wait for pending GPU work to finish."""
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
    torch.cuda.synchronize()

# Load the application configuration.
cfg = Config()

# Ensure the output directory (chunk / vector artifacts) exists.
OUTPUT_DIR = cfg.output_dir
os.makedirs(OUTPUT_DIR, exist_ok=True)



# Initialise the Flask application; static assets are served from ./static.
app = Flask(__name__, static_folder='static')
# Prefer an externally supplied secret; fall back to a random per-process key
# (sessions will not survive a restart in that case).
app.secret_key = os.environ.get('SECRET_KEY') or os.urandom(24)

# In-memory store of per-session conversation histories.
conversations = {}

# Global singleton manager for the locally hosted models.
class ModelManager:
    """Process-wide singleton that lazily loads the local LLM and
    embedding models, falling back from GPU to CPU when memory runs out."""

    _instance = None      # the single shared instance
    _initialized = False  # guards against re-running __init__ on that instance

    def __new__(cls):
        # Classic singleton: create the instance only once.
        if cls._instance is None:
            cls._instance = super(ModelManager, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        # __init__ runs on every ModelManager() call; only populate state once.
        if not self._initialized:
            self.cfg = Config()
            self.llm_model = None
            self.llm_tokenizer = None
            self.embedding_model = None
            self.embedding_tokenizer = None
            ModelManager._initialized = True

    def get_llm_model(self):
        """Return (model, tokenizer) for the local LLM, loading lazily.

        When the configuration selects an API backend (use_llm_api), nothing
        is loaded and (None, None) is returned.
        """
        if not self.cfg.use_llm_api and self.llm_model is None:
            print("正在加载LLM模型...")
            try:
                # Load tokenizer first; trust_remote_code is required for
                # ModelScope models that ship custom code.
                self.llm_tokenizer = AutoTokenizer.from_pretrained(
                    self.cfg.local_model_name,
                    trust_remote_code=True
                )
                self.llm_model = AutoModelForCausalLM.from_pretrained(
                    self.cfg.local_model_name,
                    torch_dtype=torch.float16,  # explicit dtype avoids meta-tensor issues
                    device_map="auto",
                    trust_remote_code=True,
                    low_cpu_mem_usage=True  # reduce host RAM during load
                )
                print("LLM模型加载完成")
            except Exception as e:
                print(f"LLM模型加载失败: {e}")
                raise e
        return self.llm_model, self.llm_tokenizer

    def get_embedding_model(self):
        """Return (model, tokenizer) for embeddings, loading lazily.

        Tries GPU first when enough memory is free; on CUDA OOM it falls
        back to CPU (float32). When the configuration selects an embedding
        API (use_api), nothing is loaded and (None, None) is returned.
        """
        if not self.cfg.use_api and self.embedding_model is None:
            print("正在加载Embedding模型...")

            # Decide the target device from current GPU memory headroom.
            print("检查GPU内存状态...")
            gpu_available = check_gpu_memory()
            device = "cuda:0" if gpu_available else "cpu"
            use_gpu = gpu_available

            try:
                self.embedding_tokenizer = AutoTokenizer.from_pretrained(
                    self.cfg.bert_path,
                    trust_remote_code=True
                )

                # First attempt: GPU load in half precision.
                if use_gpu:
                    try:
                        print("尝试在GPU上加载embedding模型...")
                        self.embedding_model = AutoModel.from_pretrained(
                            self.cfg.bert_path,
                            torch_dtype=torch.float16,
                            trust_remote_code=True,
                            low_cpu_mem_usage=True,
                            device_map="auto"  # let HF place layers automatically
                        )
                        print(f"Embedding模型在GPU上加载完成")
                    except RuntimeError as gpu_error:
                        if "out of memory" in str(gpu_error).lower():
                            print(f"GPU内存不足，切换到CPU加载: {gpu_error}")
                            # Free the partially allocated GPU memory.
                            torch.cuda.empty_cache()
                            device = "cpu"
                            use_gpu = False
                            # Retry on CPU.
                            self.embedding_model = AutoModel.from_pretrained(
                                self.cfg.bert_path,
                                torch_dtype=torch.float32,  # float32 on CPU (fp16 is slow/unsupported there)
                                trust_remote_code=True,
                                low_cpu_mem_usage=True
                            )
                            self.embedding_model = self.embedding_model.to(device)
                            print(f"Embedding模型在CPU上加载完成")
                        else:
                            raise gpu_error
                else:
                    # GPU unavailable or too full: load directly on CPU.
                    self.embedding_model = AutoModel.from_pretrained(
                        self.cfg.bert_path,
                        torch_dtype=torch.float32,
                        trust_remote_code=True,
                        low_cpu_mem_usage=True
                    )
                    self.embedding_model = self.embedding_model.to(device)
                    print(f"Embedding模型在CPU上加载完成")

            except Exception as e:
                print(f"Embedding模型加载失败: {e}")
                # Last resort: force a CPU load.
                # NOTE(review): this path reloads with the same float32 dtype as
                # above, so it only helps when the earlier failure was GPU-related.
                try:
                    print("尝试在CPU上以低精度加载模型...")
                    self.embedding_model = AutoModel.from_pretrained(
                        self.cfg.bert_path,
                        torch_dtype=torch.float32,
                        trust_remote_code=True,
                        low_cpu_mem_usage=True
                    )
                    self.embedding_model = self.embedding_model.to("cpu")
                    device = "cpu"
                    print("Embedding模型在CPU上以低精度加载完成")
                except Exception as final_error:
                    print(f"所有加载尝试都失败了: {final_error}")
                    raise final_error

        return self.embedding_model, self.embedding_tokenizer

# Global model-manager singleton shared by all request handlers.
model_manager = ModelManager()

# Kylin OS ops-assistant model client - supports multiple model providers
class DeepSeekClient:
    """Chat client abstracting over several back ends.

    Supported providers: DeepSeek (OpenAI-compatible SDK), a local Ollama
    server (raw HTTP via requests), legacy OpenAI-compatible endpoints,
    and a local ModelScope model (manual token-by-token streaming).
    """

    def __init__(self, provider=None, model=None):
        """Initialise the client for *provider* (defaults to configured one).

        Args:
            provider: Optional provider key, e.g. "deepseek" or "local".
            model: Optional model name overriding the provider default.
        """
        self.cfg = Config()
        self.current_provider = provider or self.cfg.current_provider

        # Initialise the underlying client according to the active provider.
        if self.current_provider == "deepseek":
            provider_config = self.cfg.providers["deepseek"]
            self.client = OpenAI(
                api_key=provider_config["api_key"],
                base_url=provider_config["api_url"]
            )
            self.default_model = model or provider_config["model"]
            self.provider_type = provider_config["type"]

        elif self.current_provider == "local":
            provider_config = self.cfg.providers["local"]
            # For Ollama the API is called directly with requests.
            self.api_url = provider_config["api_url"]
            self.chat_endpoint = provider_config["chat_endpoint"]
            self.default_model = model or provider_config["model"]
            self.provider_type = provider_config["type"]
            self.client = None  # the OpenAI SDK client is not used here

        # Backwards compatibility with the older configuration flags.
        elif self.cfg.use_ollama:
            self.ollama_base_url = self.cfg.ollama_base_url
            self.ollama_api_key = self.cfg.ollama_api_key or "any_string"
            self.ollama_model = model or self.cfg.ollama_model
            self.client = OpenAI(
                api_key=self.ollama_api_key,
                base_url=self.ollama_base_url
            )
            self.default_model = self.ollama_model
            self.provider_type = "ollama"

        elif self.cfg.use_llm_api:
            self.api_key = self.cfg.llm_api_key
            self.base_url = self.cfg.llm_base_url
            self.default_model = model or self.cfg.llm_model
            self.client = OpenAI(
                api_key=self.api_key,
                base_url=self.base_url
            )
            self.provider_type = "openai"

        # Finally fall back to the local ModelScope model.
        else:
            self.model_name = self.cfg.local_model_name
            self.provider_type = "local"

    def switch_provider(self, provider, model=None):
        """Switch to another model provider at runtime.

        Raises:
            ValueError: When *provider* is not "deepseek" or "local".
        """
        self.current_provider = provider

        # Re-initialise the client for the newly selected provider.
        if provider == "deepseek":
            provider_config = self.cfg.providers["deepseek"]
            self.client = OpenAI(
                api_key=provider_config["api_key"],
                base_url=provider_config["api_url"]
            )
            self.default_model = model or provider_config["model"]
            self.provider_type = provider_config["type"]

        elif provider == "local":
            provider_config = self.cfg.providers["local"]
            # For Ollama the API is called directly with requests.
            self.api_url = provider_config["api_url"]
            self.chat_endpoint = provider_config["chat_endpoint"]
            self.default_model = model or provider_config["model"]
            self.provider_type = provider_config["type"]
            self.client = None  # the OpenAI SDK client is not used here

        else:
            raise ValueError(f"不支持的提供商: {provider}")

    def get_available_providers(self):
        """Return the list of configured provider keys."""
        return list(self.cfg.providers.keys())

    def get_provider_models(self, provider=None):
        """Return the model names configured for *provider* (default: current)."""
        provider = provider or self.current_provider
        if provider in self.cfg.providers:
            return [self.cfg.providers[provider]["model"]]
        return []

    def generate_answer(self, system_prompt, messages, model=None):
        """Return one complete (non-streaming) answer.

        Args:
            system_prompt: System instruction prepended to the history.
            messages: List of {"role": ..., "content": ...} chat messages.
            model: Optional model name overriding the provider default.

        Returns:
            str: The assistant reply, or an error description on API failure.
        """
        # 1. Ollama native API (non-streaming).
        if self.provider_type == 'ollama' and hasattr(self, 'api_url'):
            model_to_use = model or self.default_model
            api_messages = [{"role": "system", "content": system_prompt}]
            api_messages.extend(messages)

            payload = {
                "model": model_to_use,
                "messages": api_messages,
                "stream": False,
                "options": {
                    "temperature": 0.7,
                    "top_p": 0.95,
                    "num_predict": 4096
                }
            }

            try:
                import requests
                url = f"{self.api_url}{self.chat_endpoint}"
                response = requests.post(url, json=payload, timeout=60)
                response.raise_for_status()
                data = response.json()
                return data.get("message", {}).get("content", "")
            except Exception as e:
                print(f"Ollama API调用失败: {e}")
                return f"调用Ollama API失败: {str(e)}"

        # 2. OpenAI-compatible API call (including DeepSeek).
        elif hasattr(self, 'client') and self.client is not None:
            model_to_use = model or self.default_model
            api_messages = [{"role": "system", "content": system_prompt}]
            api_messages.extend(messages)

            response = self.client.chat.completions.create(
                model=model_to_use,
                messages=api_messages,
                stream=False,
                temperature=0.7,        # higher randomness for richer answers
                top_p=0.95,             # wider sampling range
                max_tokens=4096,        # generous output budget
                frequency_penalty=0.1,  # discourage repetition
                presence_penalty=0.2    # encourage new topics
            )

            if response.choices and response.choices[0].message and response.choices[0].message.content:
                return response.choices[0].message.content
            return ""

        # 3. Local ModelScope model (non-streaming).
        else:
            # Drive the streaming generator and concatenate all chunks.
            full_response = ""
            for chunk in self.generate_answer_stream(system_prompt, messages, model):
                full_response += chunk
            return full_response

    def generate_answer_stream(self, system_prompt, messages, model=None):
        """Yield the answer incrementally as text chunks (a generator).

        Same arguments as :meth:`generate_answer`; yields str fragments.
        """
        # 1. Ollama native API (streaming NDJSON).
        if self.provider_type == 'ollama' and hasattr(self, 'api_url'):
            model_to_use = model or self.default_model
            api_messages = [{"role": "system", "content": system_prompt}]
            api_messages.extend(messages)

            payload = {
                "model": model_to_use,
                "messages": api_messages,
                "stream": True,
                "options": {
                    "temperature": 0.7,
                    "top_p": 0.95,
                    "num_predict": 4096
                }
            }

            try:
                import requests
                url = f"{self.api_url}{self.chat_endpoint}"
                response = requests.post(url, json=payload, stream=True, timeout=60)
                response.raise_for_status()

                # Each line of the response body is a standalone JSON object.
                for line in response.iter_lines():
                    if line:
                        decoded = line.decode('utf-8', errors='replace').strip()
                        try:
                            data = json.loads(decoded)
                            content = data.get("message", {}).get("content", "")
                            if content:
                                yield content
                            if data.get("done", False):
                                break
                        except json.JSONDecodeError:
                            continue  # skip partial / malformed lines
            except Exception as e:
                print(f"Ollama流式API调用失败: {e}")
                yield f"调用Ollama API失败: {str(e)}"

        # 2. OpenAI-compatible streaming API (including DeepSeek).
        elif hasattr(self, 'client') and self.client is not None:
            model_to_use = model or self.default_model
            api_messages = [{"role": "system", "content": system_prompt}]
            api_messages.extend(messages)

            stream = self.client.chat.completions.create(
                model=model_to_use,
                messages=api_messages,
                stream=True,
                temperature=0.7,        # higher randomness for richer answers
                top_p=0.95,             # wider sampling range
                max_tokens=4096,        # generous output budget
                frequency_penalty=0.1,  # discourage repetition
                presence_penalty=0.2    # encourage new topics
            )

            for chunk in stream:
                if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
                    yield chunk.choices[0].delta.content

        # 3. Local ModelScope model - manual token-by-token streaming.
        else:
            model, tokenizer = model_manager.get_llm_model()

            formatted_messages = [{"role": "system", "content": system_prompt}]
            formatted_messages.extend(messages)

            text = tokenizer.apply_chat_template(
                formatted_messages,
                tokenize=False,
                add_generation_prompt=True
            )

            model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

            # Streamed generation loop.
            with torch.no_grad():
                # Accumulator state for the manual streaming loop.
                self.generated_tokens = []
                previous_length = 0
                # Length of the prompt; everything after it is generated text.
                input_length = model_inputs.input_ids.shape[1]

                # Base generation parameters (max_new_tokens is raised below).
                generation_kwargs = {
                    **model_inputs,
                    'max_new_tokens': 1,
                    'do_sample': True,
                    'temperature': 0.7,  # higher randomness for richer answers
                    'top_p': 0.95,       # wider sampling range
                    'pad_token_id': tokenizer.eos_token_id
                }

                # Manual streaming: repeatedly generate small batches of tokens.
                current_tokens = model_inputs.input_ids
                total_generated = 0
                max_tokens = 4096  # overall output budget
                last_output = ""

                # Generate 5 tokens per step to reduce per-call overhead.
                generation_kwargs['max_new_tokens'] = 5

                while total_generated < max_tokens:
                    # Generate the next few tokens.
                    with torch.no_grad():
                        output = model.generate(**generation_kwargs)

                    # model.generate may return a GenerateOutput or a raw tensor.
                    if hasattr(output, 'sequences'):
                        new_tokens = output.sequences[0][input_length:]
                        current_tokens = output.sequences
                    elif isinstance(output, torch.Tensor):
                        # Treat the tensor itself as the sequence batch.
                        current_tokens = output
                        # Ensure a 2-D [batch_size, seq_len] shape.
                        if current_tokens.dim() == 1:
                            current_tokens = current_tokens.unsqueeze(0)
                        new_tokens = current_tokens[0][input_length:]
                    else:
                        break
                    # NOTE(review): input_length is never advanced, so
                    # new_tokens spans ALL tokens generated so far rather
                    # than just this step's batch; the last_output dedupe
                    # below partially masks this. Confirm intended.

                    # Stop on EOS or when nothing new was produced.
                    if len(new_tokens) == 0 or (tokenizer.eos_token_id is not None and new_tokens[-1] == tokenizer.eos_token_id):
                        break

                    # Feed the extended sequence back in for the next step.
                    generation_kwargs = {
                        'input_ids': current_tokens,
                        'max_new_tokens': 5,  # 5 tokens per step
                        'do_sample': True,
                        'temperature': 0.7,  # higher randomness for richer answers
                        'top_p': 0.95,       # wider sampling range
                        'pad_token_id': tokenizer.eos_token_id
                    }

                    # Decode only the newly generated tokens, not the prompt.
                    try:
                        new_text = tokenizer.decode(new_tokens, skip_special_tokens=True)

                        # Strip control and otherwise non-printable characters.
                        new_text = ''.join(c for c in new_text if c.isprintable())

                        # Skip exact repeats of the previous chunk.
                        if new_text and new_text != last_output:
                            yield new_text
                            last_output = new_text

                        total_generated += len(new_tokens)
                    except Exception as e:
                        print(f"解码错误: {str(e)}")
                        # Fallback: retry the decode with several encodings.
                        # NOTE(review): HF tokenizer.decode() accepts no
                        # `encoding` kwarg, so these calls likely raise
                        # TypeError and are swallowed by the bare except;
                        # confirm against the tokenizer implementation.
                        encodings = ['utf-8', 'latin-1', 'gbk', 'cp1252', 'gb2312', 'iso-8859-1', 'mac_roman']
                        decoded = False
                        for encoding in encodings:
                            try:
                                new_text = tokenizer.decode(new_tokens, skip_special_tokens=True, encoding=encoding)
                                # Strip control and otherwise non-printable characters.
                                new_text = ''.join(c for c in new_text if c.isprintable())
                                if new_text and new_text != last_output:
                                    yield new_text
                                    last_output = new_text
                                    decoded = True
                                    break
                            except:
                                continue
                        if not decoded:
                            # Last resort: emit the raw token bytes as hex.
                            try:
                                hex_str = ' '.join(f'{x:02x}' for x in new_tokens.tolist())
                                yield f"[Token Hex: {hex_str}]"
                            except:
                                error_msg = "[解码错误]"
                                if error_msg != last_output:
                                    yield error_msg
                                    last_output = error_msg

# Global shared DeepSeekClient instance used by the request handlers.
deepseek_client = DeepSeekClient()

# Improved semantic chunking - tags every chunk with its source file
def semantic_chunk(input_file_path, output_file_path=os.path.join(OUTPUT_DIR, "semantic_chunk_output.json"), chunk_size=2000, chunk_overlap=200):
    """
    Split a combined knowledge-base text file into size-bounded chunks.

    The input file contains sections delimited by "----- 文件: <name> -----"
    headers; each produced chunk records the file it came from. The chunk
    list is written to *output_file_path* as JSON and also returned.

    Args:
        input_file_path: Combined knowledge-base text file to split.
        output_file_path: Destination JSON file for the chunk list.
        chunk_size: Target maximum chunk length in characters.
        chunk_overlap: Characters shared between adjacent chunks.

    Returns:
        list[dict]: One record per chunk (id / chunk / method / source_file).
    """
    def split_text_by_size(text: str, max_size: int, overlap: int = 0) -> List[str]:
        """
        Split *text* into pieces of at most *max_size* characters, breaking
        preferentially at sentence-ending punctuation, then other marks.
        """
        if len(text) <= max_size:
            return [text]

        chunks = []
        start = 0

        while start < len(text):
            # Tentative end of this chunk.
            end = min(start + max_size, len(text))

            if end == len(text):
                # Reached the end of the text.
                chunks.append(text[start:end].strip())
                break

            # Look for a natural break point inside the window.
            chunk_text = text[start:end]

            # Preference: sentence enders > clause punctuation > space > hard cut.
            split_chars = ['。', '！', '？', '；', '：', '\n', '，', ' ']
            best_split = -1

            for char in split_chars:
                # Search backwards, but never split before the halfway point
                # (guarantees each chunk keeps at least half its target size).
                min_pos = max(len(chunk_text) // 2, end - start - overlap) if overlap > 0 else len(chunk_text) // 2
                pos = chunk_text.rfind(char, min_pos)
                if pos != -1:
                    best_split = pos + 1  # keep the split character in this chunk
                    break

            if best_split != -1:
                actual_end = start + best_split
                chunks.append(text[start:actual_end].strip())
                # Advance, backing up by the overlap (always at least 1 forward).
                start = max(start + 1, actual_end - overlap)
            else:
                # No suitable punctuation: hard cut at the size limit.
                chunks.append(text[start:end].strip())
                start = max(start + 1, end - overlap)

        # Drop blank chunks and fragments shorter than 20 characters.
        return [chunk for chunk in chunks if chunk.strip() and len(chunk.strip()) >= 20]

    with open(input_file_path, 'r', encoding='utf-8') as file:
        full_text = file.read()

        # Partition the combined text into per-file sections.
        file_sections = full_text.split("----- 文件: ")

        chunk_data_list = []
        chunk_id = 0

        for i, section in enumerate(file_sections):
            if i == 0 and not section.strip():
                continue

            # Recover the file name from the section header.
            if " -----" in section:
                lines = section.split(" -----\n", 1)
                if len(lines) == 2:
                    filename = lines[0].strip()
                    content = lines[1]
                else:
                    filename = f"unknown_file_{i}"
                    content = section
            else:
                filename = f"unknown_file_{i}"
                content = section

            # Clean up the section body.
            content = content.strip()
            if not content:
                continue

            # Bug fix: these progress messages printed the literal text
            # "(unknown)" instead of the file name extracted just above
            # (which IS stored in the chunk metadata below).
            print(f"处理文件: {filename}, 内容长度: {len(content)}")

            # Chunk directly at the requested size; no paragraph pre-pass.
            chunks = split_text_by_size(content, chunk_size, chunk_overlap)

            print(f"文件 {filename} 生成了 {len(chunks)} 个分块")

            for chunk_idx, chunk in enumerate(chunks):
                chunk_data_list.append({
                    "id": f'chunk{chunk_id}',
                    "chunk": chunk,
                    "method": "semantic_chunk",
                    "source_file": filename,
                    "chunk_size": len(chunk)  # actual size, handy for debugging
                })
                chunk_id += 1
                print(f"  分块 {chunk_idx}: 长度 {len(chunk)}")

        print(f"总共生成了 {len(chunk_data_list)} 个分块")

        with open(output_file_path, 'w', encoding='utf-8') as json_file:
            json.dump(chunk_data_list, json_file, ensure_ascii=False, indent=4)

        return chunk_data_list

# Build a Faiss index - stores source-file metadata alongside the vectors
def build_faiss_index(vector_file, index_path, metadata_path):
    """Build an inner-product Faiss index from a vectorised chunk file.

    Args:
        vector_file: JSON file of chunk records, each carrying a 'vector'.
        index_path: Destination path for the serialised Faiss index.
        metadata_path: Destination path for the chunk-metadata JSON.

    Raises:
        ValueError: When the input is empty or records lack a 'vector' field.
    """
    print(f"Building index for {vector_file}...")
    with open(vector_file, 'r', encoding='utf-8') as f:
        data = json.load(f)

    # Fail fast when vectorisation did not run.
    if not data or 'vector' not in data[0]:
        raise ValueError("向量数据中缺少'vector'字段，请检查向量化是否成功完成。")

    vectors = [item['vector'] for item in data]
    vectors = np.array(vectors, dtype=np.float32)
    if vectors.size == 0:
        raise ValueError("向量数据为空，请检查输入文件。")

    dim = vectors.shape[1]
    n_vectors = vectors.shape[0]
    # Faiss recommends at least ~39 training points per IVF cell; cap at 128.
    max_nlist = n_vectors // 39
    nlist = min(max_nlist, 128) if max_nlist >= 1 else 1

    if nlist >= 1 and n_vectors >= nlist * 39:
        quantizer = faiss.IndexFlatIP(dim)
        # Bug fix: IndexIVFFlat defaults to METRIC_L2, which silently
        # disagrees with the inner-product coarse quantizer above. Pass the
        # metric explicitly so IVF search scores match IndexFlatIP's.
        index = faiss.IndexIVFFlat(quantizer, dim, nlist, faiss.METRIC_INNER_PRODUCT)
        if not index.is_trained:
            index.train(vectors)
        index.add(vectors)
    else:
        # Too few vectors for IVF training: use an exact flat index.
        index = faiss.IndexFlatIP(dim)
        index.add(vectors)

    faiss.write_index(index, index_path)
    # Persist per-chunk metadata so search results can be mapped back to text.
    metadata = [
        {
            'id': item['id'],
            'chunk': item['chunk'],
            'method': item['method'],
            'window': item.get('window', ''),
            'source_file': item.get('source_file', 'unknown')
        }
        for item in data
    ]
    with open(metadata_path, 'w', encoding='utf-8') as f:
        json.dump(metadata, f, ensure_ascii=False, indent=4)

def vectorize_file(input_file_path, output_file_path, field_name):
    """Vectorise every record of a JSON chunk file, with OOM fallbacks.

    Reads *input_file_path*, embeds the text under *field_name* of each
    record, attaches it as 'vector', and writes the result to
    *output_file_path*. On failure it degrades: retry with batch size 1,
    then per-item batches, finally zero-vector placeholders.
    """

    # Probe GPU memory to pick an initial batch size.
    print("检查GPU内存状态...")
    gpu_available = check_gpu_memory()

    with open(input_file_path, 'r', encoding='utf-8') as file:
        data_list = json.load(file)
        query = [data[field_name] for data in data_list]

    print(f"开始向量化 {len(query)} 个文本片段...")

    # Try normal vectorisation first; fall back progressively on failure.
    vectors = None
    batch_size = 10 if gpu_available else 5  # larger batches only when GPU has headroom

    try:
        vectors = vectorize_query(query, batch_size=batch_size)
    except Exception as e:
        print(f"标准向量化失败: {e}")

        # Fallback 1: clear GPU memory and retry with the smallest batch.
        print("尝试清理内存后重新向量化...")
        clear_gpu_memory()
        try:
            vectors = vectorize_query(query, batch_size=1)  # minimal batch size
        except Exception as e2:
            print(f"清理内存后重试也失败: {e2}")

            # Fallback 2: process items one at a time, padding failures.
            print("尝试分批处理向量化...")
            vectors = []
            batch_size = 1
            for i in range(0, len(query), batch_size):
                batch = query[i:i + batch_size]
                try:
                    clear_gpu_memory()  # free memory before each item
                    batch_vectors = vectorize_query(batch, batch_size=1)
                    if batch_vectors.size > 0:
                        vectors.extend(batch_vectors)
                    else:
                        # Still failing: insert a zero vector as placeholder.
                        print(f"批次 {i//batch_size + 1} 向量化失败，使用零向量占位")
                        zero_vector = np.zeros(1024)  # assumes embedding dim 1024 — TODO confirm
                        vectors.append(zero_vector)
                except Exception as batch_e:
                    print(f"批次 {i//batch_size + 1} 处理失败: {batch_e}")
                    # Placeholder keeps indices aligned with data_list.
                    zero_vector = np.zeros(1024)
                    vectors.append(zero_vector)

            vectors = np.array(vectors)

    if vectors is None or vectors.size == 0:
        print("所有向量化尝试都失败，使用零向量作为占位符")
        # Last-ditch fallback: all-zero matrix so downstream indexing still runs.
        vectors = np.zeros((len(query), 1024))  # assumes embedding dim 1024 — TODO confirm

    print(f"向量化完成，生成了 {vectors.shape[0]} 个向量")

    # Attach each vector to its source record.
    for data, vector in zip(data_list, vectors):
        data['vector'] = vector.tolist()

    with open(output_file_path, 'w', encoding='utf-8') as outfile:
        json.dump(data_list, outfile, ensure_ascii=False, indent=4)

    print(f"向量化结果已保存到: {output_file_path}")

# Vector search - supports plain and hybrid retrieval modes
def vector_search(query, index_path, metadata_path, limit, retrieval_mode="single"):
    """Retrieve the top *limit* chunks for *query*.

    Args:
        query: The user query text.
        index_path: Path to the serialised Faiss index.
        metadata_path: Path to the chunk-metadata JSON.
        limit: Maximum number of results to return.
        retrieval_mode: "single" for pure vector search, "hybrid" for
            combined vector + keyword retrieval.

    Raises:
        ValueError: For an unrecognised *retrieval_mode*.
    """
    query_vector = vectorize_query(query)
    if query_vector.size == 0:
        return []

    query_vector = np.array(query_vector, dtype=np.float32).reshape(1, -1)

    if retrieval_mode == "hybrid":
        # Combined dense + keyword retrieval.
        retriever = HybridRetriever(index_path, metadata_path)
        return retriever.hybrid_search(query, query_vector, limit)

    if retrieval_mode == "single":
        # Pure dense retrieval against the Faiss index.
        index = faiss.read_index(index_path)
        with open(metadata_path, 'r', encoding='utf-8') as f:
            metadata = json.load(f)

        _, neighbour_ids = index.search(query_vector, limit)
        return [metadata[idx] for idx in neighbour_ids[0]]

    raise ValueError(f"不支持的检索模式: {retrieval_mode}")

# Pure BM25 (keyword) retrieval
def bm25_search(query, metadata_path, limit):
    """Return the top *limit* chunks for *query* using BM25 only."""
    return BM25Retriever(metadata_path).search(query, limit)

# Document extraction helpers
def extract_text_from_pdf(pdf_path):
    """Return the concatenated text of all pages of a PDF file.

    Returns '' when the file cannot be opened or parsed.
    """
    try:
        # Bug fix: the document handle was never closed; the context manager
        # releases the file even when extraction fails partway through.
        with fitz.open(pdf_path) as doc:
            text = ""
            for page in doc:
                text += page.get_text()
            return text
    except Exception as e:
        print(f"PDF文本提取失败 ({pdf_path}): {str(e)}")
        return ""

def extract_text_from_docx(docx_path):
    """Extract the plain text of a .docx file, one line per paragraph.

    Returns '' when the file cannot be opened or parsed.
    """
    try:
        document = docx.Document(docx_path)
        paragraph_lines = [paragraph.text for paragraph in document.paragraphs]
        return "".join(line + "\n" for line in paragraph_lines)
    except Exception as e:
        print(f"DOCX文本提取失败 ({docx_path}): {str(e)}")
        return ""

def extract_text_from_doc(doc_path):
    """Extract text from a legacy .doc file via the optional docx2txt package.

    Returns '' when docx2txt is missing or extraction fails.
    """
    try:
        # docx2txt handles the old binary .doc format; it is an optional dependency.
        import docx2txt
        return docx2txt.process(doc_path)
    except ImportError:
        print("请安装docx2txt库: pip install docx2txt")
        return ""
    except Exception as e:
        print(f"DOC文本提取失败 ({doc_path}): {str(e)}")
        return ""

# 改进的多文件处理函数 - 添加重复处理检查
# 改进的多文件处理函数 - 支持增量处理新文件
def process_multiple_files(file_objs):
    """Incrementally ingest uploaded files into the knowledge base.

    For each uploaded file object (expects a ``.name`` attribute holding a
    filesystem path): extract text by extension (pdf/docx/doc, everything
    else treated as plain text), skip files whose content fingerprint matches
    a previous run, append newly extracted text to the cumulative knowledge
    base file, persist the processed-file record, and rebuild the index over
    the full knowledge base. Returns a human-readable status string.
    """
    if not file_objs:
        return "请上传文件"
    
    combined_text = ""
    processed_files = []
    failed_files = []
    skipped_files = []
    
    # Paths of the processed-file record and the cumulative knowledge base.
    processed_files_record = os.path.join(OUTPUT_DIR, "processed_files.json")
    knowledge_base_path = os.path.join(OUTPUT_DIR, "knowledge_base.txt")
    
    # Load the record of previously processed files (name -> hash/metadata).
    processed_file_records = {}
    if os.path.exists(processed_files_record):
        try:
            with open(processed_files_record, 'r', encoding='utf-8') as f:
                processed_file_records = json.load(f)
        except Exception as e:
            print(f"读取已处理文件记录失败: {e}")
            processed_file_records = {}
    
    def get_file_hash(file_path):
        """Compute a cheap file fingerprint from its size and first 1 KiB."""
        try:
            import hashlib
            with open(file_path, 'rb') as f:
                # Determine the file size.
                f.seek(0, 2)  # move to end of file
                file_size = f.tell()
                f.seek(0)  # back to the beginning
                
                # Sample the first 1024 bytes for the fingerprint.
                content_sample = f.read(min(1024, file_size))
                
                # NOTE(review): this hashes the str() of the bytes sample,
                # not the raw bytes; stable on Python 3, but changing the
                # format would invalidate all stored records.
                file_id = hashlib.md5(f"{file_size}_{content_sample}".encode()).hexdigest()
                return file_id
        except Exception as e:
            print(f"计算文件哈希失败: {e}")
            return None
    
    def smart_decode(file_path, content_bytes):
        """Decode raw bytes by trying a list of common encodings in order."""
        encodings_to_try = [
            'utf-8', 
            'utf-8-sig',
            'gbk', 
            'gb2312', 
            'gb18030',
            'big5',
            'cp936',
            'latin1',
            'ascii',
            'utf-16',
            'utf-16le',
            'utf-16be',
            'cp1252'
        ]
        
        for encoding in encodings_to_try:
            try:
                decoded_content = content_bytes.decode(encoding)
                print(f"成功使用 {encoding} 解码文件 {file_path}")
                return decoded_content, encoding
            except (UnicodeDecodeError, UnicodeError):
                continue
        
        # Last resort: decode as UTF-8, dropping undecodable bytes.
        try:
            decoded_content = content_bytes.decode('utf-8', errors='ignore')
            print(f"警告：使用 UTF-8 忽略非法字符解码文件 {file_path}")
            return decoded_content, 'utf-8-ignore'
        except Exception as e:
            print(f"致命错误：无法解码文件 {file_path}: {str(e)}")
            return None, None
    
    # Process each uploaded file in turn.
    for file_obj in file_objs:
        try:
            file_name = os.path.basename(file_obj.name)
            file_ext = file_name.lower().split('.')[-1] if '.' in file_name else ''
            
            print(f"检查文件: {file_name}")
            
            # Fingerprint the file for duplicate detection.
            file_hash = get_file_hash(file_obj.name)
            if file_hash is None:
                failed_files.append(f"{file_name} (无法计算文件标识)")
                continue
            
            # Skip files already processed with an identical fingerprint;
            # a changed fingerprint triggers reprocessing.
            if file_name in processed_file_records:
                stored_hash = processed_file_records[file_name].get('hash')
                if stored_hash == file_hash:
                    print(f"文件 {file_name} 已处理过，跳过")
                    skipped_files.append(file_name)
                    continue
                else:
                    print(f"文件 {file_name} 内容已更新，重新处理")
            else:
                print(f"新文件 {file_name}，开始处理")
            
            text = ""
            
            # Extract text according to the file extension.
            if file_ext == 'pdf':
                text = extract_text_from_pdf(file_obj.name)
                if not text:
                    failed_files.append(f"{file_name} (PDF内容为空或无法提取)")
                    continue
            elif file_ext == 'docx':
                text = extract_text_from_docx(file_obj.name)
                if not text:
                    failed_files.append(f"{file_name} (DOCX内容为空或无法提取)")
                    continue
            elif file_ext == 'doc':
                text = extract_text_from_doc(file_obj.name)
                if not text:
                    failed_files.append(f"{file_name} (DOC内容为空或无法提取)")
                    continue
            else:
                # Everything else is treated as a plain text file.
                try:
                    with open(file_obj.name, "rb") as uploaded_file:
                        content_bytes = uploaded_file.read()
                    
                    if len(content_bytes) == 0:
                        failed_files.append(f"{file_name} (文件为空)")
                        continue
                    
                    # Heuristic binary check: reject files with >10% NUL bytes.
                    null_count = content_bytes.count(b'\x00')
                    if null_count > len(content_bytes) * 0.1:
                        failed_files.append(f"{file_name} (可能是二进制文件，无法处理)")
                        continue
                    
                    decoded_content, used_encoding = smart_decode(file_obj.name, content_bytes)
                    
                    if decoded_content is None:
                        failed_files.append(f"{file_name} (无法解码文件)")
                        continue
                    
                    text = decoded_content
                    
                except Exception as decode_error:
                    failed_files.append(f"{file_name} (读取失败: {str(decode_error)})")
                    continue
            
            # Validate and normalize the extracted text.
            if text and text.strip():
                # Normalize line endings and collapse runs of blank lines.
                text = text.replace('\r\n', '\n').replace('\r', '\n')
                text = re.sub(r'\n{3,}', '\n\n', text)
                
                combined_text += f"\n\n----- 文件: {file_name} -----\n\n{text}"
                processed_files.append(file_name)
                
                # Record this file as processed (fingerprint + timestamp).
                processed_file_records[file_name] = {
                    'hash': file_hash,
                    'processed_time': datetime.now().isoformat(),
                    'file_size': len(text)
                }
                
                print(f"成功处理文件: {file_name}, 文本长度: {len(text)}")
            else:
                failed_files.append(f"{file_name} (提取的文本为空)")
            
        except Exception as e:
            error_msg = str(e)
            # NOTE(review): if the failure happened before file_name was
            # assigned, this print would raise NameError — confirm upstream.
            print(f"处理文件 {file_name} 时出错: {error_msg}")
            failed_files.append(f"{os.path.basename(file_obj.name)} (处理异常: {error_msg})")
    
    # Nothing new was processed: report skips/failures only.
    if not processed_files:
        if skipped_files:
            return f"所有文件都已处理过，跳过的文件: {', '.join(skipped_files)}" + (f"\n处理失败的文件: {', '.join(failed_files)}" if failed_files else "")
        else:
            return f"所有文件处理失败: {', '.join(failed_files)}"
    
    # Persist the newly extracted content.
    try:
        # Append to the existing knowledge base file, or create it fresh.
        if os.path.exists(knowledge_base_path) and combined_text:
            with open(knowledge_base_path, "a", encoding='utf-8') as f:
                f.write(combined_text)
            print(f"新内容已追加到知识库文件: {knowledge_base_path}")
        elif combined_text:
            with open(knowledge_base_path, "w", encoding='utf-8') as f:
                f.write(combined_text)
            print(f"知识库文件已创建: {knowledge_base_path}")
        
        # Persist the processed-file record.
        with open(processed_files_record, 'w', encoding='utf-8') as f:
            json.dump(processed_file_records, f, ensure_ascii=False, indent=2)
        
    except Exception as e:
        return f"保存知识库文件失败: {str(e)}"
    
    # Rebuild the index over the complete knowledge base file.
    try:
        process_and_index_txt_file(knowledge_base_path)
        
        # Assemble the final status message.
        status_parts = []
        if processed_files:
            status_parts.append(f"成功处理 {len(processed_files)} 个新文件: {', '.join(processed_files)}")
        if skipped_files:
            status_parts.append(f"跳过 {len(skipped_files)} 个已处理文件: {', '.join(skipped_files)}")
        if failed_files:
            status_parts.append(f"处理失败的文件: {', '.join(failed_files)}")
        
        return "\n".join(status_parts)
        
    except Exception as e:
        return f"文件处理成功，但索引构建失败: {str(e)}"

# 从检索结果生成回答 - 更新以支持多轮对话
def generate_answer_from_search(query, index_path, metadata_path, system_prompt=None, limit=3, retrieval_mode="single", history=None):
    """Answer a query with RAG: retrieve local chunks, then ask the LLM.

    Args:
        query: User question.
        index_path: Path to the FAISS index file.
        metadata_path: Path to the chunk metadata JSON.
        system_prompt: System prompt for the LLM; when None, falls back to
            the configured prompt (Config().system_prompt).
        limit: Number of chunks to retrieve.
        retrieval_mode: "single" (vector only) or "hybrid".
        history: Optional prior conversation messages to prepend.

    Returns:
        The model's answer, or a Chinese "no relevant content" message when
        retrieval finds nothing.
    """
    results = vector_search(query, index_path, metadata_path, limit, retrieval_mode)
    if not results:
        return "没有找到相关内容"

    # Build the reference context from the top-3 chunks, tagging each with
    # its source file when available.
    ref_content_parts = []
    for i, result in enumerate(results[:3]):
        source_info = f"[来源: {result.get('source_file', '未知文件')}]" if result.get('source_file') else ""
        ref_content_parts.append(f"参考片段{i+1}: {source_info}\n{result['chunk']}")
    ref_content = "\n\n".join(ref_content_parts)

    # Bug fix: the original default was a hard-coded prompt string, which
    # made this None-check dead code; defaulting the parameter to None lets
    # the configured system prompt actually take effect.
    if system_prompt is None:
        system_prompt = Config().system_prompt

    # Replay any prior conversation, then add the current question together
    # with the retrieved reference content.
    messages = list(history) if history else []
    context_message = f"问题：{query}\n\n参考内容：\n{ref_content}"
    messages.append({"role": "user", "content": context_message})

    return deepseek_client.generate_answer(system_prompt, messages)

# 处理并索引文件
def process_and_index_txt_file(input_file_path):
    """Chunk, vectorize and index a knowledge-base text file.

    Pipeline: semantic chunking -> chunk vectorization -> FAISS index build.
    All intermediate artifacts are written under OUTPUT_DIR. Errors are
    logged and re-raised to the caller.
    """
    chunk_json = os.path.join(OUTPUT_DIR, "semantic_chunk_output.json")
    vector_json = os.path.join(OUTPUT_DIR, "semantic_chunk_vector.json")
    index_file = os.path.join(OUTPUT_DIR, "semantic_chunk.index")
    metadata_json = os.path.join(OUTPUT_DIR, "semantic_chunk_metadata.json")

    try:
        semantic_chunk(input_file_path, chunk_json)
        print(f"语义分块完成: {chunk_json}")
        vectorize_file(chunk_json, vector_json, "chunk")
        print(f"语义分块向量化完成: {vector_json}")
        build_faiss_index(vector_json, index_file, metadata_json)
        print(f"语义分块索引构建完成: {index_file}")
    except Exception as e:
        print(f"索引构建过程中出错：{str(e)}")
        raise

# 银河麒麟OS智能运维助手核心RAG功能 - 使用模型管理器
def vectorize_query(query, model_name=None, batch_size=None) -> np.ndarray:
    """Vectorize text via the embeddings API or the local embedding model.

    Args:
        query: A string or list of strings to embed.
        model_name: Optional API model override (defaults to cfg.model_name).
        batch_size: Optional batch size (cfg.batch_size for API, 10 locally).

    Returns:
        np.ndarray with one embedding row per input, or an empty array on
        failure.
    """
    # Normalize a single string into a one-element list.
    if isinstance(query, str):
        query = [query]

    all_vectors = []

    if cfg.use_api:
        # --- Remote embeddings via the OpenAI-compatible API ---
        model_to_use = model_name or cfg.model_name
        batch_size_to_use = batch_size or cfg.batch_size

        client = OpenAI(
            api_key=cfg.api_key,
            base_url=cfg.base_url
        )

        for i in range(0, len(query), batch_size_to_use):
            batch = query[i:i + batch_size_to_use]
            try:
                completion = client.embeddings.create(
                    model=model_to_use,
                    input=batch,
                    dimensions=cfg.dimensions,
                    encoding_format="float"
                )
                all_vectors.extend(embedding.embedding for embedding in completion.data)
            except Exception as e:
                print(f"API向量化批次 {i//batch_size_to_use + 1} 失败：{str(e)}")
                return np.array([])
    else:
        # --- Local embedding model obtained from the model manager ---
        batch_size_to_use = batch_size or 10  # default local batch size

        def mean_pooling(model_output, attention_mask):
            """Mean-pool token embeddings, weighting by the attention mask."""
            token_embeddings = model_output[0]  # first element holds all token embeddings
            input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
            return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

        try:
            model, tokenizer = model_manager.get_embedding_model()
            device = model.device

            # Batch size shrinks on CUDA OOM and the index only advances on
            # success. (Bug fix: the original used `for i in range(...)` with
            # `i -= ...; continue`, which does NOT rewind a range loop — the
            # failing batch was silently dropped and, once the batch size
            # shrank, later inputs were skipped entirely.)
            actual_batch_size = batch_size_to_use
            i = 0
            while i < len(query):
                batch = query[i:i + actual_batch_size]
                try:
                    # Tokenize and move tensors to the model's device.
                    encoded_input = tokenizer(batch, padding=True, truncation=True, return_tensors='pt')
                    encoded_input = {k: v.to(device) for k, v in encoded_input.items()}

                    with torch.no_grad():
                        model_output = model(**encoded_input)

                    # Pool, L2-normalize, and collect the embeddings.
                    sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
                    sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1)
                    all_vectors.extend(sentence_embeddings.cpu().numpy())

                    # Release cached GPU memory between batches.
                    if device != "cpu":
                        torch.cuda.empty_cache()

                    i += actual_batch_size  # advance only after a successful batch
                except RuntimeError as batch_error:
                    if "out of memory" in str(batch_error).lower():
                        print(f"批次大小 {actual_batch_size} 导致内存不足，减小批次大小...")
                        if device != "cpu":
                            torch.cuda.empty_cache()

                        if actual_batch_size == 1:
                            # Cannot shrink further; surface the OOM to the
                            # outer handler, which returns an empty array.
                            raise
                        actual_batch_size = max(1, actual_batch_size // 2)
                        print(f"新的批次大小: {actual_batch_size}")
                        # Loop retries the same batch with the smaller size.
                    else:
                        raise batch_error

        except Exception as e:
            print(f"本地向量化失败：{str(e)}")
            # Best-effort GPU cleanup before reporting failure.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            return np.array([])

    return np.array(all_vectors)

# 更新：支持会话历史的问答函数
def ask_question_parallel(question: str, session_id: str, use_search: bool = True, retrieval_mode: str = "single") -> Dict:
    """Answer a question using web search and/or the local knowledge base.

    When web search is enabled and a local index exists, the two retrievals
    run concurrently in a 2-worker thread pool. The combined background is
    then passed to the DeepSeek generator along with the session history,
    and this Q/A turn is appended to the session history.

    Returns:
        Dict with "answer" and a "background" dict holding "web_search" and
        "local_kb" strings; on any exception, an error payload instead.
    """
    try:
        index_path = os.path.join(OUTPUT_DIR, "semantic_chunk.index")
        metadata_path = os.path.join(OUTPUT_DIR, "semantic_chunk_metadata.json")

        # Prior messages for this session (empty list for new sessions).
        history = conversations.get(session_id, [])
        
        # Load configuration (system prompt etc.).
        cfg = Config()
        
        search_background = ""
        local_background = []

        if use_search and os.path.exists(index_path):
            # Run web search and local RAG generation in parallel.
            with ThreadPoolExecutor(max_workers=2) as executor:
                search_future = executor.submit(get_search_background, question)
                # Pass the session history into the RAG answer generator.
                rag_future = executor.submit(
                    generate_answer_from_search, 
                    question, 
                    index_path, 
                    metadata_path, 
                    cfg.system_prompt, 
                    3, 
                    retrieval_mode,
                    history
                )
                for future in as_completed([search_future, rag_future]):
                    result = future.result()
                    if future == search_future:
                        search_background = result
                    elif future == rag_future:
                        local_background.append(result)
        elif use_search:
            search_background = get_search_background(question)
        elif os.path.exists(index_path):
            local_background.append(
                generate_answer_from_search(
                    question, 
                    index_path, 
                    metadata_path, 
                    cfg.system_prompt,
                    3,
                    retrieval_mode,
                    history
                )
            )

        # Combine both retrieval sources into one background string.
        full_background = "。".join([
            "[联网搜索结果]：" + (search_background or "无联网搜索结果"),
            "[本地知识库]：" + (" ".join(local_background) or "无本地结果")
        ]).strip("。")
        
        # Generate the final answer with conversation history applied.
        answer = generate_answer_from_deepseek(
            question=question,
            session_id=session_id,
            system_prompt=cfg.system_prompt,
            background_info=full_background
        )
        
        # Persist this turn into the session history.
        update_conversation_history(session_id, "user", question)
        update_conversation_history(session_id, "assistant", answer)
        
        # Full response including the background that was used.
        return {
            "answer": answer,
            "background": {
                "web_search": search_background or "无联网搜索结果",
                "local_kb": " ".join(local_background) or "无本地结果"
            }
        }
    except Exception as e:
        error_msg = f"查询失败：{str(e)}"
        return {
            "answer": error_msg,
            "background": {
                "web_search": "搜索过程中出错",
                "local_kb": "检索过程中出错"
            }
        }

# 更新会话历史
def update_conversation_history(session_id: str, role: str, content: str):
    """Append a chat message to a session's history, keeping the last 20."""
    history = conversations.setdefault(session_id, [])
    history.append({
        "role": role,
        "content": content,
        "timestamp": datetime.now().isoformat()
    })
    # Cap the stored context at the 20 most recent messages (rebind a new
    # list, matching the original semantics).
    if len(history) > 20:
        conversations[session_id] = history[-20:]

# 获取会话历史
def get_conversation_history(session_id: str):
    """Return the stored message list for a session ([] when unknown)."""
    try:
        return conversations[session_id]
    except KeyError:
        return []

def generate_answer_from_deepseek(question: str, session_id: str, system_prompt: str = None, background_info: Optional[str] = None) -> str:
    """Generate an answer via the DeepSeek client using session history.

    The most recent session messages are replayed before the current
    question; optional retrieval background is prepended to the question.
    Returns the model answer, or a Chinese error message on failure.
    """
    cfg = Config()

    # Module-level DeepSeek client instance.
    global deepseek_client

    if system_prompt is None:
        system_prompt = cfg.system_prompt

    # Replay up to the 20 most recent messages for conversational context.
    recent = get_conversation_history(session_id)[-20:]
    messages = [{"role": msg["role"], "content": msg["content"]} for msg in recent]

    # Prepend retrieval background to the question when available.
    if background_info:
        user_prompt = f"背景知识：{background_info}\n\n问题：{question}"
    else:
        user_prompt = question
    messages.append({"role": "user", "content": user_prompt})

    try:
        return deepseek_client.generate_answer(system_prompt, messages)
    except Exception as e:
        return f"生成回答时出错：{str(e)}"

def get_search_background(query: str, max_length: int = 1500) -> str:
    """Fetch web-search background text for a query, truncated to max_length.

    Tries the primary search (q_searching) first; on failure falls back to a
    Bing keyword search re-ranked by TextRecallRank. Always returns a
    user-facing string (in Chinese), never raises.
    """
    try:
        # Primary path: the q_searching helper.
        try:
            search_results = q_searching(query)
            if not search_results or search_results.strip() == "":
                print(f"Warning: Empty search results for query: {query}")
                return f"无法为查询 '{query}' 找到相关信息。"
            # Collapse whitespace and trim to the length budget.
            return re.sub(r'\s+', ' ', search_results).strip()[:max_length]
        except Exception as e:
            print(f"主搜索方法失败: {str(e)}")
            # Fallback path: Bing keyword search re-ranked locally.
            try:
                ranker = TextRecallRank(Config())
                if hasattr(ranker, "rank_text_by_keywords") and callable(getattr(ranker, "rank_text_by_keywords")):
                    pages = search_bing(query)
                    if pages:
                        ranked = ranker.rank_text_by_keywords(query, pages)
                        if ranked and ranked.strip() != "":
                            return re.sub(r'\s+', ' ', ranked).strip()[:max_length]
            except Exception as backup_err:
                print(f"备选搜索方法也失败: {str(backup_err)}")

            # Every search path failed: return a friendly message.
            return f"无法为查询 '{query}' 找到相关信息。请稍后再试或尝试不同的搜索词。"
    except Exception as e:
        print(f"联网搜索失败：{str(e)}")
        # User-friendly message rather than an empty string.
        return f"联网搜索暂时不可用。请稍后再试。"

# 添加SSE流式响应路由
@app.route('/stream_answer', methods=['GET'])
def stream_answer():
    """SSE endpoint that streams an answer for a question supplied in ?data=.

    Expects a JSON-encoded query parameter ``data`` with keys: "message"
    (required), "session_id", "use_search" (default True) and
    "retrieval_mode" (default "single"). Streams ``data: {...}`` events
    carrying incremental "chunk" payloads, then a final ``{"done": true}``
    event, or an "error" payload on failure.
    """
    # Pull the JSON payload from the query string.
    data_str = request.args.get('data')
    if not data_str:
        return jsonify({'error': '缺少数据参数'}), 400
    
    try:
        data = json.loads(data_str)
    except json.JSONDecodeError:
        return jsonify({'error': '数据格式无效'}), 400
    
    if 'message' not in data:
        return jsonify({'error': '缺少消息参数'}), 400
    
    message = data['message']
    session_id = data.get('session_id')
    use_search = data.get('use_search', True)
    retrieval_mode = data.get('retrieval_mode', 'single')
    
    # Create a new session when the client did not supply an ID.
    if not session_id:
        session_id = str(uuid.uuid4())
        conversations[session_id] = []
    
    def generate():
        """Generator yielding SSE-formatted events for the streamed answer."""
        try:
            index_path = os.path.join(OUTPUT_DIR, "semantic_chunk.index")
            metadata_path = os.path.join(OUTPUT_DIR, "semantic_chunk_metadata.json")
            
            search_background = ""
            # NOTE(review): initialized as a list but reassigned to a str
            # below — works because "or" treats both [] and "" as falsy.
            local_background = []
            
            if use_search:
                search_background = get_search_background(message)
            
            # Retrieve local chunks and format them as reference snippets.
            if os.path.exists(index_path):
                results = vector_search(message, index_path, metadata_path, 3, retrieval_mode)
                if results:
                    ref_content_parts = []
                    for i, result in enumerate(results[:3]):
                        source_info = f"[来源: {result.get('source_file', '未知文件')}]" if result.get('source_file') else ""
                        ref_content_parts.append(f"参考片段{i+1}: {source_info}\n{result['chunk']}")
                    
                    local_background = "\n\n".join(ref_content_parts)
            
            # Combine web and local background into one string.
            full_background = "。".join([
                "[联网搜索结果]：" + (search_background or "无联网搜索结果"),
                "[本地知识库]：" + (local_background or "无本地结果")
            ]).strip("。")
            
            # Load prior conversation turns for this session.
            history = get_conversation_history(session_id)
            
            # Build the message list for the LLM.
            messages = []
            if history:
                messages.extend(history[-10:])  # replay at most the 10 most recent messages
            
            user_prompt = f"背景知识：{full_background}\n\n问题：{message}"
            messages.append({"role": "user", "content": user_prompt})
            
            # A fresh DeepSeekClient per request (shadows the module-level
            # instance inside this generator).
            deepseek_client = DeepSeekClient()
            cfg = Config()
            full_answer = ""
            
            # Stream chunks if the client supports streaming generation.
            if hasattr(deepseek_client, 'generate_answer_stream'):
                # Use the configured system prompt.
                for chunk in deepseek_client.generate_answer_stream(cfg.system_prompt, messages):
                    if chunk:
                        full_answer += chunk
                        yield f"data: {json.dumps({'chunk': chunk})}\n\n"
                
                # Persist this Q/A turn into the session history.
                update_conversation_history(session_id, "user", message)
                update_conversation_history(session_id, "assistant", full_answer or "未生成有效回答")
                
                yield f"data: {json.dumps({'done': True})}\n\n"
            else:
                yield f"data: {json.dumps({'error': '流式生成不支持'})}\n\n"
        except Exception as e:
            yield f"data: {json.dumps({'error': str(e)})}\n\n"
    
    return Response(stream_with_context(generate()), mimetype='text/event-stream', headers={'Cache-Control': 'no-cache'})


# 处理并索引文件
def process_and_index_txt_file(input_file_path):
    """Run the full indexing pipeline over a knowledge-base text file.

    Steps: semantic chunking, chunk vectorization, FAISS index construction.
    Artifacts are written under OUTPUT_DIR; errors are logged and re-raised.
    """
    output_json = os.path.join(OUTPUT_DIR, "semantic_chunk_output.json")
    vectors_json = os.path.join(OUTPUT_DIR, "semantic_chunk_vector.json")
    faiss_index = os.path.join(OUTPUT_DIR, "semantic_chunk.index")
    meta_json = os.path.join(OUTPUT_DIR, "semantic_chunk_metadata.json")

    try:
        # Stage 1: split the text into semantic chunks.
        semantic_chunk(input_file_path, output_json)
        print(f"语义分块完成: {output_json}")
        # Stage 2: embed every chunk.
        vectorize_file(output_json, vectors_json, "chunk")
        print(f"语义分块向量化完成: {vectors_json}")
        # Stage 3: build the FAISS index plus its metadata file.
        build_faiss_index(vectors_json, faiss_index, meta_json)
        print(f"语义分块索引构建完成: {faiss_index}")
    except Exception as e:
        print(f"索引构建过程中出错：{str(e)}")
        raise

# 核心 RAG 功能 - 使用模型管理器
def vectorize_query(query, model_name=None, batch_size=None) -> np.ndarray:
    """Embed text through the API or the locally managed embedding model.

    Args:
        query: A string or list of strings to embed.
        model_name: Optional API model override (defaults to cfg.model_name).
        batch_size: Optional batch size (cfg.batch_size for API, 10 locally).

    Returns:
        np.ndarray with one embedding row per input; empty array on failure.
    """
    # Accept a bare string as a single-item batch.
    if isinstance(query, str):
        query = [query]

    all_vectors = []

    if cfg.use_api:
        # Remote path: OpenAI-compatible embeddings endpoint.
        model_to_use = model_name or cfg.model_name
        batch_size_to_use = batch_size or cfg.batch_size

        client = OpenAI(
            api_key=cfg.api_key,
            base_url=cfg.base_url
        )

        for i in range(0, len(query), batch_size_to_use):
            batch = query[i:i + batch_size_to_use]
            try:
                completion = client.embeddings.create(
                    model=model_to_use,
                    input=batch,
                    dimensions=cfg.dimensions,
                    encoding_format="float"
                )
                all_vectors.extend(embedding.embedding for embedding in completion.data)
            except Exception as e:
                print(f"API向量化批次 {i//batch_size_to_use + 1} 失败：{str(e)}")
                return np.array([])
    else:
        # Local path: embedding model from the model manager.
        batch_size_to_use = batch_size or 10  # default local batch size

        def mean_pooling(model_output, attention_mask):
            """Attention-mask-weighted mean over token embeddings."""
            token_embeddings = model_output[0]  # first element holds all token embeddings
            mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
            return torch.sum(token_embeddings * mask, 1) / torch.clamp(mask.sum(1), min=1e-9)

        try:
            model, tokenizer = model_manager.get_embedding_model()
            device = model.device

            # Bug fix: the original `for i in range(...)` loop could not
            # rewind on CUDA OOM (`i -= ...` inside a range loop is a no-op
            # for iteration), so OOM batches were dropped and inputs skipped.
            # A while loop advances only after a batch succeeds.
            actual_batch_size = batch_size_to_use
            pos = 0
            while pos < len(query):
                batch = query[pos:pos + actual_batch_size]
                try:
                    encoded_input = tokenizer(batch, padding=True, truncation=True, return_tensors='pt')
                    encoded_input = {k: v.to(device) for k, v in encoded_input.items()}

                    with torch.no_grad():
                        model_output = model(**encoded_input)

                    # Pool, normalize, and collect.
                    embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
                    embeddings = F.normalize(embeddings, p=2, dim=1)
                    all_vectors.extend(embeddings.cpu().numpy())

                    # Free cached GPU memory between batches.
                    if device != "cpu":
                        torch.cuda.empty_cache()

                    pos += actual_batch_size  # only advance on success
                except RuntimeError as batch_error:
                    if "out of memory" in str(batch_error).lower():
                        print(f"批次大小 {actual_batch_size} 导致内存不足，减小批次大小...")
                        if device != "cpu":
                            torch.cuda.empty_cache()

                        if actual_batch_size == 1:
                            # Nothing left to shrink; let the outer handler
                            # report failure and return an empty array.
                            raise
                        actual_batch_size = max(1, actual_batch_size // 2)
                        print(f"新的批次大小: {actual_batch_size}")
                        # Retry the same position with the smaller batch.
                    else:
                        raise batch_error

        except Exception as e:
            print(f"本地向量化失败：{str(e)}")
            # Best-effort GPU cleanup before reporting failure.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            return np.array([])

    return np.array(all_vectors)

# 更新：支持会话历史的问答函数
def ask_question_parallel(question: str, session_id: str, use_search: bool = True, retrieval_mode: str = "single") -> Dict:
    """Answer a question using web search and/or the local knowledge base.

    NOTE(review): duplicate definition — this later copy overrides the
    earlier one at import time. It differs only in not passing
    system_prompt explicitly to generate_answer_from_deepseek.

    When web search is enabled and a local index exists, the two retrievals
    run concurrently in a 2-worker thread pool; the combined background is
    then fed to the DeepSeek generator and the Q/A turn is stored in the
    session history.

    Returns:
        Dict with "answer" and a "background" dict holding "web_search" and
        "local_kb" strings; on any exception, an error payload instead.
    """
    try:
        index_path = os.path.join(OUTPUT_DIR, "semantic_chunk.index")
        metadata_path = os.path.join(OUTPUT_DIR, "semantic_chunk_metadata.json")

        # Prior messages for this session (empty list for new sessions).
        history = conversations.get(session_id, [])
        
        # Load configuration (system prompt etc.).
        cfg = Config()
        
        search_background = ""
        local_background = []

        if use_search and os.path.exists(index_path):
            # Run web search and local RAG generation in parallel.
            with ThreadPoolExecutor(max_workers=2) as executor:
                search_future = executor.submit(get_search_background, question)
                # Pass the session history into the RAG answer generator.
                rag_future = executor.submit(
                    generate_answer_from_search,
                    question,
                    index_path,
                    metadata_path,
                    cfg.system_prompt,
                    3,
                    retrieval_mode,
                    history
                )
                for future in as_completed([search_future, rag_future]):
                    result = future.result()
                    if future == search_future:
                        search_background = result
                    elif future == rag_future:
                        local_background.append(result)
        elif use_search:
            search_background = get_search_background(question)
        elif os.path.exists(index_path):
            local_background.append(
                generate_answer_from_search(
                    question,
                    index_path,
                    metadata_path,
                    cfg.system_prompt,
                    3,
                    retrieval_mode,
                    history
                )
            )

        # Combine both retrieval sources into one background string.
        full_background = "。".join([
            "[联网搜索结果]：" + (search_background or "无联网搜索结果"),
            "[本地知识库]：" + (" ".join(local_background) or "无本地结果")
        ]).strip("。")
        
        # Generate the final answer with conversation history applied.
        answer = generate_answer_from_deepseek(
            question=question, 
            session_id=session_id,
            background_info=full_background
        )
        
        # Persist this turn into the session history.
        update_conversation_history(session_id, "user", question)
        update_conversation_history(session_id, "assistant", answer)
        
        # Full response including the background that was used.
        return {
            "answer": answer,
            "background": {
                "web_search": search_background or "无联网搜索结果",
                "local_kb": " ".join(local_background) or "无本地结果"
            }
        }
    except Exception as e:
        error_msg = f"查询失败：{str(e)}"
        return {
            "answer": error_msg,
            "background": {
                "web_search": "搜索过程中出错",
                "local_kb": "检索过程中出错"
            }
        }

# Record one conversation turn
def update_conversation_history(session_id: str, role: str, content: str):
    """Append a timestamped message to the session's history.

    Only the most recent 20 messages are kept, so the context passed to the
    model stays bounded.
    """
    history = conversations.setdefault(session_id, [])
    history.append({
        "role": role,
        "content": content,
        "timestamp": datetime.now().isoformat(),
    })
    # Trim to the last 20 entries to avoid an ever-growing context window.
    if len(history) > 20:
        conversations[session_id] = history[-20:]

# Look up a session's stored history
def get_conversation_history(session_id: str):
    """Return the message list for *session_id*, or an empty list if unknown."""
    history = conversations.get(session_id)
    return history if history is not None else []

def get_search_background(query: str, max_length: int = 1500) -> str:
    """Fetch web-search context for *query*, truncated to *max_length* chars.

    Tries the primary search first; on failure falls back to keyword-ranked
    Bing results. Always returns a user-friendly string instead of raising.
    """
    try:
        try:
            raw = q_searching(query)
            if not raw or raw.strip() == "":
                print(f"Warning: Empty search results for query: {query}")
                return f"无法为查询 '{query}' 找到相关信息。"
            # Collapse all whitespace runs before truncating.
            return re.sub(r'\s+', ' ', raw).strip()[:max_length]
        except Exception as primary_err:
            print(f"主搜索方法失败: {str(primary_err)}")
            # Primary search failed -- attempt keyword ranking over raw Bing data.
            try:
                ranker = TextRecallRank(Config())
                rank_fn = getattr(ranker, "rank_text_by_keywords", None)
                if callable(rank_fn):
                    pages = search_bing(query)
                    if pages:
                        ranked = rank_fn(query, pages)
                        if ranked and ranked.strip() != "":
                            return re.sub(r'\s+', ' ', ranked).strip()[:max_length]
            except Exception as backup_err:
                print(f"备选搜索方法也失败: {str(backup_err)}")

            # Every search path failed -- return a friendly message.
            return f"无法为查询 '{query}' 找到相关信息。请稍后再试或尝试不同的搜索词。"
    except Exception as e:
        print(f"联网搜索失败：{str(e)}")
        # A user-facing message rather than an empty string.
        return f"联网搜索暂时不可用。请稍后再试。"

# Web service routes
@app.route('/')
def index():
    """Serve the single-page front-end entry point."""
    return send_from_directory('static', 'index.html')

@app.route('/<path:path>')
def static_files(path):
    """Serve any other static asset from the static/ directory."""
    return send_from_directory('static', path)

# API route - handle file uploads
@app.route('/upload_files', methods=['POST'])
def upload_files_api():
    """Accept multipart file uploads, index them, and report the result.

    Files are staged into a temporary directory which is always removed
    afterwards, on both the success and failure paths.
    """
    if 'files' not in request.files:
        return jsonify({'message': '没有找到文件'}), 400

    files = request.files.getlist('files')
    if not files or files[0].filename == '':
        return jsonify({'message': '没有选择文件'}), 400

    # Minimal file-like wrapper: downstream processing only reads .name.
    # Defined once here instead of once per loop iteration.
    class FileObj:
        def __init__(self, name):
            self.name = name

    temp_dir = tempfile.mkdtemp()
    try:
        file_objs = []
        for file in files:
            # basename() strips directory components from the client-supplied
            # filename, preventing path traversal outside temp_dir.
            safe_name = os.path.basename(file.filename)
            file_path = os.path.join(temp_dir, safe_name)
            file.save(file_path)
            file_objs.append(FileObj(file_path))

        # Index the uploaded files into the knowledge base.
        result = process_multiple_files(file_objs)
        return jsonify({'message': result})
    except Exception as e:
        return jsonify({'message': f'处理文件时发生错误: {str(e)}'}), 500
    finally:
        # Single cleanup point for every exit path; tolerate partial removal.
        shutil.rmtree(temp_dir, ignore_errors=True)

# New: create a conversation session
@app.route('/create_session', methods=['POST'])
def create_session_api():
    """Allocate a fresh session id with an empty history and return it."""
    new_id = str(uuid.uuid4())
    conversations[new_id] = []
    return jsonify({'session_id': new_id})

# New: fetch a session's conversation history
@app.route('/get_conversation', methods=['POST'])
def get_conversation_api():
    """Return the stored message history for the session id in the request."""
    payload = request.json
    if not payload or 'session_id' not in payload:
        return jsonify({'error': '缺少会话ID参数'}), 400

    sid = payload['session_id']
    return jsonify({
        'session_id': sid,
        'history': get_conversation_history(sid),
    })

# Updated: multi-turn conversation API
@app.route('/conversation', methods=['POST'])
def conversation_api():
    """Start a conversation turn.

    Validates the request, ensures a session exists, and immediately returns
    a "processing" placeholder; the actual retrieval and answer are fetched
    by the client through /check_conversation_progress polling.
    """
    data = request.json
    if not data or 'message' not in data:
        return jsonify({'error': '缺少消息参数'}), 400

    # NOTE: 'message' and 'retrieval_mode' are consumed later by the polling
    # endpoint, not here; only the flags needed for the placeholder are read.
    session_id = data.get('session_id')
    use_search = data.get('use_search', True)

    # Create a new session when the client did not supply one.
    if not session_id:
        session_id = str(uuid.uuid4())
        conversations[session_id] = []

    try:
        # Initial "processing" response shown while the answer is generated.
        return jsonify({
            'status': 'processing',
            'answer': '正在生成回答...',
            'session_id': session_id,
            'search_results': generate_search_results_html('', [], use_search),
        })
    except Exception as e:
        return jsonify({
            'status': 'error',
            'answer': f"查询失败：{str(e)}",
            'session_id': session_id,
            'search_results': f"<p>处理失败：{str(e)}</p>"
        }), 500

# Updated: poll for asynchronous conversation progress
@app.route('/check_conversation_progress', methods=['POST'])
def check_conversation_progress():
    """Dispatch a polling request to one of the pipeline stages.

    The 'type' field selects what the client wants: local knowledge-base
    hits ('local_kb'), web-search text ('web_search'), or the final answer
    with history and background ('answer').
    """
    data = request.json
    if not data or 'message' not in data or 'session_id' not in data:
        return jsonify({'error': '缺少必要参数'}), 400

    message = data['message']
    session_id = data['session_id']
    use_search = data.get('use_search', True)
    req_type = data.get('type', 'status')  # status, local_kb, web_search, answer
    retrieval_mode = data.get('retrieval_mode', 'single')

    try:
        index_path = os.path.join(OUTPUT_DIR, "semantic_chunk.index")
        metadata_path = os.path.join(OUTPUT_DIR, "semantic_chunk_metadata.json")

        if req_type == 'local_kb' and os.path.exists(index_path):
            # Local knowledge-base retrieval, honouring the retrieval mode.
            hits = vector_search(message, index_path, metadata_path, limit=3, retrieval_mode=retrieval_mode)
            if hits:
                fragments = []
                for hit in hits:
                    # Tag each hit with its retriever source (hybrid mode) and origin file.
                    source_tag = f"<span class='source-tag {hit.get('source', 'vector')}'>[{hit.get('source', 'vector')}]</span>" if retrieval_mode == "hybrid" and 'source' in hit else ""
                    file_tag = f"<span class='file-tag'>[文件: {hit.get('source_file', '未知')}]</span>" if hit.get('source_file') else ""
                    fragments.append(f'<div class="result-item"><strong>[{hit["id"]}]</strong> {source_tag} {file_tag} {hit["chunk"]}</div>')
                local_results_html = "".join(fragments)
            else:
                local_results_html = "无相关内容"

            return jsonify({
                'status': 'success',
                'local_kb_html': local_results_html
            })

        if req_type == 'web_search' and use_search:
            # Web search stage.
            web_text = get_search_background(message)
            return jsonify({
                'status': 'success',
                'web_search_html': web_text or "联网搜索无结果"
            })

        if req_type == 'answer':
            # Final answer generation, with retrieval mode and session history.
            result = ask_question_parallel(message, session_id, use_search, retrieval_mode)
            return jsonify({
                'status': 'success',
                'answer': result["answer"],
                'history': get_conversation_history(session_id),
                'background': result["background"]
            })

        # Anything else: report the unrecognized request type.
        return jsonify({
            'status': 'unknown_type',
            'message': f"未知请求类型: {req_type}"
        })
    except Exception as e:
        return jsonify({
            'status': 'error',
            'message': f"处理失败：{str(e)}"
        }), 500

# Helper that renders the search-results panel as an HTML fragment
def generate_search_results_html(search_result, local_results, use_search):
    """Build the HTML for the web-search and local-KB result sections.

    Empty sections show a pending placeholder ('搜索中...' / '检索中...')
    or, when web search is disabled, '未启用联网搜索'.
    """
    web_body = search_result if search_result else '搜索中...' if use_search else '未启用联网搜索'
    html = f"""
    <h3>联网搜索结果</h3>
    <div class="search-section" id="web-search-content">
        {web_body}
    </div>
    
    <h3>本地知识库结果</h3>
    <div class="search-section" id="local-kb-content">
    """

    if local_results:
        html += "".join(
            f'<div class="result-item"><strong>[{item["id"]}]</strong> {item["chunk"]}</div>'
            for item in local_results
        )
    else:
        html += "检索中..."

    return html + "</div>"

# New: model-management API endpoints
@app.route('/api/models/available', methods=['GET'])
def get_available_models():
    """List all configured model providers plus the currently active one."""
    try:
        config = Config()
        providers = {
            name: {
                'name': name,
                'model': settings['model'],
                'type': settings['type'],
                'api_url': settings.get('api_url', ''),
                'description': f"{name} - {settings['model']}",
            }
            for name, settings in config.providers.items()
        }
        return jsonify({
            'success': True,
            'providers': providers,
            'current_provider': config.current_provider,
        })
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/models/switch', methods=['POST'])
def switch_model():
    """Switch the shared LLM client to a different provider (and optional model)."""
    try:
        payload = request.json
        if not payload or 'provider' not in payload:
            return jsonify({'success': False, 'error': '缺少provider参数'}), 400

        provider = payload['provider']

        # Re-point the process-wide client at the requested provider/model.
        global deepseek_client
        deepseek_client.switch_provider(provider, payload.get('model'))

        return jsonify({
            'success': True,
            'message': f'已切换到 {provider} 提供商',
            'current_provider': provider,
            'current_model': deepseek_client.default_model,
        })
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/models/current', methods=['GET'])
def get_current_model():
    """Report the active provider, model name and provider type."""
    try:
        global deepseek_client
        client = deepseek_client
        return jsonify({
            'success': True,
            'current_provider': client.current_provider,
            'current_model': client.default_model,
            'provider_type': client.provider_type,
        })
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/models/test', methods=['POST'])
def test_model():
    """Send a canned probe through the active model to verify it responds."""
    try:
        global deepseek_client
        # Single-message probe conversation.
        probe = [{"role": "user", "content": "你好，请简单回复'模型测试成功'"}]
        reply = deepseek_client.generate_answer("你是一个测试助手", probe)

        return jsonify({
            'success': True,
            'message': '模型测试成功',
            'response': reply,
            'provider': deepseek_client.current_provider,
            'model': deepseek_client.default_model,
        })
    except Exception as e:
        return jsonify({
            'success': False,
            'error': f'模型测试失败: {str(e)}',
            'provider': deepseek_client.current_provider if hasattr(deepseek_client, 'current_provider') else 'unknown'
        }), 500

if __name__ == "__main__":
    # Ensure the fallback knowledge-base file exists before serving.
    if not os.path.exists("knowledge_base.txt"):
        with open("knowledge_base.txt", "w"):
            pass  # create empty file; context manager guarantees it is closed

    # Static assets directory for the front-end.
    os.makedirs("static", exist_ok=True)

    print("正在启动应用...")
    print("模型将在首次使用时加载...")

    # Start the Flask app.
    # NOTE(review): debug=True enables the Werkzeug debugger and auto-reloader;
    # it must not be used when the service is exposed to untrusted networks.
    app.run(host="0.0.0.0", port=6006, debug=True)
