import json
import os#文件系统，进程，环境变量，posix的封装
import time#time低级操作
import glob#通配符
from datetime import datetime#time类型的高级操作
from openai import OpenAI
from dotenv import load_dotenv#.env文件的使用
import requests#crawl爬取
from bs4 import BeautifulSoup#HTML/XML解析与数据提取
from selenium import webdriver#浏览器自动化控制
from selenium.webdriver.common.by import By#提供统一的元素定位标识符，用于指定查找元素的方式（如 ID、XPath、CSS 等）
from selenium.webdriver.support.ui import WebDriverWait#智能等待页面元素加载
from selenium.webdriver.support import expected_conditions as EC#定义页面加载完成的条件
from selenium.webdriver.edge.service import Service as EdgeService#管理 Edge 浏览器驱动（msedgedriver.exe）的生命周期，包括启动/停止服务
from selenium.webdriver.edge.options import Options as EdgeOptions#定制浏览器启动参数（如无头模式、代理、证书设置）
import re#正则表达式文本匹配
import logging#日志记录与管理
import random#随机数
import numpy as np#数值计算和数组操作
from typing import Optional
import base64
from typing import List
from pathlib import Path
from typing import Dict, List, Optional
import os
import json
import logging
import asyncio
import shutil
from datetime import datetime
from pathlib import Path
from logging.handlers import RotatingFileHandler
from dotenv import load_dotenv
from fastapi import FastAPI, HTTPException, UploadFile, File
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
import httpx
try:
    from webdriver_manager.microsoft import EdgeChromiumDriverManager
except ImportError:
    print("⚠️ 缺少webdriver-manager库，请先执行: pip install webdriver-manager")
    exit(1)
# 在文件顶部添加FastAPI依赖
from fastapi import FastAPI, HTTPException, UploadFile, File
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
import httpx
import asyncio
from fastapi import FastAPI, HTTPException, UploadFile, File, Request
import sys
'''
import atexit
@atexit.register
def cleanup():
    os.system("taskkill /f /im msedge.exe")  # Windows系统强制终止Edge进程    
'''

import logging
from logging.handlers import RotatingFileHandler  # 使用轮转日志处理程序
# 替换所有 input() 调用
def get_command():
    """Resolve the command to run without an interactive input() call.

    Resolution order: AUTO_MODE env var -> /app/command.txt -> "default".
    """
    # 1) Environment-variable override.
    if os.environ.get("AUTO_MODE") == "true":
        return "auto"

    # 2) Command file dropped into the container.
    command_file = "/app/command.txt"
    if os.path.exists(command_file):
        with open(command_file) as fh:
            return fh.read().strip()

    # 3) Fallback default.
    return "default"
# 优化后的日志配置
def setup_logging():
    """Configure and return the application logger.

    Attaches a console handler and a rotating file handler
    (chat_system.log, 10 MB x 3 backups, UTF-8).

    Idempotent: calling it again returns the already-configured logger.
    The original re-added both handlers on every call, so a second call
    duplicated every log line.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)

    # Guard against duplicate handlers when called more than once.
    if logger.handlers:
        return logger

    # Shared log-line format.
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # Console handler (terminal output).
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)

    # Rotating file handler - bounded size, so the log cannot grow forever.
    file_handler = RotatingFileHandler(
        'chat_system.log',
        maxBytes=10 * 1024 * 1024,  # 10MB per file
        backupCount=3,              # keep 3 backups
        encoding='utf-8'            # force UTF-8 so non-ASCII messages survive
    )
    file_handler.setFormatter(formatter)

    logger.addHandler(console_handler)
    logger.addHandler(file_handler)

    return logger

# Initialize the module-level logger shared by the whole file.
logger = setup_logging()
logger.info("应用启动 - 使用轮转日志系统")
# setup_logging() names the logger after the current module.
# Load environment variables from a .env file, if present.
load_dotenv()
##############################################driver = webdriver.Edge()####################################
# Constant directory paths.
HISTORY_DIR = "chat_histories"  # chat-history JSON files
#KNOWLEDGE_DIR = "knowledge_base"  # alternative knowledge dir (disabled)
CRAWLER_DIR = "crawled_data"  # storage directory for crawled page text
KNOWLEDGE_DIR = CRAWLER_DIR  # knowledge base currently aliases the crawl output
# Chinese contact/hotline keywords used to boost retrieval relevance.
KEYWORDS = ["咨询", "热线", "电话", "联系", "联系方式", "联系电话", "招生热线"]
class WebCrawler:
    """Core web-crawling class.

    Static pages are fetched with requests; dynamic pages are rendered
    with a headless Edge WebDriver. Extracted text is saved to disk.
    """

    def __init__(self, output_dir=CRAWLER_DIR):
        """
        Args:
            output_dir: directory where crawled page text is written.
        """
        self.output_dir = output_dir
        os.makedirs(output_dir, exist_ok=True)
        self.driver = None
        # UA pool; a random entry is injected per driver start so each
        # session presents a different browser fingerprint.
        self.user_agents = [
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36...",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)...",
            "Mozilla/5.0 (iPhone; CPU iPhone OS 16_0 like Mac OS X)..."
        ]

    def _init_driver(self):
        """Lazily create the headless Edge driver; always returns True."""
        if self.driver is None:
            edge_options = EdgeOptions()
            # Headless mode: run without a visible browser window.
            edge_options.add_argument('--headless=new')
            # Disable GPU rendering; avoids headless rendering issues on some platforms.
            edge_options.add_argument('--disable-gpu')
            edge_options.add_argument('--log-level=3')
            edge_options.add_argument('--ignore-certificate-errors')
            edge_options.add_argument('--allow-running-insecure-content')
            # Hide automation-control traces from anti-bot detection.
            edge_options.add_argument('--disable-blink-features=AutomationControlled')
            edge_options.add_experimental_option("excludeSwitches", ["enable-automation"])
            # Random User-Agent plus a fixed 1920x1080 window.
            edge_options.add_argument(f'--user-agent={random.choice(self.user_agents)}')
            edge_options.add_argument('--window-size=1920,1080')

            # Silent startup; webdriver-manager auto-installs a driver
            # matching the local Edge version (no manual maintenance).
            service = EdgeService(
                EdgeChromiumDriverManager().install(),
                service_args=["--silent"],
                log_path=os.devnull  # suppress driver logging
            )
            self.driver = webdriver.Edge(service=service, options=edge_options)
        return True

    def _get_dynamic_content(self, url, timeout=20):
        """Render `url` in the headless browser and return its HTML.

        Falls back to a plain requests.get on failure; returns None if
        both paths fail. The driver is always shut down afterwards.
        """
        if not self._init_driver():
            return None

        try:
            # Load the page so JavaScript renders dynamic content.
            self.driver.get(url)

            # Composite wait: DOM parse complete AND a <body> element exists.
            WebDriverWait(self.driver, timeout).until(
                lambda d: d.execute_script("return document.readyState") == "complete" and
                         len(d.find_elements(By.TAG_NAME, 'body')) > 0
            )

            # Scroll in thirds with random pauses to mimic a human reader
            # (also gives lazy-loaded content a chance to appear).
            for i in range(3):
                scroll_height = self.driver.execute_script(
                    "return Math.min(document.body.scrollHeight, "
                    f"document.documentElement.scrollHeight)")
                self.driver.execute_script(
                    f"window.scrollTo(0, {scroll_height * (i+1)/3})")
                time.sleep(random.uniform(0.5, 1.5))

            # Second check: the page must contain at least one text node.
            WebDriverWait(self.driver, 5).until(
                EC.presence_of_element_located((By.XPATH, "//*[text()]"))
            )

            # Fully rendered HTML, including JS-generated content.
            return self.driver.page_source

        except Exception as e:
            logger.error(f"动态加载失败: {str(e)}")
            # Dynamic rendering failed: last-ditch plain HTTP fetch.
            try:
                return requests.get(url, verify=False, timeout=10).text
            except Exception:  # was a bare except; keep best-effort semantics
                return None
        finally:
            if self.driver:
                self.driver.quit()
                self.driver = None

    def _clean_text(self, text):
        """Collapse whitespace runs and strip control characters."""
        cleaned = re.sub(r'\s+', ' ', text).strip()
        return re.sub(r'[\x00-\x1f\x7f-\x9f]', '', cleaned)

    def crawl_page(self, url, is_dynamic=False, selector=None):
        """
        Crawl one page and save the extracted text under output_dir.

        :param url: target URL
        :param is_dynamic: render with the headless browser if True
        :param selector: optional CSS selector, or XPath (starts with '/')
        :return: result dict with status / saved path / truncated content
        """
        try:
            # Fetch the raw page HTML.
            if is_dynamic:
                html = self._get_dynamic_content(url)
            else:
                response = requests.get(url, headers={
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
                }, timeout=10)
                response.encoding = "utf-8"  # force UTF-8 decoding
                html = response.text if response.status_code == 200 else None

            if not html:  # make sure the page content is non-empty
                return {"status": "failed", "reason": "页面获取失败"}

            # Parse with the built-in parser (lxml would be faster).
            soup = BeautifulSoup(html, 'html.parser')

            # Extract via selector when given, otherwise take all text.
            if selector:
                if selector.startswith('/'):  # XPath selector
                    # BUG FIX: the original `from lxml import html` shadowed
                    # the local `html` page string and then passed the module
                    # itself to fromstring(). Alias the import so the page
                    # source is actually parsed.
                    from lxml import html as lxml_html
                    tree = lxml_html.fromstring(html)
                    elements = tree.xpath(selector)
                    content = '\n'.join([self._clean_text(e.text_content()) for e in elements if e.text_content()])
                else:  # CSS selector
                    elements = soup.select(selector)
                    content = '\n'.join([self._clean_text(e.get_text()) for e in elements if e.get_text()])
            else:  # full-page text
                content = self._clean_text(soup.get_text())

            # Build a filename from the domain plus a timestamp, then save.
            domain = re.sub(r'[^\w-]', '_', url.split('//')[-1].split('/')[0])
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"{domain}_{timestamp}.txt"
            filepath = os.path.join(self.output_dir, filename)

            with open(filepath, 'w', encoding='utf-8') as f:
                f.write(f"URL: {url}\n\n{content}")

            return {
                "status": "success",
                "path": filepath,
                "content": content[:1000] + "..." if len(content) > 1000 else content
            }

        except Exception as e:
            return {"status": "error", "reason": str(e)}
        
class HistoryManager:
    """Manage chat-history JSON files; supports loading several at once."""

    def __init__(self, history_dir=HISTORY_DIR):
        self.history_dir = history_dir
        os.makedirs(history_dir, exist_ok=True)  # ensure the directory exists

    def list_histories(self, limit=10):
        """Return up to `limit` history files, newest (by mtime) first.

        The rolling `latest.json` snapshot is excluded from the listing.
        """
        entries = []
        for name in os.listdir(self.history_dir):
            if not name.endswith(".json") or name == "latest.json":
                continue
            full_path = os.path.join(self.history_dir, name)
            entries.append({
                "id": len(entries) + 1,                # sequential id
                "filename": name,
                "path": full_path,
                "mtime": os.path.getmtime(full_path),  # last-modified time
            })

        # Newest files first.
        entries.sort(key=lambda item: item["mtime"], reverse=True)
        return entries[:limit]

    def load_history(self, filename):
        """Load one history file; returns the parsed JSON or None on failure."""
        target = os.path.join(self.history_dir, filename)
        if not os.path.exists(target):
            return None
        try:
            # JSON objects/arrays map to Python dicts/lists.
            with open(target, 'r', encoding='utf-8') as fh:
                return json.load(fh)
        except Exception as e:
            print(f"⚠️ 加载历史记录失败: (unknown) - {e}")
        return None

    def load_multiple_histories(self, filenames):
        """Load and concatenate several history files.

        When a later file starts with a system message, that first
        message is dropped so only one system message survives.
        """
        merged = []
        for name in filenames:
            loaded = self.load_history(name)
            if not loaded:
                continue
            if merged and loaded[0].get("role") == "system":
                merged.extend(loaded[1:])  # skip the duplicate system message
            else:
                merged.extend(loaded)
        return merged

    def save_history(self, messages, custom_name=None):
        """Persist `messages` as JSON; also mirror them to latest.json.

        :param custom_name: optional file name; defaults to a timestamped one
        :return: path of the primary file written
        """
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        target_name = custom_name or f"history_{stamp}.json"
        target_path = os.path.join(self.history_dir, target_name)

        # Write the named file, then overwrite the latest.json mirror.
        for path in (target_path, os.path.join(self.history_dir, "latest.json")):
            with open(path, 'w', encoding='utf-8') as fh:
                json.dump(messages, fh, ensure_ascii=False, indent=4)

        return target_path

class KnowledgeLoader:
    """Knowledge-base loader covering the crawl and knowledge directories."""

    def __init__(self, crawl_dir="crawled_data", knowledge_dir="knowledge"):
        """
        Args:
            crawl_dir (str): crawled-data directory, default "crawled_data"
            knowledge_dir (str): knowledge directory, default "knowledge"
        """
        # BUG FIX: the original hard-coded both attributes, silently
        # ignoring the constructor arguments (while still creating the
        # directories the arguments named). Honor the parameters.
        self.crawl_dir = crawl_dir
        self.knowledge_dir = knowledge_dir
        os.makedirs(crawl_dir, exist_ok=True)
        os.makedirs(knowledge_dir, exist_ok=True)

    def load(self, dir_choice=None):
        """
        Load knowledge content.
        :param dir_choice: which directory to load ('crawl', 'knowledge',
                           'all' or None = load everything)
        :return: list of loaded knowledge items
        """
        if dir_choice == "crawl":
            dirs = [self.crawl_dir]
        elif dir_choice == "knowledge":
            dirs = [self.knowledge_dir]
        else:
            # Default: load both directories.
            dirs = [self.crawl_dir, self.knowledge_dir]

        return self._load_dirs(dirs)

    def kload(self, file_filter=None, content_filter=None, dir_choice=None):
        """
        Load knowledge content with optional filename/content filters.
        :param file_filter: substring to match in file names
        :param content_filter: predicate applied to each file's text
        :param dir_choice: which directory to load ('crawl', 'knowledge',
                           'all' or None = load everything)
        :return: list of loaded knowledge items
        """
        if dir_choice == "crawl":
            dirs = [self.crawl_dir]
        elif dir_choice == "knowledge":
            dirs = [self.knowledge_dir]
        else:
            # Default: load both directories.
            dirs = [self.crawl_dir, self.knowledge_dir]

        return self._load_with_filters(dirs, file_filter, content_filter)

    def _load_dirs(self, dirs):
        """Recursively load every supported file under each directory."""
        content = []
        extensions = ('.txt', '.md', '.csv', '.json')

        for directory in dirs:
            for ext in extensions:
                pattern = os.path.join(directory, '**', f'*{ext}')
                for file_path in glob.glob(pattern, recursive=True):
                    if os.path.isfile(file_path):
                        try:
                            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                                file_size = os.path.getsize(file_path)
                                # Cap very large files at the first 10 KB.
                                text = f.read(10240) if file_size > 102400 else f.read()
                                content.append({
                                    "dir": os.path.basename(directory),  # directory tag
                                    "path": os.path.basename(file_path),
                                    "content": text
                                })
                        except Exception as e:
                            print(f"⚠️ 文件读取失败: {file_path} - {e}")
            print(f"✅ 已加载目录: {directory}")
        return content

    def _load_with_filters(self, dirs, file_filter, content_filter):
        """Load files matching `file_filter`, keeping only text that passes `content_filter`."""
        content = []
        extensions = ('.txt', '.md', '.csv', '.json')

        for directory in dirs:
            # Collect candidate file paths for this directory.
            if file_filter:
                pattern = os.path.join(directory, f"*{file_filter}*")
                file_paths = glob.glob(pattern)

                # Fall back to extension-qualified patterns if nothing matched.
                if not file_paths:
                    for ext in extensions:
                        ext_pattern = os.path.join(directory, f"*{file_filter}*{ext}")
                        file_paths.extend(glob.glob(ext_pattern))
            else:
                file_paths = []
                for ext in extensions:
                    file_paths.extend(glob.glob(os.path.join(directory, f"*{ext}")))

            print(f"ℹ️ 在 {directory} 中找到 {len(file_paths)} 个匹配文件")

            # Read and filter each file.
            for file_path in file_paths:
                try:
                    with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                        file_size = os.path.getsize(file_path)
                        # Cap very large files at the first 10 KB.
                        text = f.read(10240) if file_size > 102400 else f.read()

                        if content_filter and not content_filter(text):
                            continue

                        content.append({
                            "dir": os.path.basename(directory),
                            "path": file_path,
                            "filename": os.path.basename(file_path),
                            "content": text
                        })
                        print(f"✅ 已加载: {os.path.basename(file_path)} (来自: {directory})")
                except Exception as e:
                    print(f"⚠️ 文件读取失败: {file_path} - {e}")

        return content

    def get_file_lists(self):
        """Return the file listings of both directories."""
        return {
            "crawled_data": self._get_dir_files(self.crawl_dir),
            "knowledge": self._get_dir_files(self.knowledge_dir)
        }

    def _get_dir_files(self, directory):
        """List (non-recursively) the supported files in one directory."""
        extensions = ('.txt', '.md', '.csv', '.json')
        files = []
        for ext in extensions:
            files.extend(glob.glob(os.path.join(directory, f"*{ext}")))
        return [os.path.basename(f) for f in files]

    def get_file_content(self, filename, from_dir=None):
        """Return the text of `filename`, searching both dirs when unspecified."""
        # Prefer the explicitly requested directory.
        if from_dir == "crawl":
            file_path = os.path.join(self.crawl_dir, filename)
        elif from_dir == "knowledge":
            file_path = os.path.join(self.knowledge_dir, filename)
        else:
            # No directory given: try crawl dir first, then knowledge dir.
            file_path = os.path.join(self.crawl_dir, filename)
            if not os.path.exists(file_path):
                file_path = os.path.join(self.knowledge_dir, filename)

        if not os.path.exists(file_path):
            # BUG FIX: the message printed the literal "(unknown)" instead
            # of the missing path; report the actual path.
            print(f"⚠️ 文件不存在: {file_path}")
            return None

        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                return f.read()
        except Exception as e:
            # BUG FIX: same placeholder problem as above.
            print(f"⚠️ 读取文件失败: {file_path} - {e}")
            return None
def create_client():
    """Create an OpenAI-compatible API client.

    Reads API_KEY / API_URL from the environment (the original comments
    show this was intended), falling back to the legacy hard-coded
    values for backward compatibility.

    SECURITY NOTE: the fallback credentials below are secrets committed
    to source control; they should be rotated and removed.

    :return: OpenAI client, or None if construction fails.
    """
    try:
        return OpenAI(
            api_key=os.getenv("API_KEY", 'sk-uZy29fuiHwjQEHtJq35r5KQHDqYYMa3WNdVdvxo8ZoPyws4T'),
            base_url=os.getenv("API_URL", 'http://123.182.124.194:58000/v1')
        )
    except Exception as e:
        print(f"创建客户端时出错: {e}")
        return None

def stream_chat(client, messages):
    """Stream a chat completion, echoing tokens to stdout as they arrive.

    :param client: OpenAI-compatible client
    :param messages: chat message list to send
    :return: the concatenated response text, or None on API failure
    """
    try:
        print("\nAI助手正在思考...\n" + "-" * 40)

        stream = client.chat.completions.create(
            model="Qwen2.5-72B-Instruct",
            messages=messages,
            stream=True,
            temperature=0.7,
            max_tokens=2000
        )

        pieces = []
        print("AI助手: ", end='', flush=True)
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta is None:
                continue
            print(delta, end='', flush=True)
            pieces.append(delta)
            time.sleep(0.01)  # slight pacing for a typewriter effect

        print("\n" + "-" * 40)
        return "".join(pieces)

    except Exception as e:
        print(f"\nAPI调用错误: {e}")
        return None

def build_rag_prompt(user_query, knowledge, history):
    """Assemble the message list for a RAG chat call.

    Layout: [system instruction] + prior history + one user message that
    embeds up to 500 chars of each knowledge snippet before the query.
    """
    # Fixed system instruction.
    system_msg = {
        "role": "system",
        "content": "你是一个专业的问答助手，请严格根据提供的知识内容回答问题。"
    }

    # Render each knowledge item as a titled, truncated snippet.
    snippets = [f"【{item['path']}】\n{item['content'][:500]}" for item in knowledge]
    knowledge_context = "\n".join(snippets)
    knowledge_msg = {
        "role": "user",
        "content": f"{knowledge_context}\n\n【当前问题】{user_query}"
    }

    # System message first, then prior turns, then the knowledge+query turn.
    return [system_msg] + history + [knowledge_msg]


# Load environment variables from .env (idempotent; also done above).
load_dotenv()

# Initialize the module-level OpenAI client used for embeddings/chat.
# SECURITY NOTE: prefer API_KEY / API_URL from the environment; the
# hard-coded fallbacks below are committed secrets and should be
# rotated and removed from source control.
client = OpenAI(
    api_key=os.getenv("API_KEY", "sk-uZy29fuiHwjQEHtJq35r5KQHDqYYMa3WNdVdvxo8ZoPyws4T"),
    base_url=os.getenv("API_URL", "http://123.182.124.194:58000/v1")
)
###################################embedding#####################################################
# Keyword library used to boost retrieval of contact/hotline snippets.
# NOTE: duplicates the definition near the top of the file.
KEYWORDS = ["咨询", "热线", "电话", "联系", "联系方式", "联系电话", "招生热线"]

def load_documents(data_dir, max_length=500):
    """Load .txt documents from `data_dir`, segmenting long ones.

    Short documents (<= max_length chars) are kept whole; longer ones
    are split via smart_segment(). Every piece is tagged with whether
    it contains one of the KEYWORDS.

    :param data_dir: directory to scan for *.txt files
    :param max_length: maximum characters per piece before segmenting
    :return: list of {"source", "content", "has_keywords"} dicts
    """
    documents = []
    files = glob.glob(os.path.join(data_dir, '*.txt'))

    if not files:
        print(f"⚠️ 在 '{data_dir}' 目录中未找到任何txt文件")
        return documents

    print(f"📂 发现 {len(files)} 个文本文件")

    for file_path in files:
        try:
            with open(file_path, "r", encoding='utf-8', errors='ignore') as f:
                content = f.read().strip()
                if not content:
                    continue

                filename = os.path.basename(file_path)

                # Short documents are stored as a single piece.
                if len(content) <= max_length:
                    has_keywords = any(kw in content for kw in KEYWORDS)
                    documents.append({
                        "source": filename,
                        "content": content,
                        "has_keywords": has_keywords
                    })
                # Long documents are split into keyword-aware segments.
                else:
                    segments = smart_segment(content, max_length)
                    if not segments:
                        continue

                    # Tag each segment with keyword presence.
                    for i, seg in enumerate(segments):
                        has_keywords = any(kw in seg for kw in KEYWORDS)
                        documents.append({
                            # BUG FIX: the source label was the literal
                            # "(unknown)@{i+1}", losing the file name;
                            # include the originating file + segment index.
                            "source": f"{filename}@{i+1}",
                            "content": seg,
                            "has_keywords": has_keywords
                        })
        except Exception as e:
            print(f"⚠️ 加载文档出错: {os.path.basename(file_path)} - {str(e)}")

    # Report how many pieces contain a keyword.
    key_docs = sum(1 for d in documents if d['has_keywords'])
    print(f"✅ 加载完成: {len(documents)}个文本片段 ({key_docs}个包含关键词)")
    return documents

def smart_segment(text, max_length=500):
    """Split long text into segments, keeping keyword contexts intact.

    Strategy:
      1. extract a window (~50 chars before, ~150 after) around the first
         occurrence of each keyword;
      2. split the remaining text by paragraphs, then sentences, then raw
         length, so each segment stays near max_length.

    :return: list of non-blank text segments
    """
    segments = []

    # Collect context windows around keywords.
    # NOTE(review): only the FIRST occurrence of each keyword is captured
    # (text.index); later occurrences rely on the paragraph splitting.
    found_key_contexts = []
    for kw in KEYWORDS:
        if kw in text:
            start_idx = max(0, text.index(kw) - 50)
            end_idx = min(len(text), text.index(kw) + 150)
            segment = text[start_idx:end_idx]
            if segment not in found_key_contexts:
                found_key_contexts.append(segment)

    segments.extend(found_key_contexts)

    # Remove the extracted windows, then split whatever text remains.
    remaining_text = text
    for seg in segments:
        remaining_text = remaining_text.replace(seg, "")

    if len(remaining_text) > max_length:
        if '\n\n' in remaining_text:
            # Split on blank-line paragraph boundaries first.
            paras = remaining_text.split('\n\n')
            for para in paras:
                if para and len(para) <= max_length:
                    segments.append(para)
                elif para:
                    # Paragraph still too long: split on sentence enders.
                    sentences = re.split(r'(?<=[。！？.!?])', para)
                    current = ""
                    for sent in sentences:
                        if len(current) + len(sent) > max_length:
                            segments.append(current)
                            current = sent
                        else:
                            current += sent
                    if current:
                        segments.append(current)
        else:
            # No paragraph marks: fall back to fixed-size slices.
            while remaining_text:
                segments.append(remaining_text[:max_length])
                remaining_text = remaining_text[max_length:]
    elif remaining_text.strip():
        # BUG FIX: the original silently DROPPED leftover text that was
        # already short enough; keep it as its own segment instead.
        segments.append(remaining_text)

    # Drop empty/whitespace-only segments.
    segments = [seg for seg in segments if seg.strip()]
    return segments

def get_embedding(text, model="bge-m3", max_retries=3):
    """Fetch the embedding vector for `text`, retrying on transient errors.

    :param text: input text to embed
    :param model: embedding model name
    :param max_retries: total attempts before giving up
    :return: embedding list, or None after max_retries failures
    """
    for attempt in range(1, max_retries + 1):
        try:
            reply = client.embeddings.create(model=model, input=[text])
        except Exception as exc:
            if attempt == max_retries:
                # Out of attempts: report and give up.
                print(f"❌ 无法获取文本嵌入: {exc}")
                return None
            print(f"⚠️ 获取嵌入失败 (重试 {attempt}/{max_retries}): {exc}")
            time.sleep(1)  # brief backoff before retrying
        else:
            return reply.data[0].embedding
    return None

def enhance_cosine_similarity(query_embed, doc_embed, doc_content):
    """Cosine similarity plus a keyword-presence boost, capped at 1.0.

    Returns 0.0 for missing or zero-length vectors.
    """
    if query_embed is None or doc_embed is None:
        return 0.0

    q = np.array(query_embed)
    d = np.array(doc_embed)
    q_norm = np.linalg.norm(q)
    d_norm = np.linalg.norm(d)

    # Degenerate (zero-length) vectors have no meaningful direction.
    if q_norm == 0 or d_norm == 0:
        return 0.0

    base_sim = np.dot(q, d) / (q_norm * d_norm)

    # Each keyword occurrence adds 0.1, capped at 0.25 per keyword.
    keyword_boost = sum(
        min(0.25, doc_content.count(kw) * 0.1)
        for kw in KEYWORDS
        if kw in doc_content
    )

    return min(base_sim + keyword_boost, 1.0)

def find_top_contexts(query_embedding, documents, top_k=3):
    """Return the top_k most relevant documents, keyword hits ranked first."""
    candidates = []

    for doc in documents:
        # Skip documents without a usable embedding.
        if "embedding" not in doc or doc["embedding"] is None:
            continue

        score = enhance_cosine_similarity(query_embedding, doc["embedding"], doc["content"])

        # Keep anything with positive similarity or a keyword hit.
        if score > 0 or doc["has_keywords"]:
            candidates.append({
                "source": doc["source"],
                "content": doc["content"],
                "similarity": score,
                "has_keywords": doc["has_keywords"],
            })

    if not candidates:
        return []

    # Keyword-bearing docs first, then by descending similarity.
    candidates.sort(key=lambda c: (-c["has_keywords"], -c["similarity"]))
    return candidates[:top_k]

def build_quality_prompt(user_query, context_results):
    """Build the final LLM prompt, separating high/low relevance snippets.

    High-relevance snippets (similarity >= 0.5 or keyword hit) are quoted
    in full with keywords bracketed; low-relevance ones get a short
    preview. Falls back to a "no context" prompt when nothing matched.
    """
    header = f"用户查询: {user_query}\n\n"

    if not context_results:
        return header + "⚠️ 未找到与查询相关的上下文信息\n请根据您的知识回答用户查询:"

    parts = [header, "相关上下文信息(基于语义匹配):\n"]

    # Partition by relevance, preserving the incoming order.
    high_relevance = [c for c in context_results
                      if c["similarity"] >= 0.5 or c["has_keywords"]]
    low_relevance = [c for c in context_results
                     if not (c["similarity"] >= 0.5 or c["has_keywords"])]

    for ctx in high_relevance:
        # Bracket keywords so the model notices them.
        content = ctx["content"]
        for kw in KEYWORDS:
            if kw in content:
                content = content.replace(kw, f"【{kw}】")

        tag = f"[来源: {ctx['source']}, 相关度: {ctx['similarity']:.1%}]"
        if ctx["has_keywords"]:
            tag += " 🔑"
        parts.append(tag + "\n")

        # Quote the snippet, prefixing every line with "> ".
        parts.append("> " + "\n> ".join(content.split('\n')) + "\n\n")

    # Summarize up to two low-relevance snippets.
    if low_relevance:
        parts.append("---\n其他参考片段:\n")
        for ctx in low_relevance[:2]:
            preview = ctx["content"][:100].replace('\n', ' ')
            if len(ctx["content"]) > 100:
                preview += "..."
            parts.append(f"- {ctx['source']}: {preview}\n")

    parts.append("\n请基于以上上下文专业地回答用户查询:")
    return "".join(parts)

def embedding():
    """Interactive embedding-retrieval demo.

    Loads .txt documents from a user-chosen directory, computes an
    embedding for every segment, then runs a query REPL until the user
    types exit/quit.
    """
    # 1. Load documents.
    data_dir = input("请输入查询类别: ").strip()
    print(f"\n{'='*50}")
    print(f"📚 文档加载与嵌入计算系统".center(50))
    print(f"{'='*50}\n")

    print(f"正在扫描 '{data_dir}' 目录...")
    documents = load_documents(data_dir)

    if not documents:
        print(f"⚠️ 终止: 没有可用的文档内容")
        return

    # 2. Compute document embeddings.
    print("\n🔄 正在计算文本嵌入(可能需要几分钟)...")
    start_time = time.time()

    for i, doc in enumerate(documents):
        doc["embedding"] = get_embedding(doc["content"])
        # Progress report every 10 pieces and at the end.
        if (i+1) % 10 == 0 or i == len(documents)-1:
            elapsed = time.time() - start_time
            print(f"  ⏱️ 已完成 {i+1}/{len(documents)} 个片段 (用时: {elapsed:.1f}s)")

    # 3. Interactive query loop.
    print(f"\n{'='*50}")
    print(f"🔍 交互查询模式".center(50))
    print(f"{'='*50}")
    print("输入查询内容并按回车键开始检索")
    print("输入 exit 或 quit 退出程序\n")

    while True:
        user_query = input("请输入查询内容: ").strip()

        if user_query.lower() in ['exit', 'quit']:
            print("\n🛑 查询程序已终止")
            break

        if not user_query:
            print("⚠️ 查询内容不能为空，请重新输入\n")
            continue

        print(f"\n⏳ 处理查询: '{user_query}'...")

        # Embed the query text.
        query_embedding = get_embedding(user_query)

        if query_embedding is None:
            print("❌ 无法计算查询的嵌入向量，请检查API连接\n")
            continue

        # Retrieve the most relevant contexts.
        context_results = find_top_contexts(query_embedding, documents)

        # Build and display the final prompt.
        print("\n" + "=" * 80)
        print(f"✨ 查询结果: {user_query}")
        print("=" * 80)
        final_prompt = build_quality_prompt(user_query, context_results)
        print(final_prompt)
        RED = "\033[31m"
        RESET = "\033[0m"  # reset terminal color

        # Diagnostics.
        print("\n📊 上下文匹配报告:")
        if context_results:
            print(f"- 找到 {len(context_results)} 个相关片段")
            # Highlight the query word in red.
            highlighted_query = f"{RED}{user_query}{RESET}"
            print(f"- 最佳匹配相似度: {context_results[0]['similarity']:.1%} (查询词: {highlighted_query})")

            # Show the first context that literally contains the query.
            has_query_keyword = False
            for ctx in context_results:
                if user_query in ctx['content']:
                    has_query_keyword = True
                    highlighted_content = ctx['content'].replace(
                        user_query,
                        f"{RED}{user_query}{RESET}"
                    )
                    print(f"\n🔍 包含查询词的上下文 (来源: {ctx['source']}):")
                    print(highlighted_content[:500])  # only the first 500 chars
                    break

            if not has_query_keyword:
                print(f"- 上下文{RED}不包含{RESET}查询词 '{user_query}'")
        else:
            # BUG FIX: this "no context" message was previously chained to
            # the has_query_keyword check, so it printed after successful
            # matches, and has_query_keyword could be referenced while
            # unbound (NameError) when no context was found.
            print("⚠️ 未找到任何相关上下文")
            print("可能原因: ")
            print("1. 文档中缺少相关信息")
            print("2. 嵌入计算或相似度匹配失败")

        print("\n" + "=" * 80)
        print("输入下一个查询或输入 exit 退出\n")
##################################################process#################################################################

def read_all_txt_files(data_dir: str) -> Dict[str, str]:
    """Read every .txt file in *data_dir* and return {filename: content}.

    Files are decoded as UTF-8 first, falling back to GBK; files that
    cannot be read are skipped with a console message. Returns an empty
    dict when the directory is missing, not a directory, or has no txt
    files.
    """
    contents: Dict[str, str] = {}

    try:
        directory = Path(data_dir)

        # Guard clauses: bail out early on a bad path.
        if not directory.exists():
            print(f"错误：目录 '{data_dir}' 不存在")
            return contents
        if not directory.is_dir():
            print(f"错误：'{data_dir}' 不是一个目录")
            return contents

        found = list(directory.glob("*.txt"))
        if not found:
            print(f"在目录 '{data_dir}' 中没有找到任何 txt 文件")
            return contents

        print(f"在目录 '{data_dir}' 中找到 {len(found)} 个 txt 文件:")

        for path in found:
            # Try UTF-8 first; on a decode error retry with GBK.
            try:
                text = path.read_text(encoding='utf-8')
            except UnicodeDecodeError:
                try:
                    text = path.read_text(encoding='gbk')
                except Exception as e:
                    print(f"  ✗ 读取文件失败: {path.name} - {e}")
                    continue
                contents[path.name] = text
                print(f"  ✓ 成功读取 (GBK编码): {path.name} ({len(text)} 字符)")
                continue
            except Exception as e:
                print(f"  ✗ 读取文件失败: {path.name} - {e}")
                continue
            contents[path.name] = text
            print(f"  ✓ 成功读取: {path.name} ({len(text)} 字符)")

        return contents

    except Exception as e:
        print(f"读取目录时出错: {e}")
        return contents

def read_specific_txt_file(file_path: str) -> Optional[str]:
    """Read a single txt file, print a short summary, and return its content.

    Decodes as UTF-8, falling back to GBK on a decode error. Returns
    None when the file is missing or cannot be read.
    """
    try:
        target = Path(file_path)
        if not target.exists():
            print(f"错误：文件 '{target}' 不存在")
            return None

        # Human-readable size: bytes, KB, or MB depending on magnitude.
        size_bytes = os.path.getsize(target)
        if size_bytes > 1024 * 1024:
            size_str = f"{size_bytes/(1024 * 1024):.2f} MB"
        elif size_bytes > 1024:
            size_str = f"{size_bytes/1024:.2f} KB"
        else:
            size_str = f"{size_bytes} 字节"

        with open(target, 'r', encoding='utf-8') as f:
            content = f.read()
        print(f"📄 文件: {target.name}")
        print(f"📏 大小: {size_str}")
        print(f"📝 字符数: {len(content)}")
        return content

    except UnicodeDecodeError:
        # Not valid UTF-8 — retry with GBK (common for Chinese text files).
        try:
            with open(target, 'r', encoding='gbk') as f:
                content = f.read()
            print(f"📄 文件: {target.name}")
            print(f"📏 大小: {size_str}")
            print(f"📝 字符数: {len(content)}")
            print(f"🔠 编码: GBK")
            return content
        except Exception as e:
            print(f"读取文件失败: {e}")
            return None
    except Exception as e:
        print(f"读取文件失败: {e}")
        return None

def get_txt_files_list(data_dir: str) -> List[str]:
    """Return the names of all .txt files directly inside *data_dir*.

    Returns an empty list when the directory does not exist or an error
    occurs while listing it.
    """
    try:
        directory = Path(data_dir)
        if not directory.exists():
            print(f"错误：目录 '{data_dir}' 不存在")
            return []
        return [entry.name for entry in directory.glob("*.txt")]
    except Exception as e:
        print(f"获取文件列表时出错: {e}")
        return []

def process():
    """Interactive console tool for browsing the .txt files in a directory.

    Prompts for a directory name, then loops on a menu:
      1. list the txt files, 2. read one file, 3. read them all, 4. quit.
    """
    print("=" * 60)
    print("📁 TXT 文件读取工具")
    print("=" * 60)

    # Directory to browse; all menu actions operate on this path.
    data_dir = input("请输入要读取的目录名称: ").strip()

    # Main menu loop — runs until the user picks option 4.
    while True:
        print("\n" + "=" * 60)
        print("🔍 主菜单")
        print("=" * 60)
        print("1. 查看文件列表")
        print("2. 读取特定文件")
        print("3. 读取所有文件")
        print("4. 退出程序")

        choice = input("\n请选择操作 (1/2/3/4): ").strip()

        if choice == "1":
            # List the txt files in the directory.
            file_list = get_txt_files_list(data_dir)
            if file_list:
                print(f"\n在 '{data_dir}' 中找到 {len(file_list)} 个 txt 文件:")
                for file in file_list:
                    print(f"  - {file}")
            else:
                print(f"在 '{data_dir}' 中没有找到 txt 文件")

        elif choice == "2":
            # Read one specific file, with a 500-character preview first.
            file_name = input("\n请输入要读取的文件名: ").strip()
            if not file_name:
                print("文件名不能为空")
                continue

            file_path = os.path.join(data_dir, file_name)
            content = read_specific_txt_file(file_path)

            if content:
                print("\n" + "=" * 60)
                print("📄 文件内容预览:")
                print("=" * 60)

                preview = content[:500]
                print(preview)

                if len(content) > 500:
                    print(f"... (共 {len(content)} 字符)")

                # Offer to dump the full content after the preview.
                view_full = input("\n是否查看完整内容? (y/n): ").strip().lower()
                if view_full == 'y':
                    print("\n" + "=" * 60)
                    print("📄 完整文件内容:")
                    print("=" * 60)
                    print(content)
                    print("=" * 60)

        elif choice == "3":
            # Read every txt file and show a per-file summary.
            print(f"\n正在读取 '{data_dir}' 目录中的所有 txt 文件...")
            all_content = read_all_txt_files(data_dir)

            if all_content:
                print("\n" + "=" * 60)
                print("📊 文件摘要")
                print("=" * 60)
                print(f"共读取 {len(all_content)} 个文件:")

                for filename, content in all_content.items():
                    # BUGFIX: previously printed the literal "(unknown)"
                    # instead of the actual filename.
                    print(f"\n📄 文件: {filename}")
                    print(f"📏 字符数: {len(content)}")

                    # First 100 characters as a one-line preview.
                    preview = content[:100].replace('\n', '\\n')
                    if len(content) > 100:
                        preview += "..."
                    print(f"📝 内容预览: {preview}")
            else:
                print("没有读取到任何文件内容")

        elif choice == "4":
            print("\n感谢使用，程序结束!")
            break

        else:
            print("\n⚠️ 无效选择，请重新输入")

        # Separator between menu rounds.
        print("\n" + "-" * 60)
# OCR support: image text recognition via the GLM-4V multimodal API


class ImageOCR:
    """Image OCR via Zhipu's GLM-4V multimodal chat-completions API."""

    # Map file suffixes to MIME types for the data-URL payload.
    # Unknown suffixes fall back to JPEG (the previous hard-coded value),
    # so existing callers see no behavior change for .jpg inputs.
    _MIME_TYPES = {
        ".png": "image/png",
        ".jpg": "image/jpeg",
        ".jpeg": "image/jpeg",
        ".gif": "image/gif",
        ".bmp": "image/bmp",
        ".webp": "image/webp",
    }

    def __init__(self, api_key: str):
        # Bearer token for the GLM API.
        self.api_key = api_key
        self.base_url = "https://open.bigmodel.cn/api/paas/v4/chat/completions"

    def recognize_image(self, image_path: str) -> Optional[str]:
        """
        Recognize the text in an image using the GLM-4V model.

        Args:
            image_path (str): Path to the image file.

        Returns:
            Optional[str]: The recognized text, or None on any failure
            (missing file, HTTP error, timeout, malformed response).
        """
        try:
            # Fail fast on a missing file before doing any work.
            if not os.path.exists(image_path):
                print(f"错误：图片文件 '{image_path}' 不存在")
                return None

            # Read the image and encode it as base64 for the data URL.
            with open(image_path, "rb") as image_file:
                base64_image = base64.b64encode(image_file.read()).decode('utf-8')

            # FIX: pick the MIME type from the file suffix instead of
            # always claiming image/jpeg for every upload.
            mime = self._MIME_TYPES.get(Path(image_path).suffix.lower(), "image/jpeg")

            headers = {
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json",
                "Accept": "application/json"
            }

            # Chat-completions payload: one user turn with a text prompt
            # plus the inline image.
            payload = {
                "model": "glm-4v",
                "messages": [
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": "请识别图片中的所有文字内容，包括印刷体和手写体，保持原始格式和排版"},
                            {
                                "type": "image_url",
                                "image_url": {"url": f"data:{mime};base64,{base64_image}"}
                            }
                        ]
                    }
                ],
                "max_tokens": 2000
            }

            response = requests.post(
                self.base_url,
                headers=headers,
                json=payload,
                timeout=30  # seconds; keeps a dead endpoint from hanging us
            )

            if response.status_code != 200:
                print(f"API请求失败: {response.status_code}")
                print(f"错误信息: {response.text}")
                return None

            # Defensive parse: the API should return at least one choice.
            result = response.json()
            if 'choices' not in result or len(result['choices']) == 0:
                print("API响应格式错误")
                print(f"完整响应: {json.dumps(result, indent=2, ensure_ascii=False)}")
                return None

            content = result['choices'][0]['message']['content']
            return content

        except requests.exceptions.Timeout:
            print("API请求超时")
            return None
        except requests.exceptions.RequestException as e:
            print(f"网络请求错误: {str(e)}")
            return None
        except Exception as e:
            print(f"OCR处理出错: {str(e)}")
            return None

    def save_ocr_result(self, image_path: str, output_dir: str = "knowledge") -> Optional[str]:
        """
        Recognize the text in an image and save it as a .txt file.

        Args:
            image_path (str): Path to the image file.
            output_dir (str): Output directory (default "knowledge");
                created if it does not exist.

        Returns:
            Optional[str]: Path of the written text file, or None on failure.
        """
        text_content = self.recognize_image(image_path)
        if not text_content:
            print("OCR识别失败")
            return None

        os.makedirs(output_dir, exist_ok=True)

        # The output file shares the image's base name, with a .txt suffix.
        txt_name = Path(image_path).stem + ".txt"
        output_path = os.path.join(output_dir, txt_name)

        try:
            with open(output_path, "w", encoding="utf-8") as f:
                f.write(text_content)
            print(f"✅ OCR结果已保存至: {output_path}")
            return output_path
        except Exception as e:
            print(f"保存OCR结果失败: {str(e)}")
            return None

# Usage example: interactive OCR demo
def ocr():
    """Interactive OCR demo: prompt for an image path, recognize, save.

    Reads the API key from the GLM_API_KEY environment variable (loaded
    from .env by load_dotenv at import time) and aborts with a message
    if it is not set.
    """
    # SECURITY FIX: the API key used to be hard-coded in this file;
    # read it from the environment instead.
    api_key = os.getenv("GLM_API_KEY")
    if not api_key:
        print("❌ 未设置GLM_API_KEY环境变量，无法执行OCR")
        return

    # Renamed local (was `ocr`, which shadowed this function's own name).
    processor = ImageOCR(api_key)

    # Fixed prompt string (was missing the opening parenthesis).
    test_image = input("请输入图片名称(例如 test.jpg):")

    print("🔄 正在识别图片文字...")
    text = processor.recognize_image(test_image)

    if text:
        print("\n✅ OCR识别结果:")
        print("=" * 60)
        print(text)
        print("=" * 60)

        result_path = processor.save_ocr_result(test_image)
        if result_path:
            print(f"💾 结果已保存至: {result_path}")
    else:
        print("❌ OCR识别失败")

# ==================== FastAPI application setup ====================
# Create the FastAPI application that serves the chat/OCR/history endpoints.
app = FastAPI(
    title="AI聊天助手后端服务",
    description="为前端聊天助手提供API支持",
    version="1.0.0",
    docs_url="/docs",  # Swagger UI path
    openapi_url="/openapi.json"  # OpenAPI schema path
)

# Allow cross-origin requests from any frontend.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# very permissive — confirm this is acceptable for the deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Global components: read GLM_API_KEY from .env, then build shared clients.
load_dotenv()
api_key = os.getenv("GLM_API_KEY")
ocr_processor = ImageOCR(api_key) if api_key else None  # None => OCR mock mode
client = create_client()  # chat client; create_client is defined earlier in this file

# Warn when OCR will run in mock mode (logger is configured earlier in the file).
if not api_key:
    logger.warning("未设置GLM_API_KEY环境变量，图片OCR功能使用模拟模式")

# In-memory placeholder knowledge base used by the /chat RAG prompt.
knowledge = [
    {"path": "知识库1", "content": "这里是知识库1的内容..."},
    {"path": "知识库2", "content": "这里是知识库2的内容..."}
]

# ==================== API端点定义 ====================
@app.get("/")
async def root():
    """健康检查和基本服务信息"""
    return {
        "status": "success",
        "service": "AI聊天助手后端服务",
        "version": "1.0.0",
        "endpoints": {
            "chat": "/chat",
            "histories": "/histories",
            "load_history": "/load-history",
            "ocr": "/ocr",
            "docs": "/docs"
        }
    }

@app.post("/chat")
async def chat_endpoint(request: Request):
    """
    处理聊天请求 - 调用真实AI模型
    
    请求JSON格式:
    {
        "user_input": "你好",
        "history": [
            {"role": "user", "content": "你好"},
            {"role": "assistant", "content": "你好！有什么可以帮您的？"}
        ]
    }
    """
    try:
        # 解析请求体
        request_data = await request.json()
        user_input = request_data.get("user_input", "").strip()
        history = request_data.get("history", [])
        
        if not user_input:
            raise HTTPException(
                status_code=400, 
                detail="user_input字段不能为空"
            )
        
        logger.info(f"收到聊天请求: '{user_input}'")
        
        # 构建RAG提示
        rag_prompt = build_rag_prompt(user_input, knowledge, history)
        
        # 调用真实AI模型
        ai_response = stream_chat(client, rag_prompt)
        
        if not ai_response:
            raise HTTPException(
                status_code=500,
                detail="AI模型调用失败"
            )
        
        return {
            "status": "success",
            "response": ai_response
        }
    
    except Exception as e:
        logger.error(f"聊天处理错误: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=500, 
            detail=f"内部服务器错误: {str(e)}"
        )

@app.post("/ocr")
async def ocr_endpoint(image: UploadFile = File(...)):
    """处理图片OCR识别请求"""
    try:
        # 确保是图片文件
        valid_extensions = ('.png', '.jpg', '.jpeg', '.gif', '.bmp')
        if not any(image.filename.lower().endswith(ext) for ext in valid_extensions):
            raise HTTPException(
                status_code=400,
                detail=f"仅支持以下图片格式: {', '.join(valid_extensions)}"
            )
            
        logger.info(f"接收OCR请求: {image.filename}")
        
        # 创建临时目录
        temp_dir = "temp_uploads"
        os.makedirs(temp_dir, exist_ok=True)
        
        # 保存文件
        file_path = os.path.join(temp_dir, image.filename)
        with open(file_path, "wb") as buffer:
            content = await image.read()
            buffer.write(content)
            
        # OCR处理
        if ocr_processor:
            text = ocr_processor.recognize_image(file_path)
            if not text:
                text = "OCR识别失败"
        else:
            # 如果没有OCR处理器，使用模拟结果
            text = f"模拟OCR结果 - 缺少API密钥\n文件: {image.filename}\n大小: {len(content)}字节"
        
        # 清理临时文件
        os.remove(file_path)
        
        return {
            "status": "success",
            "filename": image.filename,
            "text": text
        }
        
    except Exception as e:
        logger.error(f"OCR处理错误: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=500, 
            detail=f"OCR处理失败: {str(e)}"
        )

# ==================== 其他端点 ====================
@app.get("/histories")
async def list_histories_endpoint():
    """列出所有历史记录"""
    # 示例实现
    return {
        "status": "success",
        "histories": [
            {"id": 1, "filename": "history_20230718.json", "size": "24KB"},
            {"id": 2, "filename": "history_20230717.json", "size": "18KB"}
        ]
    }

@app.post("/load-history")
async def load_history_endpoint(request: Request):
    """
    加载指定的历史记录
    
    请求JSON格式:
    {
        "ids": [1, 2]
    }
    """
    try:
        # 解析请求体
        request_data = await request.json()
        history_ids = request_data.get("ids", [])
        
        if not history_ids:
            raise HTTPException(
                status_code=400, 
                detail="至少需要一个有效的history_id"
            )
            
        logger.info(f"加载历史记录请求: {history_ids}")
        
        # 示例实现
        loaded_messages = [
            {"role": "system", "content": "模拟的合并历史记录"},
            {"role": "user", "content": "你好"},
            {"role": "assistant", "content": "你好！有什么可以帮您的？"}
        ]
        
        return {
            "status": "success",
            "message": f"已加载 {len(history_ids)} 个历史记录",
            "messages": loaded_messages
        }
        
    except Exception as e:
        logger.error(f"加载历史记录错误: {str(e)}", exc_info=True)
        raise HTTPException(
            status_code=500, 
            detail=f"加载历史记录失败: {str(e)}"
        )

# ==================== 服务启动函数 ====================
def run_service():
    """Configure and run the Uvicorn server hosting the FastAPI app (blocking)."""
    host, port = "0.0.0.0", 8000

    logger.info(f"Uvicorn运行在 http://{host}:{port}")
    logger.info(f"API文档: http://{host}:{port}/docs")

    # Build the server from an explicit Config so reload stays disabled.
    server = uvicorn.Server(
        uvicorn.Config(
            app,
            host=host,
            port=port,
            log_level="info",
            reload=False
        )
    )
    server.run()

if __name__ == "__main__":
    # 启动服务
    run_service()