import json
import os#文件系统，进程，环境变量，posix的封装
import time#time低级操作
import glob#通配符
from datetime import datetime#time类型的高级操作
from openai import OpenAI
from dotenv import load_dotenv#.env文件的使用
import requests#crawl爬取
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.edge.service import Service as EdgeService
from selenium.webdriver.edge.options import Options as EdgeOptions
import re
import logging
import random#随机数
import lxml
try:
    from webdriver_manager.microsoft import EdgeChromiumDriverManager
except ImportError:
    print("⚠️ 缺少webdriver-manager库，请先执行: pip install webdriver-manager")
    exit(1)
'''
import atexit
@atexit.register
def cleanup():
    os.system("taskkill /f /im msedge.exe")  # Windows系统强制终止Edge进程    
'''

# Initialize logging: INFO level, written to both chat_system.log and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('chat_system.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Load environment variables from the .env file (API_KEY / API_URL).
load_dotenv()

# Constant directory paths.
HISTORY_DIR = "chat_histories"  # saved conversation-history JSON files
#KNOWLEDGE_DIR = "knowledge_base"  # alternative standalone knowledge directory
CRAWLER_DIR = "crawled_data"  # storage directory for crawled page content
KNOWLEDGE_DIR = CRAWLER_DIR  # the knowledge base is fed from the crawl output
class WebCrawler:
    """Crawls web pages: static pages via ``requests``, dynamic pages via headless Edge.

    Extracted text is cleaned and persisted as UTF-8 ``.txt`` files under
    ``output_dir`` so it can later be loaded as knowledge-base content.
    """

    def __init__(self, output_dir=CRAWLER_DIR):
        """
        :param output_dir: directory where crawled page text files are written
        """
        self.output_dir = output_dir
        os.makedirs(output_dir, exist_ok=True)
        self.driver = None  # Edge WebDriver, created lazily by _init_driver
        # Rotated User-Agent strings to lower the chance of bot detection.
        self.user_agents = [
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36...",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)...",
            "Mozilla/5.0 (iPhone; CPU iPhone OS 16_0 like Mac OS X)..."
        ]

    def _init_driver(self):
        """Lazily create the headless Edge WebDriver. Returns True when ready."""
        if self.driver is None:
            edge_options = EdgeOptions()
            # Hardened headless-mode configuration.
            edge_options.add_argument('--headless=new')
            edge_options.add_argument('--disable-gpu')
            edge_options.add_argument('--log-level=3')
            edge_options.add_argument('--ignore-certificate-errors')
            edge_options.add_argument('--allow-running-insecure-content')
            # Hide the "automation controlled" fingerprint from page scripts.
            edge_options.add_argument('--disable-blink-features=AutomationControlled')
            edge_options.add_experimental_option("excludeSwitches", ["enable-automation"])

            # Random User-Agent plus a fixed desktop window size.
            edge_options.add_argument(f'--user-agent={random.choice(self.user_agents)}')
            edge_options.add_argument('--window-size=1920,1080')

            # Start the driver silently; discard driver logs.
            service = EdgeService(
                EdgeChromiumDriverManager().install(),
                service_args=["--silent"],
                log_path=os.devnull  # suppress the driver log file
            )
            self.driver = webdriver.Edge(service=service, options=edge_options)
        return True

    def _get_dynamic_content(self, url, timeout=20):
        """Render *url* in headless Edge and return the page HTML.

        Falls back to a plain HTTP GET when rendering fails; returns None if
        both strategies fail. The driver is always torn down afterwards.

        :param url: page to render
        :param timeout: seconds to wait for the DOM-ready condition
        """
        if not self._init_driver():
            return None

        try:
            self.driver.get(url)

            # Composite wait: DOM readyState complete AND a <body> element present.
            WebDriverWait(self.driver, timeout).until(
                lambda d: d.execute_script("return document.readyState") == "complete" and
                         len(d.find_elements(By.TAG_NAME, 'body')) > 0
            )

            # Scroll the page in thirds with random pauses to mimic a human
            # reader and give lazy-loaded content a chance to appear.
            for i in range(3):
                scroll_height = self.driver.execute_script(
                    "return Math.min(document.body.scrollHeight, "
                    "document.documentElement.scrollHeight)")
                self.driver.execute_script(
                    f"window.scrollTo(0, {scroll_height * (i+1)/3})")
                time.sleep(random.uniform(0.5, 1.5))

            # Second check: make sure some text content actually rendered.
            WebDriverWait(self.driver, 5).until(
                EC.presence_of_element_located((By.XPATH, "//*[text()]"))
            )

            return self.driver.page_source

        except Exception as e:
            logger.error(f"动态加载失败: {str(e)}")
            # Last resort: plain HTTP request without JS rendering.
            try:
                return requests.get(url, verify=False, timeout=10).text
            except Exception:  # narrowed from a bare except; still best-effort
                return None
        finally:
            # Always release the browser so Edge processes don't accumulate.
            if self.driver:
                self.driver.quit()
                self.driver = None

    def _clean_text(self, text):
        """Collapse runs of whitespace and strip control characters."""
        cleaned = re.sub(r'\s+', ' ', text).strip()
        # Remove invisible control characters (C0 and C1 ranges).
        return re.sub(r'[\x00-\x1f\x7f-\x9f]', '', cleaned)

    def crawl_page(self, url, is_dynamic=False, selector=None):
        """
        Crawl a page and save the extracted text under ``output_dir``.

        :param url: target URL
        :param is_dynamic: True to render via Selenium, False for a plain HTTP GET
        :param selector: optional CSS selector, or an XPath (must start with '/')
        :return: dict with "status" plus "path"/"content" on success or "reason" on failure
        """
        try:
            # Fetch the raw HTML.
            if is_dynamic:
                html = self._get_dynamic_content(url)
            else:
                response = requests.get(url, headers={
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
                }, timeout=10)
                html = response.text if response.status_code == 200 else None

            if not html:
                return {"status": "failed", "reason": "页面获取失败"}

            soup = BeautifulSoup(html, 'html.parser')

            # Extract via the selector when given, otherwise take all page text.
            if selector:
                if selector.startswith('/'):  # XPath
                    # BUG FIX: `from lxml import html` used to shadow the local
                    # `html` variable that holds the page source, so
                    # `html.fromstring(html)` crashed. Import under an alias
                    # so the original HTML string is parsed.
                    from lxml import html as lxml_html
                    tree = lxml_html.fromstring(html)
                    elements = tree.xpath(selector)
                    content = '\n'.join([self._clean_text(e.text_content()) for e in elements if e.text_content()])
                else:  # CSS selector
                    elements = soup.select(selector)
                    content = '\n'.join([self._clean_text(e.get_text()) for e in elements if e.get_text()])
            else:
                content = self._clean_text(soup.get_text())

            # Build a filename from the domain and a timestamp, then save.
            domain = re.sub(r'[^\w-]', '_', url.split('//')[-1].split('/')[0])
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"{domain}_{timestamp}.txt"
            filepath = os.path.join(self.output_dir, filename)

            with open(filepath, 'w', encoding='utf-8') as f:
                f.write(f"URL: {url}\n\n{content}")

            return {
                "status": "success",
                "path": filepath,
                # Preview only: truncate long content to 1000 characters.
                "content": (content[:1000] + "...") if len(content) > 1000 else content
            }

        except Exception as e:
            return {"status": "error", "reason": str(e)}
        
class HistoryManager:
    """Manages chat-history JSON files; supports loading and merging multiple files."""

    def __init__(self, history_dir=HISTORY_DIR):
        """
        :param history_dir: directory holding the history ``.json`` files
        """
        self.history_dir = history_dir
        os.makedirs(history_dir, exist_ok=True)  # create the directory if absent

    def list_histories(self, limit=10):
        """List history files, newest first, with 1-based display IDs.

        ``latest.json`` (the auto-updated mirror of the most recent session)
        is excluded. Returns at most *limit* entries.
        """
        files = []
        for fname in os.listdir(self.history_dir):
            if fname.endswith(".json") and fname != "latest.json":
                file_path = os.path.join(self.history_dir, fname)
                files.append({
                    "filename": fname,
                    "path": file_path,
                    "mtime": os.path.getmtime(file_path)  # modification time
                })

        # Sort newest first, THEN assign the IDs.
        # BUG FIX: IDs used to be assigned in os.listdir order before sorting,
        # so the numbers shown by `list` did not match the positions that the
        # `load N` command indexes into (files[id-1]).
        files.sort(key=lambda x: x["mtime"], reverse=True)
        for index, entry in enumerate(files, start=1):
            entry["id"] = index
        return files[:limit]

    def load_history(self, filename):
        """Load one history file; return the parsed message list or None on failure."""
        file_path = os.path.join(self.history_dir, filename)
        if os.path.exists(file_path):
            try:
                with open(file_path, 'r', encoding='utf-8') as f:  # auto-closes
                    return json.load(f)  # JSON array -> Python list of dicts
            except Exception as e:
                # BUG FIX: the message printed the literal "(unknown)" instead
                # of the name of the file that failed to load.
                print(f"⚠️ 加载历史记录失败: {filename} - {e}")
        return None

    def load_multiple_histories(self, filenames):
        """Load several history files and concatenate them into one message list.

        After the first file, a leading "system" message in any subsequent
        file is skipped to avoid duplicated system prompts.
        """
        merged_history = []
        for filename in filenames:
            history = self.load_history(filename)
            if history:
                if merged_history and history[0].get("role") == "system":
                    merged_history.extend(history[1:])  # drop duplicate system message
                else:
                    merged_history.extend(history)
        return merged_history

    def save_history(self, messages, custom_name=None):
        """Save *messages* to a timestamped JSON file (or *custom_name*).

        Also mirrors the content into ``latest.json`` so the most recent
        session can be auto-restored on startup. Returns the saved file path.
        """
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = custom_name or f"history_{timestamp}.json"
        filepath = os.path.join(self.history_dir, filename)

        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(messages, f, ensure_ascii=False, indent=4)

        # Overwrite latest.json with the same content.
        latest_path = os.path.join(self.history_dir, "latest.json")
        with open(latest_path, 'w', encoding='utf-8') as f:
            json.dump(messages, f, ensure_ascii=False, indent=4)

        return filepath

class KnowledgeLoader:
    """Loads text-like files from the knowledge directory into memory."""

    def __init__(self, knowledge_dir=KNOWLEDGE_DIR):
        """
        :param knowledge_dir: root directory searched (recursively) for knowledge files
        """
        self.knowledge_dir = knowledge_dir
        os.makedirs(knowledge_dir, exist_ok=True)  # ensure the directory exists

    def load(self):
        """Return a list of ``{"path": basename, "content": text}`` entries.

        Searches recursively for .txt/.md/.csv/.json files; files larger
        than 100 KB are truncated to their first 10 KB. Unreadable files are
        skipped with a warning.
        """
        entries = []
        for extension in ('.txt', '.md', '.csv', '.json'):
            search_pattern = os.path.join(self.knowledge_dir, '**', f'*{extension}')
            for path in glob.glob(search_pattern, recursive=True):
                if not os.path.isfile(path):  # skip directories that match
                    continue
                try:
                    oversized = os.path.getsize(path) > 102400
                    with open(path, 'r', encoding='utf-8', errors='ignore') as handle:
                        # Large files: read only the first 10 KB of text.
                        data = handle.read(10240) if oversized else handle.read()
                    entries.append({"path": os.path.basename(path), "content": data})
                except Exception as e:
                    print(f"⚠️ 文件读取失败: {path} - {e}")
        return entries

def create_client():
    """Create and connectivity-test an OpenAI-compatible API client.

    Tries credentials from environment variables first; on failure, falls
    back to hardcoded test credentials. Returns the client on success or
    None if both attempts fail.
    """
    try:
        # Debug output. SECURITY FIX: mask the key so the secret never lands
        # in the console or in captured logs.
        raw_key = os.getenv('API_KEY')
        masked_key = f"{raw_key[:6]}...{raw_key[-4:]}" if raw_key and len(raw_key) > 10 else "<unset>"
        print(f"当前环境变量: API_KEY={masked_key}, API_URL={os.getenv('API_URL')}")

        # Option 1: read credentials from the environment (.env file).
        client = OpenAI(
            api_key=os.getenv("API_KEY"),  # names must match the .env file
            base_url=os.getenv("API_URL")
        )

        client.models.list()  # probe API connectivity
        return client

    except Exception as e:
        print(f"⚠️ 环境变量加载失败: {e}")

        # Option 2: hardcoded credentials (temporary testing only).
        # SECURITY: a hardcoded API key committed to source control is a
        # credential leak — rotate this key and move it into .env.
        try:
            client = OpenAI(
                api_key="sk-uZy29fuiHwjQEHtJq35r5KQHDqYYMa3WNdVdvxo8ZoPyws4T",
                base_url="http://123.182.124.194:58000/v1"
            )
            client.models.list()
            print("✅ 使用硬编码密钥连接成功")
            return client
        except Exception as fallback_error:
            print(f"❌ 终极方案失败: {fallback_error}")
            return None

def stream_chat(client, messages):
    """Stream a chat completion to stdout and return the full reply text.

    :param client: OpenAI-compatible client with ``chat.completions.create``
    :param messages: message list to send to the model
    :return: the concatenated reply string, or None when the API call fails
    """
    try:
        print("\nAI助手正在思考...\n" + "-" * 40)

        stream = client.chat.completions.create(
            model="Qwen2.5-72B-Instruct",
            messages=messages,
            stream=True,
            temperature=0.7,
            max_tokens=2000
        )

        pieces = []
        print("AI助手: ", end='', flush=True)
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta is not None:
                print(delta, end='', flush=True)
                pieces.append(delta)
                time.sleep(0.01)  # tiny pause for a typewriter effect

        print("\n" + "-" * 40)
        return "".join(pieces)

    except Exception as e:
        print(f"\nAPI调用错误: {e}")
        return None

def build_rag_prompt(user_query, knowledge, history):
    """Assemble the RAG message list: system instruction + history + knowledge context.

    :param user_query: the user's current question
    :param knowledge: list of ``{"path", "content"}`` knowledge entries
    :param history: prior conversation messages
    :return: message list ready to send to the chat API
    """
    system_msg = {
        "role": "system",
        "content": "你是一个专业的问答助手，请严格根据提供的知识内容回答问题。"
    }

    # Each knowledge entry contributes at most its first 500 characters,
    # labeled with its source filename.
    sections = [f"【{entry['path']}】\n{entry['content'][:500]}" for entry in knowledge]
    knowledge_msg = {
        "role": "user",
        "content": "\n".join(sections) + f"\n\n【当前问题】{user_query}"
    }

    # System instruction first, then the running history, then the
    # knowledge-augmented question.
    return [system_msg] + history + [knowledge_msg]

def main():
    """Run the interactive chat CLI.

    Supports the commands list / load N / save / clear / reload / crawl / exit.
    Any other input becomes a RAG-augmented chat turn with a streamed model
    reply. Ctrl+C exits without saving (per the help text).
    """
    # ANSI escape codes used to highlight command names in the help text.
    ORANGE = '\033[38;5;214m'
    RESET = '\033[0m'
    
    print("=" * 60)
    print(ORANGE + "🤖 多历史文件加载聊天系统" + RESET)
    print("=" * 60)
    print("命令指南:")
    print(f"  {ORANGE}list{RESET}    - 列出历史对话")
    print(f"  {ORANGE}load N{RESET}  - 加载多个历史文件（如: load 1,3,5）")
    print(f"  {ORANGE}save{RESET}    - 保存当前对话")
    print(f"  {ORANGE}clear{RESET}   - 清空当前对话")
    print(f"  {ORANGE}reload{RESET}  - 重载知识库")
    print(f"  {ORANGE}exit{RESET}    - 退出程序")
    print(f"  {ORANGE}ctrl+C{RESET} - 对话不保存并退出")
    print(f"  {ORANGE}crawl{RESET}  - 爬取网页内容（格式: crawl <URL> [dynamic] [selector]）")
    print("=" * 60)
    
    client = create_client()
    if not client:
        print("无法创建客户端，程序退出")
        return
    
    # Initialize the helper managers.
    crawler = WebCrawler() 
    history_manager = HistoryManager()
    knowledge_loader = KnowledgeLoader()
    
    # Load the knowledge base from disk.
    knowledge = knowledge_loader.load()
    print(f"\n📚 已加载 {len(knowledge)} 个知识文件")
    
    # Seed the conversation with the system prompt.
    messages = [
        {"role": "system", "content": "你是一个友好且专业的AI助手，请用中文回答用户问题"}
    ]
    
    # Restore the most recent session if latest.json exists.
    latest_history = history_manager.load_history("latest.json")
    if latest_history:
        messages = latest_history  # replace the fresh session with the restored one
        print("\n🔄 已恢复上次对话")
    
    while True:
        try:
            user_input = input("\n👤 您: ").strip()
            if not user_input:
                continue
                
            # --- Command handling ---
            if user_input.lower() == 'exit':
                history_manager.save_history(messages)
                print("\n👋 对话已保存，再见！")
                break
                
            if user_input.lower() == 'list':
                files = history_manager.list_histories()
                if files:
                    print("\n📜 历史对话记录:")
                    for file in files:
                        print(f"  [{file['id']}] {file['filename']}")
                else:
                    print("\nℹ️ 无历史记录")
                continue
                
            if user_input.lower().startswith('load '):
                try:
                    # Parse comma-separated IDs, e.g. "load 1,3,5" -> [1, 3, 5]:
                    # user_input[5:] skips the "load " prefix, split(',') separates
                    # the IDs, int(id.strip()) converts each one to an integer.
                    ids = [int(id.strip()) for id in user_input[5:].split(',')]
                    files = history_manager.list_histories()
                    
                    # Map the display IDs back to filenames.
                    filenames = []
                    for id in ids:
                        if 1 <= id <= len(files):
                            filenames.append(files[id-1]["filename"])
                    
                    if filenames:
                        # Load and merge the selected history files.
                        merged_history = history_manager.load_multiple_histories(filenames)
                        messages = merged_history
                        print(f"\n✅ 已加载 {len(filenames)} 个历史文件")
                    else:
                        print("\n❌ 未找到匹配的历史文件")
                except:
                    print("\n⚠️ 命令格式: load <编号1>,<编号2>,...")
                continue
                
            if user_input.lower() == 'save':
                saved_path = history_manager.save_history(messages)
                print(f"\n💾 对话已保存至: {saved_path}")
                continue
                
            if user_input.lower() == 'clear':
                messages = [messages[0]]  # keep only the system prompt
                print("\n🗑️ 对话已清空")
                continue
                
            if user_input.lower() == 'reload':
                knowledge = knowledge_loader.load()
                print(f"\n🔄 已重载 {len(knowledge)} 个知识文件")
                continue
            
            # Crawl command: crawl <URL> [dynamic] [selector]
            if user_input.lower().startswith('crawl '):
                args = user_input[6:].split()
                if len(args) < 1:
                    print("\n⚠️ 格式: crawl <URL> [dynamic] [selector]")
                    continue
                
                url = args[0]
                is_dynamic = 'dynamic' in args[1:]
                # First extra argument that is not the 'dynamic' flag is the selector.
                selector = next((arg for arg in args[1:] if arg not in ('dynamic',)), None)
                
                # Announce before the (potentially slow) crawl starts.
                if is_dynamic:
                    print(f"\n🔄 准备爬取动态页面: {url}...")
                else:
                    print(f"\n🔄 准备爬取静态页面: {url}...")
                
                # Run the crawl.
                result = crawler.crawl_page(url, is_dynamic, selector)
                
                # Report the outcome.
                print(f"\n🔄 爬取结果: {result.get('status')}")
                if result.get('status') == 'success':
                    print(f"📁 保存路径: {result.get('path')}")
                    print(f"📝 内容预览: {result.get('content')}")
                else:
                    print(f"❌ 失败原因: {result.get('reason')}")
                continue
            
            # --- Normal chat turn ---
            messages.append({"role": "user", "content": user_input})
            
            # Build the RAG prompt and request a streamed reply.
            rag_prompt = build_rag_prompt(user_input, knowledge, messages)
            response = stream_chat(client, rag_prompt)
            
            if response:
                # Record the assistant's reply in the running history.
                messages.append({"role": "assistant", "content": response})
                
                # Cap the context: system prompt + the last 49 messages.
                if len(messages) > 50:
                    messages = [messages[0]] + messages[-49:]
            else:
                print("回复获取失败，请重试")
                messages.pop()  # drop the failed user message
        
        except KeyboardInterrupt:
            # Intentionally NOT saved on Ctrl+C (see the help text above).
            #history_manager.save_history(messages)
            break
        except Exception as e:
            print(f"\n⚠️ 系统错误: {e}")

# Script entry point.
if __name__ == "__main__":
    main()