# -*- coding: utf-8 -*-
"""
Main application for the intelligent text-processing and RAG query system.
"""

# Import the compatibility configuration (must run before all other imports).
try:
    import compatibility_config
except ImportError:
    # If the compatibility-config module is missing, apply the basic
    # settings manually: silence known noisy warnings and force UTF-8 I/O.
    import warnings
    import os
    warnings.filterwarnings("ignore", message=".*MINGW-W64.*")
    warnings.filterwarnings("ignore", message=".*invalid value encountered.*")
    warnings.filterwarnings("ignore", message=".*pkg_resources is deprecated.*")
    warnings.filterwarnings("ignore", message=".*Valid config keys have changed in V2.*")
    os.environ.setdefault('PYTHONIOENCODING', 'utf-8')

import os
import tempfile
import json
from datetime import datetime
from fastapi import FastAPI, File, UploadFile, HTTPException, Body, Cookie, Depends, Request
from fastapi.responses import JSONResponse, HTMLResponse, Response
from fastapi.staticfiles import StaticFiles
from rag_chain import RAGChain
from document_parser import DocumentParser
from session_manager import session_manager
from auth import get_auth_routes, check_permission, get_current_active_user
from admin import get_admin_routes
from utils import vectorizer, DEEPSEEK_API_KEY
from rate_limiter import setup_rate_limiting, rate_limit_for_user
from typing import Dict, Any

import argparse

# Parse command-line arguments.
# NOTE(review): parse_args() runs at import time, so importing this module
# with unrecognized CLI flags will make argparse exit — confirm this module
# is only ever run as a script, not imported by another entry point.
arg_parser = argparse.ArgumentParser(description="文本处理和向量化接口")
arg_parser.add_argument("--port", type=int, default=8000, help="服务器端口号")
args = arg_parser.parse_args()

# Create the FastAPI application.
app = FastAPI(title="文本处理和向量化接口", version="1.0")

# Configure API rate limiting.
setup_rate_limiting(app)

# Keep the port number for later use (consumed by the __main__ guard).
PORT = args.port

# Register the authentication routes.
get_auth_routes(app)

# Register the admin routes (returns the app it decorated).
app = get_admin_routes(app)

# Mount the static-files directory.
app.mount("/static", StaticFiles(directory="static"), name="static")

# Root route: serve the web UI page.
@app.get("/", response_class=HTMLResponse)
async def read_root():
    """Return the contents of index.html as the landing page."""
    with open("index.html", mode="r", encoding="utf-8") as page:
        html = page.read()
    return html

# Initialize the document parser.
doc_parser = DocumentParser()

# Initialize the RAG chain, reusing the existing TextVectorizer instance.
rag_chain = RAGChain(deepseek_api_key=DEEPSEEK_API_KEY, vectorizer=vectorizer)

# Endpoint: upload a document, parse it, vectorize it, and store it.
@app.post("/upload-document/")
@rate_limit_for_user
async def upload_and_parse_document(request: Request, file: UploadFile = File(...), current_user: Dict[str, Any] = Depends(get_current_active_user), permission: bool = Depends(check_permission("user:write"))):
    """
    Upload a document and parse its contents.

    - Accepts TXT, PDF and Word (.docx) uploads
    - Parses the document text
    - Vectorizes the text and stores it in the database
    - Returns the parsing result (counts, preview, metadata, vectorization info)

    Raises:
        HTTPException 400: missing filename or unsupported file type.
        HTTPException 500: any parsing/vectorization failure (also appended to error.log).
    """
    # Debug logging of the incoming request.
    print(f"请求路径: {request.url}")
    print(f"请求方法: {request.method}")
    print(f"请求头: {request.headers}")
    print(f"查询参数: {request.query_params}")
    print(f"路径参数: {request.path_params}")
    print(f"当前用户: {current_user}")

    # Validate the filename and extract its extension.
    if file.filename is None:
        raise HTTPException(status_code=400, detail="文件名不能为空")
    _, ext = os.path.splitext(file.filename)
    ext = ext.lower()

    # Only TXT, PDF and Word documents are supported.
    if ext not in ('.txt', '.pdf', '.docx'):
        raise HTTPException(status_code=400, detail="仅支持TXT、PDF和Word文档上传")

    try:
        # Read the raw upload bytes.
        contents = await file.read()
        print(f"成功读取文件: {file.filename}, 大小: {len(contents)} 字节")

        # Parse the file with the document parser.
        print(f"开始解析文件: {file.filename}")
        parsed_result = doc_parser.parse_document(file.filename, contents)
        text = parsed_result['text']
        doc_metadata = parsed_result['metadata']
        print(f"文件解析成功: {file.filename}, 文本长度: {len(text)} 字符")

        # Basic text statistics.
        line_count = len(text.splitlines())
        word_count = len(text.split())  # naive whitespace tokenization
        char_count = len(text)

        # Merge parser metadata with upload bookkeeping, then vectorize and store.
        metadata = {"source": file.filename, "type": "uploaded_file", **doc_metadata}
        print(f"开始向量化并存储文件: {file.filename}")
        vector_result = vectorizer.vectorize_and_store(text, metadata)
        print(f"文件向量化并存储成功: {file.filename}")
        print(f"当前文档数: {len(vectorizer.list_documents())}")
        print(f"当前文本块数: {len(vectorizer.text_chunks)}")
        print(f"当前元数据数: {len(vectorizer.metadata)}")

        # Return the parsing result.
        return JSONResponse(content={
            "filename": file.filename,
            "file_type": ext[1:],  # drop the leading dot
            "line_count": line_count,
            "word_count": word_count,
            "char_count": char_count,
            "preview": text[:200] + ("..." if len(text) > 200 else ""),  # first 200 chars
            "document_metadata": doc_metadata,
            "vectorization": vector_result,
            "current_document_count": len(vectorizer.list_documents())
        })

    except Exception as e:
        import traceback
        error_info = {
            "error_type": type(e).__name__,
            "error_message": str(e),
            "traceback": traceback.format_exc()
        }
        print(f"文件处理错误: {json.dumps(error_info, ensure_ascii=False)}")
        # BUGFIX: the previous `.replace('"', '\"')` was a no-op ('\"' == '"'),
        # so no quote escaping ever happened. FastAPI JSON-encodes `detail`
        # anyway, so only flatten newlines for a single-line message.
        error_message = str(e).replace('\n', ' ')
        # Append full error details to the log file.
        with open("error.log", "a", encoding="utf-8") as log_file:
            log_file.write(f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')} - 文件处理错误: {json.dumps(error_info, ensure_ascii=False)}\n")
        raise HTTPException(status_code=500, detail=f"文件解析错误: {error_message}")

# Legacy TXT upload endpoint, kept for backward compatibility.
@app.post("/upload-txt/")
@rate_limit_for_user
async def upload_and_parse_txt(request: Request, file: UploadFile = File(...), current_user: Dict[str, Any] = Depends(get_current_active_user), permission: bool = Depends(check_permission("user:write"))):
    """
    Upload a TXT file and parse its contents (legacy endpoint).

    Delegates to upload_and_parse_document; see that endpoint for details.
    """
    # BUGFIX: forward the already-resolved current_user/permission values.
    # On a direct Python call FastAPI does NOT re-resolve the inner Depends()
    # defaults, so omitting them passed raw Depends objects to the handler.
    return await upload_and_parse_document(request, file, current_user, permission)

# Endpoint: vectorize a raw text snippet and store it.
@app.post("/process-text")
@rate_limit_for_user
async def process_text(text: str = Body(..., embed=True), metadata: dict = Body(None, embed=True), current_user: Dict[str, Any] = Depends(get_current_active_user), permission: bool = Depends(check_permission("user:write"))):
    """
    Process a piece of text and store it in the vector database.
    - **text**: the text content to process
    - **metadata**: optional metadata attached to the stored chunks
    """
    try:
        # Delegate to the shared vectorizer and echo its result back.
        stored = vectorizer.vectorize_and_store(text, metadata)
        return JSONResponse(status_code=200, content=stored)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"处理文本时出错: {str(e)}")

# Endpoint: answer a question via the RAG chain, with session support.
@app.post("/rag-query")
@rate_limit_for_user
async def rag_query(request: Request, query: str = Body(..., embed=True), session_id: str = Cookie(None), current_user: Dict[str, Any] = Depends(get_current_active_user), permission: bool = Depends(check_permission("user:read"))):
    """
    Answer a user question with the RAG chain; supports multi-turn dialogue.
    - **query**: the user's question
    - **session_id**: session id (passed via cookie, optional)
    """
    try:
        # Reuse the session when it is still alive; otherwise start a new one.
        session_alive = bool(session_id) and bool(session_manager.get_session(session_id))
        if session_alive:
            print(f"使用现有会话: {session_id}")
        else:
            session_id = session_manager.create_session()
            print(f"创建新会话: {session_id}")

        # Run the query through the RAG chain, scoped to the session.
        result = rag_chain.run(query, session_id)

        # Build the response payload and attach the session cookie.
        payload = {
            "query": query,
            "answer": result["answer"],
            "references": result["references"],
            "session_id": session_id
        }
        response = JSONResponse(status_code=200, content=payload)
        response.set_cookie(key="session_id", value=session_id, httponly=True)
        return response
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"查询处理出错: {str(e)}")

# Endpoint: fetch the conversation history of the current session.
@app.get("/session-history")
@rate_limit_for_user
async def get_session_history(session_id: str = Cookie(None), current_user: Dict[str, Any] = Depends(get_current_active_user), permission: bool = Depends(check_permission("user:read"))):
    """
    Retrieve the session history.
    - **session_id**: session id (passed via cookie)
    """
    # Reject missing or expired sessions up front.
    session_alive = session_id and session_manager.get_session(session_id)
    if not session_alive:
        return JSONResponse(status_code=404, content={"detail": "会话不存在或已过期"})

    return JSONResponse(
        status_code=200,
        content={"history": session_manager.get_session_history(session_id)},
    )

# Endpoint: delete the current session and drop its cookie.
@app.delete("/session", dependencies=[Depends(check_permission("user:write"))])
async def clear_session(session_id: str = Cookie(None), current_user: Dict[str, Any] = Depends(get_current_active_user)):
    """
    Clear the session.
    - **session_id**: session id (passed via cookie)
    """
    # A session id is required to know what to delete.
    if not session_id:
        return JSONResponse(status_code=400, content={"detail": "未提供会话ID"})

    # Deletion fails when the session never existed or already expired.
    if not session_manager.delete_session(session_id):
        return JSONResponse(status_code=404, content={"detail": "会话不存在或已过期"})

    response = JSONResponse(status_code=200, content={"detail": "会话已清除"})
    response.delete_cookie(key="session_id")
    return response

# Endpoint: list every uploaded document.
@app.get("/documents", dependencies=[Depends(check_permission("user:read"))])
async def list_documents(current_user: Dict[str, Any] = Depends(get_current_active_user)):
    """
    List all uploaded documents.
    - Returns the document list with source, metadata and chunk counts.
    """
    try:
        all_docs = vectorizer.list_documents()
        return JSONResponse(content={
            "documents": all_docs,
            "total_count": len(all_docs)
        })
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取文档列表时出错: {str(e)}")

# Diagnostic endpoint: report document/chunk/metadata counts.
@app.get("/test/document-count")
async def test_document_count():
    """Report raw storage counts, used for diagnosing storage issues."""
    docs = vectorizer.list_documents()
    counts = {
        "document_count": len(docs),
        "documents": docs,
        "raw_metadata_count": len(vectorizer.metadata),
        "raw_text_chunks_count": len(vectorizer.text_chunks)
    }
    return counts

# Endpoint: delete the document identified by `source`.
@app.delete("/documents/{source}")
@rate_limit_for_user
async def delete_document(request: Request, source: str, current_user: Dict[str, Any] = Depends(get_current_active_user), permission: bool = Depends(check_permission("user:write"))):
    """
    Delete the document with the given source identifier.
    - **source**: document source identifier (arrives URL-encoded)
    """
    try:
        print(f"尝试删除文档: {source}")
        print(f"当前文档数量: {len(vectorizer.list_documents())}")
        print(f"原始向量数据: chunks={len(vectorizer.text_chunks)}, metadata={len(vectorizer.metadata)}")

        # The path parameter is URL-encoded; decode it before the lookup.
        import urllib.parse
        decoded_source = urllib.parse.unquote(source)
        print(f"解码后的source: {decoded_source}")

        result = vectorizer.delete_document(decoded_source)
        print(f"删除结果: {result}")

        # 200 on success, 404 when the document was not found.
        status = 200 if result["success"] else 404
        return JSONResponse(status_code=status, content=result)
    except Exception as e:
        import traceback
        error_info = {
            "error_type": type(e).__name__,
            "error_message": str(e),
            "traceback": traceback.format_exc(),
            "source": source
        }
        print(f"删除文档错误: {json.dumps(error_info, ensure_ascii=False)}")
        raise HTTPException(status_code=500, detail=f"删除文档时出错: {str(e)}")

# API information route
@app.get("/api-info")
async def api_info():
    """Describe the API and its main endpoints."""
    # Table-driven endpoint catalogue; expanded into dicts below.
    endpoint_specs = [
        ("/upload-document/", "POST", "上传文档并解析其内容，支持TXT、PDF和Word格式"),
        ("/upload-txt/", "POST", "上传TXT文件并解析其内容（兼容旧接口）"),
        ("/process-text", "POST", "处理文本并存储到向量数据库"),
        ("/rag-query", "POST", "使用RAG链回答用户问题"),
        ("/documents", "GET", "列出所有已上传的文档"),
        ("/documents/{source}", "DELETE", "删除指定source的文档"),
    ]
    return {
        "api_name": "文本处理和向量化接口",
        "version": "1.0",
        "endpoints": [
            {"path": path, "method": method, "description": desc}
            for path, method, desc in endpoint_specs
        ]
    }

if __name__ == "__main__":
    import uvicorn
    # Launch the ASGI server on all interfaces, using the --port CLI value.
    uvicorn.run(app, host="0.0.0.0", port=PORT)