from fastapi import FastAPI, UploadFile, File, Form, HTTPException, Query, Request
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from typing import List, Optional
import os
import json
import uuid
import logging
from pydantic import BaseModel
import uvicorn
from contextlib import asynccontextmanager
import time
import torch

# Configure logging: INFO level, timestamped format, stream (console) output only.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler()
    ]
)
logger = logging.getLogger("weaviate-app")

# Import our WeaviateDocumentManager and connection helpers (project-local module).
from weaviate_conn import WeaviateDocumentManager, WEAVIATE_URL, connect_to_weaviate, create_collection

# Shared document manager instance; populated during app startup in `lifespan`.
document_manager = None

# Application lifecycle management
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Startup/shutdown hook: open the Weaviate connection on startup and
    release it on shutdown. Populates the module-level `document_manager`."""
    global document_manager

    logger.info("应用程序启动中...")

    # Connect to the Weaviate server.
    weaviate_client = connect_to_weaviate(WEAVIATE_URL)

    # Make sure the backing collection exists before serving requests.
    target_collection = "DocumentAPI"
    create_collection(weaviate_client, target_collection)

    # Build the shared document manager used by every endpoint.
    document_manager = WeaviateDocumentManager(
        client=weaviate_client,
        collection_name=target_collection,
    )
    logger.info(f"文档管理器初始化完成，使用集合: {target_collection}")

    yield

    # Shutdown: close the Weaviate connection if it was established.
    if document_manager:
        logger.info("关闭Weaviate连接...")
        document_manager.close()
    logger.info("应用程序已关闭")

app = FastAPI(
    title="Weaviate文档管理系统", 
    description="使用FastAPI和Weaviate进行文档管理",
    lifespan=lifespan
)

# Serve static assets from the local "static" directory.
app.mount("/static", StaticFiles(directory="static"), name="static")

# HTML templates live in the local "templates" directory.
templates = Jinja2Templates(directory="templates")

# Request body models
class DocumentCreate(BaseModel):
    """Request body for creating a single document."""
    text: str  # document body text
    source: str  # origin label (e.g. a filename or URL)

class DocumentUpdate(BaseModel):
    """Request body for a partial document update; omitted (None) fields are left as-is."""
    text: Optional[str] = None
    source: Optional[str] = None

class SearchQuery(BaseModel):
    """Request body for /api/search."""
    query: str  # search text; "*" is treated as match-everything downstream
    limit: int = 10  # maximum number of hits to return
    search_type: str = "hybrid"  # one of "hybrid", "semantic", "keyword" (see search endpoint)

# API routes
@app.get("/")
async def read_root(request: Request):
    """Serve the front-end index page."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)

@app.post("/api/documents")
async def create_document(document: DocumentCreate):
    """Persist a single document and return its generated id."""
    try:
        new_id = document_manager.add_document(
            text=document.text,
            source=document.source,
        )
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"创建文档失败: {str(exc)}")
    return {"status": "success", "message": "文档创建成功", "id": new_id}

@app.post("/api/documents/batch")
async def create_documents(documents: List[DocumentCreate]):
    """Insert a batch of documents in a single manager call."""
    try:
        # Split the request bodies into the parallel lists the manager expects.
        texts: List[str] = []
        sources: List[str] = []
        for doc in documents:
            texts.append(doc.text)
            sources.append(doc.source)

        added = document_manager.add_documents(texts, sources)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"批量创建文档失败: {str(exc)}")
    return {"status": "success", "message": f"成功添加 {added} 个文档"}

@app.post("/api/documents/upload")
async def upload_documents(file: UploadFile = File(...)):
    """Upload a document file and index its contents.

    Supported formats:
    - JSON (an array of document objects)
    - EPUB
    - TXT
    - MD (Markdown)

    Raises:
        HTTPException 400: empty/invalid JSON, no extractable text, or an
            unsupported file extension.
        HTTPException 500: every insert attempt failed, or an unexpected error.
    """
    try:
        # The extension selects the parsing strategy below.
        filename = file.filename
        file_ext = os.path.splitext(filename)[1].lower() if filename else ""

        # Fix: messages previously contained the literal "(unknown)" instead
        # of the uploaded file's name.
        logger.info(f"正在上传文件: {filename} (类型: {file_ext})")

        # Persist the upload to a uniquely-named temp file so file-based
        # parsers can read it from disk.
        temp_file_path = f"temp_{uuid.uuid4()}{file_ext}"
        try:
            content = await file.read()
            with open(temp_file_path, "wb") as temp_file:
                temp_file.write(content)

            logger.info(f"文件 {filename} 已保存到临时位置: {temp_file_path}")

            if file_ext == ".json":
                texts, sources = _extract_json_documents(temp_file_path)
                logger.info(f"从JSON文件中提取了 {len(texts)} 个文档")

                _ensure_embedding_model()
                count, used_fallback = _add_documents_with_fallback(texts, sources, unit="文档")
                summary = f"{count}/{len(texts)}" if used_fallback else f"{count}"
                return {"status": "success", "message": f"成功上传 {summary} 个文档", "format": "json"}

            elif file_ext in (".epub", ".txt", ".md"):
                # Imported lazily so heavier parsing dependencies load only
                # when a non-JSON file is actually uploaded.
                from weaviate_conn import process_document

                logger.info(f"正在处理 {file_ext} 文件...")
                chunks = process_document(temp_file_path)
                if not chunks:
                    logger.error(f"无法从文件 {filename} 中提取文本")
                    raise HTTPException(status_code=400, detail=f"无法从文件 {filename} 中提取文本")

                logger.info(f"从 {filename} 中提取了 {len(chunks)} 个文本块")

                # Every chunk shares the uploaded file's name as its source.
                # Vectors are computed by the manager (the original call passed
                # vectors=None explicitly, which is the same behavior).
                sources = [filename] * len(chunks)

                _ensure_embedding_model()
                count, used_fallback = _add_documents_with_fallback(chunks, sources, unit="文档片段")
                summary = f"{count}/{len(chunks)}" if used_fallback else f"{count}"
                return {
                    "status": "success",
                    "message": f"成功从 {filename} 中提取并上传 {summary} 个文档片段",
                    "format": file_ext[1:],
                }

            else:
                logger.warning(f"不支持的文件格式: {file_ext}")
                raise HTTPException(status_code=400, detail=f"不支持的文件格式: {file_ext}。支持的格式: .json, .epub, .txt, .md")

        finally:
            # Always remove the temp file, even when parsing or indexing fails.
            if os.path.exists(temp_file_path):
                os.remove(temp_file_path)
                logger.info(f"临时文件已清理: {temp_file_path}")

    except json.JSONDecodeError as je:
        logger.error(f"无效的JSON文件: {filename}，错误: {str(je)}", exc_info=True)
        raise HTTPException(status_code=400, detail=f"无效的JSON文件: {str(je)}")
    except HTTPException:
        # Bug fix: without this clause the generic handler below rewrapped the
        # deliberate 400 responses raised above into opaque 500 errors.
        raise
    except Exception as e:
        logger.error(f"上传文档失败: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"上传文档失败: {str(e)}")


def _ensure_embedding_model():
    """Lazily load the embedding model on the shared document manager."""
    if not document_manager.embedding_model:
        logger.info("正在加载嵌入模型...")
        document_manager.load_embedding_model()


def _extract_json_documents(temp_file_path):
    """Parse an uploaded JSON file into parallel (texts, sources) lists.

    Tries UTF-8 first and falls back to GBK on decode errors. Accepts either
    a top-level array, or an object wrapping one under a well-known key
    ("documents", "items", "data", "docs", "records"); any other value is
    wrapped in a single-element list.

    Raises:
        HTTPException 400: the file is empty or yields no usable documents.
        json.JSONDecodeError: invalid JSON (propagates to the caller).
    """
    try:
        with open(temp_file_path, "r", encoding="utf-8") as f:
            content = f.read().strip()
        logger.info(f"JSON文件内容长度: {len(content)} 字节")
    except UnicodeDecodeError:
        # Not UTF-8 — retry with GBK (common for Chinese-language files).
        with open(temp_file_path, "r", encoding="gbk") as f:
            content = f.read().strip()

    if not content:
        logger.error("JSON文件为空")
        raise HTTPException(status_code=400, detail="上传的JSON文件为空")

    data = json.loads(content)
    logger.info(f"JSON数据类型: {type(data)}")

    if not isinstance(data, list):
        # A dict may wrap the document list under a well-known key.
        if isinstance(data, dict):
            for key in ("documents", "items", "data", "docs", "records"):
                if key in data and isinstance(data[key], list):
                    data = data[key]
                    logger.info(f"从JSON对象的 '{key}' 字段中提取文档列表")
                    break
        if not isinstance(data, list):
            logger.warning("JSON不是数组格式，尝试包装为数组")
            data = [data]

    texts = []
    sources = []
    for item in data:
        if not isinstance(item, dict):
            logger.error(f"JSON数组中的项不是对象: {item}")
            continue

        # Accept a range of common field names for text and source; the
        # first non-empty match wins.
        text = next((item[k] for k in ("text", "content", "body", "document", "doc") if item.get(k)), None)
        source = next((item[k] for k in ("source", "url", "path", "filename", "title", "id") if item.get(k)), None)

        if not text:
            logger.warning(f"JSON对象缺少文本字段: {item}")
            continue
        if not source:
            # Positional placeholder source when none was supplied.
            source = f"json_doc_{len(texts)}"

        texts.append(text)
        sources.append(source)

    if not texts:
        logger.error("没有从JSON中提取到有效文档")
        raise HTTPException(status_code=400, detail="JSON文件中没有有效的文档对象")

    return texts, sources


def _add_documents_with_fallback(texts, sources, unit="文档"):
    """Batch-insert documents; when the batch fails, retry one at a time.

    `unit` is the Chinese noun used in log/error messages ("文档" for whole
    documents, "文档片段" for chunks).

    Returns:
        (count, used_fallback): number inserted and whether the per-item
        fallback path was taken.

    Raises:
        HTTPException 500: the batch insert failed AND no individual insert
        succeeded either (the original batch error is included).
    """
    try:
        count = document_manager.add_documents(texts, sources)
        logger.info(f"成功添加 {count} 个{unit}到Weaviate")
        return count, False
    except Exception as add_error:
        logger.error(f"添加文档到Weaviate失败: {str(add_error)}", exc_info=True)
        count = 0
        for text, source in zip(texts, sources):
            try:
                if document_manager.add_document(text=text, source=source):
                    count += 1
            except Exception as item_error:
                logger.error(f"添加单个{unit}失败: {str(item_error)}")

        if count > 0:
            logger.info(f"通过单独添加成功上传了 {count}/{len(texts)} 个{unit}")
            return count, True
        raise HTTPException(status_code=500, detail=f"所有{unit}添加失败: {str(add_error)}")

@app.get("/api/documents")
async def get_documents(
    source: Optional[str] = None,
    limit: int = Query(100, ge=1, le=1000)
):
    """List documents, optionally filtered by source."""
    try:
        if source:
            docs = document_manager.get_documents_by_source(source, limit)
        else:
            # The manager exposes no "get all" method, so a wildcard keyword
            # search stands in for fetching everything.
            docs = document_manager.search_documents("*", limit=limit, search_type="keyword")

        results = [
            {
                "id": doc.uuid,
                "text": doc.properties.get("text", ""),
                "source": doc.properties.get("source", ""),
                "score": getattr(doc.metadata, "score", None),
            }
            for doc in docs
        ]
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"获取文档失败: {str(exc)}")
    return {"status": "success", "documents": results, "count": len(results)}

@app.get("/api/documents/{doc_id}")
async def get_document(doc_id: str):
    """Fetch a single document by its id; 404 when it does not exist."""
    try:
        doc = document_manager.get_document_by_id(doc_id)
        if not doc:
            raise HTTPException(status_code=404, detail=f"文档 {doc_id} 不存在")

        payload = {
            "id": doc.uuid,
            "text": doc.properties.get("text", ""),
            "source": doc.properties.get("source", ""),
        }
    except HTTPException:
        # Let the deliberate 404 pass through untouched.
        raise
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"获取文档失败: {str(exc)}")
    return {"status": "success", "document": payload}

@app.put("/api/documents/{doc_id}")
async def update_document(doc_id: str, document: DocumentUpdate):
    """Apply a partial update to an existing document; 404 when missing."""
    try:
        updated = document_manager.update_document(
            doc_id=doc_id,
            text=document.text,
            source=document.source,
        )
        if not updated:
            raise HTTPException(status_code=404, detail=f"文档 {doc_id} 不存在或更新失败")
    except HTTPException:
        # Let the deliberate 404 pass through untouched.
        raise
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"更新文档失败: {str(exc)}")
    return {"status": "success", "message": "文档更新成功"}

@app.delete("/api/documents/{doc_id}")
async def delete_document(doc_id: str):
    """Delete a single document by id; 404 when it does not exist."""
    try:
        removed = document_manager.delete_document(doc_id)
        if not removed:
            raise HTTPException(status_code=404, detail=f"文档 {doc_id} 不存在或删除失败")
    except HTTPException:
        # Let the deliberate 404 pass through untouched.
        raise
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"删除文档失败: {str(exc)}")
    return {"status": "success", "message": "文档删除成功"}

@app.delete("/api/documents")
async def delete_documents_by_source(source: str):
    """Delete every document whose source matches the given value."""
    # Guard against an empty source string (the parameter itself is required).
    if not source:
        raise HTTPException(status_code=400, detail="必须提供source参数")

    try:
        removed = document_manager.delete_documents_by_source(source)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"删除文档失败: {str(exc)}")
    return {"status": "success", "message": f"成功删除 {removed} 个文档"}

@app.post("/api/search")
async def search_documents(search: SearchQuery):
    """Search documents with layered fallbacks.

    Runs the requested search type (hybrid / semantic / keyword). When the
    first attempt returns nothing or raises, it progressively falls back to
    alternative search types and finally to a wildcard query filtered in
    Python, so the endpoint tries hard to return *something*. The response
    records which strategy actually produced the results ("search_type")
    plus the elapsed time.
    """
    start_time = time.time()
    try:
        logger.info(f"执行搜索: 查询={search.query}, 类型={search.search_type}, 限制={search.limit}")
        
        # Vector-based search types need the embedding model loaded first.
        if not document_manager.embedding_model and search.search_type in ["hybrid", "semantic"]:
            logger.info("正在加载嵌入模型...")
            document_manager.load_embedding_model()
        
        # An empty query degenerates to a wildcard keyword search.
        if not search.query.strip():
            search.query = "*"
            search.search_type = "keyword"
            logger.info("查询为空，使用通配符查询")
        
        # For a non-wildcard, multi-word keyword query, OR the terms together
        # so each word participates in matching independently.
        if search.search_type == "keyword" and search.query != "*":
            original_query = search.query
            if " " in original_query:
                # Join the individual keywords with OR.
                search.query = " OR ".join(original_query.split())
                logger.info(f"扩展关键词查询: '{original_query}' -> '{search.query}'")
        
        # Tracks the strategy that actually produced the results; updated by
        # the fallback paths below.
        actual_search_type = search.search_type
        
        # Execute the search.
        try:
            # First attempt: exactly what the caller asked for.
            docs = document_manager.search_documents(
                query=search.query,
                limit=search.limit,
                search_type=search.search_type
            )
            
            # No hits and not a wildcard: try an alternative strategy.
            if not docs and search.query != "*":
                logger.info(f"搜索 '{search.query}' 未找到结果，尝试替代搜索方法")
                
                if search.search_type == "hybrid":
                    # Fall back to a pure keyword search.
                    logger.info("尝试纯关键词搜索")
                    actual_search_type = "keyword (fallback from hybrid)"
                    docs = document_manager.search_documents(
                        query=search.query,
                        limit=search.limit,
                        search_type="keyword"
                    )
                elif search.search_type == "semantic":
                    # Fall back to a hybrid search.
                    logger.info("尝试混合搜索")
                    actual_search_type = "hybrid (fallback from semantic)"
                    docs = document_manager.search_documents(
                        query=search.query,
                        limit=search.limit,
                        search_type="hybrid"
                    )
                elif search.search_type == "keyword":
                    # Fall back to a wildcard query.
                    logger.info("尝试通配符查询")
                    actual_search_type = "keyword wildcard (fallback)"
                    docs = document_manager.search_documents(
                        query="*",
                        limit=search.limit,
                        search_type="keyword"
                    )
                    
                    # If the wildcard returned anything, filter it manually
                    # against the original query (substring match on text or
                    # source, case-insensitive).
                    if docs:
                        logger.info(f"获取到 {len(docs)} 个结果，手动过滤包含 '{search.query}' 的结果")
                        filtered_docs = []
                        query_lower = search.query.lower()
                        for doc in docs:
                            text = doc.properties.get("text", "").lower()
                            source = doc.properties.get("source", "").lower()
                            if query_lower in text or query_lower in source:
                                filtered_docs.append(doc)
                        docs = filtered_docs
                        logger.info(f"过滤后剩余 {len(docs)} 个结果")
            
        except Exception as search_error:
            logger.error(f"搜索出错: {str(search_error)}", exc_info=True)
            # The search itself failed: fetch everything via a wildcard query.
            logger.info("搜索出错，尝试获取所有文档")
            actual_search_type = "keyword wildcard (error fallback)"
            docs = document_manager.search_documents("*", limit=search.limit, search_type="keyword")
            
            # Then filter manually against the (possibly expanded) query.
            if docs and search.query != "*":
                logger.info(f"获取到 {len(docs)} 个文档，手动过滤包含 '{search.query}' 的结果")
                filtered_docs = []
                query_lower = search.query.lower()
                for doc in docs:
                    text = doc.properties.get("text", "").lower()
                    source = doc.properties.get("source", "").lower()
                    if query_lower in text or query_lower in source:
                        filtered_docs.append(doc)
                docs = filtered_docs
                logger.info(f"过滤后剩余 {len(docs)} 个结果")
        
        results = []
        for doc in docs:
            # Guarantee every hit carries a non-null score.
            score = None
            if hasattr(doc.metadata, "score") and doc.metadata.score is not None:
                score = doc.metadata.score
            else:
                # No backend score: compute one ourselves when the embedding
                # model is available and the query is not a wildcard.
                if document_manager.embedding_model and search.query != "*":
                    try:
                        text = doc.properties.get("text", "")
                        query_vector = document_manager.embedding_model.encode(search.query, normalize_embeddings=True)
                        text_vector = document_manager.embedding_model.encode(text, normalize_embeddings=True)
                        # Dot product of normalized vectors == cosine similarity.
                        score = float(torch.tensor(query_vector).dot(torch.tensor(text_vector)).item())
                        logger.info(f"为文档计算了相似度得分: {score}")
                    except Exception as e:
                        logger.error(f"计算相似度得分失败: {e}")
                        score = 0.5  # neutral default when scoring fails
                else:
                    # No model or wildcard query: use a neutral default score.
                    score = 0.5
            
            results.append({
                "id": doc.uuid,
                "text": doc.properties.get("text", ""),
                "source": doc.properties.get("source", ""),
                "score": score,
                "match_type": actual_search_type
            })
        
        # Total elapsed time for the request.
        end_time = time.time()
        elapsed_time = end_time - start_time
        
        logger.info(f"搜索结果: 找到 {len(results)} 个匹配项, 耗时 {elapsed_time:.3f} 秒")
        
        return {
            "status": "success", 
            "results": results, 
            "count": len(results),
            "elapsed_time": elapsed_time,
            "search_type": actual_search_type,
            "query": search.query
        }
    except Exception as e:
        # Elapsed time is measured and logged even on failure.
        end_time = time.time()
        elapsed_time = end_time - start_time
        
        logger.error(f"搜索文档失败: {str(e)}, 耗时 {elapsed_time:.3f} 秒", exc_info=True)
        raise HTTPException(status_code=500, detail=f"搜索文档失败: {str(e)}")

@app.get("/api/sources")
async def list_sources():
    """Return every known document source together with its document count."""
    try:
        result = [
            {"source": src, "count": document_manager.count_documents(src)}
            for src in document_manager.list_sources()
        ]
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"获取来源列表失败: {str(exc)}")
    return {"status": "success", "sources": result, "count": len(result)}

@app.get("/api/stats")
async def get_stats():
    """Aggregate collection statistics: total count plus a per-source breakdown."""
    try:
        sources = document_manager.list_sources()
        per_source = [
            {"source": src, "count": document_manager.count_documents(src)}
            for src in sources
        ]
        stats = {
            "total_documents": document_manager.count_documents(),
            "sources_count": len(sources),
            "sources": per_source,
        }
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"获取统计信息失败: {str(exc)}")
    return {"status": "success", "stats": stats}

@app.post("/api/export")
async def export_documents(source: Optional[str] = Form(None)):
    """Export documents (optionally only one source) to a JSON file under exports/."""
    try:
        os.makedirs("exports", exist_ok=True)

        # Unique filename so concurrent exports never collide.
        export_file = f"exports/documents_{uuid.uuid4()}.json"

        exported = document_manager.export_documents(export_file, source)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"导出文档失败: {str(exc)}")
    return {
        "status": "success",
        "message": f"成功导出 {exported} 个文档",
        "file": export_file,
    }

if __name__ == "__main__":
    # Development entry point: serve on all interfaces with auto-reload.
    uvicorn.run("app:app", host="0.0.0.0", port=8000, reload=True) 