import glob
import json
import mimetypes
import os
from contextlib import asynccontextmanager
from datetime import datetime
from typing import Dict, List, Optional

from dotenv import load_dotenv
from fastapi import FastAPI, File, Form, HTTPException, Query, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import (
    JSONLoader,
    PyPDFLoader,
    TextLoader,
    UnstructuredExcelLoader,
    UnstructuredMarkdownLoader,
    UnstructuredWordDocumentLoader,
    WebBaseLoader,
)
from langchain_community.vectorstores import FAISS
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_huggingface import HuggingFaceEmbeddings
from pydantic import BaseModel

from deepseek_chat import ChatDeepseek

load_dotenv()

# Identify this client to remote servers; presumably consumed by
# WebBaseLoader / HTTP fetches — TODO(review): confirm which component reads it.
os.environ["USER_AGENT"] = "DeepSeek-Chat-API/1.0"
# Placeholder value: some libraries require the variable to merely exist.
# NOTE(review): confirm nothing in this stack actually calls OpenAI with it.
os.environ["OPENAI_API_KEY"] = "dummy"
# Keep NLTK data (used by the Unstructured* loaders) next to this file,
# and make sure the directory exists before any loader runs.
os.environ["NLTK_DATA"] = os.path.join(os.path.dirname(__file__), "nltk_data")
os.makedirs(os.environ["NLTK_DATA"], exist_ok=True)

@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan hook.

    Startup: index every document found in the local knowledge-base
    directory. Shutdown: release the chat client's resources.
    """
    await load_local_knowledge_base()
    try:
        yield
    finally:
        # try/finally guarantees the client is closed even if an exception
        # propagates through the serving phase (the original skipped cleanup).
        await chat.aclose()

app = FastAPI(lifespan=lifespan)

# Configure CORS.
# NOTE(review): browsers reject allow_origins=["*"] combined with
# allow_credentials=True for credentialed requests — tighten origins
# before production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Chat model client (closed by the lifespan shutdown hook).
chat = ChatDeepseek()

# Splitter used to chunk every loaded document before embedding.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200,
    length_function=len,
    separators=["\n\n", "\n", " ", ""]
)

# Embedding model plus the in-memory registry of per-document FAISS stores:
# document_id -> {"vectorstore": FAISS, "metadata": dict, "chunk_count": int}.
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
document_stores: Dict[str, Dict] = {}

# Directory scanned at startup and used to persist uploads.
KNOWLEDGE_BASE_DIR = "knowledge_base"
os.makedirs(KNOWLEDGE_BASE_DIR, exist_ok=True)

# Request / response models.
class MessagePart(BaseModel):
    """One structured fragment of a chat message."""
    type: str  # fragment kind; presumably "text" — confirm against the client
    text: str  # fragment content

class Message(BaseModel):
    """A single chat turn as sent by the client."""
    role: str  # speaker role; presumably "user"/"assistant" — confirm with client
    content: str  # plain-text body; the handlers read only the LAST message's content
    parts: List[MessagePart]  # structured fragments; not read by the handlers in this file

class ChatRequest(BaseModel):
    """Payload for /chat and /chat/stream."""
    id: Optional[str] = None  # conversation id; not read by the handlers in this file
    messages: List[Message]  # history; only the last entry's content is used
    stream: Optional[bool] = False  # not consulted — the route (/chat vs /chat/stream) decides
    system_prompt: Optional[str] = None  # overrides the default system prompt when set
    document_id: Optional[str] = None  # not consulted — every stored document is searched
    use_memory: Optional[bool] = True  # not consulted by the handlers in this file

class ChatResponse(BaseModel):
    """Non-streaming chat reply."""
    response: str  # full model answer
    sources: Optional[List[Dict[str, str]]] = None  # retrieved chunks ({"content", "source"}); None when no docs matched

class DocumentResponse(BaseModel):
    """Result of a successful /upload call."""
    document_id: str  # key into the in-memory document store
    message: str  # human-readable status line
    metadata: Dict[str, str]  # filename / content_type / upload_time / name / description

class KnowledgeBaseResponse(BaseModel):
    """Summary row returned by GET /knowledge-bases."""
    knowledge_base_id: str  # same id accepted by DELETE /knowledge-bases/{id}
    name: str
    description: str
    created_at: str  # stringified upload timestamp
    document_count: int  # actually the CHUNK count of the document (see the handler)

# Pick a document loader implementation for the given MIME type.
def get_document_loader(file_path: str, content_type: str):
    """Return a loader object (exposing ``.load()``) for *file_path*.

    Dispatches on *content_type*; raises ``ValueError`` for types that
    have no registered loader.
    """
    if content_type == "application/json":
        # Ad-hoc loader: the JSON files are arrays of records whose text
        # lives under the 'zy' key; records with an empty value are skipped.
        class _JSONArrayLoader:
            def __init__(self, path):
                self.file_path = path

            def load(self):
                from langchain_core.documents import Document

                with open(self.file_path, 'r', encoding='utf-8') as f:
                    records = json.load(f)
                contents = [record.get('zy', '') for record in records]
                return [
                    Document(page_content=text, metadata={"source": self.file_path})
                    for text in contents
                    if text
                ]

        return _JSONArrayLoader(file_path)

    dispatch = {
        "application/pdf": PyPDFLoader,
        "text/plain": TextLoader,
        "application/vnd.openxmlformats-officedocument.wordprocessingml.document": UnstructuredWordDocumentLoader,
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": UnstructuredExcelLoader,
        "application/vnd.ms-excel": UnstructuredExcelLoader,
        "text/markdown": UnstructuredMarkdownLoader,
    }
    loader_cls = dispatch.get(content_type)
    if loader_cls is None:
        raise ValueError(f"Unsupported content type: {content_type}")
    return loader_cls(file_path)

# Process one document into a searchable vector store.
async def process_document(file_path: str, content_type: str, name: Optional[str] = None, description: Optional[str] = None):
    """Load *file_path*, split it into chunks and embed them.

    Args:
        file_path: Path of the document on disk.
        content_type: MIME type used to select the loader.
        name: Display name; defaults to the file's stem.
        description: Free-text description; defaults to "".

    Returns:
        ``(vectorstore, chunk_count, metadata)`` — a FAISS index over the
        chunks, the number of chunks, and a metadata dict for the document.

    Raises:
        Exception: wrapping any load/split/embedding failure, with the
        original exception chained as the cause.
    """
    try:
        loader = get_document_loader(file_path, content_type)
        documents = loader.load()

        # Chunk before embedding so retrieval returns focused passages.
        texts = text_splitter.split_documents(documents)

        vectorstore = FAISS.from_documents(texts, embeddings)

        metadata = {
            "filename": os.path.basename(file_path),
            "content_type": content_type,
            "upload_time": str(datetime.now()),
            "name": name or os.path.splitext(os.path.basename(file_path))[0],
            "description": description or ""
        }

        return vectorstore, len(texts), metadata
    except Exception as e:
        # Chain the cause so the real traceback is not lost when re-wrapping.
        raise Exception(f"Error processing document: {str(e)}") from e

# Load every supported document found in the local knowledge-base directory.
async def load_local_knowledge_base():
    """Scan KNOWLEDGE_BASE_DIR at startup and index every supported file.

    Each file becomes its own entry in ``document_stores``. Per-file
    failures are logged and do not abort the scan.
    """
    print("开始加载本地知识库...")

    # Extension -> MIME type accepted by get_document_loader().
    supported_extensions = {
        '.pdf': 'application/pdf',
        '.txt': 'text/plain',
        '.json': 'application/json',
        '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
        '.xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
        '.md': 'text/markdown'
    }

    for ext, content_type in supported_extensions.items():
        for file_path in glob.glob(os.path.join(KNOWLEDGE_BASE_DIR, f"*{ext}")):
            try:
                print(f"处理文件: {file_path}")
                vectorstore, chunk_count, metadata = await process_document(
                    file_path,
                    content_type,
                    name=os.path.splitext(os.path.basename(file_path))[0]
                )

                # Pick the first unused id. A plain len()-based id collides
                # after deletions (delete doc_0 of two, and the next id would
                # be doc_1 — overwriting the surviving store).
                suffix = len(document_stores)
                while f"doc_{suffix}" in document_stores:
                    suffix += 1
                document_id = f"doc_{suffix}"

                document_stores[document_id] = {
                    "vectorstore": vectorstore,
                    "metadata": metadata,
                    "chunk_count": chunk_count
                }

                print(f"成功加载文档: {file_path}")
            except Exception as e:
                print(f"处理文件失败 {file_path}: {str(e)}")

# Upload and index a document.
@app.post("/upload", response_model=DocumentResponse)
async def upload_document(
    file: UploadFile = File(...),
    name: Optional[str] = Form(None),
    description: Optional[str] = Form(None)
):
    """Persist the uploaded file under KNOWLEDGE_BASE_DIR, index it, and
    register the resulting vector store.

    Raises:
        HTTPException(500): on any save/load/embedding failure.
    """
    try:
        # basename() strips client-supplied directory components — guards
        # against path traversal (e.g. "../../etc/passwd" -> "passwd").
        safe_filename = os.path.basename(file.filename or "")
        if not safe_filename:
            raise ValueError("Uploaded file has no filename")
        file_path = os.path.join(KNOWLEDGE_BASE_DIR, safe_filename)
        with open(file_path, "wb") as f:
            f.write(await file.read())

        # Fall back to extension sniffing when the client omits the type.
        content_type = file.content_type or mimetypes.guess_type(safe_filename)[0]

        vectorstore, chunk_count, metadata = await process_document(
            file_path,
            content_type,
            name,
            description
        )

        # Pick the first unused id: len()-based ids alone collide after
        # a knowledge base has been deleted, silently overwriting an entry.
        suffix = len(document_stores)
        while f"doc_{suffix}" in document_stores:
            suffix += 1
        document_id = f"doc_{suffix}"

        document_stores[document_id] = {
            "vectorstore": vectorstore,
            "metadata": metadata,
            "chunk_count": chunk_count
        }

        return DocumentResponse(
            document_id=document_id,
            message=f"Document processed successfully with {chunk_count} chunks",
            metadata=metadata
        )
    except Exception as e:
        # Proper HTTP 500 with detail instead of an unhandled bare Exception.
        raise HTTPException(status_code=500, detail=f"Error uploading document: {str(e)}") from e

# Chat endpoint (non-streaming).
@app.post("/chat", response_model=ChatResponse)
async def chat_endpoint(request: ChatRequest):
    """Answer the last user message, augmenting the system prompt with the
    most relevant chunks drawn from every stored document.

    Raises:
        HTTPException(400): when ``messages`` is empty.
        HTTPException(500): on retrieval or model failure.
    """
    # Guard before the try block so the 400 is not re-wrapped as a 500
    # (the original indexed messages[-1] and crashed on an empty list).
    if not request.messages:
        raise HTTPException(status_code=400, detail="messages must not be empty")
    docs = []
    try:
        system_prompt = request.system_prompt or "你是一个有帮助的AI助手。"
        query = request.messages[-1].content

        messages = [
            SystemMessage(content=system_prompt),
            HumanMessage(content=query)
        ]

        if document_stores:
            # Best chunk from each store, together with its distance score.
            scored = []
            for doc_store in document_stores.values():
                scored.extend(
                    doc_store["vectorstore"].similarity_search_with_score(query, k=1)
                )

            if scored:
                # FAISS scores are distances (smaller = more similar). The
                # previous sort keyed on metadata["score"], which
                # similarity_search never populates, so results were unranked.
                scored.sort(key=lambda pair: pair[1])
                docs = [doc for doc, _ in scored[:3]]
                context = "\n".join(doc.page_content for doc in docs)
                messages[0] = SystemMessage(content=f"{system_prompt}\n\n相关文档内容：\n{context}")

        # Full (non-streaming) completion from the chat model.
        response = await chat._call(messages)

        return ChatResponse(
            response=response,
            sources=[{"content": doc.page_content, "source": doc.metadata.get("source", "Unknown")}
                    for doc in docs] if docs else None
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error in chat endpoint: {str(e)}") from e

# Streaming chat endpoint (Server-Sent Events).
@app.post("/chat/stream")
async def chat_stream(request: ChatRequest):
    """Stream the model's reply as ``text/event-stream`` frames."""
    async def generate_chat_response():
        try:
            last_message = request.messages[-1].content if request.messages else ""

            system_prompt = request.system_prompt or "你是一个有帮助的AI助手。"

            messages = [
                SystemMessage(content=system_prompt),
                HumanMessage(content=last_message)
            ]

            # Retrieval augmentation: best chunk from each store, keep the
            # three closest matches overall.
            if document_stores:
                scored = []
                for doc_store in document_stores.values():
                    scored.extend(
                        doc_store["vectorstore"].similarity_search_with_score(last_message, k=1)
                    )

                if scored:
                    # FAISS scores are distances (smaller = more similar).
                    # The previous sort keyed on metadata["score"], which
                    # similarity_search never sets, so it was a no-op.
                    scored.sort(key=lambda pair: pair[1])
                    context = "\n".join(doc.page_content for doc, _ in scored[:3])
                    messages[0] = SystemMessage(
                        content=f"{system_prompt}\n\n相关文档内容：\n{context}"
                    )

            # NOTE(review): a chunk containing newlines breaks SSE framing
            # (every line must carry its own "data: " prefix). Confirm what
            # the client expects before changing the frame format.
            async for chunk in chat._astream(messages):
                content = chunk.generations[0].message.content
                yield f"data: {content}\n\n"

        except Exception as e:
            # Errors are surfaced in-stream; HTTP status is already sent.
            yield f"data: Error: {str(e)}\n\n"

    return StreamingResponse(
        generate_chat_response(),
        media_type="text/event-stream"
    )

# List all loaded knowledge bases.
@app.get("/knowledge-bases", response_model=List[KnowledgeBaseResponse])
async def list_knowledge_bases():
    """Return one summary entry per in-memory document store."""
    summaries = []
    for doc_id, store in document_stores.items():
        meta = store["metadata"]
        summaries.append(
            KnowledgeBaseResponse(
                knowledge_base_id=doc_id,
                name=meta["name"],
                description=meta["description"],
                created_at=meta["upload_time"],
                document_count=store["chunk_count"],
            )
        )
    return summaries

# Delete a knowledge base.
@app.delete("/knowledge-bases/{document_id}")
async def delete_knowledge_base(document_id: str):
    """Drop the in-memory store for *document_id*.

    Raises:
        HTTPException(404): when the id is unknown. (The original raised a
        bare Exception, which surfaced as an unhandled 500.)
    """
    if document_id not in document_stores:
        raise HTTPException(status_code=404, detail="Knowledge base not found")
    del document_stores[document_id]
    return {"message": "Knowledge base deleted successfully"}

# Liveness probe.
@app.get("/health")
async def health_check():
    """Report that the service is up."""
    status_payload = {"status": "healthy"}
    return status_payload