import json
from fastapi import APIRouter, Query
from fastapi.responses import JSONResponse
from typing import Optional, List
from langchain_ollama import OllamaLLM, OllamaEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain_community.document_loaders import (TextLoader,CSVLoader,
    DirectoryLoader,UnstructuredHTMLLoader,
    JSONLoader,PyPDFLoader)
import os
from fastapi.responses import StreamingResponse
from langchain.text_splitter import RecursiveCharacterTextSplitter

router = APIRouter()

# Base URL of the remote Ollama service (host/port are environment-specific).
OLLAMA_BASE_URL = "http://172.16.21.38:11436"
OLLAMA_MODEL = "qwen3:0.6b"

# Module-level Ollama LLM client, shared by all endpoints below.
ollama = OllamaLLM(base_url=OLLAMA_BASE_URL, model=OLLAMA_MODEL)
# Text splitter used by /ollama_split_generate.
# NOTE(review): chunk_size=10 characters is extremely small for real text;
# presumably a demo/testing value — confirm before production use.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=10,      # maximum characters per chunk
    chunk_overlap=2,    # characters of overlap between adjacent chunks
    length_function=len,
    add_start_index=True  # records each chunk's start offset in metadata["start_index"]
)
@router.get("/test")
async def test():
    """Health-check endpoint: responds with a static greeting payload."""
    payload = {"message": "Hello, World!"}
    return JSONResponse(content=payload)

@router.get("/stream_poem")
async def stream_poem(prompt: str = Query("写一首关于秋天的诗歌", description="生成内容的提示词")):
    """Stream the Ollama completion for *prompt* back as plain-text chunks.

    Each fragment produced by ``ollama.stream`` is forwarded to the client
    as soon as it is generated.
    """
    def token_stream():
        # Relay every token/fragment from the model verbatim.
        yield from ollama.stream(prompt)

    return StreamingResponse(token_stream(), media_type="text/plain")


@router.post("/ollama_split_generate")
def ollama_split_generate(text: str = Query(..., description="要处理的长文本")):
    """
    Split a long text into chunks and run each chunk through Ollama.

    Steps:
    1. Receive the long text (query parameter).
    2. Split it with the module-level ``text_splitter``.
    3. Invoke Ollama once per chunk.
    4. Return each chunk's generated content together with its original
       start index.

    Declared as a plain ``def`` (not ``async def``) on purpose:
    ``ollama.invoke`` is a blocking call, so FastAPI runs this handler in
    its threadpool instead of stalling the event loop for every chunk.
    The route, parameters, and response shape are unchanged.
    """
    try:
        documents = text_splitter.create_documents([text])
        results = [
            {
                # "start_index" exists because the splitter was built with
                # add_start_index=True.
                "chunk_start_index": doc.metadata["start_index"],
                "chunk_text": doc.page_content,
                "generated_text": ollama.invoke(doc.page_content),
            }
            for doc in documents
        ]
        return JSONResponse(content={"chunks": results})
    except Exception as e:
        # Boundary handler: surface any failure (splitter or model call)
        # as a JSON 500 instead of leaking a stack trace to the client.
        return JSONResponse(content={"error": str(e)}, status_code=500)