import glob
import logging
import os
from typing import List, Dict
from typing import Optional

import pandas as pd
import pdfplumber
import pptx
import uvicorn
from docx import Document
from fastapi import FastAPI, HTTPException, UploadFile, File
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from sentence_transformers import SentenceTransformer

# Configure module-wide logging.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Initialize the FastAPI application.
app = FastAPI(title="Document Embedding Service")

# Allow cross-origin requests.
# NOTE(review): "*" for origins/methods/headers is wide open — confirm this is
# acceptable for the intended deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load the sentence-transformer model at import time so the service fails fast
# if the model directory is missing or unreadable.
MODEL_PATH = "local_model_paraphrase"
try:
    logger.info(f"Loading model from {MODEL_PATH}...")
    model = SentenceTransformer(MODEL_PATH)
    logger.info("Model loaded successfully")
except Exception as e:
    logger.error(f"Failed to load model: {str(e)}")
    raise


# Request model for the /embed_documents endpoint.
class DocumentRequest(BaseModel):
    """Parameters describing which documents to embed and how to chunk them.

    At least one of ``file_path`` or ``directory_path`` should be supplied;
    paths that do not exist are silently skipped by ``process_documents``.
    """

    # Bug fix: previously a required ``str`` even though the processing code
    # treats it as optional — a directory-only request was impossible.
    # Optional-with-default is backward-compatible for existing callers.
    file_path: Optional[str] = None  # single file to embed
    directory_path: Optional[str] = None  # directory to scan (non-recursive)
    file_extensions: Optional[List[str]] = ["txt", "pdf", "docx"]  # extensions matched in the directory
    chunk_size: Optional[int] = 1000  # chunk size in characters
    overlap: Optional[int] = 100  # overlap between consecutive chunks, in characters


# Warm the model up once at service startup so the first real request does not
# pay the initial-encode latency.
@app.on_event("startup")
async def startup_event():
    model.encode("模型预热文本")
    logger.info("Model warmup completed")


def read_text_file(file_path: str) -> str:
    """Read a document file and return its plain-text content.

    Supported formats: txt, md, docx, xls/xlsx, pptx, pdf.

    :param file_path: path to the document
    :return: extracted text content
    :raises FileNotFoundError: if the path does not exist
    :raises ValueError: if the file extension is not supported
    """
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"文件不存在: {file_path}")

    ext = os.path.splitext(file_path)[1].lower()

    # Plain-text files (TXT / MD) — assumed UTF-8.
    if ext in ['.txt', '.md']:
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read()

    # Word documents (DOCX).
    elif ext == '.docx':
        doc = Document(file_path)
        return '\n'.join(para.text for para in doc.paragraphs)

    # Excel workbooks (XLS / XLSX) — dumped as a plain string table.
    elif ext in ['.xls', '.xlsx']:
        df = pd.read_excel(file_path, header=None)
        return df.to_string(index=False, header=False)

    # PowerPoint presentations (PPTX) — collect text from every shape that has it.
    elif ext == '.pptx':
        ppt = pptx.Presentation(file_path)
        text = []
        for slide in ppt.slides:
            for shape in slide.shapes:
                if hasattr(shape, "text"):
                    text.append(shape.text)
        return '\n'.join(text)

    # PDF files. Bug fix: call the (expensive) extract_text() once per page —
    # the original called it twice, once for the filter and once for the join.
    elif ext == '.pdf':
        with pdfplumber.open(file_path) as pdf:
            page_texts = (page.extract_text() for page in pdf.pages)
            return '\n'.join(t for t in page_texts if t)

    else:
        raise ValueError(f"不支持的文件格式: {ext}")


def process_documents(request: DocumentRequest) -> Dict[str, List[Dict]]:
    """Read and chunk the documents named by *request*.

    :param request: paths, extensions and chunking parameters
    :return: mapping of file name -> list of chunk dicts as produced by
        ``chunk_text`` (the previous annotation claimed ``List[str]``, which
        was wrong — values are metadata dicts, not raw strings).

    Non-existent paths are silently skipped; a file present both as
    ``file_path`` and inside ``directory_path`` is processed twice, the
    directory result winning (keyed by base name).
    """
    documents = {}

    # Single explicit file, if it exists.
    if request.file_path and os.path.isfile(request.file_path):
        file_name = os.path.basename(request.file_path)
        content = read_text_file(request.file_path)
        if content:
            documents[file_name] = chunk_text(content, request.chunk_size, request.overlap)

    # Every matching file directly inside the directory (non-recursive glob).
    if request.directory_path and os.path.isdir(request.directory_path):
        for ext in request.file_extensions:
            for file_path in glob.glob(os.path.join(request.directory_path, f"*.{ext}")):
                file_name = os.path.basename(file_path)
                content = read_text_file(file_path)
                if content:
                    documents[file_name] = chunk_text(content, request.chunk_size, request.overlap)

    return documents


def chunk_text(text: str, chunk_size: int = 1000, overlap: int = 100) -> List[Dict]:
    """Split *text* into overlapping chunks with positional metadata.

    Each returned dict has keys: ``text``, ``start``, ``end``, ``chunk_index``,
    ``total_chars``. ``end`` is clamped to ``len(text)`` (previously it could
    point past the end of the text).

    :param text: text to split; empty text yields an empty list
    :param chunk_size: chunk size in characters (must be positive)
    :param overlap: overlap between consecutive chunks, in characters
    :raises ValueError: if chunk_size <= 0, or overlap >= chunk_size — in the
        old code that made the window stop advancing, i.e. an infinite loop.
    """
    if chunk_size <= 0:
        raise ValueError("chunk_size must be positive")
    if overlap >= chunk_size:
        raise ValueError("overlap must be smaller than chunk_size")

    chunks = []
    total = len(text)
    start = 0
    chunk_index = 0

    while start < total:
        # Clamp so the metadata never claims a range beyond the text.
        end = min(start + chunk_size, total)
        chunks.append({
            "text": text[start:end],
            "start": start,
            "end": end,
            "chunk_index": chunk_index,
            "total_chars": total,
        })
        # Once the final chunk reaches the end, stop — the old code could emit
        # a redundant tail chunk fully contained in the previous one.
        start = end - overlap if end < total else total
        chunk_index += 1

    return chunks


@app.post("/embed_documents")
async def embed_documents(request: DocumentRequest):
    """Embed the documents described by *request*.

    Returns embeddings grouped by document name, the chunk texts, per-chunk
    metadata (without the text itself), the model path and vector dimension.
    Responds 400 when no documents were found, 500 on unexpected errors.
    """
    try:
        documents = process_documents(request)
        if not documents:
            raise HTTPException(status_code=400, detail="No valid documents found")

        # Flatten all chunks so the model encodes everything in one batch.
        all_chunks = []
        doc_texts = {}
        for doc_name, chunks in documents.items():
            doc_texts[doc_name] = [chunk["text"] for chunk in chunks]
            all_chunks.extend(chunks)

        texts_to_embed = [chunk["text"] for chunk in all_chunks]
        embeddings = model.encode(texts_to_embed, batch_size=128)

        # Regroup the flat embedding matrix by document.
        result = {
            "documents": {},
            "texts": doc_texts,
            "metadata": {doc_name: [] for doc_name in documents.keys()},
            "model": MODEL_PATH,
            "dimension": embeddings.shape[1],
            "total_chunks": len(all_chunks)
        }

        current_idx = 0
        for doc_name, chunks in documents.items():
            chunk_count = len(chunks)
            result["documents"][doc_name] = [
                embeddings[current_idx + i].tolist()
                for i in range(chunk_count)
            ]
            # Metadata mirrors the chunks minus the (potentially large) text.
            result["metadata"][doc_name] = [
                {k: v for k, v in chunk.items() if k != "text"}
                for chunk in chunks
            ]
            current_idx += chunk_count

        return result
    except HTTPException:
        # Bug fix: the 400 above was previously caught by the generic handler
        # below and re-reported to the client as a 500.
        raise
    except Exception as e:
        logger.error(f"Document embedding error: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


def group_embeddings(text_mapping, embeddings):
    """Yield ``(document_name, [embedding, ...])`` pairs grouped by name."""
    from itertools import groupby

    # Pair each name with its embedding; sort by name so groupby sees
    # contiguous runs. The sort is stable, so per-document order is preserved.
    ordered_pairs = sorted(zip(text_mapping, embeddings), key=lambda pair: pair[0])

    for name, members in groupby(ordered_pairs, key=lambda pair: pair[0]):
        yield name, [vector for _, vector in members]


class SingleTextRequest(BaseModel):
    """Request body for the /embed_single_text endpoint."""
    # Raw text to chunk and embed.
    text: str
    # Chunk size in characters.
    chunk_size: Optional[int] = 1000
    # Overlap between consecutive chunks, in characters.
    overlap: Optional[int] = 100




@app.post("/embed_single_text")
async def api_embed_single_text(request: SingleTextRequest):
    """HTTP wrapper around ``embed_single_text``; any failure becomes a 500."""
    try:
        return embed_single_text(request.text, request.chunk_size, request.overlap)
    except Exception as e:
        logger.error(f"API single text embedding error: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


def embed_single_text(text: str, chunk_size: int = 1000, overlap: int = 100) -> dict:
    """
    Chunk a single piece of text and return its embedding vectors.

    :param text: the text content to process
    :param chunk_size: chunk size in characters
    :param overlap: overlap between consecutive chunks, in characters
    :return: dict with ``embeddings``, ``model``, ``dimension``,
        ``chunk_count`` and ``chunk_sizes`` (character length per chunk)
    :raises ValueError: if no chunks could be created
    """
    try:
        chunks = chunk_text(text, chunk_size, overlap)
        if not chunks:
            raise ValueError("No valid text chunks created")

        # Bug fix: chunk_text returns metadata dicts — encode the chunk *text*.
        # The old code passed the dicts straight to model.encode().
        chunk_texts = [chunk["text"] for chunk in chunks]

        embeddings = model.encode(
            chunk_texts,
            batch_size=128,
            convert_to_numpy=True,
            show_progress_bar=False
        )

        # Convert numpy rows to plain lists for JSON serialization.
        embeddings_list = [embedding.tolist() for embedding in embeddings]

        return {
            "embeddings": embeddings_list,
            "model": MODEL_PATH,
            "dimension": embeddings.shape[1] if len(embeddings.shape) > 1 else embeddings.shape[0],
            "chunk_count": len(chunks),
            # Bug fix: len() of the chunk dict reported the number of metadata
            # keys; report the character length of each chunk's text instead.
            "chunk_sizes": [len(t) for t in chunk_texts]
        }

    except Exception as e:
        logger.error(f"Single text embedding error: {str(e)}")
        raise


@app.post("/upload_and_embed")
async def upload_and_embed(file: UploadFile = File(...), chunk_size: int = 1000, overlap: int = 100):
    """
    Upload a file, chunk its UTF-8 text content, and return embeddings.

    :param file: the uploaded file (must decode as UTF-8 text)
    :param chunk_size: chunk size in characters
    :param overlap: overlap between consecutive chunks, in characters
    :return: filename, embeddings, model path, vector dimension, chunk count
    """
    try:
        content = await file.read()
        try:
            text = content.decode('utf-8')
        except UnicodeDecodeError:
            # A non-text upload is a client error, not a server fault.
            raise HTTPException(status_code=400, detail="File is not valid UTF-8 text")

        chunks = chunk_text(text, chunk_size, overlap)
        if not chunks:
            raise HTTPException(status_code=400, detail="No valid text chunks created")

        # Bug fix: encode the chunk text, not the chunk metadata dicts that
        # chunk_text returns (the old code passed the dicts to encode()).
        chunk_texts = [chunk["text"] for chunk in chunks]
        embeddings = model.encode(
            chunk_texts,
            batch_size=128,
            convert_to_numpy=True,
            show_progress_bar=False
        )

        return {
            "filename": file.filename,
            "embeddings": embeddings.tolist(),
            "model": MODEL_PATH,
            "dimension": embeddings.shape[1],
            "chunk_count": len(chunks)
        }
    except HTTPException:
        # Bug fix: previously the 400s raised above were swallowed by the
        # generic handler below and re-reported as 500s.
        raise
    except Exception as e:
        logger.error(f"File upload embedding error: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/health")
async def health_check():
    """Health-check endpoint: encodes a test string to verify the model responds."""
    import numpy as np

    test_text = "测试文本"
    embedding = model.encode(test_text, convert_to_numpy=True)
    norm = np.linalg.norm(embedding)

    # Consistency fix: use the module logger (with lazy %-formatting) instead
    # of print(), matching the logging used everywhere else in this file.
    logger.info("Health check embedding norm: %s", norm)
    return {"status": "healthy"}


if __name__ == "__main__":

    # Run the service directly under uvicorn when executed as a script.
    uvicorn.run(app, host="0.0.0.0", port=5000)
