"""
Text2Vec Base Chinese API服务
"""
import os
import logging
from contextlib import asynccontextmanager
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from typing import Optional, List, Union
import uvicorn
from prometheus_client import Counter, Histogram, generate_latest, CONTENT_TYPE_LATEST
from starlette.responses import Response
import numpy as np

from .model_loader import Text2VecChineseModelLoader
from .inference import Text2VecChineseInference

# Logging configuration: level is taken from the LOG_LEVEL env var (default INFO).
logging.basicConfig(
    level=os.getenv("LOG_LEVEL", "INFO"),
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Prometheus metrics. Counters/histograms are labeled per endpoint so each
# route's traffic and latency can be tracked separately.
request_counter = Counter(
    "text2vec_requests_total",
    "Total number of requests",
    ["endpoint", "status"]
)

request_duration = Histogram(
    "text2vec_request_duration_seconds",
    "Request duration in seconds",
    ["endpoint"]
)

# NOTE(review): the two histograms below are defined but never observed in
# this chunk of the file — confirm whether they are recorded elsewhere
# (e.g. inside the inference engine) or are dead metric definitions.
text_length = Histogram(
    "text2vec_text_length_tokens",
    "Text length in tokens",
    ["endpoint"]
)

chinese_chars = Histogram(
    "text2vec_chinese_chars",
    "Number of Chinese characters",
    ["endpoint"]
)

# Global singletons populated by the lifespan handler at startup.
# Both stay None until the model has loaded successfully.
model_loader = None
inference_engine = None


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan manager.

    Loads the model and builds the inference engine before the app starts
    serving requests; a load failure is logged and re-raised so startup
    aborts rather than serving with a missing model. Cleanup runs on
    shutdown (currently only logs).
    """
    global model_loader, inference_engine
    
    # Load the model at startup.
    logger.info("正在加载模型...")
    try:
        model_loader = Text2VecChineseModelLoader()
        model_loader.load_model()
        inference_engine = Text2VecChineseInference(
            model_loader.get_model(),
            model_loader.get_device()
        )
        logger.info("模型加载完成")
    except Exception as e:
        # Re-raise so FastAPI aborts startup instead of running half-initialized.
        logger.error(f"模型加载失败: {str(e)}")
        raise
    
    yield
    
    # Shutdown-time resource cleanup.
    logger.info("正在清理资源...")


app = FastAPI(
    title="Text2Vec Base Chinese API",
    description="基于昇腾910B的中文文本嵌入服务",
    version="1.0.0",
    lifespan=lifespan
)

# CORS middleware.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# disallowed by the CORS spec (browsers reject wildcard origins on
# credentialed requests) — confirm whether credentials are actually needed,
# or list explicit origins.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class HealthResponse(BaseModel):
    """Health-check response payload."""
    status: str  # overall service status string, e.g. "healthy"
    model_loaded: bool  # True when both the loader and inference engine exist
    npu_available: bool  # True when torch.npu reports an available device
    version: str  # service version string
    model_info: dict  # static model metadata (name, max_seq_length, dim, language)


class EmbeddingRequest(BaseModel):
    """Embedding request (OpenAI Embeddings API compatible)."""
    input: Union[str, List[str]]  # single text or batch of texts to embed
    model: Optional[str] = "text2vec-base-chinese"  # model name, echoed back in the response
    encoding_format: Optional[str] = "float"  # "float" (list of floats) or "base64" (packed float32)
    normalize_embeddings: Optional[bool] = False  # forwarded to the inference engine
    use_segmentation: Optional[bool] = False  # forwarded to the inference engine — semantics defined there


class SimilarityRequest(BaseModel):
    """Similarity request; the endpoint requires exactly two texts."""
    texts: List[str]  # must contain exactly 2 entries (validated in the handler)
    model: Optional[str] = "text2vec-base-chinese"  # model name (informational)


class SearchRequest(BaseModel):
    """Semantic search request: rank documents against a query."""
    query: str  # the search query text
    documents: List[str]  # candidate documents to rank
    top_k: Optional[int] = 5  # number of top results to return
    model: Optional[str] = "text2vec-base-chinese"  # model name (informational)


class PipelineInputs(BaseModel):
    """Pipeline-style input structure (HF pipeline compatible)."""
    source_sentence: str  # reference sentence to compare against
    sentences: List[str]  # sentences to score against the source


class PipelineSimilarityRequest(BaseModel):
    """Pipeline-style sentence-similarity request."""
    inputs: PipelineInputs  # source sentence plus candidates
    model: Optional[str] = "text2vec-base-chinese"  # model name (informational)


@app.get("/health", response_model=HealthResponse)
async def health_check():
    """Report service status, model load state, and NPU availability."""
    import torch

    npu_ok = torch.npu.is_available() if hasattr(torch, "npu") else False
    loaded = model_loader is not None and inference_engine is not None
    info = {
        "name": "text2vec-base-chinese",
        "max_seq_length": 512,
        "embedding_dim": 768,
        "language": "chinese",
    }
    return {
        "status": "healthy",
        "model_loaded": loaded,
        "npu_available": npu_ok,
        "version": "1.0.0",
        "model_info": info,
    }


@app.get("/metrics")
async def metrics():
    """Expose Prometheus metrics in the text exposition format."""
    payload = generate_latest()
    return Response(content=payload, media_type=CONTENT_TYPE_LATEST)


@app.post("/v1/embeddings")
async def create_embeddings(request: EmbeddingRequest):
    """
    Create embeddings for Chinese text (OpenAI Embeddings API protocol).

    Accepts a single string or a list of strings; returns one embedding
    object per input plus a rough token-usage estimate. Raises 503 when the
    model has not been loaded and 500 on inference failure.
    """
    request_counter.labels(endpoint="/v1/embeddings", status="received").inc()

    # Guard: the lifespan handler may not have finished (or failed) loading.
    if inference_engine is None:
        request_counter.labels(endpoint="/v1/embeddings", status="error").inc()
        raise HTTPException(status_code=503, detail="Model not loaded")

    try:
        # Normalize input to a list of texts.
        texts = [request.input] if isinstance(request.input, str) else request.input

        # Record latency of the actual inference in the (previously unused)
        # duration histogram.
        with request_duration.labels(endpoint="/v1/embeddings").time():
            embeddings = inference_engine.encode(
                texts,
                normalize_embeddings=request.normalize_embeddings or False,
                use_segmentation=request.use_segmentation or False
            )

        import base64  # hoisted out of the per-embedding loop

        data = []
        for idx, embedding in enumerate(embeddings):
            embedding_list = embedding.tolist()

            if request.encoding_format == "base64":
                # OpenAI-compatible base64 encoding of the raw float32 bytes.
                embedding_bytes = np.array(embedding_list, dtype=np.float32).tobytes()
                embedding_data = base64.b64encode(embedding_bytes).decode('utf-8')
            else:
                embedding_data = embedding_list

            data.append({
                "object": "embedding",
                "embedding": embedding_data,
                "index": idx
            })

        # Rough token count: one token per character (simple estimate for Chinese).
        total_tokens = sum(len(text) for text in texts)

        response = {
            "object": "list",
            "data": data,
            "model": request.model,
            "usage": {
                "prompt_tokens": total_tokens,
                "total_tokens": total_tokens
            }
        }
    except Exception as e:
        # BUG FIX: the original `finally` incremented the "success" counter on
        # every request, including failures (double-counting error + success).
        logger.exception("嵌入失败: %s", e)
        request_counter.labels(endpoint="/v1/embeddings", status="error").inc()
        raise HTTPException(status_code=500, detail=str(e))

    # Success is counted only after a response was actually built.
    request_counter.labels(endpoint="/v1/embeddings", status="success").inc()
    return response


@app.post("/v1/similarity")
async def compute_similarity(request: SimilarityRequest):
    """
    Compute similarity between exactly two Chinese texts.

    Returns the similarity score and the normalized embeddings of both
    texts. Raises 400 for a wrong number of inputs, 503 when the model has
    not been loaded, and 500 on inference failure.
    """
    request_counter.labels(endpoint="/v1/similarity", status="received").inc()

    # BUG FIX: this 400 was originally raised inside the try block, where the
    # generic `except Exception` caught it and re-raised it as a 500 server
    # error. Validate before entering the try block instead.
    if len(request.texts) != 2:
        request_counter.labels(endpoint="/v1/similarity", status="error").inc()
        raise HTTPException(status_code=400, detail="需要提供两个文本")

    if inference_engine is None:
        request_counter.labels(endpoint="/v1/similarity", status="error").inc()
        raise HTTPException(status_code=503, detail="Model not loaded")

    try:
        similarity = inference_engine.similarity(request.texts)

        # Also return the normalized embeddings of both texts.
        embeddings = inference_engine.encode(request.texts, normalize_embeddings=True)

        result = {
            "similarity": similarity,
            "embeddings": [emb.tolist() for emb in embeddings]
        }
    except Exception as e:
        # BUG FIX: the original `finally` incremented the "success" counter on
        # every request, including failures.
        logger.exception("相似度计算失败: %s", e)
        request_counter.labels(endpoint="/v1/similarity", status="error").inc()
        raise HTTPException(status_code=500, detail=str(e))

    request_counter.labels(endpoint="/v1/similarity", status="success").inc()
    return result


@app.post("/v1/search")
async def semantic_search(request: SearchRequest):
    """
    Semantic search: rank documents against a Chinese query.

    Returns the top_k results produced by the inference engine. Raises 503
    when the model has not been loaded and 500 on inference failure.
    """
    request_counter.labels(endpoint="/v1/search", status="received").inc()

    # Guard: the lifespan handler may not have finished (or failed) loading.
    if inference_engine is None:
        request_counter.labels(endpoint="/v1/search", status="error").inc()
        raise HTTPException(status_code=503, detail="Model not loaded")

    try:
        results = inference_engine.search(
            query=request.query,
            documents=request.documents,
            top_k=request.top_k or 5
        )
    except Exception as e:
        # BUG FIX: the original `finally` incremented the "success" counter on
        # every request, including failures.
        logger.exception("搜索失败: %s", e)
        request_counter.labels(endpoint="/v1/search", status="error").inc()
        raise HTTPException(status_code=500, detail=str(e))

    request_counter.labels(endpoint="/v1/search", status="success").inc()
    return {
        "results": results
    }


@app.post("/pipeline/sentence-similarity")
async def pipeline_sentence_similarity(request: PipelineSimilarityRequest):
    """
    Pipeline-style sentence similarity: score each candidate sentence
    against the source sentence.

    Raises 503 when the model has not been loaded and 500 on inference failure.
    """
    request_counter.labels(endpoint="/pipeline/sentence-similarity", status="received").inc()

    # Guard: the lifespan handler may not have finished (or failed) loading.
    if inference_engine is None:
        request_counter.labels(endpoint="/pipeline/sentence-similarity", status="error").inc()
        raise HTTPException(status_code=503, detail="Model not loaded")

    try:
        similarities = inference_engine.compute_similarities(
            request.inputs.source_sentence,
            request.inputs.sentences
        )
    except Exception as e:
        # BUG FIX: the original `finally` incremented the "success" counter on
        # every request, including failures.
        logger.exception("Pipeline 相似度计算失败: %s", e)
        request_counter.labels(endpoint="/pipeline/sentence-similarity", status="error").inc()
        raise HTTPException(status_code=500, detail=str(e))

    request_counter.labels(endpoint="/pipeline/sentence-similarity", status="success").inc()
    return similarities


if __name__ == "__main__":
    # Direct launch entry point; PORT/WORKERS/LOG_LEVEL come from the environment.
    port = int(os.getenv("PORT", 8000))
    workers = int(os.getenv("WORKERS", 1))
    
    # NOTE(review): the app is passed as the import string "main:app" (required
    # for workers > 1), but the relative imports at the top of this file imply
    # it lives inside a package — confirm the string doesn't need a package
    # prefix for the configured launch method.
    uvicorn.run(
        "main:app",
        host="0.0.0.0",
        port=port,
        workers=workers,
        log_level=os.getenv("LOG_LEVEL", "info").lower()
    )


