# -*- coding: utf-8 -*-
import logging
import os
import secrets

import numpy as np
import torch
import uvicorn
import yaml
from fastapi import FastAPI, HTTPException, Depends, Header
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModel

# Load configuration
def load_config():
    """Read and parse ``config.yaml`` located next to this module.

    Returns:
        dict: The parsed configuration mapping.

    Raises:
        FileNotFoundError: If ``config.yaml`` is absent (reported on stdout,
            then re-raised so startup aborts).
        yaml.YAMLError: If the file is present but not valid YAML.
    """
    config_path = os.path.join(os.path.dirname(__file__), 'config.yaml')
    try:
        handle = open(config_path, 'r', encoding='utf-8')
    except FileNotFoundError:
        print(f"Configuration file not found at {config_path}")
        raise
    with handle:
        try:
            return yaml.safe_load(handle)
        except yaml.YAMLError as e:
            print(f"Error parsing configuration file: {e}")
            raise

# Load configuration once at import time; a missing/invalid file aborts startup.
config = load_config()

# Setup logging. Level comes from the optional 'logging.level' config key
# (name of a stdlib logging level, e.g. "INFO"); defaults to INFO.
logging.basicConfig(
    level=getattr(logging, config.get('logging', {}).get('level', 'INFO')),
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

app = FastAPI(
    title="BGE Embedding Service",
    description="A service for generating text embeddings using BGE (BAAI General Embedding) models",
    version="1.0.0"
)

# Get API key from config ('api.secret_key').
# NOTE(review): the fallback is a hard-coded, publicly visible default key —
# deployments that omit 'api.secret_key' are effectively unauthenticated.
# Consider failing startup instead of falling back; confirm with ops.
API_KEY = config.get('api', {}).get('secret_key', 'bge-service-secret-key-2023')

def verify_api_key(authorization: str = Header(None)):
    """FastAPI dependency validating the Bearer token in the Authorization header.

    Args:
        authorization: Raw ``Authorization`` header value injected by FastAPI,
            or ``None`` when the header is absent.

    Returns:
        str: The validated token.

    Raises:
        HTTPException: 401 if the header is missing, not of the form
            ``Bearer <token>``, or the token does not match the configured key.
    """
    if authorization is None:
        raise HTTPException(status_code=401, detail="Authorization header is missing")

    # Check the Authorization header format.
    if not authorization.startswith("Bearer "):
        raise HTTPException(status_code=401, detail="Invalid authorization format. Expected 'Bearer <token>'")

    # Take everything after the "Bearer " prefix. The previous
    # split(" ")[1] silently truncated tokens containing spaces and broke
    # on accidental double spaces after "Bearer".
    token = authorization[len("Bearer "):]

    # Constant-time comparison to avoid leaking key material via timing.
    if not secrets.compare_digest(token, API_KEY):
        raise HTTPException(status_code=401, detail="Invalid API key")

    return token

# Load the model and tokenizer at import time so the service fails fast on
# startup when the weights are missing or unreadable.
model_path = config.get('model', {}).get('model_path', './models/bge-base-zh')
logger.info(f"Loading model from: {model_path}")

try:
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModel.from_pretrained(model_path)
    # Inference-only service: switch off dropout / training-mode layers.
    model.eval()
    logger.info("Model and tokenizer loaded successfully")
except Exception as e:
    logger.error(f"Failed to load model: {e}")
    raise

class EmbeddingRequest(BaseModel):
    # Texts to embed; the endpoint rejects empty lists and more than 100 items.
    texts: list[str]
    
class EmbeddingResponse(BaseModel):
    # One L2-normalized embedding vector per input text, in input order.
    embeddings: list[list[float]]
    
@app.post("/v1/embeddings")
async def get_embeddings(request: EmbeddingRequest, token: str = Depends(verify_api_key)):
    """
    Generate embeddings for a list of text strings.
    
    Args:
        request: EmbeddingRequest containing list of texts to embed
        token: API token for authentication
        
    Returns:
        EmbeddingResponse containing list of embedding vectors
        
    Raises:
        HTTPException: 400 on invalid input, 500 if model inference fails
    """
    # Validate input OUTSIDE the generic handler: previously these 400s were
    # caught by the blanket `except Exception` below and re-raised as 500s,
    # so clients saw the wrong status code for their own errors.
    if not request.texts:
        raise HTTPException(status_code=400, detail="No texts provided for embedding")

    if len(request.texts) > 100:
        raise HTTPException(status_code=400, detail="Maximum 100 texts per request")

    try:
        # Truncation limit for the tokenizer, from config ('model.max_length').
        max_length = config.get('model', {}).get('max_length', 512)

        logger.info(f"Processing {len(request.texts)} texts for embedding")

        # Tokenize the batch; pad to the longest text, truncate to max_length.
        encoded_input = tokenizer(
            request.texts, 
            padding=True, 
            truncation=True, 
            return_tensors='pt', 
            max_length=max_length
        )

        # Forward pass without gradient tracking (inference only).
        with torch.no_grad():
            model_output = model(**encoded_input)
            # BGE convention: the [CLS] token (position 0 of the last hidden
            # state) is the sentence embedding.
            sentence_embeddings = model_output[0][:, 0]

        # L2-normalize so cosine similarity reduces to a dot product.
        sentence_embeddings = torch.nn.functional.normalize(sentence_embeddings, p=2, dim=1)

        # Convert tensors to plain Python lists for JSON serialization.
        embeddings = sentence_embeddings.tolist()

        logger.info(f"Successfully generated {len(embeddings)} embeddings")

        return EmbeddingResponse(embeddings=embeddings)

    except HTTPException:
        # Propagate deliberate HTTP errors with their original status codes.
        raise
    except Exception as e:
        logger.error(f"Error generating embeddings: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")

@app.get("/")
async def root():
    """Describe the service and advertise its main endpoints."""
    service_info = {
        "service": "BGE Embedding Service",
        "version": "1.0.0",
        "description": "A service for generating text embeddings using BGE models",
    }
    service_info["endpoints"] = {
        "embeddings": "/v1/embeddings",
        "docs": "/docs",
    }
    return service_info

@app.get("/health")
async def health_check():
    """Liveness probe.

    model_loaded is statically True: module import would already have failed
    if the model could not be loaded, so a running process implies a loaded model.
    """
    report = {"status": "healthy"}
    report["model_loaded"] = True
    return report

if __name__ == "__main__":
    # Bind address comes from the optional 'server' section of the config;
    # default is all interfaces on port 8000.
    settings = config.get('server', {})
    host = settings.get('host', '0.0.0.0')
    port = settings.get('port', 8000)

    logger.info(f"Starting BGE Embedding Service on {host}:{port}")
    uvicorn.run(app, host=host, port=port)
    