#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
BentoML Service for Embedding, Reranker, and OCR Models
Integrates: m3e-large, bge-reranker-v2-m3, PaddleOCR v4
OpenAI-compatible API format
"""

import bentoml
import numpy as np
from typing import List, Optional, Dict, Any
import base64
import io
from PIL import Image
import time
import logging
import os
from pathlib import Path

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Use default cache directories (no local models directory)


# ==================== Embedding Runnable ====================
@bentoml.service(
    resources={"cpu": "2"},
    traffic={"timeout": 300},
)
class EmbeddingRunnable:
    """M3E-Large Embedding Model Runnable.

    Loads the moka-ai/m3e-large sentence-transformer once at service start
    and serves OpenAI-compatible embedding responses.
    """

    def __init__(self):
        from sentence_transformers import SentenceTransformer
        logger.info("Loading m3e-large embedding model...")

        # Load model from default cache (will download if not cached)
        try:
            self.model = SentenceTransformer('moka-ai/m3e-large')
            logger.info("✓ m3e-large model loaded successfully")
        except Exception as e:
            logger.error(f"Failed to load m3e-large model: {e}")
            raise

    @bentoml.api
    def embed(self, input_data: List[str] | str, model: str = "m3e-large") -> Dict[str, Any]:
        """Generate embeddings for input text(s) in OpenAI-compatible format.

        Args:
            input_data: A single text or a list of texts to embed.
            model: Model name echoed back in the response; it does not select
                a different model (this runnable always serves m3e-large).

        Returns:
            OpenAI-style embeddings response: ``{"object": "list", "data":
            [...], "model": ..., "usage": {...}}`` with one entry per text.
        """
        # Normalize input to a list of texts
        texts = [input_data] if isinstance(input_data, str) else input_data

        # L2-normalized vectors so cosine similarity equals dot product
        embeddings = self.model.encode(texts, normalize_embeddings=True)

        data = [
            {"object": "embedding", "embedding": vector, "index": idx}
            for idx, vector in enumerate(embeddings.tolist())
        ]

        # NOTE: whitespace splitting is only an approximation of the model's
        # real tokenizer count; kept for OpenAI response-shape compatibility.
        # Computed once (the original evaluated this sum twice).
        token_count = sum(len(text.split()) for text in texts)

        return {
            "object": "list",
            "data": data,
            "model": model,
            "usage": {
                "prompt_tokens": token_count,
                "total_tokens": token_count,
            },
        }


# ==================== Reranker Runnable ====================
@bentoml.service(
    resources={"cpu": "2"},
    traffic={"timeout": 300},
)
class RerankerRunnable:
    """BGE-Reranker-v2-m3 Model Runnable.

    Loads the BAAI/bge-reranker-v2-m3 cross-encoder once at service start
    and serves Cohere/WeKnora-style rerank responses.
    """

    def __init__(self):
        from FlagEmbedding import FlagReranker
        logger.info("Loading bge-reranker-v2-m3 model...")

        # Load model from default cache (will download if not cached)
        try:
            self.model = FlagReranker('BAAI/bge-reranker-v2-m3', use_fp16=True)
            logger.info("✓ bge-reranker-v2-m3 model loaded successfully")
        except Exception as e:
            logger.error(f"Failed to load bge-reranker-v2-m3 model: {e}")
            raise

    @bentoml.api
    def rerank(
        self,
        query: str,
        documents: List[str],
        model: str = "BAAI/bge-reranker-v2-m3",
        top_n: Optional[int] = None,
        return_documents: bool = True
    ) -> Dict[str, Any]:
        """Rerank documents by relevance to the query.

        Args:
            query: The search query.
            documents: Candidate documents to score against the query.
            model: Model name echoed back in the response.
            top_n: If given, return only the N most relevant results.
            return_documents: Include the document text in each result.

        Returns:
            WeKnora-compatible response with ``results`` sorted by
            descending ``relevance_score``; each result keeps the document's
            original ``index``.
        """
        # Score every (query, document) pair in one batch
        sentence_pairs = [[query, doc] for doc in documents]
        scores = self.model.compute_score(sentence_pairs)

        # compute_score returns a bare scalar when given a single pair
        if isinstance(scores, (int, float)):
            scores = [scores]

        # Pair each score with its original document index, best first
        sorted_scores = sorted(enumerate(scores), key=lambda x: x[1], reverse=True)

        # BUGFIX: use `is not None` so an explicit top_n=0 truncates to zero
        # results instead of being silently ignored like a missing top_n.
        if top_n is not None:
            sorted_scores = sorted_scores[:top_n]

        # Build response (WeKnora compatible format)
        results = []
        for index, score in sorted_scores:
            result = {
                "index": index,
                "relevance_score": float(score)
            }
            if return_documents:
                result["document"] = {"text": documents[index]}
            results.append(result)

        # NOTE: hash() is salted per process (PYTHONHASHSEED), so this id is
        # only unique-ish within a run — adequate as an opaque request id.
        return {
            "id": f"rerank-{hash(query + str(documents)) % 1000000}",
            "model": model,
            "results": results,
            "usage": {
                # Whitespace token counts are approximate (no real tokenizer)
                "total_tokens": len(query.split()) + sum(len(doc.split()) for doc in documents)
            }
        }


# ==================== OCR Runnable ====================
@bentoml.service(
    resources={"cpu": "2"},
    traffic={"timeout": 300},
)
class OCRRunnable:
    """PaddleOCR v4 Model Runnable.

    Wraps a CPU-only PaddleOCR (PP-OCRv4, Chinese models) engine behind an
    OpenAI chat-completion-shaped API that extracts plain text from a
    base64-encoded image embedded in the chat messages.
    """

    def __init__(self):
        # Eagerly build the OCR engine at service start so the first request
        # does not pay the model-download/initialization cost.
        logger.info("Loading PaddleOCR v4 model...")
        # Configuration consistent with ocr_server.py
        self.model = self._init_paddle_ocr()
        logger.info("PaddleOCR v4 model loaded successfully")

    def _init_paddle_ocr(self):
        """Initialize the PaddleOCR engine with the same configuration as ocr_server.py.

        Forces CPU execution, applies AVX-compatibility flags on Linux hosts
        whose CPU lacks AVX support, and builds a PP-OCRv4 pipeline for
        Chinese text.

        Returns:
            A configured ``paddleocr.PaddleOCR`` instance.
        """
        import os
        import paddle
        import subprocess
        import platform

        # Use default PaddleOCR cache directory
        logger.info("PaddleOCR will use default cache directory")

        # Set PaddlePaddle to use CPU and disable GPU
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
        paddle.set_device('cpu')

        # Check CPU AVX support (same as ocr_server.py). If AVX cannot be
        # confirmed, fall back to compatibility flags rather than risk a crash.
        try:
            if platform.system() == "Linux":
                try:
                    result = subprocess.run(['grep', '-o', 'avx', '/proc/cpuinfo'],
                                          capture_output=True, text=True, timeout=5)
                    has_avx = 'avx' in result.stdout.lower()
                    if not has_avx:
                        logger.warning("CPU does not support AVX instructions, using compatibility mode")
                        os.environ['FLAGS_use_avx2'] = '0'
                        os.environ['FLAGS_use_avx'] = '1'
                except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError):
                    # grep missing or too slow: assume the worst
                    logger.warning("Could not detect AVX support, using compatibility mode")
                    os.environ['FLAGS_use_avx2'] = '0'
                    os.environ['FLAGS_use_avx'] = '1'
        except Exception as e:
            logger.warning(f"Error detecting CPU capabilities: {e}, using compatibility mode")
            os.environ['FLAGS_use_avx2'] = '0'
            os.environ['FLAGS_use_avx'] = '1'

        # Imported late so the environment flags above are set before Paddle
        # components are constructed.
        from paddleocr import PaddleOCR

        # OCR configuration identical to ocr_server.py
        ocr_config = {
            "text_det_limit_type": "max",
            "text_det_limit_side_len": 960,
            "use_doc_orientation_classify": True,  # enable document orientation classification
            "use_doc_unwarping": False,
            "use_textline_orientation": True,  # enable text-line orientation detection
            "text_recognition_model_name": "PP-OCRv4_server_rec",
            "text_detection_model_name": "PP-OCRv4_server_det",
            "text_det_thresh": 0.3,
            "text_det_box_thresh": 0.6,
            "text_det_unclip_ratio": 1.5,
            "text_rec_score_thresh": 0.0,
            "ocr_version": "PP-OCRv4",
            "lang": "ch"
        }

        return PaddleOCR(**ocr_config)

    def _decode_base64_image(self, base64_string: str) -> Optional[np.ndarray]:
        """Convert a base64 string to an RGB numpy image (same as ocr_server.py).

        Args:
            base64_string: Raw base64 data, optionally prefixed with a
                ``data:image/...;base64,`` data-URL header.

        Returns:
            An ``(H, W, 3)`` uint8 RGB array, or ``None`` if decoding fails
            (callers must handle the ``None`` case).
        """
        try:
            # Remove data URL prefix if present
            if ',' in base64_string:
                base64_string = base64_string.split(',')[1]

            # Decode base64
            image_data = base64.b64decode(base64_string)

            # Convert to PIL Image
            image = Image.open(io.BytesIO(image_data))

            # Convert to RGB format (handles RGBA, palette, grayscale inputs)
            if image.mode != "RGB":
                image = image.convert("RGB")

            # Convert to numpy array
            image_array = np.array(image)

            return image_array
        except Exception as e:
            # Deliberate best-effort: signal failure with None instead of raising
            logger.error(f"Error converting base64 to image: {str(e)}")
            return None

    @bentoml.api
    def ocr_chat_completion(self, messages: List[Dict[str, Any]], model: str = "paddle-ocr-v4") -> Dict[str, Any]:
        """Run OCR on an image supplied via OpenAI chat-completion messages.

        Compatible with the request/response format in ocr_server.py. The
        image is taken from the most recent user message, either as an
        ``image_url`` content part with a ``data:image...`` URL or as a raw
        data-URL string content.

        Args:
            messages: OpenAI-style chat messages; must contain a user message
                with base64 image data.
            model: Model name echoed back in the response.

        Returns:
            An OpenAI ``chat.completion``-shaped dict whose assistant message
            content is the extracted text (JSON-wrapped if the prompt
            mentions "json").

        Raises:
            ValueError: If no user message, no image data, or the image
                cannot be decoded.
        """
        # Extract the latest user message (messages may include history)
        user_message = None
        for msg in reversed(messages):
            if msg.get('role') == 'user':
                user_message = msg
                break

        if not user_message:
            raise ValueError("No user message found")

        # Extract image data and prompt text
        content = user_message.get('content', '')
        image_base64 = None
        prompt_text = ""

        if isinstance(content, list):
            # Handle multimodal input (image + text content parts)
            for item in content:
                if item.get('type') == 'image_url':
                    image_url = item.get('image_url', {}).get('url', '')
                    # Only inline data URLs are supported (no remote fetch)
                    if image_url.startswith('data:image'):
                        image_base64 = image_url
                elif item.get('type') == 'text':
                    prompt_text = item.get('text', '')
        else:
            # Handle string content: either a bare data URL or plain prompt text
            if isinstance(content, str) and content.startswith('data:image'):
                image_base64 = content
            else:
                prompt_text = content if isinstance(content, str) else ""

        if not image_base64:
            raise ValueError("No valid image data found")

        # Decode image to numpy array (None signals a decode failure)
        image_array = self._decode_base64_image(image_base64)

        if image_array is None:
            raise ValueError("Failed to decode image from base64 data")

        # Perform OCR with compatibility handling across PaddleOCR versions
        try:
            # Try the newer predict method first (recommended by PaddleOCR)
            ocr_result = self.model.predict(image_array)
        except Exception as predict_error:
            logger.warning(f"predict() method failed: {predict_error}")
            try:
                # Fallback to ocr method without cls parameter
                ocr_result = self.model.ocr(image_array)
            except Exception as ocr_error:
                logger.warning(f"ocr() method also failed: {ocr_error}")
                # Try ocr method with cls=False as last resort
                # NOTE(review): newer PaddleOCR releases removed the `cls`
                # kwarg — confirm this path still works on the pinned version.
                ocr_result = self.model.ocr(image_array, cls=False)

        # Extract text content using the same logic as ocr_server.py.
        # NOTE(review): this parsing assumes the classic nested-list result
        # layout ([[box, (text, confidence)], ...]); predict() on recent
        # PaddleOCR versions may return result objects instead — verify.
        extracted_text = ""
        if ocr_result is not None:
            # Handle nested list format from PaddleOCR
            result_list = ocr_result
            if isinstance(result_list, list) and len(result_list) > 0:
                # Check if it's nested format (multiple images)
                if isinstance(result_list[0], list) and len(result_list[0]) > 0:
                    first_item = result_list[0][0] if isinstance(result_list[0], list) else result_list[0]
                    if isinstance(first_item, list) and len(first_item) >= 2:
                        # Standard nested format: [[[[box], [text, confidence]], ...]]
                        result_list = result_list[0]

                # Process each detected text line
                for line_idx, line in enumerate(result_list):
                    if line and isinstance(line, list) and len(line) >= 2:
                        # line format: [[box_coordinates], [text, confidence]]
                        text_info = line[1]

                        # Extract text using robust method from ocr_server.py:
                        # tolerate str, (text, conf) pairs, and nested lists.
                        text_str = ""
                        try:
                            if isinstance(text_info, (list, tuple)) and len(text_info) > 0:
                                text_candidate = text_info[0]

                                if isinstance(text_candidate, str):
                                    text_str = text_candidate
                                elif isinstance(text_candidate, (list, tuple)):
                                    # Handle nested format: flatten all strings
                                    text_parts = []

                                    def extract_strings(obj, parts):
                                        # Recursively collect every string leaf
                                        if isinstance(obj, str):
                                            parts.append(obj)
                                        elif isinstance(obj, (list, tuple)):
                                            for item in obj:
                                                extract_strings(item, parts)

                                    extract_strings(text_candidate, text_parts)
                                    text_str = " ".join(text_parts) if text_parts else ""
                                else:
                                    text_str = str(text_candidate) if text_candidate else ""
                            elif isinstance(text_info, str):
                                text_str = text_info
                            else:
                                text_str = str(text_info) if text_info else ""

                        except Exception as e:
                            # Never let a single malformed line abort the whole page
                            logger.warning(f"Error extracting text from line {line_idx}: {e}")
                            try:
                                text_str = str(text_info) if text_info else ""
                            except:
                                text_str = ""

                        # Final validation and processing: coerce any leftover
                        # non-string value, then append non-empty text
                        if text_str:
                            if not isinstance(text_str, str):
                                if isinstance(text_str, (list, tuple)):
                                    text_str = " ".join(str(x) for x in text_str if x)
                                else:
                                    text_str = str(text_str)

                            if isinstance(text_str, str):
                                cleaned_text = text_str.strip()
                                if cleaned_text:
                                    extracted_text += cleaned_text + " "

        extracted_text = extracted_text.strip()

        # Format response based on prompt. NOTE(review): the table branch is
        # currently identical to the default branch — placeholder for future
        # table-aware formatting?
        if "表格" in prompt_text or "table" in prompt_text.lower():
            response_content = extracted_text
        elif "json" in prompt_text.lower():
            import json
            response_content = json.dumps({"text": extracted_text}, ensure_ascii=False)
        else:
            response_content = extracted_text

        # Build OpenAI-compatible response
        response = {
            "id": f"chatcmpl-{int(time.time())}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": response_content
                    },
                    "finish_reason": "stop"
                }
            ],
            "usage": {
                # Whitespace-split counts are approximations, not tokenizer counts
                "prompt_tokens": 0,
                "completion_tokens": len(extracted_text.split()) if isinstance(extracted_text, str) else 0,
                "total_tokens": len(extracted_text.split()) if isinstance(extracted_text, str) else 0
            }
        }

        return response


# ==================== Main Service ====================
@bentoml.service(
    resources={"cpu": "4"},
    traffic={"timeout": 300},
)
class MultiModelService:
    """
    Gateway service wiring the Embedding, Reranker, and OCR runnables into a
    single deployment behind OpenAI-compatible HTTP routes.
    """

    embedding = bentoml.depends(EmbeddingRunnable)
    reranker = bentoml.depends(RerankerRunnable)
    ocr = bentoml.depends(OCRRunnable)

    @bentoml.api(route="/v1/embeddings")
    def embeddings(
        self,
        input: List[str] | str,
        model: str = "m3e-large",
        encoding_format: str = "float",
        truncate_prompt_tokens: Optional[int] = None
    ) -> Dict[str, Any]:
        """OpenAI-compatible embeddings endpoint (POST /v1/embeddings).

        ``encoding_format`` and ``truncate_prompt_tokens`` are accepted for
        request-schema compatibility but are not forwarded to the model.
        """
        return self.embedding.embed(input_data=input, model=model)

    @bentoml.api(route="/v1/rerank")
    def rerank(
        self,
        query: str,
        documents: List[str],
        model: str = "BAAI/bge-reranker-v2-m3",
        top_n: Optional[int] = None,
        return_documents: bool = True
    ) -> Dict[str, Any]:
        """Reranker endpoint (POST /v1/rerank)."""
        rerank_kwargs = {
            "query": query,
            "documents": documents,
            "model": model,
            "top_n": top_n,
            "return_documents": return_documents,
        }
        return self.reranker.rerank(**rerank_kwargs)

    @bentoml.api(route="/v1/chat/completions")
    def chat_completions(
        self,
        messages: List[Dict[str, Any]],
        model: str = "paddle-ocr-v4",
        temperature: float = 1.0,
        max_tokens: Optional[int] = None
    ) -> Dict[str, Any]:
        """OCR endpoint in OpenAI chat-completions shape (POST /v1/chat/completions).

        ``temperature`` and ``max_tokens`` exist only for schema
        compatibility; they are ignored by the OCR backend.
        """
        return self.ocr.ocr_chat_completion(messages=messages, model=model)

    @bentoml.api(route="/health")
    def health(self) -> Dict[str, str]:
        """Health-check endpoint (GET /health)."""
        return {"status": "healthy", "service": "multi-model-service"}

    # Compatibility routes without the /v1 prefix
    @bentoml.api(route="/embeddings")
    def embeddings_compat(
        self,
        input: List[str] | str,
        model: str = "m3e-large",
        encoding_format: str = "float",
        truncate_prompt_tokens: Optional[int] = None
    ) -> Dict[str, Any]:
        """Compatibility route: POST /embeddings (delegates to /v1/embeddings)."""
        return self.embeddings(
            input=input,
            model=model,
            encoding_format=encoding_format,
            truncate_prompt_tokens=truncate_prompt_tokens,
        )

    @bentoml.api(route="/rerank")
    def rerank_compat(
        self,
        query: str,
        documents: List[str],
        model: str = "BAAI/bge-reranker-v2-m3",
        top_n: Optional[int] = None,
        return_documents: bool = True
    ) -> Dict[str, Any]:
        """Compatibility route: POST /rerank (delegates to /v1/rerank)."""
        return self.rerank(
            query=query,
            documents=documents,
            model=model,
            top_n=top_n,
            return_documents=return_documents,
        )

    # WeKnora rerank route (uses the /reranker path)
    @bentoml.api(route="/reranker")
    def reranker_endpoint(
        self,
        query: str,
        documents: List[str],
        model: str = "bge-reranker-v2-m3",
        top_n: Optional[int] = None,
        return_documents: bool = True,
        additional_data: Optional[Dict] = None,
        truncate_prompt_tokens: Optional[int] = None
    ) -> Dict[str, Any]:
        """WeKnora rerank interface: POST /reranker.

        ``additional_data`` and ``truncate_prompt_tokens`` are accepted for
        client compatibility and ignored.
        """
        return self.rerank(
            query=query,
            documents=documents,
            model=model,
            top_n=top_n,
            return_documents=return_documents,
        )
