#!/usr/bin/env python3
"""
BGE模型Python后端
这个脚本被Go程序调用，用于生成文本嵌入向量
"""

import sys
import json
import argparse
import logging
from typing import List, Dict, Any

import torch
import numpy as np
from transformers import AutoTokenizer, AutoModel

# Configure logging (basicConfig writes to stderr by default, keeping stdout free for JSON output)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class BGEModel:
    """Wrapper around a BGE embedding model loaded via HuggingFace transformers."""

    def __init__(self, model_path: str, max_length: int = 512, device: str = "cpu"):
        """
        Initialize the wrapper. No model weights are loaded here; call load().

        Args:
            model_path: Path to the pretrained BGE model directory.
            max_length: Maximum token sequence length (inputs are truncated).
            device: Torch device to run inference on ("cpu" or "cuda").
        """
        self.model_path = model_path
        self.max_length = max_length
        self.device = device
        self.tokenizer = None
        self.model = None
        self.is_loaded = False

    def load(self) -> bool:
        """
        Load the tokenizer and model onto the configured device.

        Returns:
            True on success, False on any failure (the error is logged,
            not raised, so the caller can report it as a JSON result).
        """
        try:
            logger.info("Loading tokenizer from %s", self.model_path)
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)

            logger.info("Loading model from %s", self.model_path)
            self.model = AutoModel.from_pretrained(self.model_path)
            self.model.to(self.device)
            self.model.eval()  # disable dropout etc. for deterministic inference

            self.is_loaded = True
            logger.info("Model loaded successfully")
            return True
        except Exception as e:
            logger.error("Failed to load model: %s", e)
            return False

    def encode(self, texts: List[str]) -> List[List[float]]:
        """
        Encode texts into L2-normalized embedding vectors.

        Pools by taking the hidden state of the first ([CLS]) token,
        then normalizes so dot products equal cosine similarity.

        Args:
            texts: List of input strings.

        Returns:
            One embedding (list of floats) per input text; [] for empty input.

        Raises:
            RuntimeError: If the model has not been loaded.
        """
        if not self.is_loaded:
            raise RuntimeError("Model is not loaded")

        # Tokenizing an empty batch raises a confusing downstream error;
        # short-circuit to the obvious answer instead.
        if not texts:
            return []

        try:
            # Tokenize with padding/truncation and move tensors to the device.
            encoded_input = self.tokenizer(
                texts,
                padding=True,
                truncation=True,
                return_tensors='pt',
                max_length=self.max_length
            ).to(self.device)

            # Forward pass without gradient tracking; take the [CLS] position.
            with torch.no_grad():
                model_output = self.model(**encoded_input)
                sentence_embeddings = model_output[0][:, 0]

            # L2-normalize each row.
            sentence_embeddings = torch.nn.functional.normalize(sentence_embeddings, p=2, dim=1)

            # Convert to plain Python lists for JSON serialization.
            return sentence_embeddings.tolist()
        except Exception as e:
            logger.error("Error encoding texts: %s", e)
            raise

    def close(self):
        """Release model/tokenizer references and free cached GPU memory."""
        if self.model is not None:
            del self.model
            self.model = None
        if self.tokenizer is not None:
            del self.tokenizer
            self.tokenizer = None
        self.is_loaded = False

        # Return cached GPU memory to the driver so other processes can use it.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

def main():
    """
    CLI entry point: parse arguments and run the requested action.

    Protocol (for the Go caller):
      - "load":   load the model and print a JSON status object to stdout.
      - "encode": read {"texts": [...]} as JSON from stdin and print
                  {"success": true, "embeddings": [...]} to stdout.

    Exit code is 0 on success and 1 on any failure; all diagnostics go
    to stderr via logging so stdout carries only the JSON result.
    """
    parser = argparse.ArgumentParser(description="BGE Model Backend")
    parser.add_argument("--model-path", type=str, required=True, help="Path to the BGE model")
    parser.add_argument("--max-length", type=int, default=512, help="Maximum sequence length")
    parser.add_argument("--device", type=str, default="cpu", help="Device (cpu/cuda)")
    parser.add_argument("--action", type=str, choices=["encode", "load"], required=True, help="Action to perform")

    args = parser.parse_args()

    # Create the model wrapper (weights are loaded lazily below).
    model = BGEModel(args.model_path, args.max_length, args.device)

    try:
        if args.action == "load":
            success = model.load()
            result = {
                "success": success,
                "message": "Model loaded successfully" if success else "Failed to load model"
            }
            print(json.dumps(result))
            # Consistent with the encode branch: signal failure via the
            # exit code as well as the JSON payload (previously this path
            # exited 0 even when loading failed).
            if not success:
                sys.exit(1)

        elif args.action == "encode":
            # Read the request JSON from stdin.
            input_data = sys.stdin.read()
            # Strip a UTF-8 BOM if the caller's encoder emitted one.
            if input_data.startswith('\ufeff'):
                input_data = input_data[1:]
            try:
                input_json = json.loads(input_data)
            except json.JSONDecodeError as e:
                result = {
                    "success": False,
                    "error": f"Invalid JSON: {e}"
                }
                print(json.dumps(result))
                sys.exit(1)

            texts = input_json.get("texts", [])

            if not texts:
                result = {
                    "success": False,
                    "error": "No texts provided"
                }
                print(json.dumps(result))
                sys.exit(1)

            # Lazily load the model on the first encode request.
            if not model.is_loaded:
                if not model.load():
                    result = {
                        "success": False,
                        "error": "Failed to load model"
                    }
                    print(json.dumps(result))
                    sys.exit(1)

            embeddings = model.encode(texts)

            result = {
                "success": True,
                "embeddings": embeddings
            }
            print(json.dumps(result))

    except Exception as e:
        # SystemExit from the sys.exit(1) calls above is not caught here
        # (it is not an Exception subclass), so the JSON already printed
        # for those paths is not duplicated.
        logger.error("Error: %s", e)
        result = {
            "success": False,
            "error": str(e)
        }
        print(json.dumps(result))
        sys.exit(1)
    finally:
        # Always release model resources, even on failure paths.
        model.close()
if __name__ == "__main__":
    main()