#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
基于Hugging Face Transformers的翻译API服务
支持中文到英文翻译，用于PLC参数名称翻译
"""

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
import re
import uvicorn
from typing import Optional

# Initialize the FastAPI application (metadata appears in the auto-generated /docs UI)
app = FastAPI(title="PLC参数翻译API", description="中文到英文翻译服务", version="1.0.0")

# Request body model
class TranslateRequest(BaseModel):
    """Request payload: text to translate plus source/target language codes."""
    text: str
    from_lang: str = "zh"  # source language; only "zh" is supported by the loaded model
    to_lang: str = "en"    # target language; echoed back in the response

class TranslateResponse(BaseModel):
    """Response payload: the input text, its English translation, and a
    simplified camelCase identifier derived from the translation."""
    original_text: str
    translated_text: str
    simplified_name: str
    from_lang: str
    to_lang: str

# Module-level globals holding the loaded tokenizer/model.
# Populated once at startup by load_translation_model() and read by /translate.
tokenizer = None
model = None

def _load_seq2seq(model_name: str):
    """Load tokenizer + seq2seq model for *model_name*, moving the model to GPU when available."""
    tok = AutoTokenizer.from_pretrained(model_name)
    mdl = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    if torch.cuda.is_available():
        mdl = mdl.to('cuda')
        print("模型已加载到GPU")
    else:
        print("模型已加载到CPU")
    return tok, mdl

def load_translation_model():
    """Load the zh->en translation model into the module-level globals.

    Tries the dedicated Helsinki-NLP opus-mt-zh-en model first; on any
    failure falls back to the multilingual mBART model. Re-raises the
    fallback's exception if neither model can be loaded.
    """
    global tokenizer, model

    try:
        print("正在加载翻译模型...")
        # Dedicated Chinese -> English model (note: zh-en, not en-zh).
        # GPU placement is handled inside _load_seq2seq so both the primary
        # and the fallback model end up on the same device (the original
        # code only moved the primary model to the GPU).
        tokenizer, model = _load_seq2seq("Helsinki-NLP/opus-mt-zh-en")
        print("翻译模型加载完成！")
    except Exception as e:
        print(f"模型加载失败: {e}")
        print("尝试使用备用模型...")
        try:
            # Fallback: multilingual many-to-many model.
            # NOTE(review): mBART needs tokenizer.src_lang set and a forced
            # BOS token for the target language to translate correctly; the
            # /translate endpoint does not set these — confirm before relying
            # on this fallback path.
            tokenizer, model = _load_seq2seq("facebook/mbart-large-50-many-to-many-mmt")
            print("备用模型加载完成！")
        except Exception as e2:
            print(f"备用模型加载也失败: {e2}")
            raise e2

# Words dropped from translations because they add no information to a
# variable name (articles, auxiliaries, and domain filler words).
# Hoisted to module level: the original rebuilt these collections (and the
# regex below) on every call.
_SKIP_WORDS = frozenset({
    'the', 'of', 'and', 'or', 'in', 'on', 'at', 'to', 'for', 'with', 'by',
    'a', 'an', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
    'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could',
    'actual', 'real', 'true', 'experiment', 'test'
})

# Abbreviations for common process-control terms.
_WORD_MAPPING = {
    'temperature': 'temp',
    'pressure': 'pres',
    'flow': 'flow',
    'radiation': 'rad',
    'surface': 'surf',
    'furnace': 'furn',
    'combustion': 'comb',
    'cooling': 'cool',
    'heating': 'heat',
    'water': 'water',
    'air': 'air',
    'gas': 'gas',
    'spray': 'spray',
    'nozzle': 'nozzle'
}

# Anything that is not an ASCII letter/digit/whitespace becomes a space.
_NON_ALNUM_RE = re.compile(r'[^A-Za-z0-9\s]')

def simplify_translation(text: str) -> str:
    """Condense an English translation into a short camelCase variable name.

    Strips punctuation, drops filler words and single characters, abbreviates
    common process-control terms, keeps at most three components, and joins
    them in camelCase. Falls back to ``text.lower()`` when no usable words
    survive filtering; returns *text* unchanged when it is empty.
    """
    if not text:
        return text

    cleaned = _NON_ALNUM_RE.sub(' ', text)

    simplified_words = []
    for word in cleaned.split():
        word = word.lower().strip()
        # Keep only meaningful multi-character words, abbreviated if known.
        if word and word not in _SKIP_WORDS and len(word) > 1:
            simplified_words.append(_WORD_MAPPING.get(word, word))

    # Cap the name at three components to keep identifiers readable.
    simplified_words = simplified_words[:3]

    if not simplified_words:
        # Nothing usable survived filtering; fall back to the raw text.
        return text.lower()

    head, *tail = simplified_words
    return head + ''.join(w.capitalize() for w in tail)

@app.on_event("startup")
async def startup_event():
    """Load the translation model once when the application starts.

    NOTE(review): ``@app.on_event`` is deprecated in recent FastAPI versions
    in favor of lifespan handlers — consider migrating.
    """
    load_translation_model()

@app.post("/translate", response_model=TranslateResponse)
async def translate_text(request: TranslateRequest):
    """Translate Chinese text to English and derive a camelCase variable name.

    Returns the raw translation plus a simplified identifier suitable for
    PLC parameter naming. Responds 400 for empty input, 500 when the model
    is not loaded or generation fails.
    """
    global tokenizer, model

    if tokenizer is None or model is None:
        raise HTTPException(status_code=500, detail="翻译模型未加载")

    text = request.text.strip()
    if not text:
        # Raised before entering the try block: the original raised this
        # inside the try, where `except Exception` swallowed it and
        # re-raised it as a generic 500.
        raise HTTPException(status_code=400, detail="输入文本不能为空")

    try:
        # Tokenize; truncate overly long inputs instead of failing.
        inputs = tokenizer(text, return_tensors="pt", max_length=512, truncation=True)

        # Keep inputs on the same device as the model.
        if torch.cuda.is_available() and next(model.parameters()).is_cuda:
            inputs = {k: v.to('cuda') for k, v in inputs.items()}

        # Deterministic beam search. The original combined do_sample=True
        # with temperature, which made identical requests return different
        # translations — undesirable for an API that feeds variable naming.
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_length=128,
                num_beams=4,
                early_stopping=True
            )

        # Decode the best beam and derive the simplified identifier.
        translated = tokenizer.decode(outputs[0], skip_special_tokens=True)
        simplified = simplify_translation(translated)

        return TranslateResponse(
            original_text=text,
            translated_text=translated,
            simplified_name=simplified,
            from_lang=request.from_lang,
            to_lang=request.to_lang
        )

    except HTTPException:
        # Never re-wrap deliberate HTTP errors as 500s.
        raise
    except Exception as e:
        print(f"翻译错误: {e}")
        raise HTTPException(status_code=500, detail=f"翻译失败: {str(e)}") from e

@app.get("/health")
async def health_check():
    """Report service liveness, model-load state, and GPU availability."""
    model_ready = not (tokenizer is None or model is None)
    report = {"status": "healthy"}
    report["model_loaded"] = model_ready
    report["gpu_available"] = torch.cuda.is_available()
    return report

@app.get("/")
async def root():
    """Landing endpoint pointing callers at the docs and health check."""
    links = {"docs": "/docs", "health": "/health"}
    return {"message": "PLC参数翻译API服务", **links}

if __name__ == "__main__":
    # Start the service when run directly as a script.
    # NOTE(review): host 0.0.0.0 binds all interfaces — confirm this service
    # is meant to be reachable from outside the host.
    uvicorn.run(
        app, 
        host="0.0.0.0", 
        port=8000,
        log_level="info"
    )