"""
下载并运行 transformer-tiny-en-zh 模型
绕过依赖问题的解决方案
"""

import os
import sys
import subprocess
import requests
import zipfile
from pathlib import Path
from flask import Flask, request, jsonify
from flask_cors import CORS

# Flask application with CORS enabled so browser front-ends on other
# origins can call the /translate and /health endpoints.
app = Flask(__name__)
CORS(app)

# Module-level model state, populated by load_model().
# For a PaddleHub module, `tokenizer` intentionally stays None;
# for the transformers fallback, both are set.
model = None
tokenizer = None

def download_model_files():
    """Download the translation model files into ./transformer_tiny_en_zh.

    Files that already exist locally are skipped, so the function is safe
    to call repeatedly.

    Returns:
        bool: True when every file is present (pre-existing or freshly
        downloaded), False on the first download failure.
    """
    model_dir = Path("transformer_tiny_en_zh")
    model_dir.mkdir(exist_ok=True)

    print("正在下载 transformer-tiny-en-zh 模型文件...")

    # NOTE(review): these URLs point at the Helsinki-NLP fallback model on
    # Hugging Face, not a genuine "transformer-tiny" release — confirm the
    # intended source before enabling this path.
    model_urls = {
        "config.json": "https://huggingface.co/Helsinki-NLP/opus-mt-en-zh/resolve/main/config.json",
        "pytorch_model.bin": "https://huggingface.co/Helsinki-NLP/opus-mt-en-zh/resolve/main/pytorch_model.bin",
        "tokenizer.json": "https://huggingface.co/Helsinki-NLP/opus-mt-en-zh/resolve/main/tokenizer.json",
        "vocab.json": "https://huggingface.co/Helsinki-NLP/opus-mt-en-zh/resolve/main/vocab.json",
    }

    for filename, url in model_urls.items():
        file_path = model_dir / filename
        if file_path.exists():
            continue
        try:
            # BUG FIX: the progress messages previously printed the literal
            # text "(unknown)" instead of interpolating the file name.
            print(f"下载 {filename}...")
            # BUG FIX: requests.get without a timeout can hang indefinitely
            # on a stalled connection.
            response = requests.get(url, stream=True, timeout=60)
            response.raise_for_status()

            # Stream to disk in chunks so large weights don't sit in memory.
            with open(file_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
            print(f"✓ {filename} 下载完成")
        except Exception as e:
            print(f"✗ 下载 {filename} 失败: {e}")
            return False

    return True

def install_required_packages():
    """Install the Python packages the translation backends need via pip.

    Runs ``python -m pip install`` once per package specifier, stopping at
    the first failure.

    Returns:
        bool: True if every package installed, False on the first failure.
    """
    required = (
        "transformers>=4.21.0",
        "torch",
        "sentencepiece",
        "sacremoses",
    )

    pip_install = [sys.executable, "-m", "pip", "install"]
    for spec in required:
        try:
            print(f"安装 {spec}...")
            subprocess.check_call(pip_install + [spec])
        except Exception as e:
            print(f"✗ 安装 {spec} 失败: {e}")
            return False
        print(f"✓ {spec} 安装完成")

    return True

def load_model():
    """Load a translation model, preferring PaddleHub with a transformers fallback.

    Sets the module globals ``model`` and ``tokenizer``. On the PaddleHub
    path ``tokenizer`` is deliberately left as None; on the transformers
    fallback both are populated.

    Returns:
        bool: True if any backend loaded successfully, False otherwise.
    """
    global model, tokenizer

    # --- Attempt 1: PaddleHub tiny models -------------------------------
    # BUG FIX: previously the transformers fallback lived inside the same
    # try as `import paddlehub`, so a missing paddlehub package jumped
    # straight to the outer except and the fallback was never attempted.
    try:
        import paddlehub as hub

        print("尝试使用 transformer-tiny-en2zh 模型...")
        # The published module name is uncertain, so probe several spellings.
        model_names = [
            'transformer-tiny-en2zh',
            'transformer_tiny_en2zh',
            'tiny_transformer_en2zh'
        ]

        for model_name in model_names:
            try:
                print(f"  尝试加载模型: {model_name}")
                model = hub.Module(name=model_name)
                tokenizer = None  # PaddleHub modules bundle their own tokenizer
                print(f"✓ 成功加载模型: {model_name}")
                return True
            except Exception as e:
                print(f"  ✗ 模型 {model_name} 加载失败: {e}")
                continue
    except Exception as e:
        print(f"  ✗ PaddleHub 不可用: {e}")

    # --- Attempt 2: transformers fallback -------------------------------
    try:
        print("尝试使用备用小模型...")
        from transformers import MarianMTModel, MarianTokenizer

        # A relatively small, well-known en->zh model.
        model_name = "Helsinki-NLP/opus-mt-en-zh"
        print(f"使用备用模型: {model_name}")
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)

        print("✓ 备用模型加载成功")
        return True
    except Exception as e:
        print(f"✗ 所有模型加载失败: {e}")
        return False

def translate_text(text):
    """Translate *text* using whichever backend is currently loaded.

    Dispatches on the module globals: a PaddleHub module is identified by
    ``tokenizer`` being None; otherwise the transformers model/tokenizer
    pair is used.

    Returns:
        str: the translation, or a Chinese error message on failure.
    """
    if model is None:
        return "模型未加载"

    try:
        if tokenizer is None:
            # PaddleHub backend: the module exposes translate() directly
            # and may return either a list or a bare string.
            output = model.translate(text)
            if isinstance(output, list):
                return output[0]
            return output

        # Transformers backend: tokenize, beam-search generate, decode.
        encoded = tokenizer(
            text, return_tensors="pt", padding=True, truncation=True, max_length=512
        )
        generated = model.generate(
            **encoded, max_length=512, num_beams=4, early_stopping=True
        )
        return tokenizer.decode(generated[0], skip_special_tokens=True)
    except Exception as e:
        return f"翻译出错: {str(e)}"

@app.route('/translate', methods=['POST'])
def translate():
    """POST /translate — translate the JSON body's ``text`` field.

    Expects ``{"text": "..."}``; responds with ``{"translation": "..."}``.
    Returns 400 for a missing/invalid JSON body or empty text, 500 on
    unexpected errors.
    """
    try:
        # BUG FIX: request.json raises BadRequest on a non-JSON body (which
        # the broad except below turned into a 500) and can be None, which
        # crashed data.get(). silent=True makes both cases a clean 400.
        data = request.get_json(silent=True)
        text = data.get('text', '') if data else ''

        if not text:
            return jsonify({'error': '文本为空'}), 400

        translation = translate_text(text)
        return jsonify({'translation': translation})

    except Exception as e:
        return jsonify({'error': str(e)}), 500

@app.route('/health', methods=['GET'])
def health():
    """GET /health — report whether a translation model is ready.

    Responds with ``status`` ("ready"/"loading"), the model label, and a
    ``model_loaded`` boolean.
    """
    # BUG FIX: readiness previously also required `tokenizer is not None`,
    # but the PaddleHub path deliberately leaves tokenizer as None, so a
    # loaded PaddleHub model was reported as "loading" forever.
    model_loaded = model is not None
    return jsonify({
        'status': 'ready' if model_loaded else 'loading',
        'model': 'transformer-tiny-en-zh',
        'model_loaded': model_loaded
    })

def main():
    """Entry point: install dependencies, load a model, then serve Flask."""
    banner = "=" * 60
    print(banner)
    print("transformer-tiny-en-zh 模型下载和运行程序")
    print(banner)

    # Step 1: make sure the backend packages are installed.
    print("\n步骤1: 安装必要的包...")
    if not install_required_packages():
        print("✗ 包安装失败，退出程序")
        return

    # Step 2: model files are fetched online by the backends themselves;
    # the local download path (download_model_files) is currently disabled.
    print("\n步骤2: 检查模型文件...")

    # Step 3: load whichever translation backend is available.
    print("\n步骤3: 加载翻译模型...")
    if not load_model():
        print("✗ 模型加载失败，退出程序")
        return

    # Step 4: serve the HTTP API.
    print("\n步骤4: 启动翻译服务器...")
    print("\n✓ 所有准备工作完成")
    print("\n服务器地址: http://localhost:5000")
    print("翻译接口: POST http://localhost:5000/translate")
    print("健康检查: GET http://localhost:5000/health")
    print("\n按 Ctrl+C 停止服务器")
    print(banner)

    app.run(host='0.0.0.0', port=5000, debug=False)

# Run the installer/loader/server pipeline only when executed as a script.
if __name__ == '__main__':
    main()