import sys
import os

sys.path.append(os.path.dirname(os.path.abspath(__file__)))

from utils import ArgumentParser, ConfigLoader, LOG
from model import GLMModel, OpenAIModel, DeepSeekModel
from translator import PDFTranslator

# NOTE(review): the original lines here called `parser.add_argument(...)` at
# module level, but no `parser` variable exists in this file — the script
# crashed with a NameError before reaching __main__. The options belong inside
# the project's custom ArgumentParser (see utils), not here.
import argparse  # kept at file level; candidates elsewhere may rely on it

# TODO: register these options inside utils.ArgumentParser instead:
#   --api   (action='store_true')       : start the API service
#   --port  (type=int, default=8000)    : API service port

def main() -> None:
    """Entry point: resolve settings, build the AI model, translate the document.

    Setting precedence is: command-line argument > config-file value.

    Raises:
        ValueError: if ``args.model_type`` names an unsupported model backend.
    """
    argument_parser = ArgumentParser()        # project-specific CLI parser
    args = argument_parser.parse_arguments()
    print(args)  # echo parsed arguments for quick inspection

    config = ConfigLoader(args.config).load_config()  # read config file contents

    # Build the model for the requested backend; CLI values win over config.
    # api_key is fetched with .get() so a missing key yields None instead of
    # a KeyError (the model classes are expected to handle/validate it).
    if args.model_type == 'OpenAIModel':
        model_name = args.openai_model or config['OpenAIModel']['model']
        api_key = args.openai_api_key or config['OpenAIModel'].get('api_key')
        model = OpenAIModel(model_name, api_key)
    elif args.model_type == 'DeepSeekModel':
        model_name = args.deepseek_model or config['DeepSeekModel']['model']
        api_key = args.deepseek_api_key or config['DeepSeekModel'].get('api_key')
        model = DeepSeekModel(model_name, api_key)
    else:
        raise ValueError(f"Unsupported model type: {args.model_type}")

    # Source document path and output format (e.g. PDF / Markdown),
    # again with CLI taking precedence over the config file.
    pdf_file_path = args.book or config['common']['book']
    file_format = args.file_format or config['common']['file_format']

    translator = PDFTranslator(model)               # build translator around the model
    translator.translate_pdf(pdf_file_path, file_format)  # run the core translation


if __name__ == "__main__":
    main()