import argparse
import asyncio
import base64
import io
import json
import os
import platform
import shutil
import subprocess
import sys
from pathlib import Path

import aiofiles.threadpool
from dotenv import load_dotenv
from pyzerox import zerox

import config
from custom_zerox import custom_zerox

# Preserve the original aiofiles.threadpool.open so the wrapper can delegate.
original_open = aiofiles.threadpool.open

def utf8_open(file, mode='r', **kwargs):
    """Wrapper around aiofiles' open that defaults text-mode files to UTF-8.

    The previous implementation only forced UTF-8 for modes containing 'w',
    so 'r', 'a' and 'x' text modes still used the platform default encoding
    (e.g. cp936 on Chinese Windows), contradicting the intent that every
    aiofiles-opened file uses UTF-8. Now any text mode (no 'b') without an
    explicit encoding gets UTF-8; binary modes are left untouched.
    """
    if 'b' not in mode and 'encoding' not in kwargs:
        kwargs['encoding'] = 'utf-8'
    return original_open(file, mode, **kwargs)

# Install the wrapper so files opened through aiofiles.threadpool.open
# default to UTF-8 encoding.
# NOTE(review): `aiofiles.open` is a separate name bound at aiofiles import
# time, so code calling `aiofiles.open` directly (e.g. write_utf8_file below)
# may still get the original function — verify if that path matters.
aiofiles.threadpool.open = utf8_open

# Force UTF-8 on stdout/stderr so Chinese text prints correctly even when
# the console's default encoding is something else (e.g. cp936 on Windows).
# The encoding name is normalised before comparing: Python may report the
# codec as 'UTF-8' or 'utf-8' depending on configuration, and `encoding`
# can be None for non-console streams; the original case-sensitive
# comparison re-wrapped the streams unnecessarily in those cases.
if (sys.stdout.encoding or '').lower() != 'utf-8':
    try:
        sys.stdout.reconfigure(encoding='utf-8')
        sys.stderr.reconfigure(encoding='utf-8')
    except AttributeError:
        # reconfigure() requires Python 3.7+; on 3.6 and earlier fall back
        # to re-wrapping the underlying binary buffers.
        sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
        sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')

# Load environment variables (API keys etc.) from a .env file.
load_dotenv()

### Model Setup (Use only Vision Models) Refer: https://docs.litellm.ai/docs/providers ###

## placeholder for additional model kwargs which might be required for some models
kwargs = {}

## system prompt to use for the vision model
custom_system_prompt = None

# to override
# custom_system_prompt = "For the below PDF page, do something..something..." ## example

# ###################### Example for OpenAI ######################
# model = "gpt-4o-mini" ## openai model
# os.environ["OPENAI_API_KEY"] = "" ## your-api-key


# ###################### Example for Azure OpenAI ######################
# model = "azure/gpt-4o-mini" ## "azure/<your_deployment_name>" -> format <provider>/<model>
# os.environ["AZURE_API_KEY"] = "" # "your-azure-api-key"
# os.environ["AZURE_API_BASE"] = "" # "https://example-endpoint.openai.azure.com"
# os.environ["AZURE_API_VERSION"] = "" # "2023-05-15"


###################### Example for Gemini ######################
# "gemini/gemini-1.5-pro-vision" was rejected with: "The provided model is
# not a vision model. Please provide a vision model." — hence the flash model.
# NOTE(review): this module-level `model` is not referenced by main() or
# process_document() below (they resolve models via config) — possibly legacy.
model = "gemini/gemini-1.5-flash"  # alternative: "gemini/gemini-1.5-flash-vision"
# The API key is read from the .env file rather than hard-coded here:
# os.environ['GEMINI_API_KEY'] = "" # your-gemini-api-key


# ###################### Example for Anthropic ######################
# model="claude-3-opus-20240229"
# os.environ["ANTHROPIC_API_KEY"] = "" # your-anthropic-api-key

# # ###################### Vertex ai ######################
# # model = "vertex_ai/gemini-1.5-flash-001" ## "vertex_ai/<model_name>" -> format <provider>/<model>
# # ## GET CREDENTIALS
# # ## RUN ##
# # # !gcloud auth application-default login - run this to add vertex credentials to your env
## OR ##
# file_path = 'path/to/vertex_ai_service_account.json'

# # Load the JSON file
# with open(file_path, 'r') as file:
#     vertex_credentials = json.load(file)

# # Convert to JSON string
# vertex_credentials_json = json.dumps(vertex_credentials)

# vertex_credentials=vertex_credentials_json

# ## extra args
# kwargs = {"vertex_credentials": vertex_credentials}

###################### For other providers refer: https://docs.litellm.ai/docs/providers ######################

def check_poppler_installation():
    """Return True if the Poppler utilities are available on PATH.

    Looks for the ``pdfinfo`` executable, which the PDF-to-image step
    requires. ``shutil.which`` performs the same PATH lookup that the
    previous ``where``/``which`` subprocess calls did, but without
    spawning a process and with identical behavior on Windows, Linux
    and macOS — so no platform branch is needed.
    """
    return shutil.which("pdfinfo") is not None

def get_file_mime_type(file_path):
    """Return the MIME type for *file_path* based on its file extension.

    Uses ``os.path.splitext`` to extract the extension, which correctly
    yields no extension for names like ``release.v2/README``; the previous
    ``split('.')[-1]`` produced a bogus "v2/readme" pseudo-extension there.
    Unknown or missing extensions map to ``application/octet-stream``.
    """
    # splitext returns e.g. ('report', '.pdf'); strip the leading dot.
    extension = os.path.splitext(file_path)[1].lower().lstrip('.')
    mime_types = {
        'pdf': 'application/pdf',
        'jpg': 'image/jpeg',
        'jpeg': 'image/jpeg',
        'png': 'image/png',
        'gif': 'image/gif',
        'mp3': 'audio/mp3',  # NOTE(review): IANA name is audio/mpeg; presumably kept as audio/mp3 for the Gemini file API — confirm
        'mp4': 'video/mp4',
        'txt': 'text/plain',
        'docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
        'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
    }
    return mime_types.get(extension, 'application/octet-stream')

def prepare_file_content(file_path):
    """Build the "file" content part for a local path or a remote URL.

    Remote resources (http/https/gs) are passed through by reference with
    their MIME type; local files are embedded as a base64 data URI.
    Returns None when a local file cannot be read.
    """
    remote_prefixes = ('http://', 'https://', 'gs://')
    if file_path.startswith(remote_prefixes):
        # Remote file: refer to it by URL and advertise its MIME type.
        return {
            "type": "file",
            "file": {
                "file_id": file_path,
                "format": get_file_mime_type(file_path)
            }
        }

    # Local file: inline the raw bytes as a base64 data URI.
    try:
        raw = Path(file_path).read_bytes()
        payload = base64.b64encode(raw).decode("utf-8")
        data_uri = f"data:{get_file_mime_type(file_path)};base64,{payload}"
        return {
            "type": "file",
            "file": {
                "file_data": data_uri
            }
        }
    except Exception as exc:
        # Best-effort: report and signal failure with None instead of raising.
        print(f"处理文件时出错: {str(exc)}")
        return None

def setup_api_keys(model_config):
    """Return True if the environment holds the API key(s) for the provider.

    The original branches contained no-op self-assignments
    (``os.environ["X"] = os.environ.get("X")``); their only real effect was
    a presence check, which is what this table-driven version does directly.

    Args:
        model_config: dict with at least a "provider" key
            ("gemini", "openai", "anthropic" or "azure").

    Returns:
        True when every environment variable required by the provider is
        set to a non-empty value; False otherwise (including unknown
        providers).
    """
    required_keys = {
        "gemini": ("GEMINI_API_KEY",),
        "openai": ("OPENAI_API_KEY",),
        "anthropic": ("ANTHROPIC_API_KEY",),
        # Azure needs the full endpoint configuration, not just a key.
        "azure": ("AZURE_API_KEY", "AZURE_API_BASE", "AZURE_API_VERSION"),
    }.get(model_config["provider"])
    if required_keys is None:
        return False
    return all(os.environ.get(key) for key in required_keys)

# Command-line argument parsing for the OCR tool.
def parse_arguments():
    """Build the argument parser and return the parsed CLI arguments."""
    ap = argparse.ArgumentParser(description='OCR处理工具')
    model_choices = ", ".join(config.AVAILABLE_MODELS.keys())
    ap.add_argument('--model', type=str, default=config.DEFAULT_MODEL,
                    help=f'要使用的模型，可选值: {model_choices}')
    ap.add_argument('--file_path', type=str, required=True,
                    help='要处理的文件路径，可以是本地文件或URL')
    ap.add_argument('--output_dir', type=str, default=config.DEFAULT_OUTPUT_DIR,
                    help='输出目录')
    ap.add_argument('--api_key', type=str,
                    help='API密钥，如果不提供则使用.env文件中的配置')
    ap.add_argument('--system_prompt', type=str, choices=list(config.SYSTEM_PROMPTS.keys()),
                    help='预定义的系统提示类型')
    ap.add_argument('--custom_prompt', type=str,
                    help='自定义系统提示文本')
    ap.add_argument('--select_pages', type=str,
                    help='要处理的页面，例如 "1,2,3" 或 "all"')
    ap.add_argument('--list_models', action='store_true',
                    help='列出所有可用的模型')
    return ap.parse_args()

def list_available_models():
    """Print a formatted table of every model in config.AVAILABLE_MODELS."""
    divider = "-" * 80
    print("\n可用的模型:")
    print(divider)
    print(f"{'模型ID':<20}{'提供商':<15}{'描述':<45}")
    print(divider)
    for mid, info in config.AVAILABLE_MODELS.items():
        print(f"{mid:<20}{info['provider']:<15}{info['description']:<45}")
    print(divider)
    print(f"默认模型: {config.DEFAULT_MODEL}\n")

# Define main async entrypoint
async def main():
    """CLI driver: parse arguments, validate the environment, run OCR.

    Returns the OCR result on success, or a (Chinese) status/error string
    describing what went wrong. On a UnicodeEncodeError it retries once
    with a hard-coded Gemini backup model; on any other error it retries
    once with a Claude backup model before giving up.
    """
    # Parse command-line arguments
    args = parse_arguments()
    
    # If the user only asked for the model list, print it and exit
    if args.list_models:
        list_available_models()
        return "已列出所有可用模型"
    
    # Resolve the model configuration from config.AVAILABLE_MODELS
    if args.model in config.AVAILABLE_MODELS:
        model_config = config.AVAILABLE_MODELS[args.model]
        model_name = model_config["model_name"]
    else:
        print(f"错误: 未知的模型 '{args.model}'")
        list_available_models()
        return "模型错误"
    
    # Export the API key for the chosen provider (CLI flag wins over .env)
    if args.api_key:
        provider = model_config["provider"]
        if provider == "gemini":
            os.environ['GEMINI_API_KEY'] = args.api_key
        elif provider == "openai":
            os.environ['OPENAI_API_KEY'] = args.api_key
        elif provider == "anthropic":
            os.environ['ANTHROPIC_API_KEY'] = args.api_key
    elif not setup_api_keys(model_config):
        print(f"错误: 未提供 {model_config['provider']} API密钥")
        return "API密钥错误"
    
    # Verify that Poppler (needed for PDF rasterisation) is installed
    if not check_poppler_installation():
        print("错误: Poppler 未安装或不在系统 PATH 中")
        print("请安装 Poppler 并确保其在系统 PATH 中")
        print("Windows 安装指南: https://github.com/oschwartz10612/poppler-windows/releases/")
        print("或使用 conda: conda install -c conda-forge poppler")
        return "Poppler 安装错误"

    # Make sure a local input file exists (URLs are passed through as-is)
    file_path = args.file_path
    if not os.path.exists(file_path) and not file_path.startswith(("http://", "https://", "gs://")):
        print(f"错误: 文件 '{file_path}' 不存在")
        return "文件不存在错误"

    # Warn (but continue) when the extension is not in the supported list
    file_extension = file_path.lower().split('.')[-1]
    if file_extension not in config.SUPPORTED_FILE_TYPES:
        print(f"警告: 文件类型 '{file_extension}' 可能不受支持")
    
    # Parse --select_pages ("1,2,3" or "all"); invalid input means all pages
    select_pages = None
    if args.select_pages and args.select_pages.lower() != "all":
        try:
            select_pages = [int(p) for p in args.select_pages.split(',')]
        except ValueError:
            print(f"警告: 无效的页面选择格式 '{args.select_pages}'，将处理所有页面")
    
    # Resolve the system prompt: --custom_prompt wins over --system_prompt
    custom_system_prompt = None
    if args.custom_prompt:
        custom_system_prompt = args.custom_prompt
    elif args.system_prompt and args.system_prompt in config.SYSTEM_PROMPTS:
        custom_system_prompt = config.SYSTEM_PROMPTS[args.system_prompt]
    
    # Output directory for results
    output_dir = args.output_dir
    
    # Extra model kwargs (currently none; see module-level examples above)
    kwargs = {}
    
    try:
        print(f"正在处理文件: {file_path}")
        print(f"使用模型: {args.model}（提供商: {model_config['provider']}）")
        print(f"内部模型名称: {model_name}")
        
        # Run OCR through custom_zerox
        result = await custom_zerox(file_path=file_path, model=model_name, output_dir=output_dir,
                                    custom_system_prompt=custom_system_prompt, select_pages=select_pages, **kwargs)
        
        # Ensure the output directory exists
        os.makedirs(output_dir, exist_ok=True)
        
        # Derive the output name from the input file (without extension)
        file_name = os.path.basename(file_path)
        file_name = os.path.splitext(file_name)[0]
        
        # Build the output path: <output_dir>/<name>.md
        output_file = os.path.join(output_dir, f"{file_name}.md")
        
        # Persist the result using UTF-8 encoding
        if isinstance(result, str):
            await write_utf8_file(output_file, result)
        else:
            # Result is an object; fall back to its string representation
            try:
                content = str(result)
                await write_utf8_file(output_file, content)
            except Exception as e:
                print(f"处理结果格式错误: {str(e)}")
        
        print(f"处理完成，结果已保存到 {output_file}")
        return result
    except UnicodeEncodeError as e:
        print(f"编码错误: {str(e)}")
        print("这可能是由于中文或特殊字符导致的。尝试使用 UTF-8 编码处理...")
        
        # Retry once with a backup model, then save the result manually
        try:
            backup_model = "gemini/gemini-1.5-flash"  # known-working model
            print(f"使用备用模型（内部名称: {backup_model}）...")
            result = await custom_zerox(file_path=file_path, model=backup_model, output_dir=output_dir,
                                       custom_system_prompt=custom_system_prompt, select_pages=select_pages, **kwargs)
            
            # Save the result manually (same naming scheme as the success path)
            file_name = os.path.basename(file_path)
            file_name = os.path.splitext(file_name)[0]
            output_file = os.path.join(output_dir, f"{file_name}.md")
            
            if isinstance(result, str):
                await write_utf8_file(output_file, result)
            else:
                content = str(result)
                await write_utf8_file(output_file, content)
                
            return result
        except Exception as backup_error:
            print(f"备用模型也失败: {str(backup_error)}")
            import traceback
            traceback.print_exc()
            return f"处理错误: {str(e)}\n备用模型错误: {str(backup_error)}"
    except Exception as e:
        print(f"处理文件时出错: {str(e)}")
        print("尝试使用备用模型...")
        
        # Retry once with an Anthropic backup model (no manual save here)
        try:
            backup_model = "claude-3-sonnet-20240229"  # Anthropic models generally support vision
            print(f"使用备用模型（内部名称: {backup_model}）...")
            result = await custom_zerox(file_path=file_path, model=backup_model, output_dir=output_dir,
                                       custom_system_prompt=custom_system_prompt, select_pages=select_pages, **kwargs)
            return result
        except Exception as backup_error:
            print(f"备用模型也失败: {str(backup_error)}")
            import traceback
            traceback.print_exc()
            return f"处理错误: {str(e)}\n备用模型错误: {str(backup_error)}"

# Simple programmatic API entry point (mirrors main() without the CLI layer).
async def process_document(model_id=None, file_path=None, output_dir=None, system_prompt_type=None,
                           custom_prompt=None, select_pages=None, api_key=None):
    """Process a document through custom_zerox.

    Args:
        model_id: model identifier such as 'gemini-flash'; defaults to
            config.DEFAULT_MODEL.
        file_path: local path or URL of the document (required).
        output_dir: output directory; defaults to config.DEFAULT_OUTPUT_DIR.
        system_prompt_type: key into config.SYSTEM_PROMPTS for a predefined
            system prompt.
        custom_prompt: free-form system prompt; takes precedence over
            system_prompt_type.
        select_pages: list of page numbers to process, or None for all.
        api_key: API key; when omitted, falls back to the environment/.env
            configuration via setup_api_keys.

    Returns:
        The custom_zerox result, or a (Chinese) error message string.
    """
    # Validate required input.
    if not file_path:
        return "错误: 未提供文件路径"

    # Apply defaults.
    model_id = model_id or config.DEFAULT_MODEL
    output_dir = output_dir or config.DEFAULT_OUTPUT_DIR

    # Resolve the model configuration.
    model_config = config.AVAILABLE_MODELS.get(model_id)
    if model_config is None:
        return f"错误: 未知的模型 '{model_id}'"
    model_name = model_config["model_name"]

    # Export the API key for the provider (explicit key wins over .env).
    if api_key:
        env_var = {
            "gemini": "GEMINI_API_KEY",
            "openai": "OPENAI_API_KEY",
            "anthropic": "ANTHROPIC_API_KEY",
        }.get(model_config["provider"])
        if env_var:
            os.environ[env_var] = api_key
    elif not setup_api_keys(model_config):
        return f"错误: 未提供 {model_config['provider']} API密钥"

    # Resolve the system prompt: custom_prompt wins over the predefined type.
    system_prompt = None
    if custom_prompt:
        system_prompt = custom_prompt
    elif system_prompt_type and system_prompt_type in config.SYSTEM_PROMPTS:
        system_prompt = config.SYSTEM_PROMPTS[system_prompt_type]

    try:
        return await custom_zerox(file_path=file_path, model=model_name, output_dir=output_dir,
                                  custom_system_prompt=system_prompt, select_pages=select_pages)
    except Exception as e:
        print(f"API处理错误: {str(e)}")
        import traceback
        traceback.print_exc()
        return f"API处理错误: {str(e)}"

# UTF-8 file writer used by main() to persist OCR results.
async def write_utf8_file(file_path, content):
    """Write *content* to *file_path* as UTF-8, asynchronously when possible.

    Tries aiofiles first; on any failure (including aiofiles not being
    importable) falls back to a plain synchronous write. Returns True on
    success, False when both attempts fail.
    """
    try:
        import aiofiles
        async with aiofiles.open(file_path, 'w', encoding='utf-8') as handle:
            await handle.write(content)
    except Exception as async_err:
        print(f"写入文件时出错: {str(async_err)}")
        # Async path failed — fall back to blocking I/O.
        try:
            with open(file_path, 'w', encoding='utf-8') as handle:
                handle.write(content)
        except Exception as sync_err:
            print(f"同步写入也失败: {str(sync_err)}")
            return False
    return True

# Script entry point: run the async main() and echo the result.
if __name__ == "__main__":
    result = asyncio.run(main())

    # print markdown result
    print(result)