import os
from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer

# Local model directory (standard POSIX path; a raw-string r"" prefix is
# only needed for Windows backslash paths, not on Linux).
model_path = "/opt/model/sshleifer/distilbart-cnn-12-6"

def validate_and_load_model(path):
    """Validate a local model directory and load the model and tokenizer.

    Args:
        path: Filesystem path to a locally downloaded
            seq2seq model directory (config, tokenizer files, weights).

    Returns:
        A ``(model, tokenizer)`` tuple loaded with ``local_files_only=True``
        so no network access is attempted.

    Raises:
        ValueError: if ``path`` is ``None``.
        TypeError: if ``path`` is not a non-blank string.
        FileNotFoundError: if the path does not exist or required model
            files are missing from the directory.
        NotADirectoryError: if ``path`` exists but is not a directory.
    """
    # Guard clauses: fail fast with a specific, actionable error.
    if path is None:
        raise ValueError("模型路径不能为空（None），请检查路径赋值")

    if not isinstance(path, str) or not path.strip():
        raise TypeError(f"模型路径必须是有效的字符串，当前值: {path}（类型: {type(path)}）")

    if not os.path.exists(path):
        raise FileNotFoundError(f"模型路径不存在: {path}\n请确认路径拼写正确")

    if not os.path.isdir(path):
        raise NotADirectoryError(f"路径不是有效的目录: {path}")

    # Config and tokenizer files are always required.
    required_files = [
        "config.json",
        "tokenizer_config.json", "vocab.json", "merges.txt",
    ]
    missing = [f for f in required_files if not os.path.exists(os.path.join(path, f))]

    # Weights may be shipped either as the legacy PyTorch pickle
    # (pytorch_model.bin) or the newer safetensors format — accept either,
    # since recent Hugging Face downloads default to model.safetensors.
    weight_candidates = ("pytorch_model.bin", "model.safetensors")
    if not any(os.path.exists(os.path.join(path, w)) for w in weight_candidates):
        missing.append(" / ".join(weight_candidates))

    if missing:
        raise FileNotFoundError(f"缺少必要文件: {', '.join(missing)}\n请重新下载模型")

    # local_files_only=True prevents any fallback to the Hugging Face Hub.
    tokenizer = AutoTokenizer.from_pretrained(path, local_files_only=True)
    model = AutoModelForSeq2SeqLM.from_pretrained(path, local_files_only=True)
    return model, tokenizer

try:
    # Validate the local directory and load model + tokenizer from disk.
    model, tokenizer = validate_and_load_model(model_path)

    # Build a summarization pipeline from the pre-loaded components;
    # min/max_length bound the generated summary in tokens.
    summarizer = pipeline(
        "summarization",
        model=model,
        tokenizer=tokenizer,
        min_length=8,
        max_length=32
    )

    # Run a single summarization over a sample abstract.
    text = "In this work, we presented the Transformer, the first sequence transduction model based entirely on attention, replacing the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention. For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers. On both WMT 2014 English-to-German and WMT 2014 English-to-French translation tasks, we achieve a new state of the art. In the former task our best model outperforms even all previously reported ensembles."

    result = summarizer(text)
    print("summarization：", result)

except Exception as e:
    # Top-level boundary: report the failure and print recovery steps.
    print(f"错误: {str(e)}")
    print("\n紧急处理步骤:")
    print("1. 执行以下命令检查路径是否存在：")
    print(f"   ls -ld {model_path}")
    print("2. 若路径不存在，重新创建并下载模型：")
    print(f"   mkdir -p {model_path}")
    # BUG FIX: the five wget lines below were plain strings missing the
    # f-prefix, so the literal text "{model_path}" was printed instead of
    # the actual path the user needs to pass to wget -P.
    print(f"   wget -P {model_path} https://hf-mirror.com/sshleifer/distilbart-cnn-12-6/resolve/main/config.json")
    print(f"   wget -P {model_path} https://hf-mirror.com/sshleifer/distilbart-cnn-12-6/resolve/main/pytorch_model.bin")
    print(f"   wget -P {model_path} https://hf-mirror.com/sshleifer/distilbart-cnn-12-6/resolve/main/tokenizer_config.json")
    print(f"   wget -P {model_path} https://hf-mirror.com/sshleifer/distilbart-cnn-12-6/resolve/main/vocab.json")
    print(f"   wget -P {model_path} https://hf-mirror.com/sshleifer/distilbart-cnn-12-6/resolve/main/merges.txt")
    print("3. 赋予权限：")
    print(f"   sudo chmod -R 755 {model_path}")
