{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "7a0d3602-327a-463c-8c1f-51ce8f13466f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "情感分析结果： [{'label': 'negative (stars 1, 2 and 3)', 'score': 0.537394642829895}]\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline\n",
    "\n",
    "# Local model directory (replace with your actual path).\n",
    "local_model_path = \"/opt/model/roberta-base-finetuned-dianping-chinese\"\n",
    "\n",
    "def check_model_files(path):\n",
    "    \"\"\"Check that a local model directory holds every file needed to load it.\n",
    "\n",
    "    Accepts either pytorch_model.bin or model.safetensors as the weight file.\n",
    "    Returns a (is_valid, message) tuple; message explains any failure.\n",
    "    \"\"\"\n",
    "    required_files = [\n",
    "        \"config.json\",           # model configuration (mandatory)\n",
    "        \"tokenizer_config.json\", # tokenizer configuration (mandatory)\n",
    "        \"vocab.txt\"              # tokenizer vocabulary (mandatory)\n",
    "    ]\n",
    "    # The weights may be stored in either of these formats; one is enough.\n",
    "    weight_files = [\n",
    "        \"pytorch_model.bin\",     # classic PyTorch format\n",
    "        \"model.safetensors\"      # safetensors format\n",
    "    ]\n",
    "\n",
    "    # The directory itself must exist before we look inside it.\n",
    "    if not os.path.exists(path):\n",
    "        return False, f\"路径不存在: {path}\"\n",
    "\n",
    "    # Collect whichever mandatory files are absent.\n",
    "    missing_required = []\n",
    "    for fname in required_files:\n",
    "        if not os.path.exists(os.path.join(path, fname)):\n",
    "            missing_required.append(fname)\n",
    "    if missing_required:\n",
    "        return False, f\"缺少必要配置文件: {', '.join(missing_required)}\"\n",
    "\n",
    "    # At least one of the weight-file formats must be present.\n",
    "    for fname in weight_files:\n",
    "        if os.path.exists(os.path.join(path, fname)):\n",
    "            break\n",
    "    else:\n",
    "        return False, f\"缺少模型权重文件，需至少存在一个: {', '.join(weight_files)}\"\n",
    "\n",
    "    return True, \"模型文件完整\"\n",
    "\n",
    "try:\n",
    "    # Validate the files before asking transformers to load anything.\n",
    "    is_valid, message = check_model_files(local_model_path)\n",
    "    if not is_valid:\n",
    "        raise ValueError(message)\n",
    "\n",
    "    # Load tokenizer and model strictly from the local directory (no network).\n",
    "    tokenizer = AutoTokenizer.from_pretrained(\n",
    "        local_model_path,\n",
    "        trust_remote_code=True,\n",
    "        local_files_only=True\n",
    "    )\n",
    "    model = AutoModelForSequenceClassification.from_pretrained(\n",
    "        local_model_path,\n",
    "        trust_remote_code=True,\n",
    "        local_files_only=True\n",
    "    )\n",
    "\n",
    "    # Smoke-test the pipeline on a short Chinese sentence.\n",
    "    classifier = pipeline(\"sentiment-analysis\", model=model, tokenizer=tokenizer)\n",
    "    result = classifier(\"今儿上海可真冷啊\")\n",
    "    print(\"情感分析结果：\", result)\n",
    "\n",
    "except Exception as e:\n",
    "    print(f\"错误：{e}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "da3c7039-2e90-4f76-9bde-faf6bfe95d2b",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Asking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ner： [{'entity': 'positive (stars 4 and 5)', 'score': 0.5112218, 'index': 1, 'word': 'hu', 'start': 0, 'end': 2}, {'entity': 'negative (stars 1, 2 and 3)', 'score': 0.5056061, 'index': 2, 'word': '##gg', 'start': 2, 'end': 4}, {'entity': 'negative (stars 1, 2 and 3)', 'score': 0.5089293, 'index': 3, 'word': '##ing', 'start': 4, 'end': 7}, {'entity': 'positive (stars 4 and 5)', 'score': 0.5232359, 'index': 4, 'word': 'face', 'start': 8, 'end': 12}, {'entity': 'negative (stars 1, 2 and 3)', 'score': 0.5004955, 'index': 5, 'word': 'is', 'start': 13, 'end': 15}, {'entity': 'positive (stars 4 and 5)', 'score': 0.50215495, 'index': 6, 'word': 'a', 'start': 16, 'end': 17}, {'entity': 'positive (stars 4 and 5)', 'score': 0.52170616, 'index': 7, 'word': 'f', 'start': 18, 'end': 19}, {'entity': 'negative (stars 1, 2 and 3)', 'score': 0.5003228, 'index': 8, 'word': '##ren', 'start': 19, 'end': 22}, {'entity': 'negative (stars 1, 2 and 3)', 'score': 0.50385314, 'index': 9, 'word': '##ch', 'start': 22, 'end': 24}, {'entity': 'positive (stars 4 and 5)', 'score': 0.51444966, 'index': 10, 'word': 'company', 'start': 25, 'end': 32}, {'entity': 'positive (stars 4 and 5)', 'score': 0.5064859, 'index': 11, 'word': 'base', 'start': 33, 'end': 37}, {'entity': 'positive (stars 4 and 5)', 'score': 0.50615585, 'index': 12, 'word': '##d', 'start': 37, 'end': 38}, {'entity': 'positive (stars 4 and 5)', 'score': 0.5039248, 'index': 13, 'word': 'in', 'start': 39, 'end': 41}, {'entity': 'positive (stars 4 and 5)', 'score': 0.5152361, 'index': 14, 'word': 'new', 'start': 42, 'end': 45}, {'entity': 'positive (stars 4 and 5)', 'score': 0.52242714, 'index': 15, 'word': 'york', 'start': 46, 'end': 50}, {'entity': 'positive (stars 4 and 5)', 'score': 0.52016234, 'index': 16, 'word': 'city', 'start': 51, 'end': 55}, {'entity': 'positive (stars 4 and 5)', 'score': 0.5106457, 'index': 17, 'word': '.', 'start': 55, 'end': 56}]\n"
     ]
    }
   ],
   "source": [
    "from transformers import pipeline\n",
    "\n",
    "# NOTE: roberta-base-finetuned-dianping-chinese is a *sentiment-classification*\n",
    "# model (Dianping restaurant reviews), not a token-classification / NER model.\n",
    "# Running it under task=\"ner\" only attaches a per-token sentiment label, so the\n",
    "# \"entities\" in the output below are meaningless.  For real Chinese NER, point\n",
    "# `model` at a local copy of a token-classification checkpoint such as\n",
    "# uer/roberta-base-finetuned-cluener2020-chinese.\n",
    "classifier = pipeline(\n",
    "    task=\"ner\",\n",
    "    model=r\"/opt/model/roberta-base-finetuned-dianping-chinese\",  # sentiment model — NOT NER-optimized\n",
    "    device=\"cpu\"\n",
    ")\n",
    "\n",
    "result = classifier(\"Hugging Face is a French company based in New York City.\")\n",
    "print(\"ner：\", result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "8e97f20d-2cb8-4e65-a6dc-24eeca8bb888",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of BertForQuestionAnswering were not initialized from the model checkpoint at /opt/model/roberta-base-finetuned-dianping-chinese and are newly initialized: ['qa_outputs.bias', 'qa_outputs.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "question-answering： {'score': 0.0035992111079394817, 'start': 30, 'end': 54, 'answer': 'huggingface/transformers'}\n"
     ]
    }
   ],
   "source": [
    "from transformers import pipeline\n",
    "\n",
    "# NOTE: this checkpoint is a sentiment-classification model and ships no trained\n",
    "# question-answering head; transformers initializes `qa_outputs` with random\n",
    "# weights (see the warning in the cell output), so the extracted \"answer\" and\n",
    "# its score are meaningless.  Use a local extractive-QA checkpoint (e.g.\n",
    "# uer/roberta-base-chinese-extractive-qa) for real question answering.\n",
    "question_answerer = pipeline(\n",
    "    task=\"question-answering\",\n",
    "    model=r\"/opt/model/roberta-base-finetuned-dianping-chinese\",  # sentiment model — no QA head\n",
    "    device=\"cpu\"\n",
    ")\n",
    "\n",
    "result = question_answerer(question=\"What is the name of the repository?\",\n",
    "                           context=\"The name of the repository is huggingface/transformers\")\n",
    "print(\"question-answering：\", result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "fb3a6932-5dbd-4a0e-b869-161f0e3c32f5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "summarization： [{'summary_text': ' The Transformer is the first sequence transduction model based entirely on attention . It replaces the recurrent layers most commonly used in encoder-decoder'}]\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "from transformers import pipeline, AutoModelForSeq2SeqLM, AutoTokenizer\n",
    "\n",
    "# Local path of the distilbart-cnn-12-6 summarization model.\n",
    "model_path = \"/opt/model/sshleifer/distilbart-cnn-12-6\"\n",
    "\n",
    "def validate_and_load_model(path):\n",
    "    \"\"\"Validate a local model directory, then load the seq2seq model and tokenizer.\n",
    "\n",
    "    Raises ValueError / TypeError / FileNotFoundError / NotADirectoryError with a\n",
    "    descriptive message when the path or a required file is missing.\n",
    "    Returns (model, tokenizer).\n",
    "    \"\"\"\n",
    "    # Guard against an unset path variable.\n",
    "    if path is None:\n",
    "        raise ValueError(\"模型路径不能为空（None），请检查路径赋值\")\n",
    "\n",
    "    # Guard against non-string / blank-string paths.\n",
    "    if not isinstance(path, str) or not path.strip():\n",
    "        raise TypeError(f\"模型路径必须是有效的字符串，当前值: {path}（类型: {type(path)}）\")\n",
    "\n",
    "    if not os.path.exists(path):\n",
    "        raise FileNotFoundError(f\"模型路径不存在: {path}\\n请确认路径拼写正确\")\n",
    "\n",
    "    if not os.path.isdir(path):\n",
    "        raise NotADirectoryError(f\"路径不是有效的目录: {path}\")\n",
    "\n",
    "    # Files this BART-style checkpoint needs: config, weights, BPE tokenizer files.\n",
    "    required_files = [\n",
    "        \"config.json\", \"pytorch_model.bin\",\n",
    "        \"tokenizer_config.json\", \"vocab.json\", \"merges.txt\"\n",
    "    ]\n",
    "    missing = [f for f in required_files if not os.path.exists(os.path.join(path, f))]\n",
    "    if missing:\n",
    "        raise FileNotFoundError(f\"缺少必要文件: {', '.join(missing)}\\n请重新下载模型\")\n",
    "\n",
    "    # Load strictly from disk; never hit the network.\n",
    "    tokenizer = AutoTokenizer.from_pretrained(path, local_files_only=True)\n",
    "    model = AutoModelForSeq2SeqLM.from_pretrained(path, local_files_only=True)\n",
    "    return model, tokenizer\n",
    "\n",
    "try:\n",
    "    # Validate and load the model.\n",
    "    model, tokenizer = validate_and_load_model(model_path)\n",
    "\n",
    "    # Summarization pipeline with a short output window.\n",
    "    summarizer = pipeline(\n",
    "        \"summarization\",\n",
    "        model=model,\n",
    "        tokenizer=tokenizer,\n",
    "        min_length=8,\n",
    "        max_length=32\n",
    "    )\n",
    "\n",
    "    # Run the summary.\n",
    "    text = \"In this work, we presented the Transformer, the first sequence transduction model based entirely on attention, replacing the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention. For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers. On both WMT 2014 English-to-German and WMT 2014 English-to-French translation tasks, we achieve a new state of the art. In the former task our best model outperforms even all previously reported ensembles.\"\n",
    "\n",
    "    result = summarizer(text)\n",
    "    print(\"summarization：\", result)\n",
    "\n",
    "except Exception as e:\n",
    "    print(f\"错误: {str(e)}\")\n",
    "    print(\"\\n紧急处理步骤:\")\n",
    "    print(\"1. 执行以下命令检查路径是否存在：\")\n",
    "    print(f\"   ls -ld {model_path}\")\n",
    "    print(\"2. 若路径不存在，重新创建并下载模型：\")\n",
    "    print(f\"   mkdir -p {model_path}\")\n",
    "    # FIX: the five wget lines below were plain strings, so \"{model_path}\" was\n",
    "    # printed literally instead of the actual path; they must be f-strings like\n",
    "    # the surrounding print() calls.\n",
    "    print(f\"   wget -P {model_path} https://hf-mirror.com/sshleifer/distilbart-cnn-12-6/resolve/main/config.json\")\n",
    "    print(f\"   wget -P {model_path} https://hf-mirror.com/sshleifer/distilbart-cnn-12-6/resolve/main/pytorch_model.bin\")\n",
    "    print(f\"   wget -P {model_path} https://hf-mirror.com/sshleifer/distilbart-cnn-12-6/resolve/main/tokenizer_config.json\")\n",
    "    print(f\"   wget -P {model_path} https://hf-mirror.com/sshleifer/distilbart-cnn-12-6/resolve/main/vocab.json\")\n",
    "    print(f\"   wget -P {model_path} https://hf-mirror.com/sshleifer/distilbart-cnn-12-6/resolve/main/merges.txt\")\n",
    "    print(\"3. 赋予权限：\")\n",
    "    print(f\"   sudo chmod -R 755 {model_path}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "d16e6ac1-53b6-41b1-ace4-7507b0242d98",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/usr/local/python3/lib/python3.8/site-packages/transformers/models/whisper/generation_whisper.py:509: FutureWarning: The input name `inputs` is deprecated. Please make sure to use `input_features` instead.\n",
      "  warnings.warn(\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "automatic-speech-recognition： {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'}\n"
     ]
    }
   ],
   "source": [
    "from transformers import pipeline\n",
    "\n",
    "# Transcribe a local audio clip with a locally stored Whisper-small checkpoint.\n",
    "asr = pipeline(\n",
    "    task=\"automatic-speech-recognition\",\n",
    "    model=\"/opt/model/whisper-small\"\n",
    ")\n",
    "\n",
    "preds = asr(\"./data/audio/mlk.flac\")\n",
    "print(\"automatic-speech-recognition：\", preds)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "c47050b5-499f-47e3-847e-02464dec4b29",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Pillow已安装，版本：10.4.0\n",
      "加载图像处理器...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "执行失败: \n",
      "ViTImageProcessor requires the PIL library but it was not found in your environment. You can install it with pip:\n",
      "`pip install pillow`. Please note that you may need to restart your runtime after installation.\n",
      "\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import sys\n",
    "from transformers import pipeline, ViTImageProcessor, ViTForImageClassification\n",
    "\n",
    "# Local ViT checkpoint and the test image to classify.\n",
    "model_path = \"/opt/model/google/vit-base-patch16-224\"\n",
    "image_path = \"./data/image/pipeline-cat-chonk.jpeg\"\n",
    "\n",
    "def verify_pillow_installation():\n",
    "    \"\"\"Fail fast with reinstall instructions when Pillow cannot be imported.\"\"\"\n",
    "    try:\n",
    "        import PIL\n",
    "        from PIL import Image\n",
    "        print(f\"Pillow已安装，版本：{PIL.__version__}\")\n",
    "        return True\n",
    "    except ImportError:\n",
    "        raise ImportError(\n",
    "            \"Pillow库未正确安装！\\n\"\n",
    "            \"请执行以下命令重新安装：\\n\"\n",
    "            \"pip3 uninstall -y Pillow\\n\"\n",
    "            \"pip3 install Pillow --no-cache-dir\"\n",
    "        )\n",
    "\n",
    "try:\n",
    "    # Pillow must be importable before any vision processor is constructed.\n",
    "    # NOTE(review): transformers appears to cache its PIL-availability probe at\n",
    "    # import time — if Pillow was installed after transformers was first\n",
    "    # imported, restart the kernel; otherwise the processor may still report\n",
    "    # PIL as missing even though it imports here (as in the recorded output).\n",
    "    verify_pillow_installation()\n",
    "\n",
    "    if not os.path.exists(model_path):\n",
    "        raise FileNotFoundError(f\"模型路径不存在: {model_path}\")\n",
    "\n",
    "    if not os.path.exists(image_path):\n",
    "        raise FileNotFoundError(f\"图像文件不存在: {image_path}\")\n",
    "\n",
    "    # Loading the processor is the step that actually requires Pillow.\n",
    "    print(\"加载图像处理器...\")\n",
    "    processor = ViTImageProcessor.from_pretrained(model_path, local_files_only=True)\n",
    "\n",
    "    print(\"加载模型...\")\n",
    "    model = ViTForImageClassification.from_pretrained(model_path, local_files_only=True)\n",
    "\n",
    "    # Classify the image.\n",
    "    classifier = pipeline(\n",
    "        \"image-classification\",\n",
    "        model=model,\n",
    "        image_processor=processor\n",
    "    )\n",
    "\n",
    "    result = classifier(image_path)\n",
    "    print(\"图像分类结果：\", result)\n",
    "\n",
    "except Exception as e:\n",
    "    print(f\"执行失败: {str(e)}\", file=sys.stderr)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "658188c1-61b4-4ef5-9adb-0f407e22abbf",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
