{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# Pipeline尝鲜 (Work1)",
   "id": "64900eca257ed812"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "### 设置代理和模型缓存目录",
   "id": "c4b8309d9a6d98c5"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-28T17:02:29.072958Z",
     "start_time": "2025-08-28T17:02:29.069935Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import os\n",
    "\n",
    "# Route model downloads through the local proxy.\n",
    "os.environ.update(\n",
    "    http_proxy=\"http://127.0.0.1:7890\",\n",
    "    https_proxy=\"http://127.0.0.1:7890\",\n",
    ")"
   ],
   "id": "fc900f69d1016ffe",
   "outputs": [],
   "execution_count": 3
  },
  {
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-08-28T17:02:32.109917Z",
     "start_time": "2025-08-28T17:02:29.080389Z"
    }
   },
   "cell_type": "code",
   "source": "from transformers import pipeline",
   "id": "initial_id",
   "outputs": [],
   "execution_count": 4
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "### Part 1: 情感分析 (Sentiment Analysis)",
   "id": "92fb5432e659d5"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-28T17:02:33.676148Z",
     "start_time": "2025-08-28T17:02:32.120387Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Task-only construction: the pipeline falls back to a default English\n",
    "# sentiment model (not recommended for production use).\n",
    "pipe = pipeline(task=\"sentiment-analysis\")\n",
    "pipe(\"今儿上海可真冷啊\")"
   ],
   "id": "32a2d9eaee293fb1",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "No model was supplied, defaulted to distilbert/distilbert-base-uncased-finetuned-sst-2-english and revision 714eb0f (https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english).\n",
      "Using a pipeline without specifying a model name and revision in production is not recommended.\n",
      "Device set to use cuda:0\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[{'label': 'NEGATIVE', 'score': 0.8957214951515198}]"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-28T17:02:33.709719Z",
     "start_time": "2025-08-28T17:02:33.691711Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# The pipeline also accepts a batch of texts, one result per item.\n",
    "sentences = [\n",
    "    \"今儿我家娃就要上幼儿园了.\",\n",
    "    \"时间过得可真快.\",\n",
    "    \"不一会就长大了.\",\n",
    "    \"哎! 是不是我们也老了.\",\n",
    "    \"但是我很欣慰.\"\n",
    "]\n",
    "pipe(sentences)"
   ],
   "id": "ddf515a1cf872fec",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[{'label': 'NEGATIVE', 'score': 0.9479220509529114},\n",
       " {'label': 'NEGATIVE', 'score': 0.8526216149330139},\n",
       " {'label': 'NEGATIVE', 'score': 0.9256910681724548},\n",
       " {'label': 'NEGATIVE', 'score': 0.9616868495941162},\n",
       " {'label': 'NEGATIVE', 'score': 0.9354894757270813}]"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 6
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "这里的\"但是我很欣慰\" 应该是积极向, 这里有点问题",
   "id": "f9159bec5a12b8ae"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "### Part 2: 命名实体识别",
   "id": "1d3eeeac3a23a7ce"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-28T17:02:34.977751Z",
     "start_time": "2025-08-28T17:02:33.722495Z"
    }
   },
   "cell_type": "code",
   "source": [
    "classifier = pipeline(task=\"ner\")\n",
    "# preds = classifier(\"今儿我家娃就要上幼儿园了.时间过得可真快.不一会就长大了.哎! 是不是我们也老了.但是我很欣慰.\")\n",
    "preds = classifier(\"Hugging Face is a French company based in New York City.\")\n",
    "# Re-pack each prediction, rounding the score for readable printing.\n",
    "rounded = []\n",
    "for pred in preds:\n",
    "    rounded.append({\n",
    "        \"entity\": pred[\"entity\"],\n",
    "        \"score\": round(pred[\"score\"], 4),\n",
    "        \"index\": pred[\"index\"],\n",
    "        \"word\": pred[\"word\"],\n",
    "        \"start\": pred[\"start\"],\n",
    "        \"end\": pred[\"end\"],\n",
    "    })\n",
    "print(*rounded, sep=\"\\n\")"
   ],
   "id": "62084e4cd180077b",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "No model was supplied, defaulted to dbmdz/bert-large-cased-finetuned-conll03-english and revision 4c53496 (https://huggingface.co/dbmdz/bert-large-cased-finetuned-conll03-english).\n",
      "Using a pipeline without specifying a model name and revision in production is not recommended.\n",
      "Some weights of the model checkpoint at dbmdz/bert-large-cased-finetuned-conll03-english were not used when initializing BertForTokenClassification: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight']\n",
      "- This IS expected if you are initializing BertForTokenClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertForTokenClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
      "Device set to use cuda:0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'entity': 'I-ORG', 'score': np.float32(0.9968), 'index': 1, 'word': 'Hu', 'start': 0, 'end': 2}\n",
      "{'entity': 'I-ORG', 'score': np.float32(0.9293), 'index': 2, 'word': '##gging', 'start': 2, 'end': 7}\n",
      "{'entity': 'I-ORG', 'score': np.float32(0.9763), 'index': 3, 'word': 'Face', 'start': 8, 'end': 12}\n",
      "{'entity': 'I-MISC', 'score': np.float32(0.9983), 'index': 6, 'word': 'French', 'start': 18, 'end': 24}\n",
      "{'entity': 'I-LOC', 'score': np.float32(0.999), 'index': 10, 'word': 'New', 'start': 42, 'end': 45}\n",
      "{'entity': 'I-LOC', 'score': np.float32(0.9987), 'index': 11, 'word': 'York', 'start': 46, 'end': 50}\n",
      "{'entity': 'I-LOC', 'score': np.float32(0.9992), 'index': 12, 'word': 'City', 'start': 51, 'end': 55}\n"
     ]
    }
   ],
   "execution_count": 7
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "默认模型貌似不支持中文",
   "id": "fad65c482653fc40"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-28T17:02:36.147935Z",
     "start_time": "2025-08-28T17:02:34.989486Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# `grouped_entities=True` is deprecated (removal slated for transformers v5);\n",
    "# `aggregation_strategy=\"simple\"` is its documented replacement and merges\n",
    "# sub-word tokens into whole entities (e.g. \"Hu\" + \"##gging\" + \"Face\").\n",
    "classifier = pipeline(task=\"ner\", aggregation_strategy=\"simple\")\n",
    "classifier(\"Hugging Face is a French company based in New York City.\")"
   ],
   "id": "b4a633e4b1ec971f",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "No model was supplied, defaulted to dbmdz/bert-large-cased-finetuned-conll03-english and revision 4c53496 (https://huggingface.co/dbmdz/bert-large-cased-finetuned-conll03-english).\n",
      "Using a pipeline without specifying a model name and revision in production is not recommended.\n",
      "Some weights of the model checkpoint at dbmdz/bert-large-cased-finetuned-conll03-english were not used when initializing BertForTokenClassification: ['bert.pooler.dense.bias', 'bert.pooler.dense.weight']\n",
      "- This IS expected if you are initializing BertForTokenClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertForTokenClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n",
      "Device set to use cuda:0\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[{'entity_group': 'ORG',\n",
       "  'score': np.float32(0.9674638),\n",
       "  'word': 'Hugging Face',\n",
       "  'start': 0,\n",
       "  'end': 12},\n",
       " {'entity_group': 'MISC',\n",
       "  'score': np.float32(0.99828726),\n",
       "  'word': 'French',\n",
       "  'start': 18,\n",
       "  'end': 24},\n",
       " {'entity_group': 'LOC',\n",
       "  'score': np.float32(0.99896103),\n",
       "  'word': 'New York City',\n",
       "  'start': 42,\n",
       "  'end': 55}]"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 8
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "### Part 3: 问答系统",
   "id": "90bcb258cf554af2"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-28T17:02:39.544230Z",
     "start_time": "2025-08-28T17:02:36.161065Z"
    }
   },
   "cell_type": "code",
   "source": [
    "question_answerer = pipeline(task=\"question-answering\")\n",
    "# Every example follows the same ask-and-report pattern, so drive it with\n",
    "# a loop instead of three copy-pasted call/print pairs.\n",
    "qa_examples = [\n",
    "    (\"What is the name of the repository?\",\n",
    "     \"The name of the repository is huggingface/transformers\"),\n",
    "    (\"What is the capital of China?\",\n",
    "     \"On 1 October 1949, CCP Chairman Mao Zedong formally proclaimed the People's Republic of China in Tiananmen Square, Beijing.\"),\n",
    "    (\"我是哪个公司的? 啥时候入职的?\",\n",
    "     \"本人于2020年10月入职中国电子旗下的中电金信软件有限公司\"),\n",
    "]\n",
    "for question, context in qa_examples:\n",
    "    preds = question_answerer(question=question, context=context)\n",
    "    print(\n",
    "        f\"score: {round(preds['score'], 4)}, start: {preds['start']}, end: {preds['end']}, answer: {preds['answer']}\"\n",
    "    )"
   ],
   "id": "b56ced194e630afa",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "No model was supplied, defaulted to distilbert/distilbert-base-cased-distilled-squad and revision 564e9b5 (https://huggingface.co/distilbert/distilbert-base-cased-distilled-squad).\n",
      "Using a pipeline without specifying a model name and revision in production is not recommended.\n",
      "Device set to use cuda:0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "score: 0.9333, start: 30, end: 54, answer: huggingface/transformers\n",
      "score: 0.9458, start: 115, end: 122, answer: Beijing\n",
      "score: 0.0946, start: 3, end: 10, answer: 2020年10\n"
     ]
    }
   ],
   "execution_count": 9
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "默认模型没法处理连续问题, 换个模型试试",
   "id": "e247a8ceb174173a"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-28T17:02:43.798075Z",
     "start_time": "2025-08-28T17:02:39.561922Z"
    }
   },
   "cell_type": "code",
   "source": [
    "model_name = \"deepset/roberta-base-squad2\"\n",
    "question_answerer = pipeline(task=\"question-answering\", model=model_name, tokenizer=model_name)\n",
    "# Same ask-and-report pattern as above: loop over the examples instead of\n",
    "# copy-pasting three call/print pairs.\n",
    "qa_examples = [\n",
    "    (\"What is the name of the repository?\",\n",
    "     \"The name of the repository is huggingface/transformers\"),\n",
    "    (\"What is the capital of China?\",\n",
    "     \"On 1 October 1949, CCP Chairman Mao Zedong formally proclaimed the People's Republic of China in Tiananmen Square, Beijing.\"),\n",
    "    (\"我是啥时候入职的?\",\n",
    "     \"本人于2020年10月入职中国电子旗下的中电金信软件有限公司\"),\n",
    "]\n",
    "for question, context in qa_examples:\n",
    "    preds = question_answerer(question=question, context=context)\n",
    "    print(\n",
    "        f\"score: {round(preds['score'], 4)}, start: {preds['start']}, end: {preds['end']}, answer: {preds['answer']}\"\n",
    "    )"
   ],
   "id": "1e37a0eac8f8111b",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Device set to use cuda:0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "score: 0.9086, start: 30, end: 54, answer: huggingface/transformers\n",
      "score: 0.7504, start: 115, end: 122, answer: Beijing\n",
      "score: 0.0108, start: 10, end: 30, answer: 月入职中国电子旗下的中电金信软件有限公司\n"
     ]
    }
   ],
   "execution_count": 10
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "时间还是有问题",
   "id": "e2ecc24fe4bfc8cc"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "### Part 4: 文本摘要\n",
    "这里使用 **_t5-base_** (代码中保留了 **_facebook/bart-large-cnn_** 备选)"
   ],
   "id": "a71970853f40f0fe"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-28T17:03:30.906981Z",
     "start_time": "2025-08-28T17:03:28.156988Z"
    }
   },
   "cell_type": "code",
   "source": [
    "summarizer = pipeline(task=\"summarization\",\n",
    "                      # model=\"facebook/bart-large-cnn\",\n",
    "                      model=\"t5-base\",\n",
    "                      )\n",
    "# min_length / max_length are generation parameters, so pass them on the\n",
    "# call. Passed to the constructor they did not take effect here: the run\n",
    "# warned \"Your max_length is set to 200\" and produced a summary far longer\n",
    "# than 32 tokens.\n",
    "summarizer(\n",
    "    \"\"\"\n",
    "    In this work, we presented the Transformer, the first sequence transduction model based entirely on attention,\n",
    "    replacing the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention.\n",
    "    For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers.\n",
    "    On both WMT 2014 English-to-German and WMT 2014 English-to-French translation tasks, we achieve a new state of the art.\n",
    "    In the former task our best model outperforms even all previously reported ensembles.\n",
    "    \"\"\",\n",
    "    min_length=8,\n",
    "    max_length=32,\n",
    ")"
   ],
   "id": "a9e97d02504ba0c7",
   "outputs": [],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "t5: 4s, bart-large-cnn: 10s 左右",
   "id": "e26fb4f3eb0c67e1"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python (py312)",
   "language": "python",
   "name": "py312"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
