{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "86dc7417",
   "metadata": {},
   "outputs": [],
   "source": [
    "from datasets import load_dataset\n",
    "\n",
    "# DuReaderQG (Chinese QA/question-generation data): each JSON record carries\n",
    "# 'context', 'answer', 'question', 'id' (schema confirmed by the cells below).\n",
    "train_file_data = load_dataset('json', data_files=['./data/DuReaderQG/train.json'])\n",
    "dev_file_data = load_dataset('json', data_files=['./data/DuReaderQG/dev.json'])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "0aa5127c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['第35集雪见缓缓张开眼睛，景天又惊又喜之际，长卿和紫萱的仙船驶至，见众人无恙，也十分高兴。众人登船，用尽合力把自身的真气和水分输给她。雪见终于醒过来了，但却一脸木然，全无反应。众人向常胤求助，却发现人世界竟没有雪见的身世纪录。长卿询问清微的身世，清微语带双关说一切上了天界便有答案。长卿驾驶仙船，众人决定立马动身，往天界而去。众人来到一荒山，长卿指出，魔界和天界相连。由魔界进入通过神魔之井，便可登天。众人至魔界入口，仿若一黑色的蝙蝠洞，但始终无法进入。后来花楹发现只要有翅膀便能飞入。于是景天等人打下许多乌鸦，模仿重楼的翅膀，制作数对翅膀状巨物。刚佩戴在身，便被吸入洞口。众人摔落在地，抬头发现魔界守卫。景天和众魔套交情，自称和魔尊重楼相熟，众魔不理，打了起来。',\n",
       " '选择燃气热水器时，一定要关注这几个问题：1、出水稳定性要好，不能出现忽热忽冷的现象2、快速到达设定的需求水温3、操作要智能、方便4、安全性要好，要装有安全报警装置 市场上燃气热水器品牌众多，购买时还需多加对比和仔细鉴别。方太今年主打的磁化恒温热水器在使用体验方面做了全面升级：9秒速热，可快速进入洗浴模式；水温持久稳定，不会出现忽热忽冷的现象，并通过水量伺服技术将出水温度精确控制在±0.5℃，可满足家里宝贝敏感肌肤洗护需求；配备CO和CH4双气体报警装置更安全（市场上一般多为CO单气体报警）。另外，这款热水器还有智能WIFI互联功能，只需下载个手机APP即可用手机远程操作热水器，实现精准调节水温，满足家人多样化的洗浴需求。当然方太的磁化恒温系列主要的是增加磁化功能，可以有效吸附水中的铁锈、铁屑等微小杂质，防止细菌滋生，使沐浴水质更洁净，长期使用磁化水沐浴更利于身体健康。']"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Peek at the first two raw contexts to sanity-check the load.\n",
    "train_file_data['train']['context'][:2]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "2678cc42",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(['context', 'answer', 'question', 'id'],\n",
       " {'context': Value('string'),\n",
       "  'answer': Value('string'),\n",
       "  'question': Value('string'),\n",
       "  'id': Value('int64')})"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Confirm the schema: column names and per-column feature types.\n",
    "train_file_data['train'].column_names, train_file_data['train'].features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "09730201",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['context', 'answer', 'question', 'id'],\n",
       "        num_rows: 13068\n",
       "    })\n",
       "    test: Dataset({\n",
       "        features: ['context', 'answer', 'question', 'id'],\n",
       "        num_rows: 1452\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Hold out 10% for evaluation. Fixed seed so the split is identical on every\n",
    "# Restart-&-Run-All; without it, downstream metrics are not reproducible.\n",
    "train_and_test_data = train_file_data['train'].train_test_split(test_size=0.1, seed=42)\n",
    "train_and_test_data"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6b053088",
   "metadata": {},
   "source": [
    "- [ ] 这里为什么不能用use_fast，这个是否能用use_fast如何判断？比如HF一个Transformer的模型，应该如何确定是否能用？"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "64851fb2",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, T5Tokenizer, T5ForConditionalGeneration\n",
    "import torch\n",
    "\n",
    "# Pick the best available accelerator: CUDA > Apple MPS > CPU.\n",
    "# (The original only checked MPS, silently ignoring CUDA machines.)\n",
    "if torch.cuda.is_available():\n",
    "    device = torch.device('cuda')\n",
    "elif torch.backends.mps.is_available():\n",
    "    device = torch.device('mps')\n",
    "else:\n",
    "    device = torch.device('cpu')\n",
    "\n",
    "model: T5ForConditionalGeneration = AutoModelForSeq2SeqLM.from_pretrained('langboat/mengzi-t5-base')\n",
    "model.to(device)\n",
    "\n",
    "# use_fast=False forces the slow (SentencePiece-backed) T5Tokenizer, matching the\n",
    "# T5Tokenizer annotation below. NOTE(review): a fast tokenizer is generally usable\n",
    "# when the hub repo ships tokenizer.json or a convertible slow tokenizer -- check\n",
    "# the checkpoint's files to answer the TODO in the markdown cell above.\n",
    "tokenizer: T5Tokenizer = AutoTokenizer.from_pretrained('langboat/mengzi-t5-base', use_fast=False, legacy=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "c2d92fec",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Dataset({\n",
       "    features: ['context', 'answer', 'question', 'id'],\n",
       "    num_rows: 13068\n",
       "})"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the training side of the 90/10 split (13,068 rows per the output).\n",
    "train_and_test_data['train']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "660ed47a",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Map: 100%|██████████| 13068/13068 [00:04<00:00, 3255.14 examples/s]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "Dataset({\n",
       "    features: ['input_ids', 'attention_mask', 'labels'],\n",
       "    num_rows: 13068\n",
       "})"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# NOTE(review): DataCollatorWithPadding is unused in this cell; for seq2seq\n",
    "# training, DataCollatorForSeq2Seq (which pads labels with -100) is usually the\n",
    "# right collator -- verify where the collator is constructed later.\n",
    "from transformers import DataCollatorWithPadding\n",
    "\n",
    "def process_function(examples):\n",
    "  \"\"\"Tokenize a batch: 'question:... context:...' prompts as inputs, answer ids as labels.\"\"\"\n",
    "  inputs = [f'question:{q} context:{c}' for q, c in zip(examples['question'], examples['context'])]\n",
    "  answers = examples['answer']\n",
    "  model_inputs = tokenizer(inputs, max_length=512, truncation=True, padding=False)\n",
    "  # BUG FIX: the tokenizer returns a BatchEncoding (input_ids + attention_mask);\n",
    "  # labels must be the token-id lists only, not the whole encoding dict.\n",
    "  model_inputs['labels'] = tokenizer(answers, max_length=128, truncation=True, padding=False)['input_ids']\n",
    "  return model_inputs\n",
    "\n",
    "\n",
    "train_test = train_and_test_data['train'].map(\n",
    "  process_function,\n",
    "  batched=True,\n",
    "  remove_columns=train_and_test_data['train'].column_names\n",
    ")\n",
    "train_test\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ai-common",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
