{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "jupyter": {
     "is_executing": true
    },
    "ExecuteTime": {
     "start_time": "2025-04-03T08:25:10.285274Z"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of DistilBertForMaskedLM were not initialized from the model checkpoint at D:/ai/huggingface-models/ and are newly initialized: ['vocab_layer_norm.bias', 'vocab_layer_norm.weight', 'vocab_projector.bias', 'vocab_transform.bias', 'vocab_transform.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "模型权重是: torch.float32\n"
     ]
    }
   ],
   "source": [
    "# Load a DistilBERT tokenizer and masked-LM model from a local checkpoint directory.\n",
    "from transformers import AutoModelForMaskedLM\n",
    "from transformers import AutoTokenizer\n",
    "\n",
    "import torch\n",
    "import platform\n",
    "\n",
    "# Pick dataset/model paths per OS so the notebook runs on both machines.\n",
    "if platform.system() == 'Windows':\n",
    "    data_path = \"D:/ai/huggingface-datasets/imdb\"\n",
    "    model_path = 'D:/ai/huggingface-models/'\n",
    "else:\n",
    "    data_path = \"/media/will/0291BA7C3A7B49C5/ai/huggingface-datasets/imdb\"\n",
    "    model_path = '/media/will/0291BA7C3A7B49C5/ai/huggingface-models/'\n",
    "\n",
    "check_point = \"distilbert-base-uncased-finetuned-sst-2-english\"\n",
    "# model_checkpoint = \"distilbert-base-uncased\"\n",
    "# model_checkpoint=\"bert-base-chinese\"\n",
    "tokenizer = AutoTokenizer.from_pretrained(model_path,\n",
    "        subfolder=check_point,\n",
    "        # load_in_8bit=True,  # enable INT8 quantization; requires bitsandbytes\n",
    "        )\n",
    "        \n",
    "model = AutoModelForMaskedLM.from_pretrained(model_path,\n",
    "        subfolder=check_point,\n",
    "        # load_in_8bit=True,\n",
    "        )\n",
    "        \n",
    "print(f'模型权重是: {model.dtype}')# check the dtype of the model weights (float32 here; would differ if fp16/quantized)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:06.720393Z",
     "start_time": "2025-03-30T13:31:06.714355Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "'>>> DistilBERT number of parameters: 67M'\n",
      "'>>> BERT number of parameters: 110M'\n"
     ]
    }
   ],
   "source": [
    "# Compare parameter counts (DistilBERT vs. full BERT) and define the fill-mask prompt.\n",
    "distilbert_num_parameters = model.num_parameters() / 1_000_000\n",
    "print(f\"'>>> DistilBERT number of parameters: {round(distilbert_num_parameters)}M'\")\n",
    "print(f\"'>>> BERT number of parameters: 110M'\")\n",
    "# text = \"这是一个非常好的 [MASK].\"\n",
    "text = \"This is a great [MASK].\"\n",
    "# text = \"You are [MASK].\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T14:21:21.021225Z",
     "start_time": "2025-03-30T14:21:20.747908Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T09:59:02.412636Z",
     "iopub.status.busy": "2025-03-30T09:59:02.411950Z",
     "iopub.status.idle": "2025-03-30T09:59:03.077130Z",
     "shell.execute_reply": "2025-03-30T09:59:03.076274Z",
     "shell.execute_reply.started": "2025-03-30T09:59:02.412611Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "'>>> This is a great film.'\n",
      "'>>> This is a great movie.'\n",
      "'>>> This is a great idea.'\n",
      "'>>> This is a great one.'\n",
      "'>>> This is a great adventure.'\n"
     ]
    }
   ],
   "source": [
    "# Run the prompt through the model and show the top-5 predictions for [MASK].\n",
    "inputs = tokenizer(text, return_tensors=\"pt\")\n",
    "token_logits = model(**inputs).logits\n",
    "# Locate the [MASK] position and extract its logits\n",
    "mask_token_index = torch.where(inputs[\"input_ids\"] == tokenizer.mask_token_id)[1]\n",
    "mask_token_logits = token_logits[0, mask_token_index, :]\n",
    "# Select the [MASK] candidates with the highest logits\n",
    "top_5_tokens = torch.topk(mask_token_logits, 5, dim=1).indices[0].tolist()\n",
    "\n",
    "for token in top_5_tokens:\n",
    "    print(f\"'>>> {text.replace(tokenizer.mask_token, tokenizer.decode([token]))}'\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:07.574819Z",
     "start_time": "2025-03-30T13:31:07.272237Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T09:59:03.078318Z",
     "iopub.status.busy": "2025-03-30T09:59:03.077984Z",
     "iopub.status.idle": "2025-03-30T09:59:06.868127Z",
     "shell.execute_reply": "2025-03-30T09:59:06.867442Z",
     "shell.execute_reply.started": "2025-03-30T09:59:03.078284Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "raw_datasets = DatasetDict({\n",
      "    train: Dataset({\n",
      "        features: ['text', 'label'],\n",
      "        num_rows: 25000\n",
      "    })\n",
      "    test: Dataset({\n",
      "        features: ['text', 'label'],\n",
      "        num_rows: 25000\n",
      "    })\n",
      "})\n"
     ]
    }
   ],
   "source": [
    "# Load the IMDB review dataset from a local copy (25k train / 25k test).\n",
    "from datasets import load_dataset\n",
    "\n",
    "imdb_dataset = load_dataset(path=data_path)\n",
    "# imdb_dataset = load_dataset(\"imdb\")\n",
    "print(f'raw_datasets = {imdb_dataset}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:07.603764Z",
     "start_time": "2025-03-30T13:31:07.595401Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T09:59:06.869687Z",
     "iopub.status.busy": "2025-03-30T09:59:06.868925Z",
     "iopub.status.idle": "2025-03-30T09:59:06.886935Z",
     "shell.execute_reply": "2025-03-30T09:59:06.886264Z",
     "shell.execute_reply.started": "2025-03-30T09:59:06.869662Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "'>>> Review: There is no relation at all between Fortier and Profiler but the fact that both are police series about violent crimes. Profiler looks crispy, Fortier looks classic. Profiler plots are quite simple. Fortier's plot are far more complicated... Fortier looks more like Prime Suspect, if we have to spot similarities... The main character is weak and weirdo, but have \"clairvoyance\". People like to compare, to judge, to evaluate. How about just enjoying? Funny thing too, people writing Fortier looks American but, on the other hand, arguing they prefer American series (!!!). Maybe it's the language, or the spirit, but I think this series is more English than American. By the way, the actors are really good and funny. The acting is not superficial at all...'\n",
      "'>>> Label: 1'\n",
      "\n",
      "'>>> Review: This movie is a great. The plot is very true to the book which is a classic written by Mark Twain. The movie starts of with a scene where Hank sings a song with a bunch of kids called \"when you stub your toe on the moon\" It reminds me of Sinatra's song High Hopes, it is fun and inspirational. The Music is great throughout and my favorite song is sung by the King, Hank (bing Crosby) and Sir \"Saggy\" Sagamore. OVerall a great family movie or even a great Date movie. This is a movie you can watch over and over again. The princess played by Rhonda Fleming is gorgeous. I love this movie!! If you liked Danny Kaye in the Court Jester then you will definitely like this movie.'\n",
      "'>>> Label: 1'\n",
      "\n",
      "'>>> Review: George P. Cosmatos' \"Rambo: First Blood Part II\" is pure wish-fulfillment. The United States clearly didn't win the war in Vietnam. They caused damage to this country beyond the imaginable and this movie continues the fairy story of the oh-so innocent soldiers. The only bad guys were the leaders of the nation, who made this war happen. The character of Rambo is perfect to notice this. He is extremely patriotic, bemoans that US-Americans didn't appreciate and celebrate the achievements of the single soldier, but has nothing but distrust for leading officers and politicians. Like every film that defends the war (e.g. \"We Were Soldiers\") also this one avoids the need to give a comprehensible reason for the engagement in South Asia. And for that matter also the reason for every single US-American soldier that was there. Instead, Rambo gets to take revenge for the wounds of a whole nation. It would have been better to work on how to deal with the memories, rather than suppressing them. \"Do we get to win this time?\" Yes, you do.'\n",
      "'>>> Label: 0'\n"
     ]
    }
   ],
   "source": [
    "# Peek at three shuffled training examples (seeded for reproducibility).\n",
    "sample = imdb_dataset[\"train\"].shuffle(seed=42).select(range(3))\n",
    "\n",
    "for row in sample:\n",
    "    print(f\"\\n'>>> Review: {row['text']}'\")\n",
    "    print(f\"'>>> Label: {row['label']}'\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:07.691006Z",
     "start_time": "2025-03-30T13:31:07.661151Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T09:59:06.888077Z",
     "iopub.status.busy": "2025-03-30T09:59:06.887769Z",
     "iopub.status.idle": "2025-03-30T09:59:58.180360Z",
     "shell.execute_reply": "2025-03-30T09:59:58.179658Z",
     "shell.execute_reply.started": "2025-03-30T09:59:06.888027Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['input_ids', 'attention_mask', 'word_ids'],\n",
       "        num_rows: 25000\n",
       "    })\n",
       "    test: Dataset({\n",
       "        features: ['input_ids', 'attention_mask', 'word_ids'],\n",
       "        num_rows: 25000\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def tokenize_function(examples):\n",
    "    \"\"\"Tokenize a batch of reviews; keep word_ids for whole-word masking later.\"\"\"\n",
    "    result = tokenizer(examples[\"text\"])\n",
    "    if tokenizer.is_fast:\n",
    "        # word_ids map each token back to its source word (fast tokenizers only)\n",
    "        result[\"word_ids\"] = [result.word_ids(i) for i in range(len(result[\"input_ids\"]))]\n",
    "    return result\n",
    "\n",
    "\n",
    "# Use batched=True to activate fast multithreading!\n",
    "tokenized_datasets = imdb_dataset.map(\n",
    "    tokenize_function, batched=True, remove_columns=[\"text\", \"label\"]\n",
    ")\n",
    "tokenized_datasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:07.714284Z",
     "start_time": "2025-03-30T13:31:07.709268Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T09:59:58.182850Z",
     "iopub.status.busy": "2025-03-30T09:59:58.182633Z",
     "iopub.status.idle": "2025-03-30T09:59:58.186031Z",
     "shell.execute_reply": "2025-03-30T09:59:58.185319Z",
     "shell.execute_reply.started": "2025-03-30T09:59:58.182832Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "512"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Chunk length for language-model training; the tokenizer itself allows up to\n",
    "# model_max_length (512) tokens, but 128 keeps memory usage low.\n",
    "chunk_size = 128\n",
    "tokenizer.model_max_length"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:07.778127Z",
     "start_time": "2025-03-30T13:31:07.771612Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T09:59:58.188259Z",
     "iopub.status.busy": "2025-03-30T09:59:58.188020Z",
     "iopub.status.idle": "2025-03-30T09:59:58.946335Z",
     "shell.execute_reply": "2025-03-30T09:59:58.945537Z",
     "shell.execute_reply.started": "2025-03-30T09:59:58.188240Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "'>>> Review 0 length: 363'\n",
      "'>>> Review 1 length: 304'\n",
      "'>>> Review 2 length: 133'\n"
     ]
    }
   ],
   "source": [
    "# Slicing produces a list of lists for each feature\n",
    "tokenized_samples = tokenized_datasets[\"train\"][:3]\n",
    "\n",
    "for idx, sample in enumerate(tokenized_samples[\"input_ids\"]):\n",
    "    print(f\"'>>> Review {idx} length: {len(sample)}'\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:07.839402Z",
     "start_time": "2025-03-30T13:31:07.834984Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T09:59:58.947306Z",
     "iopub.status.busy": "2025-03-30T09:59:58.947045Z",
     "iopub.status.idle": "2025-03-30T09:59:58.959030Z",
     "shell.execute_reply": "2025-03-30T09:59:58.958243Z",
     "shell.execute_reply.started": "2025-03-30T09:59:58.947279Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "'>>> Concatenated reviews length: 800'\n"
     ]
    }
   ],
   "source": [
    "# Concatenate the three samples' token lists feature-by-feature\n",
    "# (sum(list_of_lists, []) flattens one level).\n",
    "concatenated_examples = {\n",
    "    k: sum(tokenized_samples[k], []) for k in tokenized_samples.keys()\n",
    "}\n",
    "total_length = len(concatenated_examples[\"input_ids\"])\n",
    "print(f\"'>>> Concatenated reviews length: {total_length}'\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:07.896306Z",
     "start_time": "2025-03-30T13:31:07.891264Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T09:59:58.960136Z",
     "iopub.status.busy": "2025-03-30T09:59:58.959831Z",
     "iopub.status.idle": "2025-03-30T09:59:58.977243Z",
     "shell.execute_reply": "2025-03-30T09:59:58.976588Z",
     "shell.execute_reply.started": "2025-03-30T09:59:58.960107Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "'>>> Chunk length: 128'\n",
      "'>>> Chunk length: 128'\n",
      "'>>> Chunk length: 128'\n",
      "'>>> Chunk length: 128'\n",
      "'>>> Chunk length: 128'\n",
      "'>>> Chunk length: 128'\n",
      "'>>> Chunk length: 32'\n"
     ]
    }
   ],
   "source": [
    "# Split the concatenated tokens into chunk_size pieces; the final chunk\n",
    "# may be shorter (32 here) — group_texts below drops such remainders.\n",
    "chunks = {\n",
    "    k: [t[i : i + chunk_size] for i in range(0, total_length, chunk_size)]\n",
    "    for k, t in concatenated_examples.items()\n",
    "}\n",
    "\n",
    "for chunk in chunks[\"input_ids\"]:\n",
    "    print(f\"'>>> Chunk length: {len(chunk)}'\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:07.953926Z",
     "start_time": "2025-03-30T13:31:07.948253Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T09:59:58.978345Z",
     "iopub.status.busy": "2025-03-30T09:59:58.978025Z",
     "iopub.status.idle": "2025-03-30T09:59:58.989537Z",
     "shell.execute_reply": "2025-03-30T09:59:58.988815Z",
     "shell.execute_reply.started": "2025-03-30T09:59:58.978315Z"
    },
    "trusted": true
   },
   "outputs": [],
   "source": [
    "def group_texts(examples):\n",
    "    \"\"\"Concatenate a batch's token lists and re-split them into fixed-size chunks.\"\"\"\n",
    "    # Concatenate all texts\n",
    "    concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}\n",
    "    # Compute the length of the concatenated texts\n",
    "    total_length = len(concatenated_examples[list(examples.keys())[0]])\n",
    "    # If the last chunk is smaller than chunk_size, we drop it\n",
    "    total_length = (total_length // chunk_size) * chunk_size\n",
    "    # Split into chunks of chunk_size\n",
    "    result = {\n",
    "        k: [t[i : i + chunk_size] for i in range(0, total_length, chunk_size)]\n",
    "        for k, t in concatenated_examples.items()\n",
    "    }\n",
    "    # Create a new labels column (copy of input_ids; the collator masks later)\n",
    "    result[\"labels\"] = result[\"input_ids\"].copy()\n",
    "    return result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:08.018619Z",
     "start_time": "2025-03-30T13:31:08.000998Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T09:59:58.990503Z",
     "iopub.status.busy": "2025-03-30T09:59:58.990291Z",
     "iopub.status.idle": "2025-03-30T10:03:26.952330Z",
     "shell.execute_reply": "2025-03-30T10:03:26.951648Z",
     "shell.execute_reply.started": "2025-03-30T09:59:58.990484Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['input_ids', 'attention_mask', 'word_ids', 'labels'],\n",
       "        num_rows: 61291\n",
       "    })\n",
       "    test: Dataset({\n",
       "        features: ['input_ids', 'attention_mask', 'word_ids', 'labels'],\n",
       "        num_rows: 59904\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Regroup the corpus into fixed-length chunks; row counts grow because\n",
    "# each review may yield several 128-token chunks.\n",
    "lm_datasets = tokenized_datasets.map(group_texts, batched=True)\n",
    "lm_datasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:08.065778Z",
     "start_time": "2025-03-30T13:31:08.056620Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T10:03:26.953289Z",
     "iopub.status.busy": "2025-03-30T10:03:26.953026Z",
     "iopub.status.idle": "2025-03-30T10:03:26.961346Z",
     "shell.execute_reply": "2025-03-30T10:03:26.960530Z",
     "shell.execute_reply.started": "2025-03-30T10:03:26.953256Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "as the vietnam war and race issues in the united states. in between asking politicians and ordinary denizens of stockholm about their opinions on politics, she has sex with her drama teacher, classmates, and married men. < br / > < br / > what kills me about i am curious - yellow is that 40 years ago, this was considered pornographic. really, the sex and nudity scenes are few and far between, even then it ' s not shot like some cheaply made porno. while my countrymen mind find it shocking, in reality sex and nudity are a major staple in swedish cinema. even ingmar bergman,\n",
      "as the vietnam war and race issues in the united states. in between asking politicians and ordinary denizens of stockholm about their opinions on politics, she has sex with her drama teacher, classmates, and married men. < br / > < br / > what kills me about i am curious - yellow is that 40 years ago, this was considered pornographic. really, the sex and nudity scenes are few and far between, even then it ' s not shot like some cheaply made porno. while my countrymen mind find it shocking, in reality sex and nudity are a major staple in swedish cinema. even ingmar bergman,\n"
     ]
    }
   ],
   "source": [
    "# Sanity check: before masking, input_ids and labels decode to identical text.\n",
    "print(tokenizer.decode(lm_datasets[\"train\"][1][\"input_ids\"]))\n",
    "\n",
    "print(tokenizer.decode(lm_datasets[\"train\"][1][\"labels\"]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:08.145083Z",
     "start_time": "2025-03-30T13:31:08.119821Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T10:03:26.962641Z",
     "iopub.status.busy": "2025-03-30T10:03:26.962326Z",
     "iopub.status.idle": "2025-03-30T10:03:27.109932Z",
     "shell.execute_reply": "2025-03-30T10:03:27.109262Z",
     "shell.execute_reply.started": "2025-03-30T10:03:26.962612Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "'>>> [CLS] i rented i [MASK] curious -isha from my video store because of all the controversy that surrounded it when [MASK] [MASK] first released [MASK] 1967. [MASK] also heard that at [MASK] kathmandu was seized [MASK] u. s. customs if it ever [MASK] to enter this [MASK], [MASK] being a fan of films considered \" controversial \" i [MASK] had to [MASK] this [MASK] [MASK]. < br / > < br / channel the [MASK] is centered around a young swedish drama student named lena who [MASK] to learn still she can about [MASK]. in particular [MASK] wants to focus [MASK] attentions to making some sort of documentary on what the average swede [MASK] about certain political [MASK] such'\n",
      "\n",
      "'>>> as the vietnam war and race issues in the united states. in between asking politicians and ordinary denize [MASK] of stockholm about their opinions on politics springfield she has sex with [MASK] drama teacher, classmates, and married men. < br / > [MASK] br / > what kills me [MASK] i am curious [MASK]hore is that 40 years ago, [MASK] was considered pornographic. really, the sex [MASK] nu [MASK] scenes are few and far between, even then [MASK] ' s not [MASK] [MASK] some cheaply made porno. while my [MASK]men mind find it shocking, [MASK] reality sex and [MASK]dity are a major staple in swedish [MASK]. even ingmar bergman,'\n"
     ]
    }
   ],
   "source": [
    "# Standard MLM collator: randomly masks 15% of tokens per batch.\n",
    "from transformers import DataCollatorForLanguageModeling\n",
    "\n",
    "data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)\n",
    "samples = [lm_datasets[\"train\"][i] for i in range(2)]\n",
    "for sample in samples:\n",
    "    # word_ids is not a tensor-able column; drop it before collating\n",
    "    _ = sample.pop(\"word_ids\")\n",
    "\n",
    "for chunk in data_collator(samples)[\"input_ids\"]:\n",
    "    print(f\"\\n'>>> {tokenizer.decode(chunk)}'\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:08.185851Z",
     "start_time": "2025-03-30T13:31:08.177806Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T10:03:27.111043Z",
     "iopub.status.busy": "2025-03-30T10:03:27.110753Z",
     "iopub.status.idle": "2025-03-30T10:03:27.116738Z",
     "shell.execute_reply": "2025-03-30T10:03:27.116040Z",
     "shell.execute_reply.started": "2025-03-30T10:03:27.111014Z"
    },
    "trusted": true
   },
   "outputs": [],
   "source": [
    "import collections\n",
    "import numpy as np\n",
    "\n",
    "from transformers import default_data_collator\n",
    "\n",
    "# Probability of masking each whole word (not each token)\n",
    "wwm_probability = 0.2\n",
    "\n",
    "\n",
    "def whole_word_masking_data_collator(features):\n",
    "    \"\"\"Collate features while masking all tokens of randomly chosen words together.\"\"\"\n",
    "    for feature in features:\n",
    "        word_ids = feature.pop(\"word_ids\")\n",
    "\n",
    "        # Create a map between words and their corresponding token indices\n",
    "        mapping = collections.defaultdict(list)\n",
    "        current_word_index = -1\n",
    "        current_word = None\n",
    "        for idx, word_id in enumerate(word_ids):\n",
    "            if word_id is not None:\n",
    "                # special tokens ([CLS]/[SEP]) have word_id None and are skipped\n",
    "                if word_id != current_word:\n",
    "                    current_word = word_id\n",
    "                    current_word_index += 1\n",
    "                mapping[current_word_index].append(idx)\n",
    "\n",
    "        # Randomly mask whole words\n",
    "        mask = np.random.binomial(1, wwm_probability, (len(mapping),))\n",
    "        input_ids = feature[\"input_ids\"]\n",
    "        labels = feature[\"labels\"]\n",
    "        # -100 is ignored by the loss; only masked positions keep real labels\n",
    "        new_labels = [-100] * len(labels)\n",
    "        for word_id in np.where(mask)[0]:\n",
    "            word_id = word_id.item()\n",
    "            for idx in mapping[word_id]:\n",
    "                new_labels[idx] = labels[idx]\n",
    "                input_ids[idx] = tokenizer.mask_token_id\n",
    "        feature[\"labels\"] = new_labels\n",
    "\n",
    "    return default_data_collator(features)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:08.242060Z",
     "start_time": "2025-03-30T13:31:08.232872Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T10:03:27.117704Z",
     "iopub.status.busy": "2025-03-30T10:03:27.117494Z",
     "iopub.status.idle": "2025-03-30T10:03:27.134478Z",
     "shell.execute_reply": "2025-03-30T10:03:27.133777Z",
     "shell.execute_reply.started": "2025-03-30T10:03:27.117685Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "'>>> [CLS] i [MASK] i am curious - yellow from [MASK] video store because of all the controversy that surrounded it when it was [MASK] released in 1967. i also heard that at first it was seized [MASK] u [MASK] s. customs if it [MASK] [MASK] to enter [MASK] [MASK] [MASK] therefore being a fan of films considered \" controversial [MASK] i really had to [MASK] this for myself [MASK] [MASK] br / [MASK] [MASK] br / > the [MASK] is centered [MASK] a young swedish drama student named lena who wants to [MASK] everything she can about life. in particular she wants to [MASK] her attentions to making some sort of documentary on what the average swede thought about certain [MASK] issues such'\n",
      "\n",
      "'>>> as the [MASK] war [MASK] race issues in the united states. [MASK] [MASK] asking politicians and ordinary [MASK] [MASK] [MASK] of [MASK] about their opinions on [MASK], she has sex with [MASK] drama [MASK], classmates [MASK] and married men. < br / > [MASK] br / > what kills me about i am curious - yellow [MASK] that 40 [MASK] [MASK], this was [MASK] [MASK]. really, the sex and nudity scenes are few and far between [MASK] even then it ' [MASK] not shot like some cheaply made porno. while my countrymen mind find it shocking, in [MASK] sex and nudity are a [MASK] staple in swedish [MASK]. even ingmar [MASK],'\n"
     ]
    }
   ],
   "source": [
    "# Demo of whole-word masking: contiguous [MASK]s cover multi-token words.\n",
    "samples = [lm_datasets[\"train\"][i] for i in range(2)]\n",
    "batch = whole_word_masking_data_collator(samples)\n",
    "\n",
    "for chunk in batch[\"input_ids\"]:\n",
    "    print(f\"\\n'>>> {tokenizer.decode(chunk)}'\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:08.305188Z",
     "start_time": "2025-03-30T13:31:08.291527Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T10:03:27.135420Z",
     "iopub.status.busy": "2025-03-30T10:03:27.135211Z",
     "iopub.status.idle": "2025-03-30T10:03:27.156102Z",
     "shell.execute_reply": "2025-03-30T10:03:27.155481Z",
     "shell.execute_reply.started": "2025-03-30T10:03:27.135397Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "DatasetDict({\n",
       "    train: Dataset({\n",
       "        features: ['input_ids', 'attention_mask', 'word_ids', 'labels'],\n",
       "        num_rows: 10000\n",
       "    })\n",
       "    test: Dataset({\n",
       "        features: ['input_ids', 'attention_mask', 'word_ids', 'labels'],\n",
       "        num_rows: 1000\n",
       "    })\n",
       "})"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Downsample to 10k train / 1k test chunks so fine-tuning is feasible locally.\n",
    "train_size = 10_000\n",
    "test_size = int(0.1 * train_size)\n",
    "\n",
    "downsampled_dataset = lm_datasets[\"train\"].train_test_split(\n",
    "    train_size=train_size, test_size=test_size, seed=42\n",
    ")\n",
    "downsampled_dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:08.508612Z",
     "start_time": "2025-03-30T13:31:08.347158Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T10:03:27.157027Z",
     "iopub.status.busy": "2025-03-30T10:03:27.156780Z",
     "iopub.status.idle": "2025-03-30T10:03:28.363829Z",
     "shell.execute_reply": "2025-03-30T10:03:28.363218Z",
     "shell.execute_reply.started": "2025-03-30T10:03:27.156995Z"
    },
    "trusted": true
   },
   "outputs": [],
   "source": [
    "from transformers import TrainingArguments\n",
    "\n",
    "# The default is 64, which is too large for this GPU\n",
    "batch_size = 32\n",
    "# Log the training loss once per epoch\n",
    "logging_steps = len(downsampled_dataset[\"train\"]) // batch_size\n",
    "model_name = check_point.split(\"/\")[-1]\n",
    "output_dir=f\"{model_name}-finetuned-imdb\"\n",
    "\n",
    "training_args = TrainingArguments(\n",
    "    output_dir=output_dir,\n",
    "    overwrite_output_dir=True,\n",
    "    eval_strategy=\"epoch\",\n",
    "    learning_rate=2e-5,\n",
    "    weight_decay=0.01,\n",
    "    per_device_train_batch_size=batch_size,\n",
    "    per_device_eval_batch_size=batch_size,\n",
    "    # fp16=True,  # enable mixed precision\n",
    "    gradient_checkpointing=True,  # GPU-memory optimization (trades compute for memory)\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:09.334016Z",
     "start_time": "2025-03-30T13:31:08.537756Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T10:03:28.364785Z",
     "iopub.status.busy": "2025-03-30T10:03:28.364554Z",
     "iopub.status.idle": "2025-03-30T10:03:30.784489Z",
     "shell.execute_reply": "2025-03-30T10:03:30.783823Z",
     "shell.execute_reply.started": "2025-03-30T10:03:28.364765Z"
    },
    "trusted": true
   },
   "outputs": [],
   "source": [
    "from transformers import Trainer\n",
    "\n",
    "# Uses the random-token MLM collator (data_collator) defined above,\n",
    "# not the whole-word-masking variant.\n",
    "trainer = Trainer(\n",
    "    model=model,\n",
    "    args=training_args,\n",
    "    train_dataset=downsampled_dataset[\"train\"],\n",
    "    eval_dataset=downsampled_dataset[\"test\"],\n",
    "    data_collator=data_collator,\n",
    "    processing_class=tokenizer,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:09.387723Z",
     "start_time": "2025-03-30T13:31:09.384083Z"
    },
    "execution": {
     "iopub.execute_input": "2025-03-30T10:03:30.785570Z",
     "iopub.status.busy": "2025-03-30T10:03:30.785278Z"
    },
    "trusted": true
   },
   "outputs": [],
   "source": [
    "import math\n",
    "# ~18 s on the x79 1652v2 GPU (baseline eval, disabled here)\n",
    "# eval_results = trainer.evaluate()\n",
    "# print(f\">>> Perplexity: {math.exp(eval_results['eval_loss']):.2f}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:09.438858Z",
     "start_time": "2025-03-30T13:31:09.435134Z"
    }
   },
   "outputs": [],
   "source": [
    "# del eval_results # this result holds a lot of memory; best to delete it"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T13:31:09.489704Z",
     "start_time": "2025-03-30T13:31:09.484726Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "256.61376953125 MB\n"
     ]
    }
   ],
   "source": [
    "print(torch.cuda.memory_allocated() / 1024**2, \"MB\")  # check GPU memory usage"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T14:07:12.058575Z",
     "start_time": "2025-03-30T13:31:09.544239Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='939' max='939' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [939/939 35:59, Epoch 3/3]\n",
       "    </div>\n",
       "    <table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       " <tr style=\"text-align: left;\">\n",
       "      <th>Epoch</th>\n",
       "      <th>Training Loss</th>\n",
       "      <th>Validation Loss</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <td>1</td>\n",
       "      <td>No log</td>\n",
       "      <td>2.462987</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>2</td>\n",
       "      <td>2.606800</td>\n",
       "      <td>2.442091</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <td>3</td>\n",
       "      <td>2.606800</td>\n",
       "      <td>2.422115</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table><p>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/plain": [
       "TrainOutput(global_step=939, training_loss=2.5618247782714323, metrics={'train_runtime': 2162.3583, 'train_samples_per_second': 13.874, 'train_steps_per_second': 0.434, 'total_flos': 994208670720000.0, 'train_loss': 2.5618247782714323, 'epoch': 3.0})"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "trainer.train()\n",
    "# 在x79 1650v2 cpu上要训练2个半小时..\n",
    "# 在x79 rx570 gpu上要训练也要2个多小时.. 计算速度和精度、batch_size有关\n",
    "# rx570 fp16速度非常慢。 在fp32+batch_size=32下，大概要半小时，batch_size再大就爆显存了"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T14:07:29.897718Z",
     "start_time": "2025-03-30T14:07:12.127890Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "\n",
       "    <div>\n",
       "      \n",
       "      <progress value='32' max='32' style='width:300px; height:20px; vertical-align: middle;'></progress>\n",
       "      [32/32 00:17]\n",
       "    </div>\n",
       "    "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      ">>> Perplexity: 11.50\n"
     ]
    }
   ],
   "source": [
    "eval_results = trainer.evaluate()\n",
    "print(f\">>> Perplexity: {math.exp(eval_results['eval_loss']):.2f}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T14:07:29.921304Z",
     "start_time": "2025-03-30T14:07:29.916968Z"
    },
    "trusted": true
   },
   "outputs": [],
   "source": [
    "def insert_random_mask(batch):\n",
    "    features = [dict(zip(batch, t)) for t in zip(*batch.values())]\n",
    "    masked_inputs = data_collator(features)\n",
    "    # 为数据集中的每一列创建一个新的\"masked\"列\n",
    "    return {\"masked_\" + k: v.numpy() for k, v in masked_inputs.items()}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T14:07:30.398248Z",
     "start_time": "2025-03-30T14:07:29.971675Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "024d4c9e8db24d94a009fbdd3601f2a8",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Map:   0%|          | 0/1000 [00:00<?, ? examples/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "downsampled_dataset = downsampled_dataset.remove_columns([\"word_ids\"])\n",
    "eval_dataset = downsampled_dataset[\"test\"].map(\n",
    "    insert_random_mask,\n",
    "    batched=True,\n",
    "    remove_columns=downsampled_dataset[\"test\"].column_names,\n",
    ")\n",
    "eval_dataset = eval_dataset.rename_columns(\n",
    "    {\n",
    "        \"masked_input_ids\": \"input_ids\",\n",
    "        \"masked_attention_mask\": \"attention_mask\",\n",
    "        \"masked_labels\": \"labels\",\n",
    "    }\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T14:07:30.428864Z",
     "start_time": "2025-03-30T14:07:30.424444Z"
    },
    "trusted": true
   },
   "outputs": [],
   "source": [
    "from torch.utils.data import DataLoader\n",
    "from transformers import default_data_collator\n",
    "\n",
    "batch_size = 64\n",
    "train_dataloader = DataLoader(\n",
    "    downsampled_dataset[\"train\"],\n",
    "    shuffle=True,\n",
    "    batch_size=batch_size,\n",
    "    collate_fn=data_collator,\n",
    ")\n",
    "eval_dataloader = DataLoader(\n",
    "    eval_dataset, batch_size=batch_size, collate_fn=default_data_collator\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T14:08:57.971616Z",
     "start_time": "2025-03-30T14:08:57.958297Z"
    },
    "trusted": true
   },
   "outputs": [],
   "source": [
    "# model = AutoModelForMaskedLM.from_pretrained(model_checkpoint)\n",
    "from torch.optim import AdamW\n",
    "\n",
    "optimizer = AdamW(model.parameters(), lr=5e-5)\n",
    "from accelerate import Accelerator\n",
    "\n",
    "accelerator = Accelerator()\n",
    "model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(\n",
    "    model, optimizer, train_dataloader, eval_dataloader\n",
    ")\n",
    "\n",
    "from transformers import get_scheduler\n",
    "\n",
    "num_train_epochs = 3\n",
    "num_update_steps_per_epoch = len(train_dataloader)\n",
    "num_training_steps = num_train_epochs * num_update_steps_per_epoch\n",
    "\n",
    "lr_scheduler = get_scheduler(\n",
    "    \"linear\",\n",
    "    optimizer=optimizer,\n",
    "    num_warmup_steps=0,\n",
    "    num_training_steps=num_training_steps,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T14:09:05.748922Z",
     "start_time": "2025-03-30T14:09:04.061197Z"
    },
    "trusted": true
   },
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "1025607fe4bc4246a8b355c6cdaaa8c8",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/471 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "ename": "OutOfMemoryError",
     "evalue": "HIP out of memory. Tried to allocate 954.00 MiB. GPU 0 has a total capacty of 4.00 GiB of which 466.00 MiB is free. Of the allocated memory 2.89 GiB is allocated by PyTorch, and 440.26 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation.  See documentation for Memory Management and PYTORCH_HIP_ALLOC_CONF",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mOutOfMemoryError\u001b[0m                          Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[31], line 13\u001b[0m\n\u001b[1;32m     11\u001b[0m outputs \u001b[38;5;241m=\u001b[39m model(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mbatch)\n\u001b[1;32m     12\u001b[0m loss \u001b[38;5;241m=\u001b[39m outputs\u001b[38;5;241m.\u001b[39mloss\n\u001b[0;32m---> 13\u001b[0m \u001b[43maccelerator\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mloss\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     15\u001b[0m optimizer\u001b[38;5;241m.\u001b[39mstep()\n\u001b[1;32m     16\u001b[0m lr_scheduler\u001b[38;5;241m.\u001b[39mstep()\n",
      "File \u001b[0;32m~/.local/lib/python3.10/site-packages/accelerate/accelerator.py:2329\u001b[0m, in \u001b[0;36mAccelerator.backward\u001b[0;34m(self, loss, **kwargs)\u001b[0m\n\u001b[1;32m   2327\u001b[0m     \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlomo_backward(loss, learning_rate)\n\u001b[1;32m   2328\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 2329\u001b[0m     \u001b[43mloss\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/.local/lib/python3.10/site-packages/torch/_tensor.py:492\u001b[0m, in \u001b[0;36mTensor.backward\u001b[0;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[1;32m    482\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_unary(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m    483\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(\n\u001b[1;32m    484\u001b[0m         Tensor\u001b[38;5;241m.\u001b[39mbackward,\n\u001b[1;32m    485\u001b[0m         (\u001b[38;5;28mself\u001b[39m,),\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    490\u001b[0m         inputs\u001b[38;5;241m=\u001b[39minputs,\n\u001b[1;32m    491\u001b[0m     )\n\u001b[0;32m--> 492\u001b[0m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mautograd\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m    493\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgradient\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minputs\u001b[49m\n\u001b[1;32m    494\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/.local/lib/python3.10/site-packages/torch/autograd/__init__.py:251\u001b[0m, in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[1;32m    246\u001b[0m     retain_graph \u001b[38;5;241m=\u001b[39m create_graph\n\u001b[1;32m    248\u001b[0m \u001b[38;5;66;03m# The reason we repeat the same comment below is that\u001b[39;00m\n\u001b[1;32m    249\u001b[0m \u001b[38;5;66;03m# some Python versions print out the first line of a multi-line function\u001b[39;00m\n\u001b[1;32m    250\u001b[0m \u001b[38;5;66;03m# calls in the traceback and some print out the last line\u001b[39;00m\n\u001b[0;32m--> 251\u001b[0m \u001b[43mVariable\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_execution_engine\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_backward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m  \u001b[49m\u001b[38;5;66;43;03m# Calls into the C++ engine to run the backward pass\u001b[39;49;00m\n\u001b[1;32m    252\u001b[0m \u001b[43m    \u001b[49m\u001b[43mtensors\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    253\u001b[0m \u001b[43m    \u001b[49m\u001b[43mgrad_tensors_\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    254\u001b[0m \u001b[43m    \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    255\u001b[0m \u001b[43m    \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    256\u001b[0m \u001b[43m    \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    257\u001b[0m \u001b[43m    \u001b[49m\u001b[43mallow_unreachable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m    258\u001b[0m \u001b[43m    \u001b[49m\u001b[43maccumulate_grad\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m    259\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[0;31mOutOfMemoryError\u001b[0m: HIP out of memory. Tried to allocate 954.00 MiB. GPU 0 has a total capacty of 4.00 GiB of which 466.00 MiB is free. Of the allocated memory 2.89 GiB is allocated by PyTorch, and 440.26 MiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting max_split_size_mb to avoid fragmentation.  See documentation for Memory Management and PYTORCH_HIP_ALLOC_CONF"
     ]
    }
   ],
   "source": [
    "from tqdm.auto import tqdm\n",
    "import torch\n",
    "import math\n",
    "\n",
    "progress_bar = tqdm(range(num_training_steps))\n",
    "\n",
    "for epoch in range(num_train_epochs):\n",
    "    # 训练\n",
    "    model.train()\n",
    "    for batch in train_dataloader:\n",
    "        outputs = model(**batch)\n",
    "        loss = outputs.loss\n",
    "        accelerator.backward(loss)\n",
    "\n",
    "        optimizer.step()\n",
    "        lr_scheduler.step()\n",
    "        optimizer.zero_grad()\n",
    "        progress_bar.update(1)\n",
    "\n",
    "    # 评估\n",
    "    model.eval()\n",
    "    losses = []\n",
    "    for step, batch in enumerate(eval_dataloader):\n",
    "        with torch.no_grad():\n",
    "            outputs = model(**batch)\n",
    "\n",
    "        loss = outputs.loss\n",
    "        losses.append(accelerator.gather(loss.repeat(batch_size)))\n",
    "\n",
    "    losses = torch.cat(losses)\n",
    "    losses = losses[: len(eval_dataset)]\n",
    "    try:\n",
    "        perplexity = math.exp(torch.mean(losses))\n",
    "    except OverflowError:\n",
    "        perplexity = float(\"inf\")\n",
    "\n",
    "    print(f\">>> Epoch {epoch}: Perplexity: {perplexity}\")\n",
    "\n",
    "    # 保存并上传\n",
    "    accelerator.wait_for_everyone()\n",
    "    unwrapped_model = accelerator.unwrap_model(model)\n",
    "    unwrapped_model.save_pretrained(output_dir, save_function=accelerator.save)\n",
    "    if accelerator.is_main_process:\n",
    "        tokenizer.save_pretrained(output_dir)\n",
    "        repo.push_to_hub(\n",
    "            commit_message=f\"Training in progress epoch {epoch}\", blocking=False\n",
    "        )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "trusted": true
   },
   "outputs": [],
   "source": [
    "from transformers import pipeline\n",
    "# 这里要改\n",
    "mask_filler = pipeline(\n",
    "    \"fill-mask\", model=\"huggingface-course/distilbert-base-uncased-finetuned-imdb\"\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "trusted": true
   },
   "outputs": [],
   "source": [
    "preds = mask_filler(text)\n",
    "\n",
    "for pred in preds:\n",
    "    print(f\">>> {pred['sequence']}\")\n",
    "\n",
    "\n",
    "# 结束"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-30T14:23:15.314956Z",
     "start_time": "2025-03-30T14:23:14.679051Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "'>>> This is a great film.'\n",
      "'>>> This is a great movie.'\n",
      "'>>> This is a great idea.'\n",
      "'>>> This is a great one.'\n",
      "'>>> This is a great adventure.'\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): hardcoded absolute local path - adjust per machine\n",
    "model_path='/home/will/PycharmProjects/pytorch/huggingface/distilbert-base-uncased-finetuned-imdb/'\n",
    "check_point='checkpoint-500'\n",
    "model = AutoModelForMaskedLM.from_pretrained(model_path, subfolder=check_point)\n",
    "\n",
    "text = \"This is a great [MASK].\"\n",
    "inputs = tokenizer(text, return_tensors=\"pt\")\n",
    "token_logits = model(**inputs).logits\n",
    "# Find the location of [MASK] and extract its logits\n",
    "mask_token_index = torch.where(inputs[\"input_ids\"] == tokenizer.mask_token_id)[1]\n",
    "mask_token_logits = token_logits[0, mask_token_index, :]\n",
    "# Top-5 candidate token ids for the first masked position\n",
    "top_5_tokens = torch.topk(mask_token_logits, 5, dim=1).indices[0].tolist()\n",
    "\n",
    "for token in top_5_tokens:\n",
    "    print(f\"'>>> {text.replace(tokenizer.mask_token, tokenizer.decode([token]))}'\")"
   ]
  }
 ],
 "metadata": {
  "kaggle": {
   "accelerator": "nvidiaTeslaT4",
   "dataSources": [],
   "isGpuEnabled": true,
   "isInternetEnabled": true,
   "language": "python",
   "sourceType": "notebook"
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
