{
 "cells": [
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-16T03:00:31.661032Z",
     "start_time": "2025-07-16T03:00:27.634975Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from modelscope import AutoTokenizer, AutoModel, AutoModelForCausalLM\n",
    "\n",
    "# Download GPT-2 (small, 124M params) from the ModelScope hub.\n",
    "model_name = \"openai-community/gpt2\"\n",
    "# AutoModel would return the bare transformer without a head;\n",
    "# AutoModelForCausalLM adds the lm_head needed for text generation.\n",
    "# gpt_model = AutoModel.from_pretrained(model_name)\n",
    "gpt_model = AutoModelForCausalLM.from_pretrained(model_name)\n",
    "print(gpt_model)"
   ],
   "id": "2422b17b8ef27a54",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/dadudu/miniconda3/envs/mini-gpt/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading Model from https://www.modelscope.cn to directory: /Users/dadudu/.cache/modelscope/hub/models/openai-community/gpt2\n",
      "GPT2LMHeadModel(\n",
      "  (transformer): GPT2Model(\n",
      "    (wte): Embedding(50257, 768)\n",
      "    (wpe): Embedding(1024, 768)\n",
      "    (drop): Dropout(p=0.1, inplace=False)\n",
      "    (h): ModuleList(\n",
      "      (0-11): 12 x GPT2Block(\n",
      "        (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "        (attn): GPT2Attention(\n",
      "          (c_attn): Conv1D(nf=2304, nx=768)\n",
      "          (c_proj): Conv1D(nf=768, nx=768)\n",
      "          (attn_dropout): Dropout(p=0.1, inplace=False)\n",
      "          (resid_dropout): Dropout(p=0.1, inplace=False)\n",
      "        )\n",
      "        (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "        (mlp): GPT2MLP(\n",
      "          (c_fc): Conv1D(nf=3072, nx=768)\n",
      "          (c_proj): Conv1D(nf=768, nx=3072)\n",
      "          (act): NewGELUActivation()\n",
      "          (dropout): Dropout(p=0.1, inplace=False)\n",
      "        )\n",
      "      )\n",
      "    )\n",
      "    (ln_f): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
      "  )\n",
      "  (lm_head): Linear(in_features=768, out_features=50257, bias=False)\n",
      ")\n"
     ]
    }
   ],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-16T03:00:33.189043Z",
     "start_time": "2025-07-16T03:00:31.740602Z"
    }
   },
   "cell_type": "code",
   "source": "# Load the tokenizer matching the model checkpoint.\ngpt_tokenizer = AutoTokenizer.from_pretrained(model_name)",
   "id": "9d28ab81b30c7678",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading Model from https://www.modelscope.cn to directory: /Users/dadudu/.cache/modelscope/hub/models/openai-community/gpt2\n"
     ]
    }
   ],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-16T03:00:33.204195Z",
     "start_time": "2025-07-16T03:00:33.200983Z"
    }
   },
   "cell_type": "code",
   "source": "# Inspect the tokenizer configuration (vocab size, max length, special tokens).\ngpt_tokenizer",
   "id": "feb2a0ab7b6df77c",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "GPT2TokenizerFast(name_or_path='/Users/dadudu/.cache/modelscope/hub/models/openai-community/gpt2', vocab_size=50257, model_max_length=1024, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '<|endoftext|>', 'eos_token': '<|endoftext|>', 'unk_token': '<|endoftext|>'}, clean_up_tokenization_spaces=False, added_tokens_decoder={\n",
       "\t50256: AddedToken(\"<|endoftext|>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n",
       "}\n",
       ")"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-16T03:00:36.669146Z",
     "start_time": "2025-07-16T03:00:33.214002Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from modelscope.msdatasets import MsDataset\n",
    "\n",
    "dataset = MsDataset.load('modelscope/chinese-poetry-collection', subset_name='default', split='train')\n",
    "\n",
    "# Keep only the FIRST poem (range(1)) so the model can overfit it quickly.\n",
    "# (The original comment said \"first 20 poems\", which contradicted the code.)\n",
    "data = dataset.to_hf_dataset().select(range(1))\n",
    "\n",
    "data[:1]"
   ],
   "id": "8fc9e4f98354c4ea",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2025-07-16 11:00:33,675 - modelscope - WARNING - Use trust_remote_code=True. Will invoke codes from chinese-poetry-collection. Please make sure that you can trust the external codes.\n",
      "2025-07-16 11:00:35,518 - modelscope - WARNING - Reusing dataset dataset_builder (/Users/dadudu/.cache/modelscope/hub/datasets/modelscope/chinese-poetry-collection/master/data_files)\n",
      "2025-07-16 11:00:35,519 - modelscope - INFO - Generating dataset dataset_builder (/Users/dadudu/.cache/modelscope/hub/datasets/modelscope/chinese-poetry-collection/master/data_files)\n",
      "2025-07-16 11:00:35,519 - modelscope - INFO - Reusing cached meta-data file: /Users/dadudu/.cache/modelscope/hub/datasets/modelscope/chinese-poetry-collection/master/data_files/7c9a7977d937face2055b6145eaf516f\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'text1': ['半生长以客为家，罢直初来瀚海槎。始信人间行不尽，天涯更复有天涯。']}"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-16T03:00:36.700138Z",
     "start_time": "2025-07-16T03:00:36.696656Z"
    }
   },
   "cell_type": "code",
   "source": "# GPT-2's byte-level BPE splits each Chinese character into several byte tokens.\ngpt_tokenizer('半生长以客为家')",
   "id": "dbf339f9721d14a2",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'input_ids': [39355, 232, 37955, 165, 243, 123, 20015, 98, 22522, 95, 10310, 118, 22522, 114], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-16T03:00:36.715586Z",
     "start_time": "2025-07-16T03:00:36.712438Z"
    }
   },
   "cell_type": "code",
   "source": "# Round-trip check: decoding the full token sequence restores the original text.\ngpt_tokenizer.decode([39355, 232, 37955, 165, 243, 123, 20015, 98, 22522, 95, 10310, 118, 22522, 114])",
   "id": "cee92fd9f1e06ea7",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'半生长以客为家'"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-16T03:00:36.733855Z",
     "start_time": "2025-07-16T03:00:36.731466Z"
    }
   },
   "cell_type": "code",
   "source": "# Decoding a partial byte sequence yields replacement characters —\n# token boundaries do not align with whole multi-byte Chinese characters.\ngpt_tokenizer.decode([39355, 165])",
   "id": "f7b77e808addd986",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'��'"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-16T03:00:36.746966Z",
     "start_time": "2025-07-16T03:00:36.744333Z"
    }
   },
   "cell_type": "code",
   "source": "# Token id 0 maps to '!' in the GPT-2 vocabulary.\ngpt_tokenizer.decode([0])",
   "id": "b73e11245408b4d4",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'!'"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-16T03:00:36.766601Z",
     "start_time": "2025-07-16T03:00:36.760659Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "# GPT-2 ships with no pad token; reuse EOS so padding to max_length works.\n",
    "gpt_tokenizer.pad_token = gpt_tokenizer.eos_token\n",
    "\n",
    "class PoetryDataset(Dataset):\n",
    "    \"\"\"Poems encoded as fixed-length (max_length,) token-id tensors for causal-LM training.\"\"\"\n",
    "\n",
    "    def __init__(self, data, tokenizer, max_length=128):\n",
    "        self.tokenizer = tokenizer\n",
    "        self.max_length = max_length\n",
    "        self.encodings = []\n",
    "\n",
    "        for poem in data['text1']:\n",
    "            # Append EOS so the model learns where a poem ends.\n",
    "            # Use the tokenizer PARAMETER (not the global gpt_tokenizer) so the\n",
    "            # class works with any tokenizer passed in.\n",
    "            poem = poem + tokenizer.eos_token\n",
    "            encoded = tokenizer(\n",
    "                poem,\n",
    "                max_length=max_length,\n",
    "                padding='max_length',\n",
    "                truncation=True,\n",
    "                return_tensors='pt'\n",
    "            )\n",
    "            # squeeze() drops the batch dim: (1, max_length) -> (max_length,)\n",
    "            input_ids = encoded['input_ids'].squeeze()\n",
    "            self.encodings.append(input_ids)\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.encodings)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        # NOTE(review): pad positions are NOT masked (no -100 labels); with\n",
    "        # labels=input_ids downstream, the model is also trained on padding.\n",
    "        return self.encodings[idx]\n",
    "\n",
    "\n",
    "dataset = PoetryDataset(data, gpt_tokenizer)\n",
    "data_loader = DataLoader(dataset, batch_size=1, shuffle=True)\n",
    "for input_ids in data_loader:\n",
    "    print(input_ids)\n",
    "    break"
   ],
   "id": "fe1dd619a0366028",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[39355,   232, 37955,   165,   243,   123, 20015,    98, 22522,    95,\n",
      "         10310,   118, 22522,   114,   171,   120,   234,   163,   121,    95,\n",
      "         33566,   112, 26344,   251, 30266,    98,   163,   222,   248, 38184,\n",
      "           115,   162,   100,   236, 16764, 34650,   233, 46479,    94, 21689,\n",
      "         29785,   112, 26193,   234, 38834, 22887,   121,   171,   120,   234,\n",
      "         25465,   162,   114,   107,   162,   249,   112, 13783,   235, 17312,\n",
      "           231, 25465,   162,   114,   107, 16764, 50256, 50256, 50256, 50256,\n",
      "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256,\n",
      "         50256, 50256, 50256, 50256, 50256, 50256, 50256, 50256]])\n"
     ]
    }
   ],
   "execution_count": 9
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-16T03:00:55.006858Z",
     "start_time": "2025-07-16T03:00:36.777024Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Fine-tune with Adam. NOTE(review): lr=0.001 is very high for GPT-2\n",
    "# fine-tuning (1e-5..1e-4 is typical) — acceptable only for overfitting one poem.\n",
    "optimizer = torch.optim.Adam(gpt_model.parameters(), lr=0.001)\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "gpt_model.to(device)\n",
    "gpt_model.train()  # enable dropout etc. for training\n",
    "\n",
    "EPOCHS = 50\n",
    "\n",
    "for epoch in range(EPOCHS):\n",
    "    for input_ids in data_loader:\n",
    "        input_ids = input_ids.to(device)\n",
    "\n",
    "        # Passing labels=input_ids makes the model shift labels internally and\n",
    "        # compute the causal-LM cross-entropy itself — no separate criterion\n",
    "        # needed (the previous nn.CrossEntropyLoss() was created but never used).\n",
    "        outputs = gpt_model(input_ids, labels=input_ids)\n",
    "\n",
    "        loss = outputs.loss\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "    # Log every 10th epoch (plus the first); hoisted out of the batch loop.\n",
    "    if epoch % 10 == 0:\n",
    "        print(f'Epoch {epoch + 1}, Loss: {loss.item():.4f}')"
   ],
   "id": "2eb65b25b022f95f",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "`loss_type=None` was set in the config but it is unrecognised.Using the default loss: `ForCausalLMLoss`.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1, Loss: 8.8608\n",
      "Epoch 2, Loss: 3.7362\n",
      "Epoch 3, Loss: 14.7212\n",
      "Epoch 4, Loss: 6.1582\n",
      "Epoch 5, Loss: 2.1106\n",
      "Epoch 6, Loss: 2.0384\n",
      "Epoch 7, Loss: 1.9064\n",
      "Epoch 8, Loss: 1.6631\n",
      "Epoch 9, Loss: 1.5291\n",
      "Epoch 10, Loss: 1.4033\n",
      "Epoch 11, Loss: 1.1898\n",
      "Epoch 12, Loss: 1.0704\n",
      "Epoch 13, Loss: 0.9153\n",
      "Epoch 14, Loss: 0.6945\n",
      "Epoch 15, Loss: 0.4824\n",
      "Epoch 16, Loss: 0.3173\n",
      "Epoch 17, Loss: 0.1443\n",
      "Epoch 18, Loss: 0.1119\n",
      "Epoch 19, Loss: 0.0904\n",
      "Epoch 20, Loss: 0.1469\n",
      "Epoch 21, Loss: 0.0263\n",
      "Epoch 22, Loss: 0.0757\n",
      "Epoch 23, Loss: 0.0415\n",
      "Epoch 24, Loss: 0.0230\n",
      "Epoch 25, Loss: 0.0324\n",
      "Epoch 26, Loss: 0.0233\n",
      "Epoch 27, Loss: 0.0124\n",
      "Epoch 28, Loss: 0.0134\n",
      "Epoch 29, Loss: 0.0147\n",
      "Epoch 30, Loss: 0.0100\n",
      "Epoch 31, Loss: 0.0066\n",
      "Epoch 32, Loss: 0.0044\n",
      "Epoch 33, Loss: 0.0031\n",
      "Epoch 34, Loss: 0.0029\n",
      "Epoch 35, Loss: 0.0024\n",
      "Epoch 36, Loss: 0.0018\n",
      "Epoch 37, Loss: 0.0014\n",
      "Epoch 38, Loss: 0.0011\n",
      "Epoch 39, Loss: 0.0009\n",
      "Epoch 40, Loss: 0.0006\n",
      "Epoch 41, Loss: 0.0004\n",
      "Epoch 42, Loss: 0.0002\n",
      "Epoch 43, Loss: 0.0001\n",
      "Epoch 44, Loss: 0.0001\n",
      "Epoch 45, Loss: 0.0001\n",
      "Epoch 46, Loss: 0.0001\n",
      "Epoch 47, Loss: 0.0000\n",
      "Epoch 48, Loss: 0.0000\n",
      "Epoch 49, Loss: 0.0000\n",
      "Epoch 50, Loss: 0.0000\n"
     ]
    }
   ],
   "execution_count": 10
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-16T03:01:59.802758Z",
     "start_time": "2025-07-16T03:01:58.535101Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def generate(sentence, max_length=500):\n",
    "    \"\"\"Sample a continuation of `sentence` from the fine-tuned model.\n",
    "\n",
    "    Returns the decoded string with special tokens stripped.\n",
    "    \"\"\"\n",
    "    gpt_model.eval()  # disable dropout — the model was left in train mode\n",
    "    input_ids = gpt_tokenizer.encode(sentence, return_tensors='pt').to(device)\n",
    "    output = gpt_model.generate(\n",
    "        input_ids,\n",
    "        max_length=max_length,\n",
    "        pad_token_id=gpt_tokenizer.eos_token_id,  # silences the missing-pad warning\n",
    "        do_sample=True,\n",
    "    )\n",
    "    return gpt_tokenizer.decode(output[0], skip_special_tokens=True)\n",
    "\n",
    "print(generate(\"半生\"))"
   ],
   "id": "fed9769a193d5a54",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "半生长以客为家，罢直初来瀚海槎。始信人间行不尽，天涯更复有天涯。\n"
     ]
    }
   ],
   "execution_count": 12
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
