Files changed (1)
  1. examples/inference.ipynb +59 -99
examples/inference.ipynb CHANGED
@@ -9,35 +9,14 @@
9
  },
10
  {
11
  "cell_type": "code",
12
- "execution_count": 28,
13
  "metadata": {},
14
  "outputs": [
15
  {
16
  "name": "stdout",
17
  "output_type": "stream",
18
  "text": [
19
- "Requirement already up-to-date: transformers in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (4.21.3)\n",
20
- "Requirement already up-to-date: torch in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (1.12.1)\n",
21
- "Requirement already up-to-date: torchvision in c:\\users\\divanma\\appdata\\roaming\\python\\python37\\site-packages (0.13.1)\n",
22
- "Requirement already satisfied, skipping upgrade: numpy>=1.17 in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from transformers) (1.21.6)\n",
23
- "Requirement already satisfied, skipping upgrade: pyyaml>=5.1 in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from transformers) (6.0)\n",
24
- "Requirement already satisfied, skipping upgrade: tokenizers!=0.11.3,<0.13,>=0.11.1 in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from transformers) (0.12.1)\n",
25
- "Requirement already satisfied, skipping upgrade: requests in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from transformers) (2.28.1)\n",
26
- "Requirement already satisfied, skipping upgrade: importlib-metadata; python_version < \"3.8\" in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from transformers) (4.11.3)\n",
27
- "Requirement already satisfied, skipping upgrade: packaging>=20.0 in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from transformers) (21.3)\n",
28
- "Requirement already satisfied, skipping upgrade: huggingface-hub<1.0,>=0.1.0 in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from transformers) (0.9.1)\n",
29
- "Requirement already satisfied, skipping upgrade: tqdm>=4.27 in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from transformers) (4.64.1)\n",
30
- "Requirement already satisfied, skipping upgrade: regex!=2019.12.17 in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from transformers) (2022.9.11)\n",
31
- "Requirement already satisfied, skipping upgrade: filelock in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from transformers) (3.8.0)\n",
32
- "Requirement already satisfied, skipping upgrade: typing-extensions in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from torch) (4.1.1)\n",
33
- "Requirement already satisfied, skipping upgrade: pillow!=8.3.*,>=5.3.0 in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from torchvision) (5.4.1)\n",
34
- "Requirement already satisfied, skipping upgrade: certifi>=2017.4.17 in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from requests->transformers) (2022.6.15)\n",
35
- "Requirement already satisfied, skipping upgrade: charset-normalizer<3,>=2 in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from requests->transformers) (2.0.4)\n",
36
- "Requirement already satisfied, skipping upgrade: urllib3<1.27,>=1.21.1 in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from requests->transformers) (1.26.9)\n",
37
- "Requirement already satisfied, skipping upgrade: idna<4,>=2.5 in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from requests->transformers) (3.3)\n",
38
- "Requirement already satisfied, skipping upgrade: zipp>=0.5 in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from importlib-metadata; python_version < \"3.8\"->transformers) (3.8.0)\n",
39
- "Requirement already satisfied, skipping upgrade: pyparsing!=3.0.5,>=2.0.2 in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from packaging>=20.0->transformers) (3.0.4)\n",
40
- "Requirement already satisfied, skipping upgrade: colorama; platform_system == \"Windows\" in c:\\users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages (from tqdm>=4.27->transformers) (0.4.5)\n"
41
  ]
42
  }
43
  ],
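The removed output of this hunk pins transformers 4.21.3, torch 1.12.1 and torchvision 0.13.1 on the old Windows environment; the new side of the same hunk only records `zsh:1: command not found: pip`. Not part of the diff, but a minimal sketch of one way to make the install independent of whatever `pip` the shell resolves: invoke pip through the interpreter that backs the kernel.

import subprocess
import sys

# Install through the active interpreter so the packages land in the kernel's
# environment even when the shell itself has no `pip` on PATH.
subprocess.check_call(
    [sys.executable, "-m", "pip", "install", "--upgrade",
     "transformers", "torch", "torchvision"]
)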
@@ -54,7 +33,7 @@
54
  },
55
  {
56
  "cell_type": "code",
57
- "execution_count": 29,
58
  "metadata": {},
59
  "outputs": [],
60
  "source": [
@@ -71,11 +50,24 @@
71
  },
72
  {
73
  "cell_type": "code",
74
- "execution_count": 30,
75
  "metadata": {},
76
- "outputs": [],
77
  "source": [
78
- "root_dir = os.getcwd()"
79
  ]
80
  },
81
  {
@@ -87,7 +79,7 @@
87
  },
88
  {
89
  "cell_type": "code",
90
- "execution_count": 31,
91
  "metadata": {},
92
  "outputs": [],
93
  "source": [
@@ -103,51 +95,9 @@
103
  },
104
  {
105
  "cell_type": "code",
106
- "execution_count": 32,
107
  "metadata": {},
108
- "outputs": [
109
- {
110
- "ename": "MemoryError",
111
- "evalue": "",
112
- "output_type": "error",
113
- "traceback": [
114
- "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
115
- "\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)",
116
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\tarfile.py\u001b[0m in \u001b[0;36mnti\u001b[1;34m(s)\u001b[0m\n\u001b[0;32m 186\u001b[0m \u001b[0ms\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnts\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0ms\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"ascii\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"strict\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 187\u001b[1;33m \u001b[0mn\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0ms\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstrip\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mor\u001b[0m \u001b[1;34m\"0\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m8\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 188\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mValueError\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
117
- "\u001b[1;31mValueError\u001b[0m: invalid literal for int() with base 8: 'q\\x03ctorch'",
118
- "\nDuring handling of the above exception, another exception occurred:\n",
119
- "\u001b[1;31mInvalidHeaderError\u001b[0m Traceback (most recent call last)",
120
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\tarfile.py\u001b[0m in \u001b[0;36mnext\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 2288\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2289\u001b[1;33m \u001b[0mtarinfo\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtarinfo\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfromtarfile\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2290\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mEOFHeaderError\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
121
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\tarfile.py\u001b[0m in \u001b[0;36mfromtarfile\u001b[1;34m(cls, tarfile)\u001b[0m\n\u001b[0;32m 1094\u001b[0m \u001b[0mbuf\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtarfile\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfileobj\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mread\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mBLOCKSIZE\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1095\u001b[1;33m \u001b[0mobj\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcls\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrombuf\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mbuf\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtarfile\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mencoding\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtarfile\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0merrors\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1096\u001b[0m \u001b[0mobj\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0moffset\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtarfile\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfileobj\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtell\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m-\u001b[0m \u001b[0mBLOCKSIZE\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
122
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\tarfile.py\u001b[0m in \u001b[0;36mfrombuf\u001b[1;34m(cls, buf, encoding, errors)\u001b[0m\n\u001b[0;32m 1036\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1037\u001b[1;33m \u001b[0mchksum\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnti\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mbuf\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m148\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;36m156\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1038\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mchksum\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mcalc_chksums\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mbuf\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
123
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\tarfile.py\u001b[0m in \u001b[0;36mnti\u001b[1;34m(s)\u001b[0m\n\u001b[0;32m 188\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mValueError\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 189\u001b[1;33m \u001b[1;32mraise\u001b[0m \u001b[0mInvalidHeaderError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"invalid header\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 190\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mn\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
124
- "\u001b[1;31mInvalidHeaderError\u001b[0m: invalid header",
125
- "\nDuring handling of the above exception, another exception occurred:\n",
126
- "\u001b[1;31mReadError\u001b[0m Traceback (most recent call last)",
127
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages\\torch\\serialization.py\u001b[0m in \u001b[0;36m_load\u001b[1;34m(f, map_location, pickle_module, **pickle_load_args)\u001b[0m\n\u001b[0;32m 555\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 556\u001b[1;33m \u001b[0mstorage\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mobj\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 557\u001b[0m \u001b[0mstorage_dtype\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0muint8\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
128
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages\\torch\\serialization.py\u001b[0m in \u001b[0;36mlegacy_load\u001b[1;34m(f)\u001b[0m\n\u001b[0;32m 466\u001b[0m \u001b[1;31m# and the tensor back up with no problems in _this_ and future\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 467\u001b[1;33m \u001b[1;31m# versions of pytorch, but in older versions, here's the problem:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 468\u001b[0m \u001b[1;31m# the storage will be loaded up as a _UntypedStorage, and then the\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
129
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\tarfile.py\u001b[0m in \u001b[0;36mopen\u001b[1;34m(cls, name, mode, fileobj, bufsize, **kwargs)\u001b[0m\n\u001b[0;32m 1590\u001b[0m \u001b[1;32mraise\u001b[0m \u001b[0mCompressionError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"unknown compression type %r\"\u001b[0m \u001b[1;33m%\u001b[0m \u001b[0mcomptype\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1591\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfilemode\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfileobj\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1592\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
130
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\tarfile.py\u001b[0m in \u001b[0;36mtaropen\u001b[1;34m(cls, name, mode, fileobj, **kwargs)\u001b[0m\n\u001b[0;32m 1620\u001b[0m \u001b[1;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"mode must be 'r', 'a', 'w' or 'x'\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1621\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mcls\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfileobj\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1622\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
131
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\tarfile.py\u001b[0m in \u001b[0;36m__init__\u001b[1;34m(self, name, mode, fileobj, format, tarinfo, dereference, ignore_zeros, encoding, errors, pax_headers, debug, errorlevel, copybufsize)\u001b[0m\n\u001b[0;32m 1483\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfirstmember\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1484\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfirstmember\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnext\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1485\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
132
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\tarfile.py\u001b[0m in \u001b[0;36mnext\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 2300\u001b[0m \u001b[1;32melif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0moffset\u001b[0m \u001b[1;33m==\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2301\u001b[1;33m \u001b[1;32mraise\u001b[0m \u001b[0mReadError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mstr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0me\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2302\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mEmptyHeaderError\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
133
- "\u001b[1;31mReadError\u001b[0m: invalid header",
134
- "\nDuring handling of the above exception, another exception occurred:\n",
135
- "\u001b[1;31mRuntimeError\u001b[0m Traceback (most recent call last)",
136
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages\\transformers\\modeling_utils.py\u001b[0m in \u001b[0;36mload_state_dict\u001b[1;34m(checkpoint_file)\u001b[0m\n\u001b[0;32m 366\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 367\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mload\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcheckpoint_file\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmap_location\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"cpu\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 368\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
137
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages\\torch\\serialization.py\u001b[0m in \u001b[0;36mload\u001b[1;34m(f, map_location, pickle_module, **pickle_load_args)\u001b[0m\n\u001b[0;32m 386\u001b[0m \u001b[0mserialized_container_types\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m{\u001b[0m\u001b[1;33m}\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 387\u001b[1;33m \u001b[0mserialized_storages\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m{\u001b[0m\u001b[1;33m}\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 388\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
138
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages\\torch\\serialization.py\u001b[0m in \u001b[0;36m_load\u001b[1;34m(f, map_location, pickle_module, **pickle_load_args)\u001b[0m\n\u001b[0;32m 559\u001b[0m \u001b[0mstorage_numel\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mstorage\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnbytes\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 560\u001b[1;33m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 561\u001b[0m \u001b[1;31m# If storage is allocated, ensure that any other saved storages\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
139
- "\u001b[1;31mRuntimeError\u001b[0m: c:\\dev\\P\\gpt-neo-1.3B-fiction-novel-generation\\pytorch_model.bin is a zip archive (did you mean to use torch.jit.load()?)",
140
- "\nDuring handling of the above exception, another exception occurred:\n",
141
- "\u001b[1;31mMemoryError\u001b[0m Traceback (most recent call last)",
142
- "\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_20996\\2464673473.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mmodel\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mAutoModelForCausalLM\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_pretrained\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mroot_dir\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
143
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages\\transformers\\models\\auto\\auto_factory.py\u001b[0m in \u001b[0;36mfrom_pretrained\u001b[1;34m(cls, pretrained_model_name_or_path, *model_args, **kwargs)\u001b[0m\n\u001b[0;32m 444\u001b[0m \u001b[1;32melif\u001b[0m \u001b[0mtype\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mcls\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_model_mapping\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mkeys\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 445\u001b[0m \u001b[0mmodel_class\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0m_get_model_class\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcls\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_model_mapping\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 446\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mmodel_class\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_pretrained\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpretrained_model_name_or_path\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m*\u001b[0m\u001b[0mmodel_args\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mconfig\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mconfig\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 447\u001b[0m raise ValueError(\n\u001b[0;32m 448\u001b[0m \u001b[1;34mf\"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\\n\"\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
144
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages\\transformers\\modeling_utils.py\u001b[0m in \u001b[0;36mfrom_pretrained\u001b[1;34m(cls, pretrained_model_name_or_path, *model_args, **kwargs)\u001b[0m\n\u001b[0;32m 2065\u001b[0m \u001b[1;32mif\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mis_sharded\u001b[0m \u001b[1;32mand\u001b[0m \u001b[0mstate_dict\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2066\u001b[0m \u001b[1;31m# Time to load the checkpoint\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2067\u001b[1;33m \u001b[0mstate_dict\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mload_state_dict\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mresolved_archive_file\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2068\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2069\u001b[0m \u001b[1;31m# set dtype to instantiate the model under:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
145
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\site-packages\\transformers\\modeling_utils.py\u001b[0m in \u001b[0;36mload_state_dict\u001b[1;34m(checkpoint_file)\u001b[0m\n\u001b[0;32m 369\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 370\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcheckpoint_file\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 371\u001b[1;33m \u001b[1;32mif\u001b[0m \u001b[0mf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mread\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstartswith\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"version\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 372\u001b[0m raise OSError(\n\u001b[0;32m 373\u001b[0m \u001b[1;34m\"You seem to have cloned a repository without having git-lfs installed. Please install \"\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
146
- "\u001b[1;32mc:\\Users\\divanma\\.conda\\envs\\pytorchenv\\lib\\encodings\\cp1252.py\u001b[0m in \u001b[0;36mdecode\u001b[1;34m(self, input, final)\u001b[0m\n\u001b[0;32m 21\u001b[0m \u001b[1;32mclass\u001b[0m \u001b[0mIncrementalDecoder\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcodecs\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mIncrementalDecoder\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 22\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mdecode\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfinal\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 23\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mcodecs\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcharmap_decode\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0merrors\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mdecoding_table\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 24\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 25\u001b[0m \u001b[1;32mclass\u001b[0m \u001b[0mStreamWriter\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mCodec\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mcodecs\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mStreamWriter\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
147
- "\u001b[1;31mMemoryError\u001b[0m: "
148
- ]
149
- }
150
- ],
151
  "source": [
152
  "model = AutoModelForCausalLM.from_pretrained(root_dir)"
153
  ]
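The removed output above is this cell's old failure mode: `AutoModelForCausalLM.from_pretrained(root_dir)` ends in a MemoryError while materialising the 1.3B checkpoint, after torch's legacy loader rejects `pytorch_model.bin` and transformers falls back to its git-lfs check. A hedged sketch, not what the notebook does, of a lower-footprint load that can help on machines with little RAM; `root_dir` stands for the local model checkout, as in the notebook.

import torch
from transformers import AutoModelForCausalLM

root_dir = "/path/to/gpt-neo-1.3B-fiction-novel-generation"  # assumption: local checkout

# Instantiating the model directly in half precision roughly halves its
# resident memory once loaded, compared with the default float32 load.
model = AutoModelForCausalLM.from_pretrained(root_dir, torch_dtype=torch.float16)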
@@ -168,39 +118,49 @@
168
  },
169
  {
170
  "cell_type": "code",
171
- "execution_count": null,
172
- "metadata": {},
173
- "outputs": [],
174
- "source": [
175
- "inputs = tokenizer('Hello, my dog is cute', return_tensors='pt')\n",
176
- "outputs = model(**inputs, labels=inputs['input_ids'])\n",
177
- "\n",
178
- "print(f'[OUTPUT] {outputs}')"
179
- ]
180
- },
181
- {
182
- "cell_type": "markdown",
183
  "metadata": {},
184
  "source": [
185
- "## Valuation"
186
- ]
187
- },
188
- {
189
- "cell_type": "code",
190
- "execution_count": null,
191
- "metadata": {},
192
- "outputs": [],
193
- "source": [
194
- "loss = outputs.loss\n",
195
- "logits = outputs.logits\n",
196
  "\n",
197
- "print(f'[LOSS] {loss}, [LOGITS] {logits}')"
198
  ]
199
  }
200
  ],
201
  "metadata": {
202
  "kernelspec": {
203
- "display_name": "Python 3.7.3 ('pytorchenv')",
204
  "language": "python",
205
  "name": "python3"
206
  },
@@ -214,12 +174,12 @@
214
  "name": "python",
215
  "nbconvert_exporter": "python",
216
  "pygments_lexer": "ipython3",
217
- "version": "3.7.3"
218
  },
219
  "orig_nbformat": 4,
220
  "vscode": {
221
  "interpreter": {
222
- "hash": "a1f58ad6df42b3a9f00d8caf282612c40ca90330c75003a8465db9aa3eb9729c"
223
  }
224
  }
225
  },
 
9
  },
10
  {
11
  "cell_type": "code",
12
+ "execution_count": 1,
13
  "metadata": {},
14
  "outputs": [
15
  {
16
  "name": "stdout",
17
  "output_type": "stream",
18
  "text": [
19
+ "zsh:1: command not found: pip\n"
20
  ]
21
  }
22
  ],
 
33
  },
34
  {
35
  "cell_type": "code",
36
+ "execution_count": 2,
37
  "metadata": {},
38
  "outputs": [],
39
  "source": [
 
50
  },
51
  {
52
  "cell_type": "code",
53
+ "execution_count": 3,
54
  "metadata": {},
55
+ "outputs": [
56
+ {
57
+ "data": {
58
+ "text/plain": [
59
+ "'/Users/deanmartin/Source/gpt-neo-1.3B-fiction-novel-generation'"
60
+ ]
61
+ },
62
+ "execution_count": 3,
63
+ "metadata": {},
64
+ "output_type": "execute_result"
65
+ }
66
+ ],
67
  "source": [
68
+ "root_dir = '/'.join(os.getcwd().split('/')[:-1])\n",
69
+ "\n",
70
+ "root_dir"
71
  ]
72
  },
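The new cell above derives `root_dir` by splitting the working directory on `'/'`, which only works with POSIX paths (fine for the macOS run captured here, not for the Windows environment the diff removes). A hypothetical separator-agnostic equivalent:

import os

# os.path.dirname handles either path separator, so the same cell would also
# run on the old Windows setup.
root_dir = os.path.dirname(os.getcwd())
print(root_dir)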
73
  {
 
79
  },
80
  {
81
  "cell_type": "code",
82
+ "execution_count": 4,
83
  "metadata": {},
84
  "outputs": [],
85
  "source": [
 
95
  },
96
  {
97
  "cell_type": "code",
98
+ "execution_count": 5,
99
  "metadata": {},
100
+ "outputs": [],
101
  "source": [
102
  "model = AutoModelForCausalLM.from_pretrained(root_dir)"
103
  ]
 
118
  },
119
  {
120
  "cell_type": "code",
121
+ "execution_count": 6,
122
  "metadata": {},
123
+ "outputs": [
124
+ {
125
+ "name": "stderr",
126
+ "output_type": "stream",
127
+ "text": [
128
+ "The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.\n",
129
+ "Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.\n"
130
+ ]
131
+ },
132
+ {
133
+ "name": "stdout",
134
+ "output_type": "stream",
135
+ "text": [
136
+ "My name is John Doe, and I'm part of the New York State Attorney General's office.\n",
137
+ "\n",
138
+ "I'm not your \"typical\" defendant. I have no jury, no judge, no jury pool, no prosecutor-counsel, no courtroom.\n",
139
+ "\n",
140
+ "I don't even have my own cell phone with my inmate phone number. I live in the Bronx, Queens, Manhattan and New Jersey.\n",
141
+ "\n",
142
+ "And, I can't sleep because there's been a bomb threat,\n"
143
+ ]
144
+ }
145
+ ],
146
  "source": [
147
+ "prompt = 'My name is John Doe'\n",
148
+ "input_ids = tokenizer(prompt, return_tensors='pt').input_ids\n",
149
+ "generated_tokens = model.generate(\n",
150
+ " input_ids,\n",
151
+ " do_sample=True,\n",
152
+ " temperature=0.9,\n",
153
+ " max_length=100\n",
154
+ ")\n",
155
+ "generated_text = tokenizer.batch_decode(generated_tokens)[0]\n",
156
  "\n",
157
+ "print(generated_text)"
158
  ]
159
  }
160
  ],
161
  "metadata": {
162
  "kernelspec": {
163
+ "display_name": "Python 3.9.13 ('pytorch')",
164
  "language": "python",
165
  "name": "python3"
166
  },
 
174
  "name": "python",
175
  "nbconvert_exporter": "python",
176
  "pygments_lexer": "ipython3",
177
+ "version": "3.9.13"
178
  },
179
  "orig_nbformat": 4,
180
  "vscode": {
181
  "interpreter": {
182
+ "hash": "0203f9377e450cf3e5fd498dcfe93bad69687b6515d650e7d79a42aa53323e2d"
183
  }
184
  }
185
  },
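The committed output of the new generation cell includes the warning that the attention mask and pad token id were not set, with `pad_token_id` falling back to `eos_token_id` 50256. A hedged end-to-end sketch of the same inference flow that passes both explicitly; the prompt and sampling settings mirror the notebook, the checkout location is an assumption.

from transformers import AutoModelForCausalLM, AutoTokenizer

root_dir = "/path/to/gpt-neo-1.3B-fiction-novel-generation"  # assumption: local checkout

tokenizer = AutoTokenizer.from_pretrained(root_dir)
model = AutoModelForCausalLM.from_pretrained(root_dir)

prompt = "My name is John Doe"
encoded = tokenizer(prompt, return_tensors="pt")

generated_tokens = model.generate(
    encoded.input_ids,
    attention_mask=encoded.attention_mask,  # explicit mask instead of letting generate() infer one
    pad_token_id=tokenizer.eos_token_id,    # GPT-Neo has no pad token, so reuse EOS (50256)
    do_sample=True,
    temperature=0.9,
    max_length=100,
)

print(tokenizer.batch_decode(generated_tokens)[0])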