Upload InferenceTest.ipynb

InferenceTest.ipynb  CHANGED  (+10 -15)
@@ -10,28 +10,23 @@
   },
   {
    "cell_type": "code",
-   "execution_count":
+   "execution_count": null,
    "id": "fa3d9de4-4e59-468f-92f0-b5f2ec55858d",
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "2024-11-27 10:33:08.171106: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:477] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
-      "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n",
-      "E0000 00:00:1732703588.241296 2164 cuda_dnn.cc:8310] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
-      "E0000 00:00:1732703588.262674 2164 cuda_blas.cc:1418] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
-      "2024-11-27 10:33:08.443688: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
-      "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "from transformers import AutoProcessor, AutoTokenizer, AutoModelForCausalLM\n",
     "import torch\n",
     "import os\n",
     "\n",
+    "try:\n",
+    "    from google.colab import userdata\n",
+    "    HF_TOKEN = userdata.get('HF_TOKEN')\n",
+    "    os.environ['HF_TOKEN'] = HF_TOKEN\n",
+    "except:\n",
+    "    print(\"Not running in Google Colab, trying to get the HF_TOKEN from the environment\")\n",
+    "\n",
+    "\n",
    "if os.environ.get('HF_TOKEN') is None:\n",
    "    raise ValueError(\"You must set the HF_TOKEN environment variable to use this script, you also need to have access to the Llama 3.2 model family\")\n",
    "\n",
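Unrolled from the JSON string array, the token-resolution flow the new cell adds looks like this. This is a readability sketch, not the committed cell verbatim: the cell uses a bare except:, which the sketch widens only slightly to except Exception: so the fallback still covers both a missing google.colab import and a secret that is not set in Colab, without trapping SystemExit or KeyboardInterrupt.

import os

# Sketch of the new cell's logic. The committed cell uses a bare `except:`;
# `except Exception:` preserves the same fallback path (not in Colab, or the
# HF_TOKEN secret is absent) while staying out of interpreter-exit signals.
try:
    from google.colab import userdata  # importable only inside a Colab runtime
    HF_TOKEN = userdata.get('HF_TOKEN')
    os.environ['HF_TOKEN'] = HF_TOKEN
except Exception:
    print("Not running in Google Colab, trying to get the HF_TOKEN from the environment")

if os.environ.get('HF_TOKEN') is None:
    raise ValueError(
        "You must set the HF_TOKEN environment variable to use this script, "
        "you also need to have access to the Llama 3.2 model family"
    )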
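The remainder of the diff is notebook hygiene: execution_count is reset to null and the captured cuFFT/cuDNN/cuBLAS and TensorFlow stderr noise is dropped from outputs. A minimal sketch of the same cleanup for any notebook using nbformat, assuming the file sits next to the script (not part of this commit; jupyter nbconvert --clear-output --inplace does the equivalent from the command line):

import nbformat

path = "InferenceTest.ipynb"  # assumed path; adjust as needed
nb = nbformat.read(path, as_version=4)
for cell in nb.cells:
    if cell.cell_type == "code":
        cell.outputs = []            # drop captured stdout/stderr streams
        cell.execution_count = None  # reset the run counter
nbformat.write(nb, path)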