liamebs committed on
Commit 02592ba • 1 Parent(s): 3942073

Send corrected application file

Files changed (1)
  1. app.py +67 -202
app.py CHANGED
@@ -1,202 +1,67 @@
- {
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 29,
- "id": "6b45aa78",
- "metadata": {},
- "outputs": [],
- "source": [
- "import gradio as gr\n",
- "from transformers import AutoTokenizer, AutoModelForCausalLM\n",
- "import torch"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 30,
- "id": "4ba5d9cb",
- "metadata": {
- "scrolled": false
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Some weights of BartForCausalLM were not initialized from the model checkpoint at sshleifer/distilbart-cnn-12-6 and are newly initialized: ['lm_head.weight']\n",
- "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
- ]
- }
- ],
- "source": [
- "model = AutoModelForCausalLM.from_pretrained(\n",
- " \"tiiuae/falcon-7b-instruct\",\n",
- " torch_dtype=torch.bfloat16,\n",
- " trust_remote_code=True,\n",
- " device_map=\"auto\",\n",
- " low_cpu_mem_usage=True,\n",
- ")\n",
- "tokenizer = AutoTokenizer.from_pretrained(\"tiiuae/falcon-7b-instruct\")\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 31,
- "id": "7ce45172",
- "metadata": {},
- "outputs": [],
- "source": [
- "def generate_text(input_text):\n",
- " input_ids = tokenizer.encode(input_text, return_tensors=\"pt\")\n",
- " attention_mask = torch.ones(input_ids.shape)\n",
- " \n",
- " output = model.generate(\n",
- " input_ids,\n",
- " attention_mask=attention_mask,\n",
- " max_length=200,\n",
- " do_sample=True,\n",
- " top_k=10,\n",
- " num_return_sequences=1,\n",
- " eos_token_id=tokenizer.eos_token_id,\n",
- " )\n",
- " \n",
- " output_text = tokenizer.decode(output[0], skip_special_tokens=Ture)\n",
- " print(output_text)\n",
- " \n",
- " #Remove Prompt Echo from Generated Text\n",
- " cleaned_output_text = output_text.replace(input_text, \"\")\n",
- " return cleaned_output_text\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 32,
- "id": "ff09adf4",
- "metadata": {
- "scrolled": true
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/tmp/ipykernel_2839/161200188.py:4: GradioDeprecationWarning: Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your component from gradio.components\n",
- " gr.inputs.Textbox(label=\"Input Text\"),\n",
- "/tmp/ipykernel_2839/161200188.py:4: GradioDeprecationWarning: `optional` parameter is deprecated, and it has no effect\n",
- " gr.inputs.Textbox(label=\"Input Text\"),\n",
- "/tmp/ipykernel_2839/161200188.py:4: GradioDeprecationWarning: `numeric` parameter is deprecated, and it has no effect\n",
- " gr.inputs.Textbox(label=\"Input Text\"),\n",
- "/tmp/ipykernel_2839/161200188.py:6: GradioDeprecationWarning: Usage of gradio.inputs is deprecated, and will not be supported in the future, please import your component from gradio.components\n",
- " outputs=gr.inputs.Textbox(label=\"Generated Text\"),\n",
- "/tmp/ipykernel_2839/161200188.py:6: GradioDeprecationWarning: `optional` parameter is deprecated, and it has no effect\n",
- " outputs=gr.inputs.Textbox(label=\"Generated Text\"),\n",
- "/tmp/ipykernel_2839/161200188.py:6: GradioDeprecationWarning: `numeric` parameter is deprecated, and it has no effect\n",
- " outputs=gr.inputs.Textbox(label=\"Generated Text\"),\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Running on local URL: http://127.0.0.1:7861\n",
- "\n",
- "To create a public link, set `share=True` in `launch()`.\n"
- ]
- },
- {
- "data": {
- "text/html": [
- "<div><iframe src=\"http://127.0.0.1:7861/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
- ],
- "text/plain": [
- "<IPython.core.display.HTML object>"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Traceback (most recent call last):\n",
- " File \"/home/dolahillsimon/.local/lib/python3.9/site-packages/gradio/routes.py\", line 442, in run_predict\n",
- " output = await app.get_blocks().process_api(\n",
- " File \"/home/dolahillsimon/.local/lib/python3.9/site-packages/gradio/blocks.py\", line 1392, in process_api\n",
- " result = await self.call_function(\n",
- " File \"/home/dolahillsimon/.local/lib/python3.9/site-packages/gradio/blocks.py\", line 1097, in call_function\n",
- " prediction = await anyio.to_thread.run_sync(\n",
- " File \"/home/dolahillsimon/.local/lib/python3.9/site-packages/anyio/to_thread.py\", line 33, in run_sync\n",
- " return await get_asynclib().run_sync_in_worker_thread(\n",
- " File \"/home/dolahillsimon/.local/lib/python3.9/site-packages/anyio/_backends/_asyncio.py\", line 877, in run_sync_in_worker_thread\n",
- " return await future\n",
- " File \"/home/dolahillsimon/.local/lib/python3.9/site-packages/anyio/_backends/_asyncio.py\", line 807, in run\n",
- " result = context.run(func, *args)\n",
- " File \"/home/dolahillsimon/.local/lib/python3.9/site-packages/gradio/utils.py\", line 703, in wrapper\n",
- " response = f(*args, **kwargs)\n",
- " File \"/tmp/ipykernel_2839/3991009440.py\", line 15, in generate_text\n",
- " output_text = tokenizer.decode(output[0], skip_special_tokens=Ture)\n",
- "NameError: name 'Ture' is not defined\n"
- ]
- }
- ],
- "source": [
- "text_generation_interface = gr.Interface(\n",
- " fn=generate_text,\n",
- " inputs=[\n",
- " gr.inputs.Textbox(label=\"Input Text\"),\n",
- " ],\n",
- " outputs=gr.inputs.Textbox(label=\"Generated Text\"),\n",
- " title=\"Falcon-7B Instruct\",\n",
- ").launch()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 25,
- "id": "71c06df4",
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Closing server running on port: 7860\n",
- "Closing server running on port: 7863\n",
- "Closing server running on port: 7864\n",
- "Closing server running on port: 7865\n",
- "Closing server running on port: 7866\n",
- "Closing server running on port: 7860\n",
- "Closing server running on port: 7862\n",
- "Closing server running on port: 7861\n"
- ]
- }
- ],
- "source": [
- "gr.close_all()"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3 (ipykernel)",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.2"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
- }
 
+ #!/usr/bin/env python
+ # coding: utf-8
+ 
+ # In[29]:
+ 
+ 
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+ 
+ 
+ # In[30]:
+ 
+ 
+ model = AutoModelForCausalLM.from_pretrained(
+     "tiiuae/falcon-7b-instruct",
+     torch_dtype=torch.bfloat16,
+     trust_remote_code=True,
+     device_map="auto",
+     low_cpu_mem_usage=True,
+ )
+ tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b-instruct")
+ 
+ 
+ # In[31]:
+ 
+ 
+ def generate_text(input_text):
+     input_ids = tokenizer.encode(input_text, return_tensors="pt")
+     attention_mask = torch.ones(input_ids.shape)
+ 
+     output = model.generate(
+         input_ids,
+         attention_mask=attention_mask,
+         max_length=200,
+         do_sample=True,
+         top_k=10,
+         num_return_sequences=1,
+         eos_token_id=tokenizer.eos_token_id,
+     )
+ 
+     output_text = tokenizer.decode(output[0], skip_special_tokens=True)
+     print(output_text)
+ 
+     #Remove Prompt Echo from Generated Text
+     cleaned_output_text = output_text.replace(input_text, "")
+     return cleaned_output_text
+ 
+ 
+ # In[32]:
+ 
+ 
+ text_generation_interface = gr.Interface(
+     fn=generate_text,
+     inputs=[
+         gr.inputs.Textbox(label="Input Text"),
+     ],
+     outputs=gr.inputs.Textbox(label="Generated Text"),
+     title="Falcon-7B Instruct",
+ ).launch()
+ 
+ 
+ # In[25]:
+ 
+ 
+ gr.close_all()
+ 
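
Review note: the corrected app.py fixes the `Ture` typo and runs, but it still builds the interface through gradio.inputs, the deprecated API responsible for the GradioDeprecationWarning lines captured in the removed notebook output, and it passes an input component to outputs=. It also creates the attention mask on CPU, which can mismatch the model's placement under device_map="auto". Below is a minimal sketch of the same app on the current Gradio component API; it assumes gr.Textbox as the modern replacement for gr.inputs.Textbox/gr.outputs.Textbox and is a suggestion, not part of this commit.

#!/usr/bin/env python
# coding: utf-8
# Hypothetical cleanup sketch, not part of this commit.

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-7b-instruct",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
    low_cpu_mem_usage=True,
)
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b-instruct")

def generate_text(input_text):
    # tokenizer(...) returns input_ids and attention_mask together;
    # .to(model.device) keeps them on whatever device the weights landed on.
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)

    output = model.generate(
        **inputs,
        max_length=200,
        do_sample=True,
        top_k=10,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
    )

    output_text = tokenizer.decode(output[0], skip_special_tokens=True)
    print(output_text)

    # Remove prompt echo from the generated text
    return output_text.replace(input_text, "")

gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Input Text"),       # replaces gr.inputs.Textbox
    outputs=gr.Textbox(label="Generated Text"),  # an output component, not gr.inputs
    title="Falcon-7B Instruct",
).launch()

Behavior is otherwise identical to the committed script; the only functional difference is moving the tokenized inputs to the model's device before generation, which avoids device-mismatch errors when the model does not load on CPU.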