wb-droid committed on
Commit
bb8a317
1 Parent(s): f481b0f

Initial commit

Files changed (5)
  1. app.py +146 -0
  2. meta.pkl +3 -0
  3. model_v4.pkl +3 -0
  4. requirements.txt +0 -0
  5. webapp.ipynb +340 -0
app.py ADDED
@@ -0,0 +1,146 @@
+ # References:
+ # https://www.tanishq.ai/blog/posts/2021-11-16-gradio-huggingface.html
+ 
+ import numpy as np
+ import pandas as pd
+ import gradio as gr
+ import torch
+ from torch import nn
+ import pickle
+ from torch import tensor
+ import torch.nn.functional as F
+ 
+ # Load the vocabulary metadata (character <-> index mappings built at training time).
+ with open("meta.pkl", "rb") as f:
+     meta = pickle.load(f)
+ t2i = meta['t2i']
+ i2t = meta['i2t']
+ encode = lambda x: [t2i[c] for c in x]
+ decode = lambda x: "".join([i2t[i] for i in x])
+ 
+ batch_size = 128   # B, batch size
+ block_size = 48    # T, context length; poems are short, so 48 is enough
+ vocab_size = len(t2i.keys())
+ nn_emb_size = 64   # nn_emb, embedding width
+ n_head = 16
+ n_layers = 8
+ 
+ #device = "cuda"
+ device = "cpu"
+ 
+ def encode_pad(s):
+     # Truncate to block_size characters, encode, then left-pad with 0s to a fixed length.
+     if len(s) >= block_size:
+         sample = s[:block_size]
+     else:
+         sample = s
+     sample = encode(sample)
+     sample = [0]*(block_size-len(sample)) + sample
+     inp = tensor(sample[:block_size])[None, ...]  # add a batch dimension: (1, T)
+     return inp
+ 
+ class AttentionBlock(nn.Module):
+     def __init__(self, nn_emb=nn_emb_size, block_size=block_size, n_head=n_head):
+         super().__init__()
+         self.nn_emb = nn_emb
+         self.block_size = block_size
+         self.n_head = n_head
+ 
+         self.emb_proj = nn.Linear(nn_emb, nn_emb * 3)
+         self.ln_1 = nn.LayerNorm(nn_emb)
+         self.mult_head = nn.MultiheadAttention(nn_emb, n_head, dropout=0.2, batch_first=True)
+         self.ln_2 = nn.LayerNorm(nn_emb)
+         self.ff = nn.Sequential(nn.Linear(nn_emb, nn_emb * 4), nn.GELU(), nn.Dropout(0.2),
+                                 nn.Linear(nn_emb * 4, nn_emb), nn.GELU(), nn.Dropout(0.2))
+ 
+     def forward(self, x):  # (B, T, nn_emb)
+         x1 = x
+         x = self.emb_proj(x)  # (B, T, nn_emb*3)
+         q, k, v = x.split(self.nn_emb, dim=2)
+         # Causal self-attention; the mask must match the sequence length (block_size), not nn_emb.
+         x, _ = self.mult_head(q, k, v, key_padding_mask=None, need_weights=False,
+                               attn_mask=torch.nn.Transformer.generate_square_subsequent_mask(self.block_size),
+                               average_attn_weights=True, is_causal=True)  # (B, T, nn_emb)
+         x = x + x1
+         x = self.ff(self.ln_2(x)) + x
+         return x
+ 
+ 
+ class Model(nn.Module):
+     def __init__(self, nn_emb=nn_emb_size, block_size=block_size, vocab_size=vocab_size, n_head=n_head, n_layers=n_layers):
+         super().__init__()
+         self.vocab_size = vocab_size
+         self.block_size = block_size
+         self.nn_emb = nn_emb
+         self.n_head = n_head
+         self.n_layers = n_layers
+ 
+         self.tk_emb = nn.Embedding(vocab_size, nn_emb)
+         self.pos_emb = nn.Embedding(block_size, nn_emb)
+         self.ln = nn.LayerNorm(nn_emb)
+         # Note: list multiplication repeats the same AttentionBlock instance, so the
+         # n_layers blocks share weights; a list comprehension would make them independent.
+         self.attention_blocks = nn.ModuleList([AttentionBlock(nn_emb, block_size, n_head)] * n_layers)
+         self.ln_h = nn.Linear(nn_emb, self.vocab_size)
+ 
+     def forward(self, inp, targ=None):  # inp is (B, T), targ is (B, T)
+         inp = inp.to(device)
+         tk = self.tk_emb(inp)  # (B,T,nn_emb)
+         positions = torch.arange(self.block_size).to(device)
+         pos = self.pos_emb(positions)  # (T,nn_emb)
+         x = tk + pos  # (B,T,nn_emb)
+         for blk in self.attention_blocks:
+             x = blk(x)
+         x = self.ln(x)    # (B,T,nn_emb)
+         x = self.ln_h(x)  # (B,T,vocab_size)
+         if targ is None:
+             loss = None
+         else:
+             targ = targ.to(device)
+             loss = F.cross_entropy(x.view(-1, x.shape[-1]), targ.view(-1))
+         return x, loss
+ 
+ m = Model()
+ m.to(device)
+ 
+ # Replace the freshly initialised model with the trained weights.
+ with open("model_v4.pkl", "rb") as f:
+     m = pickle.load(f)
+ 
+ top_k = 20
+ def generate(s, num=60):
+     # Autoregressively sample one character at a time, with top-k filtering.
+     for i in range(num + num):
+         inp = s[-block_size:]
+         inp = encode_pad(inp).to(device)
+         out, loss = m(inp)
+         out = out[:, -1, :]  # logits for the last position
+         if top_k is not None:
+             v, _ = torch.topk(out, min(top_k, out.size(-1)))
+             out[out < v[:, [-1]]] = -float('Inf')
+         prob = torch.softmax(out, dim=-1)
+         g = torch.multinomial(prob, num_samples=1)
+         next_c = i2t[g[0].item()]
+         # Skip characters already used in the poem, except punctuation.
+         if next_c in s and next_c != '。' and next_c != ',':
+             continue
+         s = s + next_c
+ 
+         if (len(s) > num and s[-1] == "。"):
+             break
+     return s
+ 
+ inputs = [gr.Textbox(label="Input",
+                      info="Enter some Chinese text to start generating",
+                      lines=3,
+                      value="终南。",)]
+ 
+ outputs = [gr.Textbox(label="Output",
+                       info="Generated poem",
+                       lines=3,
+                       value="",)]
+ gr.Interface(fn=generate, inputs=inputs, outputs=outputs, title="Enter Chinese text to generate a Chinese poem.").launch(share=True)
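
For reference, the top-k filtering step inside generate() can be sanity-checked in isolation. The sketch below uses made-up logits for a toy 6-token vocabulary (illustrative only; it does not touch meta.pkl or model_v4.pkl):

import torch

# Dummy logits for one sequence position over a 6-token vocabulary (values are illustrative).
logits = torch.tensor([[2.0, 0.5, -1.0, 3.0, 0.0, 1.5]])
top_k = 3

# Keep only the top_k largest logits; everything else is pushed to -inf,
# so softmax assigns it zero probability, exactly as in generate().
v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
logits[logits < v[:, [-1]]] = -float('Inf')

prob = torch.softmax(logits, dim=-1)                # probabilities over the 3 surviving tokens
next_id = torch.multinomial(prob, num_samples=1)    # sample one token index
print(prob, next_id)
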
meta.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae0f2ecd644b93adbd2ad86e1f2bcffce1203e4376c0eb8b0b64626f05a2e927
+ size 125873
model_v4.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09da34b6e08bacc70f1ed89313bb29f6ea6d816a643017cc5d31dee21c287cdc
+ size 4129724
requirements.txt ADDED
File without changes
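
requirements.txt is added empty. Judging from the imports in app.py, a populated version would presumably pin at least the following packages (a hypothetical sketch, not part of this commit):

gradio
torch
numpy
pandas
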
webapp.ipynb ADDED
@@ -0,0 +1,340 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": 51,
+    "id": "1eccc83e-bc68-4082-a3cc-b055779b6ee8",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# References:\n",
+     "# https://www.tanishq.ai/blog/posts/2021-11-16-gradio-huggingface.html"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 2,
+    "id": "5b74867e-7ec1-4cda-9d96-0f5cd9cd4810",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import numpy as np\n",
+     "import pandas as pd\n",
+     "import gradio as gr\n",
+     "import torch\n",
+     "from torch import nn\n",
+     "import pickle\n",
+     "from torch import tensor\n",
+     "import torch.nn.functional as F"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 3,
+    "id": "7d6e9e70-83fe-4209-8f06-6542cf6ba11b",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Load the vocabulary metadata (character <-> index mappings built at training time).\n",
+     "with open(\"meta.pkl\", \"rb\") as f:\n",
+     "    meta = pickle.load(f)\n",
+     "t2i = meta['t2i']\n",
+     "i2t = meta['i2t']\n",
+     "encode = lambda x: [t2i[c] for c in x]\n",
+     "decode = lambda x: \"\".join([i2t[i] for i in x])"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 7,
+    "id": "c4a0b480-6775-4d82-9395-9b5a455012ad",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "batch_size = 128   # B, batch size\n",
+     "block_size = 48    # T, context length; poems are short, so 48 is enough\n",
+     "vocab_size = len(t2i.keys())\n",
+     "nn_emb_size = 64   # nn_emb, embedding width\n",
+     "n_head = 16\n",
+     "n_layers = 8\n",
+     "\n",
+     "#device = \"cuda\"\n",
+     "device = \"cpu\""
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 8,
+    "id": "0e4e72ce-5f61-4831-b7e8-703ed171936b",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "def encode_pad(s):\n",
+     "    # Truncate to block_size characters, encode, then left-pad with 0s to a fixed length.\n",
+     "    if len(s) >= block_size:\n",
+     "        sample = s[:block_size]\n",
+     "    else:\n",
+     "        sample = s\n",
+     "    sample = encode(sample)\n",
+     "    sample = [0]*(block_size-len(sample)) + sample\n",
+     "    inp = tensor(sample[:block_size])[None, ...]  # add a batch dimension: (1, T)\n",
+     "    return inp"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 9,
+    "id": "a9bc886f-4ec8-458a-b847-c9996df57fa9",
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "Model(\n",
+        "  (tk_emb): Embedding(7475, 64)\n",
+        "  (pos_emb): Embedding(48, 64)\n",
+        "  (ln): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
+        "  (attention_blocks): ModuleList(\n",
+        "    (0-7): 8 x AttentionBlock(\n",
+        "      (emb_proj): Linear(in_features=64, out_features=192, bias=True)\n",
+        "      (ln_1): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
+        "      (mult_head): MultiheadAttention(\n",
+        "        (out_proj): NonDynamicallyQuantizableLinear(in_features=64, out_features=64, bias=True)\n",
+        "      )\n",
+        "      (ln_2): LayerNorm((64,), eps=1e-05, elementwise_affine=True)\n",
+        "      (ff): Sequential(\n",
+        "        (0): Linear(in_features=64, out_features=256, bias=True)\n",
+        "        (1): GELU(approximate='none')\n",
+        "        (2): Dropout(p=0.2, inplace=False)\n",
+        "        (3): Linear(in_features=256, out_features=64, bias=True)\n",
+        "        (4): GELU(approximate='none')\n",
+        "        (5): Dropout(p=0.2, inplace=False)\n",
+        "      )\n",
+        "    )\n",
+        "  )\n",
+        "  (ln_h): Linear(in_features=64, out_features=7475, bias=True)\n",
+        ")"
+       ]
+      },
+      "execution_count": 9,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "class AttentionBlock(nn.Module):\n",
+     "    def __init__(self, nn_emb=nn_emb_size, block_size=block_size, n_head=n_head):\n",
+     "        super().__init__()\n",
+     "        self.nn_emb = nn_emb\n",
+     "        self.block_size = block_size\n",
+     "        self.n_head = n_head\n",
+     "\n",
+     "        self.emb_proj = nn.Linear(nn_emb, nn_emb * 3)\n",
+     "        self.ln_1 = nn.LayerNorm(nn_emb)\n",
+     "        self.mult_head = nn.MultiheadAttention(nn_emb, n_head, dropout=0.2, batch_first=True)\n",
+     "        self.ln_2 = nn.LayerNorm(nn_emb)\n",
+     "        self.ff = nn.Sequential(nn.Linear(nn_emb, nn_emb * 4), nn.GELU(), nn.Dropout(0.2),\n",
+     "                                nn.Linear(nn_emb * 4, nn_emb), nn.GELU(), nn.Dropout(0.2))\n",
+     "\n",
+     "    def forward(self, x):  # (B, T, nn_emb)\n",
+     "        x1 = x\n",
+     "        x = self.emb_proj(x)  # (B, T, nn_emb*3)\n",
+     "        q, k, v = x.split(self.nn_emb, dim=2)\n",
+     "        # Causal self-attention; the mask must match the sequence length (block_size), not nn_emb.\n",
+     "        x, _ = self.mult_head(q, k, v, key_padding_mask=None, need_weights=False,\n",
+     "                              attn_mask=torch.nn.Transformer.generate_square_subsequent_mask(self.block_size),\n",
+     "                              average_attn_weights=True, is_causal=True)  # (B, T, nn_emb)\n",
+     "        x = x + x1\n",
+     "        x = self.ff(self.ln_2(x)) + x\n",
+     "        return x\n",
+     "\n",
+     "\n",
+     "class Model(nn.Module):\n",
+     "    def __init__(self, nn_emb=nn_emb_size, block_size=block_size, vocab_size=vocab_size, n_head=n_head, n_layers=n_layers):\n",
+     "        super().__init__()\n",
+     "        self.vocab_size = vocab_size\n",
+     "        self.block_size = block_size\n",
+     "        self.nn_emb = nn_emb\n",
+     "        self.n_head = n_head\n",
+     "        self.n_layers = n_layers\n",
+     "\n",
+     "        self.tk_emb = nn.Embedding(vocab_size, nn_emb)\n",
+     "        self.pos_emb = nn.Embedding(block_size, nn_emb)\n",
+     "        self.ln = nn.LayerNorm(nn_emb)\n",
+     "        # Note: list multiplication repeats the same AttentionBlock instance, so the\n",
+     "        # n_layers blocks share weights; a list comprehension would make them independent.\n",
+     "        self.attention_blocks = nn.ModuleList([AttentionBlock(nn_emb, block_size, n_head)] * n_layers)\n",
+     "        self.ln_h = nn.Linear(nn_emb, self.vocab_size)\n",
+     "\n",
+     "    def forward(self, inp, targ=None):  # inp is (B, T), targ is (B, T)\n",
+     "        inp = inp.to(device)\n",
+     "        tk = self.tk_emb(inp)  # (B,T,nn_emb)\n",
+     "        positions = torch.arange(self.block_size).to(device)\n",
+     "        pos = self.pos_emb(positions)  # (T,nn_emb)\n",
+     "        x = tk + pos  # (B,T,nn_emb)\n",
+     "        for blk in self.attention_blocks:\n",
+     "            x = blk(x)\n",
+     "        x = self.ln(x)    # (B,T,nn_emb)\n",
+     "        x = self.ln_h(x)  # (B,T,vocab_size)\n",
+     "        if targ is None:\n",
+     "            loss = None\n",
+     "        else:\n",
+     "            targ = targ.to(device)\n",
+     "            loss = F.cross_entropy(x.view(-1, x.shape[-1]), targ.view(-1))\n",
+     "        return x, loss\n",
+     "\n",
+     "m = Model()\n",
+     "m.to(device)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 20,
+    "id": "95545bf7-51fa-45a8-b34d-0231aa95e300",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Replace the freshly initialised model with the trained weights.\n",
+     "with open(\"model_v4.pkl\", \"rb\") as f:\n",
+     "    m = pickle.load(f)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 21,
+    "id": "c2393e78-a1c6-4671-9170-4ea33cdb50d1",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "top_k = 20\n",
+     "def generate(s, num=60):\n",
+     "    # Autoregressively sample one character at a time, with top-k filtering.\n",
+     "    for i in range(num + num):\n",
+     "        inp = s[-block_size:]\n",
+     "        inp = encode_pad(inp).to(device)\n",
+     "        out, loss = m(inp)\n",
+     "        out = out[:, -1, :]  # logits for the last position\n",
+     "        if top_k is not None:\n",
+     "            v, _ = torch.topk(out, min(top_k, out.size(-1)))\n",
+     "            out[out < v[:, [-1]]] = -float('Inf')\n",
+     "        prob = torch.softmax(out, dim=-1)\n",
+     "        g = torch.multinomial(prob, num_samples=1)\n",
+     "        next_c = i2t[g[0].item()]\n",
+     "        # Skip characters already used in the poem, except punctuation.\n",
+     "        if next_c in s and next_c != '。' and next_c != ',':\n",
+     "            continue\n",
+     "        s = s + next_c\n",
+     "\n",
+     "        if (len(s) > num and s[-1] == \"。\"):\n",
+     "            break\n",
+     "    return s"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 24,
+    "id": "170b95ca-74b9-4360-84cc-6a8dfa3f8c42",
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "'终南。若问黄云一路在,更有东城上去时。不须为别故园庐,独坐江山半夜凉。此地无馀春树晚,今朝日暮向来迟。西北天津长望后,三湘月下烟中。'"
+       ]
+      },
+      "execution_count": 24,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "generate('终南。')"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 26,
+    "id": "edca19ab-087b-4368-84d0-8eee7388c200",
+    "metadata": {
+     "scrolled": true
+    },
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Running on local URL: http://127.0.0.1:7867\n",
+       "\n",
+       "To create a public link, set `share=True` in `launch()`.\n"
+      ]
+     },
+     {
+      "data": {
+       "text/html": [
+        "<div><iframe src=\"http://127.0.0.1:7867/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
+       ],
+       "text/plain": [
+        "<IPython.core.display.HTML object>"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     },
+     {
+      "data": {
+       "text/plain": []
+      },
+      "execution_count": 26,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "inputs = [gr.Textbox(label=\"Input\",\n",
+     "                     info=\"Enter some Chinese text to start generating\",\n",
+     "                     lines=3,\n",
+     "                     value=\"终南。\",)]\n",
+     "\n",
+     "outputs = [gr.Textbox(label=\"Output\",\n",
+     "                      info=\"Generated poem\",\n",
+     "                      lines=3,\n",
+     "                      value=\"\",)]\n",
+     "gr.Interface(fn=generate, inputs=inputs, outputs=outputs, title=\"Enter Chinese text to generate a Chinese poem.\").launch(share=False)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "6112eaea-16d6-4d43-8b95-3999c605643b",
+    "metadata": {},
+    "outputs": [],
+    "source": []
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3 (ipykernel)",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.8.10"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 5
+ }
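
The notebook launches the interface on http://127.0.0.1:7867. For completeness, one possible way to call such a running interface programmatically is sketched below; it assumes the separate gradio_client package is installed and that the app is up on that port, and it uses /predict, the default endpoint name a gr.Interface exposes:

from gradio_client import Client

# Connect to the locally running Gradio app started by webapp.ipynb (or app.py).
client = Client("http://127.0.0.1:7867/")

# Send a seed string and receive the generated poem, mirroring generate().
poem = client.predict("终南。", api_name="/predict")
print(poem)
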