File size: 60,770 Bytes
dfce964
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16be967
dfce964
 
 
 
 
 
 
 
16be967
7dbbcfc
dfce964
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7dbbcfc
dfce964
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
# -*- coding: UTF-8 -*-

#### gradio: 3.50.2

import os

# Requirements for this Space, kept as data so they can optionally be
# written to a file and pip-installed (see the disabled snippet below).
dep_text = '''
pandas
chatglm-cpp
torch
tabulate
tqdm
transformers
accelerate
sentencepiece
huggingface_hub
'''

# Disabled bootstrap: would dump dep_text to disk and pip-install it.
'''
with open("dep_text.txt", "w") as f:
    f.write(dep_text)
os.system("pip install -r dep_text.txt")
'''

import gradio as gr
import random
import time
import pandas as pd

import chatglm_cpp
from pathlib import Path
from huggingface_hub import space_info

# Fetch the 4-bit quantized ChatGLM3 GGML weights on first run only.
if not os.path.exists("chatglm3-ggml_q4_0.bin"):
    os.system("wget https://huggingface.co/spaces/svjack/chatglm3-open-chat/resolve/main/chatglm3-ggml_q4_0.bin")

model_file_path = "chatglm3-ggml_q4_0.bin"
assert os.path.exists(model_file_path)
#model_file_path = "../../Downloads1/chatglm3-ggml_q4_0.bin"
# Single module-level inference pipeline shared by every request handler.
chatglm_llm = chatglm_cpp.Pipeline(Path(model_file_path))

def predict(message, history):
    """Stream a ChatGLM3 reply for *message* given prior chat turns.

    ``history`` is a list of two-item ``[user, assistant]`` pairs. It is
    flattened into the alternating-turn list the chatglm_cpp pipeline
    expects, with the new user *message* appended last. Yields the
    accumulated partial response after each streamed chunk.
    """
    turns = [text for pair in history for text in pair]
    turns.append(message)

    chunk_stream = chatglm_llm.chat(
        history=turns,
        do_sample=False,
        stream=True,
    )

    partial = ""
    for chunk in chunk_stream:
        partial += chunk
        yield partial

# Few-shot example tables shown in the UI. Each DataFrame is a
# [User, Assistant] transcript whose last Assistant cell is empty — that
# final row is the query the model is asked to complete.
example_df_list = []

# 1) Chinese idiom -> emoji translation.
example_df_list.append(
    pd.DataFrame(
    [['请执行将成语翻译成emoji的任务,下面是一些例子。', '好的。'],
     ['朝斯夕斯', '🌞🕛🌇🌛'],
     ['出其不意', '🪖🐎🗡️✌️'],
     ['百紫千红', '🎎🌹🍊🍃'],
     ['背碑覆局', '🀄️♟️🏳️💀'],
     ['春蛇秋蚓', '🌞🐍🍁🪱'],
     ['传风扇火', '☁️🌬️\U0001faad🔥'],
     ['不可逾越', '💪🏃🥱☹️'],
     ['丹书白马', '📄📖😻🐎'],
     ['挨冻受饿', '🌬️🥶🍽️😱'],
     ['白马非马', '😄🌟🐎😝'],
     ['抱玉握珠', '🫂💰\U0001faf3🧧'],
     ['道骨仙风', '✝️👨🕌👼'],
     ['春华秋实', '🌞🌽🍁🍉'],
     ['春风夏雨', '☀️🌬️🌧️⚡️'],
     ['姹紫嫣红', ""]],
     columns = ["User", "Assistant"]
     )
)

# 2) Emoji -> short English scene description.
example_df_list.append(
pd.DataFrame(
[['Use following emojis to generate a short description of a scene', 'Ok'],
 ['👨👩🔥❄️', "there's a couple who share an intense passion or attraction towards each other . The setting takes place in cold weather conditions"],
 ['🌲🔥👨💦', 'A man battling a raging forest fire, using a hose to try and extinguish it'],
 ['🏫📚💭🙌', "a learning environment where students are engaged in studying or discussing topics relevant to their coursework. They might be referring to reference materials like textbooks or notes while wearing glasses as they focus intently on understanding complex concepts. At some point during the session, there could also be moments of appreciation from participants who acknowledge each other's contributions through applause, indicating a positive atmosphere conducive to effective learning."],
 ['🎨🌹📝🌺', "a beautiful rose garden with various art pieces scattered around it. Visitors can appreciate both the natural beauty and man-made creations in one harmonious setting"],
 ['🌹💕🌸💋', "a picture of romance in nature surrounded by beautiful flowers like roses and tulips while exuding strong feelings through love-struck faces. "],
 ["👩‍🍳🐠🤷🥛", ""]
 ],
 columns = ["User", "Assistant"]
 )
)

# 3) Extract the interrogative word from a Chinese question.
example_df_list.append(
    pd.DataFrame(
    [['请从下面的句子中提取句中疑问词。', '好的。'],
     ['宁波在哪个省份?', '哪个省份'],
     ['中国的货币是什么?', '什么'],
     ['百慕大三角在什么地方?', '什么地方'],
     ['谁是最可爱的人?', "谁"],
     ['何时全面实现现代化?', ""]
     ],
     columns = ["User", "Assistant"]
     )
)

# 4) Extract entity and attribute from a Chinese question.
example_df_list.append(
pd.DataFrame(
[['请从下面的句子中提取实体和属性。', '好的。'],
 ['宁波在哪个省份?', '实体:宁波 属性:省份'],
 ['中国的货币是什么?', '实体:中国 属性:货币'],
 ['百慕大三角在什么地方?', '实体:百慕大三角 属性:地方'],
 ['谁是最可爱的人?', "实体:人 属性:可爱"],
 ['黄河的拐点在哪里?', "实体:黄河 属性:拐点"],
 ["玉米的引进时间是什么时候?", ""]
 ],
 columns = ["User", "Assistant"]
 )
)

# 5) Generate a question/answer pair from a Chinese sentence.
example_df_list.append(
                                pd.DataFrame(
                                [['请根据输入的句子生成一个问题和对应的答案。', '好的。'],
                                 ['飓风格特是1993年9月在墨西哥和整个中美洲引发严重洪灾的大规模热带气旋.', '问题:飓风是什么时候发生的? 答案:1993年9月'],
                                 ['飓风源于9月14日西南加勒比海上空一股东风波。', '问题:飓风是什么时候开始的? 答案:9月14日'],
                                 ['飓风格特是1993年9月在墨西哥和整个中美洲引发严重洪灾的大规模热带气旋', '问题:飓风特在哪个国家登陆? 答案:墨西哥'],
                                 ['飓风源于9月14日西南加勒比海上空一股东风波。', "问题:飓风从哪里开始的? 答案:西南加勒比海"],
                                 ['飓风次日从尼加拉瓜登岸,经过洪都拉斯后于9月17日在洪都拉斯湾再次达到热带风暴标准,', "问题:飓风何时达到热带风暴标准? 答案:9月17日"],
                                 ["飓风9月21日从纳亚里特州进入太平洋时已降级成热带低气压,最终于5天后在开放水域上空消散。", "问题:飓风在什么时候消散? 答案:5天后"],
                                 ["维护国际和平与安全. 第二次世界大战结束后的1945年,联合国宣布成立,总部设在纽约,并将维护国际和平与安全确立为主要目的之一。", ""]
                                 ],
                                 columns = ["User", "Assistant"]
                                 )
)

# 6) Event cause/effect (precondition, consequence, motivation, feeling) — Chinese.
example_df_list.append(
pd.DataFrame(
[['以下面发生的事情为背景,回答与时间起因结果相关的问题。', '好的。'],

 ['以下事件有哪些必要的先决条件:X吃到了一顿大餐。', 'X进行了点餐'],
 ['下面的事件发生后可能会发生什么:X吃到了一顿大餐。', 'X变胖'],
 ['以下事件的动机是什么:X吃到了一顿大餐。', 'X饿了'],
 ['以下事件发生后,你有什么感觉:X吃到了一顿大餐。', "X感觉味道很好"],

 ['以下事件有哪些必要的先决条件:X见到了自己喜欢的明星。', 'X买了一张票'],
 ['下面的事件发生后可能会发生什么:X见到了自己喜欢的明星。', 'X被打动了'],
 ['以下事件的动机是什么:X见到了自己喜欢的明星。', 'X想进行一些娱乐'],
 ['以下事件发生后,你有什么感觉:X见到了自己喜欢的明星。', "X的心情愉快"],

 ['以下事件有哪些必要的先决条件:X去行骗', 'X心术不正'],
 ['下面的事件发生后可能会发生什么:X去行骗', 'X被指控'],
 ['以下事件的动机是什么:X去行骗', 'X想不劳而获'],
 ['以下事件发生后,你有什么感觉:X去行骗', "X在监狱里挨饿受冻"],

 ['下面的事件发生后可能会发生什么:X去伊斯坦布尔', ''],
 ],
 columns = ["User", "Assistant"]
 )
)

# 7) Event cause/effect — English counterpart of example 6.
example_df_list.append(
                            pd.DataFrame(
                            [['Use the following events as a background to answer questions related to the cause and effect of time.', 'Ok'],

                             ['What are the necessary preconditions for the next event?:X had a big meal.', 'X placed an order'],
                             ['What could happen after the next event?:X had a big meal.', 'X becomes fat'],
                             ['What is the motivation for the next event?:X had a big meal.', 'X is hungry'],
                             ['What are your feelings after the following event?:X had a big meal.', "X tastes good"],

                             ['What are the necessary preconditions for the next event?:X met his favorite star.', 'X bought a ticket'],
                             ['What could happen after the next event?:X met his favorite star.', 'X is motivated'],
                             ['What is the motivation for the next event?:X met his favorite star.', 'X wants to have some entertainment'],
                             ['What are your feelings after the following event?:X met his favorite star.', "X is in a happy mood"],

                             ['What are the necessary preconditions for the next event?: X to cheat', 'X has evil intentions'],
                             ['What could happen after the next event?:X to cheat', 'X is accused'],
                             ['What is the motivation for the next event?:X to cheat', 'X wants to get something for nothing'],
                             ['What are your feelings after the following event?:X to cheat', "X is starving and freezing in prison"],

                             ['What could happen after the next event?:X go to Istanbul', ''],
                             ],
                             columns = ["User", "Assistant"]
                             )
)

# 8) Generate a Jack/Anna dialogue grounded in a Chinese passage.
example_df_list.append(
                                pd.DataFrame(
                                [['根据下面的上下文生成杰克和安娜之间的日常对话。', '好的。'],
                                ["飓风格特是1993年9月在墨西哥和整个中美洲引发严重洪灾的大规模热带气旋,源于9月14日西南加勒比海上空一股东风波。次日从尼加拉瓜登岸,经过洪都拉斯后于9月17日在洪都拉斯湾再次达到热带风暴标准,但次日进入伯利兹上空后就减弱成热带低气压。穿过尤卡坦半岛后,在9月20日强化成二级飓风,从韦拉克鲁斯州的图斯潘附近登陆墨西哥。9月21日从纳亚里特州进入太平洋时已降级成热带低气压,最终于5天后在开放水域上空消散。", "杰克:嗨,你好吗?\n安娜:我很好,最近怎么样?\n杰克:我们去年9月在墨西哥旅游,整个中美洲发生了严重热带气旋。\n安娜:仔细说说。\n杰克:我们遭遇的飓风格特简直是一场灾难。\n安娜:飓风是如何发生的?\n杰克:从南加拉瓜登岸,经过瓜岛后,在9月17日洪都拉斯湾再次达到热带风暴标准。\n安娜:第二天发生了什么?\n杰克:次日到了伯利兹上空,然后穿过尤卡坦半岛,越过,进入墨西哥北部。\n安娜:太可怕了。\n杰克:后来飓风已经降到了热带低气压。\n安娜:真是有史以来最严重的一次。结果怎么样?\n杰克:五天后散去了。"],
                                ['珊瑚坝是长江中的一处河漫滩,位于长江重庆市渝中区区段主航道左侧[1],靠近渝中半岛,原分属重庆市市中区菜园坝街道和石板坡街道[2],现属渝中区菜园坝街道石板坡社区[3],是长江上游缓冲地段自然冲积沙洲,略呈纺锤形[4]或椭圆形,长约1800米,宽约600米,坝上遍布鹅卵石和水草。每年夏季洪水时均被淹没,其余时间常露水面,枯水期则与长江左岸相连[5]。', "杰克:你听说过珊瑚坝吗?\n安娜:是的,它是一处河漫滩\n杰克:它在一个什么样的地方?\n安娜:位于长江重庆市渝中区区段主航道左侧。\n杰克:哪里离哪里比较近?\n安娜:渝中半岛\n杰克:这么好的景点处在什么行政区划?\n安娜:属于石板坡街道和社区。\n杰克:这个堤坝有什么特点?\n安娜:它是略呈纺锤形的自然冲积沙洲。\n杰克:在不同的季节景象也不同吧?\n安娜:是的,夏季洪水时均被淹没,其余时间常露水面。"],
                                ["甬江是浙東運河入海前个壓末一段河道,歷史丄曾在漕糧海運中起到重要个作用,也曾溝通內陸搭海上絲綢之路[5]。甬江口曾發生一系列戰事,爲寧波帶來動盪[6]。但另一方面,西方文明也頂早在甬江畔登陸,形成江北岸外人居留地,開啟爻寧波个近代化[7]。", ""]
                                 ],
                                 columns = ["User", "Assistant"]
                                 )
)

# 9) Jack/Anna dialogue generation — English counterpart of example 8.
example_df_list.append(
                                pd.DataFrame(
                                [['Generate a daily conversation between Jack and Anna based on the context below.', 'Ok'],
                                ["Hurricane Gert was a large-scale tropical cyclone that caused severe flooding in Mexico and throughout Central America in September 1993. It originated from a tropical wave over the southwest Caribbean Sea on September 14. It landed in Nicaragua the next day, passed through Honduras, and reached tropical storm status again in the Gulf of Honduras on September 17. However, it weakened into a tropical depression after entering Belize the next day. After passing through the Yucatan Peninsula, it strengthened into a Category 2 hurricane on September 20 and made landfall in Mexico near Tuxpan, Veracruz. It was downgraded to a tropical depression when it entered the Pacific Ocean from Nayarit on September 21, and finally dissipated over open waters five days later.",
                                "Jack: Hi, how are you?\nAnna: I'm fine, how are you doing? \nJack: We were traveling in Mexico last September, and severe tropical cyclones occurred throughout Central America. \nAnna: Tell me carefully. \nJack: What we had with Hurricane Gert was a disaster. \nAnna: How do hurricanes happen? \nJack: It landed in South Caragua, passed through Guadalcanal, and reached tropical storm standards again in the Gulf of Honduras on September 17. \nAnna:What happened the next day? \nJack: The next day we arrived over Belize, then passed through the Yucatan Peninsula, crossed over, and entered northern Mexico. \nAnna: It's terrible. \nJack: Later the hurricane was reduced to a tropical depression. \nAnna: It's the worst ever. what's the result? \nJack: Dispersed after five days."],
                                ['Coral Dam is a floodplain in the Yangtze River. It is located on the left side of the main channel of the Yuzhong District section of Chongqing City[1], close to the Yuzhong Peninsula. It used to belong to Caiyuanba Street and Shibanpo Street, Shizhong District, Chongqing City[2], and now belongs to Shibanpo Community, Caiyuanba Street, Yuzhong District [3] is a natural alluvial sandbank in the buffer zone of the upper reaches of the Yangtze River. It is slightly spindle-shaped [4] or oval, about 1,800 meters long and 600 meters wide. The dam is covered with pebbles and aquatic plants. It is submerged during floods every summer, and is often exposed to the water during the rest of the year. During the dry season, it is connected to the left bank of the Yangtze River [5].',
                                "Jack: Have you heard of the Coral Dam?\nAnna: Yes, it is a floodplain\nJack: What kind of place is it?\nAnna: It is located on the left side of the main channel of the Yuzhong District of Chongqing City on the Yangtze River. \nJack: Which one is closer? \nAnna: Yuzhong Peninsula\nJack: In what administrative division is such a good scenic spot located?\nAnna: It belongs to Shibanpo streets and communities. \nJack: What are the characteristics of this dike?\nAnna: It is a slightly spindle-shaped natural alluvial sandbar. \nJack: The scene is different in different seasons, right? \nAnna: Yes, it is submerged during floods in the summer, and is often exposed during the rest of the time."],
                                ["The Yongjiang River is the last stretch of river before the East Zhejiang Canal enters the sea. Historically, it has played an important role in grain transportation and has also connected the inland and built the Maritime Silk Road [5]. A series of wars occurred at the mouth of the Yongjiang River, bringing turmoil to Ningbo [6]. But on the other hand, Western civilization also landed on the banks of the Yongjiang River at the earliest, forming a settlement for outsiders on the north bank of the river and starting the modernization of Ningbo [7].", ""]
                                 ],
                                 columns = ["User", "Assistant"]
                                 )
)

# 10) Continue a Stable Diffusion prompt in Chinese.
example_df_list.append(
                                pd.DataFrame(
                                [['使用中文续写下面stable diffusion模型的prompt', '好的。'],
                                ["一只凶猛的老虎,咬死了一只豺狼。", "一只凶猛的老虎,咬死了一只豺狼。高度详细,数字绘画,艺术站,概念艺术,锐利的焦点,插图,电影照明,艺术由artgerm和gregrutkowski和alphonsemucha8k彩色辛烷渲染。"],
                                ["印度毗湿奴神话图,", "印度毗湿奴神话图,毁灭之神的亲属,戴着王冠,没有胡子,穿着黄金僧衣,看着镜头,蓝色和金色,空灵,幻想,概念艺术,数字绘画,artstation上的趋势,deviantart上的趋势,史诗般的构图,极其详细,体积照明,杰作,辛烷渲染,由创作8k4k高分辨率35mm16"],
                                ["外太空幻想机器人,", "外太空幻想机器人,战斗盔甲,银色带金边,极其详细,数字绘画,艺术站,概念艺术,平滑,锐利的焦点,插图,电影照明,艺术由artgerm和gregrutkowski和alphonsemucha。16k。35mm8k。质量::高分辨率。。4k。50mm。"],
                                ["中国古代帝王衮服,", "中国古代帝王衮服,黄金富贵,权倾朝野,勤政为民,吉祥太平,复杂的细节,光明,高度详细,逼真,神权渲染,天宫引擎。黄金比例,artstation上的趋势美丽的细节插图背景水墨宫廷画。"],
                                ["哈努曼神猴,", ""]
                                 ],
                                 columns = ["User", "Assistant"]
                                 )
)

# 11) Multi-turn dialogue from a topic word — Chinese.
example_df_list.append(
                                    pd.DataFrame(
                                    [['根据下面主题给出多轮对话', '好的。'],
                                    ["天气", "问:今天天气不错。 真的不热又凉爽。 \n答:是的, 我希望中午不会变得太热。 \n问:你想去别处看看吗? \n问:是的,我想去别处走在,我不想在这里呆太久。你会等我吗? \n答:不用担心 。 我会等着你 。 \n问:谢谢你,您今年要去哪里度假? \n答:哦,我要去中国沿海。 那是一个美丽的国家。 \n问:真的吗 ?那里是什么样的? \n答:它有很棒的海滩和许多树木和海岸。 \n问:您会在那待多久? \n答:大约2周。 "],
                                    ["就餐", "问:你饿吗? \n答:是的,我真的很想吃点东西。 \n问:你要吃什么? \n答:我想要披萨。 \n问:还要别的吗 ? \n答:吃的就这些,我也有些口渴。 \n问:你想喝什么吗? \n答:我想要一杯可乐。 \n问:你觉得这份披萨怎么样? \n答:那是一个很棒的披萨,放了很多干酪。"],
                                    ["戏剧", ""]
                                     ],
                                     columns = ["User", "Assistant"]
                                     )
)

# 12) Multi-turn dialogue from a topic word — English counterpart of example 11.
example_df_list.append(
                                    pd.DataFrame(
                                    [['Give multiple rounds of dialogue based on the following topics', 'Ok'],
                                    ["Weather", "Q: The weather is nice today. Really not hot and cool. \nA: Yes, I hope it doesn't get too hot at noon. \nQ: Do you want to go somewhere else? \nQ: Yes, I want to go somewhere else. I don’t want to stay here for too long. Are you going to wait for me? \nAnswer: Don’t worry. I will wait for you . \nQ: Thank you, where are you going on vacation this year? \nA: Oh, I’m going to the coast of China. That's a beautiful country. \nQ: Really? What's it like there? \nA: It has great beaches and lots of trees and coast. \nQ: How long will you stay there? \nAnswer: About 2 weeks."],
                                    ["Dining", "Question: Are you hungry? \nAnswer: Yes, I really want to eat something. \nQ: What do you want to eat? \nAnswer: I want pizza. \nQ: Do you want anything else? \nAnswer: That's all I have to eat. I'm also a little thirsty. \nQ: What would you like to drink? \nAnswer: I would like a Coke. \nQ: What do you think of this pizza? \nA: That was a great pizza with a lot of cheese."],
                                    ["Drama", ""]
                                     ],
                                     columns = ["User", "Assistant"]
                                     )
)

# Sanity checks: exactly 12 example tables, and each is DataFrame-like.
assert len(example_df_list) == 12
assert all(map(lambda x: hasattr(x, "shape"), example_df_list))

def get_params(request: gr.Request, example_df_list = example_df_list):
    """Resolve the ``?input_list_index=N`` query parameter to an example table.

    Registered via ``few_shot_demo.load``, so Gradio injects ``request``.
    Returns a ``(str_index, DataFrame)`` pair used to fill the hidden
    ``input_list_index`` textbox and the ``context_df`` editor. A missing,
    non-numeric, or out-of-range index falls back to example 0 instead of
    raising, so a malformed URL can no longer crash the load handler.
    """
    params = request.query_params
    ip = request.client.host
    req = {"params": params,
          "ip": ip}
    print("load it.")
    print(req)
    raw_index = params.__dict__.get("input_list_index", None)
    # isinstance (rather than type(...) == type("")) also accepts str subclasses.
    str_index = raw_index if isinstance(raw_index, str) else "0"
    try:
        index = int(str_index)
    except ValueError:
        # e.g. ?input_list_index=abc previously raised inside int() and
        # aborted the page-load callback.
        str_index, index = "0", 0
    if not 0 <= index < len(example_df_list):
        # Guard both ends: a negative value previously slipped past the
        # `index < len(...)` check and indexed from the end of the list.
        index = 0
    return str_index, example_df_list[index]

# Top-level Gradio app. All components and event wiring below live inside
# this Blocks scope; `custom.css` supplies the page styling.
with gr.Blocks(css = "custom.css") as few_shot_demo:
    # Page title banner.
    title = gr.HTML(
            """<h1 align="center"> <font size="+3"> ChatGLM3 Few Shot 🍔 </font> </h1>""",
            elem_id="title",
    )

    # Chinese subtitle: "Give ChatGLM3 a few-shot task".
    gr.HTML(
            """<h1 align="left"> <font size="+0"> 向 ChatGLM3 下达 Few shot 任务 </font> </h1>""",
    )

    # User instructions (typos fixed: "mannally" -> "manually",
    # "lookup" -> "look up").
    gr.HTML(
            """<h3 align="left"> <font size="+0"> You can input manually, or click below examples, every example has its own model space that you can look up </font> </h3>""",
    )

    def user(context_df, history):
        history = list(filter(lambda l2: type(l2[0]) == type(l2[1]) == type("") and (l2[0].strip() or l2[1].strip()),
        context_df.values.tolist()))
        return history
        #return "", history
        #return "", history + [[user_message, None]]

    def bot(history):
        """Stream the model reply for the final turn into ``history``.

        Generator callback for the chatbot: clears the assistant slot of the
        last turn, then yields the growing history once per streamed chunk.
        """
        last_user = history[-1][0]
        history[-1][1] = ""
        for chunk in predict(last_user, history):
            history[-1][1] = chunk
            yield history

    # Hidden state: receives the example index from the page's query string;
    # populated on page load by `few_shot_demo.load(get_params, ...)` at the
    # bottom of this Blocks scope. (A superseded variant of that loading
    # logic, disabled by wrapping it in a no-op string literal, was removed
    # from here.)
    input_list_index = gr.Textbox(value = "0", visible = False)

    # Two-column layout: left = editable few-shot context + buttons,
    # right = streaming chat display.
    with gr.Row():
        with gr.Column():
            # Editable few-shot context: each row is one (User, Assistant)
            # turn; the last row's empty Assistant cell is what the model
            # completes on Submit.
            context_df = gr.DataFrame(
                #value = context_df_value,
                headers = ["User", "Assistant"],
                col_count = 2,
                row_count = 8,
                #label = "You can input mannally, or click below examples, every example has its own model space that you can lookup",
            )

            with gr.Row():
                sub_button = gr.Button("Submit")
                clear = gr.Button("Clear")

        with gr.Column():
            # Streaming chat output (filled by the `bot` generator).
            chatbot = gr.Chatbot(height = 256)
    # NOTE(review): ~450 lines of a superseded per-task gr.Examples gallery
    # UI, disabled by wrapping it in a no-op triple-quoted string literal,
    # were removed here (the string was evaluated and discarded on every
    # import). The example DataFrames live in `example_df_list` and are
    # selected through the `?input_list_index=N` query parameter via
    # `few_shot_demo.load(get_params, ...)`. Recover the old gallery from
    # version control if it is ever needed again.

    # Submit: rebuild the chat history from the context table (`user`), then
    # stream the model completion into the last turn (`bot`).
    sub_button.click(user, [context_df, chatbot], chatbot, queue=False).then(
        bot, chatbot, chatbot
    )
    # Clear resets both the chat display and the context table.
    clear.click(lambda: None, None, chatbot, queue=False)
    clear.click(lambda: None, None, context_df, queue=False)
    # On page load, pick the example indicated by ?input_list_index=N and
    # fill the hidden index textbox plus the context editor.
    few_shot_demo.load(get_params, None, [input_list_index, context_df])

# Queuing is required so the generator-based `bot` can stream partial output.
few_shot_demo.queue()
few_shot_demo.launch()

'''
#### as web url params change
http://localhost:7860
http://localhost:7860/?input_list_index=1
http://localhost:7860/?input_list_index=2
...
http://localhost:7860/?input_list_index=11
'''