Update chain_app.py
chain_app.py (CHANGED: +163 -104)
@@ -100,10 +100,10 @@ async def chat_profile():
             name="Qwen1.5-110B",
             markdown_description="Qwen first generation improved model with 110B parameters",
         ),
-        cl.ChatProfile(
-            name="Qwen1.5-72B",
-            markdown_description="Qwen first generation improved model with 72B parameters",
-        ),
+        # cl.ChatProfile(
+        #     name="Qwen1.5-72B",
+        #     markdown_description="Qwen first generation improved model with 72B parameters",
+        # ),
         cl.ChatProfile(
             name="Qwen1.5-32B",
             markdown_description="Qwen first generation improved model with 32B parameters",
@@ -112,18 +112,18 @@ async def chat_profile():
             name="Qwen1.5-2.7B",
             markdown_description="Qwen first generation improved model with 2.7B parameters",
         ),
-        cl.ChatProfile(
-            name="Qwen-72B",
-            markdown_description="Qwen first generation model with 72B parameters",
-        ),
-        cl.ChatProfile(
-            name="Qwen-14B",
-            markdown_description="Qwen first generation model with 14B parameters",
-        ),
-        cl.ChatProfile(
-            name="Qwen-7B",
-            markdown_description="Qwen first generation model with 7B parameters",
-        ),
+        # cl.ChatProfile(
+        #     name="Qwen-72B",
+        #     markdown_description="Qwen first generation model with 72B parameters",
+        # ),
+        # cl.ChatProfile(
+        #     name="Qwen-14B",
+        #     markdown_description="Qwen first generation model with 14B parameters",
+        # ),
+        # cl.ChatProfile(
+        #     name="Qwen-7B",
+        #     markdown_description="Qwen first generation model with 7B parameters",
+        # ),
         cl.ChatProfile(
             name="Llama-3.1-405B",
             markdown_description="Meta Open Source Model Llama with 405B parameters",
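Note: the two hunks above only touch the list of cl.ChatProfile entries, so the commented-out Qwen-72B / Qwen-14B / Qwen-7B profiles simply stop appearing in Chainlit's profile picker. A minimal sketch of the surrounding callback, assuming the usual @cl.set_chat_profiles decorator and return-list shape (the diff itself only shows the hunk header async def chat_profile(): and the list entries):

import chainlit as cl

@cl.set_chat_profiles
async def chat_profile():
    # Each ChatProfile left in this list shows up as a selectable profile
    # in the Chainlit UI; entries commented out in the diff are hidden.
    return [
        cl.ChatProfile(
            name="Qwen1.5-110B",
            markdown_description="Qwen first generation improved model with 110B parameters",
        ),
        cl.ChatProfile(
            name="Llama-3.1-405B",
            markdown_description="Meta Open Source Model Llama with 405B parameters",
        ),
    ]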
@@ -491,28 +491,28 @@ async def on_chat_start():
             content='Im Qwens 1.5th generation Large model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!',
         ).send()

-    if chat_profile == 'Qwen1.5-72B':
-        await cl.ChatSettings(
-            [
-                Select(
-                    id="Qwen-Model",
-                    label="Qwen - Model",
-                    values=["Qwen1.5-72B"],
-                    initial_index=0,
-                ),
-                Slider(
-                    id="Temperature",
-                    label="Model Temperature",
-                    initial=0.7,
-                    min=0,
-                    max=1,
-                    step=0.1,
-                ),
-            ]
-        ).send()
-        await cl.Message(
-            content='Im Qwens 1.5th generation second Large model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!',
-        ).send()
+    # if chat_profile == 'Qwen1.5-72B':
+    #     await cl.ChatSettings(
+    #         [
+    #             Select(
+    #                 id="Qwen-Model",
+    #                 label="Qwen - Model",
+    #                 values=["Qwen1.5-72B"],
+    #                 initial_index=0,
+    #             ),
+    #             Slider(
+    #                 id="Temperature",
+    #                 label="Model Temperature",
+    #                 initial=0.7,
+    #                 min=0,
+    #                 max=1,
+    #                 step=0.1,
+    #             ),
+    #         ]
+    #     ).send()
+    #     await cl.Message(
+    #         content='Im Qwens 1.5th generation second Large model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!',
+    #     ).send()

     if chat_profile == 'Qwen1.5-32B':
         await cl.ChatSettings(
@@ -560,74 +560,74 @@ async def on_chat_start():
             content='Im Qwens 1.5th generation small model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!',
         ).send()

-    if chat_profile == 'Qwen-72B':
-        await cl.ChatSettings(
-            [
-                Select(
-                    id="Qwen-Model",
-                    label="Qwen - Model",
-                    values=["Qwen-72B"],
-                    initial_index=0,
-                ),
-                Slider(
-                    id="Temperature",
-                    label="Model Temperature",
-                    initial=0.7,
-                    min=0,
-                    max=1,
-                    step=0.1,
-                ),
-            ]
-        ).send()
-        await cl.Message(
-            content='Im Qwens open source Ai model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!',
-        ).send()
+    # if chat_profile == 'Qwen-72B':
+    #     await cl.ChatSettings(
+    #         [
+    #             Select(
+    #                 id="Qwen-Model",
+    #                 label="Qwen - Model",
+    #                 values=["Qwen-72B"],
+    #                 initial_index=0,
+    #             ),
+    #             Slider(
+    #                 id="Temperature",
+    #                 label="Model Temperature",
+    #                 initial=0.7,
+    #                 min=0,
+    #                 max=1,
+    #                 step=0.1,
+    #             ),
+    #         ]
+    #     ).send()
+    #     await cl.Message(
+    #         content='Im Qwens open source Ai model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!',
+    #     ).send()

-    if chat_profile == 'Qwen-14B':
-        await cl.ChatSettings(
-            [
-                Select(
-                    id="Qwen-Model",
-                    label="Qwen - Model",
-                    values=["Qwen-14B"],
-                    initial_index=0,
-                ),
-                Slider(
-                    id="Temperature",
-                    label="Model Temperature",
-                    initial=0.7,
-                    min=0,
-                    max=1,
-                    step=0.1,
-                ),
-            ]
-        ).send()
-        await cl.Message(
-            content='Im Qwens open source Ai model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!',
-        ).send()
+    # if chat_profile == 'Qwen-14B':
+    #     await cl.ChatSettings(
+    #         [
+    #             Select(
+    #                 id="Qwen-Model",
+    #                 label="Qwen - Model",
+    #                 values=["Qwen-14B"],
+    #                 initial_index=0,
+    #             ),
+    #             Slider(
+    #                 id="Temperature",
+    #                 label="Model Temperature",
+    #                 initial=0.7,
+    #                 min=0,
+    #                 max=1,
+    #                 step=0.1,
+    #             ),
+    #         ]
+    #     ).send()
+    #     await cl.Message(
+    #         content='Im Qwens open source Ai model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!',
+    #     ).send()

-    if chat_profile == 'Qwen-7B':
-        await cl.ChatSettings(
-            [
-                Select(
-                    id="Qwen-Model",
-                    label="Qwen - Model",
-                    values=["Qwen-7B"],
-                    initial_index=0,
-                ),
-                Slider(
-                    id="Temperature",
-                    label="Model Temperature",
-                    initial=0.7,
-                    min=0,
-                    max=1,
-                    step=0.1,
-                ),
-            ]
-        ).send()
-        await cl.Message(
-            content='Im Qwens open source Ai model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!',
-        ).send()
+    # if chat_profile == 'Qwen-7B':
+    #     await cl.ChatSettings(
+    #         [
+    #             Select(
+    #                 id="Qwen-Model",
+    #                 label="Qwen - Model",
+    #                 values=["Qwen-7B"],
+    #                 initial_index=0,
+    #             ),
+    #             Slider(
+    #                 id="Temperature",
+    #                 label="Model Temperature",
+    #                 initial=0.7,
+    #                 min=0,
+    #                 max=1,
+    #                 step=0.1,
+    #             ),
+    #         ]
+    #     ).send()
+    #     await cl.Message(
+    #         content='Im Qwens open source Ai model and i am configured by two iranian boys Artin Daneshvar and Sadra Noadoust to help you out!',
+    #     ).send()

     if chat_profile == 'Llama-3.1-405B':
         await cl.ChatSettings(
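Note: every block that the two on_chat_start() hunks comment out repeats the same cl.ChatSettings panel (one Select for the model name, one Slider for temperature) followed by a greeting message. A hedged sketch of a small helper that could replace that repetition if a profile is re-enabled later; send_qwen_settings is a hypothetical name, and Select/Slider are presumed to be imported from chainlit.input_widget elsewhere in chain_app.py:

import chainlit as cl
from chainlit.input_widget import Select, Slider

async def send_qwen_settings(model_name: str, greeting: str) -> None:
    # Hypothetical helper: mirrors the per-profile blocks commented out above.
    await cl.ChatSettings(
        [
            Select(
                id="Qwen-Model",
                label="Qwen - Model",
                values=[model_name],
                initial_index=0,
            ),
            Slider(
                id="Temperature",
                label="Model Temperature",
                initial=0.7,
                min=0,
                max=1,
                step=0.1,
            ),
        ]
    ).send()
    await cl.Message(content=greeting).send()

Re-enabling a profile would then be a single call inside the matching if branch, e.g. await send_qwen_settings("Qwen-72B", "...").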
@@ -1041,6 +1041,65 @@ async def main(message: cl.Message):
             content=result[1][0][1]

         ).send()
+    elif chat_profile == 'Qwen1.5-110B':
+        client = Client("Qwen/Qwen1.5-110B-Chat-demo", hf_token=hf_token)
+        result = client.predict(
+            query=message.content,
+            system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
+            api_name="/model_chat"
+        )
+        await cl.Message(
+            content=result[1][0][1]
+
+        ).send()
+
+    elif chat_profile == 'Qwen1.5-32B':
+        client = Client("Qwen/Qwen1.5-32B-Chat-demo", hf_token=hf_token)
+        result = client.predict(
+            query=message.content,
+            system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
+            api_name="/model_chat"
+        )
+        await cl.Message(
+            content=result[1][0][1]
+
+        ).send()
+
+    elif chat_profile == 'Qwen1.5-2.7B':
+        client = Client("Qwen/qwen1.5-MoE-A2.7B-Chat-demo", hf_token=hf_token)
+        result = client.predict(
+            query=message.content,
+            system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
+            api_name="/model_chat"
+        )
+        await cl.Message(
+            content=result[1][0][1]
+
+        ).send()
+
+    # elif chat_profile == 'Qwen-14B':
+    #     client = Client("Qwen/qwen1.5-MoE-A2.7B-Chat-demo", hf_token=hf_token)
+    #     result = client.predict(
+    #         query=message.content,
+    #         system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
+    #         api_name="/model_chat"
+    #     )
+    #     await cl.Message(
+    #         content=result[1][0][1]
+
+    #     ).send()
+
+    # elif chat_profile == 'Qwen-7B':
+    #     client = Client("Qwen/qwen1.5-MoE-A2.7B-Chat-demo", hf_token=hf_token)
+    #     result = client.predict(
+    #         query=message.content,
+    #         system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
+    #         api_name="/model_chat"
+    #     )
+    #     await cl.Message(
+    #         content=result[1][0][1]
+
+    #     ).send()

     elif chat_profile == 'Llama-3.1-405B':
         client = InferenceClient(
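Note: the three live branches added above differ only in which public Qwen demo Space they target; each opens a gradio_client.Client against the Space, calls its /model_chat endpoint, and pulls the assistant reply out of the returned value with result[1][0][1]. A hedged sketch of how that repetition could be collapsed into one table-driven helper; ask_qwen_space and QWEN_SPACES are hypothetical names, and hf_token is assumed to be loaded elsewhere in chain_app.py, exactly as the diff assumes:

from gradio_client import Client

# Hypothetical mapping from Chainlit profile name to the Qwen demo Space it calls.
QWEN_SPACES = {
    "Qwen1.5-110B": "Qwen/Qwen1.5-110B-Chat-demo",
    "Qwen1.5-32B": "Qwen/Qwen1.5-32B-Chat-demo",
    "Qwen1.5-2.7B": "Qwen/qwen1.5-MoE-A2.7B-Chat-demo",
}

def ask_qwen_space(profile: str, query: str, hf_token: str) -> str:
    client = Client(QWEN_SPACES[profile], hf_token=hf_token)
    result = client.predict(
        query=query,
        system="You are a helpful AI chatbot made by two iranian boys named Artin Daneshvar and Sadra Noadoust",
        api_name="/model_chat",
    )
    # The new branches index the reply as result[1][0][1], i.e. they treat
    # result[1] as the returned chat history (a list of [user, assistant]
    # pairs) and take the assistant half of the first turn.
    return result[1][0][1]

Since Client.predict is a blocking HTTP call, the async main() handler could also run it off the event loop, e.g. await asyncio.to_thread(ask_qwen_space, chat_profile, message.content, hf_token); the diff as written calls it inline.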