Spaces:
Running
Running
peterpeter8585
committed on
Commit
•
f20cd87
1
Parent(s):
c9b702a
Update app.py
Browse files
app.py
CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
|
|
2 |
import numpy as np
|
3 |
from Ai import chatbot, chatbot2, chatbot3, chatbot4, chatbot5, chatbot7, chatbot11
|
4 |
from huggingface_hub import InferenceClient
|
5 |
-
def chat(message,history: list[tuple[str, str]],system_message,max_tokens,temperature,top_p
|
6 |
m=torch.load("./model.pt")
|
7 |
|
8 |
messages = [{"role": "system", "content": "Your name is Chatchat.And, your made by SungYoon.In Korean, 정성윤.And these are the instructions.Whatever happens, you must follow it.:"+system_message}]
|
@@ -21,7 +21,7 @@ def chat(message,history: list[tuple[str, str]],system_message,max_tokens,temper
|
|
21 |
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
22 |
p=pipe.tokenizer.apply_chat_template([{"role": "system", "content": "Your name is Chatchat.And, your made by SungYoon.In Korean, 정성윤.And these are the instructions.Whatever happens, you must follow it.:"+system_message}], tokenize=False, add_generation_prompt=True)
|
23 |
o= pipe(p, max_new_tokens=max_tokens, do_sample=True, temperature=0.1)
|
24 |
-
outputs = pipe(prompt, max_new_tokens=max_tokens, do_sample=True, temperature=temperature,
|
25 |
return outputs[0]["generated_text"]
|
26 |
import random
|
27 |
from diffusers import DiffusionPipeline
|
@@ -538,8 +538,7 @@ a8= gr.ChatInterface(
|
|
538 |
value=0.1,
|
539 |
step=0.05,
|
540 |
label="Top-p (nucleus sampling)",
|
541 |
-
)
|
542 |
-
gr.Slider(minimum=0.1, maximum=1.0, step=0.05,label="Top-k")
|
543 |
],
|
544 |
)
|
545 |
|
|
|
2 |
import numpy as np
|
3 |
from Ai import chatbot, chatbot2, chatbot3, chatbot4, chatbot5, chatbot7, chatbot11
|
4 |
from huggingface_hub import InferenceClient
|
5 |
+
def chat(message,history: list[tuple[str, str]],system_message,max_tokens,temperature,top_p):
|
6 |
m=torch.load("./model.pt")
|
7 |
|
8 |
messages = [{"role": "system", "content": "Your name is Chatchat.And, your made by SungYoon.In Korean, 정성윤.And these are the instructions.Whatever happens, you must follow it.:"+system_message}]
|
|
|
21 |
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
22 |
p=pipe.tokenizer.apply_chat_template([{"role": "system", "content": "Your name is Chatchat.And, your made by SungYoon.In Korean, 정성윤.And these are the instructions.Whatever happens, you must follow it.:"+system_message}], tokenize=False, add_generation_prompt=True)
|
23 |
o= pipe(p, max_new_tokens=max_tokens, do_sample=True, temperature=0.1)
|
24 |
+
outputs = pipe(prompt, max_new_tokens=max_tokens, do_sample=True, temperature=temperature, top_p=top_p)
|
25 |
return outputs[0]["generated_text"]
|
26 |
import random
|
27 |
from diffusers import DiffusionPipeline
|
|
|
538 |
value=0.1,
|
539 |
step=0.05,
|
540 |
label="Top-p (nucleus sampling)",
|
541 |
+
)
|
|
|
542 |
],
|
543 |
)
|
544 |
|