Update app.py
app.py
CHANGED
@@ -44,7 +44,7 @@ model_code = AutoModelForCausalLM.from_pretrained(
 tokenizer_code = AutoTokenizer.from_pretrained("THUDM/codegeex4-all-9b", trust_remote_code=True)
 
 @spaces.GPU
-def stream_chat(message: str, history: list, temperature: float, max_length: int):
+def stream_chat(message: str, history: list, temperature: float, max_length: int, choice: str):
     print(f'message is - {message}')
     print(f'history is - {history}')
     conversation = []
@@ -54,7 +54,7 @@ def stream_chat(message: str, history: list, temperature: float, max_length: int
 
     print(f"Conversation is -\n{conversation}")
 
-    if
+    if choice == "glm-4-9b-chat":
         tokenizer = tokenizer_chat
         model = model_chat
     else:
@@ -111,7 +111,8 @@ with gr.Blocks(css=CSS) as demo:
             ),
            gr.Radio(
                ["glm-4-9b-chat", "codegeex4-all-9b"],
-               label="Load Model"
+               label="Load Model",
+               render=False,
            ),
        ],
        examples=[
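For context, a minimal runnable sketch of the pattern this commit lands on: a gr.Radio created with render=False so that gr.ChatInterface can render it itself among its additional inputs, and whose value reaches stream_chat as the new choice argument. This is not the Space's actual app.py; the Slider components, their labels, and the stubbed model routing are assumptions for illustration, while the real app loads the THUDM models with transformers.

import gradio as gr

def stream_chat(message: str, history: list, temperature: float,
                max_length: int, choice: str):
    # Route to the chat model or the code model based on the Radio value,
    # mirroring the `if choice == "glm-4-9b-chat":` branch in the diff.
    if choice == "glm-4-9b-chat":
        model_name = "THUDM/glm-4-9b-chat"     # tokenizer_chat / model_chat
    else:
        model_name = "THUDM/codegeex4-all-9b"  # tokenizer_code / model_code
    # Stub response; the real app streams generated tokens from the
    # selected model instead.
    yield f"[{model_name}] temperature={temperature}, max_length={max_length}: {message}"

with gr.Blocks() as demo:
    gr.ChatInterface(
        fn=stream_chat,
        additional_inputs=[
            # render=False defers rendering so ChatInterface can place these
            # controls in its own additional-inputs area (assumed Sliders).
            gr.Slider(0.0, 1.0, value=0.8, label="Temperature", render=False),
            gr.Slider(128, 8192, value=1024, step=1, label="Max Length", render=False),
            gr.Radio(
                ["glm-4-9b-chat", "codegeex4-all-9b"],
                label="Load Model",
                render=False,
            ),
        ],
    )

if __name__ == "__main__":
    demo.launch()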