Update app.py
app.py CHANGED

@@ -14,7 +14,7 @@ MODEL_LIST = "THUDM/LongWriter-glm4-9b"
 
 TITLE = "<h1><center>GLM SPACE</center></h1>"
 
-PLACEHOLDER = f'<h3><center>
+PLACEHOLDER = f'<h3><center>LongWriter-glm4-9b is trained based on glm-4-9b, and is capable of generating 10,000+ words at once.</h3>'
 
 CSS = """
 .duplicate-button {
@@ -25,14 +25,14 @@ CSS = """
 }
 """
 
-
+model = AutoModelForCausalLM.from_pretrained(
     "THUDM/LongWriter-glm4-9b",
     torch_dtype=torch.bfloat16,
     low_cpu_mem_usage=True,
     trust_remote_code=True,
 ).to(0).eval()
 
-
+tokenizer = AutoTokenizer.from_pretrained("THUDM/LongWriter-glm4-9b",trust_remote_code=True)
 
 
 @spaces.GPU
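
The commit fills in the UI placeholder text and adds module-level loading of the LongWriter-glm4-9b model and its tokenizer. As a rough sketch of how these objects are typically driven inside a ZeroGPU Space, the snippet below repeats the same loading pattern and wraps generation in a function decorated with @spaces.GPU. The generate function, its parameters, and the sampling settings are illustrative assumptions, not part of this commit; the actual Space may format the prompt with the model's chat template instead of a plain completion.

import torch
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer

# Same loading pattern as in the diff: bf16 weights, streamed loading,
# and the custom GLM modeling/tokenizer code pulled from the Hub.
model = AutoModelForCausalLM.from_pretrained(
    "THUDM/LongWriter-glm4-9b",
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
).to(0).eval()
tokenizer = AutoTokenizer.from_pretrained("THUDM/LongWriter-glm4-9b", trust_remote_code=True)

@spaces.GPU  # ZeroGPU attaches a GPU only for the duration of this call
def generate(prompt: str, max_new_tokens: int = 4096) -> str:
    # Hypothetical helper: plain text-completion for illustration only.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            do_sample=True,
            temperature=0.5,
        )
    # Drop the prompt tokens and decode only the newly generated text.
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)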