Spaces: Running on Zero
Update description
app.py CHANGED
@@ -41,18 +41,17 @@ MODEL_PATHS = {
     "type_narrative": "hallisky/lora-type-narrative-llama-3-8b",
     "type_descriptive": "hallisky/lora-type-descriptive-llama-3-8b",
 }
-FIRST_MODEL = list(MODEL_PATHS.keys())[
+FIRST_MODEL = list(MODEL_PATHS.keys())[0]
 MAX_NEW_TOKENS=1024

 DESCRIPTION = """\
-# Authorship Obfuscation
-This Space demonstrates StyleRemix, a Llama
-
-
+# Authorship Obfuscation with StyleRemix
+This Space demonstrates StyleRemix, a controllable and interpretable method for authorship obfuscation. At its core, it uses a Llama-3 model with 8B parameters and various LoRA adapters fine-tuned to rewrite text towards specific stylistic attributes (like text being longer or shorter). Feel free to play with it, or duplicate to run generations without a queue! If you want to run your own service, you can also [deploy the model on Inference Endpoints](https://huggingface.co/inference-endpoints).
+<br> 🕵️ Want to learn more? Check out our paper [here](google.com) and our code [here](google.com)!
+<br> 📧 Have questions about our work or issues with the demo? Feel free to email us at hallisky@uw.edu.
 """

 import subprocess
-
 def print_nvidia_smi():
     try:
         # Run the nvidia-smi command
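The hunk above only shows the tail of `MODEL_PATHS`, the corrected `FIRST_MODEL` index, and the rewritten `DESCRIPTION`. For context, here is a minimal sketch of how per-style LoRA adapters like these could be attached to a Llama-3 8B base with 🤗 PEFT; the base checkpoint id (`meta-llama/Meta-Llama-3-8B-Instruct`) and the loading flow are assumptions, only the adapter repo ids come from the diff.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

MODEL_PATHS = {
    "type_narrative": "hallisky/lora-type-narrative-llama-3-8b",
    "type_descriptive": "hallisky/lora-type-descriptive-llama-3-8b",
}
FIRST_MODEL = list(MODEL_PATHS.keys())[0]

# Assumption: the Space builds on an 8B Llama-3 checkpoint; the exact id is not shown in this hunk.
BASE_MODEL = "meta-llama/Meta-Llama-3-8B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
base = AutoModelForCausalLM.from_pretrained(BASE_MODEL, torch_dtype=torch.bfloat16, device_map="auto")

# Register the first adapter while wrapping the base model, then add the rest under their dict keys.
model = PeftModel.from_pretrained(base, MODEL_PATHS[FIRST_MODEL], adapter_name=FIRST_MODEL)
for name, repo in MODEL_PATHS.items():
    if name != FIRST_MODEL:
        model.load_adapter(repo, adapter_name=name)
```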
@@ -153,6 +152,7 @@ def greet(input_text, length, function_words, grade_level, formality, sarcasm, v
     print(combo_adapter_name)
     print(list(sliders_dict.values()))
     print(list(sliders_dict.keys()))
+    print(list(model.peft_config.keys()))

     # Add and set the weighted adapater
     model.add_weighted_adapter(
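The added `print(list(model.peft_config.keys()))` logs which adapters are registered before the weighted merge. A hedged sketch of that merge step, continuing from the loading sketch above, might look like the following; the slider weights, the combined adapter's name, and the `combination_type` argument are illustrative, not taken from the commit.

```python
# Example slider weights per registered adapter (illustrative values and naming scheme).
sliders_dict = {"type_narrative": 0.7, "type_descriptive": 0.3}
combo_adapter_name = "combo_" + "_".join(f"{k}{v}" for k, v in sliders_dict.items())

print(list(model.peft_config.keys()))  # adapters registered so far, as in the added debug line

# Blend the LoRA adapters with the given weights, register the blend, and make it active.
if combo_adapter_name not in model.peft_config:
    model.add_weighted_adapter(
        adapters=list(sliders_dict.keys()),
        weights=list(sliders_dict.values()),
        adapter_name=combo_adapter_name,
        combination_type="linear",  # assumption; the commit does not show which combination is used
    )
model.set_adapter(combo_adapter_name)
```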
@@ -248,7 +248,7 @@ with demo:
     gr.HTML(hide_css)
     with gr.Row():
         with gr.Column(variant="panel"):
-            gr.Markdown("# 1) Input Text\n### Enter the text to be obfuscated.")
+            gr.Markdown("# 1) Input Text\n### Enter the text to be obfuscated. We recommend *full sentences* or *paragraphs*.")
             input_text = gr.Textbox(
                 label="Input Text",
                 placeholder="The quick brown fox jumped over the lazy dogs."
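For context on the UI hunk: the panel pattern (a `gr.Row` holding a `gr.Column(variant="panel")` with a Markdown header and a Textbox) is standard Gradio Blocks layout. A self-contained toy version is sketched below; the `obfuscate` stub, the button, and the output column are placeholders, not the Space's real `greet` pipeline.

```python
import gradio as gr

def obfuscate(text):
    # Placeholder: the real Space routes the text through the LoRA-steered Llama-3 model.
    return text

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(variant="panel"):
            gr.Markdown("# 1) Input Text\n### Enter the text to be obfuscated. We recommend *full sentences* or *paragraphs*.")
            input_text = gr.Textbox(
                label="Input Text",
                placeholder="The quick brown fox jumped over the lazy dogs.",
            )
            run_button = gr.Button("Obfuscate")
        with gr.Column(variant="panel"):
            output_text = gr.Textbox(label="Obfuscated Text")
    run_button.click(obfuscate, inputs=input_text, outputs=output_text)

if __name__ == "__main__":
    demo.launch()
```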