import gradio as gr
from compressor import PromptCompressor
import os
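# Download the spaCy English model (en_core_web_sm) at startup so it is available to the compressors.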
res = os.popen('python -m spacy download en_core_web_sm').read()
print(res)
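# Map the selected compressor name to a PromptCompressor backend, run compression,
# and return the compressed prompt together with the reported compression ratio.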
def compressit(original_text, compressor1, ratio, maxlength):
    if compressor1 == "Selective Context":
        compressor = PromptCompressor(type='SCCompressor', lang='en', model='gpt2', device='cpu')
    elif compressor1 == "LLMLingua":
        return "Sorry, we currently cannot provide the LLMLingua service due to the Hugging Face token issue. Please try another compressor.", 0
    elif compressor1 == "LongLLMLingua":
        return "Sorry, we currently cannot provide the LongLLMLingua service due to the Hugging Face token issue. Please try another compressor.", 0
    elif compressor1 == "SCRL":
        return "Sorry, we currently have an issue presenting SCRL on Hugging Face; you can try this compressor from our GitHub repository.", 0
        # compressor = PromptCompressor(type='SCRLCompressor', model_dir="models/newsroom-L11/", device="cpu", tokenizer_dir="sentence-transformers/paraphrase-distilroberta-base-v2")
    elif compressor1 == "KiS":
        compressor = PromptCompressor(type='KiSCompressor', device="cpu", model_dir="philippelaban/keep_it_simple")
    else:
        # Fall back to Selective Context for any unrecognized choice.
        compressor = PromptCompressor(type='SCCompressor', lang='en', model='gpt2', device='cpu')
    # Warn the user up front, since compression on CPU can be slow.
    gr.Warning("The compressed prompt is being generated. Please wait patiently, as it may take a long time during busy hours.")
    if compressor1 == "Selective Context":
        compressed_prompt = compressor.compressgo(original_prompt=original_text, ratio=float(ratio))
    else:
        if maxlength:
            compressed_prompt = compressor.compressgo(original_prompt=original_text, ratio=float(ratio), max_length=int(maxlength))
        else:
            gr.Warning("max_length is required for this type of compressor. Please fill it in and try again.")
            return "max_length is missing.", 0
    return compressed_prompt["compressed_prompt"], compressed_prompt["ratio"]
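# Gradio UI: original prompt, compressor choice, ratio, and max_length in;
# compressed prompt and achieved compression ratio out.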
demo = gr.Interface(
    fn=compressit,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter your prompt here...", label="input", info="Enter the original prompt here."),
        gr.Dropdown(
            ["Selective Context", "LLMLingua", "LongLLMLingua", "SCRL", "KiS"], label="compressor", info="Choose your compressor here. \n Currently, we cannot support the online demo for LLMLingua and LongLLMLingua due to the Hugging Face token issue."
        ),
        gr.Textbox(lines=1, placeholder="Enter the compression ratio here...", label="ratio", info="The ratio only applies to Selective Context, LLMLingua, and LongLLMLingua."),
        gr.Textbox(lines=1, placeholder="Enter the max_length parameter (integer) if you are using SCRL or KiS", label="max_length", info="If you are using SCRL or KiS, fill in this parameter; otherwise, ignore it.\n Hint: for SCRL, max_length should be shorter than the length of the original prompt; for KiS, it should be longer.")
    ],
    outputs=[
        gr.Textbox(lines=1, label="output", info="Please wait patiently; it may take more than 2 minutes to generate since the demo runs on free CPUs."),
        gr.Textbox(lines=1, label="ratio", info="The achieved compression ratio.")
    ],
    examples=[
        ["Read the following poem and summarize it: O Captain! My Captain! our fearful trip is done; The ship has weather'd every rack, the prize we sought is won; The port is near, the bells I hear, the people all exulting, While follow eyes the steady keel, the vessel grim and daring.", "Selective Context", 0.5, 0],
        ["Read the following poem and summarize it: O Captain! My Captain! our fearful trip is done; The ship has weather'd every rack, the prize we sought is won; The port is near, the bells I hear, the people all exulting, While follow eyes the steady keel, the vessel grim and daring.", "SCRL", 0, 16],
    ]
)
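# Launch the app locally (no public share link).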
demo.launch(share=False)
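# Minimal local sanity check (a sketch, assuming the PromptCompressor backends and their models are installed):
#   compressed, ratio = compressit("Read the following poem and summarize it: ...", "Selective Context", 0.5, 0)
#   print(compressed, ratio)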