import os

import gradio
import huggingface_hub
import torch
import vllm

huggingface_hub.login(token=os.environ["HF_TOKEN"])

# FAVA prompt template: the model reads the references, then marks up and
# edits the passage.
INPUT = "Read the following references:\n{evidence}\nPlease identify all the errors in the following text using the information in the references provided and suggest edits if necessary:\n[Text] {output}\n[Edited] "

model = vllm.LLM(model="uw-llm-factuality/FAVA")


def result(passage, reference):
    prompt = [INPUT.format_map({"evidence": reference, "output": passage})]
    print(prompt)
    print("\n")
    sampling_params = vllm.SamplingParams(
        temperature=0,
        top_p=1.0,
        max_tokens=500,
    )
    outputs = model.generate(prompt, sampling_params)
    outputs = [it.outputs[0].text for it in outputs]
    # Convert FAVA's edit tags into HTML for the Gradio "html" output:
    # insertions render green, deletions are struck through, and each of the
    # six error types gets a labeled, color-coded span.
    output = outputs[0].replace("<mark>", "<span style='color: green; font-weight: bold;'> ")
    output = output.replace("</mark>", " </span>")
    output = output.replace("<delete>", "<span style='color: red; text-decoration: line-through;'>")
    output = output.replace("</delete>", "</span>")
    output = output.replace("<entity>", "<span style='background-color: #E9A2D9; border-bottom: 1px dotted;'>entity</span>")
    output = output.replace("<relation>", "<span style='background-color: #908CF0; border-bottom: 1px dotted;'>relation</span>")
    output = output.replace("<contradictory>", "<span style='background-color: #F3B78B; border-bottom: 1px dotted;'>contradictory</span>")
    output = output.replace("<unverifiable>", "<span style='background-color: #ADF1BC; border-bottom: 1px dotted;'>unverifiable</span>")
    output = output.replace("<invented>", "<span style='background-color: #FDFF99; border-bottom: 1px dotted;'>invented</span>")
    output = output.replace("<subjective>", "<span style='background-color: #D3F4EE; border-bottom: 1px dotted;'>subjective</span>")
    output = output.replace("</entity>", "</span>")
    output = output.replace("</relation>", "</span>")
    output = output.replace("</contradictory>", "</span>")
    output = output.replace("</unverifiable>", "</span>")
    output = output.replace("</invented>", "</span>")
    output = output.replace("</subjective>", "</span>")
    output = output.replace("Edited:", "")
    return f"<div>{output}</div>"


if __name__ == "__main__":
    demo = gradio.Interface(fn=result, inputs=["text", "text"], outputs="html")
    demo.launch(share=True)
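
# Smoke test: result() can also be called directly, bypassing the Gradio UI.
# The passage and reference below are hypothetical placeholders for
# illustration only, not real model inputs or outputs:
#
#   html = result(
#       "Marie Curie was born in Paris in 1867.",
#       "Marie Curie was born in Warsaw, Poland, on 7 November 1867.",
#   )
#   print(html)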