import gradio as gr
from huggingface_hub import InferenceClient

"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
    # Seed the conversation with the system prompt, then replay prior turns.
    messages = [{"role": "system", "content": system_message}]

    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    messages.append({"role": "user", "content": message})

    response = ""

    # Stream the completion, yielding the partial response after each chunk so
    # the UI updates incrementally. The loop variable is named `chunk` rather
    # than `message` to avoid shadowing the function parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # some stream chunks carry no content and would break `+=`
            response += token
        yield response
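

# `respond` is defined above but never wired into the UI. Below is a minimal
# sketch of one way to expose it, using gr.ChatInterface with the extra
# arguments bound via `additional_inputs`. The default system prompt and the
# slider ranges are assumptions, and `chat_demo` is built but intentionally
# never launched, so the image demo below remains the active app.
chat_demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)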


def display_image(image):
    # Identity function: return the uploaded PIL image unchanged so Gradio
    # simply displays it back to the user.
    return image
# The active app: a simple image echo interface.
demo = gr.Interface(
    fn=display_image,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=gr.Image(label="Uploaded Image"),
)

if __name__ == "__main__":
    demo.launch()