|
import gradio as gr |
|
import logging |
|
import os |
|
import torch |
|
import transformers |
|
from transformers import AutoTokenizer |
|
|
|
# Configure root logging and eagerly build the generation pipeline at import
# time so the model is loaded before the UI starts serving requests.
logging.basicConfig(level=logging.INFO)

# Use the logging framework just configured above instead of a bare print(),
# so the startup message honors the configured level/handlers.
logging.info("APP startup")

# text2text-generation pipeline; downloads google/flan-t5-small on first run
# (network access required the first time, then served from the local cache).
pipe_flan = transformers.pipeline("text2text-generation", model="google/flan-t5-small")
|
def google_flan(input_text, request: gr.Request):
    """Generate a FLAN-T5 response for the submitted prompt.

    Parameters
    ----------
    input_text : str
        The prompt entered in the Gradio textbox.
    request : gr.Request
        The incoming HTTP request; used only for debug logging of its
        query parameters.

    Returns
    -------
    str
        The model's generated text.
    """
    logging.info("New response 2")
    logging.info("query params: %s", request.query_params)

    # SECURITY: never log secret material. The original code emitted the first
    # five characters of HF_TOKEN (even a prefix of a credential should not be
    # logged) and crashed with TypeError (None[:5]) when the variable was
    # unset. Log only whether the token is present.
    logging.info("HF_TOKEN set: %s", bool(os.environ.get("HF_TOKEN")))

    # The pipeline returns [{"generated_text": "..."}]; unwrap to a plain
    # string so the "text" output component shows the answer rather than the
    # repr of a list of dicts.
    return pipe_flan(input_text)[0]["generated_text"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Wire the model function into a simple single-textbox UI.
demo = gr.Interface(
    fn=google_flan,
    inputs="text",
    outputs="text",
    # allow_flagging takes "never"/"auto"/"manual"; the boolean False is
    # deprecated and rejected by current Gradio releases.
    allow_flagging="never",
    title="How can I help?",
    theme=gr.themes.Default(primary_hue="blue", secondary_hue="pink"),
)

# Bind to all interfaces on port 7860 — the conventional port for
# containerized / Hugging Face Spaces deployments.
demo.launch(server_name="0.0.0.0", server_port=7860)