Spaces: Running on Zero
import spaces
import transformers
import gradio as gr


def greet(name):
    # Starter example kept from the original template; not wired to the interface below.
    return "Hello " + name + "!!"


# Load the model and tokenizer once at startup. On ZeroGPU hardware the GPU is
# only attached while a @spaces.GPU-decorated function is running.
model = transformers.AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True
)
tokenizer = transformers.AutoTokenizer.from_pretrained(
    "microsoft/Phi-3-mini-4k-instruct", trust_remote_code=True
)


@spaces.GPU
def infer(input_text: str = "Who are you?"):
    # Move the model and inputs to the GPU, generate a completion, and decode it to text.
    input_ids = tokenizer.encode(input_text, return_tensors="pt").to("cuda")
    output = model.to("cuda").generate(input_ids, max_new_tokens=128)  # cap on generated length
    return tokenizer.decode(output[0], skip_special_tokens=True)


demo = gr.Interface(fn=infer, inputs="text", outputs="text")
demo.launch()
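For reference, a minimal sketch of the ZeroGPU decorator on its own, assuming the optional duration keyword the spaces package accepts for calls that need the GPU longer than the default; the function name and the 120-second value are placeholders, not part of this Space.

import spaces

@spaces.GPU(duration=120)  # assumed value: request roughly 120 s of GPU time per call
def long_generate(prompt: str) -> str:
    # The GPU is attached only while this function runs; heavy work would go here.
    return prompt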