# Import Gradio
import gradio as gr
# Import the pipeline helper from transformers
from transformers import pipeline
# import webbrowser
# # Open the given URL
# webbrowser.open('http://127.0.0.1:7860')
# Wrap the pipeline in a Gradio Interface and launch the service
gr.Interface.from_pipeline(
    pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
).launch(share=True)
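# For reference, a minimal sketch of the equivalent manual wiring, kept commented
# out so only the from_pipeline interface above is served. The caption_image helper
# name and the component choices are illustrative, not part of the original app;
# the image-to-text pipeline returns a list of dicts with a "generated_text" key.
# captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
# def caption_image(image):
#     return captioner(image)[0]["generated_text"]
# gr.Interface(fn=caption_image, inputs=gr.Image(type="pil"), outputs=gr.Textbox(label="Caption")).launch()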
# import numpy as np
# import gradio as gr
# def flip_text(x):
#     return x[::-1]
# def flip_image(x):
#     return np.fliplr(x)
# with gr.Blocks() as demo:
#     gr.Markdown("Flip text or image files using this demo.")
#     with gr.Tab("Flip Text"):
#         text_input = gr.Textbox()
#         text_output = gr.Textbox()
#         text_button = gr.Button("Flip")
#     with gr.Tab("Flip Image"):
#         with gr.Row():
#             image_input = gr.Image()
#             image_output = gr.Image()
#         image_button = gr.Button("Flip")
#     with gr.Accordion("Open for More!"):
#         gr.Markdown("Look at me...")
#     text_button.click(flip_text, inputs=text_input, outputs=text_output)
#     image_button.click(flip_image, inputs=image_input, outputs=image_output)
# demo.launch(share=True)