<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Gradio-Lite: Serverless Gradio Running Entirely in Your Browser</title>
<meta name="description" content="Gradio-Lite: Serverless Gradio Running Entirely in Your Browser">

<script type="module" crossorigin src="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.js"></script>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@gradio/lite/dist/lite.css" />

<style>
html, body {
	margin: 0;
	padding: 0;
	height: 100%;
}
</style>
</head>
<body>
<gradio-lite>
<gradio-file name="app.py" entrypoint>
from transformers_js import import_transformers_js, as_url |
|
import gradio as gr |
|
|
|
transformers = await import_transformers_js() |
|
pipeline = transformers.pipeline |
|
depth_estimator = await pipeline('depth-estimation', 'Xenova/depth-anything-small-hf'); |
|
|
|
|
|
async def estimate(input_image):
    """Run monocular depth estimation on one image.

    Args:
        input_image: Filesystem path to the input image (Gradio passes a
            ``filepath`` here — see the `gr.Image` input below).

    Returns:
        A ``(depth_image, tensor_data)`` pair: the predicted depth map as a
        PIL image, and a JSON-friendly dict describing the raw depth tensor.
    """
    result = await depth_estimator(as_url(input_image))

    # Summarize the raw output tensor as plain values for the gr.JSON pane.
    raw = result["predicted_depth"]
    tensor_data = {
        "dims": raw.dims,
        "type": raw.type,
        "data": raw.data,
        "size": raw.size,
    }

    return result["depth"].to_pil(), tensor_data
|
# Wire the model into a simple UI: one image in, depth map plus raw tensor
# metadata out, with a bundled example image.
image_input = gr.Image(type="filepath")
depth_output = gr.Image(label="Depth Image")
tensor_output = gr.JSON(label="Tensor")

demo = gr.Interface(
    fn=estimate,
    inputs=[image_input],
    outputs=[depth_output, tensor_output],
    examples=[["bread_small.png"]],
)

demo.launch()
</gradio-file>

<gradio-file name="bread_small.png" url="https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/bread_small.png" />

<gradio-requirements>
transformers_js_py
</gradio-requirements>
</gradio-lite>
</body>
</html>