Spaces:
Runtime error
Runtime error
# -*- coding: utf-8 -*-
"""app.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1yM6SHreIA1NYzW6CrVDyUl5Fub4VPwDy
"""
# NOTE: `!pip install transformers` is IPython shell magic and a SyntaxError
# in a plain .py file. Install the dependency from the command line instead:
#   pip install transformers
import torch
from PIL import Image
from transformers import CLIPProcessor, CLIPModel

# Run on CPU explicitly (no GPU assumed in this deployment).
device = torch.device("cpu")
model_name = "openai/clip-vit-base-patch32"  # pretrained CLIP checkpoint
model = CLIPModel.from_pretrained(model_name).to(device)
processor = CLIPProcessor.from_pretrained(model_name)
def stable_diffusion(image_path):
    """Encode an image and a fixed text prompt with CLIP.

    NOTE(review): despite the name, this runs CLIP feature extraction,
    not Stable Diffusion. The name is kept for caller compatibility.

    Parameters
    ----------
    image_path : str
        Path to the image file to encode.

    Returns
    -------
    tuple
        ``(image_embeds, text_embeds)`` tensors from the CLIP forward pass.
        (Previously these were computed and silently discarded.)
    """
    # Context-manage the image so the file handle is always released.
    with Image.open(image_path) as image:
        inputs = processor(
            text=["a photo"], images=image, return_tensors="pt", padding=True
        )
    inputs = {k: v.to(device) for k, v in inputs.items()}
    # Inference only — skip autograd bookkeeping to save memory/time.
    with torch.no_grad():
        outputs = model(**inputs)
    image_embed = outputs["image_embeds"]
    text_embed = outputs["text_embeds"]
    return image_embed, text_embed
import os

from flask import Flask, render_template, request

# Flask application serving the image-upload endpoint.
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])  # was never registered — the app served no routes
def upload_image():
    """Serve the upload form (GET) and run CLIP on an uploaded image (POST).

    Returns
    -------
    str
        A status message on POST, or the rendered ``index.html`` form on GET.
    """
    if request.method == "POST":
        if "image" not in request.files:
            return "No image uploaded"
        image = request.files["image"]
        image.save("uploaded_image.jpg")
        try:
            stable_diffusion("uploaded_image.jpg")
        finally:
            # Remove the temp file even if inference raises (previously leaked).
            os.remove("uploaded_image.jpg")
        return "Stable Diffusion completed!"
    return render_template("index.html")