import warnings

warnings.filterwarnings("ignore")

import base64
import io
from typing import Dict

import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from fastapi import FastAPI
from PIL import Image

from helper import load_image

app = FastAPI()

# Load the Magenta arbitrary image stylization model from TensorFlow Hub.
# Given a content image and a style image, the module renders the content
# image in the style of the style image.
hub_handle = 'https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2'
hub_module = hub.load(hub_handle)
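
# `load_image` comes from helper.py, which is not shown in this file. A
# minimal sketch of what it is assumed to do (download an image from a URL,
# decode it to a float32 tensor in [0, 1], add a batch dimension, and resize
# it) might look like this; the real helper may differ:
#
#     import requests  # needed only for this sketch
#
#     def load_image(image_url: str, image_size=(256, 256)) -> tf.Tensor:
#         response = requests.get(image_url)
#         img = Image.open(io.BytesIO(response.content)).convert("RGB")
#         img = tf.convert_to_tensor(np.array(img), dtype=tf.float32) / 255.0
#         return tf.image.resize(img[tf.newaxis, ...], image_size,
#                                preserve_aspect_ratio=True)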


@app.post("/generate")
def generate(features: Dict[str, str]):
    """
    Stylize a content image with the arbitrary image stylization model loaded
    above. The request body must contain `url`, the URL of the content image,
    and may contain an optional `style_url`; when `style_url` is omitted,
    Edvard Munch's "The Scream" is used as the style image. The model is
    `magenta/arbitrary-image-stylization-v1-256`, which can be found
    [here](<https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2>).
    The stylized result is returned as a base64-encoded PNG string.
    """
    # The content image URL is required; the style image URL is optional and
    # defaults to Edvard Munch's "The Scream".
    content_image_url = features['url']
    style_image_url = features.get(
        'style_url',
        'https://upload.wikimedia.org/wikipedia/commons/c/c5/Edvard_Munch%2C_1893%2C_The_Scream%2C_oil%2C_tempera_and_pastel_on_cardboard%2C_91_x_73_cm%2C_National_Gallery_of_Norway.jpg')

    content_img_size = (500, 500)
    style_img_size = (300, 300)

    style_image = load_image(style_image_url, style_img_size)
    content_image = load_image(content_image_url, content_img_size)
    # Lightly smooth the style image; average pooling softens fine textures,
    # which tends to produce cleaner stylization results.
    style_image = tf.nn.avg_pool(
        style_image, ksize=[3, 3], strides=[1, 1], padding='SAME')

    # Run style transfer. The first output is the stylized image as a float
    # tensor in [0, 1] with shape (1, height, width, 3).
    outputs = hub_module(tf.constant(content_image), tf.constant(style_image))
    stylized_image = outputs[0]

    # Convert the tensor to an 8-bit PIL image and return it as a
    # base64-encoded PNG string.
    img = Image.fromarray(np.uint8(stylized_image.numpy()[0] * 255))
    im_file = io.BytesIO()
    img.save(im_file, format="PNG")
    im_bytes = base64.b64encode(im_file.getvalue()).decode("utf-8")

    return {"output": im_bytes}
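

# Example client usage: a minimal sketch, assuming the app is saved as main.py
# and served with `uvicorn main:app --port 8000` (module name, host, and port
# are assumptions). The response's base64 PNG can be decoded like so:
#
#     import base64, io, requests
#     from PIL import Image
#
#     resp = requests.post(
#         "http://localhost:8000/generate",
#         json={"url": "https://example.com/content.jpg"},  # optionally add "style_url"
#     )
#     img = Image.open(io.BytesIO(base64.b64decode(resp.json()["output"])))
#     img.save("stylized.png")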