from typing import Dict, List, Any
from PIL import Image
import torch
from torch import autocast
from diffusers import StableDiffusionPipeline
import base64
from io import BytesIO
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger("transformers")
# Emit one message at each level to confirm logging is wired up.
logger.info("INFO")
logger.warning("WARN")


# set device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# if device.type != 'cuda':
#     raise ValueError("need to run on GPU")

class EndpointHandler:
    def __init__(self, path=""):
        self.path = path
        # Loading the optimized model is disabled for now; the handler runs as a stub.
        # model_id = "stabilityai/stable-diffusion-x4-upscaler"
        # self.pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
        # self.pipe = self.pipe.to(device)


    def __call__(self, *args, **kwargs) -> Dict[str, Any]:
        """
        Args:
            image (:obj:`string`)
        Return:
            A :obj:`dict` with a base64 encoded image. A placeholder is
            returned while the pipeline is disabled.
        """
        logger.info('args received %s', args)
        logger.info('kwargs received %s', kwargs)

        # postprocess the prediction (placeholder until the pipeline is enabled)
        return {"image": "nothing"}