import torch
# import pandas
import gradio
import PIL.Image  # Pillow is needed by draw_it() below
import huggingface_hub
import huggingface_hub.hf_api
# import json
# import requests
import transformers
# import openai
# openai.api_key = os.environ.get('OPENAI_API_KEY')  # never hard-code a real API key
class HFace_Pluto(object):
#
# initialize the object
def __init__(self, name="Pluto",*args, **kwargs):
super(HFace_Pluto, self).__init__(*args, **kwargs)
self.author = "Duc Haba"
self.name = name
self._ph()
self._pp("Hello from class", str(self.__class__) + " Class: " + str(self.__class__.__name__))
self._pp("Code name", self.name)
self._pp("Author is", self.author)
self._ph()
#
    # define class variables for stable diffusion
self._device = 'cuda'
self._steps = [3,8,21,55,89,144]
self._guidances = [1.1,3.0,5.0,8.0,13.0,21.0]
self._models = ['CompVis/stable-diffusion-v1-4', #default
'stabilityai/stable-diffusion-2-1', #1 latest as of feb. 28, 2023
'dreamlike-art/dreamlike-diffusion-1.0', #2 ilike
'prompthero/openjourney-v2', #3 ilike
'itecgo/sd-lexica_6k-model', #4
'nitrosocke/mo-di-diffusion',
'coreco/seek.art_MEGA',
'andite/anything-v4.0', #7 anime
'nitrosocke/Nitro-Diffusion',
'22h/vintedois-diffusion-v0-1', #9 ilike
'Lykon/DreamShaper', #10 ilike
'rrustom/stable-architecture-diffusers', # 11
'hakurei/waifu-diffusion', #anime style
'wavymulder/portraitplus', #13 ilike
'dreamlike-art/dreamlike-photoreal-2.0', #no check
'johnslegers/epic-diffusion', #15 ilike good example
'nitrosocke/Arcane-Diffusion' #16 ilike
]
    self._seed = 667 # sum of 'walnut' in ASCII (or Angel 667)
self._width = 512
self._height = 512
self._step = 50
    self._guidance = 7.5 # default guidance scale (self._guidances above lists alternatives)
#self._generator = torch.Generator(device='cuda')
self.pipes = []
self.prompts = []
self.images = []
self.seeds = []
self.fname_id = 0
self.dname_img = "img_colab/"
return
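  #
  # Illustrative sketch only (this Space never builds the image pipelines):
  # how the settings above could drive a diffusers text-to-image pipeline.
  # It assumes the `diffusers` package, which this app does not import.
  #
  # import diffusers
  # pipe = diffusers.StableDiffusionPipeline.from_pretrained(self._models[0])
  # pipe = pipe.to(self._device)
  # gen = torch.Generator(device=self._device).manual_seed(self._seed)
  # img = pipe("a watercolor lion",
  #            width=self._width, height=self._height,
  #            num_inference_steps=self._step, guidance_scale=self._guidance,
  #            generator=gen).images[0]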
#
# pretty print output name-value line
def _pp(self, a, b):
print("%34s : %s" % (str(a), str(b)))
return
#
# pretty print the header or footer lines
def _ph(self):
print("-" * 34, ":", "-" * 34)
return
#
# fetch huggingface file
def fetch_hface_files(self,
hf_names,
hf_space="duchaba/skin_cancer_diagnose",
local_dir="/content/"):
    f = str(hf_names) + " is not iterable, type: " + str(type(hf_names))
try:
for f in hf_names:
lo = local_dir + f
huggingface_hub.hf_hub_download(repo_id=hf_space, filename=f,
use_auth_token=True,repo_type=huggingface_hub.REPO_TYPE_SPACE,
force_filename=lo)
    except Exception as e:
      self._pp("*Error", str(f) + ": " + str(e))
return
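  #
  # Illustrative usage (the filenames are hypothetical examples):
  # monty.fetch_hface_files(["app.py", "requirements.txt"])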
#
#
def push_hface_files(self,
hf_names,
hf_space="duchaba/skin_cancer_diagnose",
local_dir="/content/"):
    f = str(hf_names) + " is not iterable, type: " + str(type(hf_names))
try:
for f in hf_names:
lo = local_dir + f
huggingface_hub.upload_file(
path_or_fileobj=lo,
path_in_repo=f,
repo_id=hf_space,
repo_type=huggingface_hub.REPO_TYPE_SPACE)
    except Exception as e:
      self._pp("*Error", str(f) + ": " + str(e))
return
#
  # write a list of text lines to a local file
  def write_file(self, fname, txt):
    with open(fname, "w") as f:
      f.writelines("\n".join(txt))
    return
  #
  # placeholder for interface testing: ignores the prompt and returns a
  # static local image (lion.png)
  def draw_it(self, prompt):
    url = 'lion.png'
    img = PIL.Image.open(url)
    return img
#
# decorator for adding a method to an existing class (monkey patching)
#
import functools
def add_method(cls):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
setattr(cls, func.__name__, wrapper)
return func # returning func means func can still be used normally
return decorator
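#
# Illustrative sketch: how add_method() bolts a new method onto HFace_Pluto
# after the class is defined (the real uses appear below). The method name
# `say_hi` is a hypothetical example, not part of the app.
#
# @add_method(HFace_Pluto)
# def say_hi(self):
#   return "Hi from " + self.name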
# instantiate the class
monty = HFace_Pluto('Monty')
# load the MagicPrompt model (a GPT-2 model fine-tuned to expand Stable Diffusion prompts)
monty.gpt2_pipe = transformers.pipeline('text-generation',
model='Gustavosta/MagicPrompt-Stable-Diffusion',
tokenizer='gpt2')
# fetch-prompt helpers
@add_method(HFace_Pluto)
def _print_response(self, response):
for x in response:
print(x['generated_text'])
return
#
@add_method(HFace_Pluto)
def fetch_prompt(self, prompt, max_num=1, max_length=240, is_print=False):
response = self.gpt2_pipe(prompt,
max_length=max_length,
num_return_sequences=max_num)
#
if (is_print):
self._print_response(response)
return response
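#
# Illustrative usage (kept commented out so it does not run at Space startup;
# the prompt text is a made-up example):
# out = monty.fetch_prompt("a lion in the jungle", max_num=2, is_print=True)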
# alternate image interface for testing (kept commented out)
# iface = gradio.Interface(fn=monty.draw_it, inputs="text", outputs="image",
#   flagging_options=["Excellent", "Good", "Not Bad"])
iface = gradio.Interface(fn=monty.fetch_prompt, inputs="text", outputs="text",
flagging_options=[])
# Launch it
iface.launch()