# (page-scrape residue, commented out so the file parses as Python)
# Spaces: Running
# File size: 4,689 Bytes
# commits: 77059fc 8c41ba7
import torch
# import pandas
import gradio
# import PIL
import huggingface_hub
import huggingface_hub.hf_api
# import json
# import requests
import transformers
# import openai
# openai.api_key = os.environ.get("OPENAI_API_KEY")  # SECURITY: a real API key was hard-coded here; it has been redacted — revoke the leaked key and load it from the environment instead
class HFace_Pluto(object):
    """Notebook helper bundling Hugging Face Hub file transfer, Stable
    Diffusion preset values, and pretty-print logging utilities.
    """
    #
    # initialize the object
    def __init__(self, name="Pluto", *args, **kwargs):
        """Set defaults and print an identification banner.

        Args:
            name: Code name shown in the banner (default "Pluto").
        """
        super(HFace_Pluto, self).__init__(*args, **kwargs)
        self.author = "Duc Haba"
        self.name = name
        self._ph()
        self._pp("Hello from class", str(self.__class__) + " Class: " + str(self.__class__.__name__))
        self._pp("Code name", self.name)
        self._pp("Author is", self.author)
        self._ph()
        #
        # class defaults for stable diffusion
        self._device = 'cuda'
        self._steps = [3, 8, 21, 55, 89, 144]
        # NOTE(review): this list is immediately clobbered by the scalar
        # assignment `self._guidances = 7.5` below; kept as-is to preserve
        # the original behavior — confirm which value callers expect.
        self._guidances = [1.1, 3.0, 5.0, 8.0, 13.0, 21.0]
        self._models = ['CompVis/stable-diffusion-v1-4',  # default
                        'stabilityai/stable-diffusion-2-1',  # 1 latest as of feb. 28, 2023
                        'dreamlike-art/dreamlike-diffusion-1.0',  # 2 ilike
                        'prompthero/openjourney-v2',  # 3 ilike
                        'itecgo/sd-lexica_6k-model',  # 4
                        'nitrosocke/mo-di-diffusion',
                        'coreco/seek.art_MEGA',
                        'andite/anything-v4.0',  # 7 anime
                        'nitrosocke/Nitro-Diffusion',
                        '22h/vintedois-diffusion-v0-1',  # 9 ilike
                        'Lykon/DreamShaper',  # 10 ilike
                        'rrustom/stable-architecture-diffusers',  # 11
                        'hakurei/waifu-diffusion',  # anime style
                        'wavymulder/portraitplus',  # 13 ilike
                        'dreamlike-art/dreamlike-photoreal-2.0',  # no check
                        'johnslegers/epic-diffusion',  # 15 ilike good example
                        'nitrosocke/Arcane-Diffusion'  # 16 ilike
                        ]
        self._seed = 667  # sum of walnut in ascii (or Angle 667)
        self._width = 512
        self._height = 512
        self._step = 50
        self._guidances = 7.5  # the value actually in effect after __init__
        # self._generator = torch.Generator(device='cuda')
        self.pipes = []
        self.prompts = []
        self.images = []
        self.seeds = []
        self.fname_id = 0
        self.dname_img = "img_colab/"
        return
    #
    # pretty print output name-value line
    def _pp(self, a, b):
        """Print *a* right-aligned in 34 columns, a colon, then *b*."""
        print("%34s : %s" % (str(a), str(b)))
        return
    #
    # pretty print the header or footer lines
    def _ph(self):
        """Print a dashed header/footer separator line."""
        print("-" * 34, ":", "-" * 34)
        return
    #
    # fetch huggingface file
    def fetch_hface_files(self,
                          hf_names,
                          hf_space="duchaba/skin_cancer_diagnose",
                          local_dir="/content/"):
        """Download each file in *hf_names* from a HF Space to *local_dir*.

        Args:
            hf_names: Iterable of file names inside the Space repo.
            hf_space: Space repo id to download from.
            local_dir: Local path prefix for the downloaded files.
        """
        # Pre-set the error label in case hf_names is not iterable.
        f = str(hf_names) + " is not iteratable, type: " + str(type(hf_names))
        try:
            for f in hf_names:
                lo = local_dir + f
                # NOTE(review): `force_filename` has been removed from recent
                # huggingface_hub releases — verify the pinned version.
                huggingface_hub.hf_hub_download(repo_id=hf_space, filename=f,
                                                use_auth_token=True,
                                                repo_type=huggingface_hub.REPO_TYPE_SPACE,
                                                force_filename=lo)
        except Exception as e:  # was a bare except: report the cause instead of hiding it
            self._pp("*Error", f)
            self._pp("*Reason", e)
        return
    #
    #
    def push_hface_files(self,
                         hf_names,
                         hf_space="duchaba/skin_cancer_diagnose",
                         local_dir="/content/"):
        """Upload each local file in *hf_names* to a HF Space repo.

        Args:
            hf_names: Iterable of file names (relative to *local_dir*).
            hf_space: Space repo id to upload into.
            local_dir: Local path prefix where the files live.
        """
        f = str(hf_names) + " is not iteratable, type: " + str(type(hf_names))
        try:
            for f in hf_names:
                lo = local_dir + f
                huggingface_hub.upload_file(
                    path_or_fileobj=lo,
                    path_in_repo=f,
                    repo_id=hf_space,
                    repo_type=huggingface_hub.REPO_TYPE_SPACE)
        except Exception as e:  # was a bare except: report the cause instead of hiding it
            self._pp("*Error", f)
            self._pp("*Reason", e)
        return
    #
    def write_file(self, fname, txt):
        """Write the strings in *txt* to *fname*, separated by newlines."""
        # Context manager guarantees the handle is closed even on error
        # (original opened/closed manually and could leak on exception).
        with open(fname, "w") as f:
            f.writelines("\n".join(txt))
        return
    def draw_it(self, prompt):
        """Return a placeholder image ('lion.png'); *prompt* is ignored.

        Raises:
            FileNotFoundError: if 'lion.png' does not exist.
        """
        # Bug fix: PIL is never imported at module level (the import is
        # commented out), so the original raised NameError; import locally.
        import PIL.Image
        url = 'lion.png'
        img = PIL.Image.open(url)
        return img
#
# add module/method
#
import functools


def add_method(cls):
    """Return a decorator that installs the decorated function on *cls*.

    The installed method is a thin pass-through wrapper carrying the
    original function's metadata (via functools.wraps). The undecorated
    function itself is returned, so it remains directly callable.
    """
    def _bind(func):
        @functools.wraps(func)
        def _proxy(*args, **kwargs):
            return func(*args, **kwargs)
        setattr(cls, func.__name__, _proxy)
        return func
    return _bind
# Create the working instance of the helper class.
monty = HFace_Pluto('Monty')
# Attach the "magic prompt" GPT-2 text-generation pipeline used by
# fetch_prompt below (downloads the model on first run).
monty.gpt2_pipe = transformers.pipeline(
    'text-generation',
    model='Gustavosta/MagicPrompt-Stable-Diffusion',
    tokenizer='gpt2',
)
# fetch prompt
@add_method(HFace_Pluto)
def _print_response(self, response):
    """Print the 'generated_text' field of every item in *response*."""
    for item in response:
        print(item['generated_text'])
    return
#
@add_method(HFace_Pluto)
def fetch_prompt(self, prompt, max_num=1, max_length=240, is_print=False):
    """Expand *prompt* into Stable Diffusion prompt text via the GPT-2 pipe.

    Args:
        prompt: Seed text to expand.
        max_num: Number of generated sequences to return.
        max_length: Maximum token length per sequence.
        is_print: When True, also print each generated text.

    Returns:
        The raw pipeline response (a list of dicts with 'generated_text').
    """
    response = self.gpt2_pipe(
        prompt,
        max_length=max_length,
        num_return_sequences=max_num,
    )
    if is_print:
        self._print_response(response)
    return response
# use pluto _pp for interface testing
# iface = gradio.Interface(fn=pluto.draw_it, inputs="text", outputs="image",
#                          flagging_options=["Excellent", "Good", "Not Bad"])
# Wire the prompt generator into a simple text-in / text-out Gradio UI.
iface = gradio.Interface(
    fn=monty.fetch_prompt,
    inputs="text",
    outputs="text",
    flagging_options=[],
)
# Start the web app (blocks until the server is stopped).
iface.launch()
# (end of scraped page content)