import argparse
import itertools
import math
import os
import subprocess
import sys
from pathlib import Path
from typing import Optional

import torch
import gradio as gr

from spanish_medica_llm import run_training

# def greet(name):
#     return "Hello " + name + "!!"
# iface = gr.Interface(fn=greet, inputs="text", outputs="text")
# iface.launch()
def generate_model(name):
    return f"Welcome to Gradio, {name}!"
def generate(prompt):
    # Load the fine-tuned model from the local output directory and run a
    # single text-to-image generation on the GPU.
    from diffusers import StableDiffusionPipeline

    pipe = StableDiffusionPipeline.from_pretrained("./output_model", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = pipe(prompt).images[0]
    return image
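# Example wiring for generate(), shown for reference only: it is not hooked
# into the Blocks demo below, and the component names here are illustrative.
#
# with gr.Blocks() as image_demo:
#     prompt_box = gr.Textbox(label="Prompt")
#     image_out = gr.Image()
#     gr.Button("Generate Image").click(fn=generate, inputs=prompt_box, outputs=image_out)
# image_demo.launch()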
def evaluate_model():
    # Placeholder: the evaluation pipeline below is not implemented yet.
    # from diffusers import StableDiffusionPipeline
    # pipe = StableDiffusionPipeline.from_pretrained("./output_model", torch_dtype=torch.float16)
    # pipe = pipe.to("cuda")
    # image = pipe(prompt).images[0]
    return "Evaluate Model"
def train_model(*inputs):
    if "IS_SHARED_UI" in os.environ:
        raise gr.Error("This Space only works in duplicated instances")

    # NOTE: the original snippet referenced several undefined names
    # (model_to_load, stptxt, class_data_dir, Training_Steps). The values
    # below are placeholder assumptions so the function can run; adjust them
    # to your own base checkpoint and training budget.
    model_to_load = "CompVis/stable-diffusion-v1-4"  # assumed base model
    stptxt = 0                                       # step at which to stop training the text encoder
    class_data_dir = None                            # no prior-preservation class images
    Training_Steps = 800                             # assumed number of training steps

    args_general = argparse.Namespace(
        image_captions_filename=True,
        train_text_encoder=True,
        stop_text_encoder_training=stptxt,
        save_n_steps=0,
        pretrained_model_name_or_path=model_to_load,
        instance_data_dir="instance_images",
        class_data_dir=class_data_dir,
        output_dir="output_model",
        instance_prompt="",
        seed=42,
        resolution=512,
        mixed_precision="fp16",
        train_batch_size=1,
        gradient_accumulation_steps=1,
        use_8bit_adam=True,
        learning_rate=2e-6,
        lr_scheduler="polynomial",
        lr_warmup_steps=0,
        max_train_steps=Training_Steps,
    )
    run_training(args_general)
    torch.cuda.empty_cache()
    # convert("output_model", "model.ckpt")
    # shutil.rmtree('instance_images')
    # shutil.make_archive("diffusers_model", 'zip', "output_model")
    # with zipfile.ZipFile('diffusers_model.zip', 'w', zipfile.ZIP_DEFLATED) as zipf:
    #     zipdir('output_model/', zipf)

    # The original return value updated four separate output components
    # (a file download plus extra buttons); the demo below wires this handler
    # to a single Textbox, so return a simple status string instead.
    # return [gr.update(visible=True, value=["diffusers_model.zip"]), gr.update(visible=True),
    #         gr.update(visible=True), gr.update(visible=True)]
    return "Training finished"
def stop_model(*inputs):
    return "Model with Gradio!"
with gr.Blocks() as demo:
    gr.Markdown("Start typing below and then click **Run** to see the output.")
    with gr.Row():
        inp = gr.Textbox(placeholder="What is your name?")
        out = gr.Textbox()

    btn_response = gr.Button("Generate Response")
    btn_response.click(fn=generate_model, inputs=inp, outputs=out)

    btn_train = gr.Button("Train Model")
    btn_train.click(fn=train_model, inputs=[], outputs=out)

    btn_evaluate = gr.Button("Evaluate Model")
    btn_evaluate.click(fn=evaluate_model, inputs=[], outputs=out)

    btn_stop = gr.Button("Stop Model")
    btn_stop.click(fn=stop_model, inputs=[], outputs=out)

demo.launch()