import argparse
import os

import gradio as gr
import torch

from spanish_medica_llm import run_training


def generate_model(name):
    # Placeholder callback for the "Generate Response" button: echoes a greeting.
    return f"Welcome to Gradio, {name}!"
    
def generate(prompt):
    # Imported lazily so the heavy diffusers dependency loads only when needed.
    from diffusers import StableDiffusionPipeline

    # Load the fine-tuned pipeline from the training output directory;
    # fp16 weights and .to("cuda") assume a CUDA-capable GPU is present.
    pipe = StableDiffusionPipeline.from_pretrained("./output_model", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = pipe(prompt).images[0]
    return image
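
def save_generated_image(prompt, path="generated.png"):
    # Hedged usage sketch (not part of the original app): calls generate() above
    # and writes the resulting PIL image to disk. The file name is an assumption;
    # it inherits generate()'s requirements (./output_model, a CUDA GPU).
    image = generate(prompt)
    image.save(path)
    return path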
    
def evaluate_model():
    # Stub: evaluation is not implemented yet; the button simply echoes a status.
    return "Evaluate Model"
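
def smoke_test_model(prompt="a photo for a quick pipeline check"):
    # Hedged sketch of one thing evaluate_model could do (an assumption, not the
    # app's actual evaluation): generate a single image from a fixed prompt as a
    # smoke test of the trained pipeline. The prompt text is illustrative only.
    image = generate(prompt)
    return "Smoke test passed" if image is not None else "Smoke test failed"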

def train_model(*inputs):
    if "IS_SHARED_UI" in os.environ:
        raise gr.Error("This Space only works in duplicated instances")

    # The original referenced undefined names (stptxt, model_to_load,
    # class_data_dir, Training_Steps); the defaults below are assumptions.
    model_to_load = "CompVis/stable-diffusion-v1-4"  # assumed base checkpoint
    class_data_dir = "class_images"                  # assumed class-image folder
    stop_text_encoder_training = 0                   # assumed: never stop early
    training_steps = 800                             # assumed training budget

    args_general = argparse.Namespace(
        image_captions_filename=True,
        train_text_encoder=True,
        stop_text_encoder_training=stop_text_encoder_training,
        save_n_steps=0,
        pretrained_model_name_or_path=model_to_load,
        instance_data_dir="instance_images",
        class_data_dir=class_data_dir,
        output_dir="output_model",
        instance_prompt="",
        seed=42,
        resolution=512,
        mixed_precision="fp16",
        train_batch_size=1,
        gradient_accumulation_steps=1,
        use_8bit_adam=True,
        learning_rate=2e-6,
        lr_scheduler="polynomial",
        lr_warmup_steps=0,
        max_train_steps=training_steps,
    )
    run_training(args_general)
    torch.cuda.empty_cache()
    # This callback is wired to a single textbox, so return a status string;
    # packaging the weights for download is sketched in archive_trained_model below.
    return "Training finished: weights saved to output_model"
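
def archive_trained_model(model_dir="output_model", archive_name="diffusers_model"):
    # Hedged sketch (names are assumptions): zips the trained pipeline into
    # diffusers_model.zip so the Space could offer it for download.
    import shutil
    return shutil.make_archive(archive_name, "zip", model_dir)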

def stop_model(*inputs):
    # Stub: stopping a running training job is not implemented yet.
    return "Stop Model"


with gr.Blocks() as demo:
    gr.Markdown("Type a name below, then use the buttons to generate a response or to train, evaluate, and stop the model.")
    with gr.Row():
        inp = gr.Textbox(placeholder="What is your name?")
        out = gr.Textbox()
    btn_response = gr.Button("Generate Response")
    btn_response.click(fn=generate_model, inputs=inp, outputs=out)
    btn_train = gr.Button("Train Model")
    btn_train.click(fn=train_model, inputs=[], outputs=out)
    btn_evaluate = gr.Button("Evaluate Model")
    btn_evaluate.click(fn=evaluate_model, inputs=[], outputs=out)
    btn_stop = gr.Button("Stop Model")
    btn_stop.click(fn=stop_model, inputs=[], outputs=out)

demo.launch()
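
# Design note: train_model can run for a long time, and launch() without a
# request queue risks HTTP timeouts on long callbacks. A hedged alternative
# would be to enable Gradio's queue before launching, e.g.:
#
#     demo.queue().launch()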