Nupur Kumari committed on
Commit a8744f2
1 Parent(s): 6c93887

custom-diffusion-space

Files changed (4)
  1. .gitmodules +2 -2
  2. app.py +2 -0
  3. inference.py +1 -1
  4. trainer.py +2 -2
.gitmodules CHANGED
@@ -1,3 +1,3 @@
- [submodule "customdiffusion"]
-     path = customdiffusion
+ [submodule "custom-diffusion"]
+     path = custom-diffusion
      url = https://github.com/adobe-research/custom-diffusion
app.py CHANGED
@@ -38,6 +38,8 @@ You can assign a GPU in the {SETTINGS} tab if you are running this on HF Spaces.
  </center>
  '''

+ os.system("git clone https://github.com/adobe-research/custom-diffusion")
+ sys.path.append("custom-diffusion")

  def show_warning(warning_text: str) -> gr.Blocks:
      with gr.Blocks() as demo:
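For context, the two lines added to app.py fetch the renamed repository at startup and make it importable in the running process. A minimal sketch of that pattern, using subprocess instead of os.system and a guard so the clone is skipped when the directory already exists (both are assumptions for illustration, not part of this commit):

  import os
  import subprocess
  import sys

  REPO_URL = "https://github.com/adobe-research/custom-diffusion"
  REPO_DIR = "custom-diffusion"  # matches the new submodule path above

  # Clone the code once, then make it importable for this process only.
  if not os.path.isdir(REPO_DIR):
      subprocess.run(["git", "clone", REPO_URL, REPO_DIR], check=True)
  sys.path.append(REPO_DIR)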
inference.py CHANGED
@@ -10,7 +10,7 @@ import numpy as np

  import torch
  from diffusers import StableDiffusionPipeline
- sys.path.insert(0, 'customdiffusion')
+ sys.path.insert(0, 'custom-diffusion')


  def load_model(text_encoder, tokenizer, unet, save_path, modifier_token, freeze_model='crossattn_kv'):
trainer.py CHANGED
@@ -10,7 +10,7 @@ import gradio as gr
  import PIL.Image
  import torch

- os.environ['PYTHONPATH'] = f'customdiffusion:{os.getenv("PYTHONPATH", "")}'
+ os.environ['PYTHONPATH'] = f'custom-diffusion:{os.getenv("PYTHONPATH", "")}'


  def pad_image(image: PIL.Image.Image) -> PIL.Image.Image:
@@ -87,7 +87,7 @@ class Trainer:
      self.prepare_dataset(concept_images, resolution)

      command = f'''
-     accelerate launch customdiffusion/src/diffuser_training.py \
+     accelerate launch custom-diffusion/src/diffuser_training.py \
          --pretrained_model_name_or_path={base_model} \
          --instance_data_dir={self.instance_data_dir} \
          --output_dir={self.output_dir} \
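Unlike app.py, trainer.py points at the renamed repo through PYTHONPATH rather than sys.path because the training script runs in a separate process started by accelerate launch; environment variables are inherited by that child, while sys.path changes are not. A minimal sketch of that distinction (the child command here is a stand-in for the real training invocation, not part of this commit):

  import os
  import subprocess

  # Same idea as the trainer.py hunk: prepend the cloned repo to PYTHONPATH so
  # any child process (e.g. the one started by `accelerate launch`) can import it.
  os.environ["PYTHONPATH"] = f'custom-diffusion:{os.getenv("PYTHONPATH", "")}'

  # The child interpreter picks up the PYTHONPATH entry on its own sys.path.
  subprocess.run(
      ["python", "-c", "import sys; print([p for p in sys.path if 'custom-diffusion' in p])"],
      check=True,
  )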