Josh Cox committed on
Commit
45bcb92
1 Parent(s): fafaeda
Files changed (1) hide show
  1. artist_lib.py +3 -2
artist_lib.py CHANGED
@@ -56,6 +56,7 @@ def imageClassifier(inputImage):
56
 
57
  def audioGenerator(inputText):
58
  device = "cuda" if torch.cuda.is_available() else "cpu"
 
59
  pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256").to(device)
60
  output = pipe()
61
  from IPython.display import display
@@ -88,7 +89,7 @@ def generate_tone(note, octave, duration):
88
 
89
  def draw(inp, this_model, force_new):
90
  device = "cuda" if torch.cuda.is_available() else "cpu"
91
- dtype = "torch.float16" if torch.cuda.is_available() else "torch.float32"
92
  drawing = inp
93
  if this_model == "stable-diffusion-2":
94
  this_model_addr = "stabilityai/stable-diffusion-2"
@@ -141,7 +142,7 @@ def write_blog(inp, this_model, min_length, max_length, force_new):
141
  return file.read()
142
  print("generating blog '", blog_post_name, "'", target_filename)
143
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
144
- dtype = "torch.float16" if torch.cuda.is_available() else "torch.float32"
145
  #generator = pipeline('text-generation', model='EleutherAI/gpt-neo-2.7B', device=device, torch_dtype=dtype)
146
  #generator = pipeline('text-generation', model=this_model_addr, torch_dtype=dtype)
147
  #generator = pipeline('text-generation', model=this_model_addr)
 
56
 
57
  def audioGenerator(inputText):
58
  device = "cuda" if torch.cuda.is_available() else "cpu"
59
+ dtype = torch.float16 if torch.cuda.is_available() else torch.float32
60
  pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256").to(device)
61
  output = pipe()
62
  from IPython.display import display
 
89
 
90
  def draw(inp, this_model, force_new):
91
  device = "cuda" if torch.cuda.is_available() else "cpu"
92
+ dtype = torch.float16 if torch.cuda.is_available() else torch.float32
93
  drawing = inp
94
  if this_model == "stable-diffusion-2":
95
  this_model_addr = "stabilityai/stable-diffusion-2"
 
142
  return file.read()
143
  print("generating blog '", blog_post_name, "'", target_filename)
144
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
145
+ dtype = torch.float16 if torch.cuda.is_available() else torch.float32
146
  #generator = pipeline('text-generation', model='EleutherAI/gpt-neo-2.7B', device=device, torch_dtype=dtype)
147
  #generator = pipeline('text-generation', model=this_model_addr, torch_dtype=dtype)
148
  #generator = pipeline('text-generation', model=this_model_addr)