Josh Cox committed on
Commit
d6a11dd
1 Parent(s): 45bcb92

justArtist

Browse files
Files changed (2) hide show
  1. __pycache__/artist_lib.cpython-311.pyc +0 -0
  2. app.py +2 -104
__pycache__/artist_lib.cpython-311.pyc CHANGED
Binary files a/__pycache__/artist_lib.cpython-311.pyc and b/__pycache__/artist_lib.cpython-311.pyc differ
 
app.py CHANGED
@@ -30,110 +30,10 @@ drawdemo = gr.Interface(
30
  ],
31
  outputs="image",
32
  examples=[
33
- ['van gogh dogs playing poker', "stable-diffusion-v1-5", False],
34
- ['picasso the scream', "stable-diffusion-v1-5", False],
35
- ['dali american gothic', "stable-diffusion-v1-5", False],
36
- ['matisse mona lisa', "stable-diffusion-v1-5", False],
37
- ['maxfield parrish angel in lake ', "stable-diffusion-v1-5", False],
38
- ['peter max dogs playing poker', "stable-diffusion-v1-5", False],
39
- ['hindu mandala copper and patina green', "stable-diffusion-v1-5", False],
40
- ['hindu mandala fruit salad', "stable-diffusion-v1-5", False],
41
- ['hindu mandala neon green black and purple', "stable-diffusion-v1-5", False],
42
- ['astronaut riding a horse on mars', "stable-diffusion-v1-5", False]
43
  ],
44
  )
45
 
46
- AudioDemo = gr.Interface(
47
- fn=artist_lib.generate_tone,
48
- inputs=[
49
- gr.Dropdown(artist_lib.notes, type="index"),
50
- gr.Slider(4, 6, step=1),
51
- gr.Textbox(value=1, label="Duration in seconds")
52
- ],
53
- outputs="audio"
54
- )
55
-
56
- imageClassifierDemo = gr.Interface(
57
- fn=artist_lib.imageClassifier,
58
- inputs="image",
59
- outputs="text"
60
- )
61
-
62
- audioGeneratorDemo = gr.Interface(
63
- fn=artist_lib.audioGenerator,
64
- inputs="text",
65
- outputs="audio",
66
- examples=[
67
- ['balsamic beats'],
68
- ['dance the night away']
69
- ]
70
- )
71
-
72
- nameMyPetDemo = gr.Interface(
73
- fn=artist_lib.nameMyPet,
74
- inputs=[
75
- gr.Text(label="What type of animal is your pet?", value="green cat")
76
- ],
77
- outputs="text",
78
- examples=[
79
- ['dog'],
80
- ['pink dolphin'],
81
- ['elevated elephant'],
82
- ['green monkey'],
83
- ['bionic beaver'],
84
- ['felonous fish'],
85
- ['delinquent dog'],
86
- ['dragging donkey'],
87
- ['stinky skunk'],
88
- ['pink unicorn'],
89
- ['naughty narwahl'],
90
- ['blue cat']
91
- ],
92
- )
93
-
94
- blog_writer_demo = gr.Interface(
95
- fn=artist_lib.write_blog,
96
- inputs=[
97
- gr.Text(label="Blog description text", value="machine learning can be used to track chickens"),
98
- gr.Dropdown(label='Model', choices=["gpt-neo-1.3B", "gpt-neo-2.7B"], value="gpt-neo-1.3B"),
99
- gr.Number(label='Minimum word count', value=50, precision=0),
100
- gr.Number(label='Maximum word count', value=50, precision=0),
101
- gr.Checkbox(label="Force-New"),
102
- ],
103
- outputs="text",
104
- examples=[
105
- ['machine learning can be used to track chickens', "gpt-neo-1.3B", 50, 50, False],
106
- ['music and machine learning', "gpt-neo-2.7B", 50, 50, False]
107
- ],
108
- )
109
-
110
- generateAudioDemo = gr.Interface(
111
- fn=artist_lib.generate_spectrogram_audio_and_loop,
112
- title="Audio Diffusion",
113
- description="Generate audio using Huggingface diffusers.\
114
- The models without 'latent' or 'ddim' give better results but take about \
115
- 20 minutes without a GPU. For GPU, you can use \
116
- [colab](https://colab.research.google.com/github/teticio/audio-diffusion/blob/master/notebooks/gradio_app.ipynb) \
117
- to run this app.",
118
- inputs=[
119
- gr.Dropdown(label="Model",
120
- choices=[
121
- "teticio/audio-diffusion-256",
122
- "teticio/audio-diffusion-breaks-256",
123
- "teticio/audio-diffusion-instrumental-hiphop-256",
124
- "teticio/audio-diffusion-ddim-256",
125
- "teticio/latent-audio-diffusion-256",
126
- "teticio/latent-audio-diffusion-ddim-256"
127
- ],
128
- value="teticio/latent-audio-diffusion-ddim-256")
129
- ],
130
- outputs=[
131
- gr.Image(label="Mel spectrogram", image_mode="L"),
132
- gr.Audio(label="Audio"),
133
- gr.Audio(label="Loop"),
134
- ],
135
- allow_flagging="never")
136
-
137
  with gr.Blocks() as gallerydemo:
138
  with gr.Column(variant="panel"):
139
  with gr.Row(variant="compact"):
@@ -151,9 +51,7 @@ with gr.Blocks() as gallerydemo:
151
 
152
  btn.click(artist_lib.fake_gan, None, gallery)
153
 
154
- #artist = gr.TabbedInterface( [drawdemo, blog_writer_demo, gallerydemo], ["Draw", "Bloggr", "Gallery"])
155
- #artist = gr.TabbedInterface( [drawdemo, blog_writer_demo, imageClassifierDemo, generateAudioDemo, audioGeneratorDemo, AudioDemo, nameMyPetDemo], ["Draw", "Bloggr", "imageClassifier", "generateAudio", "audioGenerator", "AudioDemo", "nameMyPet"])
156
- artist = gr.TabbedInterface( [drawdemo, imageClassifierDemo, generateAudioDemo, nameMyPetDemo, blog_writer_demo], ["Draw", "imageClassifier", "generateAudio", "nameMyPet", "Bloggr"])
157
 
158
  artist.queue(
159
  max_size = 4
 
30
  ],
31
  outputs="image",
32
  examples=[
33
+ ['hindu mandala fruit salad', "stable-diffusion-v1-5", False]
 
 
 
 
 
 
 
 
 
34
  ],
35
  )
36
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  with gr.Blocks() as gallerydemo:
38
  with gr.Column(variant="panel"):
39
  with gr.Row(variant="compact"):
 
51
 
52
  btn.click(artist_lib.fake_gan, None, gallery)
53
 
54
+ artist = gr.TabbedInterface( [drawdemo], ["Draw"])
 
 
55
 
56
  artist.queue(
57
  max_size = 4