ahmedalrashedi committed on
Commit 6d4d5a3
1 Parent(s): 72ec550

Update app.py

Files changed (1)
  1. app.py +59 -16
app.py CHANGED
@@ -42,6 +42,30 @@



+ import torch
+ from transformers import AutoModelForDiffusion, DiffusionConfig
+
+ # Specify the model parameters
+ model_config = DiffusionConfig(
+     image_size=64,
+     num_channels=3,
+     num_steps=1000,
+     diffusion_steps=100,
+     noise_schedule="linear",
+     learning_rate=0.0001,
+ )
+
+ # Load the model
+ model = AutoModelForDiffusion.from_pretrained("runwayml/stable-diffusion-v1-5", config=model_config)
+
+ # Generate an image
+ image = model.generate(text="This is a test")
+
+ # Save the image
+ torch.save(image, "image.png")
+
+
+

 # # Import streamlit and huggingface libraries
 # import streamlit as st
@@ -88,6 +112,25 @@



+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+


 # import streamlit as st
@@ -126,26 +169,26 @@



- import streamlit as st
- from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+ # import streamlit as st
+ # from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

- # Load the text-to-speech model from Huggingface Spaces
- model_id = "facebook/bart-base-tts"
- model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
- tokenizer = AutoTokenizer.from_pretrained(model_id)
+ # # Load the text-to-speech model from Huggingface Spaces
+ # model_id = "facebook/bart-base-tts"
+ # model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
+ # tokenizer = AutoTokenizer.from_pretrained(model_id)

- # Create a text input field
- text = st.text_input("Enter some text:")
+ # # Create a text input field
+ # text = st.text_input("Enter some text:")

- # If the user enters some text, generate the speech
- if text:
-     # Tokenize the text
-     inputs = tokenizer(text=text, return_tensors="pt")
+ # # If the user enters some text, generate the speech
+ # if text:
+ #     # Tokenize the text
+ #     inputs = tokenizer(text=text, return_tensors="pt")

-     # Generate the speech
-     generated_speech = model.generate(**inputs)
+ # # Generate the speech
+ # generated_speech = model.generate(**inputs)

-     # Play the speech
-     st.audio(generated_speech, format="wav")
+ # # Play the speech
+ # st.audio(generated_speech, format="wav")


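
AutoModelForDiffusion and DiffusionConfig are not classes provided by the transformers library, so the block added in the first hunk fails at import time. Generation from the "runwayml/stable-diffusion-v1-5" checkpoint normally goes through the diffusers library instead; the sketch below shows that route, assuming diffusers and torch are installed and reusing the prompt and output filename from the commit. It is a minimal illustration, not the app's actual code.

import torch
from diffusers import StableDiffusionPipeline

# Pick a device; float16 only makes sense on a GPU
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Load the Stable Diffusion v1.5 checkpoint named in the commit
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=dtype
).to(device)

# Generate an image from the same prompt used in the commit
image = pipe("This is a test").images[0]

# The pipeline returns a PIL image, so save it directly;
# torch.save would pickle the object instead of writing a viewable PNG
image.save("image.png")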
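
The commented-out text-to-speech block in the last hunk would also not run if re-enabled: "facebook/bart-base-tts" is not a published checkpoint, BART is a text-to-text model, and AutoModelForSeq2SeqLM.generate returns token IDs rather than audio that st.audio could play. A working Streamlit version would more likely use a dedicated text-to-speech pipeline; the sketch below is illustrative only, assuming a recent transformers release that ships the "text-to-speech" pipeline task, with suno/bark-small used as a stand-in model.

import numpy as np
import streamlit as st
from transformers import pipeline

@st.cache_resource
def load_tts():
    # Cache the pipeline so Streamlit does not reload the model on every rerun
    return pipeline("text-to-speech", model="suno/bark-small")

tts = load_tts()

# Create a text input field
text = st.text_input("Enter some text:")

# If the user enters some text, generate and play the speech
if text:
    output = tts(text)  # dict with an "audio" array and its "sampling_rate"
    st.audio(np.squeeze(output["audio"]), sample_rate=output["sampling_rate"])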