ahmedalrashedi committed
Commit 1c716b5
1 Parent(s): 6d4d5a3

Update app.py

Files changed (1)
  1. app.py +15 -161
app.py CHANGED
@@ -1,16 +1,23 @@
  # import sentencepiece


- # import streamlit as st
- # from transformers import pipeline
+ import streamlit as st
+ from transformers import pipeline
+
+ sentiment_analysis = pipeline("sentiment-analysis")
+ translation = pipeline("translation_en_to_ar", model="anibahug/marian-finetuned-kde4-en-to-ar")
+
+ text = st.text_input("Enter some text")

- # sentiment_analysis = pipeline("sentiment-analysis")
+ if text:
+     result = sentiment_analysis(text)
+     st.json(result)
+
+ if text:
+     result = translation(text)[0]
+     st.write(f"Translated text: {result['translation_text']}")

- # text = st.text_input("Enter some text")

- # if text:
- #     result = sentiment_analysis(text)
- #     st.json(result)

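Note on the block added above: both pipelines are built at module level, so Streamlit reloads the models on every rerun of the script, and `text` is tested twice. A minimal sketch of the same app with the pipelines cached, assuming streamlit >= 1.18 (for st.cache_resource); only the two model choices come from the diff:

import streamlit as st
from transformers import pipeline

@st.cache_resource
def load_pipelines():
    # Built once per process instead of on every Streamlit rerun
    sentiment = pipeline("sentiment-analysis")
    translation = pipeline(
        "translation_en_to_ar",
        model="anibahug/marian-finetuned-kde4-en-to-ar",
    )
    return sentiment, translation

sentiment_analysis, translation = load_pipelines()

text = st.text_input("Enter some text")
if text:
    st.json(sentiment_analysis(text))
    result = translation(text)[0]
    st.write(f"Translated text: {result['translation_text']}")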
 
@@ -38,157 +45,4 @@
  # confidence = results[0]['score']

  # st.write(f"Sentiment: {sentiment}")
- # st.write(f"Confidence: {confidence:.2f}")
-
-
- import torch
- from transformers import AutoModelForDiffusion, DiffusionConfig
-
- # Specify the model parameters
- model_config = DiffusionConfig(
-     image_size=64,
-     num_channels=3,
-     num_steps=1000,
-     diffusion_steps=100,
-     noise_schedule="linear",
-     learning_rate=0.0001,
- )
-
- # Load the model
- model = AutoModelForDiffusion.from_pretrained("runwayml/stable-diffusion-v1-5", config=model_config)
-
- # Generate an image
- image = model.generate(text="This is a test")
-
- # Save the image
- torch.save(image, "image.png")
-
-
- # # Import streamlit and huggingface libraries
- # import streamlit as st
- # from transformers import pipeline
-
- # # Create a text to image pipeline
- # text_to_image = pipeline("text-to-image", model="runwayml/stable-diffusion-v1-5")
-
- # # Create a text input widget
- # text = st.text_input("Enter some text")
-
- # # Generate an image and display the result
- # if text:
- #     result = text_to_image(text)[0]
- #     st.image(result["image"])
-
-
- # import streamlit as st
- # from transformers import pipeline
-
- # translation = pipeline("translation_en_to_ar", model="anibahug/marian-finetuned-kde4-en-to-ar")
-
- # if text:
- #     result = translation(text)[0]
- #     st.write(f"Translated text: {result['translation_text']}")
-
-
- # text_to_speech = pipeline("Text-to-Speech")
-
- # if text:
- #     result = text_to_speech(text)[0]
- #     st.audio(result["audio"])
-
-
- # import streamlit as st
- # from transformers import pipeline, MarianTokenizer, MarianForCausalLM
-
- # # Set up Streamlit layout
- # st.title("Text-to-Speech App")
- # st.write("Enter text below:")
-
- # text_input = st.text_area("Input Text")
-
- # # Load the text-to-speech model
- # model_name = "facebook/wav2vec2-base-960h"  # Replace with your desired TTS model
- # tokenizer = MarianTokenizer.from_pretrained(model_name)
- # model = MarianForCausalLM.from_pretrained(model_name)
-
- # # Generate audio
- # if st.button("Generate Audio"):
- #     if text_input:
- #         input_ids = tokenizer.encode(text_input, return_tensors="pt")
- #         with st.spinner("Generating audio..."):
- #             output = model.generate(input_ids)
- #             st.audio(output[0].numpy(), format="audio/wav")
-
- # # Display credits and instructions
- # st.markdown("Powered by Hugging Face Spaces API and Streamlit.")
-
-
- # import streamlit as st
- # from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
-
- # # Load the text-to-speech model from Huggingface Spaces
- # model_id = "facebook/bart-base-tts"
- # model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
- # tokenizer = AutoTokenizer.from_pretrained(model_id)
-
- # # Create a text input field
- # text = st.text_input("Enter some text:")
-
- # # If the user enters some text, generate the speech
- # if text:
- #     # Tokenize the text
- #     inputs = tokenizer(text=text, return_tensors="pt")
-
- #     # Generate the speech
- #     generated_speech = model.generate(**inputs)
-
- #     # Play the speech
- #     st.audio(generated_speech, format="wav")
-
+ # st.write(f"Confidence: {confidence:.2f}")
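On the removed image-generation code: transformers ships no AutoModelForDiffusion or DiffusionConfig, its pipeline() has no "text-to-image" task, and torch.save would not write a valid PNG, so neither variant could have run. Stable Diffusion checkpoints load through the diffusers library instead; a hedged sketch of a working equivalent, assuming diffusers and torch are installed and a CUDA GPU is available:

import torch
from diffusers import StableDiffusionPipeline

# Load the Stable Diffusion v1-5 checkpoint named in the removed code
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,  # assumption: half precision to fit a consumer GPU
)
pipe = pipe.to("cuda")  # assumption: a CUDA device is present

# The pipeline returns PIL images, so PIL handles the PNG encoding
image = pipe("This is a test").images[0]
image.save("image.png")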
 
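On the removed text-to-speech attempts: "Text-to-Speech" is not a valid pipeline task string, facebook/wav2vec2-base-960h is a speech-recognition checkpoint rather than a TTS model, and facebook/bart-base-tts does not appear to exist on the Hub. A hedged sketch of a version that can work, assuming transformers >= 4.32 (which added a "text-to-speech" pipeline task) and streamlit >= 1.21 (st.audio with a raw array and sample_rate); suno/bark-small is an illustrative checkpoint, not one from this commit:

import streamlit as st
from transformers import pipeline

@st.cache_resource
def load_tts():
    # "text-to-speech" is the lowercase task string the pipeline API expects
    return pipeline("text-to-speech", model="suno/bark-small")

text_to_speech = load_tts()

text = st.text_input("Enter some text")
if text:
    result = text_to_speech(text)  # dict with "audio" and "sampling_rate"
    st.audio(result["audio"], sample_rate=result["sampling_rate"])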