loubnabnl committed
Commit 321451b
1 Parent(s): d0c9149

Update app.py

Files changed (1)
  1. app.py +14 -19
app.py CHANGED
@@ -2,7 +2,8 @@ import json
 import os
 import pandas as pd
 import requests
-import threading
+from multiprocessing import Pool
+from functools import partial
 import streamlit as st
 from datasets import load_dataset, load_metric
 
@@ -32,11 +33,7 @@ def read_markdown(path):
         output = f.read()
     st.markdown(output, unsafe_allow_html=True)
 
-
-def generate_code(
-    generations, model_name, gen_prompt, max_new_tokens, temperature, seed
-):
-    # call space using its API endpoint
+def generate_code(model_name, gen_prompt, max_new_tokens, temperature, seed):
     url = (
         f"https://hf.space/embed/loubnabnl/{model_name.lower()}-subspace/+/api/predict/"
     )
@@ -44,8 +41,7 @@ def generate_code(
         url=url, json={"data": [gen_prompt, max_new_tokens, temperature, seed]}
     )
     generated_text = r.json()["data"][0]
-    generations.append({model_name: generated_text})
-
+    return generated_text
 
 def generate_code_threads(
     generations, models, gen_prompt, max_new_tokens, temperature, seed
@@ -185,24 +181,23 @@ gen_prompt = st.text_area(
     value=example_text,
     height=200,
 ).strip()
-if st.button("Generate code!", key=4):
+if st.button("Generate code!"):
     with st.spinner("Generating code..."):
-        # use threading
-        generations = []
-        generate_code_threads(
-            generations,
-            selected_models,
+        # Create a multiprocessing Pool
+        pool = Pool()
+        generate_parallel = partial(
+            generate_code,
+
             gen_prompt=gen_prompt,
             max_new_tokens=max_new_tokens,
             temperature=temperature,
             seed=seed,
         )
-        for i in range(len(generations)):
+        output = pool.map(generate_parallel, selected_models)
+        for i in range(len(output)):
             st.markdown(f"**{selected_models[i]}**")
-            for j in range(len(generations)):
-                if selected_models[i] in generations[j].keys():
-                    st.code(generations[j][selected_models[i]])
-        if len(generations) < len(selected_models):
+            st.code(output[i])
+        if len(output) < len(selected_models):
             st.markdown("<span style='color:red'>Warning: Some models run into timeout, you can try generating code using the original subspaces: [InCoder](https://huggingface.co/spaces/loubnabnl/incoder-subspace), [CodeGen](https://huggingface.co/spaces/loubnabnl/codegen-subspace), [CodeParrot](https://huggingface.co/spaces/loubnabnl/codeparrot-subspace)</span>", unsafe_allow_html=True)
 
 # Resources
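The commit replaces the thread-based fan-out (workers appending into a shared `generations` list) with `multiprocessing.Pool` plus `functools.partial`. Below is a minimal standalone sketch of that pattern, assuming the endpoint URL scheme, payload shape, and parameter names shown in the diff; the model list and generation settings are illustrative values, not taken from the commit.

    # Sketch of the pattern this commit adopts: bind every argument except
    # model_name with functools.partial, then let Pool.map call one model's
    # Space endpoint per worker process and collect the results.
    from functools import partial
    from multiprocessing import Pool

    import requests


    def generate_code(model_name, gen_prompt, max_new_tokens, temperature, seed):
        # Call the model's Space through its API endpoint (as in the committed code).
        url = (
            f"https://hf.space/embed/loubnabnl/{model_name.lower()}-subspace/+/api/predict/"
        )
        r = requests.post(
            url=url, json={"data": [gen_prompt, max_new_tokens, temperature, seed]}
        )
        return r.json()["data"][0]


    if __name__ == "__main__":
        selected_models = ["InCoder", "CodeGen", "CodeParrot"]  # illustrative choice
        # Bind the shared generation settings; Pool.map supplies model_name.
        generate_parallel = partial(
            generate_code,
            gen_prompt="def print_hello_world():",  # illustrative prompt
            max_new_tokens=8,
            temperature=0.2,
            seed=42,
        )
        with Pool() as pool:
            output = pool.map(generate_parallel, selected_models)
        for model, text in zip(selected_models, output):
            print(f"--- {model} ---\n{text}")

One effect of this switch is visible in the diff: `pool.map` returns results in the same order as the input model list, so the new display loop can index `output[i]` directly, whereas the old threaded version had to search the `generations` list for each model's entry because thread completion order was nondeterministic.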