loubnabnl committed
Commit 64beba9
1 Parent(s): 94cc48e

Update app.py

Files changed (1)
app.py +17 -14
app.py CHANGED
@@ -2,8 +2,7 @@ import json
 import os
 import pandas as pd
 import requests
-from multiprocessing import Pool
-from functools import partial
+import threading
 import streamlit as st
 from datasets import load_dataset, load_metric
 
@@ -33,7 +32,11 @@ def read_markdown(path):
         output = f.read()
     st.markdown(output, unsafe_allow_html=True)
 
-def generate_code(model_name, gen_prompt, max_new_tokens, temperature, seed):
+
+def generate_code(
+    generations, model_name, gen_prompt, max_new_tokens, temperature, seed
+):
+    # call space using its API endpoint
     url = (
         f"https://hf.space/embed/loubnabnl/{model_name.lower()}-subspace/+/api/predict/"
     )
@@ -41,7 +44,8 @@ def generate_code(model_name, gen_prompt, max_new_tokens, temperature, seed):
         url=url, json={"data": [gen_prompt, max_new_tokens, temperature, seed]}
     )
     generated_text = r.json()["data"][0]
-    return generated_text
+    generations.append(generated_text)
+
 
 def generate_code_threads(
     generations, models, gen_prompt, max_new_tokens, temperature, seed
@@ -181,23 +185,22 @@ gen_prompt = st.text_area(
     value=example_text,
     height=200,
 ).strip()
-if st.button("Generate code!"):
+if st.button("Generate code!", key=4):
     with st.spinner("Generating code..."):
-        # Create a multiprocessing Pool
-        pool = Pool()
-        generate_parallel = partial(
-            generate_code,
-
+        # use threading
+        generations = []
+        generate_code_threads(
+            generations,
+            selected_models,
             gen_prompt=gen_prompt,
             max_new_tokens=max_new_tokens,
             temperature=temperature,
             seed=seed,
         )
-        output = pool.map(generate_parallel, selected_models)
-        for i in range(len(output)):
+        for i in range(len(generations)):
             st.markdown(f"**{selected_models[i]}**")
-            st.code(output[i])
-        if len(output) < len(selected_models):
+            st.code(generations[i])
+        if len(generations) < len(selected_models):
             st.markdown("<span style='color:red'>Warning: Some models run into timeout, you can try generating code using the original subspaces: [InCoder](https://huggingface.co/spaces/loubnabnl/incoder-subspace), [CodeGen](https://huggingface.co/spaces/loubnabnl/codegen-subspace), [CodeParrot](https://huggingface.co/spaces/loubnabnl/codeparrot-subspace)</span>", unsafe_allow_html=True)
 
 # Resources
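
The hunks above call generate_code_threads but never show its body. Below is a minimal sketch of the pattern the diff implies, assuming one threading.Thread per model and a bounded join; the timeout value is illustrative, not taken from the app. It also shows why generate_code now appends to a shared generations list instead of returning: a thread target cannot hand a return value back to its caller, and a thread still running when the join times out simply never appends, which is exactly the condition the len(generations) < len(selected_models) warning checks.

import threading

import requests


def generate_code(generations, model_name, gen_prompt, max_new_tokens, temperature, seed):
    # call space using its API endpoint (same request as in the diff above)
    url = f"https://hf.space/embed/loubnabnl/{model_name.lower()}-subspace/+/api/predict/"
    r = requests.post(url=url, json={"data": [gen_prompt, max_new_tokens, temperature, seed]})
    generations.append(r.json()["data"][0])


def generate_code_threads(generations, models, gen_prompt, max_new_tokens, temperature, seed):
    # one thread per model; each appends its own result to the shared list
    threads = []
    for model_name in models:
        t = threading.Thread(
            target=generate_code,
            args=(generations, model_name, gen_prompt, max_new_tokens, temperature, seed),
        )
        t.start()
        threads.append(t)
    for t in threads:
        # bounded wait: a Space that hangs past the timeout never appends,
        # so the caller sees len(generations) < len(models)
        t.join(timeout=60)  # illustrative timeout, not from the app

Threads rather than a process Pool fit this workload: each call is an I/O-bound HTTP request, so the GIL is not a constraint, and list.append is atomic in CPython, so the shared list needs no lock. One caveat of the append pattern: results land in completion order, so the display loop's pairing of selected_models[i] with generations[i] assumes responses come back in launch order.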