loubnabnl HF staff committed on
Commit
b4a8842
1 Parent(s): 64beba9

fix threading

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -44,7 +44,7 @@ def generate_code(
44
  url=url, json={"data": [gen_prompt, max_new_tokens, temperature, seed]}
45
  )
46
  generated_text = r.json()["data"][0]
47
- generations.append(generated_text)
48
 
49
 
50
  def generate_code_threads(
@@ -199,7 +199,9 @@ if st.button("Generate code!", key=4):
199
  )
200
  for i in range(len(generations)):
201
  st.markdown(f"**{selected_models[i]}**")
202
- st.code(generations[i])
 
 
203
  if len(generations) < len(selected_models):
204
  st.markdown("<span style='color:red'>Warning: Some models run into timeout, you can try generating code using the original subspaces: [InCoder](https://huggingface.co/spaces/loubnabnl/incoder-subspace), [CodeGen](https://huggingface.co/spaces/loubnabnl/codegen-subspace), [CodeParrot](https://huggingface.co/spaces/loubnabnl/codeparrot-subspace)</span>", unsafe_allow_html=True)
205
 
 
44
  url=url, json={"data": [gen_prompt, max_new_tokens, temperature, seed]}
45
  )
46
  generated_text = r.json()["data"][0]
47
+ generations.append({model_name: generated_text})
48
 
49
 
50
  def generate_code_threads(
 
199
  )
200
  for i in range(len(generations)):
201
  st.markdown(f"**{selected_models[i]}**")
202
+ for j in range(len(generations)):
203
+ if selected_models[i] in generations[j].keys():
204
+ st.code(generations[j][selected_models[i]])
205
  if len(generations) < len(selected_models):
206
  st.markdown("<span style='color:red'>Warning: Some models run into timeout, you can try generating code using the original subspaces: [InCoder](https://huggingface.co/spaces/loubnabnl/incoder-subspace), [CodeGen](https://huggingface.co/spaces/loubnabnl/codegen-subspace), [CodeParrot](https://huggingface.co/spaces/loubnabnl/codeparrot-subspace)</span>", unsafe_allow_html=True)
207