loubnabnl (HF staff) committed on
Commit 1d384f8 • 1 Parent(s): b6b5314
Files changed (1)
  1. app.py +1 -84
app.py CHANGED
@@ -33,91 +33,8 @@ def generate_code(model_name, gen_prompt, max_new_tokens, temperature, seed):
 st.set_page_config(page_icon=":laptop:", layout="wide")
 
 # Introduction
-st.title("Code Generation Models")
+st.title("Code generation with 🤗")
 with open("utils/intro.txt", "r") as f:
     intro = f.read()
 st.markdown(intro)
 
-# Pretraining datasets
-st.title("1 - Pretraining datasets 📚")
-st.markdown(
-    f"Preview of some code files from Github repositories in [Github-code dataset]({GITHUB_CODE}):"
-)
-df = pd.read_csv("utils/data_preview.csv")
-st.dataframe(df)
-st.header("Model")
-selected_model = st.selectbox(
-    "Select a code generation model", MODELS, default=["CodeParrot"]
-)
-with open(f"datasets/{selected_model.lower()}.txt", "r") as f:
-    text = f.read()
-st.markdown(text)
-
-# Model architecture
-st.title("Model architecture")
-st.markdow("Most code generation models use GPT style architectures trained on code. Some use encoder-decoder architectures such as AlphaCode.")
-st.header("Model")
-selected_model = st.selectbox(
-    "Select a code generation model", MODELS, default=["CodeParrot"]
-)
-with open(f"architectures/{selected_model.lower()}.txt", "r") as f:
-    text = f.read()
-st.markdown(text)
-if model == "InCoder":
-    st.image(INCODER_IMG, caption="Figure 1: InCoder training", width=700)
-
-# Model evaluation
-st.title("Code models evaluation 📊")
-with open("evaluation/intro.txt", "r") as f:
-    intro = f.read()
-st.markdown(intro)
-
-# Code generation
-st.title("Code generation 💻")
-st.header("Models")
-selected_models = st.sidebar.multiselect(
-    "Select code generation models to compare", MODELS, default=["CodeParrot"]
-)
-st.header("Examples")
-examples = load_examples()
-example_names = [example["name"] for example in examples]
-name2id = dict([(name, i) for i, name in enumerate(example_names)])
-selected_example = st.selectbox(
-    "Select one of the following examples or implement yours", example_names
-)
-example_text = examples[name2id[selected_example]]["value"]
-default_length = examples[name2id[selected_example]]["length"]
-st.header("Generation settings")
-temperature = st.slider(
-    "Temperature:", value=0.2, min_value=0.0, step=0.1, max_value=2.0
-)
-max_new_tokens = st.slider(
-    "Number of tokens to generate:",
-    value=default_length,
-    min_value=8,
-    step=8,
-    max_value=256,
-)
-seed = st.slider(
-    "Random seed:", value=42, min_value=0, step=1, max_value=1000
-)
-gen_prompt = st.text_area(
-    "Generate code with prompt:",
-    value=example_text,
-    height=220,
-).strip()
-if st.button("Generate code!"):
-    with st.spinner("Generating code..."):
-        # Create a multiprocessing Pool
-        pool = Pool()
-        generate_parallel = partial(
-            generate_code,
-            gen_prompt=gen_prompt,
-            max_new_tokens=max_new_tokens,
-            temperature=temperature,
-            seed=seed,
-        )
-        output = pool.map(generate_parallel, selected_models)
-        for i in range(len(output)):
-            st.markdown(f"**{selected_models[i]}**")
-            st.code(output[i])
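
For reference, the removed generation step fans one prompt out to several models by binding the fixed generation arguments with functools.partial and mapping the bound function over the selected model names with a multiprocessing Pool. Below is a minimal standalone sketch of that pattern, with a hypothetical stub standing in for the app's real generate_code and placeholder model names and prompt:

from functools import partial
from multiprocessing import Pool


def generate_code(model_name, gen_prompt, max_new_tokens, temperature, seed):
    # Hypothetical stub: the real app calls each model's generation backend here.
    return f"# {model_name} completion for {gen_prompt!r} ({max_new_tokens} tokens, T={temperature}, seed={seed})"


if __name__ == "__main__":
    selected_models = ["CodeParrot", "InCoder"]  # placeholder model names
    generate_parallel = partial(
        generate_code,
        gen_prompt="def hello_world():",  # placeholder prompt
        max_new_tokens=8,
        temperature=0.2,
        seed=42,
    )
    # Pool.map supplies each model name as the remaining positional argument
    # and returns the outputs in the same order as selected_models.
    with Pool() as pool:
        outputs = pool.map(generate_parallel, selected_models)
    for model, output in zip(selected_models, outputs):
        print(f"{model}:\n{output}\n")

In the app itself, the removed block rendered each result with st.markdown and st.code rather than printing it.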