import json
import os
import requests
import threading
import streamlit as st
from datasets import load_dataset, load_metric

MODELS = ["CodeParrot", "InCoder", "CodeGen", "PolyCoder"]
GENERATION_MODELS = ["CodeParrot", "InCoder", "CodeGen"]
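# note: only the first three models have hosted generation Spaces, so
# PolyCoder appears in the dataset/architecture sections but not in generation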


@st.cache()
def load_examples():
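    """Load the generation examples (name, prompt value, default length) from disk."""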
    with open("utils/examples.json", "r") as f:
        examples = json.load(f)
    return examples
    
    
def load_evaluation():
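    """Load HumanEval problem 2 and the code_eval metric for the pass@k demo."""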
    # code_eval executes model-generated code, so it must be enabled explicitly
    os.environ["HF_ALLOW_CODE_EVAL"] = "1"
    human_eval = load_dataset("openai_humaneval")
    entry_point = f"check({human_eval['test'][2]['entry_point']})"
    test_func = "\n" + human_eval["test"][2]["test"] + "\n" + entry_point
    code_eval = load_metric("code_eval")
    return code_eval, test_func


def read_markdown(path):
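    """Render a markdown file inside the app."""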
    with open(path, "r") as f:
        output = f.read()
    st.markdown(output, unsafe_allow_html=True)


def generate_code(
    generations, model_name, gen_prompt, max_new_tokens, temperature, seed
):
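    """Query one model's Space and append {model_name: text} to `generations`.

    list.append is atomic in CPython, so the shared list is safe to use
    from multiple threads without a lock.
    """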
    # call space using its API endpoint
    url = (
        f"https://hf.space/embed/codeparrot/{model_name.lower()}-subspace/+/api/predict/"
    )
    r = requests.post(
        url=url, json={"data": [gen_prompt, max_new_tokens, temperature, seed]}
    )
    generated_text = r.json()["data"][0]
    generations.append({model_name: generated_text})


def generate_code_threads(
    generations, models, gen_prompt, max_new_tokens, temperature, seed
):
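    """Generate code with all selected models in parallel, one thread per model.

    The requests are network-bound, so threads overlap the Space calls;
    join() blocks until every model has responded (or failed).
    """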
    threads = []
    for model_name in models:
        # create the thread
        threads.append(
            threading.Thread(
                target=generate_code,
                args=(
                    generations,
                    model_name,
                    gen_prompt,
                    max_new_tokens,
                    temperature,
                    seed,
                ),
            )
        )
        threads[-1].start()

    for t in threads:
        t.join()

@st.cache(show_spinner=False)
def generate_teaser(gen_prompt):
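    """One-shot CodeParrot call for the intro teaser; cached so repeated
    prompts don't re-hit the Space (8 tokens, temperature 0.2, seed 42)."""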
    generations = []
    generate_code(generations, "CodeParrot", gen_prompt, 8, 0.2, 42)
    return generations[0]["CodeParrot"]
    
st.set_page_config(page_icon=":laptop:", layout="wide")
with open("utils/table_contents.md", "r") as f:
    contents = f.read()

st.sidebar.markdown(contents)

# Introduction
st.title("Code generation with 🤗")
read_markdown("utils/summary.md")
## teaser
example_text = "def print_hello_world():"
col1, col2, col3 = st.columns([1, 2, 1])
with col2:
    gen_prompt = st.text_area(
        "",
        value=example_text,
        height=100,
    ).strip()
    if st.button("Generate code!", key=1):
        with st.spinner("Generating code..."):
            st.code(generate_teaser(gen_prompt)) 
read_markdown("utils/intro.md")

# Code datasets
st.subheader("1 - Code datasets")
read_markdown("datasets/intro.md")
read_markdown("datasets/github_code.md")
col1, col2 = st.columns([1, 2])
with col1:
    selected_model = st.selectbox("", MODELS, key=1)
read_markdown(f"datasets/{selected_model.lower()}.md")


# Model architecture
st.subheader("2 - Model architecture")
read_markdown("architectures/intro.md")
col1, col2 = st.columns([1, 2])
with col1:
    selected_model = st.selectbox("", MODELS, key=2)
read_markdown(f"architectures/{selected_model.lower()}.md")

# Model evaluation
st.subheader("3 - Code model evaluation")
read_markdown("evaluation/intro.md")
read_markdown("evaluation/demo_humaneval.md")
## quiz
st.markdown("Below you can try solving this problem or visualize the solution of CodeParrot:")
with open("evaluation/problem.md", "r") as f:
    problem = f.read()
with open("evaluation/solution.md", "r") as f:
    solution = f.read()
    
candidate_solution = st.text_area(
    "Complete the problem:",
    value=problem,
    height=240,
).strip()
if st.button("Test my solution", key=2):
    with st.spinner("Testing..."):
        code_eval, test_func = load_evaluation()
        test_cases = [test_func]
        candidates = [[candidate_solution]]
        pass_at_k, _ = code_eval.compute(references=test_cases, predictions=candidates)
        text = "Your solution didn't pass the test, pass@1 is 0 πŸ˜•" if pass_at_k['pass@1'] < 1  else "Congrats your pass@1 is 1! πŸŽ‰"
        st.markdown(text)
if st.button("Show model solution", key=3):
    st.markdown(solution)
    
# Code generation
st.subheader("4 - Code generation ✨")
read_markdown("generation/intro.md")
col1, col2, col3 = st.columns([7, 1, 6])
with col1:
    st.markdown("**Models**")
    selected_models = st.multiselect(
        "Select code generation models to compare:",
        GENERATION_MODELS,
        default=GENERATION_MODELS,
        key=3,
    )
    st.markdown(" ")
    st.markdown("**Examples**")
    examples = load_examples()
    example_names = [example["name"] for example in examples]
    name2id = {name: i for i, name in enumerate(example_names)}
    selected_example = st.selectbox(
        "Select one of the following examples or implement yours:", example_names
    )
    example_text = examples[name2id[selected_example]]["value"]
    default_length = examples[name2id[selected_example]]["length"]
with col3:
    st.markdown("**Generation settings**")
    temperature = st.slider(
        "Temperature:", value=0.2, min_value=0.1, step=0.1, max_value=2.0
    )
    max_new_tokens = st.slider(
        "Number of tokens to generate:",
        value=default_length,
        min_value=8,
        step=4,
        max_value=256,
    )
    seed = st.slider("Random seed:", value=42, min_value=0, step=1, max_value=1000)
gen_prompt = st.text_area(
    "Generate code with prompt:",
    value=example_text,
    height=200,
).strip()
if st.button("Generate code!", key=4):
    with st.spinner("Generating code..."):
        # use threading
        generations = []
        generate_code_threads(
            generations,
            selected_models,
            gen_prompt=gen_prompt,
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            seed=seed,
        )
        # threads finish in any order, so display results in the order the
        # models were selected and look each model up in the result dicts
        for model_name in selected_models:
            for generation in generations:
                if model_name in generation:
                    st.markdown(f"**{model_name}**")
                    st.code(generation[model_name])
        if len(generations) < len(selected_models):
            st.markdown("<span style='color:red'>Warning: Some models run into timeout, try another time or reduce the Number of tokens to generate. You can also try generating code using the original subspaces: [InCoder](https://huggingface.co/spaces/loubnabnl/incoder-subspace), [CodeGen](https://huggingface.co/spaces/loubnabnl/codegen-subspace), [CodeParrot](https://huggingface.co/spaces/loubnabnl/codeparrot-subspace)</span>", unsafe_allow_html=True)

# Resources
st.subheader("Resources")
read_markdown("utils/resources.md")