loubnabnl HF staff commited on
Commit
1d9a2e3
0 Parent(s):

Duplicate from codeparrot/code-explainer

Browse files
Files changed (4) hide show
  1. .gitattributes +31 -0
  2. README.md +14 -0
  3. app.py +66 -0
  4. requirements.txt +2 -0
.gitattributes ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ftz filter=lfs diff=lfs merge=lfs -text
6
+ *.gz filter=lfs diff=lfs merge=lfs -text
7
+ *.h5 filter=lfs diff=lfs merge=lfs -text
8
+ *.joblib filter=lfs diff=lfs merge=lfs -text
9
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
10
+ *.model filter=lfs diff=lfs merge=lfs -text
11
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
12
+ *.npy filter=lfs diff=lfs merge=lfs -text
13
+ *.npz filter=lfs diff=lfs merge=lfs -text
14
+ *.onnx filter=lfs diff=lfs merge=lfs -text
15
+ *.ot filter=lfs diff=lfs merge=lfs -text
16
+ *.parquet filter=lfs diff=lfs merge=lfs -text
17
+ *.pickle filter=lfs diff=lfs merge=lfs -text
18
+ *.pkl filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pt filter=lfs diff=lfs merge=lfs -text
21
+ *.pth filter=lfs diff=lfs merge=lfs -text
22
+ *.rar filter=lfs diff=lfs merge=lfs -text
23
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
24
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
25
+ *.tflite filter=lfs diff=lfs merge=lfs -text
26
+ *.tgz filter=lfs diff=lfs merge=lfs -text
27
+ *.wasm filter=lfs diff=lfs merge=lfs -text
28
+ *.xz filter=lfs diff=lfs merge=lfs -text
29
+ *.zip filter=lfs diff=lfs merge=lfs -text
30
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
31
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Code Explainer
3
+ emoji: 🪞
4
+ colorFrom: red
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: 3.0.24
8
+ app_file: app.py
9
+ pinned: false
10
+ license: apache-2.0
11
+ duplicated_from: codeparrot/code-explainer
12
+ ---
13
+
14
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed, pipeline


# UI copy for the Gradio page.
title = "Code Explainer"
# NOTE: the trailing backslash continues the string literal, so the description
# is a single line at runtime (the continuation's leading space is part of it).
description = "This is a space to convert Python code into english text explaining what it does using [codeparrot-small-code-to-text](https://huggingface.co/codeparrot/codeparrot-small-code-to-text),\
 a code generation model for Python finetuned on [github-jupyter-code-to-text](https://huggingface.co/datasets/codeparrot/github-jupyter-code-to-text) a dataset of Python code followed by a docstring explaining it, the data was originally extracted from Jupyter notebooks."

# Canned code snippets shown as clickable examples in the UI.
EXAMPLE_1 = "def sort_function(arr):\n n = len(arr)\n \n # Traverse through all array elements\n for i in range(n):\n \n # Last i elements are already in place\n for j in range(0, n-i-1):\n \n # traverse the array from 0 to n-i-1\n # Swap if the element found is greater\n # than the next element\n if arr[j] > arr[j+1]:\n arr[j], arr[j+1] = arr[j+1], arr[j]"
EXAMPLE_2 = "from sklearn import model_selection\nX_train, X_test, Y_train, Y_test = model_selection.train_test_split(X, Y, test_size=0.2)"
# NOTE(review): EXAMPLE_3 is not valid Python — the def line is missing ':' and
# the body reads `filename` while the parameter is `file`. Possibly intentional
# demo content for the explainer; confirm before "fixing" the snippet.
EXAMPLE_3 = "def load_text(file)\n with open(filename, 'r') as f:\n text = f.read()\n return text"
# Each row is [code, max_tokens, temperature, seed], matching the order of the
# Interface inputs below.
example = [
    [EXAMPLE_1, 32, 0.6, 42],
    [EXAMPLE_2, 16, 0.6, 42],
    [EXAMPLE_3, 11, 0.2, 42],
]
# Checkpoint finetuned for code-to-text explanation; named once so the
# tokenizer and model are guaranteed to come from the same repo.
_CHECKPOINT = "codeparrot/codeparrot-small-code-to-text"

tokenizer = AutoTokenizer.from_pretrained(_CHECKPOINT)
model = AutoModelForCausalLM.from_pretrained(_CHECKPOINT)
# Shared text-generation pipeline, built lazily on the first request so module
# import stays cheap and the pipeline is not re-created on every call (the
# original rebuilt it per request, which is pure overhead).
_pipe = None


def make_docstring(gen_prompt):
    """Append the docstring opener that steers the model toward an explanation.

    The model was finetuned on Python code followed by a triple-quoted
    docstring beginning with "Explanation:", so appending that suffix prompts
    it to explain ``gen_prompt`` in English.
    """
    # Plain concatenation; the original used an f-string with no placeholders.
    return gen_prompt + '\n\n"""\nExplanation:'


# Backward-compatible alias preserving the original (typo'd) public name.
make_doctring = make_docstring


def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42):
    """Generate an English explanation for the given Python code.

    Args:
        gen_prompt: Python source code to explain.
        max_tokens: number of new tokens to sample.
        temperature: sampling temperature for nucleus sampling.
        seed: random seed for reproducible sampling.

    Returns:
        The prompt (code + docstring opener) followed by the generated text.
    """
    global _pipe
    # Gradio sliders may deliver floats; transformers' set_seed requires int.
    set_seed(int(seed))
    if _pipe is None:
        _pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    prompt = make_docstring(gen_prompt)
    generated_text = _pipe(
        prompt,
        do_sample=True,
        top_p=0.95,
        temperature=temperature,
        max_new_tokens=int(max_tokens),
    )[0]["generated_text"]
    return generated_text
# Gradio UI: a code textbox plus three sampling controls, wired to
# code_generation; inputs are ordered to match the `example` rows above.
# The original mixed the 3.x component API (gr.Textbox) with the deprecated
# gr.inputs.* namespace and its `default=` kwarg; unified here on gr.Slider
# with `value=`, which is the supported form in the pinned gradio 3.0.24.
iface = gr.Interface(
    fn=code_generation,
    inputs=[
        gr.Textbox(lines=10, label="Python code"),
        gr.Slider(
            minimum=8,
            maximum=256,
            step=1,
            value=8,
            label="Number of tokens to generate",
        ),
        gr.Slider(
            minimum=0,
            maximum=2.5,
            step=0.1,
            value=0.6,
            label="Temperature",
        ),
        gr.Slider(
            minimum=0,
            maximum=1000,
            step=1,
            value=42,
            label="Random seed to use for the generation",
        ),
    ],
    outputs=gr.Textbox(label="Predicted explanation", lines=10),
    examples=example,
    layout="horizontal",
    theme="peach",
    description=description,
    title=title,
)
iface.launch()
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ transformers==4.19.0
2
+ torch==1.11.0