Bartlomiej Lewandowski committed on
Commit
7956fdc
1 Parent(s): 307b00f

initial commit

Browse files
Files changed (3) hide show
  1. README.md +5 -5
  2. app.py +58 -0
  3. requirements.txt +3 -0
README.md CHANGED
@@ -1,10 +1,10 @@
1
  ---
2
- title: Gpt2 Test Subspace
3
- emoji:
4
- colorFrom: green
5
- colorTo: indigo
6
  sdk: gradio
7
- sdk_version: 3.0.10
8
  app_file: app.py
9
  pinned: false
10
  ---
1
  ---
2
+ title: Codeparrot Subspace
3
+ emoji: 🦜
4
+ colorFrom: blue
5
+ colorTo: purple
6
  sdk: gradio
7
+ sdk_version: 3.0.4
8
  app_file: app.py
9
  pinned: false
10
  ---
app.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Gradio demo for code generation with CodeParrot.
# Original, more flexible demo: https://huggingface.co/spaces/lvwerra/codeparrot-generation
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
from transformers import pipeline

#https://huggingface.co/spaces/lvwerra/codeparrot-generation

# UI strings shown at the top of the Gradio page.
title = "CodeParrot Generator 🦜"
description = "This is a subspace to make code generation with [CodeParrot](https://huggingface.co/lvwerra/codeparrot), it is used in a larger [space](https://huggingface.co/spaces/loubnabnl/Code-generation-models-v1) for model comparison. For more flexibilty in sampling, you can find another demo for CodeParrot [here](https://huggingface.co/spaces/lvwerra/codeparrot-generation)."

# Example rows for the Gradio UI; each row is [prompt, max_tokens, temperature, seed],
# matching the parameter order of code_generation below.
example = [
    ["def print_hello_world():", 8, 0.6, 42],
    ["def get_file_size(filepath):", 24, 0.6, 42],
    ["def count_lines(filename):", 40, 0.6, 42],
    ["def count_words(filename):", 40, 0.6, 42]]

# Load tokenizer and model once at import time so every request reuses them.
# low_cpu_mem_usage=True reduces peak RAM while the checkpoint is loaded.
tokenizer = AutoTokenizer.from_pretrained("lvwerra/codeparrot")
model = AutoModelForCausalLM.from_pretrained("lvwerra/codeparrot", low_cpu_mem_usage=True)
16
+
17
+
18
def code_generation(gen_prompt, max_tokens, temperature=0.6, seed=42):
    """Generate a code completion for *gen_prompt* with CodeParrot.

    Args:
        gen_prompt: Code prefix to complete.
        max_tokens: Number of new tokens to sample. Gradio sliders may
            deliver this as a float, so it is cast to int before use.
        temperature: Sampling temperature; higher is more random.
        seed: RNG seed for reproducible sampling (cast to int — set_seed
            rejects floats).

    Returns:
        The prompt followed by the generated continuation, as one string.
    """
    set_seed(int(seed))
    # NOTE(review): the pipeline is rebuilt on every request; constructing it
    # once at module level would cut per-call latency. Kept here to preserve
    # the original module-level interface.
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    generated_text = pipe(
        gen_prompt,
        do_sample=True,
        top_p=0.95,
        temperature=temperature,
        max_new_tokens=int(max_tokens),
    )[0]['generated_text']
    return generated_text
23
+
24
+
25
# Build the demo UI. The original mixed the modern component API (gr.Textbox)
# with the deprecated pre-3.x one (gr.inputs.Slider / default=); unify on the
# top-level gr.* components with value= for consistency.
iface = gr.Interface(
    fn=code_generation,
    inputs=[
        gr.Textbox(lines=10, label="Input code"),
        gr.Slider(
            minimum=8,
            maximum=256,
            step=1,
            value=8,
            label="Number of tokens to generate",
        ),
        gr.Slider(
            minimum=0,
            maximum=2,
            step=0.1,
            value=0.6,
            label="Temperature",
        ),
        gr.Slider(
            minimum=0,
            maximum=1000,
            step=1,
            value=42,
            label="Random seed to use for the generation",
        ),
    ],
    outputs=gr.Textbox(label="Predicted code", lines=10),
    examples=example,
    layout="horizontal",
    theme="peach",
    description=description,
    title=title,
)
iface.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ git+https://github.com/huggingface/transformers
2
+ accelerate
3
+ torch