Bartlomiej Lewandowski committed · Commit 7787576
Parent(s): 78208dd
initial commit

Files changed:
- .gitignore +1 -0
- README.md +28 -7
- app.py +154 -0
- architectures/codegen.txt +25 -0
- architectures/codeparrot.txt +33 -0
- architectures/incoder.txt +31 -0
- architectures/intro.txt +2 -0
- architectures/polycoder.txt +14 -0
- datasets/.ipynb_checkpoints/codeparrot-checkpoint.txt +9 -0
- datasets/.ipynb_checkpoints/opt-checkpoint.txt +2 -0
- datasets/codegen.txt +17 -0
- datasets/codeparrot.txt +9 -0
- datasets/github_code.txt +22 -0
- datasets/incoder.txt +13 -0
- datasets/intro.txt +3 -0
- datasets/polycoder.txt +5 -0
- evaluation/demo_humaneval.txt +83 -0
- evaluation/intro.txt +25 -0
- generation/intro.txt +4 -0
- requirements.txt +2 -0
- utils/.ipynb_checkpoints/intro-checkpoint.txt +5 -0
- utils/data_preview.csv +408 -0
- utils/examples.json +43 -0
- utils/intro.txt +8 -0
- utils/resources.txt +6 -0
- utils/table_contents.txt +18 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+.idea
README.md
CHANGED
@@ -1,12 +1,33 @@
 ---
-title:
-emoji:
-colorFrom:
-colorTo:
-sdk:
-sdk_version:
+title: Code generation with 🤗
+emoji: ✨
+colorFrom: purple
+colorTo: yellow
+sdk: streamlit
+sdk_version: 1.9.0
 app_file: app.py
 pinned: false
+license: apache-2.0
 ---
 
-
+# Configuration
+`title`: _string_
+Display title for the Space
+`emoji`: _string_
+Space emoji (emoji-only character allowed)
+`colorFrom`: _string_
+Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+`colorTo`: _string_
+Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+`sdk`: _string_
+Can be either `gradio` or `streamlit`
+`sdk_version`: _string_
+Only applicable for `streamlit` SDK.
+See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
+
+`app_file`: _string_
+Path to your main application file (which contains either `gradio` or `streamlit` Python code).
+Path is relative to the root of the repository.
+
+`pinned`: _boolean_
+Whether the Space stays on top of your list.
app.py
ADDED
@@ -0,0 +1,154 @@
+import json
+import pandas as pd
+import requests
+import threading
+import streamlit as st
+
+
+MODELS = ["CodeParrot", "InCoder", "CodeGen", "PolyCoder"]
+GENERATION_MODELS = ["CodeParrot", "InCoder", "CodeGen"]
+
+
+@st.cache()
+def load_examples():
+    with open("utils/examples.json", "r") as f:
+        examples = json.load(f)
+    return examples
+
+
+def read_markdown(path):
+    with open(path, "r") as f:
+        output = f.read()
+    st.markdown(output, unsafe_allow_html=True)
+
+
+def generate_code(
+    generations, model_name, gen_prompt, max_new_tokens, temperature, seed
+):
+    # call space using its API endpoint
+    url = (
+        f"https://hf.space/embed/loubnabnl/{model_name.lower()}-subspace/+/api/predict/"
+    )
+    r = requests.post(
+        url=url, json={"data": [gen_prompt, max_new_tokens, temperature, seed]}
+    )
+    generated_text = r.json()["data"][0]
+    generations.append(generated_text)
+
+
+def generate_code_threads(
+    generations, models, gen_prompt, max_new_tokens, temperature, seed
+):
+    threads = []
+    for model_name in models:
+        # create the thread
+        threads.append(
+            threading.Thread(
+                target=generate_code,
+                args=(
+                    generations,
+                    model_name,
+                    gen_prompt,
+                    max_new_tokens,
+                    temperature,
+                    seed,
+                ),
+            )
+        )
+        threads[-1].start()
+
+    for t in threads:
+        t.join()
+
+
+st.set_page_config(page_icon=":laptop:", layout="wide")
+with open("utils/table_contents.txt", "r") as f:
+    contents = f.read()
+st.sidebar.markdown(contents)
+
+# Introduction
+st.title("Code generation with 🤗")
+read_markdown("utils/intro.txt")
+
+# Code datasets
+st.subheader("1 - Code datasets")
+read_markdown("datasets/intro.txt")
+read_markdown("datasets/github_code.txt")
+col1, col2 = st.columns([1, 2])
+with col1:
+    selected_model = st.selectbox("", MODELS, key=1)
+read_markdown(f"datasets/{selected_model.lower()}.txt")
+
+
+# Model architecture
+st.subheader("2 - Model architecture")
+read_markdown("architectures/intro.txt")
+col1, col2 = st.columns([1, 2])
+with col1:
+    selected_model = st.selectbox("", MODELS, key=2)
+read_markdown(f"architectures/{selected_model.lower()}.txt")
+
+# Model evaluation
+st.subheader("3 - Code models evaluation")
+read_markdown("evaluation/intro.txt")
+read_markdown("evaluation/demo_humaneval.txt")
+
+# Code generation
+st.subheader("4 - Code generation ✨")
+read_markdown("generation/intro.txt")
+col1, col2, col3 = st.columns([7, 1, 6])
+with col1:
+    st.markdown("**Models**")
+    selected_models = st.multiselect(
+        "Select code generation models to compare:",
+        GENERATION_MODELS,
+        default=["CodeParrot"],
+        key=3,
+    )
+    st.markdown(" ")
+    st.markdown("**Examples**")
+    examples = load_examples()
+    example_names = [example["name"] for example in examples]
+    name2id = dict([(name, i) for i, name in enumerate(example_names)])
+    selected_example = st.selectbox(
+        "Select one of the following examples or implement yours:", example_names
+    )
+    example_text = examples[name2id[selected_example]]["value"]
+    default_length = examples[name2id[selected_example]]["length"]
+with col3:
+    st.markdown("**Generation settings**")
+    temperature = st.slider(
+        "Temperature:", value=0.2, min_value=0.0, step=0.1, max_value=2.0
+    )
+    max_new_tokens = st.slider(
+        "Number of tokens to generate:",
+        value=default_length,
+        min_value=8,
+        step=4,
+        max_value=256,
+    )
+    seed = st.slider("Random seed:", value=42, min_value=0, step=1, max_value=1000)
+gen_prompt = st.text_area(
+    "Generate code with prompt:",
+    value=example_text,
+    height=200,
+).strip()
+if st.button("Generate code!"):
+    with st.spinner("Generating code..."):
+        # use threading
+        generations = []
+        generate_code_threads(
+            generations,
+            selected_models,
+            gen_prompt=gen_prompt,
+            max_new_tokens=max_new_tokens,
+            temperature=temperature,
+            seed=seed,
+        )
+        for i in range(len(generations)):
+            st.markdown(f"**{selected_models[i]}**")
+            st.code(generations[i])

+# Resources
+st.subheader("Resources")
+read_markdown("utils/resources.txt")
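
One detail worth knowing about the threaded fan-out in `generate_code_threads` above: each worker appends to the shared `generations` list as it finishes, so results arrive in completion order, which the display loop then pairs with `selected_models` by index. A sketch of an order-preserving variant (our own illustration, not part of the commit):

```python
def generate_code_ordered(models, gen_prompt, max_new_tokens, temperature, seed):
    """Fan out one thread per model and return results in model order."""
    results = [None] * len(models)

    def worker(i, model_name):
        out = []
        generate_code(out, model_name, gen_prompt, max_new_tokens, temperature, seed)
        results[i] = out[0]  # slot the result at the model's own index

    threads = [
        threading.Thread(target=worker, args=(i, m)) for i, m in enumerate(models)
    ]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    return results
```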
architectures/codegen.txt
ADDED
@@ -0,0 +1,25 @@
+The [CodeGen](https://huggingface.co/Salesforce/codegen-16B-mono) architecture follows a standard transformer decoder with left-to-right causal masking, rotary position embeddings [(Su et al., 2021)](https://arxiv.org/abs/2104.09864), and a context length of 2048. CodeGen models are trained in various sizes.
+
+<div align="center">
+
+| Model | # parameters |
+| - | - |
+| Decoder | 350M |
+| Decoder | 2.7B |
+| Decoder | 6.1B |
+| Decoder | 16.1B |
+
+</div>
+
+
+You can load the model and tokenizer directly from [`transformers`](https://huggingface.co/docs/transformers/index):
+
+```python
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+tokenizer = AutoTokenizer.from_pretrained('Salesforce/codegen-16B-mono')
+model = AutoModelForCausalLM.from_pretrained('Salesforce/codegen-16B-mono')
+
+inputs = tokenizer("def hello_world():", return_tensors="pt")
+outputs = model(**inputs)
+```
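
The snippet above only runs a single forward pass; to actually sample a completion you can call `generate` on the same model and tokenizer — a minimal sketch (greedy decoding, our own addition):

```python
# turn the forward pass above into an actual completion
generated = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(generated[0]))
```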
architectures/codeparrot.txt
ADDED
@@ -0,0 +1,33 @@
+[CodeParrot](https://huggingface.co/lvwerra/codeparrot) uses a GPT-2 architecture with a BPE tokenizer trained on Python code from the training split of the data, and a context length of 1024. We released this model as an educational tool for training large language models from scratch on code, with detailed tutorials and descriptions of the training process. It makes use of 🤗 [`accelerate`](https://huggingface.co/docs/accelerate/index) for distributed training and mixed precision. See this [blog](https://huggingface.co/blog/codeparrot) and [repo](https://github.com/huggingface/transformers/tree/main/examples/research_projects/codeparrot) for more details.
+
+<div align="center">
+
+| Model | # parameters |
+| - | - |
+| GPT2 | 110M |
+| GPT2 | 1.5B |
+
+</div>
+
+
+You can load the model and tokenizer directly from 🤗 [`transformers`](https://huggingface.co/docs/transformers/index):
+
+```python
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+tokenizer = AutoTokenizer.from_pretrained("lvwerra/codeparrot")
+model = AutoModelForCausalLM.from_pretrained("lvwerra/codeparrot")
+
+inputs = tokenizer("def hello_world():", return_tensors="pt")
+outputs = model(**inputs)
+
+```
+
+You can also use `pipeline` to generate code:
+
+```python
+from transformers import pipeline
+
+pipe = pipeline("text-generation", model="lvwerra/codeparrot")
+outputs = pipe("def hello_world():")
+```
architectures/incoder.txt
ADDED
@@ -0,0 +1,31 @@
+[InCoder](https://huggingface.co/facebook/incoder-6B) uses a decoder-only Transformer with a Causal Masking objective to train a left-to-right language model that can also fill in masked token segments, with a context length of 2048.
+<div align="center">
+
+| Model | # parameters |
+| - | - |
+| Decoder | 1.3B |
+| Decoder | 6.7B |
+
+</div>
+
+The [Causal Masking objective](https://arxiv.org/abs/2201.07520) is a hybrid of causal and masked language models: "it combines the benefit of per-token generation with optional bi-directionality specifically tailored to prompting".
+During the training of InCoder, spans of code were randomly masked and moved to the end of each file, which allows for bidirectional context. The figure below from the InCoder [paper](https://arxiv.org/pdf/2204.05999.pdf) illustrates the training process.
+
+<p align="center">
+    <img src="https://huggingface.co/datasets/loubnabnl/repo-images/raw/main/incoder.png" alt="drawing" width="750"/>
+</p>
+
+So in addition to program synthesis (via left-to-right generation), InCoder can also perform editing (via infilling). The model gives promising results on some zero-shot code infilling tasks, such as type prediction, variable renaming and comment generation.
+
+You can load the model and tokenizer directly from [`transformers`](https://huggingface.co/docs/transformers/index):
+
+```python
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+tokenizer = AutoTokenizer.from_pretrained("facebook/incoder-6B")
+model = AutoModelForCausalLM.from_pretrained("facebook/incoder-6B")
+
+inputs = tokenizer("def hello_world():", return_tensors="pt")
+outputs = model(**inputs)
+
+```
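
As a sketch of what infilling looks like in practice: the `<|mask:0|>` sentinel below follows the prompt format described in the InCoder repository, but treat the exact format as an assumption to verify against the model card before relying on it.

```python
# hedged sketch: mark the span to fill with a sentinel, repeat the sentinel
# at the end, and let the model generate the missing function body
infill_prompt = "def count_lines(path):\n<|mask:0|>\n    return n\n<|mask:0|>"
inputs = tokenizer(infill_prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0]))
```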
architectures/intro.txt
ADDED
@@ -0,0 +1,2 @@
+Various architectures are used in code generation models, but most of them use the auto-regressive left-to-right setting, such as GPT. However, InCoder used a decoder-only Transformer with a Causal Masking objective
+that combines both next-token prediction and bidirectional context through masking. AlphaCode used an encoder-decoder architecture. For model-specific information about the architecture, please select a model below:
architectures/polycoder.txt
ADDED
@@ -0,0 +1,14 @@
+[PolyCoder](https://github.com/VHellendoorn/Code-LMs) uses a GPT-2 architecture with a BPE tokenizer trained on a random 5% subset of the data (all languages), and a context length of 2048. To study the effect of scaling the model size, the model was trained in 3 different sizes.
+
+<div align="center">
+
+| Model | # parameters |
+| - | - |
+| GPT2 | 160M |
+| GPT2 | 400M |
+| GPT2 | 2.7B |
+
+</div>
+
+
+PolyCoder is currently being integrated into `transformers`. Meanwhile, it can be loaded following the instructions in the original GitHub [repo](https://github.com/vhellendoorn/code-lms#models).
datasets/.ipynb_checkpoints/codeparrot-checkpoint.txt
ADDED
@@ -0,0 +1,9 @@
+[CodeParrot](https://huggingface.co/lvwerra/codeparrot) was trained on **50GB** of Python data from GitHub repositories: [CodeParrot dataset](https://huggingface.co/datasets/lvwerra/codeparrot-clean). The original dataset contains a lot of duplicated and noisy data. Therefore, the dataset was cleaned with the following steps:
+- Exact match deduplication
+- Filtering:
+    - Average line length < 100
+    - Maximum line length < 1000
+    - Alphanumeric characters fraction > 0.25
+    - Remove auto-generated files (keyword search)
+
+For more details see the preprocessing script in the transformers repository [here](https://github.com/huggingface/transformers/tree/master/examples/research_projects/codeparrot).
datasets/.ipynb_checkpoints/opt-checkpoint.txt
ADDED
@@ -0,0 +1,2 @@
+[OPT](https://huggingface.co/facebook/opt-30b) was trained on five filtered datasets of textual documents. One of them, [The Pile](https://arxiv.org/pdf/2101.00027v1.pdf), includes code; from it OPT used *Pile-CC, OpenWebText2, USPTO, Project Gutenberg, OpenSubtitles, Wikipedia, DM Mathematics and HackerNews*.
+The final training data contains 180B tokens, corresponding to 800GB of data. For more details please refer to this [paper](https://arxiv.org/abs/2205.01068).
datasets/codegen.txt
ADDED
@@ -0,0 +1,17 @@
+[CodeGen](https://huggingface.co/Salesforce/codegen-16B-mono) is a model for conversational program synthesis, where each problem is interactively solved in multiple steps, each consisting of a natural language specification from the user and a synthesized subprogram from the system.
+
+It was sequentially trained on three datasets:
+- [The Pile](https://huggingface.co/datasets/the_pile)
+- A 341GB subset of Google's [BigQuery dataset](https://cloud.google.com/blog/topics/public-datasets/github-on-bigquery-analyze-all-the-open-source-code) of code files from multiple programming languages, keeping only 6: C, C++, Go, Java, JavaScript, and Python
+- 217GB of Python data from GitHub repositories
+
+The second and third datasets used the following preprocessing:
+- Exact match deduplication
+- Filtering:
+    - Exact match deduplication
+    - Average line length < 100 characters
+    - Maximum line length < 1000 characters
+    - Removing files where more than 90% of the characters are decimal or hexadecimal digits
+
+**Remark**:
+The reported data sizes are after preprocessing.
datasets/codeparrot.txt
ADDED
@@ -0,0 +1,9 @@
+[CodeParrot](https://huggingface.co/lvwerra/codeparrot) is a code generation model trained on **50GB** of Python data, after preprocessing, from GitHub repositories: [CodeParrot dataset](https://huggingface.co/datasets/lvwerra/codeparrot-clean). The original dataset contains a lot of duplicated and noisy data. Therefore, the dataset was cleaned with the following steps:
+- Exact match deduplication
+- Filtering:
+    - Average line length < 100 characters
+    - Maximum line length < 1000 characters
+    - Alphanumeric characters fraction > 0.25
+    - Remove auto-generated files (keyword search)
+
+For more details see the preprocessing script in the transformers repository [here](https://github.com/huggingface/transformers/tree/master/examples/research_projects/codeparrot).
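
To make these heuristics concrete, here is a toy sketch of how such per-file filters can be computed (our own illustration; the preprocessing script linked above is the reference implementation):

```python
def passes_filters(code: str) -> bool:
    """Toy version of the line-length and alphanumeric-fraction heuristics."""
    lines = code.splitlines()
    if not code or not lines:
        return False
    mean_len = sum(len(line) for line in lines) / len(lines)
    max_len = max(len(line) for line in lines)
    alnum_frac = sum(ch.isalnum() for ch in code) / len(code)
    return mean_len < 100 and max_len < 1000 and alnum_frac > 0.25

# example: a short, ordinary file passes; a one-line minified blob would not
print(passes_filters("def add(a, b):\n    return a + b\n"))
```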
datasets/github_code.txt
ADDED
@@ -0,0 +1,22 @@
+We also released the [GitHub code dataset](https://huggingface.co/datasets/lvwerra/github-code), with 1TB of code data from GitHub repositories in 32 programming languages. It was created from the public GitHub dataset on Google [BigQuery](https://cloud.google.com/blog/topics/public-datasets/github-on-bigquery-analyze-all-the-open-source-code). If you don't want to download the dataset because of memory limitations, it can be loaded in streaming mode, which creates an iterable dataset:
+
+```python
+from datasets import load_dataset
+
+ds = load_dataset("lvwerra/github-code", streaming=True, split="train")
+print(next(iter(ds)))
+
+# OUTPUT:
+{
+    'code': "import mod189 from './mod189';\nvar value=mod189+1;\nexport default value;\n",
+    'repo_name': 'MirekSz/webpack-es6-ts',
+    'path': 'app/mods/mod190.js',
+    'language': 'JavaScript',
+    'license': 'isc',
+    'size': 73
+}
+
+```
+You can see that in addition to the code, the samples include some metadata: repo name, path, language, license, and the size of the file.
+
+For model-specific information about the pretraining dataset, please select a model below:
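
The loading script also accepts filters — for example, a sketch restricting the stream to a single language (the `languages` argument is documented on the dataset card; verify the exact spelling there):

```python
from datasets import load_dataset

# stream only Python files; `languages` is a loading-script argument
ds = load_dataset("lvwerra/github-code", streaming=True, split="train",
                  languages=["Python"])
print(next(iter(ds))["repo_name"])
```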
datasets/incoder.txt
ADDED
@@ -0,0 +1,13 @@
+[InCoder](https://huggingface.co/facebook/incoder-6B) is a code generation model that also allows code editing via infilling. It was trained on **216 GB** of data, after preprocessing, from GitHub and Stack Overflow, covering 28 programming languages. 52 GB of the data is Python, 107 GB is other programming languages, and 57 GB is non-code content from Stack Overflow.
+
+The GitHub data used the following filtering:
+- Average line length < 100 characters
+- Maximum line length < 3000 characters
+- Alphanumeric characters fraction > 0.4
+- Remove auto-generated files (keyword search)
+
+The second component of the data consists of questions, answers, and comments from Stack Overflow. It includes:
+- all questions that have at least one answer
+- up to ten answers with a non-negative score (sorted by score) per question
+- up to five comments per question/answer
+Exact match deduplication was performed on code files. For more details please refer to this [paper](https://arxiv.org/pdf/2204.05999.pdf).
datasets/intro.txt
ADDED
@@ -0,0 +1,3 @@
+Most code models are trained on data from public software repositories hosted on GitHub. Some also include code coupled with natural text, from Stack Overflow for example. Additional datasets can be crafted based on the target task of the model. [AlphaCode](https://arxiv.org/pdf/2203.07814v1.pdf), for instance, was fine-tuned on [CodeContests](https://github.com/deepmind/code_contests), a competitive programming dataset for machine learning. Another popular dataset is [The Pile](https://huggingface.co/datasets/the_pile), a large corpus containing both natural language text and code from different sources, such as StackExchange dumps and popular (>100 stars) GitHub repositories. It can be efficient for models intended to translate between natural text and code in either direction; it was used in [CodeGen](https://arxiv.org/pdf/2203.13474.pdf), for instance.
+
+Some other useful datasets available on the 🤗 hub are [CodeSearchNet](https://huggingface.co/datasets/code_search_net), a corpus of 2 million (comment, code) pairs from open-source libraries hosted on GitHub for several programming languages, and [Mostly Basic Python Problems (mbpp)](https://huggingface.co/datasets/mbpp), a benchmark of around 1,000 crowd-sourced Python programming problems for entry-level programmers, where each problem consists of a task description, a code solution and 3 automated test cases. This dataset was used in the [InCoder](https://huggingface.co/facebook/incoder-6B) evaluation, in addition to [HumanEval](https://huggingface.co/datasets/openai_humaneval), which we will present later.
datasets/polycoder.txt
ADDED
@@ -0,0 +1,5 @@
+The [PolyCoder paper](https://arxiv.org/pdf/2202.13169v3.pdf) gives a nice comparison of existing code models. The authors also trained a code generation model on **254GB** of data, after preprocessing, consisting of popular repositories for 12 popular programming languages with at least 50 stars, collected from GitHub in October 2021. The data used the following preprocessing:
+- Exact match deduplication
+- Filtering:
+    - Average line length < 100 characters
+    - Maximum line length < 1000 characters
evaluation/demo_humaneval.txt
ADDED
@@ -0,0 +1,83 @@
+
+We can load the HumanEval dataset and the pass@k metric from 🤗 [`datasets`](https://huggingface.co/docs/datasets/index):
+
+```python
+from datasets import load_dataset, load_metric
+
+human_eval = load_dataset("openai_humaneval")
+code_eval_metric = load_metric("code_eval")
+```
+
+We can easily compute the pass@k for a problem that asks for the implementation of a function that sums two integers:
+
+```python
+test_cases = ["assert add(2,3)==5"]
+candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
+pass_at_k, results = code_eval_metric.compute(references=test_cases, predictions=candidates, k=[1, 2])
+print(pass_at_k)
+{'pass@1': 0.5, 'pass@2': 1.0}
+```
+
+To better understand how the pass@k metric works, we will illustrate it with some concrete examples. We select two problems from the HumanEval dataset and see how CodeParrot 🦜 (110M) performs and which code completions pass the unit tests of the two problems below:
+
+**Problem 1:**
+
+```python
+
+from typing import List
+
+
+def separate_paren_groups(paren_string: str) -> List[str]:
+    """ Input to this function is a string containing multiple groups of nested parentheses. Your goal is to
+    separate those group into separate strings and return the list of those.
+    Separate groups are balanced (each open brace is properly closed) and not nested within each other
+    Ignore any spaces in the input string.
+    >>> separate_paren_groups('( ) (( )) (( )( ))')
+    ['()', '(())', '(()())']
+    """
+```
+**Problem 2:**
+```python
+
+def truncate_number(number: float) -> float:
+    """ Given a positive floating point number, it can be decomposed into
+    and integer part (largest integer smaller than given number) and decimals
+    (leftover part always smaller than 1).
+
+    Return the decimal part of the number.
+    >>> truncate_number(3.5)
+    0.5
+    """
+```
+
+For each problem, instead of 200 candidate solutions, we will only generate 20 samples for illustration purposes. We use nucleus sampling with top-p where `p=0.95`, `temperature=0.2`, and sample tokens from the model until we encounter a stop sequence indicating the end of a method: '\nclass', '\ndef', '\n#', '\nif', or '\nprint'. For more details about decoding strategies for language generation, we recommend this [blog](https://huggingface.co/blog/how-to-generate).
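
A sketch of what that generation loop can look like with `transformers` — top-p sampling plus post-hoc truncation at the first stop sequence (our own illustration; `model`, `tokenizer`, and `prompt` are assumed to be set up as in the earlier snippets, and the stop words follow the list above):

```python
STOP_WORDS = ["\nclass", "\ndef", "\n#", "\nif", "\nprint"]

def truncate_at_stop(text: str) -> str:
    """Cut a decoded completion at the first stop sequence, if any."""
    cut = min((text.find(s) for s in STOP_WORDS if s in text), default=len(text))
    return text[:cut]

inputs = tokenizer(prompt, return_tensors="pt")
out = model.generate(**inputs, do_sample=True, top_p=0.95, temperature=0.2,
                     max_new_tokens=128)
# decoding echoes the prompt; slicing it off by character count is approximate
completion = tokenizer.decode(out[0])[len(prompt):]
print(truncate_at_stop(completion))
```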
+
+**Remark**:
+
+Regarding the temperature parameter: in the [CodeGen](https://github.com/salesforce/CodeGen) paper, the authors observed that the best-performing temperature increases as the number of permitted samples k increases. When a model is only allowed a few samples to pass the unit tests, it is beneficial to use the learned distribution, through a low temperature, to select candidates that are likely to pass. But when a model is allowed more chances with a high k, using a higher sampling temperature to tilt the learned model distribution lets it explore diverse samples and thus have a greater chance of synthesizing a correct program.
+
+
+For our experiment, we compute pass@1, pass@10 and pass@20, each corresponding to the unit-test pass rate when selecting respectively 1, 10 and 20 samples from the candidate solutions.
+
+```
+
+Results: {'pass@1': 0.0750, 'pass@10': 0.4473, 'pass@20': 0.5}
+
+```
+
+If we take a closer look at the unit test results for each candidate solution in the two problems, we find that 3 passed the test for the second problem, and none did for the first problem. This means that we have 3 correct solutions among 40, which corresponds to our pass@1 value `3/40 = 0.075`. The scores pass@10 and pass@20 are higher, because the more samples we select from the candidate completions, the more likely we are to include the correct implementation. As
+for pass@20, it is `1/2 = 0.5`: if we select all 20 candidates for each problem, the second problem gets solved, which gives a 50% success rate. If you are curious about the candidate solutions that passed the tests, they all implemented this function:
+
+```python
+
+def truncate_number(number: float) -> float:
+    """ Given a positive floating point number, it can be decomposed into
+    and integer part (largest integer smaller than given number) and decimals
+    (leftover part always smaller than 1).
+
+    Return the decimal part of the number.
+    >>> truncate_number(3.5)
+    0.5
+    """
+    return number % 1
+```
evaluation/intro.txt
ADDED
@@ -0,0 +1,25 @@
+A popular evaluation framework for code generation models is the [pass@k](https://huggingface.co/metrics/code_eval) metric on the [HumanEval](https://huggingface.co/datasets/openai_humaneval) dataset, introduced in the [Codex paper](https://arxiv.org/pdf/2107.03374v2.pdf). The dataset includes 164 handwritten programming problems. For the pass@k metric, k code samples are generated per problem, a problem is considered solved if any sample passes the unit tests, and the total fraction of problems solved is reported.
+In most papers, 200 candidate program completions are sampled, and pass@1, pass@10, and pass@100 are computed using an unbiased sampling estimator (see the sketch after the table below). Table 1 below shows the HumanEval scores of CodeParrot, InCoder, PolyCoder, CodeGen and Codex (not open-source).
+
+<div align="center">
+
+| Model | pass@1 | pass@10 | pass@100 |
+|-------|--------|---------|----------|
+| CodeParrot (110M) | 3.80% | 6.57% | 12.78% |
+| CodeParrot (1.5B) | 3.58% | 8.03% | 14.96% |
+|||||
+| InCoder (6.7B) | 15.2% | 27.8% | 47.00% |
+|||||
+| PolyCoder (160M) | 2.13% | 3.35% | 4.88% |
+| PolyCoder (400M) | 2.96% | 5.29% | 11.59% |
+| PolyCoder (2.7B) | 5.59% | 9.84% | 17.68% |
+|||||
+| CodeGen-Mono (350M) | 12.76% | 23.11% | 35.19% |
+| CodeGen-Mono (2.7B) | 23.70% | 36.64% | 57.01% |
+| CodeGen-Mono (16.1B) | **29.28%** | **49.86%** | **75.00%** |
+|||||
+| Codex (25M) | 3.21% | 7.1% | 12.89% |
+| Codex (300M) | 13.17% | 20.37% | 36.27% |
+| Codex (12B) | 28.81% | 46.81% | 72.31% |
+
+</div>
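
The unbiased estimator referenced above has a closed form: for a problem where n samples are drawn and c of them pass, pass@k = 1 − C(n−c, k)/C(n, k), averaged over problems. A minimal sketch of it (our own illustration of the published formula, following the Codex paper's reference implementation):

```python
import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k for one problem: n samples drawn, c of them correct."""
    if n - c < k:
        return 1.0
    # 1 - C(n-c, k) / C(n, k), computed stably as a running product
    return 1.0 - float(np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))

# example: 200 samples with 10 correct gives pass@1 = 10/200 = 0.05
print(pass_at_k(200, 10, k=1), pass_at_k(200, 10, k=100))
```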
generation/intro.txt
ADDED
@@ -0,0 +1,4 @@
+In this section you can prompt the following models to generate Python code: CodeParrot 1.5B, InCoder 6.7B and CodeGen 6.1B.
+
+* For CodeGen, there's a larger [model](https://huggingface.co/Salesforce/codegen-16B-mono) available on the 🤗 hub with 16.1B parameters, but we use the 6.1B version to have models of comparable size in this demo.
+* For InCoder, you can also try the original [demo](https://huggingface.co/spaces/facebook/incoder-demo), which has more tasks and examples.
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+git+https://github.com/huggingface/transformers
+torch
utils/.ipynb_checkpoints/intro-checkpoint.txt
ADDED
@@ -0,0 +1,5 @@
+In this space you can compare some of the features of code generation models:
+* Pretraining datasets
+* Model Architecture
+* Model evaluation
+You can also test their code generation capacities ✨.
utils/data_preview.csv
ADDED
@@ -0,0 +1,408 @@
+code,repo_name,path,language,license,size
+"/* { dg-do compile } */
+/* { dg-options ""-mavx512f -O2 -masm=att"" } */
+/* { dg-final { scan-assembler-times ""vmovss\[ \\t\]+\\(%\[a-z0-9,]*\\), %xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)"" 1 } } */
+/* { dg-final { scan-assembler-times ""vmovss\[ \\t\]+\\(%\[a-z0-9,]*\\), %xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)"" 1 } } */
+/* { dg-final { scan-assembler-times ""vmovss\[ \\t\]+%xmm\[0-9\]+, %xmm\[0-9\]+, %xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)"" 1 } } */
+/* { dg-final { scan-assembler-times ""vmovss\[ \\t\]+%xmm\[0-9\]+, %xmm\[0-9\]+, %xmm\[0-9\]+\{%k\[1-7\]\}\{z\}(?:\n|\[ \\t\]+#)"" 1 } } */
+/* { dg-final { scan-assembler-times ""vmovss\[ \\t\]+%xmm\[0-9\]+, \\(%\[a-z0-9,]*\\)\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)"" 1 } } */
+
+#include <immintrin.h>
+
+volatile __m128 x1, x2, x3;
+volatile __mmask8 m;
+float *volatile p;
+
+void extern
+avx512f_test (void)
+{
+  x1 = _mm_mask_load_ss (x1, m, p);
+  x1 = _mm_maskz_load_ss (m, p);
+  x1 = _mm_mask_move_ss (x1, m, x2, x3);
+  x1 = _mm_maskz_move_ss (m, x2, x3);
+  _mm_mask_store_ss (p, m, x1);
+}
+",Gurgel100/gcc,gcc/testsuite/gcc.target/i386/avx512f-vmovss-1.c,C,gpl-2.0,1037
+"from virtTrinity import picker
+from virtTrinity.providers.virsh_cmd import data
+from virtTrinity.providers.virsh_cmd.utils import virsh
+from virtTrinity.providers.virsh_cmd.picker.command import CmdPicker
+
+
+class OptSetPicker(picker.PickerBase):
+    depends_on = CmdPicker
+    data_type = data.VirshOptSet()
+
+    types = {
+        ""positive"": {
+            ""patterns"": None,
+            ""data_type"": data.OptSet(),
+        },
+        ""miss_dep"": {
+            ""patterns"": r""command '.*' requires .* option"",
+            ""data_type"": data.MissingDepOptSet(),
+        },
+        ""other"": {
+            ""patterns"": [
+                r""command '.*' doesn't support option --.*"",
+                # r""command or command group '.*' doesn't exist"",
+            ]
+        },
+    }
+
+    def prerequisite(self):
+        return self.test.cmd in virsh.commands
+
+    def apply(self, result):
+        self.test.options = result
+",Hao-Liu/virt-trinity,virtTrinity/providers/virsh_cmd/picker/optset.py,Python,gpl-2.0,913
+"package com.suscipio_solutions.consecro_mud.Abilities.Spells;
+import java.util.LinkedList;
+import java.util.Vector;
+
+import com.suscipio_solutions.consecro_mud.Abilities.interfaces.Ability;
+import com.suscipio_solutions.consecro_mud.Common.interfaces.CMMsg;
+import com.suscipio_solutions.consecro_mud.Items.interfaces.Item;
+import com.suscipio_solutions.consecro_mud.Items.interfaces.Wearable;
+import com.suscipio_solutions.consecro_mud.Locales.interfaces.Room;
+import com.suscipio_solutions.consecro_mud.MOBS.interfaces.MOB;
+import com.suscipio_solutions.consecro_mud.core.CMClass;
+import com.suscipio_solutions.consecro_mud.core.CMLib;
+import com.suscipio_solutions.consecro_mud.core.CMStrings;
+import com.suscipio_solutions.consecro_mud.core.interfaces.Environmental;
+import com.suscipio_solutions.consecro_mud.core.interfaces.Physical;
+
+
+@SuppressWarnings(""rawtypes"")
+public class Spell_SpyingStone extends Spell
+{
+    @Override public String ID() { return ""Spell_SpyingStone""; }
+    private final static String localizedName = CMLib.lang().L(""Spying Stone"");
+    @Override public String name() { return localizedName; }
+    private final static String localizedStaticDisplay = CMLib.lang().L(""(Spying Stone)"");
+    @Override public String displayText() { return localizedStaticDisplay; }
+    @Override protected int canAffectCode(){return CAN_ITEMS;}
+    @Override protected int canTargetCode(){return Ability.CAN_ITEMS;}
+    @Override public int classificationCode(){return Ability.ACODE_SPELL|Ability.DOMAIN_DIVINATION;}
+    @Override public int abstractQuality(){ return Ability.QUALITY_INDIFFERENT;}
+
+    protected LinkedList<String> msgs=new LinkedList<String>();
+
+    @Override
+    public void executeMsg(final Environmental myHost, final CMMsg msg)
+    {
+        super.executeMsg(myHost, msg);
+        if((msg.targetMinor()==CMMsg.TYP_SPEAK)
+        &&((msg.source()==invoker())
+        ||((invoker()!=null) && msg.source().Name().equalsIgnoreCase(invoker().Name())))
+        &&(msg.target()==affected)
+        &&(msg.sourceMessage().toUpperCase().indexOf(""SPEAK"")>=0))
+        {
+            final Room room=CMLib.map().roomLocation(affected);
+            if(room!=null)
+            {
+                final StringBuilder str=new StringBuilder("""");
+                for(final String m : msgs)
+                    str.append(m).append(""\n\r"");
+                if(str.length()==0) str.append(L(""Nothing!""));
+                room.showHappens(CMMsg.MSG_SPEAK, affected,L(""^S<S-NAME> grow(s) a mouth and say(s) '^N@x1^S'^N"",str.toString()));
+                msgs.clear();
+            }
+        }
+        else
+        if((msg.othersCode()!=CMMsg.NO_EFFECT)
+        &&(msg.othersMessage()!=null)
+        &&(msg.othersMessage().length()>0))
+            msgs.add(CMLib.coffeeFilter().fullOutFilter(null, null, msg.source(), msg.target(), msg.tool(), CMStrings.removeColors(msg.othersMessage()), false));
+    }
+
+    @Override
+    public boolean invoke(MOB mob, Vector commands, Physical givenTarget, boolean auto, int asLevel)
+    {
+        final Physical target=getTarget(mob,mob.location(),givenTarget,commands,Wearable.FILTER_ANY);
+        if(target==null) return false;
+
+        if(!(target instanceof Item))
+        {
+            mob.tell(L(""You can't cast this spell on that.""));
+            return false;
+        }
+
+        if(target.fetchEffect(this.ID())!=null)
+        {
+            mob.tell(L(""@x1 is already a spying stone!"",target.name(mob)));
+            return false;
+        }
+
+        if(!super.invoke(mob,commands,givenTarget,auto,asLevel))
+            return false;
+
+        final boolean success=proficiencyCheck(mob,0,auto);
+
+        if(success)
+        {
+            final CMMsg msg=CMClass.getMsg(mob,target,this,verbalCastCode(mob,target,auto),auto?"""":L(""^S<S-NAME> point(s) <S-HIS-HER> finger at <T-NAMESELF>, incanting.^?""));
+            if(mob.location().okMessage(mob,msg))
+            {
+                mob.location().send(mob,msg);
+                beneficialAffect(mob,target,asLevel,0);
+                mob.location().show(mob,target,CMMsg.MSG_OK_VISUAL,L(""<T-NAME> open(s) a pair of strange eyes, which become transluscent.""));
+            }
+        }
+        else
+            beneficialWordsFizzle(mob,target,L(""<S-NAME> point(s) at <T-NAMESELF>, incanting, but nothing happens.""));
+
+
+        // return whether it worked
+        return success;
+    }
+}
+",ConsecroMUD/ConsecroMUD,com/suscipio_solutions/consecro_mud/Abilities/Spells/Spell_SpyingStone.java,Java,apache-2.0,3919
+"# -*- encoding: utf-8 -*-
+'''
+HubbleStack Nebula-to-Splunk returner
+
+Deliver HubbleStack Nebula query data into Splunk using the HTTP
+event collector. Required config/pillar settings:
+
+.. code-block:: yaml
+
+    hubblestack:
+      returner:
+        splunk:
+          - token: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+            indexer: splunk-indexer.domain.tld
+            index: hubble
+            sourcetype_nebula: hubble_osquery
+
+You can also add a `custom_fields` argument which is a list of keys to add to
+events with using the results of config.get(<custom_field>). These new keys
+will be prefixed with 'custom_' to prevent conflicts. The values of these keys
+should be strings or lists (will be sent as CSV string), do not choose grains
+or pillar values with complex values or they will be skipped.
+
+Additionally, you can define a fallback_indexer which will be used if a default
+gateway is not defined.
+
+.. code-block:: yaml
+
+    hubblestack:
+      returner:
+        splunk:
+          - token: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+            indexer: splunk-indexer.domain.tld
+            index: hubble
+            sourcetype_nebula: hubble_osquery
+            fallback_indexer: splunk-indexer.loc.domain.tld
+            custom_fields:
+              - site
+              - product_group
+'''
+import socket
+
+# Imports for http event forwarder
+import requests
+import json
+import time
+from datetime import datetime
+from hubblestack.hec import http_event_collector, get_splunk_options, make_hec_args
+
+import logging
+
+_max_content_bytes = 100000
+http_event_collector_debug = False
+RETRY = False
+
+log = logging.getLogger(__name__)
+
+
+def returner(ret):
+    try:
+        opts_list = get_splunk_options( sourcetype_nebula='hubble_osquery',
+            add_query_to_sourcetype=True, _nick={'sourcetype_nebula': 'sourcetype'})
+
+        for opts in opts_list:
+            logging.debug('Options: %s' % json.dumps(opts))
+            custom_fields = opts['custom_fields']
+
+            # Set up the fields to be extracted at index time. The field values must be strings.
+            # Note that these fields will also still be available in the event data
+            index_extracted_fields = []
+            try:
+                index_extracted_fields.extend(__opts__.get('splunk_index_extracted_fields', []))
+            except TypeError:
+                pass
+
+            # Set up the collector
+            args, kwargs = make_hec_args(opts)
+            hec = http_event_collector(*args, **kwargs)
+
+            # st = 'salt:hubble:nova'
+            data = ret['return']
+            minion_id = ret['id']
+            jid = ret['jid']
+            global RETRY
+            RETRY = ret['retry']
+            master = __grains__['master']
+            fqdn = __grains__['fqdn']
+            # Sometimes fqdn is blank. If it is, replace it with minion_id
+            fqdn = fqdn if fqdn else minion_id
+            try:
+                fqdn_ip4 = __grains__.get('local_ip4')
+                if not fqdn_ip4:
+                    fqdn_ip4 = __grains__['fqdn_ip4'][0]
+            except IndexError:
+                try:
+                    fqdn_ip4 = __grains__['ipv4'][0]
+                except IndexError:
+                    raise Exception('No ipv4 grains found. Is net-tools installed?')
+            if fqdn_ip4.startswith('127.'):
+                for ip4_addr in __grains__['ipv4']:
+                    if ip4_addr and not ip4_addr.startswith('127.'):
+                        fqdn_ip4 = ip4_addr
+                        break
+            local_fqdn = __grains__.get('local_fqdn', __grains__['fqdn'])
+
+            # Sometimes fqdn reports a value of localhost. If that happens, try another method.
+            bad_fqdns = ['localhost', 'localhost.localdomain', 'localhost6.localdomain6']
+            if fqdn in bad_fqdns:
+                new_fqdn = socket.gethostname()
+                if '.' not in new_fqdn or new_fqdn in bad_fqdns:
+                    new_fqdn = fqdn_ip4
+                fqdn = new_fqdn
+
+            # Get cloud details
+            cloud_details = __grains__.get('cloud_details', {})
+
+            if not data:
+                return
+            else:
+                for query in data:
+                    for query_name, query_results in query.iteritems():
+                        if 'data' not in query_results:
+                            query_results['data'] = [{'error': 'result missing'}]
+                        for query_result in query_results['data']:
+                            event = {}
+                            payload = {}
+                            event.update(query_result)
+                            event.update({'query': query_name})
+                            event.update({'job_id': jid})
+                            event.update({'master': master})
+                            event.update({'minion_id': minion_id})
+                            event.update({'dest_host': fqdn})
+                            event.update({'dest_ip': fqdn_ip4})
+                            event.update({'dest_fqdn': local_fqdn})
+                            event.update({'system_uuid': __grains__.get('system_uuid')})
+
+                            event.update(cloud_details)
+
+                            for custom_field in custom_fields:
+                                custom_field_name = 'custom_' + custom_field
+                                custom_field_value = __salt__['config.get'](custom_field, '')
+                                if isinstance(custom_field_value, (str, unicode)):
+                                    event.update({custom_field_name: custom_field_value})
+                                elif isinstance(custom_field_value, list):
+                                    custom_field_value = ','.join(custom_field_value)
+                                    event.update({custom_field_name: custom_field_value})
+
+                            payload.update({'host': fqdn})
+                            payload.update({'index': opts['index']})
+                            if opts['add_query_to_sourcetype']:
+                                payload.update({'sourcetype': ""%s_%s"" % (opts['sourcetype'], query_name)})
+                            else:
+                                payload.update({'sourcetype': opts['sourcetype']})
+
+                            # Remove any empty fields from the event payload
+                            remove_keys = [k for k in event if event[k] == """"]
+                            for k in remove_keys:
+                                del event[k]
+
+                            payload.update({'event': event})
+
+                            # Potentially add metadata fields:
+                            fields = {}
+                            for item in index_extracted_fields:
+                                if item in payload['event'] and not isinstance(payload['event'][item], (list, dict, tuple)):
+                                    fields[""meta_%s"" % item] = str(payload['event'][item])
+                            if fields:
+                                payload.update({'fields': fields})
+
+                            # If the osquery query includes a field called 'time' it will be checked.
+                            # If it's within the last year, it will be used as the eventtime.
+                            event_time = query_result.get('time', '')
+                            try:
+                                if (datetime.fromtimestamp(time.time()) - datetime.fromtimestamp(float(event_time))).days > 365:
+                                    event_time = ''
+                            except Exception:
+                                event_time = ''
+                            finally:
+                                hec.batchEvent(payload, eventtime=event_time)
+
+            hec.flushBatch()
+    except Exception:
+        log.exception('Error ocurred in splunk_nebula_return')
+    return
+",basepi/hubble,hubblestack/extmods/returners/splunk_nebula_return.py,Python,apache-2.0,7889
+"// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CHROME_BROWSER_UI_VIEWS_TAB_ICON_VIEW_MODEL_H_
+#define CHROME_BROWSER_UI_VIEWS_TAB_ICON_VIEW_MODEL_H_
+
+namespace ui {
+class ImageModel;
+}  // namespace ui
+
+// Classes implement this interface to provide state for the TabIconView.
+class TabIconViewModel {
+ public:
+  // Returns true if the TabIconView should show a loading animation.
+  virtual bool ShouldTabIconViewAnimate() const = 0;
+
+  // Returns the favicon to display in the icon view
+  virtual ui::ImageModel GetFaviconForTabIconView() = 0;
+
+ protected:
+  virtual ~TabIconViewModel() {}
+};
+
+#endif  // CHROME_BROWSER_UI_VIEWS_TAB_ICON_VIEW_MODEL_H_
+",ric2b/Vivaldi-browser,chromium/chrome/browser/ui/views/tab_icon_view_model.h,C,bsd-3-clause,784
+"//
+//  HealthKit.h
+//  HealthKit
+//
+//  Copyright (c) 2013-2014 Apple Inc. All rights reserved.
+//
+
+#import <HealthKit/HKActivitySummary.h>
+#import <HealthKit/HKActivitySummaryQuery.h>
+#import <HealthKit/HKAnchoredObjectQuery.h>
+#import <HealthKit/HKCategorySample.h>
+#import <HealthKit/HKCorrelation.h>
+#import <HealthKit/HKCorrelationQuery.h>
+#import <HealthKit/HKDefines.h>
+#import <HealthKit/HKDeletedObject.h>
+#import <HealthKit/HKDevice.h>
+#import <HealthKit/HKHealthStore.h>
+#import <HealthKit/HKMetadata.h>
+#import <HealthKit/HKObject.h>
+#import <HealthKit/HKObjectType.h>
+#import <HealthKit/HKObserverQuery.h>
+#import <HealthKit/HKQuantity.h>
+#import <HealthKit/HKQuantitySample.h>
+#import <HealthKit/HKQuery.h>
+#import <HealthKit/HKSample.h>
+#import <HealthKit/HKSampleQuery.h>
+#import <HealthKit/HKSource.h>
+#import <HealthKit/HKSourceQuery.h>
+#import <HealthKit/HKSourceRevision.h>
+#import <HealthKit/HKStatistics.h>
+#import <HealthKit/HKStatisticsCollectionQuery.h>
+#import <HealthKit/HKStatisticsQuery.h>
+#import <HealthKit/HKTypeIdentifiers.h>
+#import <HealthKit/HKUnit.h>
+#import <HealthKit/HKWorkout.h>
+#import <HealthKit/HKWorkoutSession.h>
+",rweichler/cylinder,deps/iPhoneOS9.3.sdk/System/Library/Frameworks/HealthKit.framework/Headers/HealthKit.h,C,mit,1159
utils/examples.json
ADDED
@@ -0,0 +1,43 @@
+[
+    {
+        "name": "Hello World!",
+        "value": "def print_hello_world():\n    \"\"\"Print 'Hello World!'.\"\"\"",
+        "length": 8
+    },
+    {
+        "name": "Scikit-Learn",
+        "value": "import numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\n\n# create training data\nX = np.random.randn(100, 100)\ny = np.random.randint(0, 1, 100)\n\n# setup train test split",
+        "length": 96
+    },
+    {
+        "name": "Filesize",
+        "value": "def get_file_size(filepath):",
+        "length": 64
+    },
+    {
+        "name": "Is_in_list",
+        "value": "def is_in_list(L, e):\n    \"\"\"Find if list L contains the element e.\"\"\"",
+        "length": 32
+    },
+    {
+        "name": "Python to Numpy",
+        "value": "# native Python:\ndef mean(a):\n    return sum(a)/len(a)\n\n# with numpy:\nimport numpy as np\n\ndef mean(a):",
+        "length": 16
+    },
+    {
+        "name": "unittest",
+        "value": "def is_even(value):\n    \"\"\"Returns True if value is an even number.\"\"\"\n    return value % 2 == 0\n\n# setup unit tests for is_even\nimport unittest",
+        "length": 64
+
+    },
+    {
+        "name": "Pandas",
+        "value": "# load dataframe from csv\ndf = pd.read_csv(filename)\n\n# columns: \"age_group\", \"income\"\n# calculate average income per age group",
+        "length": 16
+    },
+    {
+        "name": "Transformers",
+        "value": "from transformers import AutoTokenizer, AutoModelForSequenceClassification\n\n# build a BERT classifier",
+        "length": 48
+    }
+]
utils/intro.txt
ADDED
@@ -0,0 +1,8 @@
+This is an **interactive** blog that gives an overview of open-source language models for code generation. We present their code datasets, model architectures and model evaluation, along with examples and tips for using the 🤗 hub for this task. At the end of this blog, you will find a **demo** to test and compare code generation across these models ✨.
+
+
+## Introduction
+
+The application of language models to code generation has sparked great interest recently. You have probably heard of [Codex](https://arxiv.org/pdf/2107.03374v2.pdf), the model behind [Github Copilot](https://copilot.github.com/), or [AlphaCode](https://arxiv.org/pdf/2203.07814v1.pdf) for competition-level programming. These models aren't open-source, and it is hard to reproduce them with a limited budget and incomplete information about their training. The ML community has luckily contributed some code models to allow for further research.
+
+However, it can be easy to get lost between models, so at Hugging Face we aim to democratize ML and centralize all information in the 🤗 ecosystem to make the usage of open-source tools easier and more efficient. Code models aren't an exception: you can find all open-source models on the hub, with several code datasets and evaluation metrics. In this blog we will give an overview of these tools and how to use them.
utils/resources.txt
ADDED
@@ -0,0 +1,6 @@
+- Natural Language Processing with Transformers [Tunstall et al., 2022](https://www.oreilly.com/library/view/natural-language-processing/9781098103231/).
+- Evaluating large language models trained on code [Chen et al., 2021](https://arxiv.org/abs/2107.03374).
+- Competition-Level Code Generation with AlphaCode [Li et al., 2022](https://arxiv.org/abs/2203.07814).
+- InCoder: A Generative Model for Code Infilling and Synthesis [Fried et al., 2022](https://arxiv.org/abs/2204.05999).
+- A Conversational Paradigm for Program Synthesis [Nijkamp et al., 2022](https://arxiv.org/abs/2203.13474).
+- A systematic evaluation of large language models of code [Xu et al., 2022](https://arxiv.org/abs/2202.13169).
utils/table_contents.txt
ADDED
@@ -0,0 +1,18 @@
+### 📖 Table of contents 📖
+
+1 - Code datasets
+
+2 - Model architecture
+
+3 - Model evaluation
+
+4 - Code generation
+
+For each section, you can choose to visualize the information of 4 code generation models:
+
+* [CodeParrot](https://huggingface.co/lvwerra/codeparrot)
+* [InCoder](https://huggingface.co/facebook/incoder-6B)
+* [CodeGen](https://github.com/salesforce/CodeGen)
+* [PolyCoder](https://github.com/vhellendoorn/code-lms)
+
+In section 4, you get to prompt the models and test their **code generation** capacities ✨!