Dataset schema (field, type, observed range):
task_id               int64      19.3k to 41.9M
prompt                string     lengths 17 to 68
suffix                string     37 distinct values
canonical_solution    string     lengths 6 to 153
test_start            string     lengths 22 to 198
test                  sequence   lengths 1 to 7
entry_point           string     lengths 7 to 10
intent                string     lengths 19 to 200
library               sequence   lengths 0 to 3
docs                  list       lengths 0 to 3
1,762,484
def f_1762484(stocks_list): return
[x for x in range(len(stocks_list)) if stocks_list[x] == 'MSFT']
def check(candidate):
[ "\n stocks_list = ['AAPL', 'MSFT', 'GOOG', 'MSFT', 'MSFT']\n assert(candidate(stocks_list) == [1,3,4])\n", "\n stocks_list = ['AAPL', 'MSXT', 'GOOG', 'MSAT', 'SFT']\n assert(candidate(stocks_list) == [])\n" ]
f_1762484
find all indices of the element 'MSFT' in the list `stocks_list`
[]
[]
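Taken together, the fields form one executable unit per record: `prompt` plus `canonical_solution` (plus `suffix`, when non-empty) yield the function named by `entry_point`; `test_start` opens a `check(candidate)` function whose body is the concatenation of the strings in `test`; and `intent`, `library`, and `docs` carry the natural-language description, required packages, and supporting documentation. The sketch below shows one plausible way to assemble and run the first record (f_1762484); the concatenation order and the trailing `check(...)` call are assumptions inferred from the field names, not an official harness.

```python
# Sketch of executing one record; the assembly order (prompt + solution + suffix)
# and the final check(...) call are assumptions, not the dataset's own harness.
record = {
    "prompt": "def f_1762484(stocks_list): return",
    "canonical_solution": " [x for x in range(len(stocks_list)) if stocks_list[x] == 'MSFT']",
    "suffix": "",
    "test_start": "def check(candidate):",
    "test": [
        "\n    stocks_list = ['AAPL', 'MSFT', 'GOOG', 'MSFT', 'MSFT']\n"
        "    assert candidate(stocks_list) == [1, 3, 4]\n",
    ],
    "entry_point": "f_1762484",
}

# Candidate function: the prompt completed by the canonical solution (and suffix, if any).
candidate_src = record["prompt"] + record["canonical_solution"] + record["suffix"]

# Test harness: check() wraps the test snippets and is called on the entry point.
check_src = record["test_start"] + "".join(record["test"])
program = candidate_src + "\n\n" + check_src + "\ncheck({})\n".format(record["entry_point"])

namespace = {}
exec(program, namespace)  # raises AssertionError if the solution fails a test
print("record passed")
```

A real harness would also need to import the packages listed in `library` before running records that depend on them.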
3,464,359
def f_3464359(ax, labels): return
ax.set_xticklabels(labels, rotation=45)
import matplotlib.pyplot as plt
def check(candidate):
[ "\n fig, ax = plt.subplots()\n ax.plot([1, 2, 3, 4], [1, 4, 2, 3])\n ret = candidate(ax, [f\"#{i}\" for i in range(7)])\n assert [tt.get_rotation() == 45.0 for tt in ret]\n" ]
f_3464359
rotate the xtick `labels` of matplotlib plot `ax` by `45` degrees to make long labels readable
[ "matplotlib" ]
[ { "function": "ax.set_xticklabels", "text": "matplotlib.axes.Axes.set_xticklabels Axes.set_xticklabels(labels, *, fontdict=None, minor=False, **kwargs)[source]\n \nSet the xaxis' labels with list of string labels. Warning This method should only be used after fixing the tick positions using Axes.set_xticks. Otherwise, the labels may end up in unexpected positions. Parameters ", "title": "matplotlib._as_gen.matplotlib.axes.axes.set_xticklabels" } ]
875,968
def f_875968(s): return
re.sub('[^\\w]', ' ', s)
import re
def check(candidate):
[ "\n s = \"how much for the maple syrup? $20.99? That's ridiculous!!!\"\n assert candidate(s) == 'how much for the maple syrup 20 99 That s ridiculous '\n" ]
f_875968
remove symbols from a string `s`
[ "re" ]
[ { "function": "re.sub", "text": "re.sub(pattern, repl, string, count=0, flags=0) \nReturn the string obtained by replacing the leftmost non-overlapping occurrences of pattern in string by the replacement repl. If the pattern isn’t found, string is returned unchanged. repl can be a string or a function; if it is a string, any backslash escapes in it are processed. That is, \\n is converted to a single newline character, \\r is converted to a carriage return, and so forth. Unknown escapes of ASCII letters are reserved for future use and treated as errors. Other unknown escapes such as \\& are left alone. Backreferences, such as \\6, are replaced with the substring matched by group 6 in the pattern. For example: >>> re.sub(r'def\\s+([a-zA-Z_][a-zA-Z_0-9]*)\\s*\\(\\s*\\):',\n... r'static PyObject*\\npy_\\1(void)\\n{',\n... 'def myfunc():')\n'static PyObject*\\npy_myfunc(void)\\n{'\n If repl is a function, it is called for every non-overlapping occurrence of pattern. The function takes a single match object argument, and returns the replacement string. For example: >>> def dashrepl(matchobj):\n... if matchobj.group(0) == '-': return ' '\n... else: return '-'", "title": "python.library.re#re.sub" } ]
34,750,084
def f_34750084(s): return
re.findall("'\\\\[0-7]{1,3}'", s)
import re
def check(candidate):
[ "\n assert candidate(r\"char x = '\\077';\") == [\"'\\\\077'\"]\n" ]
f_34750084
Find matches of octal character escapes in a string `s` using regex
[ "re" ]
[ { "function": "re.findall", "text": "re.findall(pattern, string, flags=0) \nReturn all non-overlapping matches of pattern in string, as a list of strings. The string is scanned left-to-right, and matches are returned in the order found. If one or more groups are present in the pattern, return a list of groups; this will be a list of tuples if the pattern has more than one group. Empty matches are included in the result. Changed in version 3.7: Non-empty matches can now start just after a previous empty match.", "title": "python.library.re#re.findall" } ]
13,209,288
def f_13209288(input): return
re.split(r'[ ](?=[A-Z]+\b)', input)
import re
def check(candidate):
[ "\n assert candidate('HELLO there HOW are YOU') == ['HELLO there', 'HOW are', 'YOU']\n", "\n assert candidate('hELLO there HoW are YOU') == ['hELLO there HoW are', 'YOU']\n", "\n assert candidate('7 is a NUMBER') == ['7 is a', 'NUMBER']\n", "\n assert candidate('NUMBER 7') == ['NUMBER 7']\n" ]
f_13209288
split string `input` based on occurrences of regex pattern '[ ](?=[A-Z]+\\b)'
[ "re" ]
[ { "function": "re.split", "text": "re.split(pattern, string, maxsplit=0, flags=0) \nSplit string by the occurrences of pattern. If capturing parentheses are used in pattern, then the text of all groups in the pattern are also returned as part of the resulting list. If maxsplit is nonzero, at most maxsplit splits occur, and the remainder of the string is returned as the final element of the list. >>> re.split(r'\\W+', 'Words, words, words.')\n['Words', 'words', 'words', '']", "title": "python.library.re#re.split" } ]
13,209,288
def f_13209288(input): return
re.split('[ ](?=[A-Z])', input)
import re
def check(candidate):
[ "\n assert candidate('HELLO there HOW are YOU') == ['HELLO there', 'HOW are', 'YOU']\n", "\n assert candidate('hELLO there HoW are YOU') == ['hELLO there', 'HoW are', 'YOU']\n", "\n assert candidate('7 is a NUMBER') == ['7 is a', 'NUMBER']\n", "\n assert candidate('NUMBER 7') == ['NUMBER 7']\n" ]
f_13209288
Split string `input` at every space followed by an upper-case letter
[ "re" ]
[ { "function": "re.split", "text": "re.split(pattern, string, maxsplit=0, flags=0) \nSplit string by the occurrences of pattern. If capturing parentheses are used in pattern, then the text of all groups in the pattern are also returned as part of the resulting list. If maxsplit is nonzero, at most maxsplit splits occur, and the remainder of the string is returned as the final element of the list. >>> re.split(r'\\W+', 'Words, words, words.')\n['Words', 'words', 'words', '']", "title": "python.library.re#re.split" } ]
24,642,040
def f_24642040(url, files, headers, data): return
requests.post(url, files=files, headers=headers, data=data)
import requests
from unittest.mock import Mock
def check(candidate):
[ "\n requests.post = Mock()\n try:\n candidate('https://www.google.com', ['a.txt'], {'accept': 'text/json'}, {'name': 'abc'})\n except:\n assert False\n" ]
f_24642040
send multipart-encoded files `files` to url `url` with headers `headers` and metadata `data`
[ "requests" ]
[]
4,290,716
def f_4290716(filename, bytes_): return
open(filename, 'wb').write(bytes_)
def check(candidate):
[ "\n bytes_ = b'68 65 6c 6c 6f'\n candidate(\"tmpfile\", bytes_)\n\n with open(\"tmpfile\", 'rb') as fr:\n assert fr.read() == bytes_\n" ]
f_4290716
write bytes `bytes_` to a file `filename` in python 3
[]
[]
33,078,554
def f_33078554(lst, dct): return
[dct[k] for k in lst]
def check(candidate):
[ "\n assert candidate(['c', 'd', 'a', 'b', 'd'], {'a': '3', 'b': '3', 'c': '5', 'd': '3'}) == ['5', '3', '3', '3', '3'] \n", "\n assert candidate(['c', 'd', 'a', 'b', 'd'], {'a': 3, 'b': 3, 'c': 5, 'd': 3}) == [5, 3, 3, 3, 3] \n", "\n assert candidate(['c', 'd', 'a', 'b'], {'a': 3, 'b': 3, 'c': 5, 'd': 3}) == [5, 3, 3, 3]\n" ]
f_33078554
build a list from list `lst` by mapping each element to its value in dictionary `dct`
[]
[]
15,247,628
def f_15247628(x): return
x['name'][x.duplicated('name')]
import pandas as pd
def check(candidate):
[ "\n assert candidate(pd.DataFrame([{'name': 'willy', 'age': 10}, {'name': 'wilson', 'age': 11}, {'name': 'zoe', 'age': 10}])).tolist() == [] \n", "\n assert candidate(pd.DataFrame([{'name': 'willy', 'age': 10}, {'name': 'willy', 'age': 11}, {'name': 'zoe', 'age': 10}])).tolist() == ['willy'] \n", "\n assert candidate(pd.DataFrame([{'name': 'willy', 'age': 11}, {'name': 'willy', 'age': 11}, {'name': 'zoe', 'age': 10}])).tolist() == ['willy'] \n", "\n assert candidate(pd.DataFrame([{'name': 'Willy', 'age': 11}, {'name': 'willy', 'age': 11}, {'name': 'zoe', 'age': 10}])).tolist() == []\n" ]
f_15247628
find duplicate names in column 'name' of the dataframe `x`
[ "pandas" ]
[ { "function": "x.duplicated", "text": "pandas.DataFrame.duplicated DataFrame.duplicated(subset=None, keep='first')[source]\n \nReturn boolean Series denoting duplicate rows. Considering certain columns is optional. Parameters ", "title": "pandas.reference.api.pandas.dataframe.duplicated" } ]
783,897
def f_783897(): return
round(1.923328437452, 3)
def check(candidate):
[ "\n assert candidate() == 1.923\n" ]
f_783897
round float 1.923328437452 to 3 decimal places
[]
[]
22,859,493
def f_22859493(li): return
sorted(li, key=lambda x: datetime.strptime(x[1], '%d/%m/%Y'), reverse=True)
from datetime import datetime
def check(candidate):
[ "\n assert candidate([['name', '01/03/2012', 'job'], ['name', '02/05/2013', 'job'], ['name', '03/08/2014', 'job']]) == [['name', '03/08/2014', 'job'], ['name', '02/05/2013', 'job'], ['name', '01/03/2012', 'job']] \n", "\n assert candidate([['name', '01/03/2012', 'job'], ['name', '02/05/2012', 'job'], ['name', '03/08/2012', 'job']]) == [['name', '03/08/2012', 'job'], ['name', '02/05/2012', 'job'], ['name', '01/03/2012', 'job']] \n", "\n assert candidate([['name', '01/03/2012', 'job'], ['name', '02/03/2012', 'job'], ['name', '03/03/2012', 'job']]) == [['name', '03/03/2012', 'job'], ['name', '02/03/2012', 'job'], ['name', '01/03/2012', 'job']] \n", "\n assert candidate([['name', '03/03/2012', 'job'], ['name', '03/03/2012', 'job'], ['name', '03/03/2012', 'job']]) == [['name', '03/03/2012', 'job'], ['name', '03/03/2012', 'job'], ['name', '03/03/2012', 'job']] \n" ]
f_22859493
sort list `li` in descending order based on the date value in second element of each list in list `li`
[ "datetime" ]
[ { "function": "datetime.strptime", "text": "classmethod datetime.strptime(date_string, format) \nReturn a datetime corresponding to date_string, parsed according to format. This is equivalent to: datetime(*(time.strptime(date_string, format)[0:6]))\n ValueError is raised if the date_string and format can’t be parsed by time.strptime() or if it returns a value which isn’t a time tuple. For a complete list of formatting directives, see strftime() and strptime() Behavior.", "title": "python.library.datetime#datetime.datetime.strptime" } ]
29,394,552
def f_29394552(ax):
return
ax.set_rlabel_position(135)
import matplotlib.pyplot as plt
def check(candidate):
[ "\n ax = plt.subplot(111, polar=True)\n candidate(ax)\n assert ax.properties()['rlabel_position'] == 135.0\n" ]
f_29394552
place the radial tick labels of polar plot `ax` at 135 degrees
[ "matplotlib" ]
[ { "function": "ax.set_rlabel_position", "text": "set_rlabel_position(value)[source]\n \nUpdate the theta position of the radius labels. Parameters \n \nvaluenumber\n\n\nThe angular position of the radius labels in degrees.", "title": "matplotlib.projections_api#matplotlib.projections.polar.PolarAxes.set_rlabel_position" } ]
3,320,406
def f_3320406(my_path): return
os.path.isabs(my_path)
import os
def check(candidate):
[ "\n assert candidate('.') == False \n", "\n assert candidate('/') == True \n", "\n assert candidate('/usr') == True\n" ]
f_3320406
check if path `my_path` is an absolute path
[ "os" ]
[ { "function": "os.isabs", "text": "os.path.isabs(path) \nReturn True if path is an absolute pathname. On Unix, that means it begins with a slash, on Windows that it begins with a (back)slash after chopping off a potential drive letter. Changed in version 3.6: Accepts a path-like object.", "title": "python.library.os.path#os.path.isabs" } ]
2,212,433
def f_2212433(yourdict): return
len(list(yourdict.keys()))
def check(candidate):
[ "\n assert candidate({'a': 1, 'b': 2, 'c': 3}) == 3 \n", "\n assert candidate({'a': 2, 'c': 3}) == 2\n" ]
f_2212433
get number of keys in dictionary `yourdict`
[]
[]
2,212,433
def f_2212433(yourdictfile): return
len(set(open(yourdictfile).read().split()))
def check(candidate):
[ "\n with open('dict.txt', 'w') as fw:\n for w in [\"apple\", \"banana\", \"tv\", \"apple\", \"phone\"]:\n fw.write(f\"{w}\\n\")\n assert candidate('dict.txt') == 4\n" ]
f_2212433
count the number of distinct words in the file `yourdictfile`
[]
[]
20,067,636
def f_20067636(df): return
df.groupby('id').first()
import pandas as pd
def check(candidate):
[ "\n df = pd.DataFrame({\n 'id': [1, 1, 1, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6, 6, 6, 7, 7], \n 'value': ['first', 'second', 'second', 'first', 'second', 'first', 'third', 'fourth', 'fifth', 'second', 'fifth', 'first', 'first', 'second', 'third', 'fourth', 'fifth']\n })\n assert candidate(df).to_dict() == {'value': {1: 'first', 2: 'first', 3: 'first', 4: 'second', 5: 'first', 6: 'first', 7: 'fourth'}}\n" ]
f_20067636
pandas dataframe `df` get first row of each group by 'id'
[ "pandas" ]
[ { "function": "dataframe.groupby", "text": "pandas.DataFrame.groupby DataFrame.groupby(by=None, axis=0, level=None, as_index=True, sort=True, group_keys=True, squeeze=NoDefault.no_default, observed=False, dropna=True)[source]\n \nGroup DataFrame using a mapper or by a Series of columns. A groupby operation involves some combination of splitting the object, applying a function, and combining the results. This can be used to group large amounts of data and compute operations on these groups. Parameters ", "title": "pandas.reference.api.pandas.dataframe.groupby" } ]
40,924,332
def f_40924332(df): return
pd.concat([df[0].apply(pd.Series), df[1]], axis=1)
import numpy as np
import pandas as pd
def check(callerFunction):
[ "\n assert callerFunction(pd.DataFrame([[[8, 10, 12], 'A'], [[7, 9, 11], 'B']])).equals(pd.DataFrame([[8,10,12,'A'], [7,9,11,'B']], columns=[0,1,2,1]))\n", "\n assert callerFunction(pd.DataFrame([[[8, 10, 12], 'A'], [[7, 11], 'B']])).equals(pd.DataFrame([[8.0,10.0,12.0,'A'], [7.0,11.0,np.nan,'B']], columns=[0,1,2,1]))\n", "\n assert callerFunction(pd.DataFrame([[[8, 10, 12]], [[7, 9, 11], 'B']])).equals(pd.DataFrame([[8,10,12,None], [7,9,11,'B']], columns=[0,1,2,1]))\n" ]
f_40924332
split the lists in the first column of pandas dataframe `df` into multiple columns, keeping the other columns
[ "numpy", "pandas" ]
[ { "function": "pandas.concat", "text": "pandas.concat pandas.concat(objs, axis=0, join='outer', ignore_index=False, keys=None, levels=None, names=None, verify_integrity=False, sort=False, copy=True)[source]\n \nConcatenate pandas objects along a particular axis with optional set logic along the other axes. Can also add a layer of hierarchical indexing on the concatenation axis, which may be useful if the labels are the same (or overlapping) on the passed axis number. Parameters ", "title": "pandas.reference.api.pandas.concat" }, { "function": "dataframe.apply", "text": "pandas.DataFrame.apply DataFrame.apply(func, axis=0, raw=False, result_type=None, args=(), **kwargs)[source]\n \nApply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame’s index (axis=0) or the DataFrame’s columns (axis=1). By default (result_type=None), the final return type is inferred from the return type of the applied function. Otherwise, it depends on the result_type argument. Parameters ", "title": "pandas.reference.api.pandas.dataframe.apply" } ]
30,759,776
def f_30759776(data): return
re.findall('src="js/([^"]*\\bjquery\\b[^"]*)"', data)
import re
def check(candidate):
[ "\n data = '<script type=\"text/javascript\" src=\"js/jquery-1.9.1.min.js\"/><script type=\"text/javascript\" src=\"js/jquery-migrate-1.2.1.min.js\"/><script type=\"text/javascript\" src=\"js/jquery-ui.min.js\"/><script type=\"text/javascript\" src=\"js/abc_bsub.js\"/><script type=\"text/javascript\" src=\"js/abc_core.js\"/> <script type=\"text/javascript\" src=\"js/abc_explore.js\"/><script type=\"text/javascript\" src=\"js/abc_qaa.js\"/>'\n assert candidate(data) == ['jquery-1.9.1.min.js', 'jquery-migrate-1.2.1.min.js', 'jquery-ui.min.js']\n" ]
f_30759776
extract attributes 'src="js/([^"]*\\bjquery\\b[^"]*)"' from string `data`
[ "re" ]
[ { "function": "re.findall", "text": "re.findall(pattern, string, flags=0) \nReturn all non-overlapping matches of pattern in string, as a list of strings. The string is scanned left-to-right, and matches are returned in the order found. If one or more groups are present in the pattern, return a list of groups; this will be a list of tuples if the pattern has more than one group. Empty matches are included in the result. Changed in version 3.7: Non-empty matches can now start just after a previous empty match.", "title": "python.library.re#re.findall" } ]
25,388,796
def f_25388796(): return
sum(int(float(item)) for item in [_f for _f in ['', '3.4', '', '', '1.0'] if _f])
def check(candidate):
[ "\n assert candidate() == 4\n" ]
f_25388796
Sum integers contained in strings in list `['', '3.4', '', '', '1.0']`
[]
[]
804,995
def f_804995(): return
subprocess.Popen(['c:\\Program Files\\VMware\\VMware Server\\vmware-cmd.bat'])
import subprocess
from unittest.mock import Mock
def check(candidate):
[ "\n subprocess.Popen = Mock(return_value = 0)\n assert candidate() == 0\n" ]
f_804995
Call a subprocess with arguments `c:\\Program Files\\VMware\\VMware Server\\vmware-cmd.bat` that may contain spaces
[ "subprocess" ]
[ { "function": "subprocess.Popen", "text": "class subprocess.Popen(args, bufsize=-1, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=True, shell=False, cwd=None, env=None, universal_newlines=None, startupinfo=None, creationflags=0, restore_signals=True, start_new_session=False, pass_fds=(), *, group=None, extra_groups=None, user=None, umask=-1, encoding=None, errors=None, text=None) \nExecute a child program in a new process. On POSIX, the class uses os.execvp()-like behavior to execute the child program. On Windows, the class uses the Windows CreateProcess() function. The arguments to Popen are as follows. args should be a sequence of program arguments or else a single string or path-like object. By default, the program to execute is the first item in args if args is a sequence. If args is a string, the interpretation is platform-dependent and described below. See the shell and executable arguments for additional differences from the default behavior. Unless otherwise stated, it is recommended to pass args as a sequence. An example of passing some arguments to an external program as a sequence is: Popen([\"/usr/bin/git\", \"commit\", \"-m\", \"Fixes a bug.\"])\n On POSIX, if args is a string, the string is interpreted as the name or path of the program to execute. However, this can only be done if not passing arguments to the program. Note It may not be obvious how to break a shell command into a sequence of arguments, especially in complex cases. shlex.split() can illustrate how to determine the correct tokenization for args: >>> import shlex, subprocess", "title": "python.library.subprocess#subprocess.Popen" } ]
26,441,253
def f_26441253(q):
return q
for n in [1,3,4,2]: q.put((-n, n))
from queue import PriorityQueue
def check(candidate):
[ "\n q = PriorityQueue()\n q = candidate(q)\n expected = [4, 3, 2, 1]\n for i in range(0, len(expected)):\n assert q.get()[1] == expected[i]\n" ]
f_26441253
reverse a priority queue `q` in python without using classes
[ "queue" ]
[ { "function": "q.put", "text": "Queue.put(item, block=True, timeout=None) \nPut item into the queue. If optional args block is true and timeout is None (the default), block if necessary until a free slot is available. If timeout is a positive number, it blocks at most timeout seconds and raises the Full exception if no free slot was available within that time. Otherwise (block is false), put an item on the queue if a free slot is immediately available, else raise the Full exception (timeout is ignored in that case).", "title": "python.library.queue#queue.Queue.put" } ]
18,897,261
def f_18897261(df): return
df['group'].plot(kind='bar', color=['r', 'g', 'b', 'r', 'g', 'b', 'r'])
import pandas as pd
def check(candidate):
[ "\n df = pd.DataFrame([1, 3, 4, 5, 7, 9], columns = ['group'])\n a = candidate(df)\n assert 'AxesSubplot' in str(type(a))\n" ]
f_18897261
make a barplot of data in column `group` of dataframe `df`, colour-coded according to a list of colours
[ "pandas" ]
[ { "function": "dataframe.plot", "text": "pandas.Series.plot Series.plot(*args, **kwargs)[source]\n \nMake plots of Series or DataFrame. Uses the backend specified by the option plotting.backend. By default, matplotlib is used. Parameters ", "title": "pandas.reference.api.pandas.series.plot" } ]
373,194
def f_373194(data): return
re.findall('([a-fA-F\\d]{32})', data)
import re
def check(candidate):
[ "\n assert candidate('6f96cfdfe5ccc627cadf24b41725caa4 gorilla') == ['6f96cfdfe5ccc627cadf24b41725caa4']\n" ]
f_373194
find all matches of regex pattern '([a-fA-F\\d]{32})' in string `data`
[ "re" ]
[ { "function": "re.findall", "text": "re.findall(pattern, string, flags=0) \nReturn all non-overlapping matches of pattern in string, as a list of strings. The string is scanned left-to-right, and matches are returned in the order found. If one or more groups are present in the pattern, return a list of groups; this will be a list of tuples if the pattern has more than one group. Empty matches are included in the result. Changed in version 3.7: Non-empty matches can now start just after a previous empty match.", "title": "python.library.re#re.findall" } ]
518,021
def f_518021(my_list): return
len(my_list)
def check(candidate):
[ "\n assert candidate([]) == 0\n", "\n assert candidate([1]) == 1\n", "\n assert candidate([1, 2]) == 2\n" ]
f_518021
Get the length of list `my_list`
[]
[]
518,021
def f_518021(l): return
len(l)
import numpy as np
def check(candidate):
[ "\n assert candidate([]) == 0\n", "\n assert candidate(np.array([1])) == 1\n", "\n assert candidate(np.array([1, 2])) == 2\n" ]
f_518021
Getting the length of array `l`
[ "numpy" ]
[]
518,021
def f_518021(s): return
len(s)
import numpy as np
def check(candidate):
[ "\n assert candidate([]) == 0\n", "\n assert candidate(np.array([1])) == 1\n", "\n assert candidate(np.array([1, 2])) == 2\n" ]
f_518021
Getting the length of array `s`
[ "numpy" ]
[]
518,021
def f_518021(my_tuple): return
len(my_tuple)
def check(candidate):
[ "\n assert candidate(()) == 0\n", "\n assert candidate(('aa', 'wfseg', '')) == 3\n", "\n assert candidate(('apple',)) == 1\n" ]
f_518021
Getting the length of `my_tuple`
[]
[]
518,021
def f_518021(my_string): return
len(my_string)
def check(candidate):
[ "\n assert candidate(\"sedfgbdjofgljnh\") == 15\n", "\n assert candidate(\" \") == 13\n", "\n assert candidate(\"vsdh4'cdf'\") == 10\n" ]
f_518021
Getting the length of `my_string`
[]
[]
40,452,956
def f_40452956(): return
b'\\a'.decode('unicode-escape')
def check(candidate):
[ "\n assert candidate() == '\\x07'\n" ]
f_40452956
remove escape character from string "\\a"
[]
[]
8,687,018
def f_8687018(): return
"""obama""".replace('a', '%temp%').replace('b', 'a').replace('%temp%', 'b')
def check(candidate):
[ "\n assert candidate() == 'oabmb'\n" ]
f_8687018
replace each 'a' with 'b' and each 'b' with 'a' in the string 'obama' in a single pass.
[]
[]
303,200
def f_303200():
return
shutil.rmtree('/folder_name')
import os
import shutil
from unittest.mock import Mock
def check(candidate):
[ "\n shutil.rmtree = Mock()\n os.walk = Mock(return_value = [])\n candidate()\n assert os.walk('/') == []\n" ]
f_303200
remove directory tree '/folder_name'
[ "os", "shutil" ]
[ { "function": "shutil.rmtree", "text": "shutil.rmtree(path, ignore_errors=False, onerror=None) \nDelete an entire directory tree; path must point to a directory (but not a symbolic link to a directory). If ignore_errors is true, errors resulting from failed removals will be ignored; if false or omitted, such errors are handled by calling a handler specified by onerror or, if that is omitted, they raise an exception. Note On platforms that support the necessary fd-based functions a symlink attack resistant version of rmtree() is used by default. On other platforms, the rmtree() implementation is susceptible to a symlink attack: given proper timing and circumstances, attackers can manipulate symlinks on the filesystem to delete files they wouldn’t be able to access otherwise. Applications can use the rmtree.avoids_symlink_attacks function attribute to determine which case applies. If onerror is provided, it must be a callable that accepts three parameters: function, path, and excinfo. The first parameter, function, is the function which raised the exception; it depends on the platform and implementation. The second parameter, path, will be the path name passed to function. The third parameter, excinfo, will be the exception information returned by sys.exc_info(). Exceptions raised by onerror will not be caught. Raises an auditing event shutil.rmtree with argument path. Changed in version 3.3: Added a symlink attack resistant version that is used automatically if platform supports fd-based functions. Changed in version 3.8: On Windows, will no longer delete the contents of a directory junction before removing the junction. \nrmtree.avoids_symlink_attacks \nIndicates whether the current platform and implementation provides a symlink attack resistant version of rmtree(). Currently this is only true for platforms supporting fd-based directory access functions. New in version 3.3.", "title": "python.library.shutil#shutil.rmtree" } ]
13,740,672
def f_13740672(data):
return data
def weekday(i):
    if i >= 1 and i <= 5:
        return True
    else:
        return False
data['weekday'] = data['my_dt'].apply(lambda x: weekday(x))
import pandas as pd
def check(candidate):
[ "\n data = pd.DataFrame([1, 2, 3, 4, 5, 6, 7], columns = ['my_dt'])\n data = candidate(data)\n assert data['weekday'][5] == False\n assert data['weekday'][6] == False\n for i in range (0, 5):\n assert data['weekday'][i]\n" ]
f_13740672
create a new column `weekday` in pandas data frame `data` based on the values in column `my_dt`
[ "pandas" ]
[ { "function": "dataframe.apply", "text": "pandas.DataFrame.apply DataFrame.apply(func, axis=0, raw=False, result_type=None, args=(), **kwargs)[source]\n \nApply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame’s index (axis=0) or the DataFrame’s columns (axis=1). By default (result_type=None), the final return type is inferred from the return type of the applied function. Otherwise, it depends on the result_type argument. Parameters ", "title": "pandas.reference.api.pandas.dataframe.apply" } ]
20,950,650
def f_20950650(x): return
sorted(x, key=x.get, reverse=True)
from collections import Counter
def check(candidate):
[ "\n x = Counter({'blue': 1, 'red': 2, 'green': 3})\n assert candidate(x) == ['green', 'red', 'blue']\n", "\n x = Counter({'blue': 1.234, 'red': 1.35, 'green': 1.789})\n assert candidate(x) == ['green', 'red', 'blue']\n", "\n x = Counter({'blue': \"b\", 'red': \"r\", 'green': \"g\"})\n assert candidate(x) == ['red', 'green', 'blue']\n" ]
f_20950650
reverse sort Counter `x` by values
[ "collections" ]
[]
20,950,650
def f_20950650(x): return
sorted(list(x.items()), key=lambda pair: pair[1], reverse=True)
from collections import Counter
def check(candidate):
[ "\n x = Counter({'blue': 1, 'red': 2, 'green': 3})\n assert candidate(x) == [('green', 3), ('red', 2), ('blue', 1)]\n", "\n x = Counter({'blue': 1.234, 'red': 1.35, 'green': 1.789})\n assert candidate(x) == [('green', 1.789), ('red', 1.35), ('blue', 1.234)]\n", "\n x = Counter({'blue': \"b\", 'red': \"r\", 'green': \"g\"})\n assert candidate(x) == [('red', \"r\"), ('green', \"g\"), ('blue', \"b\")]\n" ]
f_20950650
reverse sort counter `x` by value
[ "collections" ]
[]
9,775,297
def f_9775297(a, b): return
np.vstack((a, b))
import numpy as np
def check(candidate):
[ "\n a = np.array([[1, 2, 3], [4, 5, 6]])\n b = np.array([[9, 8, 7], [6, 5, 4]])\n assert np.array_equal(candidate(a, b), np.array([[1, 2, 3], [4, 5, 6], [9, 8, 7], [6, 5, 4]]))\n", "\n a = np.array([[1, 2.45, 3], [4, 0.55, 612]])\n b = np.array([[988, 8, 7], [6, 512, 4]])\n assert np.array_equal(candidate(a, b), np.array([[1, 2.45, 3], [4, 0.55, 612], [988, 8, 7], [6, 512, 4]]))\n" ]
f_9775297
append a numpy array 'b' to a numpy array 'a'
[ "numpy" ]
[ { "function": "numpy.vstack", "text": "numpy.vstack numpy.vstack(tup)[source]\n \nStack arrays in sequence vertically (row wise). This is equivalent to concatenation along the first axis after 1-D arrays of shape (N,) have been reshaped to (1,N). Rebuilds arrays divided by vsplit. This function makes most sense for arrays with up to 3 dimensions. For instance, for pixel-data with a height (first axis), width (second axis), and r/g/b channels (third axis). The functions concatenate, stack and block provide more general stacking and concatenation operations. Parameters ", "title": "numpy.reference.generated.numpy.vstack" } ]
21,887,754
def f_21887754(a, b): return
np.concatenate((a, b), axis=0)
import numpy as np
def check(candidate):
[ "\n a = np.array([[1, 5, 9], [2, 6, 10]])\n b = np.array([[3, 7, 11], [4, 8, 12]])\n assert np.array_equal(candidate(a, b), np.array([[1, 5, 9], [2, 6, 10], [3, 7, 11], [4, 8, 12]]))\n", "\n a = np.array([[1, 2.45, 3], [4, 0.55, 612]])\n b = np.array([[988, 8, 7], [6, 512, 4]])\n assert np.array_equal(candidate(a, b), np.array([[1, 2.45, 3], [4, 0.55, 612], [988, 8, 7], [6, 512, 4]]))\n" ]
f_21887754
numpy concatenate two arrays `a` and `b` along the first axis
[ "numpy" ]
[ { "function": "numpy.concatenate", "text": "numpy.concatenate numpy.concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting=\"same_kind\")\n \nJoin a sequence of arrays along an existing axis. Parameters ", "title": "numpy.reference.generated.numpy.concatenate" } ]
21,887,754
def f_21887754(a, b): return
np.concatenate((a, b), axis=1)
import numpy as np
def check(candidate):
[ "\n a = np.array([[1, 5, 9], [2, 6, 10]])\n b = np.array([[3, 7, 11], [4, 8, 12]])\n assert np.array_equal(candidate(a, b), np.array([[1, 5, 9, 3, 7, 11], [2, 6, 10, 4, 8, 12]]))\n", "\n a = np.array([[1, 2.45, 3], [4, 0.55, 612]])\n b = np.array([[988, 8, 7], [6, 512, 4]])\n assert np.array_equal(candidate(a, b), np.array([[1, 2.45, 3, 988, 8, 7], [4, 0.55, 612, 6, 512, 4]]))\n" ]
f_21887754
numpy concatenate two arrays `a` and `b` along the second axis
[ "numpy" ]
[ { "function": "numpy.concatenate", "text": "numpy.concatenate numpy.concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting=\"same_kind\")\n \nJoin a sequence of arrays along an existing axis. Parameters ", "title": "numpy.reference.generated.numpy.concatenate" } ]
21,887,754
def f_21887754(a, b): return
np.r_[(a[None, :], b[None, :])]
import numpy as np
def check(candidate):
[ "\n a = np.array([[1, 5, 9], [2, 6, 10]])\n b = np.array([[3, 7, 11], [4, 8, 12]])\n assert np.array_equal(candidate(a, b), np.array([[[1, 5, 9], [2, 6, 10]], [[3, 7, 11], [4, 8, 12]]]))\n", "\n a = np.array([[1, 2.45, 3], [4, 0.55, 612]])\n b = np.array([[988, 8, 7], [6, 512, 4]])\n assert np.array_equal(candidate(a, b), np.array([[[1, 2.45, 3], [4, 0.55, 612]], [[988, 8 , 7], [6, 512, 4]]]))\n" ]
f_21887754
numpy concatenate two arrays `a` and `b` along the first axis
[ "numpy" ]
[ { "function": "numpy.r_", "text": "numpy.r_ numpy.r_ = <numpy.lib.index_tricks.RClass object>\n \nTranslates slice objects to concatenation along the first axis. This is a simple way to build up arrays quickly. There are two use cases. If the index expression contains comma separated arrays, then stack them along their first axis. If the index expression contains slice notation or scalars then create a 1-D array with a range indicated by the slice notation. If slice notation is used, the syntax start:stop:step is equivalent to np.arange(start, stop, step) inside of the brackets. However, if step is an imaginary number (i.e. 100j) then its integer portion is interpreted as a number-of-points desired and the start and stop are inclusive. In other words start:stop:stepj is interpreted as np.linspace(start, stop, step, endpoint=1) inside of the brackets. After expansion of slice notation, all comma separated sequences are concatenated together. Optional character strings placed as the first element of the index expression can be used to change the output. The strings ‘r’ or ‘c’ result in matrix output. If the result is 1-D and ‘r’ is specified a 1 x N (row) matrix is produced. If the result is 1-D and ‘c’ is specified, then a N x 1 (column) matrix is produced. If the result is 2-D then both provide the same matrix result. A string integer specifies which axis to stack multiple comma separated arrays along. A string of two comma-separated integers allows indication of the minimum number of dimensions to force each entry into as the second integer (the axis to concatenate along is still the first integer). A string with three comma-separated integers allows specification of the axis to concatenate along, the minimum number of dimensions to force the entries to, and which axis should contain the start of the arrays which are less than the specified number of dimensions. In other words the third integer allows you to specify where the 1’s should be placed in the shape of the arrays that have their shapes upgraded. By default, they are placed in the front of the shape tuple. The third argument allows you to specify where the start of the array should be instead. Thus, a third argument of ‘0’ would place the 1’s at the end of the array shape. Negative integers specify where in the new shape tuple the last dimension of upgraded arrays should be placed, so the default is ‘-1’. Parameters ", "title": "numpy.reference.generated.numpy.r_" } ]
21,887,754
def f_21887754(a, b): return
np.array((a, b))
import numpy as np
def check(candidate):
[ "\n a = np.array([[1, 5, 9], [2, 6, 10]])\n b = np.array([[3, 7, 11], [4, 8, 12]])\n assert np.array_equal(candidate(a, b), np.array([[[1, 5, 9], [2, 6, 10]], [[3, 7, 11], [4, 8, 12]]]))\n", "\n a = np.array([[1, 2.45, 3], [4, 0.55, 612]])\n b = np.array([[988, 8, 7], [6, 512, 4]])\n assert np.array_equal(candidate(a, b), np.array([[[1, 2.45, 3], [4, 0.55, 612]], [[988, 8 , 7], [6, 512, 4]]]))\n" ]
f_21887754
numpy concatenate two arrays `a` and `b` along the first axis
[ "numpy" ]
[ { "function": "numpy.array", "text": "numpy.array numpy.array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0, like=None)\n \nCreate an array. Parameters ", "title": "numpy.reference.generated.numpy.array" } ]
2,805,231
def f_2805231(): return
socket.getaddrinfo('google.com', 80)
import socket
def check(candidate):
[ "\n res = candidate()\n assert all([(add[4][1] == 80) for add in res])\n" ]
f_2805231
fetch address information for host 'google.com' on port 80
[ "socket" ]
[ { "function": "socket.getaddrinfo", "text": "socket.getaddrinfo(host, port, family=0, type=0, proto=0, flags=0) \nTranslate the host/port argument into a sequence of 5-tuples that contain all the necessary arguments for creating a socket connected to that service. host is a domain name, a string representation of an IPv4/v6 address or None. port is a string service name such as 'http', a numeric port number or None. By passing None as the value of host and port, you can pass NULL to the underlying C API. The family, type and proto arguments can be optionally specified in order to narrow the list of addresses returned. Passing zero as a value for each of these arguments selects the full range of results. The flags argument can be one or several of the AI_* constants, and will influence how results are computed and returned. For example, AI_NUMERICHOST will disable domain name resolution and will raise an error if host is a domain name. The function returns a list of 5-tuples with the following structure: (family, type, proto, canonname, sockaddr) In these tuples, family, type, proto are all integers and are meant to be passed to the socket() function. canonname will be a string representing the canonical name of the host if AI_CANONNAME is part of the flags argument; else canonname will be empty. sockaddr is a tuple describing a socket address, whose format depends on the returned family (a (address, port) 2-tuple for AF_INET, a (address, port, flowinfo, scope_id) 4-tuple for AF_INET6), and is meant to be passed to the socket.connect() method. Raises an auditing event socket.getaddrinfo with arguments host, port, family, type, protocol. The following example fetches address information for a hypothetical TCP connection to example.org on port 80 (results may differ on your system if IPv6 isn’t enabled): >>> socket.getaddrinfo(\"example.org\", 80, proto=socket.IPPROTO_TCP)\n[(<AddressFamily.AF_INET6: 10>, <SocketType.SOCK_STREAM: 1>,\n 6, '', ('2606:2800:220:1:248:1893:25c8:1946', 80, 0, 0)),", "title": "python.library.socket#socket.getaddrinfo" } ]
17,552,997
def f_17552997(df): return
df.xs('sat', level='day', drop_level=False)
import pandas as pd
def check(candidate):
[ "\n df = pd.DataFrame({'year':[2008,2008,2008,2008,2009,2009,2009,2009], \n 'flavour':['strawberry','strawberry','banana','banana',\n 'strawberry','strawberry','banana','banana'],\n 'day':['sat','sun','sat','sun','sat','sun','sat','sun'],\n 'sales':[10,12,22,23,11,13,23,24]})\n df = df.set_index(['year','flavour','day'])\n assert candidate(df).to_dict() == {'sales': {(2008, 'strawberry', 'sat'): 10, (2008, 'banana', 'sat'): 22, (2009, 'strawberry', 'sat'): 11, (2009, 'banana', 'sat'): 23}}\n" ]
f_17552997
select the cross-section of dataframe `df` where the 'day' index level equals 'sat', without dropping the level
[ "pandas" ]
[ { "function": "df.xs", "text": "pandas.DataFrame.xs DataFrame.xs(key, axis=0, level=None, drop_level=True)[source]\n \nReturn cross-section from the Series/DataFrame. This method takes a key argument to select data at a particular level of a MultiIndex. Parameters ", "title": "pandas.reference.api.pandas.dataframe.xs" } ]
4,356,842
def f_4356842(): return
HttpResponse('Unauthorized', status=401)
from django.http import HttpResponse
from django.conf import settings
if not settings.configured:
    settings.configure(DEBUG=True)
def check(candidate):
[ "\n assert candidate().status_code == 401\n" ]
f_4356842
return a 401 unauthorized in django
[ "django" ]
[ { "function": "HttpResponse", "text": "class HttpResponse", "title": "django.ref.request-response#django.http.HttpResponse" } ]
13,598,363
def f_13598363(): return
Flask('test', template_folder='wherever')
from flask import Flask
def check(candidate):
[ "\n __name__ == \"test\"\n assert candidate().template_folder == \"wherever\"\n" ]
f_13598363
Flask set folder 'wherever' as the default template folder
[ "flask" ]
[ { "function": "Flask", "text": "class flask.Flask(import_name, static_url_path=None, static_folder='static', static_host=None, host_matching=False, subdomain_matching=False, template_folder='templates', instance_path=None, instance_relative_config=False, root_path=None) \nThe flask object implements a WSGI application and acts as the central object. It is passed the name of the module or package of the application. Once it is created it will act as a central registry for the view functions, the URL rules, template configuration and much more. The name of the package is used to resolve resources from inside the package or the folder the module is contained in depending on if the package parameter resolves to an actual python package (a folder with an __init__.py file inside) or a standard module (just a .py file). For more information about resource loading, see open_resource(). Usually you create a Flask instance in your main module or in the __init__.py file of your package like this: from flask import Flask\napp = Flask(__name__)", "title": "flask.api.index#flask.Flask" } ]
3,398,589
def f_3398589(c2):
return c2
c2.sort(key=lambda row: row[2])
def check(candidate):
[ "\n c2 = [[14, 25, 46], [1, 22, 53], [7, 8, 9]]\n candidate(c2)\n assert c2[0] == [7,8,9]\n", "\n c2 = [[14.343, 25.24, 46], [1, 22, 53.45], [7, 8.65, 9]]\n candidate(c2)\n assert c2[0] == [7,8.65,9]\n" ]
f_3398589
sort a list of lists `c2` by the third element of each row
[]
[]
3,398,589
def f_3398589(c2):
return c2
c2.sort(key=lambda row: (row[2], row[1], row[0]))
def check(candidate):
[ "\n c2 = [[14, 25, 46], [1, 22, 53], [7, 8, 9]]\n candidate(c2)\n assert c2[0] == [7,8,9]\n", "\n c2 = [[14.343, 25.24, 46], [1, 22, 53.45], [7, 8.65, 9]]\n candidate(c2)\n assert c2[0] == [7,8.65,9]\n" ]
f_3398589
sort a list of lists `c2` by the elements of each row in reverse order (third, then second, then first)
[]
[]
3,398,589
def f_3398589(c2):
return c2
c2.sort(key=lambda row: (row[2], row[1]))
def check(candidate):
[ "\n c2 = [[14, 25, 46], [1, 22, 53], [7, 8, 9]]\n candidate(c2)\n assert c2[0] == [7,8,9]\n", "\n c2 = [[14.343, 25.24, 46], [1, 22, 53.45], [7, 8.65, 9]]\n candidate(c2)\n assert c2[0] == [7,8.65,9]\n" ]
f_3398589
Sort a list of lists `c2` by the third and then the second element of each row
[]
[]
10,960,463
def f_10960463(): return
matplotlib.rc('font', **{'sans-serif': 'Arial', 'family': 'sans-serif'})
import matplotlib
def check(candidate):
[ "\n try:\n candidate()\n except:\n assert False\n" ]
f_10960463
set font `Arial` to display non-ascii characters in matplotlib
[ "matplotlib" ]
[ { "function": "matplotlib.rc", "text": "matplotlib.pyplot.rc matplotlib.pyplot.rc(group, **kwargs)[source]\n \nSet the current rcParams. group is the grouping for the rc, e.g., for lines.linewidth the group is lines, for axes.facecolor, the group is axes, and so on. Group may also be a list or tuple of group names, e.g., (xtick, ytick). kwargs is a dictionary attribute name/value pairs, e.g.,: rc('lines', linewidth=2, color='r')\n sets the current rcParams and is equivalent to: rcParams['lines.linewidth'] = 2", "title": "matplotlib._as_gen.matplotlib.pyplot.rc" } ]
20,576,618
def f_20576618(df): return
df['date'].apply(lambda x: x.toordinal())
import pandas as pd
def check(candidate):
[ "\n df = pd.DataFrame(\n {\n \"group\": [\"A\", \"A\", \"A\", \"A\", \"A\"],\n \"date\": pd.to_datetime([\"2020-01-02\", \"2020-01-13\", \"2020-02-01\", \"2020-02-23\", \"2020-03-05\"]),\n \"value\": [10, 20, 16, 31, 56],\n }) \n data_series = candidate(df).tolist()\n assert data_series[1] == 737437\n", "\n df = pd.DataFrame(\n {\n \"group\": [\"A\", \"A\", \"A\", \"A\", \"A\"],\n \"date\": pd.to_datetime([\"2020-01-02\", \"2020-01-13\", \"2020-02-01\", \"2020-02-23\", \"2020-03-05\"]),\n \"value\": [10, 20, 16, 31, 56],\n }) \n data_series = candidate(df).tolist()\n assert data_series[1] == 737437\n" ]
f_20576618
Convert DateTime column 'date' of pandas dataframe 'df' to ordinal
[ "pandas" ]
[ { "function": "dataframe.apply", "text": "pandas.DataFrame.apply DataFrame.apply(func, axis=0, raw=False, result_type=None, args=(), **kwargs)[source]\n \nApply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame’s index (axis=0) or the DataFrame’s columns (axis=1). By default (result_type=None), the final return type is inferred from the return type of the applied function. Otherwise, it depends on the result_type argument. Parameters ", "title": "pandas.reference.api.pandas.dataframe.apply" } ]
31,793,195
def f_31793195(df): return
df.index.get_loc('bob')
import pandas as pd
import numpy as np
def check(candidate):
[ "\n df = pd.DataFrame(data=np.asarray([[1,2,3],[4,5,6],[7,8,9]]), index=['alice', 'bob', 'charlie'])\n index = candidate(df)\n assert index == 1\n" ]
f_31793195
Get the integer location of a key `bob` in a pandas data frame `df`
[ "numpy", "pandas" ]
[ { "function": "df.get_loc", "text": "pandas.Index.get_loc Index.get_loc(key, method=None, tolerance=None)[source]\n \nGet integer location, slice or boolean mask for requested label. Parameters ", "title": "pandas.reference.api.pandas.index.get_loc" } ]
10,487,278
def f_10487278(my_dict):
return my_dict
my_dict.update({'third_key': 1})
def check(candidate):
[ "\n my_dict = {'a':1, 'b':2}\n assert candidate(my_dict) == {'a':1, 'b':2, 'third_key': 1}\n", "\n my_dict = {'c':1, 'd':2}\n assert candidate(my_dict) == {'c':1, 'd':2, 'third_key': 1}\n" ]
f_10487278
add an item with key 'third_key' and value 1 to a dictionary `my_dict`
[]
[]
10,487,278
def f_10487278():
return my_list
my_list = []
def check(candidate):
[ "\n assert candidate() == []\n" ]
f_10487278
declare an array `my_list`
[]
[]
10,487,278
def f_10487278(my_list):
return my_list
my_list.append(12)
def check(candidate):
[ "\n assert candidate([1,2]) == [1, 2, 12] \n", "\n assert candidate([5,6]) == [5, 6, 12]\n" ]
f_10487278
Append the item `12` to the list `my_list`
[]
[]
10,155,684
def f_10155684(myList):
return myList
myList.insert(0, 'wuggah')
def check(candidate):
[ "\n assert candidate([1,2]) == ['wuggah', 1, 2]\n", "\n assert candidate([]) == ['wuggah'] \n" ]
f_10155684
add an entry 'wuggah' at the beginning of list `myList`
[]
[]
3,519,125
def f_3519125(hex_str): return
bytes.fromhex(hex_str.replace('\\x', ''))
def check(candidate):
[ "\n assert candidate(\"\\\\xF3\\\\xBE\\\\x80\\\\x80\") == b'\\xf3\\xbe\\x80\\x80'\n" ]
f_3519125
convert a hex-string representation `hex_str` to actual bytes
[]
[]
40,144,769
def f_40144769(df): return
df[df.columns[-1]]
import pandas as pd
def check(candidate):
[ "\n df = pd.DataFrame([[1, 2, 3],[4,5,6]], columns=[\"a\", \"b\", \"c\"])\n assert candidate(df).tolist() == [3,6]\n", "\n df = pd.DataFrame([[\"Hello\", \"world!\"],[\"Hi\", \"world!\"]], columns=[\"a\", \"b\"])\n assert candidate(df).tolist() == [\"world!\", \"world!\"]\n" ]
f_40144769
select the last column of dataframe `df`
[ "pandas" ]
[ { "function": "dataframe.columns", "text": "pandas.DataFrame.columns DataFrame.columns\n \nThe column labels of the DataFrame.", "title": "pandas.reference.api.pandas.dataframe.columns" } ]
30,787,901
def f_30787901(df): return
df.loc[df['Letters'] == 'C', 'Letters'].values[0]
import pandas as pd
def check(candidate):
[ "\n df = pd.DataFrame([[\"a\", 1],[\"C\", 6]], columns=[\"Letters\", \"Numbers\"])\n assert candidate(df) == 'C'\n", "\n df = pd.DataFrame([[None, 1],[\"C\", 789]], columns=[\"Letters\", \"Names\"])\n assert candidate(df) == 'C'\n" ]
f_30787901
get the first value from dataframe `df` where column 'Letters' is equal to 'C'
[ "pandas" ]
[ { "function": "dataframe.loc", "text": "pandas.DataFrame.loc propertyDataFrame.loc\n \nAccess a group of rows and columns by label(s) or a boolean array. .loc[] is primarily label based, but may also be used with a boolean array. Allowed inputs are: A single label, e.g. 5 or 'a', (note that 5 is interpreted as a label of the index, and never as an integer position along the index). A list or array of labels, e.g. ['a', 'b', 'c']. ", "title": "pandas.reference.api.pandas.dataframe.loc" } ]
18,730,044
def f_18730044(): return
np.column_stack(([1, 2, 3], [4, 5, 6]))
import numpy as np
def check(candidate):
[ "\n assert np.all(candidate() == np.array([[1, 4], [2, 5], [3, 6]]))\n" ]
f_18730044
converting two lists `[1, 2, 3]` and `[4, 5, 6]` into a matrix
[ "numpy" ]
[ { "function": "numpy.column_stack", "text": "numpy.column_stack numpy.column_stack(tup)[source]\n \nStack 1-D arrays as columns into a 2-D array. Take a sequence of 1-D arrays and stack them as columns to make a single 2-D array. 2-D arrays are stacked as-is, just like with hstack. 1-D arrays are turned into 2-D columns first. Parameters ", "title": "numpy.reference.generated.numpy.column_stack" } ]
402,504
def f_402504(i): return
type(i)
def check(candidate):
[ "\n assert candidate(\"hello\") is str\n", "\n assert candidate(123) is int\n", "\n assert candidate(\"123\") is str\n", "\n assert candidate(123.4) is float\n" ]
f_402504
get the type of `i`
[]
[]
402,504
def f_402504(v): return
type(v)
def check(candidate):
[ "\n assert candidate(\"hello\") is str\n", "\n assert candidate(123) is int\n", "\n assert candidate(\"123\") is str\n", "\n assert candidate(123.4) is float\n" ]
f_402504
determine the type of variable `v`
[]
[]
402,504
def f_402504(v): return
type(v)
def check(candidate):
[ "\n assert candidate(\"hello\") is str\n", "\n assert candidate(123) is int\n", "\n assert candidate(\"123\") is str\n", "\n assert candidate(123.4) is float\n" ]
f_402504
determine the type of variable `v`
[]
[]
402,504
def f_402504(variable_name): return
type(variable_name)
def check(candidate):
[ "\n assert candidate(\"hello\") is str\n", "\n assert candidate(123) is int\n", "\n assert candidate(\"123\") is str\n", "\n assert candidate(123.4) is float\n" ]
f_402504
get the type of variable `variable_name`
[]
[]
2,300,756
def f_2300756(g): return
next(itertools.islice(g, 5, 5 + 1))
import itertools
def check(candidate):
[ "\n test = [1, 2, 3, 4, 5, 6, 7]\n assert(candidate(test) == 6)\n" ]
f_2300756
get the item at index 5 from a generator `g`
[ "itertools" ]
[ { "function": "itertools.islice", "text": "itertools.islice(iterable, stop) \nitertools.islice(iterable, start, stop[, step]) \nMake an iterator that returns selected elements from the iterable. If start is non-zero, then elements from the iterable are skipped until start is reached. Afterward, elements are returned consecutively unless step is set higher than one which results in items being skipped. If stop is None, then iteration continues until the iterator is exhausted, if at all; otherwise, it stops at the specified position. Unlike regular slicing, islice() does not support negative values for start, stop, or step. Can be used to extract related fields from data where the internal structure has been flattened (for example, a multi-line report may list a name field on every third line). Roughly equivalent to: def islice(iterable, *args):", "title": "python.library.itertools#itertools.islice" } ]
20,056,548
def f_20056548(word): return
'"{}"'.format(word)
def check(candidate):
[ "\n assert candidate('Some Random Word') == '\"Some Random Word\"'\n" ]
f_20056548
wrap string `word` in double quotes using string formatting
[]
[]
8,546,245
def f_8546245(list): return
""" """.join(list)
def check(candidate):
[ "\n test = ['hello', 'good', 'morning']\n assert candidate(test) == \"hello good morning\"\n" ]
f_8546245
join a list of strings `list` using a space ' '
[]
[]
2,276,416
def f_2276416():
return y
y = [[] for n in range(2)]
def check(candidate):
[ "\n assert(candidate() == [[], []])\n" ]
f_2276416
create list `y` containing two empty lists
[]
[]
3,925,614
def f_3925614(filename):
return data
data = [line.strip() for line in open(filename, 'r')]
def check(candidate):
[ "\n file1 = open(\"myfile.txt\", \"w\")\n L = [\"This is Delhi \\n\", \"This is Paris \\n\", \"This is London \\n\"]\n file1.writelines(L)\n file1.close()\n assert candidate('myfile.txt') == ['This is Delhi', 'This is Paris', 'This is London']\n" ]
f_3925614
read a file `filename` into a list `data`
[]
[]
22,187,233
def f_22187233(): return
"""""".join([char for char in 'it is icy' if char != 'i'])
def check(candidate):
[ "\n assert candidate() == 't s cy'\n" ]
f_22187233
delete all occurrences of character 'i' in string 'it is icy'
[]
[]
22,187,233
def f_22187233(): return
re.sub('i', '', 'it is icy')
import re
def check(candidate):
[ "\n assert candidate() == 't s cy'\n" ]
f_22187233
delete all instances of a character 'i' in a string 'it is icy'
[ "re" ]
[ { "function": "re.sub", "text": "re.sub(pattern, repl, string, count=0, flags=0) \nReturn the string obtained by replacing the leftmost non-overlapping occurrences of pattern in string by the replacement repl. If the pattern isn’t found, string is returned unchanged. repl can be a string or a function; if it is a string, any backslash escapes in it are processed. That is, \\n is converted to a single newline character, \\r is converted to a carriage return, and so forth. Unknown escapes of ASCII letters are reserved for future use and treated as errors. Other unknown escapes such as \\& are left alone. Backreferences, such as \\6, are replaced with the substring matched by group 6 in the pattern. For example: >>> re.sub(r'def\\s+([a-zA-Z_][a-zA-Z_0-9]*)\\s*\\(\\s*\\):',\n... r'static PyObject*\\npy_\\1(void)\\n{',\n... 'def myfunc():')\n'static PyObject*\\npy_myfunc(void)\\n{'\n If repl is a function, it is called for every non-overlapping occurrence of pattern. The function takes a single match object argument, and returns the replacement string. For example: >>> def dashrepl(matchobj):\n... if matchobj.group(0) == '-': return ' '\n... else: return '-'", "title": "python.library.re#re.sub" } ]
22,187,233
def f_22187233(): return
"""it is icy""".replace('i', '')
def check(candidate):
[ "\n assert candidate() == 't s cy'\n" ]
f_22187233
delete all characters "i" in string "it is icy"
[]
[]
13,413,590
def f_13413590(df): return
df.dropna(subset=[1])
import numpy as np
import pandas as pd
def check(candidate):
[ "\n data = {0:[3.0, 4.0, 2.0], 1:[2.0, 3.0, np.nan], 2:[np.nan, 3.0, np.nan]}\n df = pd.DataFrame(data)\n d = {0:[3.0, 4.0], 1:[2.0, 3.0], 2:[np.nan, 3.0]}\n res = pd.DataFrame(d)\n assert candidate(df).equals(res)\n" ]
f_13413590
Drop rows of pandas dataframe `df` having NaN in column at index "1"
[ "numpy", "pandas" ]
[ { "function": "df.dropna", "text": "pandas.DataFrame.dropna DataFrame.dropna(axis=0, how='any', thresh=None, subset=None, inplace=False)[source]\n \nRemove missing values. See the User Guide for more on which values are considered missing, and how to work with missing data. Parameters ", "title": "pandas.reference.api.pandas.dataframe.dropna" } ]
598,398
def f_598398(myList): return
[x for x in myList if x.n == 30]
import numpy as np
import pandas as pd
def check(candidate):
[ "\n class Data: \n def __init__(self, a, n): \n self.a = a\n self.n = n\n \n myList = [Data(i, 10*(i%4)) for i in range(20)]\n assert candidate(myList) == [myList[i] for i in [3, 7, 11, 15, 19]]\n" ]
f_598398
get the elements of list `myList` whose attribute `n` equals 30
[ "numpy", "pandas" ]
[]
10,351,772
def f_10351772(intstringlist):
return nums
nums = [int(x) for x in intstringlist]
def check(candidate):
[ "\n assert candidate(['1', '2', '3', '4', '5']) == [1, 2, 3, 4, 5]\n", "\n assert candidate(['001', '200', '3', '4', '5']) == [1, 200, 3, 4, 5]\n" ]
f_10351772
converting a list of strings `intstringlist` to a list of integers `nums`
[]
[]
493,386
def f_493386(): return
sys.stdout.write('.')
import sys
def check(candidate):
[ "\n assert candidate() == 1\n" ]
f_493386
print "." without newline
[ "sys" ]
[ { "function": "sys.write", "text": "sys — System-specific parameters and functions This module provides access to some variables used or maintained by the interpreter and to functions that interact strongly with the interpreter. It is always available. \nsys.abiflags \nOn POSIX systems where Python was built with the standard configure script, this contains the ABI flags as specified by PEP 3149. Changed in version 3.8: Default flags became an empty string (m flag for pymalloc has been removed). New in version 3.2. \n ", "title": "python.library.sys" }, { "function": "sys.stdout", "text": "sys.stdin \nsys.stdout \nsys.stderr \nFile objects used by the interpreter for standard input, output and errors: \nstdin is used for all interactive input (including calls to input()); \nstdout is used for the output of print() and expression statements and for the prompts of input(); The interpreter’s own prompts and its error messages go to stderr. These streams are regular text files like those returned by the open() function. Their parameters are chosen as follows: \nThe character encoding is platform-dependent. Non-Windows platforms use the locale encoding (see locale.getpreferredencoding()). On Windows, UTF-8 is used for the console device. Non-character devices such as disk files and pipes use the system locale encoding (i.e. the ANSI codepage). Non-console character devices such as NUL (i.e. where isatty() returns True) use the value of the console input and output codepages at startup, respectively for stdin and stdout/stderr. This defaults to the system locale encoding if the process is not initially attached to a console. The special behaviour of the console can be overridden by setting the environment variable PYTHONLEGACYWINDOWSSTDIO before starting Python. In that case, the console codepages are used as for any other character device. Under all platforms, you can override the character encoding by setting the PYTHONIOENCODING environment variable before starting Python or by using the new -X utf8 command line option and PYTHONUTF8 environment variable. However, for the Windows console, this only applies when PYTHONLEGACYWINDOWSSTDIO is also set. When interactive, the stdout stream is line-buffered. Otherwise, it is block-buffered like regular text files. The stderr stream is line-buffered in both cases. You can make both streams unbuffered by passing the -u command-line option or setting the PYTHONUNBUFFERED environment variable. Changed in version 3.9: Non-interactive stderr is now line-buffered instead of fully buffered. Note To write or read binary data from/to the standard streams, use the underlying binary buffer object. For example, to write bytes to stdout, use sys.stdout.buffer.write(b'abc'). However, if you are writing a library (and do not control in which context its code will be executed), be aware that the standard streams may be replaced with file-like objects like io.StringIO which do not support the buffer attribute.", "title": "python.library.sys#sys.stdout" } ]
6,569,528
def f_6569528(): return
int(round(2.52 * 100))
def check(candidate):
[ "\n assert candidate() == 252\n" ]
f_6569528
round off the float that is the product of `2.52 * 100` and convert it to an int
[]
[]
3,964,681
def f_3964681():
return files
os.chdir('/mydir') files = [] for file in glob.glob('*.txt'): files.append(file)
import os import glob from unittest.mock import Mock def check(candidate):
[ "\n samples = ['abc.txt']\n os.chdir = Mock()\n glob.glob = Mock(return_value = samples)\n assert candidate() == samples\n" ]
f_3964681
Find all files with extension '.txt' in directory '/mydir' and collect them in list `files`
[ "glob", "os" ]
[ { "function": "os.chdir", "text": "os.chdir(path) \nChange the current working directory to path. This function can support specifying a file descriptor. The descriptor must refer to an opened directory, not an open file. This function can raise OSError and subclasses such as FileNotFoundError, PermissionError, and NotADirectoryError. Raises an auditing event os.chdir with argument path. New in version 3.3: Added support for specifying path as a file descriptor on some platforms. Changed in version 3.6: Accepts a path-like object.", "title": "python.library.os#os.chdir" }, { "function": "glob.glob", "text": "glob.glob(pathname, *, recursive=False) \nReturn a possibly-empty list of path names that match pathname, which must be a string containing a path specification. pathname can be either absolute (like /usr/src/Python-1.5/Makefile) or relative (like ../../Tools/*/*.gif), and can contain shell-style wildcards. Broken symlinks are included in the results (as in the shell). Whether or not the results are sorted depends on the file system. If a file that satisfies conditions is removed or added during the call of this function, whether a path name for that file be included is unspecified. If recursive is true, the pattern “**” will match any files and zero or more directories, subdirectories and symbolic links to directories. If the pattern is followed by an os.sep or os.altsep then files will not match. Raises an auditing event glob.glob with arguments pathname, recursive. Note Using the “**” pattern in large directory trees may consume an inordinate amount of time. Changed in version 3.5: Support for recursive globs using “**”.", "title": "python.library.glob#glob.glob" } ]
3,964,681
def f_3964681(): return
[file for file in os.listdir('/mydir') if file.endswith('.txt')]
import os from unittest.mock import Mock def check(candidate):
[ "\n samples = ['abc.txt', 'f.csv']\n os.listdir = Mock(return_value = samples)\n assert candidate() == ['abc.txt']\n" ]
f_3964681
Find all files in directory "/mydir" with extension ".txt"
[ "os" ]
[ { "function": "os.listdir", "text": "os.listdir(path='.') \nReturn a list containing the names of the entries in the directory given by path. The list is in arbitrary order, and does not include the special entries '.' and '..' even if they are present in the directory. If a file is removed from or added to the directory during the call of this function, whether a name for that file be included is unspecified. path may be a path-like object. If path is of type bytes (directly or indirectly through the PathLike interface), the filenames returned will also be of type bytes; in all other circumstances, they will be of type str. This function can also support specifying a file descriptor; the file descriptor must refer to a directory. Raises an auditing event os.listdir with argument path. Note To encode str filenames to bytes, use fsencode(). See also The scandir() function returns directory entries along with file attribute information, giving better performance for many common use cases. Changed in version 3.2: The path parameter became optional. New in version 3.3: Added support for specifying path as an open file descriptor. Changed in version 3.6: Accepts a path-like object.", "title": "python.library.os#os.listdir" } ]
3,964,681
def f_3964681(): return
[file for (root, dirs, files) in os.walk('/mydir') for file in files if file.endswith('.txt')]
import os from unittest.mock import Mock def check(candidate):
[ "\n name = '/mydir'\n samples = [(name, [], ['abc.txt', 'f.csv'])]\n os.walk = Mock(return_value = samples)\n assert candidate() == ['abc.txt']\n" ]
f_3964681
Find all files in directory "/mydir" with extension ".txt"
[ "os" ]
[ { "function": "os.walk", "text": "os.walk(top, topdown=True, onerror=None, followlinks=False) \nGenerate the file names in a directory tree by walking the tree either top-down or bottom-up. For each directory in the tree rooted at directory top (including top itself), it yields a 3-tuple (dirpath, dirnames,\nfilenames). dirpath is a string, the path to the directory. dirnames is a list of the names of the subdirectories in dirpath (excluding '.' and '..'). filenames is a list of the names of the non-directory files in dirpath. Note that the names in the lists contain no path components. To get a full path (which begins with top) to a file or directory in dirpath, do os.path.join(dirpath, name). Whether or not the lists are sorted depends on the file system. If a file is removed from or added to the dirpath directory during generating the lists, whether a name for that file be included is unspecified. If optional argument topdown is True or not specified, the triple for a directory is generated before the triples for any of its subdirectories (directories are generated top-down). If topdown is False, the triple for a directory is generated after the triples for all of its subdirectories (directories are generated bottom-up). No matter the value of topdown, the list of subdirectories is retrieved before the tuples for the directory and its subdirectories are generated. When topdown is True, the caller can modify the dirnames list in-place (perhaps using del or slice assignment), and walk() will only recurse into the subdirectories whose names remain in dirnames; this can be used to prune the search, impose a specific order of visiting, or even to inform walk() about directories the caller creates or renames before it resumes walk() again. Modifying dirnames when topdown is False has no effect on the behavior of the walk, because in bottom-up mode the directories in dirnames are generated before dirpath itself is generated. By default, errors from the scandir() call are ignored. If optional argument onerror is specified, it should be a function; it will be called with one argument, an OSError instance. It can report the error to continue with the walk, or raise the exception to abort the walk. Note that the filename is available as the filename attribute of the exception object. By default, walk() will not walk down into symbolic links that resolve to directories. Set followlinks to True to visit directories pointed to by symlinks, on systems that support them. Note Be aware that setting followlinks to True can lead to infinite recursion if a link points to a parent directory of itself. walk() does not keep track of the directories it visited already. Note If you pass a relative pathname, don’t change the current working directory between resumptions of walk(). walk() never changes the current directory, and assumes that its caller doesn’t either. This example displays the number of bytes taken by non-directory files in each directory under the starting directory, except that it doesn’t look under any CVS subdirectory: import os\nfrom os.path import join, getsize\nfor root, dirs, files in os.walk('python/Lib/email'):\n print(root, \"consumes\", end=\" \")\n print(sum(getsize(join(root, name)) for name in files), end=\" \")\n print(\"bytes in\", len(files), \"non-directory files\")\n if 'CVS' in dirs:", "title": "python.library.os#os.walk" } ]
20,865,487
def f_20865487(df): return
df.plot(legend=False)
import os import pandas as pd def check(candidate):
[ "\n df = pd.DataFrame([1, 2, 3, 4, 5], columns = ['Vals'])\n res = candidate(df)\n assert 'AxesSubplot' in str(type(res))\n assert res.legend_ is None\n" ]
f_20865487
plot dataframe `df` without a legend
[ "os", "pandas" ]
[ { "function": "df.plot", "text": "pandas.DataFrame.plot DataFrame.plot(*args, **kwargs)[source]\n \nMake plots of Series or DataFrame. Uses the backend specified by the option plotting.backend. By default, matplotlib is used. Parameters ", "title": "pandas.reference.api.pandas.dataframe.plot" } ]
13,368,659
def f_13368659(): return
['192.168.%d.%d'%(i, j) for i in range(256) for j in range(256)]
def check(candidate):
[ "\n addrs = candidate()\n assert len(addrs) == 256*256\n assert addrs == [f'192.168.{i}.{j}' for i in range(256) for j in range(256)]\n" ]
f_13368659
loop through the IP address range "192.168.x.x"
[]
[]
4,065,737
def f_4065737(x): return
sum(1 << i for i, b in enumerate(x) if b)
def check(candidate):
[ "\n assert candidate([1,2,3]) == 7\n", "\n assert candidate([1,2,None,3,None]) == 11\n" ]
f_4065737
Treat list `x` as binary digits (least significant first) and sum the decimal values contributed by its truthy elements
[]
[]
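The list is read as bits with the least significant bit first; each truthy element at index i contributes 2**i. A quick check against the record's own test values (bits_to_int is a hypothetical wrapper name):

def bits_to_int(x):
    # position i contributes 2**i when the element at i is truthy
    return sum(1 << i for i, b in enumerate(x) if b)

print(bits_to_int([1, 2, 3]))              # 1 + 2 + 4 = 7
print(bits_to_int([1, 2, None, 3, None]))  # 1 + 2 + 8 = 11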
8,691,311
def f_8691311(line1, line2, line3, target):
return
target.write('%r\n%r\n%r\n' % (line1, line2, line3))
def check(candidate):
[ "\n file_name = 'abc.txt'\n lines = ['fgh', 'ijk', 'mnop']\n f = open(file_name, 'a')\n candidate(lines[0], lines[1], lines[2], f)\n f.close()\n with open(file_name, 'r') as f:\n f_lines = f.readlines()\n for i in range (0, len(lines)):\n assert lines[i] in f_lines[i]\n" ]
f_8691311
write the strings `line1`, `line2` and `line3` to file `target`, one per line, with a single write call
[]
[]
10,632,111
def f_10632111(data): return
[y for x in data for y in (x if isinstance(x, list) else [x])]
def check(candidate):
[ "\n data = [[1, 2], [3]]\n assert candidate(data) == [1, 2, 3]\n", "\n data = [[1, 2], [3], []]\n assert candidate(data) == [1, 2, 3]\n", "\n data = [1,2,3]\n assert candidate(data) == [1, 2, 3]\n" ]
f_10632111
Convert list of lists `data` into a flat list
[]
[]
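A quick demonstration that nested lists are spliced in while non-list items pass through unchanged; the sample data extends the tests with a bare item and an empty sublist.

data = [[1, 2], [3], 4, []]
flat = [y for x in data for y in (x if isinstance(x, list) else [x])]
print(flat)  # [1, 2, 3, 4]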
15,392,730
def f_15392730(): return
'foo\nbar'.encode('unicode_escape')
def check(candidate):
[ "\n assert candidate() == b'foo\\\\nbar'\n" ]
f_15392730
Print the newline character literally as `\n` in the string `foo\nbar`
[]
[]
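The escaping happens on encoding, so the result is a bytes object; decode it back to str if a printable string is wanted. A short sketch:

s = 'foo\nbar'
escaped = s.encode('unicode_escape')  # b'foo\\nbar' — a bytes object
print(escaped.decode('ascii'))        # prints foo\nbar on a single line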
1,010,961
def f_1010961(s): return
"""""".join(s.rsplit(',', 1))
def check(candidate):
[ "\n assert candidate('abc, def, klm') == 'abc, def klm'\n" ]
f_1010961
remove the last comma character ',' from string `s`
[]
[]
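rsplit(',', 1) splits only at the right-most comma, and joining with the empty string drops that comma while keeping everything else. A short check with the test data:

s = 'abc, def, klm'
print(''.join(s.rsplit(',', 1)))  # 'abc, def klm' — only the right-most comma is removed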
23,855,976
def f_23855976(x): return
(x[1:] + x[:-1]) / 2
import numpy as np def check(candidate):
[ "\n x = np.array([ 1230., 1230., 1227., 1235., 1217., 1153., 1170.])\n xm = np.array([1230. , 1228.5, 1231. , 1226. , 1185. , 1161.5])\n assert np.array_equal(candidate(x), xm)\n" ]
f_23855976
calculate the mean of each element in array `x` and the element preceding it
[ "numpy" ]
[]
23,855,976
def f_23855976(x): return
x[:-1] + (x[1:] - x[:-1]) / 2
import numpy as np def check(candidate):
[ "\n x = np.array([ 1230., 1230., 1227., 1235., 1217., 1153., 1170.])\n xm = np.array([1230. , 1228.5, 1231. , 1226. , 1185. , 1161.5])\n assert np.array_equal(candidate(x), xm)\n" ]
f_23855976
get an array of the means of each pair of consecutive values in numpy array `x`
[ "numpy" ]
[]
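Both 23,855,976 snippets compute the midpoints of consecutive elements; this second form adds half the difference instead of summing first. A small check with the record's test data:

import numpy as np

x = np.array([1230., 1230., 1227., 1235., 1217., 1153., 1170.])
mid = x[:-1] + (x[1:] - x[:-1]) / 2   # same midpoints as (x[1:] + x[:-1]) / 2
print(mid)                            # [1230.  1228.5 1231.  1226.  1185.  1161.5]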
6,375,343
def f_6375343():
return arr
arr = numpy.fromiter(codecs.open('new.txt', encoding='utf-8'), dtype='<U2')
import numpy import codecs import numpy as np def check(candidate):
[ "\n with open ('new.txt', 'a', encoding='utf-8') as f:\n f.write('ट')\n f.write('ज')\n arr = candidate()\n assert arr[0] == 'टज'\n" ]
f_6375343
load UTF-8-encoded data from file `new.txt` into numpy array `arr`
[ "codecs", "numpy" ]
[ { "function": "numpy.fromiter", "text": "numpy.fromiter numpy.fromiter(iter, dtype, count=- 1, *, like=None)\n \nCreate a new 1-dimensional array from an iterable object. Parameters ", "title": "numpy.reference.generated.numpy.fromiter" }, { "function": "codecs.open", "text": "codecs.open(filename, mode='r', encoding=None, errors='strict', buffering=-1) \nOpen an encoded file using the given mode and return an instance of StreamReaderWriter, providing transparent encoding/decoding. The default file mode is 'r', meaning to open the file in read mode. Note Underlying encoded files are always opened in binary mode. No automatic conversion of '\\n' is done on reading and writing. The mode argument may be any binary mode acceptable to the built-in open() function; the 'b' is automatically added. encoding specifies the encoding which is to be used for the file. Any encoding that encodes to and decodes from bytes is allowed, and the data types supported by the file methods depend on the codec used. errors may be given to define the error handling. It defaults to 'strict' which causes a ValueError to be raised in case an encoding error occurs. buffering has the same meaning as for the built-in open() function. It defaults to -1 which means that the default buffer size will be used.", "title": "python.library.codecs#codecs.open" } ]
1,547,733
def f_1547733(l):
return l
l = sorted(l, key=itemgetter('time'), reverse=True)
from operator import itemgetter def check(candidate):
[ "\n l = [ {'time':33}, {'time':11}, {'time':66} ]\n assert candidate(l) == [{'time':66}, {'time':33}, {'time':11}]\n" ]
f_1547733
reverse-sort the list of dicts `l` by the value of key `time`
[ "operator" ]
[ { "function": "operator.itemgetter", "text": "operator.itemgetter(item) \noperator.itemgetter(*items) \nReturn a callable object that fetches item from its operand using the operand’s __getitem__() method. If multiple items are specified, returns a tuple of lookup values. For example: After f = itemgetter(2), the call f(r) returns r[2]. After g = itemgetter(2, 5, 3), the call g(r) returns (r[2], r[5], r[3]). Equivalent to: def itemgetter(*items):", "title": "python.library.operator#operator.itemgetter" } ]
1,547,733
def f_1547733(l):
return l
l = sorted(l, key=lambda a: a['time'], reverse=True)
def check(candidate):
[ "\n l = [ {'time':33}, {'time':11}, {'time':66} ]\n assert candidate(l) == [{'time':66}, {'time':33}, {'time':11}]\n" ]
f_1547733
Sort a list of dictionaries `l` by key `time` in descending order
[]
[]
37,080,612
def f_37080612(df): return
df.loc[df[0].str.contains('(Hel|Just)')]
import pandas as pd def check(candidate):
[ "\n df = pd.DataFrame([['Hello', 'World'], ['Just', 'Wanted'], ['To', 'Say'], ['I\\'m', 'Tired']])\n df1 = candidate(df)\n assert df1[0][0] == 'Hello'\n assert df1[0][1] == 'Just'\n" ]
f_37080612
get rows of dataframe `df` that match regex '(Hel|Just)'
[ "pandas" ]
[ { "function": "pandas.dataframe.loc", "text": "pandas.DataFrame.loc propertyDataFrame.loc\n \nAccess a group of rows and columns by label(s) or a boolean array. .loc[] is primarily label based, but may also be used with a boolean array. Allowed inputs are: A single label, e.g. 5 or 'a', (note that 5 is interpreted as a label of the index, and never as an integer position along the index). A list or array of labels, e.g. ['a', 'b', 'c']. ", "title": "pandas.reference.api.pandas.dataframe.loc" }, { "function": "str.contains", "text": "pandas.Series.str.contains Series.str.contains(pat, case=True, flags=0, na=None, regex=True)[source]\n \nTest if pattern or regex is contained within a string of a Series or Index. Return boolean Series or Index based on whether a given pattern or regex is contained within a string of a Series or Index. Parameters ", "title": "pandas.reference.api.pandas.series.str.contains" } ]
14,716,342
def f_14716342(your_string): return
re.search('\\[(.*)\\]', your_string).group(1)
import re def check(candidate):
[ "\n assert candidate('[uranus]') == 'uranus'\n", "\n assert candidate('hello[world] !') == 'world'\n" ]
f_14716342
find the string in `your_string` between two special characters "[" and "]"
[ "re" ]
[ { "function": "re.search", "text": "re.search(pattern, string, flags=0) \nScan through string looking for the first location where the regular expression pattern produces a match, and return a corresponding match object. Return None if no position in the string matches the pattern; note that this is different from finding a zero-length match at some point in the string.", "title": "python.library.re#re.search" }, { "function": "re.Match.group", "text": "Match.group([group1, ...]) \nReturns one or more subgroups of the match. If there is a single argument, the result is a single string; if there are multiple arguments, the result is a tuple with one item per argument. Without arguments, group1 defaults to zero (the whole match is returned). If a groupN argument is zero, the corresponding return value is the entire matching string; if it is in the inclusive range [1..99], it is the string matching the corresponding parenthesized group. If a group number is negative or larger than the number of groups defined in the pattern, an IndexError exception is raised. If a group is contained in a part of the pattern that did not match, the corresponding result is None. If a group is contained in a part of the pattern that matched multiple times, the last match is returned. >>> m = re.match(r\"(\\w+) (\\w+)\", \"Isaac Newton, physicist\")", "title": "python.library.re#re.Match.group" } ]
18,684,076
def f_18684076(): return
[d.strftime('%Y%m%d') for d in pandas.date_range('20130226', '20130302')]
import pandas def check(candidate):
[ "\n assert candidate() == ['20130226', '20130227', '20130228', '20130301', '20130302']\n" ]
f_18684076
create a list of date strings in 'yyyymmdd' format with Python Pandas from '20130226' to '20130302'
[ "pandas" ]
[ { "function": "d.strftime", "text": "pandas.Timestamp.strftime Timestamp.strftime(format)\n \nReturn a string representing the given POSIX timestamp controlled by an explicit format string. Parameters \n \nformat:str\n\n\nFormat string to convert Timestamp to string. See strftime documentation for more information on the format string: https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior. Examples ", "title": "pandas.reference.api.pandas.timestamp.strftime" }, { "function": "pandas.date_range", "text": "pandas.date_range pandas.date_range(start=None, end=None, periods=None, freq=None, tz=None, normalize=False, name=None, closed=NoDefault.no_default, inclusive=None, **kwargs)[source]\n \nReturn a fixed frequency DatetimeIndex. Returns the range of equally spaced time points (where the difference between any two adjacent points is specified by the given frequency) such that they all satisfy start <[=] x <[=] end, where the first one and the last one are, resp., the first and last time points in that range that fall on the boundary of freq (if given as a frequency string) or that are valid for freq (if given as a pandas.tseries.offsets.DateOffset). (If exactly one of start, end, or freq is not specified, this missing parameter can be computed given periods, the number of timesteps in the range. See the note below.) Parameters ", "title": "pandas.reference.api.pandas.date_range" } ]
1,666,700
def f_1666700(): return
"""The big brown fox is brown""".count('brown')
def check(candidate):
[ "\n assert candidate() == 2\n" ]
f_1666700
count the number of times the string 'brown' occurs in the string 'The big brown fox is brown'
[]
[]
18,979,111
def f_18979111(request_body): return
json.loads(request_body)
import json def check(candidate):
[ "\n x = \"\"\"{\n \"Name\": \"Jennifer Smith\",\n \"Contact Number\": 7867567898,\n \"Email\": \"jen123@gmail.com\",\n \"Hobbies\":[\"Reading\", \"Sketching\", \"Horse Riding\"]\n }\"\"\"\n assert candidate(x) == {'Hobbies': ['Reading', 'Sketching', 'Horse Riding'], 'Name': 'Jennifer Smith', 'Email': 'jen123@gmail.com', 'Contact Number': 7867567898}\n" ]
f_18979111
decode JSON string `request_body` into a Python dict
[ "json" ]
[ { "function": "json.loads", "text": "json.loads(s, *, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw) \nDeserialize s (a str, bytes or bytearray instance containing a JSON document) to a Python object using this conversion table. The other arguments have the same meaning as in load(). If the data being deserialized is not a valid JSON document, a JSONDecodeError will be raised. Changed in version 3.6: s can now be of type bytes or bytearray. The input encoding should be UTF-8, UTF-16 or UTF-32. Changed in version 3.9: The keyword argument encoding has been removed.", "title": "python.library.json#json.loads" } ]
7,243,750
def f_7243750(url, file_name): return
urllib.request.urlretrieve(url, file_name)
import urllib def check(candidate):
[ "\n file_name = 'g.html'\n candidate('https://asia.nikkei.com/Business/Tech/Semiconductors/U.S.-chip-tool-maker-Synopsys-expands-in-Vietnam-amid-China-tech-war', file_name)\n with open (file_name, 'r') as f:\n lines = f.readlines()\n if len(lines) == 0: assert False\n else: assert True\n" ]
f_7243750
download the file from url `url` and save it under file `file_name`
[ "urllib" ]
[ { "function": "urllib.urlretrieve", "text": "urllib.request.urlretrieve(url, filename=None, reporthook=None, data=None) \nCopy a network object denoted by a URL to a local file. If the URL points to a local file, the object will not be copied unless filename is supplied. Return a tuple (filename, headers) where filename is the local file name under which the object can be found, and headers is whatever the info() method of the object returned by urlopen() returned (for a remote object). Exceptions are the same as for urlopen(). The second argument, if present, specifies the file location to copy to (if absent, the location will be a tempfile with a generated name). The third argument, if present, is a callable that will be called once on establishment of the network connection and once after each block read thereafter. The callable will be passed three arguments; a count of blocks transferred so far, a block size in bytes, and the total size of the file. The third argument may be -1 on older FTP servers which do not return a file size in response to a retrieval request. The following example illustrates the most common usage scenario: >>> import urllib.request", "title": "python.library.urllib.request#urllib.request.urlretrieve" } ]
743,806
def f_743806(text): return
text.split()
def check(candidate):
[ "\n assert candidate('The quick brown fox') == ['The', 'quick', 'brown', 'fox']\n", "\n assert candidate('hello!') == ['hello!']\n", "\n assert candidate('hello world !') == ['hello', 'world', '!']\n" ]
f_743806
split string `text` by space
[]
[]
743,806
def f_743806(text): return
text.split(',')
def check(candidate):
[ "\n assert candidate('The quick brown fox') == ['The quick brown fox']\n", "\n assert candidate('The,quick,brown,fox') == ['The', 'quick', 'brown', 'fox']\n" ]
f_743806
split string `text` by ","
[]
[]
743,806
def f_743806(line): return
line.split()
def check(candidate):
[ "\n assert candidate('The quick brown fox') == ['The', 'quick', 'brown', 'fox']\n" ]
f_743806
Split string `line` into a list by whitespace
[]
[]
35,044,115
def f_35044115(s): return
[re.sub('(?<!\\d)\\.(?!\\d)', ' ', i) for i in s]
import re def check(candidate):
[ "\n assert candidate('h.j.k') == ['h', ' ', 'j', ' ', 'k']\n" ]
f_35044115
replace dot characters '.' that are not adjacent to digits with a space ' ' in each element of `s`
[ "re" ]
[ { "function": "re.sub", "text": "re.sub(pattern, repl, string, count=0, flags=0) \nReturn the string obtained by replacing the leftmost non-overlapping occurrences of pattern in string by the replacement repl. If the pattern isn’t found, string is returned unchanged. repl can be a string or a function; if it is a string, any backslash escapes in it are processed. That is, \\n is converted to a single newline character, \\r is converted to a carriage return, and so forth. Unknown escapes of ASCII letters are reserved for future use and treated as errors. Other unknown escapes such as \\& are left alone. Backreferences, such as \\6, are replaced with the substring matched by group 6 in the pattern. For example: >>> re.sub(r'def\\s+([a-zA-Z_][a-zA-Z_0-9]*)\\s*\\(\\s*\\):',\n... r'static PyObject*\\npy_\\1(void)\\n{',\n... 'def myfunc():')\n'static PyObject*\\npy_myfunc(void)\\n{'\n If repl is a function, it is called for every non-overlapping occurrence of pattern. The function takes a single match object argument, and returns the replacement string. For example: >>> def dashrepl(matchobj):\n... if matchobj.group(0) == '-': return ' '\n... else: return '-'", "title": "python.library.re#re.sub" } ]