Each row in this dataset describes one Python function or method extracted from a source repository. Column schema (string columns show their length range, numeric columns their value range):

| column | type | values |
|---|---|---|
| `name` | string | lengths 1–152 |
| `class_name` | string | lengths 1–51 |
| `class_bases` | string | lengths 0–159 |
| `is_member` | bool | 2 classes |
| `args` | string | lengths 0–804 |
| `class_docstr` | string | lengths 4–8.19k |
| `class_docstr_tok` | string | lengths 2–11.6k |
| `docstr` | string | lengths 0–11.4k |
| `docstr_tok` | string | lengths 2–13.4k |
| `returns` | string | lengths 0–260 |
| `code` | string | lengths 21–52.4k |
| `code_tok` | string | lengths 33–92.8k |
| `lstart` | int64 | 1–1.75k |
| `lend` | int64 | 5–1.75k |
| `raises` | string | 16 classes |
| `filename` | string | lengths 5–66 |
| `file_path` | string | lengths 12–161 |
| `imports` | string | lengths 0–1.77k |
| `total_objects` | int64 | 15–15 |
| `num_classes` | float64 | 1–7 |
| `num_imports` | int64 | 0–14 |
| `num_functions` | int64 | 0–15 |
| `num_all_bases` | float64 | 0–9 |
| `num_methods` | float64 | 1–14 |
| `num_bases` | float64 | 1–7 |
| `label_desc` | string | lengths 69–1.05k |
| `label_desc_len` | int64 | 69–1.05k |
| `label_id` | string | 15 classes |
| `__index_level_0__` | int64 | 468–2.35M |
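A quick way to browse rows with this schema is the `datasets` library; the Hub id below is a placeholder, not this dataset's real name:

```python
from datasets import load_dataset

# Placeholder id -- substitute the dataset's actual Hub path.
ds = load_dataset("someuser/python-code-objects", split="train")

row = ds[0]
print(row["name"], row["class_name"], row["label_id"])
print(row["code"][:120])  # each function's source is stored as one string
```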

---

name: get_object_members
class_name: HasTraitsDocumenter
class_bases: ClassDocumenter
is_member: true
args: self,want_all
class_docstr: Specialized Documenter subclass for traits
["Specialized","Documenter","subclass","for","traits"]
docstr: Add traits to members list
["Add","traits","to","members","list"]
returns: check, unknown
code:
```python
def get_object_members(self, want_all):
    """Add traits to members list"""
    check, members = super().get_object_members(want_all)
    get_traits = (
        self.object.class_own_traits
        if self.options.inherited_members
        else self.object.class_traits
    )
    members_new = OrderedDict()
    for m in members:
        members_new[m[0]] = m[1]
    traits = tuple(get_traits().items())
    for name, trait in traits:
        if name not in members_new:
            # Don't add a member that would normally be filtered
            continue
            # pass # FIXME: Debugging
        # put help in __doc__ where autodoc will look for it
        trait.__doc__ = trait.help or extended_trait_info(
            getattr(self.object, name)
        )
        members_new[name] = trait
    return check, [kv for kv in members_new.items()]
```
["def","get_object_members","(","self",",","want_all",")",":","``","''","''","Add","traits","to","members","list","''","''","''","check",",","members","=","super","(",")",".get_object_members","(","want_all",")","get_traits","=","(","self.object.class_own_traits","if","self.options.inherited_members","else","self.object.class_traits",")","members_new","=","OrderedDict","(",")","for","m","in","members",":","members_new","[","m","[","0","]","]","=","m","[","1","]","traits","=","tuple","(","get_traits","(",")",".items","(",")",")","for","name",",","trait","in","traits",":","if","name","not","in","members_new",":","#","Do","n't","add","a","member","that","would","normally","be","filtered","continue","#","pass","#","FIXME",":","Debugging","#","put","help","in","__doc__","where","autodoc","will","look","for","it","trait.__doc__","=","trait.help","or","extended_trait_info","(","getattr","(","self.object",",","name",")",")","members_new","[","name","]","=","trait","return","check",",","[","kv","for","kv","in","members_new.items","(",")","]"]
lstart: 51
lend: 70
raises: null
filename: autodoc_traits.py
file_path: pythreejs/docs/sphinxext/autodoc_traits.py
imports: from collections import OrderedDict from traitlets import TraitType, Undefined, Container, Dict, Any, HasTraits from sphinx.ext.autodoc import ClassDocumenter, AttributeDocumenter
total_objects: 15
num_classes: 2
num_imports: 3
num_functions: 3
num_all_bases: 2
num_methods: 2
num_bases: 1
label_desc: Use image node_id 2 for calling the HasTraitsDocumenter obj's underlying member method code with example usage: obj.get_object_members(want_all) and returns: check, unknown
label_desc_len: 173
label_id: node_id 2
__index_level_0__: 1,691,041
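The method copies each trait's `help` text into `__doc__` so autodoc will find it; a minimal standalone sketch of that mechanism (the class and trait names here are invented for illustration):

```python
from traitlets import HasTraits, Int

class Widget(HasTraits):
    count = Int(0, help="Number of items to render")

trait = Widget.class_traits()["count"]
trait.__doc__ = trait.help  # what get_object_members does before handing off
print(trait.__doc__)        # -> "Number of items to render"
```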

---

name: can_document_member
class_name: TraitDocumenter
class_bases: AttributeDocumenter
is_member: true
args: cls,member,membername,isattr,parent
class_docstr: null
class_docstr_tok: null
docstr: null
docstr_tok: null
returns: isinstance
code:
```python
def can_document_member(cls, member, membername, isattr, parent):
    return isinstance(member, TraitType)
```
["def","can_document_member","(","cls",",","member",",","membername",",","isattr",",","parent",")",":","return","isinstance","(","member",",","TraitType",")"]
lstart: 80
lend: 81
raises: null
filename: autodoc_traits.py
file_path: pythreejs/docs/sphinxext/autodoc_traits.py
imports: from collections import OrderedDict from traitlets import TraitType, Undefined, Container, Dict, Any, HasTraits from sphinx.ext.autodoc import ClassDocumenter, AttributeDocumenter
total_objects: 15
num_classes: 2
num_imports: 3
num_functions: 3
num_all_bases: 2
num_methods: 3
num_bases: 1
label_desc: Use image node_id 1 for calling the TraitDocumenter obj's underlying member method code with example usage: obj.can_document_member(cls, member, membername, isattr, parent) and returns: isinstance
label_desc_len: 196
label_id: node_id 1
__index_level_0__: 1,691,042

---

name: add_directive_header
class_name: TraitDocumenter
class_bases: AttributeDocumenter
is_member: true
args: self,sig
class_docstr: null
class_docstr_tok: null
docstr: null
docstr_tok: null
returns: super
code:
```python
def add_directive_header(self, sig):
    default = self.object.default_value
    if default is Undefined:
        default_s = ""
    else:
        default_s = repr(default)
    sig = " = {}({})".format(
        self.object.__class__.__name__,
        default_s,
    )
    return super().add_directive_header(sig)
```
["def","add_directive_header","(","self",",","sig",")",":","default","=","self.object.default_value","if","default","is","Undefined",":","default_s","=","``","''","else",":","default_s","=","repr","(","default",")","sig","=","``","=","{","}","(","{","}",")","''",".format","(","self.object.__class__.__name__",",","default_s",",",")","return","super","(",")",".add_directive_header","(","sig",")"]
lstart: 86
lend: 96
raises: null
filename: autodoc_traits.py
file_path: pythreejs/docs/sphinxext/autodoc_traits.py
imports: from collections import OrderedDict from traitlets import TraitType, Undefined, Container, Dict, Any, HasTraits from sphinx.ext.autodoc import ClassDocumenter, AttributeDocumenter
total_objects: 15
num_classes: 2
num_imports: 3
num_functions: 3
num_all_bases: 2
num_methods: 3
num_bases: 1
label_desc: Use image node_id 3 for calling the TraitDocumenter obj's underlying member method code with example usage: obj.add_directive_header(sig) and returns: super
label_desc_len: 156
label_id: node_id 3
__index_level_0__: 1,691,044
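A sketch of the signature string this method builds, run against a bare traitlets trait (this mirrors the method body only, outside the Sphinx pipeline):

```python
from traitlets import Int, Undefined

trait = Int(42)
default = trait.default_value
default_s = "" if default is Undefined else repr(default)
sig = " = {}({})".format(trait.__class__.__name__, default_s)
print(sig)  # " = Int(42)"
```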

---

name: _design_resample_poly
class_name: global
class_bases: null
is_member: false
args: up,down,window
class_docstr: null
class_docstr_tok: null
docstr: null
docstr_tok: null
returns: h
code:
```python
def _design_resample_poly(up, down, window):
    """
    Design a prototype FIR low-pass filter using the window method
    for use in polyphase rational resampling.

    Parameters
    ----------
    up : int
        The upsampling factor.
    down : int
        The downsampling factor.
    window : string or tuple
        Desired window to use to design the low-pass filter.
        See below for details.

    Returns
    -------
    h : array
        The computed FIR filter coefficients.

    See Also
    --------
    resample_poly : Resample up or down using the polyphase method.

    Notes
    -----
    The argument `window` specifies the FIR low-pass filter design.
    The functions `cusignal.get_window` and `cusignal.firwin` are called
    to generate the appropriate filter coefficients.

    The returned array of coefficients will always be of data type
    `complex128` to maintain precision. For use in lower-precision filter
    operations, this array should be converted to the desired data type
    before providing it to `cusignal.resample_poly`.
    """
    # Determine our up and down factors
    # Use a rational approximation to save computation time on really long
    # signals
    g_ = gcd(up, down)
    up //= g_
    down //= g_

    # Design a linear-phase low-pass FIR filter
    max_rate = max(up, down)
    f_c = 1.0 / max_rate  # cutoff of FIR filter (rel. to Nyquist)
    # reasonable cutoff for our sinc-like function
    half_len = 10 * max_rate
    h = firwin(2 * half_len + 1, f_c, window=window)
    return h
```
["def","_design_resample_poly","(","up",",","down",",","window",")",":","``","''","''","Design","a","prototype","FIR","low-pass","filter","using","the","window","method","for","use","in","polyphase","rational","resampling",".","Parameters","--","--","--","--","--","up",":","int","The","upsampling","factor",".","down",":","int","The","downsampling","factor",".","window",":","string","or","tuple","Desired","window","to","use","to","design","the","low-pass","filter",".","See","below","for","details",".","Returns","--","--","--","-","h",":","array","The","computed","FIR","filter","coefficients",".","See","Also","--","--","--","--","resample_poly",":","Resample","up","or","down","using","the","polyphase","method",".","Notes","--","--","-","The","argument","`","window","`","specifies","the","FIR","low-pass","filter","design",".","The","functions","`","cusignal.get_window","`","and","`","cusignal.firwin","`","are","called","to","generate","the","appropriate","filter","coefficients",".","The","returned","array","of","coefficients","will","always","be","of","data","type","`","complex128","`","to","maintain","precision",".","For","use","in","lower-precision","filter","operations",",","this","array","should","be","converted","to","the","desired","data","type","before","providing","it","to","`","cusignal.resample_poly","`",".","``","''","''","#","Determine","our","up","and","down","factors","#","Use","a","rational","approximation","to","save","computation","time","on","really","long","#","signals","g_","=","gcd","(","up",",","down",")","up","\/\/=","g_","down","\/\/=","g_","#","Design","a","linear-phase","low-pass","FIR","filter","max_rate","=","max","(","up",",","down",")","f_c","=","1.0","\/","max_rate","#","cutoff","of","FIR","filter","(","rel",".","to","Nyquist",")","#","reasonable","cutoff","for","our","sinc-like","function","half_len","=","10","*","max_rate","h","=","firwin","(","2","*","half_len","+","1",",","f_c",",","window=window",")","return","h"]
lstart: 44
lend: 96
raises: null
filename: _resample.py
file_path: cupy/cupyx/scipy/signal/_resample.py
imports: import operator from math import gcd import cupy from cupyx.scipy.fft import fft, rfft, fftfreq, ifft, irfft, ifftshift from cupyx.scipy.signal._iir_filter_design import cheby1 from cupyx.scipy.signal._fir_filter_design import firwin from cupyx.scipy.signal._iir_filter_conversions import zpk2sos from cupyx.scipy.signal._ltisys import dlti from cupyx.scipy.signal._upfirdn import upfirdn, _output_len from cupyx.scipy.signal._signaltools import sosfiltfilt, filtfilt, sosfilt, lfilter from cupyx.scipy.signal.windows._windows import get_window
total_objects: 15
num_classes: null
num_imports: 11
num_functions: 4
num_all_bases: null
num_methods: null
num_bases: null
label_desc: Use image node_id 1 for calling a global function with example usage: _design_resample_poly(up, down, window) and returns: h
label_desc_len: 124
label_id: node_id 1
__index_level_0__: 692,581
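To see the sizes involved: for a 3/2 rate change this helper designs a 61-tap filter with cutoff at 1/3 of Nyquist. A sketch with SciPy's `firwin` standing in for the CuPy version (same arguments for this call):

```python
from math import gcd
from scipy.signal import firwin  # stand-in for the cupyx firwin

up, down, window = 3, 2, "hamming"
g_ = gcd(up, down)
up, down = up // g_, down // g_
max_rate = max(up, down)           # 3
half_len = 10 * max_rate           # 30
h = firwin(2 * half_len + 1, 1.0 / max_rate, window=window)
print(h.shape)  # (61,)
```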

---

name: setup
class_name: NdimageInterpolation
class_bases: Benchmark
is_member: true
args: self,shape,order,mode
class_docstr: null
class_docstr_tok: null
docstr: null
docstr_tok: null
returns: null
code:
```python
def setup(self, shape, order, mode):
    rstate = np.random.RandomState(5)
    self.x = rstate.standard_normal(shape)
    self.matrix_2d = np.asarray([[0.8, 0, 1.5], [0, 1.2, -5.0]])
    self.matrix_3d = np.asarray(
        [[0.8, 0, 0, 1.5], [0, 1.2, 0, -5.0], [0, 0, 1, 0]]
    )
```
["def","setup","(","self",",","shape",",","order",",","mode",")",":","rstate","=","np.random.RandomState","(","5",")","self.x","=","rstate.standard_normal","(","shape",")","self.matrix_2d","=","np.asarray","(","[","[","0.8",",","0",",","1.5","]",",","[","0",",","1.2",",","-5.0","]","]",")","self.matrix_3d","=","np.asarray","(","[","[","0.8",",","0",",","0",",","1.5","]",",","[","0",",","1.2",",","0",",","-5.0","]",",","[","0",",","0",",","1",",","0","]","]",")"]
lstart: 28
lend: 35
raises: null
filename: ndimage_interpolation.py
file_path: scipy/benchmarks/benchmarks/ndimage_interpolation.py
imports: import numpy from .common import Benchmark
total_objects: 15
num_classes: 1
num_imports: 2
num_functions: 2
num_all_bases: 1
num_methods: 9
num_bases: 1
label_desc: Use image node_id 1 for calling the NdimageInterpolation obj's underlying member method code with example usage: obj.setup(shape, order, mode) without return types
label_desc_len: 163
label_id: node_id 1
__index_level_0__: 1,883,756

---

name: chrome_command
class_name: Converter
class_bases: ImageConverter
is_member: true
args: self
class_docstr: null
class_docstr_tok: null
docstr: null
docstr_tok: null
returns: None,None,str,str,str,str+path+str,str
code:
```python
def chrome_command(self) -> str | None:
    if platform.win32_ver()[0]:
        if os.system("where chrome") == 0:
            return "chrome"
        path = os.path.join(
            os.environ["PROGRAMW6432"],
            "Google\\Chrome\\Application\\chrome.exe",
        )
        if os.path.exists(path):
            return f'"{path}"'
        return None
    if os.system("chrome --version") == 0:
        return "chrome"
    if platform.mac_ver()[0]:
        return "'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'"
    elif platform.libc_ver()[0]:
        return "google-chrome"
    return None
```
["def","chrome_command","(","self",")","-",">","str","|","None",":","if","platform.win32_ver","(",")","[","0","]",":","if","os.system","(","``","where","chrome","''",")","==","0",":","return","``","chrome","''","path","=","os.path.join","(","os.environ","[","``","PROGRAMW6432","''","]",",","``","Google\\\\Chrome\\\\Application\\\\chrome.exe","''",",",")","if","os.path.exists","(","path",")",":","return","f","'","''","{","path","}","''","'","return","None","if","os.system","(","``","chrome","--","version","''",")","==","0",":","return","``","chrome","''","if","platform.mac_ver","(",")","[","0","]",":","return","``","'\/Applications\/Google","Chrome.app\/Contents\/MacOS\/Google","Chrome","'","''","elif","platform.libc_ver","(",")","[","0","]",":","return","``","google-chrome","''","return","None"]
lstart: 28
lend: 42
raises: null
filename: convert-svg-to-pdf.py
file_path: sympy/doc/ext/convert-svg-to-pdf.py
imports: from __future__ import annotations from sphinx.transforms.post_transforms.images import ImageConverter from sphinx.util import logging import os import platform from typing import Any from sphinx.application import Sphinx
total_objects: 15
num_classes: 1
num_imports: 7
num_functions: 1
num_all_bases: 1
num_methods: 5
num_bases: 1
label_desc: Use image node_id 2 for calling the Converter obj's underlying member method code with example usage: obj.chrome_command() and returns: None, None, str, str, str, str, path, str, str
label_desc_len: 182
label_id: node_id 2
__index_level_0__: 2,029,275

---

name: __init__
class_name: DropBlock2d
class_bases: nn
is_member: true
args: self,drop_prob,block_size,gamma_scale,with_noise,inplace,batchwise,fast
class_docstr: DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
["DropBlock",".","See","https",":","\/\/arxiv.org\/pdf\/1810.12890.pdf"]
docstr: null
docstr_tok: null
returns: DropBlock2d
code:
```python
def __init__(
    self,
    drop_prob: float = 0.1,
    block_size: int = 7,
    gamma_scale: float = 1.0,
    with_noise: bool = False,
    inplace: bool = False,
    batchwise: bool = False,
    fast: bool = True,
):
    super(DropBlock2d, self).__init__()
    self.drop_prob = drop_prob
    self.gamma_scale = gamma_scale
    self.block_size = block_size
    self.with_noise = with_noise
    self.inplace = inplace
    self.batchwise = batchwise
    self.fast = fast
```
["def","__init__","(","self",",","drop_prob",":","float","=","0.1",",","block_size",":","int","=","7",",","gamma_scale",":","float","=","1.0",",","with_noise",":","bool","=","False",",","inplace",":","bool","=","False",",","batchwise",":","bool","=","False",",","fast",":","bool","=","True",",",")",":","super","(","DropBlock2d",",","self",")",".__init__","(",")","self.drop_prob","=","drop_prob","self.gamma_scale","=","gamma_scale","self.block_size","=","block_size","self.with_noise","=","with_noise","self.inplace","=","inplace","self.batchwise","=","batchwise","self.fast","=","fast"]
lstart: 108
lend: 124
raises: null
filename: drop.py
file_path: pytorch-image-models/timm/layers/drop.py
imports: import torch import torch.nn import torch.nn.functional
total_objects: 15
num_classes: 2
num_imports: 3
num_functions: 3
num_all_bases: 2
num_methods: 2
num_bases: 1
label_desc: Use image node_id 1 to create a new DropBlock2d object from inherited base classes: nn with example: obj = DropBlock2d(drop_prob, block_size, gamma_scale, with_noise, inplace, batchwise, fast)
label_desc_len: 192
label_id: node_id 1
__index_level_0__: 1,692,286
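Typical construction and use, assuming a recent timm where `DropBlock2d` is exported from `timm.layers` (older releases expose it under `timm.models.layers`); the forward pass, defined elsewhere in `drop.py`, only masks in training mode:

```python
import torch
from timm.layers import DropBlock2d

blk = DropBlock2d(drop_prob=0.1, block_size=7)
blk.train()
x = torch.randn(2, 8, 32, 32)
y = blk(x)          # zeroes contiguous blocks of the feature map
print(y.shape)      # torch.Size([2, 8, 32, 32])
```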

---

name: test_symmetric_difference
class_name: TestIntervalIndex
class_bases: null
is_member: true
args: self,closed,sort
class_docstr: null
class_docstr_tok: null
docstr: null
docstr_tok: null
returns: null
code:
```python
def test_symmetric_difference(self, closed, sort):
    index = monotonic_index(0, 11, closed=closed)
    result = index[1:].symmetric_difference(index[:-1], sort=sort)
    expected = IntervalIndex([index[0], index[-1]])
    if sort in (None, True):
        tm.assert_index_equal(result, expected)
    else:
        tm.assert_index_equal(result.sort_values(), expected)

    # GH 19101: empty result, same dtype
    result = index.symmetric_difference(index, sort=sort)
    expected = empty_index(dtype="int64", closed=closed)
    if sort in (None, True):
        tm.assert_index_equal(result, expected)
    else:
        tm.assert_index_equal(result.sort_values(), expected)

    # GH 19101: empty result, different dtypes
    other = IntervalIndex.from_arrays(
        index.left.astype("float64"), index.right, closed=closed
    )
    result = index.symmetric_difference(other, sort=sort)
    expected = empty_index(dtype="float64", closed=closed)
    tm.assert_index_equal(result, expected)
```
["def","test_symmetric_difference","(","self",",","closed",",","sort",")",":","index","=","monotonic_index","(","0",",","11",",","closed=closed",")","result","=","index","[","1",":","]",".symmetric_difference","(","index","[",":","-1","]",",","sort=sort",")","expected","=","IntervalIndex","(","[","index","[","0","]",",","index","[","-1","]","]",")","if","sort","in","(","None",",","True",")",":","tm.assert_index_equal","(","result",",","expected",")","else",":","tm.assert_index_equal","(","result.sort_values","(",")",",","expected",")","#","GH","19101",":","empty","result",",","same","dtype","result","=","index.symmetric_difference","(","index",",","sort=sort",")","expected","=","empty_index","(","dtype=","''","int64","''",",","closed=closed",")","if","sort","in","(","None",",","True",")",":","tm.assert_index_equal","(","result",",","expected",")","else",":","tm.assert_index_equal","(","result.sort_values","(",")",",","expected",")","#","GH","19101",":","empty","result",",","different","dtypes","other","=","IntervalIndex.from_arrays","(","index.left.astype","(","``","float64","''",")",",","index.right",",","closed=closed",")","result","=","index.symmetric_difference","(","other",",","sort=sort",")","expected","=","empty_index","(","dtype=","''","float64","''",",","closed=closed",")","tm.assert_index_equal","(","result",",","expected",")"]
lstart: 151
lend: 174
raises: null
filename: test_setops.py
file_path: pandas/pandas/tests/indexes/interval/test_setops.py
imports: import numpy import pytest from pandas import Index, IntervalIndex, Timestamp, interval_range import pandas._testing
total_objects: 15
num_classes: 1
num_imports: 4
num_functions: 2
num_all_bases: 0
num_methods: 8
num_bases: null
label_desc: Use image node_id 7 for calling the TestIntervalIndex obj's underlying member method code with example usage: obj.test_symmetric_difference(closed, sort) without return types
label_desc_len: 174
label_id: node_id 7
__index_level_0__: 1,514,639
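What the first assertion exercises, as a standalone pandas snippet (`interval_range` stands in for the test's `monotonic_index` helper):

```python
import pandas as pd

idx = pd.interval_range(0, 11, closed="right")   # (0, 1], (1, 2], ..., (10, 11]
result = idx[1:].symmetric_difference(idx[:-1])
print(result)  # the first and last intervals only: (0, 1] and (10, 11]
```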

---

name: fetch_kddcup99
class_name: global
class_bases: null
is_member: false
args: null
class_docstr: null
class_docstr_tok: null
docstr: null
docstr_tok: null
returns: Bunch,data, target
code:
```python
def fetch_kddcup99(
    *,
    subset=None,
    data_home=None,
    shuffle=False,
    random_state=None,
    percent10=True,
    download_if_missing=True,
    return_X_y=False,
    as_frame=False,
):
    """Load the kddcup99 dataset (classification).

    Download it if necessary.

    =================   ====================================
    Classes                                               23
    Samples total                                    4898431
    Dimensionality                                        41
    Features            discrete (int) or continuous (float)
    =================   ====================================

    Read more in the :ref:`User Guide <kddcup99_dataset>`.

    .. versionadded:: 0.18

    Parameters
    ----------
    subset : {'SA', 'SF', 'http', 'smtp'}, default=None
        To return the corresponding classical subsets of kddcup 99.
        If None, return the entire kddcup 99 dataset.

    data_home : str or path-like, default=None
        Specify another download and cache folder for the datasets. By
        default all scikit-learn data is stored in '~/scikit_learn_data'
        subfolders.

        .. versionadded:: 0.19

    shuffle : bool, default=False
        Whether to shuffle dataset.

    random_state : int, RandomState instance or None, default=None
        Determines random number generation for dataset shuffling and for
        selection of abnormal samples if `subset='SA'`. Pass an int for
        reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    percent10 : bool, default=True
        Whether to load only 10 percent of the data.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object. See
        below for more information about the `data` and `target` object.

        .. versionadded:: 0.20

    as_frame : bool, default=False
        If `True`, returns a pandas Dataframe for the ``data`` and ``target``
        objects in the `Bunch` returned object; `Bunch` return object will
        also have a ``frame`` member.

        .. versionadded:: 0.24

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : {ndarray, dataframe} of shape (494021, 41)
            The data matrix to learn. If `as_frame=True`, `data` will be a
            pandas DataFrame.
        target : {ndarray, series} of shape (494021,)
            The regression target for each sample. If `as_frame=True`,
            `target` will be a pandas Series.
        frame : dataframe of shape (494021, 42)
            Only present when `as_frame=True`. Contains `data` and `target`.
        DESCR : str
            The full description of the dataset.
        feature_names : list
            The names of the dataset columns
        target_names: list
            The names of the target columns

    (data, target) : tuple if ``return_X_y`` is True
        A tuple of two ndarray. The first containing a 2D array of
        shape (n_samples, n_features) with each row representing one
        sample and each column representing the features. The second
        ndarray of shape (n_samples,) containing the target samples.

        .. versionadded:: 0.20
    """
    data_home = get_data_home(data_home=data_home)
    kddcup99 = _fetch_brute_kddcup99(
        data_home=data_home,
        percent10=percent10,
        download_if_missing=download_if_missing,
    )

    data = kddcup99.data
    target = kddcup99.target
    feature_names = kddcup99.feature_names
    target_names = kddcup99.target_names

    if subset == "SA":
        s = target == b"normal."
        t = np.logical_not(s)
        normal_samples = data[s, :]
        normal_targets = target[s]
        abnormal_samples = data[t, :]
        abnormal_targets = target[t]

        n_samples_abnormal = abnormal_samples.shape[0]
        # selected abnormal samples:
        random_state = check_random_state(random_state)
        r = random_state.randint(0, n_samples_abnormal, 3377)
        abnormal_samples = abnormal_samples[r]
        abnormal_targets = abnormal_targets[r]

        data = np.r_[normal_samples, abnormal_samples]
        target = np.r_[normal_targets, abnormal_targets]

    if subset == "SF" or subset == "http" or subset == "smtp":
        # select all samples with positive logged_in attribute:
        s = data[:, 11] == 1
        data = np.c_[data[s, :11], data[s, 12:]]
        feature_names = feature_names[:11] + feature_names[12:]
        target = target[s]

        data[:, 0] = np.log((data[:, 0] + 0.1).astype(float, copy=False))
        data[:, 4] = np.log((data[:, 4] + 0.1).astype(float, copy=False))
        data[:, 5] = np.log((data[:, 5] + 0.1).astype(float, copy=False))

        if subset == "http":
            s = data[:, 2] == b"http"
            data = data[s]
            target = target[s]
            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
            feature_names = [
                feature_names[0],
                feature_names[4],
                feature_names[5],
            ]

        if subset == "smtp":
            s = data[:, 2] == b"smtp"
            data = data[s]
            target = target[s]
            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
            feature_names = [
                feature_names[0],
                feature_names[4],
                feature_names[5],
            ]

        if subset == "SF":
            data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]]
            feature_names = [
                feature_names[0],
                feature_names[2],
                feature_names[4],
                feature_names[5],
            ]

    if shuffle:
        data, target = shuffle_method(data, target, random_state=random_state)

    fdescr = load_descr("kddcup99.rst")

    frame = None
    if as_frame:
        frame, data, target = _convert_data_dataframe(
            "fetch_kddcup99", data, target, feature_names, target_names
        )

    if return_X_y:
        return data, target

    return Bunch(
        data=data,
        target=target,
        frame=frame,
        target_names=target_names,
        feature_names=feature_names,
        DESCR=fdescr,
    )
```
["def","fetch_kddcup99","(","*",",","subset=None",",","data_home=None",",","shuffle=False",",","random_state=None",",","percent10=True",",","download_if_missing=True",",","return_X_y=False",",","as_frame=False",",",")",":","``","''","''","Load","the","kddcup99","dataset","(","classification",")",".","Download","it","if","necessary",".","=================","====================================","Classes","23","Samples","total","4898431","Dimensionality","41","Features","discrete","(","int",")","or","continuous","(","float",")","=================","====================================","Read","more","in","the",":","ref",":","`","User","Guide","<","kddcup99_dataset",">","`",".","..","versionadded",":",":","0.18","Parameters","--","--","--","--","--","subset",":","{","'SA","'",",","'SF","'",",","'http","'",",","'smtp","'","}",",","default=None","To","return","the","corresponding","classical","subsets","of","kddcup","99",".","If","None",",","return","the","entire","kddcup","99","dataset",".","data_home",":","str","or","path-like",",","default=None","Specify","another","download","and","cache","folder","for","the","datasets",".","By","default","all","scikit-learn","data","is","stored","in","'~\/scikit_learn_data","'","subfolders",".","..","versionadded",":",":","0.19","shuffle",":","bool",",","default=False","Whether","to","shuffle","dataset",".","random_state",":","int",",","RandomState","instance","or","None",",","default=None","Determines","random","number","generation","for","dataset","shuffling","and","for","selection","of","abnormal","samples","if","`","subset='SA","'","`",".","Pass","an","int","for","reproducible","output","across","multiple","function","calls",".","See",":","term",":","`","Glossary","<","random_state",">","`",".","percent10",":","bool",",","default=True","Whether","to","load","only","10","percent","of","the","data",".","download_if_missing",":","bool",",","default=True","If","False",",","raise","an","OSError","if","the","data","is","not","locally","available","instead","of","trying","to","download","the","data","from","the","source","site",".","return_X_y",":","bool",",","default=False","If","True",",","returns","``","(","data",",","target",")","``","instead","of","a","Bunch","object",".","See","below","for","more","information","about","the","`","data","`","and","`","target","`","object",".","..","versionadded",":",":","0.20","as_frame",":","bool",",","default=False","If","`","True","`",",","returns","a","pandas","Dataframe","for","the","``","data","``","and","``","target","``","objects","in","the","`","Bunch","`","returned","object",";","`","Bunch","`","return","object","will","also","have","a","``","frame","``","member",".","..","versionadded",":",":","0.24","Returns","--","--","--","-","data",":",":","class",":","`","~sklearn.utils.Bunch","`","Dictionary-like","object",",","with","the","following","attributes",".","data",":","{","ndarray",",","dataframe","}","of","shape","(","494021",",","41",")","The","data","matrix","to","learn",".","If","`","as_frame=True","`",",","`","data","`","will","be","a","pandas","DataFrame",".","target",":","{","ndarray",",","series","}","of","shape","(","494021",",",")","The","regression","target","for","each","sample",".","If","`","as_frame=True","`",",","`","target","`","will","be","a","pandas","Series",".","frame",":","dataframe","of","shape","(","494021",",","42",")","Only","present","when","`","as_frame=True","`",".","Contains","`","data","`","and","`","target","`",".","DESCR",":","str","The","full","description","of","the","dataset",
".","feature_names",":","list","The","names","of","the","dataset","columns","target_names",":","list","The","names","of","the","target","columns","(","data",",","target",")",":","tuple","if","``","return_X_y","``","is","True","A","tuple","of","two","ndarray",".","The","first","containing","a","2D","array","of","shape","(","n_samples",",","n_features",")","with","each","row","representing","one","sample","and","each","column","representing","the","features",".","The","second","ndarray","of","shape","(","n_samples",",",")","containing","the","target","samples",".","..","versionadded",":",":","0.20","``","''","''","data_home","=","get_data_home","(","data_home=data_home",")","kddcup99","=","_fetch_brute_kddcup99","(","data_home=data_home",",","percent10=percent10",",","download_if_missing=download_if_missing",",",")","data","=","kddcup99.data","target","=","kddcup99.target","feature_names","=","kddcup99.feature_names","target_names","=","kddcup99.target_names","if","subset","==","``","SA","''",":","s","=","target","==","b","''","normal",".","''","t","=","np.logical_not","(","s",")","normal_samples","=","data","[","s",",",":","]","normal_targets","=","target","[","s","]","abnormal_samples","=","data","[","t",",",":","]","abnormal_targets","=","target","[","t","]","n_samples_abnormal","=","abnormal_samples.shape","[","0","]","#","selected","abnormal","samples",":","random_state","=","check_random_state","(","random_state",")","r","=","random_state.randint","(","0",",","n_samples_abnormal",",","3377",")","abnormal_samples","=","abnormal_samples","[","r","]","abnormal_targets","=","abnormal_targets","[","r","]","data","=","np.r_","[","normal_samples",",","abnormal_samples","]","target","=","np.r_","[","normal_targets",",","abnormal_targets","]","if","subset","==","``","SF","''","or","subset","==","``","http","''","or","subset","==","``","smtp","''",":","#","select","all","samples","with","positive","logged_in","attribute",":","s","=","data","[",":",",","11","]","==","1","data","=","np.c_","[","data","[","s",",",":11","]",",","data","[","s",",","12",":","]","]","feature_names","=","feature_names","[",":11","]","+","feature_names","[","12",":","]","target","=","target","[","s","]","data","[",":",",","0","]","=","np.log","(","(","data","[",":",",","0","]","+","0.1",")",".astype","(","float",",","copy=False",")",")","data","[",":",",","4","]","=","np.log","(","(","data","[",":",",","4","]","+","0.1",")",".astype","(","float",",","copy=False",")",")","data","[",":",",","5","]","=","np.log","(","(","data","[",":",",","5","]","+","0.1",")",".astype","(","float",",","copy=False",")",")","if","subset","==","``","http","''",":","s","=","data","[",":",",","2","]","==","b","''","http","''","data","=","data","[","s","]","target","=","target","[","s","]","data","=","np.c_","[","data","[",":",",","0","]",",","data","[",":",",","4","]",",","data","[",":",",","5","]","]","feature_names","=","[","feature_names","[","0","]",",","feature_names","[","4","]",",","feature_names","[","5","]",",","]","if","subset","==","``","smtp","''",":","s","=","data","[",":",",","2","]","==","b","''","smtp","''","data","=","data","[","s","]","target","=","target","[","s","]","data","=","np.c_","[","data","[",":",",","0","]",",","data","[",":",",","4","]",",","data","[",":",",","5","]","]","feature_names","=","[","feature_names","[","0","]",",","feature_names","[","4","]",",","feature_names","[","5","]",",","]","if","subset","==","``","SF","''",":","data","=","np.c_","[","data","[",":",",","0","]",",","data","[",":",",","2","]",",","da
ta","[",":",",","4","]",",","data","[",":",",","5","]","]","feature_names","=","[","feature_names","[","0","]",",","feature_names","[","2","]",",","feature_names","[","4","]",",","feature_names","[","5","]",",","]","if","shuffle",":","data",",","target","=","shuffle_method","(","data",",","target",",","random_state=random_state",")","fdescr","=","load_descr","(","``","kddcup99.rst","''",")","frame","=","None","if","as_frame",":","frame",",","data",",","target","=","_convert_data_dataframe","(","``","fetch_kddcup99","''",",","data",",","target",",","feature_names",",","target_names",",",")","if","return_X_y",":","return","data",",","target","return","Bunch","(","data=data",",","target=target",",","frame=frame",",","target_names=target_names",",","feature_names=feature_names",",","DESCR=fdescr",",",")"]
lstart: 63
lend: 243
raises: null
filename: _kddcup99.py
file_path: catboost/contrib/python/scikit-learn/py3/sklearn/datasets/_kddcup99.py
imports: import errno import logging import os from gzip import GzipFile from os.path import exists, join import joblib import numpy from ..utils import Bunch, check_random_state from ..utils import shuffle from ..utils._param_validation import StrOptions, validate_params from .None import get_data_home from ._base import RemoteFileMetadata, _convert_data_dataframe, _fetch_remote, load_descr
total_objects: 15
num_classes: null
num_imports: 12
num_functions: 3
num_all_bases: null
num_methods: null
num_bases: null
label_desc: Use image node_id 1 for calling a global function with example usage: fetch_kddcup99() and returns: Bunch, data, target
label_desc_len: 120
label_id: node_id 1
__index_level_0__: 520,096
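This record is a vendored copy; standard usage against the upstream scikit-learn function looks like this (first call downloads the data):

```python
from sklearn.datasets import fetch_kddcup99

# 'http' subset: logged-in HTTP connections with 3 log-scaled features
data, target = fetch_kddcup99(subset="http", percent10=True, return_X_y=True)
print(data.shape, target.shape)  # roughly (58725, 3) (58725,)
```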

---

name: __init__
class_name: GPT2Decoder
class_bases: BaseStepDecoder
is_member: true
args: self,gpt2_lm_model
class_docstr: null
class_docstr_tok: null
docstr: null
docstr_tok: null
returns: GPT2Decoder
code:
```python
def __init__(self, gpt2_lm_model):
    self._gpt2_lm_model = gpt2_lm_model
    self._layout = self._gpt2_lm_model._backbone_model.layout
```
["def","__init__","(","self",",","gpt2_lm_model",")",":","self._gpt2_lm_model","=","gpt2_lm_model","self._layout","=","self._gpt2_lm_model._backbone_model.layout"]
lstart: 39
lend: 41
raises: null
filename: interactive_conditional_gpt2_samples.py
file_path: gluon-nlp/scripts/generation/interactive_conditional_gpt2_samples.py
imports: import os import mxnet import argparse from gluonnlp.utils import set_seed from gluonnlp.sequence_sampler import BeamSearchSampler, BaseStepDecoder from gluonnlp.models.gpt2 import GPT2ForLM, list_pretrained_gpt2, get_pretrained_gpt2
total_objects: 15
num_classes: 1
num_imports: 6
num_functions: 2
num_all_bases: 1
num_methods: 5
num_bases: 1
label_desc: Use image node_id 1 to create a new GPT2Decoder object from inherited base classes: BaseStepDecoder with example: obj = GPT2Decoder(gpt2_lm_model)
label_desc_len: 146
label_id: node_id 1
__index_level_0__: 1,097,714
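The constructor only stores the model and its layout; a minimal sketch, assuming `lm_model` is an already-constructed `GPT2ForLM` instance (not built here):

```python
# Hypothetical: lm_model must be a loaded gluonnlp.models.gpt2.GPT2ForLM.
decoder = GPT2Decoder(lm_model)
print(decoder._layout)  # copied from the backbone model's layout attribute
```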

---

name: sampled_mean
class_name: global
class_bases: null
is_member: false
args: f,lb,ub,npts,map
class_docstr: null
class_docstr_tok: null
docstr: null
docstr_tok: null
returns: _expectation_given_samples
code:
```python
def sampled_mean(f, lb, ub, npts=10000, map=None):
    """
    use random sampling to calculate the mean of a function

    Inputs:
        f -- a function that takes a list and returns a number
        lb -- a list of lower bounds
        ub -- a list of upper bounds
        npts -- the number of points to sample [Default is npts=10000]
        map -- the mapping function [Default is builtins.map]"""
    pts = _random_samples(lb, ub, npts)
    return _expectation_given_samples(f, pts, map)
```
["def","sampled_mean","(","f",",","lb",",","ub",",","npts=10000",",","map=None",")",":","``","''","''","use","random","sampling","to","calculate","the","mean","of","a","function","Inputs",":","f","--","a","function","that","takes","a","list","and","returns","a","number","lb","--","a","list","of","lower","bounds","ub","--","a","list","of","upper","bounds","npts","--","the","number","of","points","to","sample","[","Default","is","npts=10000","]","map","--","the","mapping","function","[","Default","is","builtins.map","]","''","''","''","pts","=","_random_samples","(","lb",",","ub",",","npts",")","return","_expectation_given_samples","(","f",",","pts",",","map",")"]
lstart: 94
lend: 106
raises: null
filename: samples.py
file_path: mystic/mystic/math/samples.py
imports:
total_objects: 15
num_classes: null
num_imports: 0
num_functions: 15
num_all_bases: null
num_methods: null
num_bases: null
label_desc: Use image node_id 4 for calling a global function with example usage: sampled_mean(f, lb, ub, npts, map) and returns: _expectation_given_samples
label_desc_len: 144
label_id: node_id 4
__index_level_0__: 1,407,024
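A quick numerical check, assuming mystic is installed: the mean of f(x, y) = x + y over the unit square is 1.0, and the Monte Carlo estimate with 10000 points lands close to it:

```python
from mystic.math.samples import sampled_mean

est = sampled_mean(lambda x: x[0] + x[1], lb=[0, 0], ub=[1, 1], npts=10000)
print(est)  # ~1.0, within Monte Carlo noise
```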

---

name: sampled_variance
class_name: global
class_bases: null
is_member: false
args: f,lb,ub,npts,map
class_docstr: null
class_docstr_tok: null
docstr: null
docstr_tok: null
returns: _variance_given_samples
code:
```python
def sampled_variance(
    f, lb, ub, npts=10000, map=None
):  # XXX: could be improved
    """
    use random sampling to calculate the variance of a function

    Inputs:
        f -- a function that takes a list and returns a number
        lb -- a list of lower bounds
        ub -- a list of upper bounds
        npts -- the number of points to sample [Default is npts=10000]
        map -- the mapping function [Default is builtins.map]"""
    pts = _random_samples(lb, ub, npts)
    return _variance_given_samples(f, pts, map)
```
["def","sampled_variance","(","f",",","lb",",","ub",",","npts=10000",",","map=None",")",":","#","XXX",":","could","be","improved","``","''","''","use","random","sampling","to","calculate","the","variance","of","a","function","Inputs",":","f","--","a","function","that","takes","a","list","and","returns","a","number","lb","--","a","list","of","lower","bounds","ub","--","a","list","of","upper","bounds","npts","--","the","number","of","points","to","sample","[","Default","is","npts=10000","]","map","--","the","mapping","function","[","Default","is","builtins.map","]","''","''","''","pts","=","_random_samples","(","lb",",","ub",",","npts",")","return","_variance_given_samples","(","f",",","pts",",","map",")"]
lstart: 133
lend: 145
raises: null
filename: samples.py
file_path: mystic/mystic/math/samples.py
imports:
total_objects: 15
num_classes: null
num_imports: 0
num_functions: 15
num_all_bases: null
num_methods: null
num_bases: null
label_desc: Use image node_id 5 for calling a global function with example usage: sampled_variance(f, lb, ub, npts, map) and returns: _variance_given_samples
label_desc_len: 145
label_id: node_id 5
__index_level_0__: 1,407,025

---

name: test_set_incompatible_types
class_name: TestIntervalIndex
class_bases: null
is_member: true
args: self,closed,op_name,sort
class_docstr: null
class_docstr_tok: null
docstr: null
docstr_tok: null
returns: null
code:
```python
def test_set_incompatible_types(self, closed, op_name, sort):
    index = monotonic_index(0, 11, closed=closed)
    set_op = getattr(index, op_name)

    # TODO: standardize return type of non-union setops type(self vs other)
    # non-IntervalIndex
    if op_name == "difference":
        expected = index
    else:
        expected = getattr(index.astype("O"), op_name)(Index([1, 2, 3]))
    result = set_op(Index([1, 2, 3]), sort=sort)
    tm.assert_index_equal(result, expected)

    # mixed closed -> cast to object
    for other_closed in {"right", "left", "both", "neither"} - {closed}:
        other = monotonic_index(0, 11, closed=other_closed)
        expected = getattr(index.astype(object), op_name)(other, sort=sort)
        if op_name == "difference":
            expected = index
        result = set_op(other, sort=sort)
        tm.assert_index_equal(result, expected)

    # GH 19016: incompatible dtypes -> cast to object
    other = interval_range(Timestamp("20180101"), periods=9, closed=closed)
    expected = getattr(index.astype(object), op_name)(other, sort=sort)
    if op_name == "difference":
        expected = index
    result = set_op(other, sort=sort)
    tm.assert_index_equal(result, expected)
```
["def","test_set_incompatible_types","(","self",",","closed",",","op_name",",","sort",")",":","index","=","monotonic_index","(","0",",","11",",","closed=closed",")","set_op","=","getattr","(","index",",","op_name",")","#","TODO",":","standardize","return","type","of","non-union","setops","type","(","self","vs","other",")","#","non-IntervalIndex","if","op_name","==","``","difference","''",":","expected","=","index","else",":","expected","=","getattr","(","index.astype","(","``","O","''",")",",","op_name",")","(","Index","(","[","1",",","2",",","3","]",")",")","result","=","set_op","(","Index","(","[","1",",","2",",","3","]",")",",","sort=sort",")","tm.assert_index_equal","(","result",",","expected",")","#","mixed","closed","-",">","cast","to","object","for","other_closed","in","{","``","right","''",",","``","left","''",",","``","both","''",",","``","neither","''","}","-","{","closed","}",":","other","=","monotonic_index","(","0",",","11",",","closed=other_closed",")","expected","=","getattr","(","index.astype","(","object",")",",","op_name",")","(","other",",","sort=sort",")","if","op_name","==","``","difference","''",":","expected","=","index","result","=","set_op","(","other",",","sort=sort",")","tm.assert_index_equal","(","result",",","expected",")","#","GH","19016",":","incompatible","dtypes","-",">","cast","to","object","other","=","interval_range","(","Timestamp","(","``","20180101","''",")",",","periods=9",",","closed=closed",")","expected","=","getattr","(","index.astype","(","object",")",",","op_name",")","(","other",",","sort=sort",")","if","op_name","==","``","difference","''",":","expected","=","index","result","=","set_op","(","other",",","sort=sort",")","tm.assert_index_equal","(","result",",","expected",")"]
lstart: 180
lend: 208
raises: null
filename: test_setops.py
file_path: pandas/pandas/tests/indexes/interval/test_setops.py
imports: import numpy import pytest from pandas import Index, IntervalIndex, Timestamp, interval_range import pandas._testing
total_objects: 15
num_classes: 1
num_imports: 4
num_functions: 2
num_all_bases: 0
num_methods: 8
num_bases: null
label_desc: Use image node_id 8 for calling the TestIntervalIndex obj's underlying member method code with example usage: obj.test_set_incompatible_types(closed, op_name, sort) without return types
label_desc_len: 185
label_id: node_id 8
__index_level_0__: 1,514,640
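The "mixed closed -> cast to object" branch, as a standalone snippet (behavior as of recent pandas versions):

```python
import pandas as pd

left = pd.interval_range(0, 3, closed="left")
right = pd.interval_range(0, 3, closed="right")
combined = left.union(right)
print(combined.dtype)  # object: incompatible 'closed' falls back to object
```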

---

name: sampled_pof
class_name: global
class_bases: null
is_member: false
args: f,lb,ub,npts,map
class_docstr: null
class_docstr_tok: null
docstr: null
docstr_tok: null
returns: _pof_given_samples
code:
```python
def sampled_pof(f, lb, ub, npts=10000, map=None):
    """
    use random sampling to calculate probability of failure for a function

    Inputs:
        f -- a function that returns True for 'success' and False for 'failure'
        lb -- a list of lower bounds
        ub -- a list of upper bounds
        npts -- the number of points to sample [Default is npts=10000]
        map -- the mapping function [Default is builtins.map]"""
    pts = _random_samples(lb, ub, npts)
    return _pof_given_samples(f, pts, map)
```
["def","sampled_pof","(","f",",","lb",",","ub",",","npts=10000",",","map=None",")",":","``","''","''","use","random","sampling","to","calculate","probability","of","failure","for","a","function","Inputs",":","f","--","a","function","that","returns","True","for","'success","'","and","False","for","'failure'","lb","--","a","list","of","lower","bounds","ub","--","a","list","of","upper","bounds","npts","--","the","number","of","points","to","sample","[","Default","is","npts=10000","]","map","--","the","mapping","function","[","Default","is","builtins.map","]","''","''","''","pts","=","_random_samples","(","lb",",","ub",",","npts",")","return","_pof_given_samples","(","f",",","pts",",","map",")"]
lstart: 148
lend: 160
raises: null
filename: samples.py
file_path: mystic/mystic/math/samples.py
imports:
total_objects: 15
num_classes: null
num_imports: 0
num_functions: 15
num_all_bases: null
num_methods: null
num_bases: null
label_desc: Use image node_id 6 for calling a global function with example usage: sampled_pof(f, lb, ub, npts, map) and returns: _pof_given_samples
label_desc_len: 135
label_id: node_id 6
__index_level_0__: 1,407,026
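A worked check, again assuming mystic is installed: with success defined as x + y > 0.5 on the unit square, the failure region is a triangle of area 1/8, so the estimate should hover near 0.125:

```python
from mystic.math.samples import sampled_pof

pof = sampled_pof(lambda x: x[0] + x[1] > 0.5, lb=[0, 0], ub=[1, 1])
print(pof)  # ~0.125
```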

---

name: __init__
class_name: Config
class_bases: null
is_member: true
args: self,task,out_dir,max_trans,random_seed,fields,flint_model,trans_methods,trans_config,return_unk,sub_methods,sub_config,attack_methods,validate_methods
class_docstr: Hold some config params to control generation and report procedure.
["Hold","some","config","params","to","control","generation","and","report","procedure","."]
docstr:
    :param str task: task name
    :param string out_dir: out dir for saving generated samples, default current path.
    :param int max_trans: maximum transformed samples generated by one original sample per Transformation.
    :param int random_seed: random number seed to reproduce generation.
    :param str|list[str] fields: fields on which new samples are generated.
    :param str model_file: path to the python file containing the FlintModel instance named 'model'.
    :param list trans_methods: indicate what transformations to apply to dataset.
    :param dict trans_config: parameters for the initialization of the transformation instances.
    :param bool return_unk: whether to apply transformations which may influence the label of a sample.
    :param list sub_methods: indicate what subpopulations to apply to dataset.
    :param dict sub_config: parameters for the initialization of the subpopulation instances.
    :param str attack_methods: path to the python file containing the Attack instances named "attacks".
    :param str|list[str] validate_methods: indicate which validate methods to use to calculate confidence of generated samples.
[":","param","str","task",":","task","name",":","param","string","out_dir",":","out","dir","for","saving","generated","samples",",","default","current","path",".",":","param","int","max_trans",":","maximum","transformed","samples","generate","by","one","original","sample","pre","Transformation",".",":","param","int","random_seed",":","random","number","seed","to","reproduce","generation",".",":","param","str|list","[","str","]","fields",":","fields","on","which","new","samples","are","generated",".",":",":param","str","model_file",":","path","to","the","python","file","containing","the","FlintModel","instance","which","named","'model","'",".",":","param","list","trans_methods",":","indicate","what","transformations","to","apply","to","dataset",".",":","param","dict","trans_config",":","parameters","for","the","initialization","of","the","transformation","instances",".",":","param","bool","return_unk",":","whether","apply","transformations","which","may","influence","label","of","sample",".",":","param","list","sub_methods",":","indicate","what","subpopulations","to","apply","to","dataset",".",":","param","dict","sub_config",":","parameters","for","the","initialization","of","the","subpopulation","instances",".",":","param","str","attack_methods",":","path","to","the","python","file","containing","the","Attack","instances","which","named","``","attacks","''",".",":","param","str|list","[","str","]","validate_methods",":","indicate","use","which","validate","methods","to","calculate","confidence","of","generated","samples","."]
returns: Config
code:
```python
def __init__(
    self,
    task="UT",
    out_dir=None,
    max_trans=1,
    random_seed=1,
    fields=None,
    flint_model=None,
    trans_methods=None,
    trans_config=None,
    return_unk=True,
    sub_methods=None,
    sub_config=None,
    attack_methods=None,
    validate_methods=None,
    **kwargs,
):
    """
    :param str task: task name
    :param string out_dir: out dir for saving generated samples,
        default current path.
    :param int max_trans: maximum transformed samples generated by one
        original sample per Transformation.
    :param int random_seed: random number seed to reproduce generation.
    :param str|list[str] fields: fields on which new samples are generated.
    :param str model_file: path to the python file containing
        the FlintModel instance named 'model'.
    :param list trans_methods: indicate what transformations
        to apply to dataset.
    :param dict trans_config: parameters for the initialization
        of the transformation instances.
    :param bool return_unk: whether to apply transformations which may
        influence the label of a sample.
    :param list sub_methods: indicate what subpopulations
        to apply to dataset.
    :param dict sub_config: parameters for the initialization
        of the subpopulation instances.
    :param str attack_methods: path to the python file containing
        the Attack instances named "attacks".
    :param str|list[str] validate_methods: indicate which validate
        methods to use to calculate confidence of generated samples.
    """
    self.task = task
    self.out_dir = out_dir if out_dir else "."
    self.max_trans = max_trans
    self.fields = fields if fields else TRANSFORM_FIELDS[self.task]
    self.flint_model = flint_model
    self.random_seed = random_seed

    if len(task) >= 2 and task[-2:] == "cn":
        self.trans_methods = self.get_generate_methods(
            trans_methods,
            ALLOWED_cn_TRANSFORMATIONS,
            allow_pipeline=True,
        )
    else:
        self.trans_methods = self.get_generate_methods(
            trans_methods,
            ALLOWED_TRANSFORMATIONS,
            allow_pipeline=True,
        )
    self.trans_config = trans_config if trans_config else {}
    # TODO, support the function. default not return origin and return unk
    self.return_unk = return_unk

    self.sub_methods = self.get_generate_methods(
        sub_methods, ALLOWED_SUBPOPULATIONS
    )
    self.sub_config = sub_config if sub_config else {}

    self.attack_methods = attack_methods
    self.validate_methods = self.get_generate_methods(
        validate_methods, ALLOWED_VALIDATORS
    )

    self.check_config()
```
["def","__init__","(","self",",","task=","''","UT","''",",","out_dir=None",",","max_trans=1",",","random_seed=1",",","fields=None",",","flint_model=None",",","trans_methods=None",",","trans_config=None",",","return_unk=True",",","sub_methods=None",",","sub_config=None",",","attack_methods=None",",","validate_methods=None",",","*","*","kwargs",",",")",":","``","''","''",":","param","str","task",":","task","name",":","param","string","out_dir",":","out","dir","for","saving","generated","samples",",","default","current","path",".",":","param","int","max_trans",":","maximum","transformed","samples","generate","by","one","original","sample","pre","Transformation",".",":","param","int","random_seed",":","random","number","seed","to","reproduce","generation",".",":","param","str|list","[","str","]","fields",":","fields","on","which","new","samples","are","generated",".",":",":param","str","model_file",":","path","to","the","python","file","containing","the","FlintModel","instance","which","named","'model","'",".",":","param","list","trans_methods",":","indicate","what","transformations","to","apply","to","dataset",".",":","param","dict","trans_config",":","parameters","for","the","initialization","of","the","transformation","instances",".",":","param","bool","return_unk",":","whether","apply","transformations","which","may","influence","label","of","sample",".",":","param","list","sub_methods",":","indicate","what","subpopulations","to","apply","to","dataset",".",":","param","dict","sub_config",":","parameters","for","the","initialization","of","the","subpopulation","instances",".",":","param","str","attack_methods",":","path","to","the","python","file","containing","the","Attack","instances","which","named","``","attacks","''",".",":","param","str|list","[","str","]","validate_methods",":","indicate","use","which","validate","methods","to","calculate","confidence","of","generated","samples.","``","''","''","self.task","=","task","self.out_dir","=","out_dir","if","out_dir","else","``",".","''","self.max_trans","=","max_trans","self.fields","=","fields","if","fields","else","TRANSFORM_FIELDS","[","self.task","]","self.flint_model","=","flint_model","self.random_seed","=","random_seed","if","len","(","task",")",">","=","2","and","task","[","-2",":","]","==","``","cn","''",":","self.trans_methods","=","self.get_generate_methods","(","trans_methods",",","ALLOWED_cn_TRANSFORMATIONS",",","allow_pipeline=True",",",")","else",":","self.trans_methods","=","self.get_generate_methods","(","trans_methods",",","ALLOWED_TRANSFORMATIONS",",","allow_pipeline=True",",",")","self.trans_config","=","trans_config","if","trans_config","else","{","}","#","TODO",",","support","the","function",".","default","not","return","origin","and","return","unk","self.return_unk","=","return_unk","self.sub_methods","=","self.get_generate_methods","(","sub_methods",",","ALLOWED_SUBPOPULATIONS",")","self.sub_config","=","sub_config","if","sub_config","else","{","}","self.attack_methods","=","attack_methods","self.validate_methods","=","self.get_generate_methods","(","validate_methods",",","ALLOWED_VALIDATORS",")","self.check_config","(",")"]
lstart: 24
lend: 97
raises: null
filename: config.py
file_path: textflint/textflint/input/config/config.py
imports: import os import six import json import copy from ...common.utils.logger import logger from ...common.settings import NLP_TASK_MAP, ALLOWED_TRANSFORMATIONS, TRANSFORM_FIELDS, ALLOWED_SUBPOPULATIONS, ALLOWED_VALIDATORS, ALLOWED_cn_TRANSFORMATIONS
total_objects: 15
num_classes: 1
num_imports: 6
num_functions: 0
num_all_bases: 0
num_methods: 8
num_bases: null
label_desc: Use image node_id 1 to create a new Config object with example: obj = Config(task, out_dir, max_trans, random_seed, fields, flint_model, trans_methods, trans_config, return_unk, sub_methods, sub_config, attack_methods, validate_methods)
label_desc_len: 237
label_id: node_id 1
__index_level_0__: 2,188,348
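Typical construction, assuming textflint is installed (the module path follows the record's file_path, and the transformation name is illustrative; valid names depend on the task's allowed list):

```python
from textflint.input.config.config import Config

# "Ocr" is meant as one example of a UT transformation name.
cfg = Config(task="UT", max_trans=2, trans_methods=["Ocr"])
print(cfg.out_dir)    # "." by default
print(cfg.max_trans)  # 2
```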

---

name: check_config
class_name: Config
class_bases: null
is_member: true
args: self
class_docstr: Hold some config params to control generation and report procedure.
["Hold","some","config","params","to","control","generation","and","report","procedure","."]
docstr: Check common config params.
["Check","common","config","params","."]
returns: null
code:
```python
def check_config(self):
    r"""
    Check common config params.

    """
    if self.task.upper() not in NLP_TASK_MAP:
        logger.error(
            "Your task is {0}, just support {1}.".format(
                self.task, NLP_TASK_MAP.keys()
            )
        )

    assert isinstance(self.out_dir, str)
    assert isinstance(self.max_trans, int)
    assert isinstance(self.random_seed, int)
    assert isinstance(self.fields, (str, list))
    assert isinstance(self.trans_config, dict)
    assert isinstance(self.return_unk, bool)
    assert isinstance(self.sub_config, dict)

    if self.flint_model:
        assert os.path.exists(self.flint_model), (
            "Please input an existing python file path "
            "which contains a FlintModel instance"
        )
    if self.attack_methods:
        assert os.path.exists(self.attack_methods), (
            "Please input an existing python file path "
            "which contains an Attack instance"
        )
    if self.validate_methods:
        assert isinstance(self.validate_methods, (str, list))
```
["def","check_config","(","self",")",":","r","''","''","''","Check","common","config","params.","``","''","''","if","self.task.upper","(",")","not","in","NLP_TASK_MAP",":","logger.error","(","``","Your","task","is","{","0","}",",","just","support","{","1","}",".","``",".format","(","self.task",",","NLP_TASK_MAP.keys","(",")",")",")","assert","isinstance","(","self.out_dir",",","str",")","assert","isinstance","(","self.max_trans",",","int",")","assert","isinstance","(","self.random_seed",",","int",")","assert","isinstance","(","self.fields",",","(","str",",","list",")",")","assert","isinstance","(","self.trans_config",",","dict",")","assert","isinstance","(","self.return_unk",",","bool",")","assert","isinstance","(","self.sub_config",",","dict",")","if","self.flint_model",":","assert","os.path.exists","(","self.flint_model",")",",","(","``","Please","input","a","exist","python","file","path","``","``","which","contains","FlintModel","instance","''",")","if","self.attack_methods",":","assert","os.path.exists","(","self.attack_methods",")",",","(","``","Please","input","a","exist","python","file","path","``","``","which","contains","Attack","instance","''",")","if","self.validate_methods",":","assert","isinstance","(","self.validate_methods",",","(","str",",","list",")",")"]
lstart: 99
lend: 128
raises: null
filename: config.py
file_path: textflint/textflint/input/config/config.py
imports: import os import six import json import copy from ...common.utils.logger import logger from ...common.settings import NLP_TASK_MAP, ALLOWED_TRANSFORMATIONS, TRANSFORM_FIELDS, ALLOWED_SUBPOPULATIONS, ALLOWED_VALIDATORS, ALLOWED_cn_TRANSFORMATIONS
total_objects: 15
num_classes: 1
num_imports: 6
num_functions: 0
num_all_bases: 0
num_methods: 8
num_bases: null
label_desc: Use image node_id 2 for calling the Config obj's underlying member method code with example usage: obj.check_config() without return types
label_desc_len: 138
label_id: node_id 2
__index_level_0__: 2,188,349

---

name: get_generate_methods
class_name: Config
class_bases: null
is_member: true
args: self,methods,task_to_methods,allow_pipeline
class_docstr: Hold some config params to control generation and report procedure.
["Hold","some","config","params","to","control","generation","and","report","procedure","."]
docstr:
    Validate transformation or subpopulation methods.
    Watch out! Some UT transformations/subpopulations may not be compatible with your task; please choose your methods carefully.
    :param list methods: transformation or subpopulation to apply to dataset. If not provided, return default generated methods.
    :param dict task_to_methods: map allowed methods by task name.
    :param bool allow_pipeline: whether to allow pipeline input
    :return: list of transformation/subpopulation.
["Validate","transformation","or","subpopulation","methods",".","Watch","out","!","Some","UT","transformations\/subpopulations","may","not","compatible","with","your","task",",","please","choose","your","method","carefully",".",":","param","list","methods",":","transformation","or","subpopulation","need","to","apply","to","dataset",".","If","not","provide",",","return","default","generated","methods",".",":","param","dict","task_to_methods",":","map","allowed","methods","by","task","name",".",":","param","bool","allow_pipeline",":","whether","allow","pipeline","input",":","return",":","list","of","transformation\/subpopulation","."]
returns: legal_methods
code:
```python
def get_generate_methods(self, methods, task_to_methods, allow_pipeline=False):
    r"""
    Validate transformation or subpopulation methods.

    Watch out! Some UT transformations/subpopulations may not be
    compatible with your task; please choose your methods carefully.

    :param list methods: transformation or subpopulation to apply
        to dataset. If not provided, return default generated methods.
    :param dict task_to_methods: map allowed methods by task name.
    :param bool allow_pipeline: whether to allow pipeline input
    :return: list of transformation/subpopulation.

    """
    allowed_methods = task_to_methods[self.task]
    legal_methods = []

    if methods:
        for method in methods:
            if not isinstance(method, (str, list)):
                raise ValueError(
                    f"Do not support transformation/subpopulation "
                    f"input type {type(method)}"
                )
            if isinstance(method, str):
                if method not in allowed_methods:
                    logger.warning(
                        "Do not support {0}, skip this "
                        "input method".format(method)
                    )
                else:
                    legal_methods.append(method)
            else:
                if not allow_pipeline:
                    raise ValueError(
                        f"Do not support pipeline method input {method}"
                    )
                allow = True
                for _method in method:
                    if _method not in allowed_methods:
                        logger.warning(
                            "Do not support {0}, skip "
                            "this method".format(method)
                        )
                        allow = False
                if allow:
                    legal_methods.append(method)
    else:
        legal_methods = legal_methods + allowed_methods

    return legal_methods
```
["def","get_generate_methods","(","self",",","methods",",","task_to_methods",",","allow_pipeline=False",")",":","r","''","''","''","Validate","transformation","or","subpopulation","methods",".","Watch","out","!","Some","UT","transformations\/subpopulations","may","not","compatible","with","your","task",",","please","choose","your","method","carefully",".",":","param","list","methods",":","transformation","or","subpopulation","need","to","apply","to","dataset",".","If","not","provide",",","return","default","generated","methods",".",":","param","dict","task_to_methods",":","map","allowed","methods","by","task","name",".",":","param","bool","allow_pipeline",":","whether","allow","pipeline","input",":","return",":","list","of","transformation\/subpopulation.","``","''","''","allowed_methods","=","task_to_methods","[","self.task","]","legal_methods","=","[","]","if","methods",":","for","method","in","methods",":","if","not","isinstance","(","method",",","(","str",",","list",")",")",":","raise","ValueError","(","f","''","Do","not","support","transformation\/subpopulation","``","f","''","input","type","{","type","(","method",")","}","''",")","if","isinstance","(","method",",","str",")",":","if","method","not","in","allowed_methods",":","logger.warning","(","``","Do","not","support","{","0","}",",","skip","this","``","``","input","method","''",".format","(","method",")",")","else",":","legal_methods.append","(","method",")","else",":","if","not","allow_pipeline",":","raise","ValueError","(","f","''","Do","not","support","pipeline","method","input","{","method","}","''",")","allow","=","True","for","_method","in","method",":","if","_method","not","in","allowed_methods",":","logger.warning","(","``","Do","not","support","{","0","}",",","skip","``","``","this","method","''",".format","(","method",")",")","allow","=","False","if","allow",":","legal_methods.append","(","method",")","else",":","legal_methods","=","legal_methods","+","allowed_methods","return","legal_methods"]
lstart: 135
lend: 188
raises: null
filename: config.py
file_path: textflint/textflint/input/config/config.py
imports: import os import six import json import copy from ...common.utils.logger import logger from ...common.settings import NLP_TASK_MAP, ALLOWED_TRANSFORMATIONS, TRANSFORM_FIELDS, ALLOWED_SUBPOPULATIONS, ALLOWED_VALIDATORS, ALLOWED_cn_TRANSFORMATIONS
total_objects: 15
num_classes: 1
num_imports: 6
num_functions: 0
num_all_bases: 0
num_methods: 8
num_bases: null
label_desc: Use image node_id 3 for calling the Config obj's underlying member method code with example usage: obj.get_generate_methods(methods, task_to_methods, allow_pipeline) and returns: legal_methods
label_desc_len: 192
label_id: node_id 3
__index_level_0__: 2,188,350

---

name: from_dict
class_name: Config
class_bases: null
is_member: true
args: cls,json_object
class_docstr: Hold some config params to control generation and report procedure.
["Hold","some","config","params","to","control","generation","and","report","procedure","."]
Constructs a `Config` from a Python dictionary of parameters.
["Constructs","a","`","Config","`","from","a","Python","dictionary","of","parameters","."]
config
def from_dict(cls, json_object): r""" Constructs a `Config` from a Python dictionary of parameters. """ config = cls(task=json_object["task"]) for key, value in six.iteritems(json_object): config.__dict__[key] = value return config
["def","from_dict","(","cls",",","json_object",")",":","r","''","''","''","Constructs","a","`","Config","`","from","a","Python","dictionary","of","parameters.","``","''","''","config","=","cls","(","task=json_object","[","``","task","''","]",")","for","key",",","value","in","six.iteritems","(","json_object",")",":","config.__dict__","[","key","]","=","value","return","config"]
191
199
null
config.py
textflint/textflint/input/config/config.py
import os import six import json import copy from ...common.utils.logger import logger from ...common.settings import NLP_TASK_MAP, ALLOWED_TRANSFORMATIONS, TRANSFORM_FIELDS, ALLOWED_SUBPOPULATIONS, ALLOWED_VALIDATORS, ALLOWED_cn_TRANSFORMATIONS
15
1
6
0
0
8
null
Use image node_id 4 for calling the Config obj's underlying member method code with example usage: obj.from_dict(cls, json_object) and returns: config
150
node_id 4
2,188,351
from_json_file
Config
null
true
cls,json_file
Hold some config params to control generation and report procedure.
["Hold","some","config","params","to","control","generation","and","report","procedure","."]
Constructs a `Config` from a json file of parameters.
["Constructs","a","`","Config","`","from","a","json","file","of","parameters","."]
cls
def from_json_file(cls, json_file): r""" Constructs a `Config` from a json file of parameters. """ with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() return cls.from_dict(json.loads(text))
["def","from_json_file","(","cls",",","json_file",")",":","r","''","''","''","Constructs","a","`","Config","`","from","a","json","file","of","parameters.","``","''","''","with","open","(","json_file",",","``","r","''",",","encoding=","''","utf-8","''",")","as","reader",":","text","=","reader.read","(",")","return","cls.from_dict","(","json.loads","(","text",")",")"]
202
209
null
config.py
textflint/textflint/input/config/config.py
import os import six import json import copy from ...common.utils.logger import logger from ...common.settings import NLP_TASK_MAP, ALLOWED_TRANSFORMATIONS, TRANSFORM_FIELDS, ALLOWED_SUBPOPULATIONS, ALLOWED_VALIDATORS, ALLOWED_cn_TRANSFORMATIONS
15
1
6
0
0
8
null
Use image node_id 5 for calling the Config obj's underlying member method code with example usage: obj.from_json_file(cls, json_file) and returns: cls
150
node_id 5
2,188,352
to_dict
Config
null
true
self
Hold some config params to control generation and report procedure.
["Hold","some","config","params","to","control","generation","and","report","procedure","."]
Serializes this instance to a Python dictionary.
["Serializes","this","instance","to","a","Python","dictionary","."]
output
def to_dict(self): r""" Serializes this instance to a Python dictionary. """ output = copy.deepcopy(self.__dict__) return output
["def","to_dict","(","self",")",":","r","''","''","''","Serializes","this","instance","to","a","Python","dictionary.","``","''","''","output","=","copy.deepcopy","(","self.__dict__",")","return","output"]
211
218
null
config.py
textflint/textflint/input/config/config.py
import os import six import json import copy from ...common.utils.logger import logger from ...common.settings import NLP_TASK_MAP, ALLOWED_TRANSFORMATIONS, TRANSFORM_FIELDS, ALLOWED_SUBPOPULATIONS, ALLOWED_VALIDATORS, ALLOWED_cn_TRANSFORMATIONS
15
1
6
0
0
8
null
Use image node_id 6 for calling the Config obj's underlying member method code with example usage: obj.to_dict() and returns: output
132
node_id 6
2,188,353
monotonic_index
global
null
false
start,end,dtype,closed
null
null
null
null
IntervalIndex
def monotonic_index(start, end, dtype="int64", closed="right"): return IntervalIndex.from_breaks( np.arange(start, end, dtype=dtype), closed=closed )
["def","monotonic_index","(","start",",","end",",","dtype=","''","int64","''",",","closed=","''","right","''",")",":","return","IntervalIndex.from_breaks","(","np.arange","(","start",",","end",",","dtype=dtype",")",",","closed=closed",")"]
13
14
null
test_setops.py
pandas/pandas/tests/indexes/interval/test_setops.py
import numpy import pytest from pandas import Index, IntervalIndex, Timestamp, interval_range import pandas._testing
15
null
4
2
null
null
null
Use image node_id 1 for calling a global function with example usage: monotonic_index(start, end, dtype, closed) and returns: IntervalIndex
139
node_id 1
1,514,641
empty_index
global
null
false
dtype,closed
null
null
null
null
IntervalIndex
def empty_index(dtype="int64", closed="right"): return IntervalIndex(np.array([], dtype=dtype), closed=closed)
["def","empty_index","(","dtype=","''","int64","''",",","closed=","''","right","''",")",":","return","IntervalIndex","(","np.array","(","[","]",",","dtype=dtype",")",",","closed=closed",")"]
17
18
null
test_setops.py
pandas/pandas/tests/indexes/interval/test_setops.py
import numpy import pytest from pandas import Index, IntervalIndex, Timestamp, interval_range import pandas._testing
15
null
4
2
null
null
null
Use image node_id 2 for calling a global function with example usage: empty_index(dtype, closed) and returns: IntervalIndex
123
node_id 2
1,514,642
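A quick sketch of what these two helper records return, following standard pandas/NumPy behavior:

import numpy as np

# Breaks np.arange(0, 11) = [0, 1, ..., 10] give ten right-closed
# intervals (0, 1], (1, 2], ..., (9, 10].
idx = monotonic_index(0, 11)
assert len(idx) == 10 and idx.is_monotonic_increasing

# empty_index() is the zero-length counterpart with matching dtype/closed.
assert len(empty_index()) == 0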
state_batch_axis
GPT2Decoder
BaseStepDecoder
true
self
null
null
null
null
unknown
def state_batch_axis(self): return 2 if self._layout == "NT" else 3
["def","state_batch_axis","(","self",")",":","return","2","if","self._layout","==","``","NT","''","else","3"]
44
45
null
interactive_conditional_gpt2_samples.py
gluon-nlp/scripts/generation/interactive_conditional_gpt2_samples.py
import os import mxnet import argparse from gluonnlp.utils import set_seed from gluonnlp.sequence_sampler import BeamSearchSampler, BaseStepDecoder from gluonnlp.models.gpt2 import GPT2ForLM, list_pretrained_gpt2, get_pretrained_gpt2
15
1
6
2
1
5
1
Use image node_id 2 for calling the GPT2Decoder obj's underlying member method code with example usage: obj.state_batch_axis() and returns: unknown
147
node_id 2
1,097,715
to_json_string
Config
null
true
self
Hold some config params to control generation and report procedure.
["Hold","some","config","params","to","control","generation","and","report","procedure","."]
Serializes this instance to a JSON string.
["Serializes","this","instance","to","a","JSON","string","."]
json
def to_json_string(self): r""" Serializes this instance to a JSON string. """ return json.dumps( self.to_dict(), indent=2, sort_keys=True, ensure_ascii=False )
["def","to_json_string","(","self",")",":","r","''","''","''","Serializes","this","instance","to","a","JSON","string.","``","''","''","return","json.dumps","(","self.to_dict","(",")",",","indent=2",",","sort_keys=True",",","ensure_ascii=False",")"]
220
231
null
config.py
textflint/textflint/input/config/config.py
import os import six import json import copy from ...common.utils.logger import logger from ...common.settings import NLP_TASK_MAP, ALLOWED_TRANSFORMATIONS, TRANSFORM_FIELDS, ALLOWED_SUBPOPULATIONS, ALLOWED_VALIDATORS, ALLOWED_cn_TRANSFORMATIONS
15
1
6
0
0
8
null
Use image node_id 7 for calling the Config obj's underlying member method code with example usage: obj.to_json_string() and returns: json
137
node_id 7
2,188,354
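Taken together, the Config records above form a serialization round trip. A minimal sketch, where field names other than task are illustrative and all config fields are assumed JSON-serializable:

import json

config = Config.from_dict({"task": "SA", "max_trans": 1})
assert config.to_dict()["task"] == "SA"

# to_json_string() feeds to_dict() through json.dumps, so parsing it back
# and re-building via from_dict recovers an equivalent config.
restored = Config.from_dict(json.loads(config.to_json_string()))
assert restored.to_dict() == config.to_dict()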
convert_to_cudf
global
null
false
result
null
null
null
null
df, modularity
def convert_to_cudf( result: cp.ndarray, ) -> Tuple[cudf.DataFrame, float]: """ Creates a cudf DataFrame from cupy arrays from pylibcugraph wrapper """ cupy_vertex, cupy_partition, modularity = result df = cudf.DataFrame() df["vertex"] = cupy_vertex df["partition"] = cupy_partition return df, modularity
["def","convert_to_cudf","(","result",":","cp.ndarray",",",")","-",">","Tuple","[","cudf.DataFrame",",","float","]",":","``","''","''","Creates","a","cudf","DataFrame","from","cupy","arrays","from","pylibcugraph","wrapper","``","''","''","cupy_vertex",",","cupy_partition",",","modularity","=","result","df","=","cudf.DataFrame","(",")","df","[","``","vertex","''","]","=","cupy_vertex","df","[","``","partition","''","]","=","cupy_partition","return","df",",","modularity"]
35
44
null
leiden.py
cugraph/python/cugraph/cugraph/dask/community/leiden.py
from __future__ import annotations from dask.distributed import wait, default_client import cugraph.dask.comms.comms import dask_cudf import dask from dask import delayed import cudf from pylibcugraph import ResourceHandle from pylibcugraph import leiden import numpy import cupy from typing import Tuple, TYPE_CHECKING
15
null
12
3
null
null
null
Use image node_id 1 for calling a global function with example usage: convert_to_cudf(result) and returns: df, modularity
122
node_id 1
686,168
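A shape-level sketch of the conversion above. It requires a GPU environment with CuPy and cuDF installed, and the numbers are made up:

import cupy as cp

result = (
    cp.asarray([0, 1, 2]),  # vertex ids
    cp.asarray([0, 0, 1]),  # partition labels
    0.42,                   # modularity score
)
df, modularity = convert_to_cudf(result)
# df has columns ['vertex', 'partition']; modularity == 0.42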
set_seed
global
null
false
args
null
null
null
null
null
def set_seed(args): np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed)
["def","set_seed","(","args",")",":","np.random.seed","(","args.seed",")","torch.manual_seed","(","args.seed",")","if","args.n_gpu",">","0",":","torch.cuda.manual_seed_all","(","args.seed",")"]
17
21
null
run_xsum_flexgen.py
H2O/h2o_flexgen/benchmark/xsum/run_xsum_flexgen.py
import argparse import numpy import torch import json import time from tqdm import tqdm import copy from flexgen.flex_opt import Policy, OptLM, ExecutionEnv, CompressionConfig from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig
15
null
9
6
null
null
null
Use image node_id 1 for calling a global function with example usage: set_seed(args) without return types
105
node_id 1
115,610
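set_seed only reads args.seed and args.n_gpu, so a bare namespace is enough to call it:

import argparse

args = argparse.Namespace(seed=42, n_gpu=0)
set_seed(args)  # seeds NumPy and torch; CUDA is seeded too when n_gpu > 0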
_call_plc_leiden
global
null
false
sID,mg_graph_x,max_iter,resolution,random_state,theta,do_expensive_check
null
null
null
null
pylibcugraph_leiden
def _call_plc_leiden( sID: bytes, mg_graph_x, max_iter: int, resolution: int, random_state: int, theta: int, do_expensive_check: bool, ) -> Tuple[cp.ndarray, cp.ndarray, float]: return pylibcugraph_leiden( resource_handle=ResourceHandle( Comms.get_handle(sID).getHandle() ), random_state=random_state, graph=mg_graph_x, max_level=max_iter, resolution=resolution, theta=theta, do_expensive_check=do_expensive_check, )
["def","_call_plc_leiden","(","sID",":","bytes",",","mg_graph_x",",","max_iter",":","int",",","resolution",":","int",",","random_state",":","int",",","theta",":","int",",","do_expensive_check",":","bool",",",")","-",">","Tuple","[","cp.ndarray",",","cp.ndarray",",","float","]",":","return","pylibcugraph_leiden","(","resource_handle=ResourceHandle","(","Comms.get_handle","(","sID",")",".getHandle","(",")",")",",","random_state=random_state",",","graph=mg_graph_x",",","max_level=max_iter",",","resolution=resolution",",","theta=theta",",","do_expensive_check=do_expensive_check",",",")"]
47
64
null
leiden.py
cugraph/python/cugraph/cugraph/dask/community/leiden.py
from __future__ import annotations from dask.distributed import wait, default_client import cugraph.dask.comms.comms import dask_cudf import dask from dask import delayed import cudf from pylibcugraph import ResourceHandle from pylibcugraph import leiden import numpy import cupy from typing import Tuple, TYPE_CHECKING
15
null
12
3
null
null
null
Use image node_id 2 for calling a global function with example usage: _call_plc_leiden(sID, mg_graph_x, max_iter, resolution, random_state, theta, do_expensive_check) and returns: pylibcugraph_leiden
199
node_id 2
686,169
forward
DropBlock2d
nn
true
self,x
DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
["DropBlock",".","See","https",":","\/\/arxiv.org\/pdf\/1810.12890.pdf"]
null
null
x,drop_block_fast_2d,drop_block_2d
def forward(self, x): if not self.training or not self.drop_prob: return x if self.fast: return drop_block_fast_2d( x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, ) else: return drop_block_2d( x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise, )
["def","forward","(","self",",","x",")",":","if","not","self.training","or","not","self.drop_prob",":","return","x","if","self.fast",":","return","drop_block_fast_2d","(","x",",","self.drop_prob",",","self.block_size",",","self.gamma_scale",",","self.with_noise",",","self.inplace",",",")","else",":","return","drop_block_2d","(","x",",","self.drop_prob",",","self.block_size",",","self.gamma_scale",",","self.with_noise",",","self.inplace",",","self.batchwise",",",")"]
126
134
null
drop.py
pytorch-image-models/timm/layers/drop.py
import torch import torch.nn import torch.nn.functional
15
2
3
3
2
2
1
Use image node_id 2 for calling the DropBlock2d obj's underlying member method code with example usage: obj.forward(x) and returns: x, drop_block_fast_2d, drop_block_2d
168
node_id 2
1,692,287
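A usage sketch, assuming the DropBlock2d constructor exposes the attributes that forward() reads (drop_prob, block_size, fast, and so on), as in timm:

import torch

blk = DropBlock2d(drop_prob=0.1, block_size=7)
x = torch.randn(2, 8, 28, 28)

blk.eval()
assert torch.equal(blk(x), x)  # identity when not training or drop_prob == 0

blk.train()
y = blk(x)  # contiguous blocks of activations zeroed; same shape as x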
forward
MNISTConvNet
torch.nn
true
self,x
null
null
null
null
F
def forward(self, x): # switched order of pooling and relu compared to the original example # to make it identical to the keras worker # seems to also give better accuracies x = F.max_pool2d(F.relu(self.conv1(x)), 2) if not self.conv2 is None: x = F.max_pool2d(F.relu(self.conv2(x)), 2) if not self.conv3 is None: x = F.max_pool2d(F.relu(self.conv3(x)), 2) x = self.dropout(x) x = x.view(-1, self.conv_output_size) x = F.relu(self.fc1(x)) x = self.dropout(x) x = self.fc2(x) return F.log_softmax(x, dim=1)
["def","forward","(","self",",","x",")",":","#","switched","order","of","pooling","and","relu","compared","to","the","original","example","#","to","make","it","identical","to","the","keras","worker","#","seems","to","also","give","better","accuracies","x","=","F.max_pool2d","(","F.relu","(","self.conv1","(","x",")",")",",","2",")","if","not","self.conv2","is","None",":","x","=","F.max_pool2d","(","F.relu","(","self.conv2","(","x",")",")",",","2",")","if","not","self.conv3","is","None",":","x","=","F.max_pool2d","(","F.relu","(","self.conv3","(","x",")",")",",","2",")","x","=","self.dropout","(","x",")","x","=","x.view","(","-1",",","self.conv_output_size",")","x","=","F.relu","(","self.fc1","(","x",")",")","x","=","self.dropout","(","x",")","x","=","self.fc2","(","x",")","return","F.log_softmax","(","x",",","dim=1",")"]
243
262
null
example_5_pytorch_worker.py
HpBandSter/hpbandster/examples/example_5_pytorch_worker.py
import ConfigSpace import ConfigSpace.hyperparameters from hpbandster.core.worker import Worker import logging
15
2
4
0
2
3
1
Use image node_id 2 for calling the MNISTConvNet obj's underlying member method code with example usage: obj.forward(x) and returns: F
134
node_id 2
117,777
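A forward-pass sketch. The constructor arguments mirror the compute() record further below, but the exact signature is an assumption:

import torch

model = MNISTConvNet(
    num_conv_layers=2, num_filters_1=16, num_filters_2=16,
    num_filters_3=None, dropout_rate=0.5, num_fc_units=32, kernel_size=3,
)
out = model(torch.rand(4, 1, 28, 28))  # an MNIST-sized batch
# out has shape (4, 10): per-class log-probabilities from log_softmax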
export
SensitivityAnalysis
null
true
self,filepath
null
null
Export the results of the sensitivity analysis to a csv file. The first line of the csv file describes the content structure; it is constructed from 'layername' and the sparsity list. Each line below records the validation metric returned by val_func when the corresponding layer is pruned at different sparsities. Note that, due to the early_stop option, some layers may not have metrics under all sparsities. layername, 0.25, 0.5, 0.75 conv1, 0.6, 0.55 conv2, 0.61, 0.57, 0.56 Parameters ---------- filepath : str Path of the output file
["Export","the","results","of","the","sensitivity","analysis","to","a","csv","file",".","The","firstline","of","the","csv","file","describe","the","content","structure",".","The","first","line","is","constructed","by","'layername","'","and","sparsity","list",".","Each","line","below","records","the","validation","metric","returned","by","val_func","when","this","layer","is","under","different","sparsities",".","Note","that",",","due","to","the","early_stop","option",",","some","layers","may","not","have","the","metrics","under","all","sparsities",".","layername",",","0.25",",","0.5",",","0.75","conv1",",","0.6",",","0.55","conv2",",","0.61",",","0.57",",","0.56","Parameters","--","--","--","--","--","filepath",":","str","Path","of","the","output","file"]
null
def export(self, filepath):
    """
    Export the results of the sensitivity analysis to a csv file.
    The first line of the csv file describes the content structure;
    it is constructed from 'layername' and the sparsity list. Each
    line below records the validation metric returned by val_func
    when the corresponding layer is pruned at different sparsities.
    Note that, due to the early_stop option, some layers may not
    have metrics under all sparsities.
    layername, 0.25, 0.5, 0.75
    conv1, 0.6, 0.55
    conv2, 0.61, 0.57, 0.56
    Parameters
    ----------
    filepath : str
        Path of the output file
    """
    str_sparsities = [str(x) for x in self.sparsities]
    header = ["layername"] + str_sparsities
    with open(filepath, "w") as csvf:
        csv_w = csv.writer(csvf)
        csv_w.writerow(header)
        for layername in self.sensitivities:
            row = []
            row.append(layername)
            for sparsity in sorted(
                self.sensitivities[layername].keys()
            ):
                row.append(self.sensitivities[layername][sparsity])
            csv_w.writerow(row)
["def","export","(","self",",","filepath",")",":","``","''","''","Export","the","results","of","the","sensitivity","analysis","to","a","csv","file",".","The","firstline","of","the","csv","file","describe","the","content","structure",".","The","first","line","is","constructed","by","'layername","'","and","sparsity","list",".","Each","line","below","records","the","validation","metric","returned","by","val_func","when","this","layer","is","under","different","sparsities",".","Note","that",",","due","to","the","early_stop","option",",","some","layers","may","not","have","the","metrics","under","all","sparsities",".","layername",",","0.25",",","0.5",",","0.75","conv1",",","0.6",",","0.55","conv2",",","0.61",",","0.57",",","0.56","Parameters","--","--","--","--","--","filepath",":","str","Path","of","the","output","file","``","''","''","str_sparsities","=","[","str","(","x",")","for","x","in","self.sparsities","]","header","=","[","``","layername","''","]","+","str_sparsities","with","open","(","filepath",",","``","w","''",")","as","csvf",":","csv_w","=","csv.writer","(","csvf",")","csv_w.writerow","(","header",")","for","layername","in","self.sensitivities",":","row","=","[","]","row.append","(","layername",")","for","sparsity","in","sorted","(","self.sensitivities","[","layername","]",".keys","(",")",")",":","row.append","(","self.sensitivities","[","layername","]","[","sparsity","]",")","csv_w.writerow","(","row",")"]
210
238
null
sensitivity_analysis.py
auptimizer/src/aup/compression/torch/utils/sensitivity_analysis.py
import copy import csv import logging from collections import OrderedDict import numpy import torch.nn
15
1
6
0
0
8
null
Use image node_id 6 for calling the SensitivityAnalysis obj's underlying member method code with example usage: obj.export(filepath) without return types
153
node_id 6
315,482
analysis
SensitivityAnalysis
null
true
self,val_args,val_kwargs,specified_layers
null
null
This function analyzes the sensitivity to pruning for each conv layer in the target model. If start and end are not set, we analyze all the conv layers by default. Users can specify several layers to analyze, or easily parallelize the analysis process, through the start and end parameters. Parameters ---------- val_args : list args for the val_function val_kwargs : dict kwargs for the val_function specified_layers : list list of layer names to analyze sensitivity for. If this variable is set, then only analyze the conv layers specified in the list. Users can also use this option to parallelize the sensitivity analysis easily. Returns ------- sensitivities : dict dict object that stores the trajectory of the accuracy/loss as the prune ratio changes
["This","function","analyze","the","sensitivity","to","pruning","for","each","conv","layer","in","the","target","model",".","If","start","and","end","are","not","set",",","we","analyze","all","the","conv","layers","by","default",".","Users","can","specify","several","layers","to","analyze","or","parallelize","the","analysis","process","easily","through","the","start","and","end","parameter",".","Parameters","--","--","--","--","--","val_args",":","list","args","for","the","val_function","val_kwargs",":","dict","kwargs","for","the","val_funtion","specified_layers",":","list","list","of","layer","names","to","analyze","sensitivity",".","If","this","variable","is","set",",","then","only","analyze","the","conv","layers","that","specified","in","the","list",".","User","can","also","use","this","option","to","parallelize","the","sensitivity","analysis","easily",".","Returns","--","--","--","-","sensitivities",":","dict","dict","object","that","stores","the","trajectory","of","the","accuracy\/loss","when","the","prune","ratio","changes"]
self
def analysis(
    self, val_args=None, val_kwargs=None, specified_layers=None
):
    """
    This function analyzes the sensitivity to pruning for each conv
    layer in the target model. If start and end are not set, we analyze
    all the conv layers by default. Users can specify several layers to
    analyze, or easily parallelize the analysis process, through the
    start and end parameters.
    Parameters
    ----------
    val_args : list
        args for the val_function
    val_kwargs : dict
        kwargs for the val_function
    specified_layers : list
        list of layer names to analyze sensitivity for. If this variable
        is set, then only analyze the conv layers specified in the list.
        Users can also use this option to parallelize the sensitivity
        analysis easily.
    Returns
    -------
    sensitivities : dict
        dict object that stores the trajectory of the accuracy/loss
        as the prune ratio changes
    """
    if val_args is None:
        val_args = []
    if val_kwargs is None:
        val_kwargs = {}
    # Get the original validation metric (accuracy/loss) before pruning.
    # This is the accuracy baseline used throughout the analysis.
    self.ori_metric = self.val_func(*val_args, **val_kwargs)
    namelist = list(self.target_layer.keys())
    if specified_layers is not None:
        # only analyze the specified conv layers
        namelist = list(
            filter(lambda x: x in specified_layers, namelist)
        )
    for name in namelist:
        self.sensitivities[name] = {}
        for sparsity in self.sparsities:
            # here the sparsity is the relative sparsity of the
            # remaining weights
            # Calculate the actual prune ratio based on the already pruned ratio
            real_sparsity = (
                1.0 - self.already_pruned[name]
            ) * sparsity + self.already_pruned[name]
            # TODO In the current L1/L2 Filter Pruner, 'op_types' is still
            # necessary; the L1/L2 Pruner should arguably derive the
            # op_types automatically from the op_names
            cfg = [
                {
                    "sparsity": real_sparsity,
                    "op_names": [name],
                    "op_types": ["Conv2d"],
                }
            ]
            pruner = self.Pruner(self.model, cfg)
            pruner.compress()
            val_metric = self.val_func(*val_args, **val_kwargs)
            logger.info(
                "Layer: %s Sparsity: %.2f Validation Metric: %.4f",
                name,
                real_sparsity,
                val_metric,
            )
            self.sensitivities[name][sparsity] = val_metric
            pruner._unwrap_model()
            del pruner
            # check if the current metric meets the stop condition
            if self._need_to_stop(self.ori_metric, val_metric):
                break

        # Reset the weights pruned by the pruner. Because the input
        # sparsities are sorted, we do not need to reset the weights
        # when the sparsity changes; we only need to reset them when
        # the pruning layer changes.
        self.model.load_state_dict(self.ori_state_dict)

    return self.sensitivities
["def","analysis","(","self",",","val_args=None",",","val_kwargs=None",",","specified_layers=None",")",":","``","''","''","This","function","analyze","the","sensitivity","to","pruning","for","each","conv","layer","in","the","target","model",".","If","start","and","end","are","not","set",",","we","analyze","all","the","conv","layers","by","default",".","Users","can","specify","several","layers","to","analyze","or","parallelize","the","analysis","process","easily","through","the","start","and","end","parameter",".","Parameters","--","--","--","--","--","val_args",":","list","args","for","the","val_function","val_kwargs",":","dict","kwargs","for","the","val_funtion","specified_layers",":","list","list","of","layer","names","to","analyze","sensitivity",".","If","this","variable","is","set",",","then","only","analyze","the","conv","layers","that","specified","in","the","list",".","User","can","also","use","this","option","to","parallelize","the","sensitivity","analysis","easily",".","Returns","--","--","--","-","sensitivities",":","dict","dict","object","that","stores","the","trajectory","of","the","accuracy\/loss","when","the","prune","ratio","changes","``","''","''","if","val_args","is","None",":","val_args","=","[","]","if","val_kwargs","is","None",":","val_kwargs","=","{","}","#","Get","the","original","validation","metric","(","accuracy\/loss",")","before","pruning","#","Get","the","accuracy","baseline","before","starting","the","analysis",".","self.ori_metric","=","self.val_func","(","*","val_args",",","*","*","val_kwargs",")","namelist","=","list","(","self.target_layer.keys","(",")",")","if","specified_layers","is","not","None",":","#","only","analyze","several","specified","conv","layers","namelist","=","list","(","filter","(","lambda","x",":","x","in","specified_layers",",","namelist",")",")","for","name","in","namelist",":","self.sensitivities","[","name","]","=","{","}","for","sparsity","in","self.sparsities",":","#","here","the","sparsity","is","the","relative","sparsity","of","the","#","the","remained","weights","#","Calculate","the","actual","prune","ratio","based","on","the","already","pruned","ratio","real_sparsity","=","(","1.0","-","self.already_pruned","[","name","]",")","*","sparsity","+","self.already_pruned","[","name","]","#","TODO","In","current","L1\/L2","Filter","Pruner",",","the","'op_types","'","is","still","necessary","#","I","think","the","L1\/L2","Pruner","should","specify","the","op_types","automaticlly","#","according","to","the","op_names","cfg","=","[","{","``","sparsity","''",":","real_sparsity",",","``","op_names","''",":","[","name","]",",","``","op_types","''",":","[","``","Conv2d","''","]",",","}","]","pruner","=","self.Pruner","(","self.model",",","cfg",")","pruner.compress","(",")","val_metric","=","self.val_func","(","*","val_args",",","*","*","val_kwargs",")","logger.info","(","``","Layer",":","%","s","Sparsity",":","%",".2f","Validation","Metric",":","%",".4f","''",",","name",",","real_sparsity",",","val_metric",",",")","self.sensitivities","[","name","]","[","sparsity","]","=","val_metric","pruner._unwrap_model","(",")","del","pruner","#","check","if","the","current","metric","meet","the","stop","condition","if","self._need_to_stop","(","self.ori_metric",",","val_metric",")",":","break","#","reset","the","weights","pruned","by","the","pruner",",","because","the","#","input","sparsities","is","sorted",",","so","we","donnot","need","to","reset","#","weight","of","the","layer","when","the","sparsity","changes",",","instead",",","#","we","only","need","
reset","the","weight","when","the","pruning","layer","changes",".","self.model.load_state_dict","(","self.ori_state_dict",")","return","self.sensitivities"]
138
208
null
sensitivity_analysis.py
auptimizer/src/aup/compression/torch/utils/sensitivity_analysis.py
import copy import csv import logging from collections import OrderedDict import numpy import torch.nn
15
1
6
0
0
8
null
Use image node_id 5 for calling the SensitivityAnalysis obj's underlying member method code with example usage: obj.analysis(val_args, val_kwargs, specified_layers) and returns: self
182
node_id 5
315,481
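A hedged end-to-end sketch tying the two SensitivityAnalysis records above together; the constructor signature is not shown in these records and is assumed here:

def val_func():
    ...  # evaluate the current weights, return a scalar accuracy/loss

s = SensitivityAnalysis(model, val_func)  # assumed constructor
sensitivities = s.analysis()              # {layer_name: {sparsity: metric}}
s.export("sensitivity.csv")               # one csv row per analyzed layer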
drop_block_2d
global
null
false
x,drop_prob,block_size,gamma_scale,with_noise,inplace,batchwise
null
null
null
null
x
def drop_block_2d( x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False, ): """DropBlock. See https://arxiv.org/pdf/1810.12890.pdf DropBlock with an experimental gaussian noise option. This layer has been tested on a few training runs with success, but needs further validation and possibly optimization for lower runtime impact. """ B, C, H, W = x.shape total_size = W * H clipped_block_size = min(block_size, min(W, H)) # seed_drop_rate, the gamma parameter gamma = ( gamma_scale * drop_prob * total_size / clipped_block_size**2 / ((W - block_size + 1) * (H - block_size + 1)) ) # Forces the block to be inside the feature map. w_i, h_i = torch.meshgrid( torch.arange(W).to(x.device), torch.arange(H).to(x.device) ) valid_block = ( (w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2) ) & ( (h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2) ) valid_block = torch.reshape(valid_block, (1, 1, H, W)).to( dtype=x.dtype ) if batchwise: # one mask for whole batch, quite a bit faster uniform_noise = torch.rand( (1, C, H, W), dtype=x.dtype, device=x.device ) else: uniform_noise = torch.rand_like(x) block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to( dtype=x.dtype ) block_mask = -F.max_pool2d( -block_mask, kernel_size=clipped_block_size, # block_size, stride=1, padding=clipped_block_size // 2, ) if with_noise: normal_noise = ( torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x) ) if inplace: x.mul_(block_mask).add_(normal_noise * (1 - block_mask)) else: x = x * block_mask + normal_noise * (1 - block_mask) else: normalize_scale = ( block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7) ).to(x.dtype) if inplace: x.mul_(block_mask * normalize_scale) else: x = x * block_mask * normalize_scale return x
["def","drop_block_2d","(","x",",","drop_prob",":","float","=","0.1",",","block_size",":","int","=","7",",","gamma_scale",":","float","=","1.0",",","with_noise",":","bool","=","False",",","inplace",":","bool","=","False",",","batchwise",":","bool","=","False",",",")",":","``","''","''","DropBlock",".","See","https",":","\/\/arxiv.org\/pdf\/1810.12890.pdf","DropBlock","with","an","experimental","gaussian","noise","option",".","This","layer","has","been","tested","on","a","few","training","runs","with","success",",","but","needs","further","validation","and","possibly","optimization","for","lower","runtime","impact.","``","''","''","B",",","C",",","H",",","W","=","x.shape","total_size","=","W","*","H","clipped_block_size","=","min","(","block_size",",","min","(","W",",","H",")",")","#","seed_drop_rate",",","the","gamma","parameter","gamma","=","(","gamma_scale","*","drop_prob","*","total_size","\/","clipped_block_size","*","*","2","\/","(","(","W","-","block_size","+","1",")","*","(","H","-","block_size","+","1",")",")",")","#","Forces","the","block","to","be","inside","the","feature","map",".","w_i",",","h_i","=","torch.meshgrid","(","torch.arange","(","W",")",".to","(","x.device",")",",","torch.arange","(","H",")",".to","(","x.device",")",")","valid_block","=","(","(","w_i",">","=","clipped_block_size","\/\/","2",")","&","(","w_i","<","W","-","(","clipped_block_size","-","1",")","\/\/","2",")",")","&","(","(","h_i",">","=","clipped_block_size","\/\/","2",")","&","(","h_i","<","H","-","(","clipped_block_size","-","1",")","\/\/","2",")",")","valid_block","=","torch.reshape","(","valid_block",",","(","1",",","1",",","H",",","W",")",")",".to","(","dtype=x.dtype",")","if","batchwise",":","#","one","mask","for","whole","batch",",","quite","a","bit","faster","uniform_noise","=","torch.rand","(","(","1",",","C",",","H",",","W",")",",","dtype=x.dtype",",","device=x.device",")","else",":","uniform_noise","=","torch.rand_like","(","x",")","block_mask","=","(","(","2","-","gamma","-","valid_block","+","uniform_noise",")",">","=","1",")",".to","(","dtype=x.dtype",")","block_mask","=","-F.max_pool2d","(","-block_mask",",","kernel_size=clipped_block_size",",","#","block_size",",","stride=1",",","padding=clipped_block_size","\/\/","2",",",")","if","with_noise",":","normal_noise","=","(","torch.randn","(","(","1",",","C",",","H",",","W",")",",","dtype=x.dtype",",","device=x.device",")","if","batchwise","else","torch.randn_like","(","x",")",")","if","inplace",":","x.mul_","(","block_mask",")",".add_","(","normal_noise","*","(","1","-","block_mask",")",")","else",":","x","=","x","*","block_mask","+","normal_noise","*","(","1","-","block_mask",")","else",":","normalize_scale","=","(","block_mask.numel","(",")","\/","block_mask.to","(","dtype=torch.float32",")",".sum","(",")",".add","(","1e-7",")",")",".to","(","x.dtype",")","if","inplace",":","x.mul_","(","block_mask","*","normalize_scale",")","else",":","x","=","x","*","block_mask","*","normalize_scale","return","x"]
22
67
null
drop.py
pytorch-image-models/timm/layers/drop.py
import torch import torch.nn import torch.nn.functional
15
null
3
3
null
null
null
Use image node_id 1 for calling a global function with example usage: drop_block_2d(x, drop_prob, block_size, gamma_scale, with_noise, inplace, batchwise) and returns: x
169
node_id 1
1,692,291
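A worked example of the seed rate gamma computed at the top of drop_block_2d, for a 28x28 feature map with the defaults block_size=7, drop_prob=0.1, gamma_scale=1.0:

H = W = 28
block_size = 7
drop_prob = 0.1
gamma = drop_prob * (W * H) / block_size**2 \
    / ((W - block_size + 1) * (H - block_size + 1))
print(round(gamma, 6))  # 0.003306: per-position probability of seeding a block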
extra_repr
DropPath
nn
true
self
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
["Drop","paths","(","Stochastic","Depth",")","per","sample","(","when","applied","in","main","path","of","residual","blocks",")","."]
null
null
str+round
def extra_repr(self): return f"drop_prob={round(self.drop_prob,3):0.3f}"
["def","extra_repr","(","self",")",":","return","f","''","drop_prob=","{","round","(","self.drop_prob,3",")",":0.3f","}","''"]
168
169
null
drop.py
pytorch-image-models/timm/layers/drop.py
import torch import torch.nn import torch.nn.functional
15
2
3
3
2
3
1
Use image node_id 3 for calling the DropPath obj's underlying member method code with example usage: obj.extra_repr() and returns: str, round
141
node_id 3
1,692,290
forward
DropPath
nn
true
self,x
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
["Drop","paths","(","Stochastic","Depth",")","per","sample","(","when","applied","in","main","path","of","residual","blocks",")","."]
null
null
drop_path
def forward(self, x): return drop_path( x, self.drop_prob, self.training, self.scale_by_keep )
["def","forward","(","self",",","x",")",":","return","drop_path","(","x",",","self.drop_prob",",","self.training",",","self.scale_by_keep",")"]
165
166
null
drop.py
pytorch-image-models/timm/layers/drop.py
import torch import torch.nn import torch.nn.functional
15
2
3
3
2
3
1
Use image node_id 2 for calling the DropPath obj's underlying member method code with example usage: obj.forward(x) and returns: drop_path
138
node_id 2
1,692,289
__init__
DropPath
nn
true
self,drop_prob,scale_by_keep
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
["Drop","paths","(","Stochastic","Depth",")","per","sample","(","when","applied","in","main","path","of","residual","blocks",")","."]
null
null
DropPath
def __init__( self, drop_prob: float = 0.0, scale_by_keep: bool = True ): super(DropPath, self).__init__() self.drop_prob = drop_prob self.scale_by_keep = scale_by_keep
["def","__init__","(","self",",","drop_prob",":","float","=","0.0",",","scale_by_keep",":","bool","=","True",")",":","super","(","DropPath",",","self",")",".__init__","(",")","self.drop_prob","=","drop_prob","self.scale_by_keep","=","scale_by_keep"]
160
163
null
drop.py
pytorch-image-models/timm/layers/drop.py
import torch import torch.nn import torch.nn.functional
15
2
3
3
2
3
1
Use image node_id 1 to create a new DropPath object from inherited base classes: nn with example: obj = DropPath(drop_prob, scale_by_keep)
138
node_id 1
1,692,288
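The three DropPath records above combine into the usual stochastic-depth pattern:

import torch

dp = DropPath(drop_prob=0.2)
x = torch.ones(8, 16)

dp.eval()
assert torch.equal(dp(x), x)  # identity at inference time

dp.train()
y = dp(x)  # ~20% of the 8 samples zeroed, survivors scaled by 1/0.8
print(dp)  # module repr ends in drop_prob=0.200, via extra_repr()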
make_fitness
global
null
false
null
null
null
null
_Fitness,_Fitness
def make_fitness(*, function, greater_is_better, wrap=True):
    """Make a fitness measure, a metric scoring the quality of a program's fit.

    This factory function creates a fitness measure object which measures the
    quality of a program's fit and thus its likelihood to undergo genetic
    operations into the next generation. The resulting object is able to be
    called with NumPy vectorized arguments and return a resulting floating
    point score quantifying the quality of the program's representation of
    the true relationship.

    Parameters
    ----------
    function : callable
        A function with signature function(y, y_pred, sample_weight) that
        returns a floating point number. Where `y` is the input target y
        vector, `y_pred` is the predicted values from the genetic program,
        and sample_weight is the sample_weight vector.

    greater_is_better : bool
        Whether a higher value from `function` indicates a better fit. In
        general this would be False for metrics indicating the magnitude of
        the error, and True for metrics indicating the quality of fit.

    wrap : bool, optional (default=True)
        When running in parallel, pickling of custom metrics is not supported
        by Python's default pickler. This option will wrap the function using
        cloudpickle allowing you to pickle your solution, but the evolution
        may run slightly more slowly. If you are running single-threaded in
        an interactive Python session or have no need to save the model, set
        to `False` for faster runs.

    """
    if not isinstance(greater_is_better, bool):
        raise ValueError(
            "greater_is_better must be bool, got %s"
            % type(greater_is_better)
        )
    if not isinstance(wrap, bool):
        raise ValueError("wrap must be a bool, got %s" % type(wrap))
    if function.__code__.co_argcount != 3:
        raise ValueError(
            "function requires 3 arguments (y, y_pred, w),"
            " got %d." % function.__code__.co_argcount
        )
    if not isinstance(
        function(
            np.array([1, 1]), np.array([2, 2]), np.array([1, 1])
        ),
        numbers.Number,
    ):
        raise ValueError("function must return a numeric.")

    if wrap:
        return _Fitness(
            function=wrap_non_picklable_objects(function),
            greater_is_better=greater_is_better,
        )
    return _Fitness(
        function=function, greater_is_better=greater_is_better
    )
["def","make_fitness","(","*",",","function",",","greater_is_better",",","wrap=True",")",":","``","''","''","Make","a","fitness","measure",",","a","metric","scoring","the","quality","of","a","program","'s","fit",".","This","factory","function","creates","a","fitness","measure","object","which","measures","the","quality","of","a","program","'s","fit","and","thus","its","likelihood","to","undergo","genetic","operations","into","the","next","generation",".","The","resulting","object","is","able","to","be","called","with","NumPy","vectorized","arguments","and","return","a","resulting","floating","point","score","quantifying","the","quality","of","the","program","'s","representation","of","the","true","relationship",".","Parameters","--","--","--","--","--","function",":","callable","A","function","with","signature","function","(","y",",","y_pred",",","sample_weight",")","that","returns","a","floating","point","number",".","Where","`","y","`","is","the","input","target","y","vector",",","`","y_pred","`","is","the","predicted","values","from","the","genetic","program",",","and","sample_weight","is","the","sample_weight","vector",".","greater_is_better",":","bool","Whether","a","higher","value","from","`","function","`","indicates","a","better","fit",".","In","general","this","would","be","False","for","metrics","indicating","the","magnitude","of","the","error",",","and","True","for","metrics","indicating","the","quality","of","fit",".","wrap",":","bool",",","optional","(","default=True",")","When","running","in","parallel",",","pickling","of","custom","metrics","is","not","supported","by","Python","'s","default","pickler",".","This","option","will","wrap","the","function","using","cloudpickle","allowing","you","to","pickle","your","solution",",","but","the","evolution","may","run","slightly","more","slowly",".","If","you","are","running","single-threaded","in","an","interactive","Python","session","or","have","no","need","to","save","the","model",",","set","to","`","False","`","for","faster","runs.","``","''","''","if","not","isinstance","(","greater_is_better",",","bool",")",":","raise","ValueError","(","``","greater_is_better","must","be","bool",",","got","%","s","''","%","type","(","greater_is_better",")",")","if","not","isinstance","(","wrap",",","bool",")",":","raise","ValueError","(","``","wrap","must","be","an","bool",",","got","%","s","''","%","type","(","wrap",")",")","if","function.__code__.co_argcount","!","=","3",":","raise","ValueError","(","``","function","requires","3","arguments","(","y",",","y_pred",",","w",")",",","''","``","got","%","d",".","''","%","function.__code__.co_argcount",")","if","not","isinstance","(","function","(","np.array","(","[","1",",","1","]",")",",","np.array","(","[","2",",","2","]",")",",","np.array","(","[","1",",","1","]",")",")",",","numbers.Number",",",")",":","raise","ValueError","(","``","function","must","return","a","numeric",".","''",")","if","wrap",":","return","_Fitness","(","function=wrap_non_picklable_objects","(","function",")",",","greater_is_better=greater_is_better",",",")","return","_Fitness","(","function=function",",","greater_is_better=greater_is_better",")"]
52
101
null
fitness.py
gplearn/gplearn/fitness.py
import numbers import numpy from joblib import wrap_non_picklable_objects from scipy.stats import rankdata
15
null
4
7
null
null
null
Use image node_id 1 for calling a global function with example usage: make_fitness() and returns: _Fitness, _Fitness
116
node_id 1
1,106,030
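A usage sketch for make_fitness with a custom error metric; the metric itself is illustrative, and the returned _Fitness object is assumed callable, delegating to the wrapped function:

import numpy as np

def _rmse(y, y_pred, w):
    # weighted root mean squared error; lower is better
    return np.sqrt(np.average((y_pred - y) ** 2, weights=w))

rmse = make_fitness(function=_rmse, greater_is_better=False, wrap=False)
score = rmse(np.array([1.0, 2.0]), np.array([1.5, 2.5]), np.ones(2))
# score == 0.5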
_weighted_pearson
global
null
false
y,y_pred,w
null
null
null
null
int,np
def _weighted_pearson(y, y_pred, w): """Calculate the weighted Pearson correlation coefficient.""" with np.errstate(divide="ignore", invalid="ignore"): y_pred_demean = y_pred - np.average(y_pred, weights=w) y_demean = y - np.average(y, weights=w) corr = ( np.sum(w * y_pred_demean * y_demean) / np.sum(w) ) / np.sqrt( ( np.sum(w * y_pred_demean**2) * np.sum(w * y_demean**2) ) / (np.sum(w) ** 2) ) if np.isfinite(corr): return np.abs(corr) return 0.0
["def","_weighted_pearson","(","y",",","y_pred",",","w",")",":","``","''","''","Calculate","the","weighted","Pearson","correlation","coefficient",".","''","''","''","with","np.errstate","(","divide=","''","ignore","''",",","invalid=","''","ignore","''",")",":","y_pred_demean","=","y_pred","-","np.average","(","y_pred",",","weights=w",")","y_demean","=","y","-","np.average","(","y",",","weights=w",")","corr","=","(","np.sum","(","w","*","y_pred_demean","*","y_demean",")","\/","np.sum","(","w",")",")","\/","np.sqrt","(","(","np.sum","(","w","*","y_pred_demean","*","*","2",")","*","np.sum","(","w","*","y_demean","*","*","2",")",")","\/","(","np.sum","(","w",")","*","*","2",")",")","if","np.isfinite","(","corr",")",":","return","np.abs","(","corr",")","return","0.0"]
104
115
null
fitness.py
gplearn/gplearn/fitness.py
import numbers import numpy from joblib import wrap_non_picklable_objects from scipy.stats import rankdata
15
null
4
7
null
null
null
Use image node_id 2 for calling a global function with example usage: _weighted_pearson(y, y_pred, w) and returns: int, np
122
node_id 2
1,106,031
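A sanity check: with uniform weights the function reduces to the absolute Pearson correlation, so a perfect linear relationship scores 1.0:

import numpy as np

y = np.array([1.0, 2.0, 3.0])
y_pred = 2 * y  # perfectly correlated predictions
print(_weighted_pearson(y, y_pred, np.ones(3)))  # 1.0 (up to floating point)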
__init__
PyTorchWorker
Worker
true
self,N_train,N_valid
null
null
null
null
PyTorchWorker
def __init__(self, N_train=8192, N_valid=1024, **kwargs): super().__init__(**kwargs) batch_size = 64 # Load the MNIST Data here train_dataset = torchvision.datasets.MNIST( root="../../data", train=True, transform=transforms.ToTensor(), download=True, ) test_dataset = torchvision.datasets.MNIST( root="../../data", train=False, transform=transforms.ToTensor(), ) train_sampler = torch.utils.data.sampler.SubsetRandomSampler( range(N_train) ) validation_sampler = torch.utils.data.sampler.SubsetRandomSampler( range(N_train, N_train + N_valid) ) self.train_loader = torch.utils.data.DataLoader( dataset=train_dataset, batch_size=batch_size, sampler=train_sampler, ) self.validation_loader = torch.utils.data.DataLoader( dataset=train_dataset, batch_size=1024, sampler=validation_sampler, ) self.test_loader = torch.utils.data.DataLoader( dataset=test_dataset, batch_size=1024, shuffle=False )
["def","__init__","(","self",",","N_train=8192",",","N_valid=1024",",","*","*","kwargs",")",":","super","(",")",".__init__","(","*","*","kwargs",")","batch_size","=","64","#","Load","the","MNIST","Data","here","train_dataset","=","torchvision.datasets.MNIST","(","root=","''","..","\/","..","\/data","''",",","train=True",",","transform=transforms.ToTensor","(",")",",","download=True",",",")","test_dataset","=","torchvision.datasets.MNIST","(","root=","''","..","\/","..","\/data","''",",","train=False",",","transform=transforms.ToTensor","(",")",",",")","train_sampler","=","torch.utils.data.sampler.SubsetRandomSampler","(","range","(","N_train",")",")","validation_sampler","=","torch.utils.data.sampler.SubsetRandomSampler","(","range","(","N_train",",","N_train","+","N_valid",")",")","self.train_loader","=","torch.utils.data.DataLoader","(","dataset=train_dataset",",","batch_size=batch_size",",","sampler=train_sampler",",",")","self.validation_loader","=","torch.utils.data.DataLoader","(","dataset=train_dataset",",","batch_size=1024",",","sampler=validation_sampler",",",")","self.test_loader","=","torch.utils.data.DataLoader","(","dataset=test_dataset",",","batch_size=1024",",","shuffle=False",")"]
80
96
null
example_5_pytorch_worker.py
HpBandSter/hpbandster/examples/example_5_pytorch_worker.py
import ConfigSpace import ConfigSpace.hyperparameters from hpbandster.core.worker import Worker import logging
15
2
4
0
2
4
1
Use image node_id 1 to create a new PyTorchWorker object from inherited base classes: Worker with example: obj = PyTorchWorker(N_train, N_valid)
144
node_id 1
117,772
compute
PyTorchWorker
Worker
true
self,config,budget,working_directory
null
null
Simple example for a compute function using a feed forward network. It is trained on the MNIST dataset. The input parameter "config" (dictionary) contains the sampled configurations passed by the bohb optimizer
["Simple","example","for","a","compute","function","using","a","feed","forward","network",".","It","is","trained","on","the","MNIST","dataset",".","The","input","parameter","``","config","''","(","dictionary",")","contains","the","sampled","configurations","passed","by","the","bohb","optimizer"]
dict
def compute(self, config, budget, working_directory, *args, **kwargs): """ Simple example for a compute function using a feed forward network. It is trained on the MNIST dataset. The input parameter "config" (dictionary) contains the sampled configurations passed by the bohb optimizer """ # device = torch.device('cpu') model = MNISTConvNet( num_conv_layers=config["num_conv_layers"], num_filters_1=config["num_filters_1"], num_filters_2=config["num_filters_2"] if "num_filters_2" in config else None, num_filters_3=config["num_filters_3"] if "num_filters_3" in config else None, dropout_rate=config["dropout_rate"], num_fc_units=config["num_fc_units"], kernel_size=3, ) criterion = torch.nn.CrossEntropyLoss() if config["optimizer"] == "Adam": optimizer = torch.optim.Adam( model.parameters(), lr=config["lr"] ) else: optimizer = torch.optim.SGD( model.parameters(), lr=config["lr"], momentum=config["sgd_momentum"], ) for epoch in range(int(budget)): loss = 0 model.train() for i, (x, y) in enumerate(self.train_loader): optimizer.zero_grad() output = model(x) loss = F.nll_loss(output, y) loss.backward() optimizer.step() train_accuracy = self.evaluate_accuracy(model, self.train_loader) validation_accuracy = self.evaluate_accuracy( model, self.validation_loader ) test_accuracy = self.evaluate_accuracy(model, self.test_loader) return { "loss": 1 - validation_accuracy, # remember: HpBandSter always minimizes! "info": { "test accuracy": test_accuracy, "train accuracy": train_accuracy, "validation accuracy": validation_accuracy, "number of parameters": model.number_of_parameters(), }, }
["def","compute","(","self",",","config",",","budget",",","working_directory",",","*","args",",","*","*","kwargs",")",":","``","''","''","Simple","example","for","a","compute","function","using","a","feed","forward","network",".","It","is","trained","on","the","MNIST","dataset",".","The","input","parameter","``","config","''","(","dictionary",")","contains","the","sampled","configurations","passed","by","the","bohb","optimizer","``","''","''","#","device","=","torch.device","(","'cpu","'",")","model","=","MNISTConvNet","(","num_conv_layers=config","[","``","num_conv_layers","''","]",",","num_filters_1=config","[","``","num_filters_1","''","]",",","num_filters_2=config","[","``","num_filters_2","''","]","if","``","num_filters_2","''","in","config","else","None",",","num_filters_3=config","[","``","num_filters_3","''","]","if","``","num_filters_3","''","in","config","else","None",",","dropout_rate=config","[","``","dropout_rate","''","]",",","num_fc_units=config","[","``","num_fc_units","''","]",",","kernel_size=3",",",")","criterion","=","torch.nn.CrossEntropyLoss","(",")","if","config","[","``","optimizer","''","]","==","``","Adam","''",":","optimizer","=","torch.optim.Adam","(","model.parameters","(",")",",","lr=config","[","``","lr","''","]",")","else",":","optimizer","=","torch.optim.SGD","(","model.parameters","(",")",",","lr=config","[","``","lr","''","]",",","momentum=config","[","``","sgd_momentum","''","]",",",")","for","epoch","in","range","(","int","(","budget",")",")",":","loss","=","0","model.train","(",")","for","i",",","(","x",",","y",")","in","enumerate","(","self.train_loader",")",":","optimizer.zero_grad","(",")","output","=","model","(","x",")","loss","=","F.nll_loss","(","output",",","y",")","loss.backward","(",")","optimizer.step","(",")","train_accuracy","=","self.evaluate_accuracy","(","model",",","self.train_loader",")","validation_accuracy","=","self.evaluate_accuracy","(","model",",","self.validation_loader",")","test_accuracy","=","self.evaluate_accuracy","(","model",",","self.test_loader",")","return","{","``","loss","''",":","1","-","validation_accuracy",",","#","remember",":","HpBandSter","always","minimizes","!","``","info","''",":","{","``","test","accuracy","''",":","test_accuracy",",","``","train","accuracy","''",":","train_accuracy",",","``","validation","accuracy","''",":","validation_accuracy",",","``","number","of","parameters","''",":","model.number_of_parameters","(",")",",","}",",","}"]
99
144
null
example_5_pytorch_worker.py
HpBandSter/hpbandster/examples/example_5_pytorch_worker.py
import ConfigSpace import ConfigSpace.hyperparameters from hpbandster.core.worker import Worker import logging
15
2
4
0
2
4
1
Use image node_id 2 for calling the PyTorchWorker obj's underlying member method code with example usage: obj.compute(config, budget, working_directory) and returns: dict
170
node_id 2
117,773
evaluate_accuracy
PyTorchWorker
Worker
true
self,model,data_loader
null
null
null
null
accuracy
def evaluate_accuracy(self, model, data_loader): model.eval() correct = 0 with torch.no_grad(): for x, y in data_loader: output = model(x) # test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss pred = output.max(1, keepdim=True)[ 1 ] # get the index of the max log-probability correct += pred.eq(y.view_as(pred)).sum().item() # import pdb; pdb.set_trace() accuracy = correct / len(data_loader.sampler) return accuracy
["def","evaluate_accuracy","(","self",",","model",",","data_loader",")",":","model.eval","(",")","correct","=","0","with","torch.no_grad","(",")",":","for","x",",","y","in","data_loader",":","output","=","model","(","x",")","#","test_loss","+=","F.nll_loss","(","output",",","target",",","reduction='sum","'",")",".item","(",")","#","sum","up","batch","loss","pred","=","output.max","(","1",",","keepdim=True",")","[","1","]","#","get","the","index","of","the","max","log-probability","correct","+=","pred.eq","(","y.view_as","(","pred",")",")",".sum","(",")",".item","(",")","#","import","pdb",";","pdb.set_trace","(",")","accuracy","=","correct","\/","len","(","data_loader.sampler",")","return","accuracy"]
146
157
null
example_5_pytorch_worker.py
HpBandSter/hpbandster/examples/example_5_pytorch_worker.py
import ConfigSpace import ConfigSpace.hyperparameters from hpbandster.core.worker import Worker import logging
15
2
4
0
2
4
1
Use image node_id 3 for calling the PyTorchWorker obj's underlying member method code with example usage: obj.evaluate_accuracy(model, data_loader) and returns: accuracy
169
node_id 3
117,774
get_configspace
PyTorchWorker
Worker
true
null
null
null
It builds the configuration space with the needed hyperparameters. It is easily possible to implement different types of hyperparameters. Besides float hyperparameters on a log scale, it is also able to handle categorical input parameters. :return: ConfigurationSpace object
["It","builds","the","configuration","space","with","the","needed","hyperparameters",".","It","is","easily","possible","to","implement","different","types","of","hyperparameters",".","Beside","float-hyperparameters","on","a","log","scale",",","it","is","also","able","to","handle","categorical","input","parameter",".",":","return",":","ConfigurationsSpace-Object"]
cs
def get_configspace():
    """
    It builds the configuration space with the needed hyperparameters.
    It is easily possible to implement different types of hyperparameters.
    Besides float hyperparameters on a log scale, it is also able to handle
    categorical input parameters.
    :return: ConfigurationSpace object
    """
    cs = CS.ConfigurationSpace()

    lr = CSH.UniformFloatHyperparameter(
        "lr", lower=1e-6, upper=1e-1, default_value="1e-2", log=True
    )

    # For demonstration purposes, we add different optimizers as categorical hyperparameters.
    # To show how to use conditional hyperparameters with ConfigSpace, we'll add the optimizers 'Adam' and 'SGD'.
    # SGD has a different parameter 'momentum'.
    optimizer = CSH.CategoricalHyperparameter(
        "optimizer", ["Adam", "SGD"]
    )

    sgd_momentum = CSH.UniformFloatHyperparameter(
        "sgd_momentum",
        lower=0.0,
        upper=0.99,
        default_value=0.9,
        log=False,
    )

    cs.add_hyperparameters([lr, optimizer, sgd_momentum])

    # The hyperparameter sgd_momentum will be used, if the configuration
    # contains 'SGD' as optimizer.
    cond = CS.EqualsCondition(sgd_momentum, optimizer, "SGD")
    cs.add_condition(cond)

    num_conv_layers = CSH.UniformIntegerHyperparameter(
        "num_conv_layers", lower=1, upper=3, default_value=2
    )

    num_filters_1 = CSH.UniformIntegerHyperparameter(
        "num_filters_1", lower=4, upper=64, default_value=16, log=True
    )
    num_filters_2 = CSH.UniformIntegerHyperparameter(
        "num_filters_2", lower=4, upper=64, default_value=16, log=True
    )
    num_filters_3 = CSH.UniformIntegerHyperparameter(
        "num_filters_3", lower=4, upper=64, default_value=16, log=True
    )

    cs.add_hyperparameters(
        [num_conv_layers, num_filters_1, num_filters_2, num_filters_3]
    )

    # You can also use inequality conditions:
    cond = CS.GreaterThanCondition(num_filters_2, num_conv_layers, 1)
    cs.add_condition(cond)
    cond = CS.GreaterThanCondition(num_filters_3, num_conv_layers, 2)
    cs.add_condition(cond)

    dropout_rate = CSH.UniformFloatHyperparameter(
        "dropout_rate",
        lower=0.0,
        upper=0.9,
        default_value=0.5,
        log=False,
    )

    num_fc_units = CSH.UniformIntegerHyperparameter(
        "num_fc_units", lower=8, upper=256, default_value=32, log=True
    )

    cs.add_hyperparameters([dropout_rate, num_fc_units])

    return cs
["def","get_configspace","(",")",":","``","''","''","It","builds","the","configuration","space","with","the","needed","hyperparameters",".","It","is","easily","possible","to","implement","different","types","of","hyperparameters",".","Beside","float-hyperparameters","on","a","log","scale",",","it","is","also","able","to","handle","categorical","input","parameter",".",":","return",":","ConfigurationsSpace-Object","``","''","''","cs","=","CS.ConfigurationSpace","(",")","lr","=","CSH.UniformFloatHyperparameter","(","``","lr","''",",","lower=1e-6",",","upper=1e-1",",","default_value=","''","1e-2","''",",","log=True",")","#","For","demonstration","purposes",",","we","add","different","optimizers","as","categorical","hyperparameters",".","#","To","show","how","to","use","conditional","hyperparameters","with","ConfigSpace",",","we","'ll","add","the","optimizers","'Adam","'","and","'SGD","'",".","#","SGD","has","a","different","parameter","'momentum","'",".","optimizer","=","CSH.CategoricalHyperparameter","(","``","optimizer","''",",","[","``","Adam","''",",","``","SGD","''","]",")","sgd_momentum","=","CSH.UniformFloatHyperparameter","(","``","sgd_momentum","''",",","lower=0.0",",","upper=0.99",",","default_value=0.9",",","log=False",",",")","cs.add_hyperparameters","(","[","lr",",","optimizer",",","sgd_momentum","]",")","#","The","hyperparameter","sgd_momentum","will","be","used",",","if","the","configuration","#","contains","'SGD","'","as","optimizer",".","cond","=","CS.EqualsCondition","(","sgd_momentum",",","optimizer",",","``","SGD","''",")","cs.add_condition","(","cond",")","num_conv_layers","=","CSH.UniformIntegerHyperparameter","(","``","num_conv_layers","''",",","lower=1",",","upper=3",",","default_value=2",")","num_filters_1","=","CSH.UniformIntegerHyperparameter","(","``","num_filters_1","''",",","lower=4",",","upper=64",",","default_value=16",",","log=True",")","num_filters_2","=","CSH.UniformIntegerHyperparameter","(","``","num_filters_2","''",",","lower=4",",","upper=64",",","default_value=16",",","log=True",")","num_filters_3","=","CSH.UniformIntegerHyperparameter","(","``","num_filters_3","''",",","lower=4",",","upper=64",",","default_value=16",",","log=True",")","cs.add_hyperparameters","(","[","num_conv_layers",",","num_filters_1",",","num_filters_2",",","num_filters_3","]",")","#","You","can","also","use","inequality","conditions",":","cond","=","CS.GreaterThanCondition","(","num_filters_2",",","num_conv_layers",",","1",")","cs.add_condition","(","cond",")","cond","=","CS.GreaterThanCondition","(","num_filters_3",",","num_conv_layers",",","2",")","cs.add_condition","(","cond",")","dropout_rate","=","CSH.UniformFloatHyperparameter","(","``","dropout_rate","''",",","lower=0.0",",","upper=0.9",",","default_value=0.5",",","log=False",",",")","num_fc_units","=","CSH.UniformIntegerHyperparameter","(","``","num_fc_units","''",",","lower=8",",","upper=256",",","default_value=32",",","log=True",")","cs.add_hyperparameters","(","[","dropout_rate",",","num_fc_units","]",")","return","cs"]
161
208
null
example_5_pytorch_worker.py
HpBandSter/hpbandster/examples/example_5_pytorch_worker.py
import ConfigSpace import ConfigSpace.hyperparameters from hpbandster.core.worker import Worker import logging
15
2
4
0
2
4
1
Use image node_id 4 for calling the PyTorchWorker obj's underlying member method code with example usage: obj.get_configspace() and returns: cs
143
node_id 4
117,775
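A minimal sketch of how a space like the one built by get_configspace() above is used in practice: build it, then draw configurations from it. The ConfigSpace calls below (ConfigurationSpace, sample_configuration, get_dictionary) are standard ConfigSpace API; the seed value is an arbitrary choice for reproducibility.

import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH

cs = CS.ConfigurationSpace(seed=0)
lr = CSH.UniformFloatHyperparameter("lr", lower=1e-6, upper=1e-1, default_value=1e-2, log=True)
optimizer = CSH.CategoricalHyperparameter("optimizer", ["Adam", "SGD"])
sgd_momentum = CSH.UniformFloatHyperparameter("sgd_momentum", lower=0.0, upper=0.99, default_value=0.9)
cs.add_hyperparameters([lr, optimizer, sgd_momentum])
# sgd_momentum is active only when 'SGD' is the sampled optimizer:
cs.add_condition(CS.EqualsCondition(sgd_momentum, optimizer, "SGD"))

config = cs.sample_configuration()
print(config.get_dictionary())  # e.g. {'lr': ..., 'optimizer': 'Adam'} -- no momentum key when Adam is drawn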
load_state_dict
SensitivityAnalysis
null
true
self,state_dict
null
null
Update the weight of the model
["Update","the","weight","of","the","model"]
null
def load_state_dict(self, state_dict): """ Update the weight of the model """ self.ori_state_dict = copy.deepcopy(state_dict) self.model.load_state_dict(self.ori_state_dict)
["def","load_state_dict","(","self",",","state_dict",")",":","``","''","''","Update","the","weight","of","the","model","``","''","''","self.ori_state_dict","=","copy.deepcopy","(","state_dict",")","self.model.load_state_dict","(","self.ori_state_dict",")"]
246
251
null
sensitivity_analysis.py
auptimizer/src/aup/compression/torch/utils/sensitivity_analysis.py
import copy import csv import logging from collections import OrderedDict import numpy import torch.nn
15
1
6
0
0
8
null
Use image node_id 8 for calling the SensitivityAnalysis obj's underlying member method code with example usage: obj.load_state_dict(state_dict) without return types
164
node_id 8
315,484
_weighted_spearman
global
null
false
y,y_pred,w
null
null
null
null
_weighted_pearson
def _weighted_spearman(y, y_pred, w): """Calculate the weighted Spearman correlation coefficient.""" y_pred_ranked = np.apply_along_axis(rankdata, 0, y_pred) y_ranked = np.apply_along_axis(rankdata, 0, y) return _weighted_pearson(y_pred_ranked, y_ranked, w)
["def","_weighted_spearman","(","y",",","y_pred",",","w",")",":","``","''","''","Calculate","the","weighted","Spearman","correlation","coefficient",".","''","''","''","y_pred_ranked","=","np.apply_along_axis","(","rankdata",",","0",",","y_pred",")","y_ranked","=","np.apply_along_axis","(","rankdata",",","0",",","y",")","return","_weighted_pearson","(","y_pred_ranked",",","y_ranked",",","w",")"]
118
122
null
fitness.py
gplearn/gplearn/fitness.py
import numbers import numpy from joblib import wrap_non_picklable_objects from scipy.stats import rankdata
15
null
4
7
null
null
null
Use image node_id 3 for calling a global function with example usage: _weighted_spearman(y, y_pred, w) and returns: _weighted_pearson
133
node_id 3
1,106,032
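The rank-then-correlate idea behind _weighted_spearman, isolated into a runnable sketch. The weighted Pearson helper below is an assumption written from the standard definition, since _weighted_pearson itself is not shown in this section; gplearn's own helper may differ in details.

import numpy as np
from scipy.stats import rankdata

def weighted_pearson(y, y_pred, w):
    """Weighted Pearson correlation of y and y_pred with sample weights w."""
    with np.errstate(divide="ignore", invalid="ignore"):
        y_demean = y - np.average(y, weights=w)
        y_pred_demean = y_pred - np.average(y_pred, weights=w)
        corr = np.average(y_demean * y_pred_demean, weights=w) / np.sqrt(
            np.average(y_demean**2, weights=w) * np.average(y_pred_demean**2, weights=w)
        )
    return corr if np.isfinite(corr) else 0.0

def weighted_spearman(y, y_pred, w):
    """Spearman is just Pearson computed on the ranks."""
    return weighted_pearson(rankdata(y), rankdata(y_pred), w)

y = np.array([1.0, 2.0, 3.0, 4.0])
y_pred = np.array([1.1, 1.9, 3.5, 3.6])
w = np.ones_like(y)
print(weighted_spearman(y, y_pred, w))  # 1.0: the two orderings agree exactly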
_mean_absolute_error
global
null
false
y,y_pred,w
null
null
null
null
np
def _mean_absolute_error(y, y_pred, w): """Calculate the mean absolute error.""" return np.average(np.abs(y_pred - y), weights=w)
["def","_mean_absolute_error","(","y",",","y_pred",",","w",")",":","``","''","''","Calculate","the","mean","absolute","error",".","''","''","''","return","np.average","(","np.abs","(","y_pred","-","y",")",",","weights=w",")"]
125
127
null
fitness.py
gplearn/gplearn/fitness.py
import numbers import numpy from joblib import wrap_non_picklable_objects from scipy.stats import rankdata
15
null
4
7
null
null
null
Use image node_id 4 for calling a global function with example usage: _mean_absolute_error(y, y_pred, w) and returns: np
120
node_id 4
1,106,033
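Quick check of the weighted MAE above: np.average with weights w computes sum(w_i * |err_i|) / sum(w_i), so heavier samples pull the mean toward their error.

import numpy as np

y = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.5, 2.0, 2.0])
w = np.array([1.0, 1.0, 2.0])
# |errors| = [0.5, 0.0, 1.0]; weighted mean = (0.5 + 0.0 + 2.0) / 4 = 0.625
print(np.average(np.abs(y_pred - y), weights=w))  # 0.625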
_mkdirp
global
null
false
d
null
null
null
null
null
def _mkdirp(d): """Ensure directory d exists (like mkdir -p on Unix) No guarantee that the directory is writable. """ try: os.makedirs(d) except OSError as e: if e.errno != errno.EEXIST: raise
["def","_mkdirp","(","d",")",":","``","''","''","Ensure","directory","d","exists","(","like","mkdir","-p","on","Unix",")","No","guarantee","that","the","directory","is","writable.","``","''","''","try",":","os.makedirs","(","d",")","except","OSError","as","e",":","if","e.errno","!","=","errno.EEXIST",":","raise"]
393
401
null
_kddcup99.py
catboost/contrib/python/scikit-learn/py3/sklearn/datasets/_kddcup99.py
import errno import logging import os from gzip import GzipFile from os.path import exists, join import joblib import numpy from ..utils import Bunch, check_random_state from ..utils import shuffle from ..utils._param_validation import StrOptions, validate_params from .None import get_data_home from ._base import RemoteFileMetadata, _convert_data_dataframe, _fetch_remote, load_descr
15
null
12
3
null
null
null
Use image node_id 3 for calling a global function with example usage: _mkdirp(d) without return types
101
node_id 3
520,098
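Helpers like _mkdirp predate the exist_ok flag; on Python 3.2+ the standard library covers the same 'mkdir -p' behaviour directly (a standard-library fact, not something taken from this record):

import os

os.makedirs("some/nested/dir", exist_ok=True)  # no error if the path already exists
# As with _mkdirp, this gives no guarantee that the directory is writable.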
_fetch_brute_kddcup99
global
null
false
data_home,download_if_missing,percent10
null
null
null
null
Bunch
def _fetch_brute_kddcup99(
    data_home=None, download_if_missing=True, percent10=True
):
    """Load the kddcup99 dataset, downloading it if necessary.

    Parameters
    ----------
    data_home : str, default=None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    download_if_missing : bool, default=True
        If False, raise an OSError if the data is not locally available
        instead of trying to download the data from the source site.

    percent10 : bool, default=True
        Whether to load only 10 percent of the data.

    Returns
    -------
    dataset : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.

        data : ndarray of shape (494021, 41)
            Each row corresponds to the 41 features in the dataset.
        target : ndarray of shape (494021,)
            Each value corresponds to one of the 21 attack types or to the
            label 'normal.'.
        feature_names : list
            The names of the dataset columns
        target_names: list
            The names of the target columns
        DESCR : str
            Description of the kddcup99 dataset.
    """
    data_home = get_data_home(data_home=data_home)
    dir_suffix = "-py3"

    if percent10:
        kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix)
        archive = ARCHIVE_10_PERCENT
    else:
        kddcup_dir = join(data_home, "kddcup99" + dir_suffix)
        archive = ARCHIVE

    samples_path = join(kddcup_dir, "samples")
    targets_path = join(kddcup_dir, "targets")
    available = exists(samples_path)

    dt = [
        ("duration", int),
        ("protocol_type", "S4"),
        ("service", "S11"),
        ("flag", "S6"),
        ("src_bytes", int),
        ("dst_bytes", int),
        ("land", int),
        ("wrong_fragment", int),
        ("urgent", int),
        ("hot", int),
        ("num_failed_logins", int),
        ("logged_in", int),
        ("num_compromised", int),
        ("root_shell", int),
        ("su_attempted", int),
        ("num_root", int),
        ("num_file_creations", int),
        ("num_shells", int),
        ("num_access_files", int),
        ("num_outbound_cmds", int),
        ("is_host_login", int),
        ("is_guest_login", int),
        ("count", int),
        ("srv_count", int),
        ("serror_rate", float),
        ("srv_serror_rate", float),
        ("rerror_rate", float),
        ("srv_rerror_rate", float),
        ("same_srv_rate", float),
        ("diff_srv_rate", float),
        ("srv_diff_host_rate", float),
        ("dst_host_count", int),
        ("dst_host_srv_count", int),
        ("dst_host_same_srv_rate", float),
        ("dst_host_diff_srv_rate", float),
        ("dst_host_same_src_port_rate", float),
        ("dst_host_srv_diff_host_rate", float),
        ("dst_host_serror_rate", float),
        ("dst_host_srv_serror_rate", float),
        ("dst_host_rerror_rate", float),
        ("dst_host_srv_rerror_rate", float),
        ("labels", "S16"),
    ]
    column_names = [c[0] for c in dt]
    target_names = column_names[-1]
    feature_names = column_names[:-1]

    if available:
        try:
            X = joblib.load(samples_path)
            y = joblib.load(targets_path)
        except Exception as e:
            raise OSError(
                "The cache for fetch_kddcup99 is invalid, please delete "
                f"{str(kddcup_dir)} and run the fetch_kddcup99 again"
            ) from e
    elif download_if_missing:
        _mkdirp(kddcup_dir)
        logger.info("Downloading %s" % archive.url)
        _fetch_remote(archive, dirname=kddcup_dir)
        DT = np.dtype(dt)
        logger.debug("extracting archive")
        archive_path = join(kddcup_dir, archive.filename)
        file_ = GzipFile(filename=archive_path, mode="r")
        Xy = []
        for line in file_.readlines():
            line = line.decode()
            Xy.append(line.replace("\n", "").split(","))
        file_.close()
        logger.debug("extraction done")
        os.remove(archive_path)

        Xy = np.asarray(Xy, dtype=object)
        for j in range(42):
            Xy[:, j] = Xy[:, j].astype(DT[j])

        X = Xy[:, :-1]
        y = Xy[:, -1]
        # XXX bug when compress!=0:
        # (error: 'Incorrect data length while decompressing[...] the file
        # could be corrupted.')
        joblib.dump(X, samples_path, compress=0)
        joblib.dump(y, targets_path, compress=0)
    else:
        raise OSError("Data not found and `download_if_missing` is False")

    return Bunch(
        data=X,
        target=y,
        feature_names=feature_names,
        target_names=[target_names],
    )
["def","_fetch_brute_kddcup99","(","data_home=None",",","download_if_missing=True",",","percent10=True",")",":","``","''","''","Load","the","kddcup99","dataset",",","downloading","it","if","necessary",".","Parameters","--","--","--","--","--","data_home",":","str",",","default=None","Specify","another","download","and","cache","folder","for","the","datasets",".","By","default","all","scikit-learn","data","is","stored","in","'~\/scikit_learn_data","'","subfolders",".","download_if_missing",":","bool",",","default=True","If","False",",","raise","an","OSError","if","the","data","is","not","locally","available","instead","of","trying","to","download","the","data","from","the","source","site",".","percent10",":","bool",",","default=True","Whether","to","load","only","10","percent","of","the","data",".","Returns","--","--","--","-","dataset",":",":","class",":","`","~sklearn.utils.Bunch","`","Dictionary-like","object",",","with","the","following","attributes",".","data",":","ndarray","of","shape","(","494021",",","41",")","Each","row","corresponds","to","the","41","features","in","the","dataset",".","target",":","ndarray","of","shape","(","494021",",",")","Each","value","corresponds","to","one","of","the","21","attack","types","or","to","the","label","'normal.","'",".","feature_names",":","list","The","names","of","the","dataset","columns","target_names",":","list","The","names","of","the","target","columns","DESCR",":","str","Description","of","the","kddcup99","dataset.","``","''","''","data_home","=","get_data_home","(","data_home=data_home",")","dir_suffix","=","``","-py3","''","if","percent10",":","kddcup_dir","=","join","(","data_home",",","``","kddcup99_10","''","+","dir_suffix",")","archive","=","ARCHIVE_10_PERCENT","else",":","kddcup_dir","=","join","(","data_home",",","``","kddcup99","''","+","dir_suffix",")","archive","=","ARCHIVE","samples_path","=","join","(","kddcup_dir",",","``","samples","''",")","targets_path","=","join","(","kddcup_dir",",","``","targets","''",")","available","=","exists","(","samples_path",")","dt","=","[","(","``","duration","''",",","int",")",",","(","``","protocol_type","''",",","``","S4","''",")",",","(","``","service","''",",","``","S11","''",")",",","(","``","flag","''",",","``","S6","''",")",",","(","``","src_bytes","''",",","int",")",",","(","``","dst_bytes","''",",","int",")",",","(","``","land","''",",","int",")",",","(","``","wrong_fragment","''",",","int",")",",","(","``","urgent","''",",","int",")",",","(","``","hot","''",",","int",")",",","(","``","num_failed_logins","''",",","int",")",",","(","``","logged_in","''",",","int",")",",","(","``","num_compromised","''",",","int",")",",","(","``","root_shell","''",",","int",")",",","(","``","su_attempted","''",",","int",")",",","(","``","num_root","''",",","int",")",",","(","``","num_file_creations","''",",","int",")",",","(","``","num_shells","''",",","int",")",",","(","``","num_access_files","''",",","int",")",",","(","``","num_outbound_cmds","''",",","int",")",",","(","``","is_host_login","''",",","int",")",",","(","``","is_guest_login","''",",","int",")",",","(","``","count","''",",","int",")",",","(","``","srv_count","''",",","int",")",",","(","``","serror_rate","''",",","float",")",",","(","``","srv_serror_rate","''",",","float",")",",","(","``","rerror_rate","''",",","float",")",",","(","``","srv_rerror_rate","''",",","float",")",",","(","``","same_srv_rate","''",",","float",")",",","(","``","diff_srv_rate","''",",","float",")",",","(","``","srv_diff_host_rate","''",",","float",")",",","(","``","d
st_host_count","''",",","int",")",",","(","``","dst_host_srv_count","''",",","int",")",",","(","``","dst_host_same_srv_rate","''",",","float",")",",","(","``","dst_host_diff_srv_rate","''",",","float",")",",","(","``","dst_host_same_src_port_rate","''",",","float",")",",","(","``","dst_host_srv_diff_host_rate","''",",","float",")",",","(","``","dst_host_serror_rate","''",",","float",")",",","(","``","dst_host_srv_serror_rate","''",",","float",")",",","(","``","dst_host_rerror_rate","''",",","float",")",",","(","``","dst_host_srv_rerror_rate","''",",","float",")",",","(","``","labels","''",",","``","S16","''",")",",","]","column_names","=","[","c","[","0","]","for","c","in","dt","]","target_names","=","column_names","[","-1","]","feature_names","=","column_names","[",":","-1","]","if","available",":","try",":","X","=","joblib.load","(","samples_path",")","y","=","joblib.load","(","targets_path",")","except","Exception","as","e",":","raise","OSError","(","``","The","cache","for","fetch_kddcup99","is","invalid",",","please","delete","``","f","''","{","str","(","kddcup_dir",")","}","and","run","the","fetch_kddcup99","again","''",")","from","e","elif","download_if_missing",":","_mkdirp","(","kddcup_dir",")","logger.info","(","``","Downloading","%","s","''","%","archive.url",")","_fetch_remote","(","archive",",","dirname=kddcup_dir",")","DT","=","np.dtype","(","dt",")","logger.debug","(","``","extracting","archive","''",")","archive_path","=","join","(","kddcup_dir",",","archive.filename",")","file_","=","GzipFile","(","filename=archive_path",",","mode=","''","r","''",")","Xy","=","[","]","for","line","in","file_.readlines","(",")",":","line","=","line.decode","(",")","Xy.append","(","line.replace","(","``","\\n","''",",","``","''",")",".split","(","``",",","''",")",")","file_.close","(",")","logger.debug","(","``","extraction","done","''",")","os.remove","(","archive_path",")","Xy","=","np.asarray","(","Xy",",","dtype=object",")","for","j","in","range","(","42",")",":","Xy","[",":",",","j","]","=","Xy","[",":",",","j","]",".astype","(","DT","[","j","]",")","X","=","Xy","[",":",",",":","-1","]","y","=","Xy","[",":",",","-1","]","#","XXX","bug","when","compress","!","=0",":","#","(","error",":","'Incorrect","data","length","while","decompressing","[","...","]","the","file","#","could","be","corrupted",".","'",")","joblib.dump","(","X",",","samples_path",",","compress=0",")","joblib.dump","(","y",",","targets_path",",","compress=0",")","else",":","raise","OSError","(","``","Data","not","found","and","`","download_if_missing","`","is","False","''",")","return","Bunch","(","data=X",",","target=y",",","feature_names=feature_names",",","target_names=","[","target_names","]",",",")"]
246
390
null
_kddcup99.py
catboost/contrib/python/scikit-learn/py3/sklearn/datasets/_kddcup99.py
import errno import logging import os from gzip import GzipFile from os.path import exists, join import joblib import numpy from ..utils import Bunch, check_random_state from ..utils import shuffle from ..utils._param_validation import StrOptions, validate_params from .None import get_data_home from ._base import RemoteFileMetadata, _convert_data_dataframe, _fetch_remote, load_descr
15
null
12
3
null
null
null
Use image node_id 2 for calling a global function with example usage: _fetch_brute_kddcup99(data_home, download_if_missing, percent10) and returns: Bunch
153
node_id 2
520,097
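For orientation only: _fetch_brute_kddcup99 is the private loader, and the public scikit-learn entry point that wraps it is sklearn.datasets.fetch_kddcup99. A minimal call (downloads and caches the 10% subset on first use):

from sklearn.datasets import fetch_kddcup99

bunch = fetch_kddcup99(percent10=True, download_if_missing=True)
print(bunch.data.shape)   # (494021, 41) for the 10% subset, per the docstring above
print(bunch.target[:3])   # byte-string labels such as b'normal.'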
__new__
EdgeTypeStr
str
true
cls
A helper class to construct serializable edge types by merging an edge type tuple into a single string.
["A","helper","class","to","construct","serializable","edge","types","by","merging","an","edge","type","tuple","into","a","single","string","."]
null
null
str
def __new__(cls, *args: Any) -> "EdgeTypeStr": if isinstance(args[0], (list, tuple)): # Unwrap `EdgeType((src, rel, dst))` and `EdgeTypeStr((src, dst))`: args = tuple(args[0]) if len(args) == 1 and isinstance(args[0], str): arg = args[0] # An edge type string was passed. elif len(args) == 2 and all(isinstance(arg, str) for arg in args): # A `(src, dst)` edge type was passed - add `DEFAULT_REL`: arg = EDGE_TYPE_STR_SPLIT.join( (args[0], DEFAULT_REL, args[1]) ) elif len(args) == 3 and all(isinstance(arg, str) for arg in args): # A `(src, rel, dst)` edge type was passed: arg = EDGE_TYPE_STR_SPLIT.join(args) else: raise ValueError(f"Encountered invalid edge type '{args}'") return str.__new__(cls, arg)
["def","__new__","(","cls",",","*","args",":","Any",")","-",">","``","EdgeTypeStr","''",":","if","isinstance","(","args","[","0","]",",","(","list",",","tuple",")",")",":","#","Unwrap","`","EdgeType","(","(","src",",","rel",",","dst",")",")","`","and","`","EdgeTypeStr","(","(","src",",","dst",")",")","`",":","args","=","tuple","(","args","[","0","]",")","if","len","(","args",")","==","1","and","isinstance","(","args","[","0","]",",","str",")",":","arg","=","args","[","0","]","#","An","edge","type","string","was","passed",".","elif","len","(","args",")","==","2","and","all","(","isinstance","(","arg",",","str",")","for","arg","in","args",")",":","#","A","`","(","src",",","dst",")","`","edge","type","was","passed","-","add","`","DEFAULT_REL","`",":","arg","=","EDGE_TYPE_STR_SPLIT.join","(","(","args","[","0","]",",","DEFAULT_REL",",","args","[","1","]",")",")","elif","len","(","args",")","==","3","and","all","(","isinstance","(","arg",",","str",")","for","arg","in","args",")",":","#","A","`","(","src",",","rel",",","dst",")","`","edge","type","was","passed",":","arg","=","EDGE_TYPE_STR_SPLIT.join","(","args",")","else",":","raise","ValueError","(","f","''","Encountered","invalid","edge","type","'","{","args","}","'","''",")","return","str.__new__","(","cls",",","arg",")"]
292
311
null
typing.py
pytorch_geometric/torch_geometric/typing.py
import inspect import os import sys import warnings from typing import Any, Dict, List, Optional, Tuple, Union import numpy import torch from torch import Tensor
15
2
8
0
1
2
1
Use image node_id 1 for calling the EdgeTypeStr obj's underlying member method code with example usage: obj.__new__(cls) and returns: str
137
node_id 1
1,775,553
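To make the branching in __new__ concrete, here is a behavioural sketch plus sample outputs. The separator and default relation below are assumptions, since EDGE_TYPE_STR_SPLIT and DEFAULT_REL are not shown in this section; PyTorch Geometric defines its own values.

EDGE_TYPE_STR_SPLIT = "__"   # assumed
DEFAULT_REL = "to"           # assumed

class EdgeTypeStr(str):
    def __new__(cls, *args):
        if isinstance(args[0], (list, tuple)):
            args = tuple(args[0])           # unwrap a passed-in tuple/list
        if len(args) == 1 and isinstance(args[0], str):
            arg = args[0]                   # already a merged edge type string
        elif len(args) == 2:
            arg = EDGE_TYPE_STR_SPLIT.join((args[0], DEFAULT_REL, args[1]))
        elif len(args) == 3:
            arg = EDGE_TYPE_STR_SPLIT.join(args)
        else:
            raise ValueError(f"Encountered invalid edge type '{args}'")
        return str.__new__(cls, arg)

print(EdgeTypeStr("author", "writes", "paper"))  # author__writes__paper
print(EdgeTypeStr(("author", "paper")))          # author__to__paper (default relation inserted)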
__init__
SensitivityAnalysis
null
true
self,model,val_func,sparsities,prune_type,early_stop_mode,early_stop_value
null
null
Perform sensitivity analysis for this model.

Parameters
----------
model : torch.nn.Module
    the model to perform sensitivity analysis on
val_func : function
    validation function for the model. Because different models may need
    different datasets/criteria, the user needs to cover this part
    themselves. In val_func, the model should be tested on the validation
    dataset, and the validation accuracy/loss should be returned as the
    output of val_func. There are no restrictions on the input parameters
    of val_func. The user can use the val_args, val_kwargs parameters in
    analysis to pass all the parameters that val_func needs.
sparsities : list
    The sparsity list provided by users. This parameter is set when the
    user only wants to test some specific sparsities. In the sparsity list,
    each element is a sparsity value which means how much weight the pruner
    should prune. Take [0.25, 0.5, 0.75] for example: the
    SensitivityAnalysis will prune 25%/50%/75% of the weights gradually for
    each layer.
prune_type : str
    The pruner type used to prune the conv layers; the default is 'l1', and
    'l2' and 'fine-grained' are also supported.
early_stop_mode : str
    If this flag is set, the sensitivity analysis for a conv layer will
    stop early when the validation metric (for example, accuracy/loss) has
    already met the threshold. We support four different early stop modes:
    minimize, maximize, dropped, raised. The default value is None, which
    means the analysis won't stop until all given sparsities are tested.
    This option should be used together with early_stop_value.
    minimize: The analysis stops when the validation metric returned by the
    val_func is lower than early_stop_value.
    maximize: The analysis stops when the validation metric returned by the
    val_func is larger than early_stop_value.
    dropped: The analysis stops when the validation metric has dropped by
    early_stop_value.
    raised: The analysis stops when the validation metric has risen by
    early_stop_value.
early_stop_value : float
    This value is used as the threshold for the different early stop modes.
    This value is effective only when early_stop_mode is set.
["Perform","sensitivity","analysis","for","this","model",".","Parameters","--","--","--","--","--","model",":","torch.nn.Module","the","model","to","perform","sensitivity","analysis","val_func",":","function","validation","function","for","the","model",".","Due","to","different","models","may","need","different","dataset\/criterion",",","therefore","the","user","need","to","cover","this","part","by","themselves",".","In","the","val_func",",","the","model","should","be","tested","on","the","validation","dateset",",","and","the","validation","accuracy\/loss","should","be","returned","as","the","output","of","val_func",".","There","are","no","restrictions","on","the","input","parameters","of","the","val_function",".","User","can","use","the","val_args",",","val_kwargs","parameters","in","analysis","to","pass","all","the","parameters","that","val_func","needed",".","sparsities",":","list","The","sparsity","list","provided","by","users",".","This","parameter","is","set","when","the","user","only","wants","to","test","some","specific","sparsities",".","In","the","sparsity","list",",","each","element","is","a","sparsity","value","which","means","how","much","weight","the","pruner","should","prune",".","Take","[","0.25",",","0.5",",","0.75","]","for","an","example",",","the","SensitivityAnalysis","will","prune","25","%","50","%","75","%","weights","gradually","for","each","layer",".","prune_type",":","str","The","pruner","type","used","to","prune","the","conv","layers",",","default","is","'l1","'",",","and","'l2","'",",","'fine-grained","'","is","also","supported",".","early_stop_mode",":","str","If","this","flag","is","set",",","the","sensitivity","analysis","for","a","conv","layer","will","early","stop","when","the","validation","metric","(","for","example",",","accurracy\/loss",")","has","alreay","meet","the","threshold",".","We","support","four","different","early","stop","modes",":","minimize",",","maximize",",","dropped",",","raised",".","The","default","value","is","None",",","which","means","the","analysis","wo","n't","stop","until","all","given","sparsities","are","tested",".","This","option","should","be","used","with","early_stop_value","together",".","minimize",":","The","analysis","stops","when","the","validation","metric","return","by","the","val_func","lower","than","early_stop_value",".","maximize",":","The","analysis","stops","when","the","validation","metric","return","by","the","val_func","larger","than","early_stop_value",".","dropped",":","The","analysis","stops","when","the","validation","metric","has","dropped","by","early_stop_value",".","raised",":","The","analysis","stops","when","the","validation","metric","has","raised","by","early_stop_value",".","early_stop_value",":","float","This","value","is","used","as","the","threshold","for","different","earlystop","modes",".","This","value","is","effective","only","when","the","early_stop_mode","is","set","."]
SensitivityAnalysis
def __init__(
    self,
    model,
    val_func,
    sparsities=None,
    prune_type="l1",
    early_stop_mode=None,
    early_stop_value=None,
):
    """
    Perform sensitivity analysis for this model.

    Parameters
    ----------
    model : torch.nn.Module
        the model to perform sensitivity analysis on
    val_func : function
        validation function for the model. Because different models may need
        different datasets/criteria, the user needs to cover this part
        themselves. In val_func, the model should be tested on the validation
        dataset, and the validation accuracy/loss should be returned as the
        output of val_func. There are no restrictions on the input parameters
        of val_func. The user can use the val_args, val_kwargs parameters in
        analysis to pass all the parameters that val_func needs.
    sparsities : list
        The sparsity list provided by users. This parameter is set when the
        user only wants to test some specific sparsities. In the sparsity
        list, each element is a sparsity value which means how much weight
        the pruner should prune. Take [0.25, 0.5, 0.75] for example: the
        SensitivityAnalysis will prune 25%/50%/75% of the weights gradually
        for each layer.
    prune_type : str
        The pruner type used to prune the conv layers; the default is 'l1',
        and 'l2' and 'fine-grained' are also supported.
    early_stop_mode : str
        If this flag is set, the sensitivity analysis for a conv layer will
        stop early when the validation metric (for example, accuracy/loss)
        has already met the threshold. We support four different early stop
        modes: minimize, maximize, dropped, raised. The default value is
        None, which means the analysis won't stop until all given sparsities
        are tested. This option should be used together with
        early_stop_value.
        minimize: The analysis stops when the validation metric returned by
        the val_func is lower than early_stop_value.
        maximize: The analysis stops when the validation metric returned by
        the val_func is larger than early_stop_value.
        dropped: The analysis stops when the validation metric has dropped
        by early_stop_value.
        raised: The analysis stops when the validation metric has risen by
        early_stop_value.
    early_stop_value : float
        This value is used as the threshold for the different early stop
        modes. This value is effective only when early_stop_mode is set.
    """
    from ..pruning.constants_pruner import PRUNER_DICT

    self.model = model
    self.val_func = val_func
    self.target_layer = OrderedDict()
    self.ori_state_dict = copy.deepcopy(self.model.state_dict())
    self.target_layer = {}
    self.sensitivities = {}
    if sparsities is not None:
        self.sparsities = sorted(sparsities)
    else:
        self.sparsities = np.arange(0.1, 1.0, 0.1)
    self.sparsities = [np.round(x, 2) for x in self.sparsities]
    self.Pruner = PRUNER_DICT[prune_type]
    self.early_stop_mode = early_stop_mode
    self.early_stop_value = early_stop_value
    self.ori_metric = None  # original validation metric for the model
    # already_pruned is for the iterative sensitivity analysis.
    # For example, sensitivity_pruner iteratively prunes the target
    # model according to the sensitivity. After each round of
    # pruning, the sensitivity_pruner will test the new sensitivity
    # for each layer.
    self.already_pruned = {}
    self.model_parse()
["def","__init__","(","self",",","model",",","val_func",",","sparsities=None",",","prune_type=","''","l1","''",",","early_stop_mode=None",",","early_stop_value=None",",",")",":","``","''","''","Perform","sensitivity","analysis","for","this","model",".","Parameters","--","--","--","--","--","model",":","torch.nn.Module","the","model","to","perform","sensitivity","analysis","val_func",":","function","validation","function","for","the","model",".","Due","to","different","models","may","need","different","dataset\/criterion",",","therefore","the","user","need","to","cover","this","part","by","themselves",".","In","the","val_func",",","the","model","should","be","tested","on","the","validation","dateset",",","and","the","validation","accuracy\/loss","should","be","returned","as","the","output","of","val_func",".","There","are","no","restrictions","on","the","input","parameters","of","the","val_function",".","User","can","use","the","val_args",",","val_kwargs","parameters","in","analysis","to","pass","all","the","parameters","that","val_func","needed",".","sparsities",":","list","The","sparsity","list","provided","by","users",".","This","parameter","is","set","when","the","user","only","wants","to","test","some","specific","sparsities",".","In","the","sparsity","list",",","each","element","is","a","sparsity","value","which","means","how","much","weight","the","pruner","should","prune",".","Take","[","0.25",",","0.5",",","0.75","]","for","an","example",",","the","SensitivityAnalysis","will","prune","25","%","50","%","75","%","weights","gradually","for","each","layer",".","prune_type",":","str","The","pruner","type","used","to","prune","the","conv","layers",",","default","is","'l1","'",",","and","'l2","'",",","'fine-grained","'","is","also","supported",".","early_stop_mode",":","str","If","this","flag","is","set",",","the","sensitivity","analysis","for","a","conv","layer","will","early","stop","when","the","validation","metric","(","for","example",",","accurracy\/loss",")","has","alreay","meet","the","threshold",".","We","support","four","different","early","stop","modes",":","minimize",",","maximize",",","dropped",",","raised",".","The","default","value","is","None",",","which","means","the","analysis","wo","n't","stop","until","all","given","sparsities","are","tested",".","This","option","should","be","used","with","early_stop_value","together",".","minimize",":","The","analysis","stops","when","the","validation","metric","return","by","the","val_func","lower","than","early_stop_value",".","maximize",":","The","analysis","stops","when","the","validation","metric","return","by","the","val_func","larger","than","early_stop_value",".","dropped",":","The","analysis","stops","when","the","validation","metric","has","dropped","by","early_stop_value",".","raised",":","The","analysis","stops","when","the","validation","metric","has","raised","by","early_stop_value",".","early_stop_value",":","float","This","value","is","used","as","the","threshold","for","different","earlystop","modes",".","This","value","is","effective","only","when","the","early_stop_mode","is","set.","``","''","''","from","..","pruning.constants_pruner","import","PRUNER_DICT","self.model","=","model","self.val_func","=","val_func","self.target_layer","=","OrderedDict","(",")","self.ori_state_dict","=","copy.deepcopy","(","self.model.state_dict","(",")",")","self.target_layer","=","{","}","self.sensitivities","=","{","}","if","sparsities","is","not","None",":","self.sparsities","=","sorted","(","sparsities",")","else",":","self.sparsit
ies","=","np.arange","(","0.1",",","1.0",",","0.1",")","self.sparsities","=","[","np.round","(","x",",","2",")","for","x","in","self.sparsities","]","self.Pruner","=","PRUNER_DICT","[","prune_type","]","self.early_stop_mode","=","early_stop_mode","self.early_stop_value","=","early_stop_value","self.ori_metric","=","None","#","original","validation","metric","for","the","model","#","already_pruned","is","for","the","iterative","sensitivity","analysis","#","For","example",",","sensitivity_pruner","iteratively","prune","the","target","#","model","according","to","the","sensitivity",".","After","each","round","of","#","pruning",",","the","sensitivity_pruner","will","test","the","new","sensitivity","#","for","each","layer","self.already_pruned","=","{","}","self.model_parse","(",")"]
23
91
null
sensitivity_analysis.py
auptimizer/src/aup/compression/torch/utils/sensitivity_analysis.py
import copy import csv import logging from collections import OrderedDict import numpy import torch.nn
15
1
6
0
0
8
null
Use image node_id 1 to create a new SensitivityAnalysis object with example: obj = SensitivityAnalysis(model, val_func, sparsities, prune_type, early_stop_mode, early_stop_value)
179
node_id 1
315,477
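Hypothetical wiring of SensitivityAnalysis, to show how the pieces in __init__ fit together. The model and val_loader names are placeholders, not from the record; analyzer.analysis() is the entry point the docstring refers to via its val_args/val_kwargs parameters.

import torch

def val_func(model):
    """Return a validation metric (accuracy here) for `model`."""
    model.eval()
    correct = total = 0
    with torch.no_grad():
        for x, y in val_loader:            # val_loader: assumed to exist
            correct += (model(x).argmax(1) == y).sum().item()
            total += y.numel()
    return correct / total

analyzer = SensitivityAnalysis(
    model, val_func,
    sparsities=[0.25, 0.5, 0.75],          # prune 25/50/75% per layer
    prune_type="l1",
    early_stop_mode="dropped",             # stop a layer once accuracy drops...
    early_stop_value=0.05,                 # ...by more than 5 points
)
sensitivities = analyzer.analysis()        # per-layer {sparsity: metric} results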
to_tuple
EdgeTypeStr
str
true
self
A helper class to construct serializable edge types by merging an edge type tuple into a single string.
["A","helper","class","to","construct","serializable","edge","types","by","merging","an","edge","type","tuple","into","a","single","string","."]
Returns the original edge type.
["Returns","the","original","edge","type","."]
out
def to_tuple(self) -> EdgeType: r"""Returns the original edge type.""" out = tuple(self.split(EDGE_TYPE_STR_SPLIT)) if len(out) != 3: raise ValueError( f"Cannot convert the edge type '{self}' to a " f"tuple since it holds invalid characters" ) return out
["def","to_tuple","(","self",")","-",">","EdgeType",":","r","''","''","''","Returns","the","original","edge","type",".","''","''","''","out","=","tuple","(","self.split","(","EDGE_TYPE_STR_SPLIT",")",")","if","len","(","out",")","!","=","3",":","raise","ValueError","(","f","''","Can","not","convert","the","edge","type","'","{","self","}","'","to","a","``","f","''","tuple","since","it","holds","invalid","characters","''",")","return","out"]
313
319
null
typing.py
pytorch_geometric/torch_geometric/typing.py
import inspect import os import sys import warnings from typing import Any, Dict, List, Optional, Tuple, Union import numpy import torch from torch import Tensor
15
2
8
0
1
2
1
Use image node_id 2 for calling the EdgeTypeStr obj's underlying member method code with example usage: obj.to_tuple() and returns: out
135
node_id 2
1,775,554
__init__
MNISTConvNet
torch.nn
true
self,num_conv_layers,num_filters_1,num_filters_2,num_filters_3,dropout_rate,num_fc_units,kernel_size
null
null
null
null
MNISTConvNet
def __init__( self, num_conv_layers, num_filters_1, num_filters_2, num_filters_3, dropout_rate, num_fc_units, kernel_size, ): super().__init__() self.conv1 = nn.Conv2d(1, num_filters_1, kernel_size=kernel_size) self.conv2 = None self.conv3 = None output_size = (28 - kernel_size + 1) // 2 num_output_filters = num_filters_1 if num_conv_layers > 1: self.conv2 = nn.Conv2d( num_filters_1, num_filters_2, kernel_size=kernel_size ) num_output_filters = num_filters_2 output_size = (output_size - kernel_size + 1) // 2 if num_conv_layers > 2: self.conv3 = nn.Conv2d( num_filters_2, num_filters_3, kernel_size=kernel_size ) num_output_filters = num_filters_3 output_size = (output_size - kernel_size + 1) // 2 self.dropout = nn.Dropout(p=dropout_rate) self.conv_output_size = ( num_output_filters * output_size * output_size ) self.fc1 = nn.Linear(self.conv_output_size, num_fc_units) self.fc2 = nn.Linear(num_fc_units, 10)
["def","__init__","(","self",",","num_conv_layers",",","num_filters_1",",","num_filters_2",",","num_filters_3",",","dropout_rate",",","num_fc_units",",","kernel_size",",",")",":","super","(",")",".__init__","(",")","self.conv1","=","nn.Conv2d","(","1",",","num_filters_1",",","kernel_size=kernel_size",")","self.conv2","=","None","self.conv3","=","None","output_size","=","(","28","-","kernel_size","+","1",")","\/\/","2","num_output_filters","=","num_filters_1","if","num_conv_layers",">","1",":","self.conv2","=","nn.Conv2d","(","num_filters_1",",","num_filters_2",",","kernel_size=kernel_size",")","num_output_filters","=","num_filters_2","output_size","=","(","output_size","-","kernel_size","+","1",")","\/\/","2","if","num_conv_layers",">","2",":","self.conv3","=","nn.Conv2d","(","num_filters_2",",","num_filters_3",",","kernel_size=kernel_size",")","num_output_filters","=","num_filters_3","output_size","=","(","output_size","-","kernel_size","+","1",")","\/\/","2","self.dropout","=","nn.Dropout","(","p=dropout_rate",")","self.conv_output_size","=","(","num_output_filters","*","output_size","*","output_size",")","self.fc1","=","nn.Linear","(","self.conv_output_size",",","num_fc_units",")","self.fc2","=","nn.Linear","(","num_fc_units",",","10",")"]
214
239
null
example_5_pytorch_worker.py
HpBandSter/hpbandster/examples/example_5_pytorch_worker.py
import ConfigSpace import ConfigSpace.hyperparameters from hpbandster.core.worker import Worker import logging
15
2
4
0
2
3
1
Use image node_id 1 to create a new MNISTConvNet object from inherited base classes: torch.nn with example: obj = MNISTConvNet(num_conv_layers, num_filters_1, num_filters_2, num_filters_3, dropout_rate, num_fc_units, kernel_size)
229
node_id 1
117,776
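Worked example of the size bookkeeping in MNISTConvNet.__init__: each conv block applies an unpadded k x k convolution (28 -> 28 - k + 1) followed by an integer halving (a stride-2 pooling in the forward pass, which is what (size - k + 1) // 2 implies).

def flat_features(num_conv_layers, filters, kernel_size=3):
    size = (28 - kernel_size + 1) // 2          # after conv1 + pool
    for _ in range(1, num_conv_layers):
        size = (size - kernel_size + 1) // 2    # after each extra conv + pool
    return filters[num_conv_layers - 1] * size * size

# 2 conv layers, 16 filters each, k=3: 28 -> 13 -> 5, so 16 * 5 * 5 = 400
print(flat_features(2, [16, 16, 16]))  # 400 -- the in_features of fc1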
layers_count
SensitivityAnalysis
null
true
self
null
null
null
null
len
def layers_count(self): return len(self.target_layer)
["def","layers_count","(","self",")",":","return","len","(","self.target_layer",")"]
94
95
null
sensitivity_analysis.py
auptimizer/src/aup/compression/torch/utils/sensitivity_analysis.py
import copy import csv import logging from collections import OrderedDict import numpy import torch.nn
15
1
6
0
0
8
null
Use image node_id 2 for calling the SensitivityAnalysis obj's underlying member method code with example usage: obj.layers_count() and returns: len
147
node_id 2
315,478
model_parse
SensitivityAnalysis
null
true
self
null
null
null
null
null
def model_parse(self): for name, submodel in self.model.named_modules(): for op_type in SUPPORTED_OP_TYPE: if isinstance(submodel, op_type): self.target_layer[name] = submodel self.already_pruned[name] = 0
["def","model_parse","(","self",")",":","for","name",",","submodel","in","self.model.named_modules","(",")",":","for","op_type","in","SUPPORTED_OP_TYPE",":","if","isinstance","(","submodel",",","op_type",")",":","self.target_layer","[","name","]","=","submodel","self.already_pruned","[","name","]","=","0"]
97
102
null
sensitivity_analysis.py
auptimizer/src/aup/compression/torch/utils/sensitivity_analysis.py
import copy import csv import logging from collections import OrderedDict import numpy import torch.nn
15
1
6
0
0
8
null
Use image node_id 3 for calling the SensitivityAnalysis obj's underlying member method code with example usage: obj.model_parse() without return types
150
node_id 3
315,479
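The same named_modules() filtering pattern in isolation. SUPPORTED_OP_TYPE is not shown in this section; Conv2d/Linear below are an assumed stand-in, and the layer shapes are arbitrary since no forward pass is run.

import torch.nn as nn

SUPPORTED_OP_TYPE = (nn.Conv2d, nn.Linear)  # assumed

model = nn.Sequential(nn.Conv2d(1, 8, 3), nn.ReLU(), nn.Linear(10, 2))
target_layer = {
    name: mod
    for name, mod in model.named_modules()
    if isinstance(mod, SUPPORTED_OP_TYPE)
}
print(list(target_layer))  # ['0', '2'] -- only the conv and linear submodules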
_need_to_stop
SensitivityAnalysis
null
true
self,ori_metric,cur_metric
null
null
Judge whether the stop condition (early_stop, min_threshold, max_threshold) is met.
Parameters
----------
ori_metric : float
    original validation metric
cur_metric : float
    current validation metric
Returns
-------
stop : bool
    whether to stop the sensitivity analysis
["Judge","if","meet","the","stop","conditon","(","early_stop",",","min_threshold",",","max_threshold",")",".","Parameters","--","--","--","--","--","ori_metric",":","float","original","validation","metric","cur_metric",":","float","current","validation","metric","Returns","--","--","--","-","stop",":","bool","if","stop","the","sensitivity","analysis"]
False,False,True,True,True,True
def _need_to_stop(self, ori_metric, cur_metric):
    """
    Judge whether the stop condition (early_stop, min_threshold,
    max_threshold) is met.

    Parameters
    ----------
    ori_metric : float
        original validation metric
    cur_metric : float
        current validation metric

    Returns
    -------
    stop : bool
        whether to stop the sensitivity analysis
    """
    if self.early_stop_mode is None:
        # early stop mode is not enabled
        return False
    assert self.early_stop_value is not None
    if self.early_stop_mode == "minimize":
        if cur_metric < self.early_stop_value:
            return True
    elif self.early_stop_mode == "maximize":
        if cur_metric > self.early_stop_value:
            return True
    elif self.early_stop_mode == "dropped":
        if cur_metric < ori_metric - self.early_stop_value:
            return True
    elif self.early_stop_mode == "raised":
        if cur_metric > ori_metric + self.early_stop_value:
            return True
    return False
["def","_need_to_stop","(","self",",","ori_metric",",","cur_metric",")",":","``","''","''","Judge","if","meet","the","stop","conditon","(","early_stop",",","min_threshold",",","max_threshold",")",".","Parameters","--","--","--","--","--","ori_metric",":","float","original","validation","metric","cur_metric",":","float","current","validation","metric","Returns","--","--","--","-","stop",":","bool","if","stop","the","sensitivity","analysis","``","''","''","if","self.early_stop_mode","is","None",":","#","early","stop","mode","is","not","enable","return","False","assert","self.early_stop_value","is","not","None","if","self.early_stop_mode","==","``","minimize","''",":","if","cur_metric","<","self.early_stop_value",":","return","True","elif","self.early_stop_mode","==","``","maximize","''",":","if","cur_metric",">","self.early_stop_value",":","return","True","elif","self.early_stop_mode","==","``","dropped","''",":","if","cur_metric","<","ori_metric","-","self.early_stop_value",":","return","True","elif","self.early_stop_mode","==","``","raised","''",":","if","cur_metric",">","ori_metric","+","self.early_stop_value",":","return","True","return","False"]
104
136
null
sensitivity_analysis.py
auptimizer/src/aup/compression/torch/utils/sensitivity_analysis.py
import copy import csv import logging from collections import OrderedDict import numpy import torch.nn
15
1
6
0
0
8
null
Use image node_id 4 for calling the SensitivityAnalysis obj's underlying member method code with example usage: obj._need_to_stop(ori_metric, cur_metric) and returns: False, False, True, True, True, True
203
node_id 4
315,480
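The four early-stop modes of _need_to_stop, exercised standalone so the threshold logic is easy to verify (the function below restates the method without the class plumbing):

def need_to_stop(mode, value, ori_metric, cur_metric):
    if mode is None:
        return False
    if mode == "minimize":
        return cur_metric < value
    if mode == "maximize":
        return cur_metric > value
    if mode == "dropped":
        return cur_metric < ori_metric - value
    if mode == "raised":
        return cur_metric > ori_metric + value
    return False

print(need_to_stop("dropped", 0.05, ori_metric=0.92, cur_metric=0.85))   # True: fell by 7 points
print(need_to_stop("minimize", 0.80, ori_metric=0.92, cur_metric=0.83))  # False: still above 0.80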
is_available
Converter
ImageConverter
true
self
null
null
Confirms if converter is available or not.
["Confirms","if","converter","is","available","or","not","."]
True
def is_available(self) -> bool: """Confirms if converter is available or not.""" return True
["def","is_available","(","self",")","-",">","bool",":","``","''","''","Confirms","if","converter","is","available","or","not",".","''","''","''","return","True"]
24
26
null
convert-svg-to-pdf.py
sympy/doc/ext/convert-svg-to-pdf.py
from __future__ import annotations from sphinx.transforms.post_transforms.images import ImageConverter from sphinx.util import logging import os import platform from typing import Any from sphinx.application import Sphinx
15
1
7
1
1
5
1
Use image node_id 1 for calling the Converter obj's underlying member method code with example usage: obj.is_available() and returns: True
138
node_id 1
2,029,274
reset
COCODetectionMetric
EvalMetric
true
self
Detection metric for COCO bbox task.

Parameters
----------
dataset : instance of gluoncv.data.COCODetection
    The validation dataset.
save_prefix : str
    Prefix for the saved JSON results.
use_time : bool
    Append unique datetime string to created JSON file name if ``True``.
cleanup : bool
    Remove created JSON file if ``True``.
score_thresh : float
    Detection results with confidence scores smaller than ``score_thresh`` will
    be discarded before saving to results.
data_shape : tuple of int, default is None
    If `data_shape` is provided as (height, width), we will rescale bounding boxes when
    saving the predictions.
    This is helpful when SSD/YOLO box predictions cannot be rescaled conveniently. Note that
    the data_shape must be fixed for all validation images.
post_affine : a callable function with input signature (orig_w, orig_h, out_w, out_h)
    If not None, the bounding boxes will be affine transformed rather than simply scaled.
["Detection","metric","for","COCO","bbox","task",".","Parameters","--","--","--","--","--","dataset",":","instance","of","gluoncv.data.COCODetection","The","validation","dataset",".","save_prefix",":","str","Prefix","for","the","saved","JSON","results",".","use_time",":","bool","Append","unique","datetime","string","to","created","JSON","file","name","if","``","True","``",".","cleanup",":","bool","Remove","created","JSON","file","if","``","True","``",".","score_thresh",":","float","Detection","results","with","confident","scores","smaller","than","``","score_thresh","``","will","be","discarded","before","saving","to","results",".","data_shape",":","tuple","of","int",",","default","is","None","If","`","data_shape","`","is","provided","as","(","height",",","width",")",",","we","will","rescale","bounding","boxes","when","saving","the","predictions",".","This","is","helpful","when","SSD\/YOLO","box","predictions","can","not","be","rescaled","conveniently",".","Note","that","the","data_shape","must","be","fixed","for","all","validation","images",".","post_affine",":","a","callable","function","with","input","signature","(","orig_w",",","orig_h",",","out_w",",","out_h",")","If","not","None",",","the","bounding","boxes","will","be","affine","transformed","rather","than","simply","scaled","."]
null
null
null
def reset(self): self._current_id = 0 self._results = []
["def","reset","(","self",")",":","self._current_id","=","0","self._results","=","[","]"]
87
89
null
coco_detection.py
gluon-cv/gluoncv/utils/metrics/coco_detection.py
from __future__ import absolute_import import sys import os from os import path import warnings import numpy import mxnet
15
1
7
0
1
6
1
Use image node_id 3 for calling the COCODetectionMetric obj's underlying member method code with example usage: obj.reset() without return types
144
node_id 3
1,096,310
delete_pipeline
global
null
false
ctx,engine,pipeline_name,endpoint,iap_client_id,namespace
null
null
null
null
null
def delete_pipeline( ctx: Context, engine: str, pipeline_name: str, endpoint: str, iap_client_id: str, namespace: str, ) -> None: """Command definition to delete a pipeline.""" click.echo("Deleting pipeline") ctx.flags_dict[labels.ENGINE_FLAG] = engine ctx.flags_dict[labels.PIPELINE_NAME] = pipeline_name ctx.flags_dict[labels.ENDPOINT] = endpoint ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id ctx.flags_dict[labels.NAMESPACE] = namespace handler_factory.create_handler(ctx.flags_dict).delete_pipeline()
["def","delete_pipeline","(","ctx",":","Context",",","engine",":","str",",","pipeline_name",":","str",",","endpoint",":","str",",","iap_client_id",":","str",",","namespace",":","str",",",")","-",">","None",":","``","''","''","Command","definition","to","delete","a","pipeline",".","''","''","''","click.echo","(","``","Deleting","pipeline","''",")","ctx.flags_dict","[","labels.ENGINE_FLAG","]","=","engine","ctx.flags_dict","[","labels.PIPELINE_NAME","]","=","pipeline_name","ctx.flags_dict","[","labels.ENDPOINT","]","=","endpoint","ctx.flags_dict","[","labels.IAP_CLIENT_ID","]","=","iap_client_id","ctx.flags_dict","[","labels.NAMESPACE","]","=","namespace","handler_factory.create_handler","(","ctx.flags_dict",")",".delete_pipeline","(",")"]
245
254
null
pipeline.py
tfx/tfx/tools/cli/commands/pipeline.py
import sys from typing import Optional import click from tfx.tools.cli import labels from tfx.tools.cli.cli_context import Context from tfx.tools.cli.cli_context import pass_context from tfx.tools.cli.handler import handler_factory
15
null
7
8
null
null
null
Use image node_id 5 for calling a global function with example usage: delete_pipeline(ctx, engine, pipeline_name, endpoint, iap_client_id, namespace) without return types
170
node_id 5
2,199,077
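delete_pipeline (and list_pipelines two records below) follow one pattern: copy each click option into ctx.flags_dict under a label key, then build a handler from those flags and dispatch. A stripped-down sketch of that pattern; EchoHandler and the plain string keys are hypothetical stand-ins, not tfx's handler_factory or labels module.

class EchoHandler:
    """Hypothetical handler; real handlers come from handler_factory."""
    def __init__(self, flags):
        self.flags = flags

    def delete_pipeline(self):
        print(f"deleting {self.flags['pipeline_name']} via {self.flags['engine']}")

flags_dict = {"engine": "kubeflow", "pipeline_name": "my_pipeline"}
EchoHandler(flags_dict).delete_pipeline()  # deleting my_pipeline via kubeflow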
list_pipelines
global
null
false
ctx,engine,endpoint,iap_client_id,namespace
null
null
null
null
null
def list_pipelines( ctx: Context, engine: str, endpoint: str, iap_client_id: str, namespace: str, ) -> None: """Command definition to list pipelines.""" click.echo("Listing all pipelines") ctx.flags_dict[labels.ENGINE_FLAG] = engine ctx.flags_dict[labels.ENDPOINT] = endpoint ctx.flags_dict[labels.IAP_CLIENT_ID] = iap_client_id ctx.flags_dict[labels.NAMESPACE] = namespace handler_factory.create_handler(ctx.flags_dict).list_pipelines()
["def","list_pipelines","(","ctx",":","Context",",","engine",":","str",",","endpoint",":","str",",","iap_client_id",":","str",",","namespace",":","str",",",")","-",">","None",":","``","''","''","Command","definition","to","list","pipelines",".","''","''","''","click.echo","(","``","Listing","all","pipelines","''",")","ctx.flags_dict","[","labels.ENGINE_FLAG","]","=","engine","ctx.flags_dict","[","labels.ENDPOINT","]","=","endpoint","ctx.flags_dict","[","labels.IAP_CLIENT_ID","]","=","iap_client_id","ctx.flags_dict","[","labels.NAMESPACE","]","=","namespace","handler_factory.create_handler","(","ctx.flags_dict",")",".list_pipelines","(",")"]
278
286
null
pipeline.py
tfx/tfx/tools/cli/commands/pipeline.py
import sys from typing import Optional import click from tfx.tools.cli import labels from tfx.tools.cli.cli_context import Context from tfx.tools.cli.cli_context import pass_context from tfx.tools.cli.handler import handler_factory
15
null
7
8
null
null
null
Use image node_id 6 for calling a global function with example usage: list_pipelines(ctx, engine, endpoint, iap_client_id, namespace) without return types
154
node_id 6
2,199,078
on_test_end
NotificationCallback
Callback
true
self,logs
Send a notification to a channel at the beginning/ending of the training/testing and at a constant frequency (`alert_frequency`) during the training.

Args:
    notificator (~poutyne.Notificator): The notification channel to send the message.
        The expected interface needs to implement a `send_notification` method to send the message.
        You can see the `notif <https://notificationdoc.ca/index.html>`_ package which implements
        some Notificators respecting the interface.
    alert_frequency (int): The frequency (in epochs), during training, to send an update.
        By default, 1.
    experiment_name (Union[str, None]): The name of the experiment to add to the message.
        By default, None.

Example:

    .. code-block:: python

        from notif.notificator import SlackNotificator
        from poutyne.framework.callbacks.notification import NotificationCallback

        webhook_url = "a_link"
        slack_notif = SlackNotificator(webhook_url=webhook_url)

        notif_callback = NotificationCallback(notificator=slack_notif)

        model = Model(...)
        model.fit_generator(..., callbacks=[notif_callback])
["Send","a","notification","to","a","channel","at","the","beginning\/ending","of","the","training\/testing","and","at","a","constant","frequency","(","`","alert_frequency","`",")","during","the","training",".","Args",":","notificator","(","~poutyne.Notificator",")",":","The","notification","channel","to","send","the","message",".","The","expected","interface","need","to","implement","a","`","send_notification","`","method","to","send","the","message",".","You","can","see","the","`","notif","<","https",":","\/\/notificationdoc.ca\/index.html",">","`","_","package","which","implements","some","Notificator","respecting","the","interface",".","alert_frequency","(","int",")",":","The","frequency","(","in","epoch",")",",","during","training",",","to","send","an","update",".","By","default",",","1.","experiment_name","(","Union","[","str",",","None","]",")",":","The","name","of","the","experiment","to","add","to","the","message",".","By","default",",","None",".","Example",":","..","code-block",":",":","python","from","notif.notificator","import","SlackNotificator","from","poutyne.framework.callbacks.notification","import","NotificationCallback","webhook_url","=","``","a_link","''","slack_notif","=","SlackNotificator","(","webhook_url=webhook_url",")","notif_callback","=","NotificationCallback","(","notificator=slack_notif",")","model","=","Model","(","...",")","model.fit_generator","(","...",",","callbacks=","[","notif_callback","]",")"]
Send the message to the channel 'End of the testing' or 'End of the testing for the experiment experiment_name' if an experiment name is given.
["Send","the","message","to","the","channel","'End","of","the","testing","'","or","'End","of","the","testing","for","the","experiment","experiment_name","'","if","an","experiment","name","is","given","."]
null
def on_test_end(self, logs: Dict) -> None: """ Send the message to the channel 'End of the testing' or 'End of the testing for the experiment experiment_name' if an experiment name is given. """ message = f"Here the test metrics: \n{self._format_logs(logs)}" self.notificator.send_notification( message, subject=f"End of the testing{self.experiment_name_msg}.", )
["def","on_test_end","(","self",",","logs",":","Dict",")","-",">","None",":","``","''","''","Send","the","message","to","the","channel","'End","of","the","testing","'","or","'End","of","the","testing","for","the","experiment","experiment_name","'","if","an","experiment","name","is","given.","``","''","''","message","=","f","''","Here","the","test","metrics",":","\\n","{","self._format_logs","(","logs",")","}","''","self.notificator.send_notification","(","message",",","subject=f","''","End","of","the","testing","{","self.experiment_name_msg","}",".","``",",",")"]
120
127
null
notification.py
poutyne/poutyne/framework/callbacks/notification.py
from abc import ABC, abstractmethod from typing import Dict, Union from poutyne.framework.callbacks.callbacks import Callback
15
2
3
0
2
7
1
Use image node_id 6 for calling the NotificationCallback obj's underlying member method code with example usage: obj.on_test_end(logs) without return types
155
node_id 6
1,602,782
on_test_begin
NotificationCallback
Callback
true
self,logs
Send a notification to a channel at the beginning/ending of the training/testing and at a constant frequency (`alert_frequency`) during the training.

Args:
    notificator (~poutyne.Notificator): The notification channel to send the message.
        The expected interface needs to implement a `send_notification` method to send the message.
        You can see the `notif <https://notificationdoc.ca/index.html>`_ package which implements
        some Notificators respecting the interface.
    alert_frequency (int): The frequency (in epochs), during training, to send an update.
        By default, 1.
    experiment_name (Union[str, None]): The name of the experiment to add to the message.
        By default, None.

Example:

    .. code-block:: python

        from notif.notificator import SlackNotificator
        from poutyne.framework.callbacks.notification import NotificationCallback

        webhook_url = "a_link"
        slack_notif = SlackNotificator(webhook_url=webhook_url)

        notif_callback = NotificationCallback(notificator=slack_notif)

        model = Model(...)
        model.fit_generator(..., callbacks=[notif_callback])
["Send","a","notification","to","a","channel","at","the","beginning\/ending","of","the","training\/testing","and","at","a","constant","frequency","(","`","alert_frequency","`",")","during","the","training",".","Args",":","notificator","(","~poutyne.Notificator",")",":","The","notification","channel","to","send","the","message",".","The","expected","interface","need","to","implement","a","`","send_notification","`","method","to","send","the","message",".","You","can","see","the","`","notif","<","https",":","\/\/notificationdoc.ca\/index.html",">","`","_","package","which","implements","some","Notificator","respecting","the","interface",".","alert_frequency","(","int",")",":","The","frequency","(","in","epoch",")",",","during","training",",","to","send","an","update",".","By","default",",","1.","experiment_name","(","Union","[","str",",","None","]",")",":","The","name","of","the","experiment","to","add","to","the","message",".","By","default",",","None",".","Example",":","..","code-block",":",":","python","from","notif.notificator","import","SlackNotificator","from","poutyne.framework.callbacks.notification","import","NotificationCallback","webhook_url","=","``","a_link","''","slack_notif","=","SlackNotificator","(","webhook_url=webhook_url",")","notif_callback","=","NotificationCallback","(","notificator=slack_notif",")","model","=","Model","(","...",")","model.fit_generator","(","...",",","callbacks=","[","notif_callback","]",")"]
Send the message to the channel 'Start of the testing' or 'Start of the testing for the experiment experiment_name' if an experiment name is given.
["Send","the","message","to","the","channel","'Start","of","the","testing","'","or","'Start","of","the","testing","for","the","experiment","experiment_name","'","if","an","experiment","name","is","given","."]
null
def on_test_begin(self, logs: Dict) -> None: """ Send the message to the channel 'Start of the testing' or 'Start of the testing for the experiment experiment_name' if an experiment name is given. """ empty_message = "" self.notificator.send_notification( empty_message, subject=f"Start of the testing{self.experiment_name_msg}.", )
["def","on_test_begin","(","self",",","logs",":","Dict",")","-",">","None",":","``","''","''","Send","the","message","to","the","channel","'Start","of","the","testing","'","or","'Start","of","the","testing","for","the","experiment","experiment_name","'","if","an","experiment","name","is","given.","``","''","''","empty_message","=","``","''","self.notificator.send_notification","(","empty_message",",","subject=f","''","Start","of","the","testing","{","self.experiment_name_msg","}",".","``",",",")"]
111
118
null
notification.py
poutyne/poutyne/framework/callbacks/notification.py
from abc import ABC, abstractmethod from typing import Dict, Union from poutyne.framework.callbacks.callbacks import Callback
15
2
3
0
2
7
1
Use image node_id 5 for calling the NotificationCallback obj's underlying member method code with example usage: obj.on_test_begin(logs) without return types
157
node_id 5
1,602,781
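Both test hooks above only require that self.notificator expose send_notification(message, subject=...). A minimal print-based stand-in satisfying that interface (the real notificators from the notif package are drop-in replacements):

class PrintNotificator:
    """Toy notificator; prints instead of posting to a channel."""
    def send_notification(self, message, subject=""):
        print(f"[{subject}] {message}")

# Hooking it up, following the class docstring's example:
# notif_callback = NotificationCallback(notificator=PrintNotificator())
# model.fit_generator(..., callbacks=[notif_callback])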
main
global
null
false
null
null
null
null
null
def main(): """Convert standard rttm to sample-based result""" args = get_parser().parse_args() # logging info if args.verbose > 1: logging.basicConfig( level=logging.DEBUG, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) elif args.verbose > 0: logging.basicConfig( level=logging.INFO, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) else: logging.basicConfig( level=logging.WARN, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) logging.warning("Skip DEBUG/INFO messages") sampling_rate = humanfriendly.parse_size(args.sampling_rate) convert_rttm_text( args.rttm, args.wavscp, sampling_rate, args.output_path ) logging.info("Successfully finished RTTM converting.")
["def","main","(",")",":","``","''","''","Convert","standard","rttm","to","sample-based","result","''","''","''","args","=","get_parser","(",")",".parse_args","(",")","#","logging","info","if","args.verbose",">","1",":","logging.basicConfig","(","level=logging.DEBUG",",","format=","''","%","(","asctime",")","s","(","%","(","module",")","s",":","%","(","lineno",")","d",")","%","(","levelname",")","s",":","%","(","message",")","s","''",",",")","elif","args.verbose",">","0",":","logging.basicConfig","(","level=logging.INFO",",","format=","''","%","(","asctime",")","s","(","%","(","module",")","s",":","%","(","lineno",")","d",")","%","(","levelname",")","s",":","%","(","message",")","s","''",",",")","else",":","logging.basicConfig","(","level=logging.WARN",",","format=","''","%","(","asctime",")","s","(","%","(","module",")","s",":","%","(","lineno",")","d",")","%","(","levelname",")","s",":","%","(","message",")","s","''",",",")","logging.warning","(","``","Skip","DEBUG\/INFO","messages","''",")","sampling_rate","=","humanfriendly.parse_size","(","args.sampling_rate",")","convert_rttm_text","(","args.rttm",",","args.wavscp",",","sampling_rate",",","args.output_path",")","logging.info","(","``","Successfully","finished","RTTM","converting",".","''",")"]
111
136
null
convert_rttm.py
espnet/egs2/thchs30/asr1/pyscripts/utils/convert_rttm.py
import argparse import collections.abc import logging import os import re from pathlib import Path from typing import Union import humanfriendly import numpy import soundfile from typeguard import check_argument_types from espnet2.utils.types import str_or_int
15
null
12
3
null
null
null
Use image node_id 3 for calling a global function with example usage: main() without return types
97
node_id 3
981,194
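As a side note on the sampling-rate handling above, humanfriendly.parse_size accepts both plain integers and suffixed strings; a small sketch, assuming the humanfriendly package is installed:

import humanfriendly

# Both spellings resolve to the same integer sample rate (decimal units by default).
print(humanfriendly.parse_size("16000"))  # 16000
print(humanfriendly.parse_size("16k"))    # 16000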
get_logits
RerankModelPlugin
Plugin
true
self,query,choices
Base class for reranker models
["Base","class","for","reranker","models"]
get search ranking logits for query, choices
["get","search","ranking","logits","for","query",",","choices"]
null
def get_logits(self, query: str, choices: List[str]): """get search ranking logits for query, choices""" raise NotImplementedError()
["def","get_logits","(","self",",","query",":","str",",","choices",":","List","[","str","]",")",":","``","''","''","get","search","ranking","logits","for","query",",","choices","''","''","''","raise","NotImplementedError","(",")"]
74
76
base.py
nboost/nboost/plugins/rerank/base.py
from typing import List, Tuple import time from nboost.plugins import Plugin from nboost.delegates import RequestDelegate, ResponseDelegate from nboost.helpers import calculate_mrr from nboost.database import DatabaseRow from nboost import defaults import numpy
15
1
8
0
1
5
1
Use image node_id 4 for calling the RerankModelPlugin obj's underlying member method code with example usage: obj.get_logits(query, choices) without return types
161
node_id 4
1,408,510
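Since get_logits is an abstract hook that only raises NotImplementedError, a toy subclass makes the contract concrete. OverlapReranker and its term-overlap scoring rule are illustrative assumptions, not part of nboost.

from typing import List

class OverlapReranker:
    # Hypothetical implementation: score each choice by term overlap with the query.
    def get_logits(self, query: str, choices: List[str]) -> List[float]:
        terms = set(query.lower().split())
        return [len(terms & set(c.lower().split())) / (len(terms) or 1) for c in choices]

print(OverlapReranker().get_logits("fast gpu search", ["gpu search engine", "pasta recipes"]))
# roughly [0.67, 0.0]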
_restore_configuration
Configurable
object
true
cls,saved
Base class for configurable interfaces. A configurable interface is an (abstract) class whose constructor acts as a factory function for one of its implementation subclasses. The implementation subclass as well as optional keyword arguments to its initializer can be set globally at runtime with `configure`. By using the constructor as the factory method, the interface looks like a normal class, `isinstance` works as usual, etc. This pattern is most useful when the choice of implementation is likely to be a global decision (e.g. when `~select.epoll` is available, always use it instead of `~select.select`), or when a previously-monolithic class has been split into specialized subclasses. Configurable subclasses must define the class methods `configurable_base` and `configurable_default`, and use the instance method `initialize` instead of ``__init__``.
["Base","class","for","configurable","interfaces",".","A","configurable","interface","is","an","(","abstract",")","class","whose","constructor","acts","as","a","factory","function","for","one","of","its","implementation","subclasses",".","The","implementation","subclass","as","well","as","optional","keyword","arguments","to","its","initializer","can","be","set","globally","at","runtime","with","`","configure","`",".","By","using","the","constructor","as","the","factory","method",",","the","interface","looks","like","a","normal","class",",","`","isinstance","`","works","as","usual",",","etc",".","This","pattern","is","most","useful","when","the","choice","of","implementation","is","likely","to","be","a","global","decision","(","e.g",".","when","`","~select.epoll","`","is","available",",","always","use","it","instead","of","`","~select.select","`",")",",","or","when","a","previously-monolithic","class","has","been","split","into","specialized","subclasses",".","Configurable","subclasses","must","define","the","class","methods","`","configurable_base","`","and","`","configurable_default","`",",","and","use","the","instance","method","`","initialize","`","instead","of","``","__init__","``","."]
null
null
null
def _restore_configuration(cls, saved): base = cls.configurable_base() base.__impl_class = saved[0] base.__impl_kwargs = saved[1]
["def","_restore_configuration","(","cls",",","saved",")",":","base","=","cls.configurable_base","(",")","base.__impl_class","=","saved","[","0","]","base.__impl_kwargs","=","saved","[","1","]"]
208
211
null
util.py
catboost/contrib/python/pyzmq/py2/zmq/eventloop/minitornado/util.py
from __future__ import absolute_import, division, print_function, with_statement import sys
15
1
2
3
1
8
1
Use image node_id 8 for calling the Configurable obj's underlying member method code with example usage: obj._restore_configuration(cls, saved) without return types
164
node_id 8
515,116
forward
MaskL1Loss
nn
true
self,pred,gt,mask
null
null
null
null
loss
def forward(self, pred: paddle.Tensor, gt, mask): loss = (paddle.abs(pred - gt) * mask).sum() / ( mask.sum() + self.eps ) return loss
["def","forward","(","self",",","pred",":","paddle.Tensor",",","gt",",","mask",")",":","loss","=","(","paddle.abs","(","pred","-","gt",")","*","mask",")",".sum","(",")","\/","(","mask.sum","(",")","+","self.eps",")","return","loss"]
95
97
null
basic_loss.py
PaddleOCR/benchmark/PaddleOCR_DBNet/models/losses/basic_loss.py
import paddle import paddle.nn
15
3
2
0
3
2
1
Use image node_id 2 for calling the MaskL1Loss obj's underlying member method code with example usage: obj.forward(pred, gt, mask) and returns: loss
148
node_id 2
176,934
__init__
MaskL1Loss
nn
true
self,eps
null
null
null
null
MaskL1Loss
def __init__(self, eps=1e-6): super(MaskL1Loss, self).__init__() self.eps = eps
["def","__init__","(","self",",","eps=1e-6",")",":","super","(","MaskL1Loss",",","self",")",".__init__","(",")","self.eps","=","eps"]
91
93
null
basic_loss.py
PaddleOCR/benchmark/PaddleOCR_DBNet/models/losses/basic_loss.py
import paddle import paddle.nn
15
3
2
0
3
2
1
Use image node_id 1 to create a new MaskL1Loss object from inherited base classes: nn with example: obj = MaskL1Loss(eps)
121
node_id 1
176,933
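A NumPy mirror of the masked L1 computation above, to make the roles of mask and eps visible; NumPy is used here only for illustration, since the original operates on paddle tensors.

import numpy as np

def mask_l1(pred, gt, mask, eps=1e-6):
    # Mean absolute error over mask==1 positions; eps guards against an all-zero mask.
    return (np.abs(pred - gt) * mask).sum() / (mask.sum() + eps)

pred = np.array([0.9, 0.2, 0.4, 0.8])
gt   = np.array([1.0, 0.0, 0.0, 1.0])
mask = np.array([1.0, 1.0, 0.0, 1.0])   # third element is ignored
print(mask_l1(pred, gt, mask))           # (0.1 + 0.2 + 0.2) / 3 ≈ 0.1667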
import_object
global
null
false
name
null
null
null
null
__import__,getattr
def import_object(name): """Imports an object by name. import_object('x') is equivalent to 'import x'. import_object('x.y.z') is equivalent to 'from x.y import z'. >>> import tornado.escape >>> import_object('tornado.escape') is tornado.escape True >>> import_object('tornado.escape.utf8') is tornado.escape.utf8 True >>> import_object('tornado') is tornado True >>> import_object('tornado.missing_module') Traceback (most recent call last): ... ImportError: No module named missing_module """ if isinstance(name, unicode_type) and str is not unicode_type: # On python 2 a byte string is required. name = name.encode("utf-8") if name.count(".") == 0: return __import__(name, None, None) parts = name.split(".") obj = __import__(".".join(parts[:-1]), None, None, [parts[-1]], 0) try: return getattr(obj, parts[-1]) except AttributeError: raise ImportError("No module named %s" % parts[-1])
["def","import_object","(","name",")",":","``","''","''","Imports","an","object","by","name",".","import_object","(","'","x","'",")","is","equivalent","to","'import","x","'",".","import_object","(","'","x.y.z","'",")","is","equivalent","to","'from","x.y","import","z","'",".",">",">",">","import","tornado.escape",">",">",">","import_object","(","'tornado.escape","'",")","is","tornado.escape","True",">",">",">","import_object","(","'tornado.escape.utf8","'",")","is","tornado.escape.utf8","True",">",">",">","import_object","(","'tornado","'",")","is","tornado","True",">",">",">","import_object","(","'tornado.missing_module","'",")","Traceback","(","most","recent","call","last",")",":","...","ImportError",":","No","module","named","missing_module","``","''","''","if","isinstance","(","name",",","unicode_type",")","and","str","is","not","unicode_type",":","#","On","python","2","a","byte","string","is","required",".","name","=","name.encode","(","``","utf-8","''",")","if","name.count","(","``",".","''",")","==","0",":","return","__import__","(","name",",","None",",","None",")","parts","=","name.split","(","``",".","''",")","obj","=","__import__","(","``",".","``",".join","(","parts","[",":","-1","]",")",",","None",",","None",",","[","parts","[","-1","]","]",",","0",")","try",":","return","getattr","(","obj",",","parts","[","-1","]",")","except","AttributeError",":","raise","ImportError","(","``","No","module","named","%","s","''","%","parts","[","-1","]",")"]
37
66
null
util.py
catboost/contrib/python/pyzmq/py2/zmq/eventloop/minitornado/util.py
from __future__ import absolute_import, division, print_function, with_statement import sys
15
null
2
3
null
null
null
Use image node_id 1 for calling a global function with example usage: import_object(name) and returns: __import__, getattr
122
node_id 1
515,117
input_expected_output
global
null
false
dask_client,input_combo
null
null
null
null
input_combo
def input_expected_output(dask_client, input_combo): """ This fixture returns the inputs and expected results from the Core number algo. """ core_number = input_combo["core_number"] degree_type = input_combo["degree_type"] input_data_path = input_combo["graph_file"] G = utils.generate_cugraph_graph_from_file( input_data_path, directed=False, edgevals=True ) if core_number: # compute the core_number core_number = cugraph.core_number(G, degree_type=degree_type) else: core_number = None input_combo["core_number"] = core_number input_combo["SGGraph"] = G sg_k_core_graph = cugraph.k_core( G, core_number=core_number, degree_type=degree_type ) sg_k_core_results = sg_k_core_graph.view_edge_list() # FIXME: The result will come asymetric. Symmetrize the results srcCol = sg_k_core_graph.source_columns dstCol = sg_k_core_graph.destination_columns wgtCol = sg_k_core_graph.weight_column sg_k_core_results = ( symmetrize_df(sg_k_core_results, srcCol, dstCol, wgtCol) .sort_values([srcCol, dstCol]) .reset_index(drop=True) ) input_combo["sg_k_core_results"] = sg_k_core_results # Creating an edgelist from a dask cudf dataframe chunksize = dcg.get_chunksize(input_data_path) ddf = dask_cudf.read_csv( input_data_path, chunksize=chunksize, delimiter=" ", names=["src", "dst", "value"], dtype=["int32", "int32", "float32"], ) dg = cugraph.Graph(directed=False) # FIXME: False when renumbering (C++ and python renumbering) dg.from_dask_cudf_edgelist( ddf, source="src", destination="dst", edge_attr="value", renumber=True, ) input_combo["MGGraph"] = dg return input_combo
["def","input_expected_output","(","dask_client",",","input_combo",")",":","``","''","''","This","fixture","returns","the","inputs","and","expected","results","from","the","Core","number","algo.","``","''","''","core_number","=","input_combo","[","``","core_number","''","]","degree_type","=","input_combo","[","``","degree_type","''","]","input_data_path","=","input_combo","[","``","graph_file","''","]","G","=","utils.generate_cugraph_graph_from_file","(","input_data_path",",","directed=False",",","edgevals=True",")","if","core_number",":","#","compute","the","core_number","core_number","=","cugraph.core_number","(","G",",","degree_type=degree_type",")","else",":","core_number","=","None","input_combo","[","``","core_number","''","]","=","core_number","input_combo","[","``","SGGraph","''","]","=","G","sg_k_core_graph","=","cugraph.k_core","(","G",",","core_number=core_number",",","degree_type=degree_type",")","sg_k_core_results","=","sg_k_core_graph.view_edge_list","(",")","#","FIXME",":","The","result","will","come","asymetric",".","Symmetrize","the","results","srcCol","=","sg_k_core_graph.source_columns","dstCol","=","sg_k_core_graph.destination_columns","wgtCol","=","sg_k_core_graph.weight_column","sg_k_core_results","=","(","symmetrize_df","(","sg_k_core_results",",","srcCol",",","dstCol",",","wgtCol",")",".sort_values","(","[","srcCol",",","dstCol","]",")",".reset_index","(","drop=True",")",")","input_combo","[","``","sg_k_core_results","''","]","=","sg_k_core_results","#","Creating","an","edgelist","from","a","dask","cudf","dataframe","chunksize","=","dcg.get_chunksize","(","input_data_path",")","ddf","=","dask_cudf.read_csv","(","input_data_path",",","chunksize=chunksize",",","delimiter=","''","``",",","names=","[","``","src","''",",","``","dst","''",",","``","value","''","]",",","dtype=","[","``","int32","''",",","``","int32","''",",","``","float32","''","]",",",")","dg","=","cugraph.Graph","(","directed=False",")","#","FIXME",":","False","when","renumbering","(","C++","and","python","renumbering",")","dg.from_dask_cudf_edgelist","(","ddf",",","source=","''","src","''",",","destination=","''","dst","''",",","edge_attr=","''","value","''",",","renumber=True",",",")","input_combo","[","``","MGGraph","''","]","=","dg","return","input_combo"]
59
119
null
test_k_core_mg.py
cugraph/python/cugraph/cugraph/tests/core/test_k_core_mg.py
import gc import pytest import dask_cudf import cugraph import cugraph.dask from cugraph.testing import utils from cudf.testing.testing import assert_frame_equal from cugraph.structure.symmetrize import symmetrize_df from pylibcugraph.testing import gen_fixture_params_product
15
null
9
6
null
null
null
Use image node_id 3 for calling a global function with example usage: input_expected_output(dask_client, input_combo) and returns: input_combo
142
node_id 3
686,800
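The "symmetrize the results" step above can be pictured with plain pandas. This sketch only illustrates the intent of symmetrize_df for an undirected edge list; it is not cugraph's implementation.

import pandas as pd

edges = pd.DataFrame({"src": [0, 1], "dst": [1, 2], "wgt": [1.0, 2.0]})
mirror = edges.rename(columns={"src": "dst", "dst": "src"})
# Undirected edge list: every (u, v) row gains its (v, u) counterpart.
sym = (pd.concat([edges, mirror], ignore_index=True)
         .sort_values(["src", "dst"])
         .reset_index(drop=True))
print(sym)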
_compute
DiceLoss
nn
true
self,pred,gt,mask,weights
Loss function from https://arxiv.org/abs/1707.03237, where iou computation is introduced in a heatmap manner to measure the diversity between two heatmaps.
["Loss","function","from","https",":","\/\/arxiv.org\/abs\/1707.03237",",","where","iou","computation","is","introduced","heatmap","manner","to","measure","the","diversity","bwtween","tow","heatmaps","."]
null
null
loss
def _compute(self, pred, gt, mask, weights): if len(pred.shape) == 4: pred = pred[:, 0, :, :] gt = gt[:, 0, :, :] assert pred.shape == gt.shape assert pred.shape == mask.shape if weights is not None: assert weights.shape == mask.shape mask = weights * mask intersection = (pred * gt * mask).sum() union = (pred * mask).sum() + (gt * mask).sum() + self.eps loss = 1 - 2.0 * intersection / union assert loss <= 1 return loss
["def","_compute","(","self",",","pred",",","gt",",","mask",",","weights",")",":","if","len","(","pred.shape",")","==","4",":","pred","=","pred","[",":",",","0",",",":",",",":","]","gt","=","gt","[",":",",","0",",",":",",",":","]","assert","pred.shape","==","gt.shape","assert","pred.shape","==","mask.shape","if","weights","is","not","None",":","assert","weights.shape","==","mask.shape","mask","=","weights","*","mask","intersection","=","(","pred","*","gt","*","mask",")",".sum","(",")","union","=","(","pred","*","mask",")",".sum","(",")","+","(","gt","*","mask",")",".sum","(",")","+","self.eps","loss","=","1","-","2.0","*","intersection","\/","union","assert","loss","<","=","1","return","loss"]
73
87
null
basic_loss.py
PaddleOCR/benchmark/PaddleOCR_DBNet/models/losses/basic_loss.py
import paddle import paddle.nn
15
3
2
0
3
3
1
Use image node_id 3 for calling the DiceLoss obj's underlying member method code with example usage: obj._compute(pred, gt, mask, weights) and returns: loss
156
node_id 3
176,932
forward
DiceLoss
nn
true
self,pred,gt,mask,weights
Loss function from https://arxiv.org/abs/1707.03237, where iou computation is introduced in a heatmap manner to measure the diversity between two heatmaps.
["Loss","function","from","https",":","\/\/arxiv.org\/abs\/1707.03237",",","where","iou","computation","is","introduced","heatmap","manner","to","measure","the","diversity","bwtween","tow","heatmaps","."]
pred: one or two heatmaps of shape (N, 1, H, W), the losses of tow heatmaps are added together. gt: (N, 1, H, W) mask: (N, H, W)
["pred",":","one","or","two","heatmaps","of","shape","(","N",",","1",",","H",",","W",")",",","the","losses","of","tow","heatmaps","are","added","together",".","gt",":","(","N",",","1",",","H",",","W",")","mask",":","(","N",",","H",",","W",")"]
self
def forward(self, pred: paddle.Tensor, gt, mask, weights=None): """ pred: one or two heatmaps of shape (N, 1, H, W), the losses of tow heatmaps are added together. gt: (N, 1, H, W) mask: (N, H, W) """ return self._compute(pred, gt, mask, weights)
["def","forward","(","self",",","pred",":","paddle.Tensor",",","gt",",","mask",",","weights=None",")",":","``","''","''","pred",":","one","or","two","heatmaps","of","shape","(","N",",","1",",","H",",","W",")",",","the","losses","of","tow","heatmaps","are","added","together",".","gt",":","(","N",",","1",",","H",",","W",")","mask",":","(","N",",","H",",","W",")","``","''","''","return","self._compute","(","pred",",","gt",",","mask",",","weights",")"]
64
71
null
basic_loss.py
PaddleOCR/benchmark/PaddleOCR_DBNet/models/losses/basic_loss.py
import paddle import paddle.nn
15
3
2
0
3
3
1
Use image node_id 2 for calling the DiceLoss obj's underlying member method code with example usage: obj.forward(pred, gt, mask, weights) and returns: self
155
node_id 2
176,931
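The same Dice expression in NumPy, as a worked check of the formula in _compute; illustrative only, since the source operates on paddle tensors.

import numpy as np

def dice_loss(pred, gt, mask, eps=1e-6):
    # 1 - 2 * intersection / union, with every sum restricted to the mask.
    inter = (pred * gt * mask).sum()
    union = (pred * mask).sum() + (gt * mask).sum() + eps
    return 1 - 2.0 * inter / union

pred = np.array([[1.0, 1.0], [0.0, 0.0]])
gt   = np.array([[1.0, 0.0], [0.0, 0.0]])
mask = np.ones((2, 2))
print(dice_loss(pred, gt, mask))  # 1 - 2*1/(2+1) ≈ 0.3333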
test_buffer
global
null
false
df_from_dict
null
null
null
null
null
def test_buffer(df_from_dict): arr = [0, 1, -1] df = df_from_dict({"a": arr}) dfX = df.__dataframe__() colX = dfX.get_column(0) bufX = colX.get_buffers() dataBuf, dataDtype = bufX["data"] assert dataBuf.bufsize > 0 assert dataBuf.ptr != 0 device, _ = dataBuf.__dlpack_device__() # for meanings of dtype[0] see the spec; we cannot import the spec here as this # file is expected to be vendored *anywhere* assert dataDtype[0] == 0 # INT if ( device == 1 ): # CPU-only as we're going to directly read memory here bitwidth = dataDtype[1] ctype = { 8: ctypes.c_int8, 16: ctypes.c_int16, 32: ctypes.c_int32, 64: ctypes.c_int64, }[bitwidth] for idx, truth in enumerate(arr): val = ctype.from_address( dataBuf.ptr + idx * (bitwidth // 8) ).value assert val == truth, f"Buffer at index {idx} mismatch"
["def","test_buffer","(","df_from_dict",")",":","arr","=","[","0",",","1",",","-1","]","df","=","df_from_dict","(","{","``","a","''",":","arr","}",")","dfX","=","df.__dataframe__","(",")","colX","=","dfX.get_column","(","0",")","bufX","=","colX.get_buffers","(",")","dataBuf",",","dataDtype","=","bufX","[","``","data","''","]","assert","dataBuf.bufsize",">","0","assert","dataBuf.ptr","!","=","0","device",",","_","=","dataBuf.__dlpack_device__","(",")","#","for","meanings","of","dtype","[","0","]","see","the","spec",";","we","can","not","import","the","spec","here","as","this","#","file","is","expected","to","be","vendored","*","anywhere","*","assert","dataDtype","[","0","]","==","0","#","INT","if","(","device","==","1",")",":","#","CPU-only","as","we","'re","going","to","directly","read","memory","here","bitwidth","=","dataDtype","[","1","]","ctype","=","{","8",":","ctypes.c_int8",",","16",":","ctypes.c_int16",",","32",":","ctypes.c_int32",",","64",":","ctypes.c_int64",",","}","[","bitwidth","]","for","idx",",","truth","in","enumerate","(","arr",")",":","val","=","ctype.from_address","(","dataBuf.ptr","+","idx","*","(","bitwidth","\/\/","8",")",")",".value","assert","val","==","truth",",","f","''","Buffer","at","index","{","idx","}","mismatch","''"]
147
175
null
test_spec_conformance.py
pandas/pandas/tests/interchange/test_spec_conformance.py
import ctypes import math import pytest import pandas
15
null
4
11
null
null
null
Use image node_id 11 for calling a global function with example usage: test_buffer(df_from_dict) without return types
117
node_id 11
1,516,687
test_get_columns
global
null
false
df_from_dict
null
null
null
null
null
def test_get_columns(df_from_dict): df = df_from_dict({"a": [0, 1], "b": [2.5, 3.5]}) dfX = df.__dataframe__() for colX in dfX.get_columns(): assert colX.size() == 2 assert colX.num_chunks() == 1 # for meanings of dtype[0] see the spec; we cannot import the spec here as this # file is expected to be vendored *anywhere* assert dfX.get_column(0).dtype[0] == 0 # INT assert dfX.get_column(1).dtype[0] == 2
["def","test_get_columns","(","df_from_dict",")",":","df","=","df_from_dict","(","{","``","a","''",":","[","0",",","1","]",",","``","b","''",":","[","2.5",",","3.5","]","}",")","dfX","=","df.__dataframe__","(",")","for","colX","in","dfX.get_columns","(",")",":","assert","colX.size","(",")","==","2","assert","colX.num_chunks","(",")","==","1","#","for","meanings","of","dtype","[","0","]","see","the","spec",";","we","can","not","import","the","spec","here","as","this","#","file","is","expected","to","be","vendored","*","anywhere","*","assert","dfX.get_column","(","0",")",".dtype","[","0","]","==","0","#","INT","assert","dfX.get_column","(","1",")",".dtype","[","0","]","==","2"]
135
144
null
test_spec_conformance.py
pandas/pandas/tests/interchange/test_spec_conformance.py
import ctypes import math import pytest import pandas
15
null
4
11
null
null
null
Use image node_id 10 for calling a global function with example usage: test_get_columns(df_from_dict) without return types
122
node_id 10
1,516,686
test_column_get_chunks
global
null
false
size,n_chunks,df_from_dict
null
null
null
null
null
def test_column_get_chunks(size, n_chunks, df_from_dict): df = df_from_dict({"x": list(range(size))}) dfX = df.__dataframe__() chunks = list(dfX.get_column(0).get_chunks(n_chunks)) assert len(chunks) == n_chunks assert sum(chunk.size() for chunk in chunks) == size
["def","test_column_get_chunks","(","size",",","n_chunks",",","df_from_dict",")",":","df","=","df_from_dict","(","{","``","x","''",":","list","(","range","(","size",")",")","}",")","dfX","=","df.__dataframe__","(",")","chunks","=","list","(","dfX.get_column","(","0",")",".get_chunks","(","n_chunks",")",")","assert","len","(","chunks",")","==","n_chunks","assert","sum","(","chunk.size","(",")","for","chunk","in","chunks",")","==","size"]
127
132
null
test_spec_conformance.py
pandas/pandas/tests/interchange/test_spec_conformance.py
import ctypes import math import pytest import pandas
15
null
4
11
null
null
null
Use image node_id 9 for calling a global function with example usage: test_column_get_chunks(size, n_chunks, df_from_dict) without return types
143
node_id 9
1,516,685
test_df_get_chunks
global
null
false
size,n_chunks,df_from_dict
null
null
null
null
null
def test_df_get_chunks(size, n_chunks, df_from_dict): df = df_from_dict({"x": list(range(size))}) dfX = df.__dataframe__() chunks = list(dfX.get_chunks(n_chunks)) assert len(chunks) == n_chunks assert sum(chunk.num_rows() for chunk in chunks) == size
["def","test_df_get_chunks","(","size",",","n_chunks",",","df_from_dict",")",":","df","=","df_from_dict","(","{","``","x","''",":","list","(","range","(","size",")",")","}",")","dfX","=","df.__dataframe__","(",")","chunks","=","list","(","dfX.get_chunks","(","n_chunks",")",")","assert","len","(","chunks",")","==","n_chunks","assert","sum","(","chunk.num_rows","(",")","for","chunk","in","chunks",")","==","size"]
118
123
null
test_spec_conformance.py
pandas/pandas/tests/interchange/test_spec_conformance.py
import ctypes import math import pytest import pandas
15
null
4
11
null
null
null
Use image node_id 8 for calling a global function with example usage: test_df_get_chunks(size, n_chunks, df_from_dict) without return types
139
node_id 8
1,516,684
test_dataframe
global
null
false
df_from_dict
null
null
null
null
null
def test_dataframe(df_from_dict): df = df_from_dict( { "x": [True, True, False], "y": [1, 2, 0], "z": [9.2, 10.5, 11.8], } ) dfX = df.__dataframe__() assert dfX.num_columns() == 3 assert dfX.num_rows() == 3 assert dfX.num_chunks() == 1 assert list(dfX.column_names()) == ["x", "y", "z"] assert list(dfX.select_columns((0, 2)).column_names()) == list( dfX.select_columns_by_name(("x", "z")).column_names() )
["def","test_dataframe","(","df_from_dict",")",":","df","=","df_from_dict","(","{","``","x","''",":","[","True",",","True",",","False","]",",","``","y","''",":","[","1",",","2",",","0","]",",","``","z","''",":","[","9.2",",","10.5",",","11.8","]",",","}",")","dfX","=","df.__dataframe__","(",")","assert","dfX.num_columns","(",")","==","3","assert","dfX.num_rows","(",")","==","3","assert","dfX.num_chunks","(",")","==","1","assert","list","(","dfX.column_names","(",")",")","==","[","``","x","''",",","``","y","''",",","``","z","''","]","assert","list","(","dfX.select_columns","(","(","0",",","2",")",")",".column_names","(",")",")","==","list","(","dfX.select_columns_by_name","(","(","``","x","''",",","``","z","''",")",")",".column_names","(",")",")"]
102
114
null
test_spec_conformance.py
pandas/pandas/tests/interchange/test_spec_conformance.py
import ctypes import math import pytest import pandas
15
null
4
11
null
null
null
Use image node_id 7 for calling a global function with example usage: test_dataframe(df_from_dict) without return types
119
node_id 7
1,516,683
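The interchange-protocol surface these tests poke at can be reached directly from pandas (1.5 or later); a brief sketch:

import pandas as pd

df = pd.DataFrame({"x": [True, False], "y": [1, 2], "z": [9.2, 10.5]})
dfX = df.__dataframe__()                  # library-agnostic interchange object
print(dfX.num_columns(), dfX.num_rows())  # 3 2
print(list(dfX.column_names()))           # ['x', 'y', 'z']
print(dfX.get_column_by_name("y").dtype)  # (kind, bitwidth, format string, endianness)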
test_categorical
global
null
false
df_from_dict
null
null
null
null
null
def test_categorical(df_from_dict): df = df_from_dict( { "weekday": [ "Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", "Sat", "Sun", ] }, is_categorical=True, ) colX = df.__dataframe__().get_column_by_name("weekday") categorical = colX.describe_categorical assert isinstance(categorical["is_ordered"], bool) assert isinstance(categorical["is_dictionary"], bool)
["def","test_categorical","(","df_from_dict",")",":","df","=","df_from_dict","(","{","``","weekday","''",":","[","``","Mon","''",",","``","Tue","''",",","``","Mon","''",",","``","Wed","''",",","``","Mon","''",",","``","Thu","''",",","``","Fri","''",",","``","Sat","''",",","``","Sun","''",",","]","}",",","is_categorical=True",",",")","colX","=","df.__dataframe__","(",")",".get_column_by_name","(","``","weekday","''",")","categorical","=","colX.describe_categorical","assert","isinstance","(","categorical","[","``","is_ordered","''","]",",","bool",")","assert","isinstance","(","categorical","[","``","is_dictionary","''","]",",","bool",")"]
90
99
null
test_spec_conformance.py
pandas/pandas/tests/interchange/test_spec_conformance.py
import ctypes import math import pytest import pandas
15
null
4
11
null
null
null
Use image node_id 6 for calling a global function with example usage: test_categorical(df_from_dict) without return types
121
node_id 6
1,516,682
test_noncategorical
global
null
false
df_from_dict
null
null
null
null
null
def test_noncategorical(df_from_dict): df = df_from_dict({"a": [1, 2, 3]}) dfX = df.__dataframe__() colX = dfX.get_column_by_name("a") with pytest.raises(TypeError, match=".*categorical.*"): colX.describe_categorical
["def","test_noncategorical","(","df_from_dict",")",":","df","=","df_from_dict","(","{","``","a","''",":","[","1",",","2",",","3","]","}",")","dfX","=","df.__dataframe__","(",")","colX","=","dfX.get_column_by_name","(","``","a","''",")","with","pytest.raises","(","TypeError",",","match=","''",".","*","categorical",".","*","''",")",":","colX.describe_categorical"]
82
87
null
test_spec_conformance.py
pandas/pandas/tests/interchange/test_spec_conformance.py
import ctypes import math import pytest import pandas
15
null
4
11
null
null
null
Use image node_id 5 for calling a global function with example usage: test_noncategorical(df_from_dict) without return types
124
node_id 5
1,516,681
test_na_float
global
null
false
df_from_dict
null
null
null
null
null
def test_na_float(df_from_dict): df = df_from_dict({"a": [1.0, math.nan, 2.0]}) dfX = df.__dataframe__() colX = dfX.get_column_by_name("a") assert colX.null_count == 1 assert isinstance(colX.null_count, int)
["def","test_na_float","(","df_from_dict",")",":","df","=","df_from_dict","(","{","``","a","''",":","[","1.0",",","math.nan",",","2.0","]","}",")","dfX","=","df.__dataframe__","(",")","colX","=","dfX.get_column_by_name","(","``","a","''",")","assert","colX.null_count","==","1","assert","isinstance","(","colX.null_count",",","int",")"]
74
79
null
test_spec_conformance.py
pandas/pandas/tests/interchange/test_spec_conformance.py
import ctypes import math import pytest import pandas
15
null
4
11
null
null
null
Use image node_id 4 for calling a global function with example usage: test_na_float(df_from_dict) without return types
118
node_id 4
1,516,680
errno_from_exception
global
null
false
e
null
null
null
null
e,e,None
def errno_from_exception(e): """Provides the errno from an Exception object. There are cases that the errno attribute was not set so we pull the errno out of the args but if someone instantiates an Exception without any args you will get a tuple error. So this function abstracts all that behavior to give you a safe way to get the errno. """ if hasattr(e, "errno"): return e.errno elif e.args: return e.args[0] else: return None
["def","errno_from_exception","(","e",")",":","``","''","''","Provides","the","errno","from","an","Exception","object",".","There","are","cases","that","the","errno","attribute","was","not","set","so","we","pull","the","errno","out","of","the","args","but","if","someone","instantiates","an","Exception","without","any","args","you","will","get","a","tuple","error",".","So","this","function","abstracts","all","that","behavior","to","give","you","a","safe","way","to","get","the","errno.","``","''","''","if","hasattr","(","e",",","``","errno","''",")",":","return","e.errno","elif","e.args",":","return","e.args","[","0","]","else",":","return","None"]
97
112
null
util.py
catboost/contrib/python/pyzmq/py2/zmq/eventloop/minitornado/util.py
from __future__ import absolute_import, division, print_function, with_statement import sys
15
null
2
3
null
null
null
Use image node_id 2 for calling a global function with example usage: errno_from_exception(e) and returns: e, e, None
117
node_id 2
515,118
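Restating the helper so its three branches can be exercised in isolation:

import errno

def errno_from_exception(e):
    # Same logic as above: prefer .errno, fall back to args[0], else None.
    if hasattr(e, "errno"):
        return e.errno
    elif e.args:
        return e.args[0]
    return None

print(errno_from_exception(OSError(errno.ECONNRESET, "reset")))  # via e.errno
print(errno_from_exception(Exception(errno.EINTR)))              # via e.args[0]
print(errno_from_exception(Exception()))                         # None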
timedelta_to_seconds
global
null
false
td
null
null
null
null
unknown
def timedelta_to_seconds(td): """Equivalent to td.total_seconds() (introduced in python 2.7).""" return ( td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6 ) / float(10**6)
["def","timedelta_to_seconds","(","td",")",":","``","''","''","Equivalent","to","td.total_seconds","(",")","(","introduced","in","python","2.7",")",".","''","''","''","return","(","td.microseconds","+","(","td.seconds","+","td.days","*","24","*","3600",")","*","10","*","*","6",")","\/","float","(","10","*","*","6",")"]
214
216
null
util.py
catboost/contrib/python/pyzmq/py2/zmq/eventloop/minitornado/util.py
from __future__ import absolute_import, division, print_function, with_statement import sys
15
null
2
3
null
null
null
Use image node_id 3 for calling a global function with example usage: timedelta_to_seconds(td) and returns: unknown
115
node_id 3
515,119
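A quick check that the backport formula matches total_seconds():

from datetime import timedelta

td = timedelta(days=1, seconds=30, microseconds=500000)
manual = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / float(10**6)
print(manual)               # 86430.5
print(td.total_seconds())   # 86430.5 on Python >= 2.7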
reset
DatasetPunctuationErrorRate
null
true
self
Class for computing the total punctuation-related absolute amounts of operations and their rates in pairs of reference and hypothesis strings: - Absolute amounts of correct predictions, deletions, insertions and substitutions for each given punctuation mark - Rates of correct predictions, deletions, insertions and substitutions for each given punctuation mark - Total rates of correct predictions, deletions, insertions and substitutions in pairs of reference and hypothesis strings - Punctuation Error Rate Args to init: references (list[str]) - list of references hypotheses (list[str]) - list of hypotheses punctuation_marks (list[str]) - list of punctuation marks for computing metrics punctuation_mask (str, by default "[PUNCT]") - mask token that will be applied to given punctuation marks during edit distance calculation How to use: 1. Create an object of the DatasetPunctuationErrorRate class. Example: references = ["Hi, dear! Nice to see you. What's"] hypotheses = ["Hi dear! Nice to see you! What's?"] punctuation_marks = [".", ",", "!", "?"] dper_obj = DatasetPunctuationErrorRate(references, hypotheses, punctuation_marks) 2. To compute punctuation metrics, call the class method "compute()". Example: dper_obj.compute() Result: The following attributes of the class object will be updated with calculated metrics values. The values are available by calling the attributes: dper_obj.operation_rates - dict, rates of correctness and errors for each punctuation mark from `preset dper_obj.punctuation_marks` list. dper_obj.substitution_rates - dict, substitution rates between punctuation marks from `preset dper_obj.punctuation_marks` list. dper_obj.correct_rate - float, total rate of correctness between provided pairs of references and hypotheses. dper_obj.deletions_rate - float, total rate of deletions between provided pairs of references and hypotheses. dper_obj.insertions_rate - float, total rate of insertions between provided pairs of references and hypotheses. dper_obj.substitutions_rate - float, total rate of substitutions between provided pairs of references and hypotheses. dper_obj.punct_er - float, total Punctuation Error Rate between provided pairs of references and hypotheses.
["Class","for","computation","the","total","puncutation-related","absolute","amounts","of","operations","and","their","rates","in","pairs","of","reference","and","hypothesis","strins",":","-","Absolute","amounts","of","correct","predictions",",","deletions",",","insertions","and","substitutions","for","each","given","punctuation","mark","-","Rates","of","correct","predictions",",","deletions",",","insertions","and","substitutions","for","each","given","punctuation","mark","-","Total","rates","of","correct","predictions",",","deletions",",","insertions","and","substiturions","in","pairs","of","reference","and","hypothesis","strings","-","Punctuation","Error","Rate","Args","to","init",":","references","(","list","[","str","]",")","-","list","of","references","hypotheses","(","list","[","str","]",")","-","list","of","hypotheses","punctuation_marks","(","list","[","str","]",")","-","list","of","punctuation","marks","for","computing","metrics","punctuation_mask","(","str",",","by","default","``","[","PUNCT","]","''",")","-","mask","token","that","will","be","applied","to","given","punctuation","marks","while","edit","distance","calculation","How","to","use",":","1",".","Create","object","of","DatasetPunctuationErrorRate","class",".","Example",":","references","=","[","``","Hi",",","dear","!","Nice","to","see","you",".","What","'s","''","]","hypotheses","=","[","``","Hi","dear","!","Nice","to","see","you","!","What","'s","?","''","]","punctuation_marks","=","[","``",".","``",",","``",",","''",",","``","!","``",",","``","?","''","]","dper_obj","=","DatasetPunctuationErrorRate","(","references",",","hypotheses",",","punctuation_marks",")","2",".","To","compute","punctuation","metrics",",","call","the","class","method","``","compute","(",")","''",".","Example",":","dper_obj.compute","(",")","Result",":","The","following","atributes","of","class","object","will","be","updated","with","calculated","metrics","values",".","The","values","are","available","with","calling","the","atributes",":","dper_obj.operation_rates","-","dict",",","rates","of","correctness","and","errors","for","each","punctuation","mark","from","`","preset","dper_obj.punctuation_marks","`","list",".","dper_obj.substitution_rates","-","dict",",","substitution","rates","between","puncutation","marks","from","`","preset","dper_obj.punctuation_marks","`","list",".","dper_obj.correct_rate","-","float",",","total","rate","of","correctness","between","provided","pairs","of","references","and","hypotheses",".","dper_obj.deletions_rate","-","float",",","total","rate","of","deletions","between","provided","pairs","of","references","and","hypotheses",".","dper_obj.insertions_rate","-","float",",","total","rate","of","insertions","between","provided","pairs","of","references","and","hypotheses",".","dper_obj.substitutions_rate","-","float",",","total","rate","of","substitutions","between","provided","pairs","of","references","and","hypotheses",".","dper_obj.punct_er","-","float",",","total","Punctuation","Error","Rate","between","provided","pairs","of","references","and","hypotheses","."]
null
null
null
def reset(self): self.operation_amounts = [] self.substitution_amounts = [] self.rates = [] self.operation_rates = None self.substitution_rates = None self.correct_rate = None self.deletions_rate = None self.insertions_rate = None self.substitutions_rate = None self.punct_er = None
["def","reset","(","self",")",":","self.operation_amounts","=","[","]","self.substitution_amounts","=","[","]","self.rates","=","[","]","self.operation_rates","=","None","self.substitution_rates","=","None","self.correct_rate","=","None","self.deletions_rate","=","None","self.insertions_rate","=","None","self.substitutions_rate","=","None","self.punct_er","=","None"]
440
451
null
punct_er.py
NeMo/nemo/collections/common/metrics/punct_er.py
import re from collections import namedtuple from tqdm import tqdm from nemo.utils import logging
15
2
4
1
0
4
null
Use image node_id 3 for calling the DatasetPunctuationErrorRate obj's underlying member method code with example usage: obj.reset() without return types
152
node_id 3
135,830
bernoulli
global
null
false
probs
null
null
null
null
jax
def bernoulli( probs: Union[float, JaxArray], *, logits: Optional[Union[float, JaxArray]] = None, shape: Optional[Union[ivy.NativeArray, Sequence[int]]] = None, device: Optional[jaxlib.xla_extension.Device] = None, dtype: Optional[jnp.dtype] = None, seed: Optional[int] = None, out: Optional[JaxArray] = None, ) -> JaxArray: if seed: rng_input = jax.random.PRNGKey(seed) else: RNG_, rng_input = jax.random.split(_getRNG()) _setRNG(RNG_) if logits is not None: probs = jax.nn.softmax(logits, axis=-1) if hasattr(probs, "shape") and not _check_shapes_broadcastable( shape, probs.shape ): shape = probs.shape return jax.random.bernoulli(rng_input, probs, shape=shape)
["def","bernoulli","(","probs",":","Union","[","float",",","JaxArray","]",",","*",",","logits",":","Optional","[","Union","[","float",",","JaxArray","]","]","=","None",",","shape",":","Optional","[","Union","[","ivy.NativeArray",",","Sequence","[","int","]","]","]","=","None",",","device",":","Optional","[","jaxlib.xla_extension.Device","]","=","None",",","dtype",":","Optional","[","jnp.dtype","]","=","None",",","seed",":","Optional","[","int","]","=","None",",","out",":","Optional","[","JaxArray","]","=","None",",",")","-",">","JaxArray",":","if","seed",":","rng_input","=","jax.random.PRNGKey","(","seed",")","else",":","RNG_",",","rng_input","=","jax.random.split","(","_getRNG","(",")",")","_setRNG","(","RNG_",")","if","logits","is","not","None",":","probs","=","jax.nn.softmax","(","logits",",","axis=-1",")","if","hasattr","(","probs",",","``","shape","''",")","and","not","_check_shapes_broadcastable","(","shape",",","probs.shape",")",":","shape","=","probs.shape","return","jax.random.bernoulli","(","rng_input",",","probs",",","shape=shape",")"]
110
129
null
random.py
ivy/ivy/functional/backends/jax/experimental/random.py
from typing import Optional, Union, Sequence import jax.numpy import jax import jaxlib.xla_extension import ivy from ivy.functional.backends.jax import JaxArray from ivy.functional.backends.jax.random import RNG, _setRNG, _getRNG from ivy.functional.ivy.random import _check_bounds_and_get_shape, _check_shapes_broadcastable from ivy.func_wrapper import with_unsupported_dtypes from ..None import backend_version
15
null
10
5
null
null
null
Use image node_id 5 for calling a global function with example usage: bernoulli(probs) and returns: jax
103
node_id 5
1,194,968
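Stripped of the backend plumbing (RNG state handling, shape broadcasting), the sampling path above reduces to plain jax.random calls; a sketch assuming JAX is installed:

import jax
import jax.numpy as jnp

key = jax.random.PRNGKey(0)                          # explicit seed, as in the seed branch
probs = jax.nn.softmax(jnp.array([0.1, 2.0, -1.0]))  # the logits -> probs conversion
print(jax.random.bernoulli(key, probs, shape=probs.shape))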
input_combo
global
null
false
request
null
null
null
null
parameters
def input_combo(request): """ Simply return the current combination of params as a dictionary for use in tests or other parameterized fixtures. """ parameters = dict( zip( ("graph_file", "core_number", "degree_type"), request.param, ) ) return parameters
["def","input_combo","(","request",")",":","``","''","''","Simply","return","the","current","combination","of","params","as","a","dictionary","for","use","in","tests","or","other","parameterized","fixtures.","``","''","''","parameters","=","dict","(","zip","(","(","``","graph_file","''",",","``","core_number","''",",","``","degree_type","''",")",",","request.param",",",")",")","return","parameters"]
48
55
null
test_k_core_mg.py
cugraph/python/cugraph/cugraph/tests/core/test_k_core_mg.py
import gc import pytest import dask_cudf import cugraph import cugraph.dask from cugraph.testing import utils from cudf.testing.testing import assert_frame_equal from cugraph.structure.symmetrize import symmetrize_df from pylibcugraph.testing import gen_fixture_params_product
15
null
9
6
null
null
null
Use image node_id 2 for calling a global function with example usage: input_combo(request) and returns: parameters
114
node_id 2
686,799
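The zip/dict idiom in the fixture maps one pytest param tuple onto named fields; the tuple values here are hypothetical stand-ins for request.param:

param = ("karate.csv", True, "bidirectional")  # stand-in for request.param
combo = dict(zip(("graph_file", "core_number", "degree_type"), param))
print(combo)
# {'graph_file': 'karate.csv', 'core_number': True, 'degree_type': 'bidirectional'}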
print
DatasetPunctuationErrorRate
null
true
self
Class for computing the total punctuation-related absolute amounts of operations and their rates in pairs of reference and hypothesis strings: - Absolute amounts of correct predictions, deletions, insertions and substitutions for each given punctuation mark - Rates of correct predictions, deletions, insertions and substitutions for each given punctuation mark - Total rates of correct predictions, deletions, insertions and substitutions in pairs of reference and hypothesis strings - Punctuation Error Rate Args to init: references (list[str]) - list of references hypotheses (list[str]) - list of hypotheses punctuation_marks (list[str]) - list of punctuation marks for computing metrics punctuation_mask (str, by default "[PUNCT]") - mask token that will be applied to given punctuation marks during edit distance calculation How to use: 1. Create an object of the DatasetPunctuationErrorRate class. Example: references = ["Hi, dear! Nice to see you. What's"] hypotheses = ["Hi dear! Nice to see you! What's?"] punctuation_marks = [".", ",", "!", "?"] dper_obj = DatasetPunctuationErrorRate(references, hypotheses, punctuation_marks) 2. To compute punctuation metrics, call the class method "compute()". Example: dper_obj.compute() Result: The following attributes of the class object will be updated with calculated metrics values. The values are available by calling the attributes: dper_obj.operation_rates - dict, rates of correctness and errors for each punctuation mark from `preset dper_obj.punctuation_marks` list. dper_obj.substitution_rates - dict, substitution rates between punctuation marks from `preset dper_obj.punctuation_marks` list. dper_obj.correct_rate - float, total rate of correctness between provided pairs of references and hypotheses. dper_obj.deletions_rate - float, total rate of deletions between provided pairs of references and hypotheses. dper_obj.insertions_rate - float, total rate of insertions between provided pairs of references and hypotheses. dper_obj.substitutions_rate - float, total rate of substitutions between provided pairs of references and hypotheses. dper_obj.punct_er - float, total Punctuation Error Rate between provided pairs of references and hypotheses.
["Class","for","computation","the","total","puncutation-related","absolute","amounts","of","operations","and","their","rates","in","pairs","of","reference","and","hypothesis","strins",":","-","Absolute","amounts","of","correct","predictions",",","deletions",",","insertions","and","substitutions","for","each","given","punctuation","mark","-","Rates","of","correct","predictions",",","deletions",",","insertions","and","substitutions","for","each","given","punctuation","mark","-","Total","rates","of","correct","predictions",",","deletions",",","insertions","and","substiturions","in","pairs","of","reference","and","hypothesis","strings","-","Punctuation","Error","Rate","Args","to","init",":","references","(","list","[","str","]",")","-","list","of","references","hypotheses","(","list","[","str","]",")","-","list","of","hypotheses","punctuation_marks","(","list","[","str","]",")","-","list","of","punctuation","marks","for","computing","metrics","punctuation_mask","(","str",",","by","default","``","[","PUNCT","]","''",")","-","mask","token","that","will","be","applied","to","given","punctuation","marks","while","edit","distance","calculation","How","to","use",":","1",".","Create","object","of","DatasetPunctuationErrorRate","class",".","Example",":","references","=","[","``","Hi",",","dear","!","Nice","to","see","you",".","What","'s","''","]","hypotheses","=","[","``","Hi","dear","!","Nice","to","see","you","!","What","'s","?","''","]","punctuation_marks","=","[","``",".","``",",","``",",","''",",","``","!","``",",","``","?","''","]","dper_obj","=","DatasetPunctuationErrorRate","(","references",",","hypotheses",",","punctuation_marks",")","2",".","To","compute","punctuation","metrics",",","call","the","class","method","``","compute","(",")","''",".","Example",":","dper_obj.compute","(",")","Result",":","The","following","atributes","of","class","object","will","be","updated","with","calculated","metrics","values",".","The","values","are","available","with","calling","the","atributes",":","dper_obj.operation_rates","-","dict",",","rates","of","correctness","and","errors","for","each","punctuation","mark","from","`","preset","dper_obj.punctuation_marks","`","list",".","dper_obj.substitution_rates","-","dict",",","substitution","rates","between","puncutation","marks","from","`","preset","dper_obj.punctuation_marks","`","list",".","dper_obj.correct_rate","-","float",",","total","rate","of","correctness","between","provided","pairs","of","references","and","hypotheses",".","dper_obj.deletions_rate","-","float",",","total","rate","of","deletions","between","provided","pairs","of","references","and","hypotheses",".","dper_obj.insertions_rate","-","float",",","total","rate","of","insertions","between","provided","pairs","of","references","and","hypotheses",".","dper_obj.substitutions_rate","-","float",",","total","rate","of","substitutions","between","provided","pairs","of","references","and","hypotheses",".","dper_obj.punct_er","-","float",",","total","Punctuation","Error","Rate","between","provided","pairs","of","references","and","hypotheses","."]
null
null
null
def print(self): logging.info( f"Dataset PER " + str(round(100 * self.punct_er, 2)) + "%" ) if HAVE_TABLUATE_AND_PANDAS: rates_by_pm_df = pd.DataFrame(self.operation_rates) * 100 substitution_rates_by_pm_df = ( pd.DataFrame(self.substitution_rates) * 100 ) logging.info( "Rates of punctuation correctness and errors (%):\n" + tabulate( rates_by_pm_df, headers="keys", tablefmt="psql" ) ) logging.info( "Substitution rates between punctuation marks (%):\n" + tabulate( substitution_rates_by_pm_df, headers="keys", tablefmt="psql", ) ) else: logging.warning( "Some of the modules (pandas or tabulate) can't be imported" ) logging.info( f"Rates of punctuation correctness and errors (in range [0, 1]):\n{self.operation_rates}\n" ) logging.info( f"Substitution rates between punctuation marks (in range [0, 1]):\n{self.substitution_rates}\n" )
["def","print","(","self",")",":","logging.info","(","f","''","Dataset","PER","``","+","str","(","round","(","100","*","self.punct_er",",","2",")",")","+","``","%","''",")","if","HAVE_TABLUATE_AND_PANDAS",":","rates_by_pm_df","=","pd.DataFrame","(","self.operation_rates",")","*","100","substitution_rates_by_pm_df","=","(","pd.DataFrame","(","self.substitution_rates",")","*","100",")","logging.info","(","``","Rates","of","punctuation","correctness","and","errors","(","%",")",":","\\n","''","+","tabulate","(","rates_by_pm_df",",","headers=","''","keys","''",",","tablefmt=","''","psql","''",")",")","logging.info","(","``","Substitution","rates","between","punctuation","marks","(","%",")",":","\\n","''","+","tabulate","(","substitution_rates_by_pm_df",",","headers=","''","keys","''",",","tablefmt=","''","psql","''",",",")",")","else",":","logging.warning","(","``","Some","of","the","modules","(","pandas","or","tabulate",")","ca","n't","be","imported","''",")","logging.info","(","f","''","Rates","of","punctuation","correctness","and","errors","(","in","range","[","0",",","1","]",")",":","\\n","{","self.operation_rates","}","\\n","''",")","logging.info","(","f","''","Substitution","rates","between","punctuation","marks","(","in","range","[","0",",","1","]",")",":","\\n","{","self.substitution_rates","}","\\n","''",")"]
453
473
null
punct_er.py
NeMo/nemo/collections/common/metrics/punct_er.py
import re from collections import namedtuple from tqdm import tqdm from nemo.utils import logging
15
2
4
1
0
4
null
Use image node_id 4 for calling the DatasetPunctuationErrorRate obj's underlying member method code with example usage: obj.print() without return types
152
node_id 4
135,831
sample_batch
EpisodeReplayBuffer
object
true
self,batch_size
null
null
null
null
s_batch, a_batch, r_batch, t_batch, obs_batch, available_actions_batch, filled_batch
def sample_batch(self, batch_size): batch = [] if self.count < batch_size: batch = random.sample(self.buffer, self.count) else: batch = random.sample(self.buffer, batch_size) ( s_batch, a_batch, r_batch, t_batch, obs_batch, available_actions_batch, filled_batch, ) = ([], [], [], [], [], [], []) for episode in batch: ( s, a, r, t, obs, available_actions, filled, ) = episode.get_data() s_batch.append(s) a_batch.append(a) r_batch.append(r) t_batch.append(t) obs_batch.append(obs) available_actions_batch.append(available_actions) filled_batch.append(filled) filled_batch = np.array(filled_batch) r_batch = np.array(r_batch) t_batch = np.array(t_batch) a_batch = np.array(a_batch) obs_batch = np.array(obs_batch) available_actions_batch = np.array(available_actions_batch) return ( s_batch, a_batch, r_batch, t_batch, obs_batch, available_actions_batch, filled_batch, )
["def","sample_batch","(","self",",","batch_size",")",":","batch","=","[","]","if","self.count","<","batch_size",":","batch","=","random.sample","(","self.buffer",",","self.count",")","else",":","batch","=","random.sample","(","self.buffer",",","batch_size",")","(","s_batch",",","a_batch",",","r_batch",",","t_batch",",","obs_batch",",","available_actions_batch",",","filled_batch",",",")","=","(","[","]",",","[","]",",","[","]",",","[","]",",","[","]",",","[","]",",","[","]",")","for","episode","in","batch",":","(","s",",","a",",","r",",","t",",","obs",",","available_actions",",","filled",",",")","=","episode.get_data","(",")","s_batch.append","(","s",")","a_batch.append","(","a",")","r_batch.append","(","r",")","t_batch.append","(","t",")","obs_batch.append","(","obs",")","available_actions_batch.append","(","available_actions",")","filled_batch.append","(","filled",")","filled_batch","=","np.array","(","filled_batch",")","r_batch","=","np.array","(","r_batch",")","t_batch","=","np.array","(","t_batch",")","a_batch","=","np.array","(","a_batch",")","obs_batch","=","np.array","(","obs_batch",")","available_actions_batch","=","np.array","(","available_actions_batch",")","return","(","s_batch",",","a_batch",",","r_batch",",","t_batch",",","obs_batch",",","available_actions_batch",",","filled_batch",",",")"]
67
94
null
replay_buffer.py
PARL/benchmark/torch/qmix/replay_buffer.py
from collections import deque import numpy import random
15
2
3
0
2
4
1
Use image node_id 4 for calling the EpisodeReplayBuffer obj's underlying member method code with example usage: obj.sample_batch(batch_size) and returns: s_batch, a_batch, r_batch, t_batch, obs_batch, available_actions_batch, filled_batch
244
node_id 4
154,123
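Two details of sample_batch in miniature: the count guard keeps random.sample from over-drawing, and per-episode arrays stack into a leading batch dimension.

import random
import numpy as np

buffer = ["ep0", "ep1", "ep2"]
batch = random.sample(buffer, min(len(buffer), 5))  # same effect as the count check
print(len(batch))                                   # 3, not 5

r_batch = np.array([np.zeros((4, 1)), np.ones((4, 1))])
print(r_batch.shape)                                # (2, 4, 1): [episode, time, feature]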
count
EpisodeReplayBuffer
object
true
self
null
null
null
null
len
def count(self): return len(self.buffer)
["def","count","(","self",")",":","return","len","(","self.buffer",")"]
64
65
null
replay_buffer.py
PARL/benchmark/torch/qmix/replay_buffer.py
from collections import deque import numpy import random
15
2
3
0
2
4
1
Use image node_id 3 for calling the EpisodeReplayBuffer obj's underlying member method code with example usage: obj.count() and returns: len
140
node_id 3
154,122
_format_logs
NotificationCallback
Callback
true
logs
Send a notification to a channel at the beginning/ending of the training/testing and at a constant frequency (`alert_frequency`) during the training. Args: notificator (~poutyne.Notificator): The notification channel to send the message. The expected interface needs to implement a `send_notification` method to send the message. You can see the `notif <https://notificationdoc.ca/index.html>`_ package which implements some Notificator respecting the interface. alert_frequency (int): The frequency (in epoch), during training, to send an update. By default, 1. experiment_name (Union[str, None]): The name of the experiment to add to the message. By default, None. Example: .. code-block:: python from notif.notificator import SlackNotificator from poutyne.framework.callbacks.notification import NotificationCallback webhook_url = "a_link" slack_notif = SlackNotificator(webhook_url=webhook_url) notif_callback = NotificationCallback(notificator=slack_notif) model = Model(...) model.fit_generator(..., callbacks=[notif_callback])
["Send","a","notification","to","a","channel","at","the","beginning\/ending","of","the","training\/testing","and","at","a","constant","frequency","(","`","alert_frequency","`",")","during","the","training",".","Args",":","notificator","(","~poutyne.Notificator",")",":","The","notification","channel","to","send","the","message",".","The","expected","interface","need","to","implement","a","`","send_notification","`","method","to","send","the","message",".","You","can","see","the","`","notif","<","https",":","\/\/notificationdoc.ca\/index.html",">","`","_","package","which","implements","some","Notificator","respecting","the","interface",".","alert_frequency","(","int",")",":","The","frequency","(","in","epoch",")",",","during","training",",","to","send","an","update",".","By","default",",","1.","experiment_name","(","Union","[","str",",","None","]",")",":","The","name","of","the","experiment","to","add","to","the","message",".","By","default",",","None",".","Example",":","..","code-block",":",":","python","from","notif.notificator","import","SlackNotificator","from","poutyne.framework.callbacks.notification","import","NotificationCallback","webhook_url","=","``","a_link","''","slack_notif","=","SlackNotificator","(","webhook_url=webhook_url",")","notif_callback","=","NotificationCallback","(","notificator=slack_notif",")","model","=","Model","(","...",")","model.fit_generator","(","...",",","callbacks=","[","notif_callback","]",")"]
null
null
str
def _format_logs(logs: Dict) -> str: return " ".join( [f"{key}: {value}\n" for key, value in logs.items()] )
["def","_format_logs","(","logs",":","Dict",")","-",">","str",":","return","``","``",".join","(","[","f","''","{","key","}",":","{","value","}","\\n","''","for","key",",","value","in","logs.items","(",")","]",")"]
130
131
null
notification.py
poutyne/poutyne/framework/callbacks/notification.py
from abc import ABC, abstractmethod from typing import Dict, Union from poutyne.framework.callbacks.callbacks import Callback
15
2
3
0
2
7
1
Use image node_id 7 for calling the NotificationCallback obj's underlying member method code with example usage: obj._format_logs(logs) and returns: str
152
node_id 7
1,602,783
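What the static formatter produces for a typical logs dict:

logs = {"epoch": 2, "loss": 0.4321, "val_loss": 0.5678}
message = " ".join([f"{key}: {value}\n" for key, value in logs.items()])
print(message)
# epoch: 2
#  loss: 0.4321
#  val_loss: 0.5678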
test_knn_retrieval_non_verbose
global
null
false
null
null
null
null
null
def test_knn_retrieval_non_verbose(): annoy_index_filepath = "tests/data/.test-annoy-index.index" expected_neighbour_list = np.load("tests/data/test_knn_k3.npy") iris = datasets.load_iris() X = iris.data k = 3 search_k = -1 index = AnnoyKnnMatrix.load( annoy_index_filepath, X.shape, k=k, search_k=search_k, verbose=0, ) neighbour_list = extract_knn(index) assert np.all(expected_neighbour_list == neighbour_list)
["def","test_knn_retrieval_non_verbose","(",")",":","annoy_index_filepath","=","``","tests\/data\/.test-annoy-index.index","''","expected_neighbour_list","=","np.load","(","``","tests\/data\/test_knn_k3.npy","''",")","iris","=","datasets.load_iris","(",")","X","=","iris.data","k","=","3","search_k","=","-1","index","=","AnnoyKnnMatrix.load","(","annoy_index_filepath",",","X.shape",",","k=k",",","search_k=search_k",",","verbose=0",",",")","neighbour_list","=","extract_knn","(","index",")","assert","np.all","(","expected_neighbour_list","==","neighbour_list",")"]
82
95
null
test_knn.py
ivis/tests/data/test_knn.py
import tempfile import os import pytest from annoy import AnnoyIndex from scipy.sparse import csr_matrix from sklearn import datasets import numpy from ivis.data.neighbour_retrieval import AnnoyKnnMatrix from ivis.data.neighbour_retrieval.knn import build_annoy_index, extract_knn
15
null
9
6
null
null
null
Use image node_id 6 for calling a global function with example usage: test_knn_retrieval_non_verbose() without return types
123
node_id 6
1,192,744
test_knn_matrix_construction_params
global
null
false
annoy_index_file
null
null
null
null
null
def test_knn_matrix_construction_params(annoy_index_file): # Test too large k raises exception with pytest.raises(Exception): AnnoyKnnMatrix.build( np.zeros(shape=(4, 4)), annoy_index_file, k=4 ) with pytest.raises(Exception): AnnoyKnnMatrix.load(annoy_index_file, (4, 4), k=4) index = AnnoyKnnMatrix.build( np.zeros(shape=(4, 4)), annoy_index_file, k=2 ) loaded_index = AnnoyKnnMatrix.load(annoy_index_file, (4, 4), k=2) for original_row, loaded_row in zip(index, loaded_index): assert original_row == loaded_row
["def","test_knn_matrix_construction_params","(","annoy_index_file",")",":","#","Test","too","large","k","raises","exception","with","pytest.raises","(","Exception",")",":","AnnoyKnnMatrix.build","(","np.zeros","(","shape=","(","4",",","4",")",")",",","annoy_index_file",",","k=4",")","with","pytest.raises","(","Exception",")",":","AnnoyKnnMatrix.load","(","annoy_index_file",",","(","4",",","4",")",",","k=4",")","index","=","AnnoyKnnMatrix.build","(","np.zeros","(","shape=","(","4",",","4",")",")",",","annoy_index_file",",","k=2",")","loaded_index","=","AnnoyKnnMatrix.load","(","annoy_index_file",",","(","4",",","4",")",",","k=2",")","for","original_row",",","loaded_row","in","zip","(","index",",","loaded_index",")",":","assert","original_row","==","loaded_row"]
69
80
null
test_knn.py
ivis/tests/data/test_knn.py
import tempfile import os import pytest from annoy import AnnoyIndex from scipy.sparse import csr_matrix from sklearn import datasets import numpy from ivis.data.neighbour_retrieval import AnnoyKnnMatrix from ivis.data.neighbour_retrieval.knn import build_annoy_index, extract_knn
15
null
9
6
null
null
null
Use image node_id 5 for calling a global function with example usage: test_knn_matrix_construction_params(annoy_index_file) without return types
144
node_id 5
1,192,743
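A sketch of the constraint this test pins down: a k as large as the sample count raises, for both build and load (the index path is a placeholder):

import numpy as np
from ivis.data.neighbour_retrieval import AnnoyKnnMatrix

data = np.zeros(shape=(4, 4))  # 4 samples
index = AnnoyKnnMatrix.build(data, "/tmp/annoy.index", k=2)  # ok: k < 4
# AnnoyKnnMatrix.build(data, "/tmp/annoy.index", k=4) would raise, as the test asserts.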
test_knn_retrieval
global
null
false
null
null
null
null
null
def test_knn_retrieval(): annoy_index_filepath = "tests/data/.test-annoy-index.index" expected_neighbour_list = np.load("tests/data/test_knn_k3.npy") iris = datasets.load_iris() X = iris.data k = 3 search_k = -1 index = AnnoyKnnMatrix.load( annoy_index_filepath, X.shape, k=k, search_k=search_k ) neighbour_list = extract_knn(index) assert np.all(expected_neighbour_list == neighbour_list)
["def","test_knn_retrieval","(",")",":","annoy_index_filepath","=","``","tests\/data\/.test-annoy-index.index","''","expected_neighbour_list","=","np.load","(","``","tests\/data\/test_knn_k3.npy","''",")","iris","=","datasets.load_iris","(",")","X","=","iris.data","k","=","3","search_k","=","-1","index","=","AnnoyKnnMatrix.load","(","annoy_index_filepath",",","X.shape",",","k=k",",","search_k=search_k",")","neighbour_list","=","extract_knn","(","index",")","assert","np.all","(","expected_neighbour_list","==","neighbour_list",")"]
53
66
null
test_knn.py
ivis/tests/data/test_knn.py
import tempfile import os import pytest from annoy import AnnoyIndex from scipy.sparse import csr_matrix from sklearn import datasets import numpy from ivis.data.neighbour_retrieval import AnnoyKnnMatrix from ivis.data.neighbour_retrieval.knn import build_annoy_index, extract_knn
15
null
9
6
null
null
null
Use image node_id 4 for calling a global function with example usage: test_knn_retrieval() without return types
111
node_id 4
1,192,742
add
EpisodeReplayBuffer
object
true
self,episode_experience
null
null
null
null
null
def add(self, episode_experience): self.buffer.append(episode_experience)
["def","add","(","self",",","episode_experience",")",":","self.buffer.append","(","episode_experience",")"]
60
61
null
replay_buffer.py
PARL/benchmark/torch/qmix/replay_buffer.py
from collections import deque import numpy import random
15
2
3
0
2
4
1
Use image node_id 2 for calling the EpisodeReplayBuffer obj's underlying member method code with example usage: obj.add(episode_experience) without return types
160
node_id 2
154,121
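A minimal usage sketch; episode_experience would typically be an EpisodeExperience instance (defined later in this file), though any object can be appended:

buffer = EpisodeReplayBuffer(max_buffer_size=100)
episode = EpisodeExperience(episode_len=50)
buffer.add(episode)  # appends to the underlying deque
assert len(buffer.buffer) == 1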
test_dense_annoy_index
global
null
false
annoy_index_file
null
null
null
null
null
def test_dense_annoy_index(annoy_index_file): data = np.random.choice([0, 1], size=(10, 5)) index = build_annoy_index(data, annoy_index_file) assert os.path.exists(annoy_index_file) loaded_index = AnnoyIndex(5, metric="angular") loaded_index.load(annoy_index_file) assert index.f == loaded_index.f == 5 assert index.get_n_items() == loaded_index.get_n_items() == 10 assert index.get_nns_by_item( 0, 5 ) == loaded_index.get_nns_by_item(0, 5) index.unload() loaded_index.unload()
["def","test_dense_annoy_index","(","annoy_index_file",")",":","data","=","np.random.choice","(","[","0",",","1","]",",","size=","(","10",",","5",")",")","index","=","build_annoy_index","(","data",",","annoy_index_file",")","assert","os.path.exists","(","annoy_index_file",")","loaded_index","=","AnnoyIndex","(","5",",","metric=","''","angular","''",")","loaded_index.load","(","annoy_index_file",")","assert","index.f","==","loaded_index.f","==","5","assert","index.get_n_items","(",")","==","loaded_index.get_n_items","(",")","==","10","assert","index.get_nns_by_item","(","0",",","5",")","==","loaded_index.get_nns_by_item","(","0",",","5",")","index.unload","(",")","loaded_index.unload","(",")"]
37
50
null
test_knn.py
ivis/tests/data/test_knn.py
import tempfile import os import pytest from annoy import AnnoyIndex from scipy.sparse import csr_matrix from sklearn import datasets import numpy from ivis.data.neighbour_retrieval import AnnoyKnnMatrix from ivis.data.neighbour_retrieval.knn import build_annoy_index, extract_knn
15
null
9
6
null
null
null
Use image node_id 3 for calling a global function with example usage: test_dense_annoy_index(annoy_index_file) without return types
131
node_id 3
1,192,741
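The test's round trip, condensed; the AnnoyIndex(5, metric="angular") it loads implies build_annoy_index writes an angular-metric index, so that metric is assumed here (the path is a placeholder):

import numpy as np
from annoy import AnnoyIndex
from ivis.data.neighbour_retrieval.knn import build_annoy_index

data = np.random.choice([0, 1], size=(10, 5))       # 10 points, 5 dimensions
index = build_annoy_index(data, "/tmp/annoy.index")
loaded = AnnoyIndex(5, metric="angular")            # dimensionality must match the build
loaded.load("/tmp/annoy.index")
assert index.get_nns_by_item(0, 5) == loaded.get_nns_by_item(0, 5)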
test_build_sparse_annoy_index
global
null
false
annoy_index_file
null
null
null
null
null
def test_build_sparse_annoy_index(annoy_index_file): data = np.random.choice([0, 1], size=(10, 5)) sparse_data = csr_matrix(data) index = build_annoy_index(sparse_data, annoy_index_file) assert os.path.exists(annoy_index_file) loaded_index = AnnoyIndex(5, metric="angular") loaded_index.load(annoy_index_file) assert index.f == loaded_index.f == 5 assert index.get_n_items() == loaded_index.get_n_items() == 10 assert index.get_nns_by_item( 0, 5 ) == loaded_index.get_nns_by_item(0, 5) index.unload() loaded_index.unload()
["def","test_build_sparse_annoy_index","(","annoy_index_file",")",":","data","=","np.random.choice","(","[","0",",","1","]",",","size=","(","10",",","5",")",")","sparse_data","=","csr_matrix","(","data",")","index","=","build_annoy_index","(","sparse_data",",","annoy_index_file",")","assert","os.path.exists","(","annoy_index_file",")","loaded_index","=","AnnoyIndex","(","5",",","metric=","''","angular","''",")","loaded_index.load","(","annoy_index_file",")","assert","index.f","==","loaded_index.f","==","5","assert","index.get_n_items","(",")","==","loaded_index.get_n_items","(",")","==","10","assert","index.get_nns_by_item","(","0",",","5",")","==","loaded_index.get_nns_by_item","(","0",",","5",")","index.unload","(",")","loaded_index.unload","(",")"]
19
34
null
test_knn.py
ivis/tests/data/test_knn.py
import tempfile import os import pytest from annoy import AnnoyIndex from scipy.sparse import csr_matrix from sklearn import datasets import numpy from ivis.data.neighbour_retrieval import AnnoyKnnMatrix from ivis.data.neighbour_retrieval.knn import build_annoy_index, extract_knn
15
null
9
6
null
null
null
Use image node_id 2 for calling a global function with example usage: test_build_sparse_annoy_index(annoy_index_file) without return types
138
node_id 2
1,192,740
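The only change from the dense test is the SciPy CSR wrapper, which implies build_annoy_index accepts sparse input directly (presumably handling rows sparsely or densifying them internally); a minimal sketch:

import numpy as np
from scipy.sparse import csr_matrix
from ivis.data.neighbour_retrieval.knn import build_annoy_index

sparse_data = csr_matrix(np.random.choice([0, 1], size=(10, 5)))
index = build_annoy_index(sparse_data, "/tmp/annoy-sparse.index")  # path is a placeholder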
train
DenseRetrievalDataSource
DataSource
true
self
Data source for DPR (https://github.com/facebookresearch/DPR). Expects multiline json for lazy loading and improved memory usage. The original DPR files can be converted to multiline json using `jq -c .[]`
["Data","source","for","DPR","(","https",":","\/\/github.com\/facebookresearch\/DPR",")",".","Expects","multiline","json","for","lazy","loading","and","improved","memory","usage",".","The","original","DPR","files","can","be","converted","to","multiline","json","using","`","jq","-c",".","[","]","`"]
null
null
self
def train(self): return self.process_file(self.train_filename, is_train=True)
["def","train","(","self",")",":","return","self.process_file","(","self.train_filename",",","is_train=True",")"]
62
63
null
dense_retrieval.py
pytext/pytext/data/sources/dense_retrieval.py
import json import random from typing import List, Optional from pytext.data.sources.data_source import DataSource, generator_property from pytext.utils.file_io import PathManager
15
1
5
1
1
7
1
Use image node_id 3 for calling the DenseRetrievalDataSource obj's underlying member method code with example usage: obj.train() and returns: self
146
node_id 3
1,680,324
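A usage sketch; the schema and file name are placeholders, and since these records drop decorators (the module imports generator_property), whether train is accessed as a property or called as a method is an assumption:

source = DenseRetrievalDataSource(
    schema={"question": str, "positive_ctx": str},  # hypothetical schema
    train_filename="train.jsonl",                   # multiline json, e.g. from `jq -c .[]`
)
rows = source.train()  # delegates to process_file(self.train_filename, is_train=True)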
__init__
EpisodeReplayBuffer
object
true
self,max_buffer_size
null
null
null
null
EpisodeReplayBuffer
def __init__(self, max_buffer_size): self.max_buffer_size = max_buffer_size self.buffer = deque(maxlen=max_buffer_size)
["def","__init__","(","self",",","max_buffer_size",")",":","self.max_buffer_size","=","max_buffer_size","self.buffer","=","deque","(","maxlen=max_buffer_size",")"]
56
58
null
replay_buffer.py
PARL/benchmark/torch/qmix/replay_buffer.py
from collections import deque import numpy import random
15
2
3
0
2
4
1
Use image node_id 1 to create a new EpisodeReplayBuffer object from inherited base classes: object with example: obj = EpisodeReplayBuffer(max_buffer_size)
155
node_id 1
154,120
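Because the buffer is a deque with maxlen=max_buffer_size, the oldest episode is silently evicted once the buffer is full; a sketch (strings stand in for episode objects):

buffer = EpisodeReplayBuffer(max_buffer_size=2)
buffer.add("ep1")
buffer.add("ep2")
buffer.add("ep3")
assert list(buffer.buffer) == ["ep2", "ep3"]  # "ep1" was dropped by the deque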
get_data
EpisodeExperience
object
true
self
null
null
null
null
np, np, np, np, np, np, np
def get_data(self): assert self.count == self.max_len return ( np.array(self.episode_state), np.array(self.episode_actions), np.array(self.episode_reward), np.array(self.episode_terminated), np.array(self.episode_obs), np.array(self.episode_available_actions), np.array(self.episode_filled), )
["def","get_data","(","self",")",":","assert","self.count","==","self.max_len","return","(","np.array","(","self.episode_state",")",",","np.array","(","self.episode_actions",")",",","np.array","(","self.episode_reward",")",",","np.array","(","self.episode_terminated",")",",","np.array","(","self.episode_obs",")",",","np.array","(","self.episode_available_actions",")",",","np.array","(","self.episode_filled",")",",",")"]
47
52
null
replay_buffer.py
PARL/benchmark/torch/qmix/replay_buffer.py
from collections import deque import numpy import random
15
2
3
0
2
4
1
Use image node_id 4 for calling the EpisodeExperience obj's underlying member method code with example usage: obj.get_data() and returns: np, np, np, np, np, np, np
170
node_id 4
154,119
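get_data refuses to return a partially filled episode (count must equal max_len) and stacks the seven parallel lists into numpy arrays in a fixed order; a self-contained sketch with a one-step episode (assumes count is a @property, as noted after the count record below):

ep = EpisodeExperience(episode_len=1)
ep.add(state=[0.0], actions=[1], reward=[0.5], terminated=[False],
       obs=[[0.0]], available_actions=[[1, 1]], filled=[1])
state, actions, reward, terminated, obs, avail, filled = ep.get_data()
assert state.shape[0] == 1  # leading dimension is max_len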
add
EpisodeExperience
object
true
self,state,actions,reward,terminated,obs,available_actions,filled
null
null
null
null
null
def add( self, state, actions, reward, terminated, obs, available_actions, filled, ): assert self.count < self.max_len self.episode_state.append(state) self.episode_actions.append(actions) self.episode_reward.append(reward) self.episode_terminated.append(terminated) self.episode_obs.append(obs) self.episode_available_actions.append(available_actions) self.episode_filled.append(filled)
["def","add","(","self",",","state",",","actions",",","reward",",","terminated",",","obs",",","available_actions",",","filled",",",")",":","assert","self.count","<","self.max_len","self.episode_state.append","(","state",")","self.episode_actions.append","(","actions",")","self.episode_reward.append","(","reward",")","self.episode_terminated.append","(","terminated",")","self.episode_obs.append","(","obs",")","self.episode_available_actions.append","(","available_actions",")","self.episode_filled.append","(","filled",")"]
36
45
null
replay_buffer.py
PARL/benchmark/torch/qmix/replay_buffer.py
from collections import deque import numpy import random
15
2
3
0
2
4
1
Use image node_id 3 for calling the EpisodeExperience obj's underlying member method code with example usage: obj.add(state, actions, reward, terminated, obs, available_actions, filled) without return types
206
node_id 3
154,118
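Each call appends one timestep across all seven parallel lists, and the assert guards against overfilling; a sketch:

ep = EpisodeExperience(episode_len=2)
for t in range(2):
    ep.add(state=[t], actions=[0], reward=[0.0], terminated=[t == 1],
           obs=[[t]], available_actions=[[1]], filled=[1])
# A third ep.add(...) would fail the `assert self.count < self.max_len` guard.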
count
EpisodeExperience
object
true
self
null
null
null
null
len
def count(self): return len(self.episode_state)
["def","count","(","self",")",":","return","len","(","self.episode_state",")"]
33
34
null
replay_buffer.py
PARL/benchmark/torch/qmix/replay_buffer.py
from collections import deque import numpy import random
15
2
3
0
2
4
1
Use image node_id 2 for calling the EpisodeExperience obj's underlying member method code with example usage: obj.count() and returns: len
138
node_id 2
154,117
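add() above compares self.count < self.max_len as a plain value, so count is presumably decorated with @property in the source (these records do not capture decorators); a sketch:

ep = EpisodeExperience(episode_len=5)
assert ep.count == 0  # attribute-style access, not ep.count()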
annoy_index_file
global
null
false
null
null
null
null
null
def annoy_index_file(): with tempfile.TemporaryDirectory() as f: yield os.path.join(f, "annoy.index")
["def","annoy_index_file","(",")",":","with","tempfile.TemporaryDirectory","(",")","as","f",":","yield","os.path.join","(","f",",","``","annoy.index","''",")"]
14
16
null
test_knn.py
ivis/tests/data/test_knn.py
import tempfile import os import pytest from annoy import AnnoyIndex from scipy.sparse import csr_matrix from sklearn import datasets import numpy from ivis.data.neighbour_retrieval import AnnoyKnnMatrix from ivis.data.neighbour_retrieval.knn import build_annoy_index, extract_knn
15
null
9
6
null
null
null
Use image node_id 1 for calling a global function with example usage: annoy_index_file() without return types
109
node_id 1
1,192,739
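In the source this is presumably decorated with @pytest.fixture (decorators are not captured in these records); yielding from inside the with block keeps the temporary directory alive for the duration of the test that receives annoy_index_file. Restated with the assumed decorator:

import os
import tempfile

import pytest

@pytest.fixture
def annoy_index_file():
    with tempfile.TemporaryDirectory() as f:
        yield os.path.join(f, "annoy.index")  # directory (and index) removed after the test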
__init__
EpisodeExperience
object
true
self,episode_len
null
null
null
null
EpisodeExperience
def __init__(self, episode_len): self.max_len = episode_len self.episode_state = [] self.episode_actions = [] self.episode_reward = [] self.episode_terminated = [] self.episode_obs = [] self.episode_available_actions = [] self.episode_filled = []
["def","__init__","(","self",",","episode_len",")",":","self.max_len","=","episode_len","self.episode_state","=","[","]","self.episode_actions","=","[","]","self.episode_reward","=","[","]","self.episode_terminated","=","[","]","self.episode_obs","=","[","]","self.episode_available_actions","=","[","]","self.episode_filled","=","[","]"]
21
30
null
replay_buffer.py
PARL/benchmark/torch/qmix/replay_buffer.py
from collections import deque import numpy import random
15
2
3
0
2
4
1
Use image node_id 1 to create a new EpisodeExperience object from inherited base classes: object with example: obj = EpisodeExperience(episode_len)
147
node_id 1
154,116
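Construction only records the episode length and initialises seven empty parallel lists; nothing is allocated up front:

ep = EpisodeExperience(episode_len=3)
assert ep.max_len == 3
assert ep.count == 0  # assumes count is a @property, as noted above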
convert_rttm_text
global
null
false
path,wavscp_path,sampling_rate,output_path
null
null
null
null
null
def convert_rttm_text(
    path: Union[Path, str],
    wavscp_path: Union[Path, str],
    sampling_rate: int,
    output_path: Union[Path, str],
) -> None:
    """Convert a RTTM file

    Note: only support speaker information now
    """
    output_handler = Path(
        os.path.join(output_path, "espnet_rttm")
    ).open("w", encoding="utf-8")
    assert check_argument_types()
    utt_ids = set()
    with Path(path).open("r", encoding="utf-8") as f:
        for linenum, line in enumerate(f, 1):
            sps = re.split(" +", line.rstrip())

            # RTTM format must have exactly 9 fields
            assert (
                len(sps) == 9
            ), "{} does not have exactly 9 fields".format(path)
            (
                label_type,
                utt_id,
                channel,
                start,
                duration,
                _,
                _,
                spk_id,
                _,
            ) = sps

            # Only support speaker label now
            assert label_type == "SPEAKER"

            utt_ids.add(utt_id)
            start = int(np.rint(float(start) * sampling_rate))
            end = start + int(
                np.rint(float(duration) * sampling_rate)
            )
            output_handler.write(
                "{} {} {} {} {} <NA> <NA> {} <NA>\n".format(
                    label_type, utt_id, channel, start, end, spk_id
                )
            )

    with Path(wavscp_path).open("r", encoding="utf-8") as f:
        for linenum, line in enumerate(f, 1):
            sps = re.split("[ \t]+", line.rstrip())
            utt_id, wav_path = sps
            assert (
                utt_id in utt_ids
            ), "{} is not in corresponding rttm {}".format(
                utt_id, path
            )
            sf = soundfile.SoundFile(wav_path)
            assert sf.samplerate == sampling_rate
            output_handler.write(
                (
                    "{} {} <NA> <NA> {} <NA> <NA> <NA> <NA>\n".format(
                        "END", utt_id, sf.frames
                    )
                )
            )
    output_handler.close()
["def","convert_rttm_text","(","path",":","Union","[","Path",",","str","]",",","wavscp_path",":","Union","[","Path",",","str","]",",","sampling_rate",":","int",",","output_path",":","Union","[","Path",",","str","]",",",")","-",">","None",":","``","''","''","Convert","a","RTTM","file","Note",":","only","support","speaker","information","now","``","''","''","output_handler","=","Path","(","os.path.join","(","output_path",",","``","espnet_rttm","''",")",")",".open","(","``","w","''",",","encoding=","''","utf-8","''",")","assert","check_argument_types","(",")","utt_ids","=","set","(",")","with","Path","(","path",")",".open","(","``","r","''",",","encoding=","''","utf-8","''",")","as","f",":","for","linenum",",","line","in","enumerate","(","f",",","1",")",":","sps","=","re.split","(","``","+","''",",","line.rstrip","(",")",")","#","RTTM","format","must","have","exactly","9","fields","assert","(","len","(","sps",")","==","9",")",",","``","{","}","does","not","have","exactly","9","fields","''",".format","(","path",")","(","label_type",",","utt_id",",","channel",",","start",",","duration",",","_",",","_",",","spk_id",",","_",",",")","=","sps","#","Only","support","speaker","label","now","assert","label_type","==","``","SPEAKER","''","utt_ids.add","(","utt_id",")","start","=","int","(","np.rint","(","float","(","start",")","*","sampling_rate",")",")","end","=","start","+","int","(","np.rint","(","float","(","duration",")","*","sampling_rate",")",")","output_handler.write","(","``","{","}","{","}","{","}","{","}","{","}","<","NA",">","<","NA",">","{","}","<","NA",">","\\n","''",".format","(","label_type",",","utt_id",",","channel",",","start",",","end",",","spk_id",")",")","with","Path","(","wavscp_path",")",".open","(","``","r","''",",","encoding=","''","utf-8","''",")","as","f",":","for","linenum",",","line","in","enumerate","(","f",",","1",")",":","sps","=","re.split","(","``","[","\\t","]","+","''",",","line.rstrip","(",")",")","utt_id",",","wav_path","=","sps","assert","(","utt_id","in","utt_ids",")",",","``","{","}","is","not","in","corresponding","rttm","{","}","''",".foramt","(","utt_id",",","path",")","sf","=","soundfile.SoundFile","(","wav_path",")","assert","sf.samplerate","==","sampling_rate","output_handler.write","(","(","``","{","}","{","}","<","NA",">","<","NA",">","{","}","<","NA",">","<","NA",">","<","NA",">","<","NA",">","\\n","''",".format","(","``","END","''",",","utt_id",",","sf.frames",")",")",")","output_handler.close","(",")"]
19
75
null
convert_rttm.py
espnet/egs2/thchs30/asr1/pyscripts/utils/convert_rttm.py
import argparse import collections.abc import logging import os import re from pathlib import Path from typing import Union import humanfriendly import numpy import soundfile from typeguard import check_argument_types from espnet2.utils.types import str_or_int
15
null
12
3
null
null
null
Use image node_id 1 for calling a global function with example usage: convert_rttm_text(path, wavscp_path, sampling_rate, output_path) without return types
155
node_id 1
981,192
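A usage sketch, assuming a standard 9-field RTTM file and a Kaldi-style wav.scp ("utt_id /path/to/utt.wav" per line); all paths are placeholders:

convert_rttm_text(
    path="data/ref.rttm",        # SPEAKER lines with start/duration in seconds
    wavscp_path="data/wav.scp",
    sampling_rate=16000,         # start/end are rewritten as sample indices
    output_path="data/out",      # writes data/out/espnet_rttm
)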
__init__
DenseRetrievalDataSource
DataSource
true
self,schema,train_filename,test_filename,eval_filename,num_negative_ctxs,use_title,use_cache
Data source for DPR (https://github.com/facebookresearch/DPR). Expects multiline json for lazy loading and improved memory usage. The original DPR files can be converted to multiline json using `jq -c .[]`
["Data","source","for","DPR","(","https",":","\/\/github.com\/facebookresearch\/DPR",")",".","Expects","multiline","json","for","lazy","loading","and","improved","memory","usage",".","The","original","DPR","files","can","be","converted","to","multiline","json","using","`","jq","-c",".","[","]","`"]
null
null
DenseRetrievalDataSource
def __init__( self, schema, train_filename=None, test_filename=None, eval_filename=None, num_negative_ctxs=1, use_title=True, use_cache=False, ): super().__init__(schema) self.train_filename = train_filename self.test_filename = test_filename self.eval_filename = eval_filename self.num_negative_ctxs = num_negative_ctxs self.use_title = use_title self.use_cache = use_cache self.cache = {}
["def","__init__","(","self",",","schema",",","train_filename=None",",","test_filename=None",",","eval_filename=None",",","num_negative_ctxs=1",",","use_title=True",",","use_cache=False",",",")",":","super","(",")",".__init__","(","schema",")","self.train_filename","=","train_filename","self.test_filename","=","test_filename","self.eval_filename","=","eval_filename","self.num_negative_ctxs","=","num_negative_ctxs","self.use_title","=","use_title","self.use_cache","=","use_cache","self.cache","=","{","}"]
42
59
null
dense_retrieval.py
pytext/pytext/data/sources/dense_retrieval.py
import json import random from typing import List, Optional from pytext.data.sources.data_source import DataSource, generator_property from pytext.utils.file_io import PathManager
15
1
5
1
1
7
1
Use image node_id 2 to create a new DenseRetrievalDataSource object from inherited base classes: DataSource with example: obj = DenseRetrievalDataSource(schema, train_filename, test_filename, eval_filename, num_negative_ctxs, use_title, use_cache)
247
node_id 2
1,680,323
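A fuller construction sketch; the semantics of num_negative_ctxs (negatives sampled per question), use_title (prepend passage titles), and use_cache (memoise parsed rows in self.cache) are inferred from the parameter names, so treat them as assumptions:

from typing import List

source = DenseRetrievalDataSource(
    schema={"question": str, "positive_ctx": str, "negative_ctxs": List[str]},  # hypothetical
    train_filename="nq-train.jsonl",  # multiline json, e.g. produced with `jq -c .[]`
    eval_filename="nq-dev.jsonl",
    num_negative_ctxs=1,
    use_title=True,
    use_cache=False,
)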