Dataset schema (column : dtype : observed range)

name                 stringlengths    1 – 152
class_name           stringlengths    1 – 51
class_bases          stringlengths    0 – 159
is_member            bool             2 classes
args                 stringlengths    0 – 804
class_docstr         stringlengths    4 – 8.19k
class_docstr_tok     stringlengths    2 – 11.6k
docstr               stringlengths    0 – 11.4k
docstr_tok           stringlengths    2 – 13.4k
returns              stringlengths    0 – 260
code                 stringlengths    21 – 52.4k
code_tok             stringlengths    33 – 92.8k
lstart               int64            1 – 1.75k
lend                 int64            5 – 1.75k
raises               stringclasses    16 values
filename             stringlengths    5 – 66
file_path            stringlengths    12 – 161
imports              stringlengths    0 – 1.77k
total_objects        int64            15 – 15
num_classes          float64          1 – 7
num_imports          int64            0 – 14
num_functions        int64            0 – 15
num_all_bases        float64          0 – 9
num_methods          float64          1 – 14
num_bases            float64          1 – 7
label_desc           stringlengths    69 – 1.05k
label_desc_len       int64            69 – 1.05k
label_id             stringclasses    15 values
__index_level_0__    int64            468 – 2.35M
do_log
TextLogger
NullLogger
true
self,arg
null
null
null
null
null
def do_log(self, arg):
    sys.stdout.write(str(arg))
["def","do_log","(","self",",","arg",")",":","sys.stdout.write","(","str","(","arg",")",")"]
39
40
null
logger.py
turicreate/src/external/boost/boost_1_68_0/tools/build/src/util/logger.py
import sys
15
2
1
0
1
4
1
Use image node_id 2 for calling the TextLogger obj's underlying member method code with example usage: obj.do_log(arg) without return types
139
node_id 2
2,276,655
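A minimal, self-contained sketch of the usage this record's label_desc describes (obj.do_log(arg)); the NullLogger stub stands in for the base class named in class_bases, and the inline layout is an assumption rather than the real b2 util.logger module structure:

import sys

class NullLogger:  # stub for the base class listed in class_bases (assumption)
    pass

class TextLogger(NullLogger):
    def do_log(self, arg):  # body taken verbatim from this record's code field
        sys.stdout.write(str(arg))

TextLogger().do_log("updating targets...\n")  # writes the argument to stdout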
interesting
TextLogger
NullLogger
true
self,source_name
null
null
null
null
True
def interesting(self, source_name):
    return True
["def","interesting","(","self",",","source_name",")",":","return","True"]
42
43
null
logger.py
turicreate/src/external/boost/boost_1_68_0/tools/build/src/util/logger.py
import sys
15
2
1
0
1
4
1
Use image node_id 3 for calling the TextLogger obj's underlying member method code with example usage: obj.interesting(source_name) and returns: True
149
node_id 3
2,276,656
test_autolog_log_models_configuration
global
null
false
log_models
null
null
null
null
null
def test_autolog_log_models_configuration(log_models):
    mlflow.paddle.autolog(log_models=log_models)
    with mlflow.start_run() as run:
        train_model()
    artifacts = MlflowClient().list_artifacts(run.info.run_id)
    assert any(x.path == "model" for x in artifacts) == log_models
["def","test_autolog_log_models_configuration","(","log_models",")",":","mlflow.paddle.autolog","(","log_models=log_models",")","with","mlflow.start_run","(",")","as","run",":","train_model","(",")","artifacts","=","MlflowClient","(",")",".list_artifacts","(","run.info.run_id",")","assert","any","(","x.path","==","``","model","''","for","x","in","artifacts",")","==","log_models"]
86
93
null
test_paddle_autolog.py
mlflow/tests/paddle/test_paddle_autolog.py
import paddle
import pytest
import mlflow
from mlflow import MlflowClient
15
null
4
7
null
null
null
Use image node_id 5 for calling a global function with example usage: test_autolog_log_models_configuration(log_models) without return types
140
node_id 5
1,356,430
test_autolog_early_stopping_callback
global
null
false
null
null
null
null
null
def test_autolog_early_stopping_callback():
    mlflow.paddle.autolog()
    early_stopping = paddle.callbacks.EarlyStopping(
        "loss", mode="min", patience=1, min_delta=0
    )
    with mlflow.start_run() as run:
        train_model(callbacks=[early_stopping])
    client = MlflowClient()
    data = client.get_run(run.info.run_id).data
    for param_key in ["monitor", "patience", "min_delta", "baseline"]:
        assert param_key in data.params
        assert data.params[param_key] == str(getattr(early_stopping, param_key))
    for metric_key in ["stopped_epoch", "best_value"]:
        assert metric_key in data.metrics
        assert float(data.metrics[metric_key]) == getattr(early_stopping, metric_key)
    for metric_key in ["loss", "step"]:
        assert metric_key in data.metrics
        metric_history = client.get_metric_history(run.info.run_id, metric_key)
        assert len(metric_history) == NUM_EPOCHS
["def","test_autolog_early_stopping_callback","(",")",":","mlflow.paddle.autolog","(",")","early_stopping","=","paddle.callbacks.EarlyStopping","(","``","loss","''",",","mode=","''","min","''",",","patience=1",",","min_delta=0",")","with","mlflow.start_run","(",")","as","run",":","train_model","(","callbacks=","[","early_stopping","]",")","client","=","MlflowClient","(",")","data","=","client.get_run","(","run.info.run_id",")",".data","for","param_key","in","[","``","monitor","''",",","``","patience","''",",","``","min_delta","''",",","``","baseline","''","]",":","assert","param_key","in","data.params","assert","data.params","[","param_key","]","==","str","(","getattr","(","early_stopping",",","param_key",")",")","for","metric_key","in","[","``","stopped_epoch","''",",","``","best_value","''","]",":","assert","metric_key","in","data.metrics","assert","float","(","data.metrics","[","metric_key","]",")","==","getattr","(","early_stopping",",","metric_key",")","for","metric_key","in","[","``","loss","''",",","``","step","''","]",":","assert","metric_key","in","data.metrics","metric_history","=","client.get_metric_history","(","run.info.run_id",",","metric_key",")","assert","len","(","metric_history",")","==","NUM_EPOCHS"]
61
82
null
test_paddle_autolog.py
mlflow/tests/paddle/test_paddle_autolog.py
import paddle
import pytest
import mlflow
from mlflow import MlflowClient
15
null
4
7
null
null
null
Use image node_id 4 for calling a global function with example usage: test_autolog_early_stopping_callback() without return types
129
node_id 4
1,356,429
test_get_files_from_dir
TestRetrieveUtils
null
true
self
null
null
null
null
null
def test_get_files_from_dir(self):
    files = get_files_from_dir(test_dir, recursive=False)
    assert all(os.path.isfile(file) for file in files)
    pdf_file_path = os.path.join(test_dir, "example.pdf")
    txt_file_path = os.path.join(test_dir, "example.txt")
    files = get_files_from_dir([pdf_file_path, txt_file_path])
    assert all(os.path.isfile(file) for file in files)
    files = get_files_from_dir(
        [
            pdf_file_path,
            txt_file_path,
            os.path.join(test_dir, "..", "..", "website/docs"),
            "https://raw.githubusercontent.com/microsoft/autogen/main/README.md",
        ],
        recursive=True,
    )
    assert all(os.path.isfile(file) for file in files)
    files = get_files_from_dir(
        [
            pdf_file_path,
            txt_file_path,
            os.path.join(test_dir, "..", "..", "website/docs"),
            "https://raw.githubusercontent.com/microsoft/autogen/main/README.md",
        ],
        recursive=True,
        types=["pdf", "txt"],
    )
    assert all(os.path.isfile(file) for file in files)
    assert len(files) == 3
["def","test_get_files_from_dir","(","self",")",":","files","=","get_files_from_dir","(","test_dir",",","recursive=False",")","assert","all","(","os.path.isfile","(","file",")","for","file","in","files",")","pdf_file_path","=","os.path.join","(","test_dir",",","``","example.pdf","''",")","txt_file_path","=","os.path.join","(","test_dir",",","``","example.txt","''",")","files","=","get_files_from_dir","(","[","pdf_file_path",",","txt_file_path","]",")","assert","all","(","os.path.isfile","(","file",")","for","file","in","files",")","files","=","get_files_from_dir","(","[","pdf_file_path",",","txt_file_path",",","os.path.join","(","test_dir",",","``","..","''",",","``","..","''",",","``","website\/docs","''",")",",","``","https",":","\/\/raw.githubusercontent.com\/microsoft\/autogen\/main\/README.md","''",",","]",",","recursive=True",",",")","assert","all","(","os.path.isfile","(","file",")","for","file","in","files",")","files","=","get_files_from_dir","(","[","pdf_file_path",",","txt_file_path",",","os.path.join","(","test_dir",",","``","..","''",",","``","..","''",",","``","website\/docs","''",")",",","``","https",":","\/\/raw.githubusercontent.com\/microsoft\/autogen\/main\/README.md","''",",","]",",","recursive=True",",","types=","[","``","pdf","''",",","``","txt","''","]",",",")","assert","all","(","os.path.isfile","(","file",")","for","file","in","files",")","assert","len","(","files",")","==","3"]
62
90
null
test_retrieve_utils.py
autogen/test/test_retrieve_utils.py
import pytest
import os
15
1
2
0
0
12
null
Use image node_id 5 for calling the TestRetrieveUtils obj's underlying member method code with example usage: obj.test_get_files_from_dir() without return types
160
node_id 5
319,450
_transform_search_space
SearchSpaceToChoice
Transform
true
self,search_space
Replaces the search space with a single choice parameter, whose values are the signatures of the arms observed in the data. This transform is meant to be used with ThompsonSampler. Choice parameter will be unordered unless config["use_ordered"] specifies otherwise. Transform is done in-place.
["Replaces","the","search","space","with","a","single","choice","parameter",",","whose","values","are","the","signatures","of","the","arms","observed","in","the","data",".","This","transform","is","meant","to","be","used","with","ThompsonSampler",".","Choice","parameter","will","be","unordered","unless","config","[","``","use_ordered","''","]","specifies","otherwise",".","Transform","is","done","in-place","."]
null
null
SearchSpace
def _transform_search_space(self, search_space: SearchSpace) -> SearchSpace:
    values = list(self.signature_to_parameterization.keys())
    if len(values) > 1:
        parameter = ChoiceParameter(
            name=self.parameter_name,
            parameter_type=ParameterType.STRING,
            values=values,
            is_ordered=checked_cast(bool, self.config.get("use_ordered", False)),
            sort_values=False,
        )
    else:
        parameter = FixedParameter(
            name=self.parameter_name,
            parameter_type=ParameterType.STRING,
            value=values[0],
        )
    return SearchSpace(parameters=[parameter])
["def","_transform_search_space","(","self",",","search_space",":","SearchSpace",")","-",">","SearchSpace",":","values","=","list","(","self.signature_to_parameterization.keys","(",")",")","if","len","(","values",")",">","1",":","parameter","=","ChoiceParameter","(","name=self.parameter_name",",","parameter_type=ParameterType.STRING",",","values=values",",","is_ordered=checked_cast","(","bool",",","self.config.get","(","``","use_ordered","''",",","False",")",")",",","sort_values=False",",",")","else",":","parameter","=","FixedParameter","(","name=self.parameter_name",",","parameter_type=ParameterType.STRING",",","value=values","[","0","]",",",")","return","SearchSpace","(","parameters=","[","parameter","]",")"]
65
81
null
search_space_to_choice.py
Ax/ax/modelbridge/transforms/search_space_to_choice.py
from typing import List, Optional, TYPE_CHECKING
from ax.core.arm import Arm
from ax.core.observation import Observation, ObservationFeatures
from ax.core.parameter import ChoiceParameter, FixedParameter, ParameterType
from ax.core.search_space import RobustSearchSpace, SearchSpace
from ax.exceptions.core import UnsupportedError
from ax.modelbridge.transforms.base import Transform
from ax.models.types import TConfig
from ax.utils.common.typeutils import checked_cast
15
1
9
0
1
4
1
Use image node_id 2 for calling the SearchSpaceToChoice obj's underlying member method code with example usage: obj._transform_search_space(search_space) and returns: SearchSpace
178
node_id 2
9,097
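A hedged sketch of the two output shapes this transform can produce, using only the Ax classes the record's imports field names; the arm signatures and the parameter name "arms" are made-up placeholders:

from ax.core.parameter import ChoiceParameter, FixedParameter, ParameterType
from ax.core.search_space import SearchSpace

signatures = ["0_0", "0_1", "1_0"]  # hypothetical arm signatures
if len(signatures) > 1:
    # Mirrors the len(values) > 1 branch: an unordered string choice.
    parameter = ChoiceParameter(
        name="arms",  # illustrative stand-in for self.parameter_name
        parameter_type=ParameterType.STRING,
        values=signatures,
        is_ordered=False,  # the config.get("use_ordered", False) default
        sort_values=False,
    )
else:
    # A single observed arm collapses to a fixed parameter.
    parameter = FixedParameter(
        name="arms", parameter_type=ParameterType.STRING, value=signatures[0]
    )
search_space = SearchSpace(parameters=[parameter])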
test_df_to_sql_no_dtype
TestRedshiftDbEngineSpec
TestDbEngineSpec
true
self
null
null
null
null
null
def test_df_to_sql_no_dtype(self):
    mock_database = mock.MagicMock()
    mock_database.get_df.return_value.empty = False
    table_name = "foobar"
    data = [
        ("foo", "bar", pd.NA, None),
        ("foo", "bar", pd.NA, True),
        ("foo", "bar", pd.NA, None),
    ]
    numpy_dtype = [
        ("id", "object"),
        ("value", "object"),
        ("num", "object"),
        ("bool", "object"),
    ]
    column_names = ["id", "value", "num", "bool"]
    test_array = np.array(data, dtype=numpy_dtype)
    df = pd.DataFrame(test_array, columns=column_names)
    df.to_sql = mock.MagicMock()
    with app.app_context():
        RedshiftEngineSpec.df_to_sql(
            mock_database,
            Table(table=table_name),
            df,
            to_sql_kwargs={},
        )
    assert df.to_sql.call_args[1]["dtype"] == {}
["def","test_df_to_sql_no_dtype","(","self",")",":","mock_database","=","mock.MagicMock","(",")","mock_database.get_df.return_value.empty","=","False","table_name","=","``","foobar","''","data","=","[","(","``","foo","''",",","``","bar","''",",","pd.NA",",","None",")",",","(","``","foo","''",",","``","bar","''",",","pd.NA",",","True",")",",","(","``","foo","''",",","``","bar","''",",","pd.NA",",","None",")",",","]","numpy_dtype","=","[","(","``","id","''",",","``","object","''",")",",","(","``","value","''",",","``","object","''",")",",","(","``","num","''",",","``","object","''",")",",","(","``","bool","''",",","``","object","''",")",",","]","column_names","=","[","``","id","''",",","``","value","''",",","``","num","''",",","``","bool","''","]","test_array","=","np.array","(","data",",","dtype=numpy_dtype",")","df","=","pd.DataFrame","(","test_array",",","columns=column_names",")","df.to_sql","=","mock.MagicMock","(",")","with","app.app_context","(",")",":","RedshiftEngineSpec.df_to_sql","(","mock_database",",","Table","(","table=table_name",")",",","df",",","to_sql_kwargs=","{","}",",",")","assert","df.to_sql.call_args","[","1","]","[","``","dtype","''","]","==","{","}"]
194
221
null
redshift_tests.py
superset/tests/integration_tests/db_engine_specs/redshift_tests.py
import unittest.mock
from textwrap import dedent
import numpy
import pandas
from sqlalchemy.types import NVARCHAR
from superset.db_engine_specs.redshift import RedshiftEngineSpec
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.sql_parse import Table
from tests.integration_tests.db_engine_specs.base_tests import TestDbEngineSpec
from tests.integration_tests.test_app import app
15
1
10
0
1
3
1
Use image node_id 2 for calling the TestRedshiftDbEngineSpec obj's underlying member method code with example usage: obj.test_df_to_sql_no_dtype() without return types
167
node_id 2
2,027,365
test_df_to_sql_with_string_dtype
TestRedshiftDbEngineSpec
TestDbEngineSpec
true
self
null
null
null
null
null
def test_df_to_sql_with_string_dtype(self):
    mock_database = mock.MagicMock()
    mock_database.get_df.return_value.empty = False
    table_name = "foobar"
    data = [
        ("foo", "bar", pd.NA, None),
        ("foo", "bar", pd.NA, True),
        ("foo", "bar", pd.NA, None),
    ]
    column_names = ["id", "value", "num", "bool"]
    df = pd.DataFrame(data, columns=column_names)
    df = df.astype(dtype={"value": "string"})
    df.to_sql = mock.MagicMock()
    with app.app_context():
        RedshiftEngineSpec.df_to_sql(
            mock_database,
            Table(table=table_name),
            df,
            to_sql_kwargs={},
        )
    # varchar string length should be 65535
    dtype = df.to_sql.call_args[1]["dtype"]
    assert isinstance(dtype["value"], NVARCHAR)
    assert dtype["value"].length == 65535
["def","test_df_to_sql_with_string_dtype","(","self",")",":","mock_database","=","mock.MagicMock","(",")","mock_database.get_df.return_value.empty","=","False","table_name","=","``","foobar","''","data","=","[","(","``","foo","''",",","``","bar","''",",","pd.NA",",","None",")",",","(","``","foo","''",",","``","bar","''",",","pd.NA",",","True",")",",","(","``","foo","''",",","``","bar","''",",","pd.NA",",","None",")",",","]","column_names","=","[","``","id","''",",","``","value","''",",","``","num","''",",","``","bool","''","]","df","=","pd.DataFrame","(","data",",","columns=column_names",")","df","=","df.astype","(","dtype=","{","``","value","''",":","``","string","''","}",")","df.to_sql","=","mock.MagicMock","(",")","with","app.app_context","(",")",":","RedshiftEngineSpec.df_to_sql","(","mock_database",",","Table","(","table=table_name",")",",","df",",","to_sql_kwargs=","{","}",",",")","#","varchar","string","length","should","be","65535","dtype","=","df.to_sql.call_args","[","1","]","[","``","dtype","''","]","assert","isinstance","(","dtype","[","``","value","''","]",",","NVARCHAR",")","assert","dtype","[","``","value","''","]",".length","==","65535"]
223
246
null
redshift_tests.py
superset/tests/integration_tests/db_engine_specs/redshift_tests.py
import unittest.mock
from textwrap import dedent
import numpy
import pandas
from sqlalchemy.types import NVARCHAR
from superset.db_engine_specs.redshift import RedshiftEngineSpec
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.sql_parse import Table
from tests.integration_tests.db_engine_specs.base_tests import TestDbEngineSpec
from tests.integration_tests.test_app import app
15
1
10
0
1
3
1
Use image node_id 3 for calling the TestRedshiftDbEngineSpec obj's underlying member method code with example usage: obj.test_df_to_sql_with_string_dtype() without return types
176
node_id 3
2,027,366
differint
global
null
false
ctx,f,x,n,x0
null
null
null
null
unknown
def differint(ctx, f, x, n=1, x0=0):
    r"""
    Calculates the Riemann-Liouville differintegral, or fractional
    derivative, defined by

    .. math ::

        \,_{x_0}{\mathbb{D}}^n_xf(x) = \frac{1}{\Gamma(m-n)}
        \frac{d^m}{dx^m} \int_{x_0}^{x}(x-t)^{m-n-1}f(t)dt

    where `f` is a given (presumably well-behaved) function, `x` is the
    evaluation point, `n` is the order, and `x_0` is the reference point
    of integration (`m` is an arbitrary parameter selected automatically).

    With `n = 1`, this is just the standard derivative `f'(x)`; with
    `n = 2`, the second derivative `f''(x)`, etc. With `n = -1`, it gives
    `\int_{x_0}^x f(t) dt`, with `n = -2` it gives
    `\int_{x_0}^x \left( \int_{x_0}^t f(u) du \right) dt`, etc.

    As `n` is permitted to be any number, this operator generalizes
    iterated differentiation and iterated integration to a single operator
    with a continuous order parameter.

    **Examples**

    There is an exact formula for the fractional derivative of a monomial
    `x^p`, which may be used as a reference. For example, the following
    gives a half-derivative (order 0.5)::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> x = mpf(3); p = 2; n = 0.5
        >>> differint(lambda t: t**p, x, n)
        7.81764019044672
        >>> gamma(p+1)/gamma(p-n+1) * x**(p-n)
        7.81764019044672

    Another useful test function is the exponential function, whose
    integration / differentiation formula easily generalizes to arbitrary
    order. Here we first compute a third derivative, and then a triply
    nested integral. (The reference point `x_0` is set to `-\infty` to
    avoid nonzero endpoint terms.)::

        >>> differint(lambda x: exp(pi*x), -1.5, 3)
        0.278538406900792
        >>> exp(pi*-1.5) * pi**3
        0.278538406900792
        >>> differint(lambda x: exp(pi*x), 3.5, -3, -inf)
        1922.50563031149
        >>> exp(pi*3.5) / pi**3
        1922.50563031149

    However, for noninteger `n`, the differentiation formula for the
    exponential function must be modified to give the same result as the
    Riemann-Liouville differintegral::

        >>> x = mpf(3.5)
        >>> c = pi
        >>> n = 1+2*j
        >>> differint(lambda x: exp(c*x), x, n)
        (-123295.005390743 + 140955.117867654j)
        >>> x**(-n) * exp(c)**x * (x*c)**n * gammainc(-n, 0, x*c) / gamma(-n)
        (-123295.005390743 + 140955.117867654j)

    """
    m = max(int(ctx.ceil(ctx.re(n))) + 1, 1)
    r = m - n - 1
    g = lambda x: ctx.quad(lambda t: (x - t) ** r * f(t), [x0, x])
    return ctx.diff(g, x, m) / ctx.gamma(m - n)
["def","differint","(","ctx",",","f",",","x",",","n=1",",","x0=0",")",":","r","''","''","''","Calculates","the","Riemann-Liouville","differintegral",",","or","fractional","derivative",",","defined","by","..","math",":",":","\\",",","_","{","x_0","}","{","\\mathbb","{","D","}","}","^n_xf","(","x",")","=","\\frac","{","1","}","{","\\Gamma","(","m-n",")","}","\\frac","{","d^m","}","{","dx^m","}","\\int_","{","x_0","}","^","{","x","}","(","x-t",")","^","{","m-n-1","}","f","(","t",")","dt","where","`","f","`","is","a","given","(","presumably","well-behaved",")","function",",","`","x","`","is","the","evaluation","point",",","`","n","`","is","the","order",",","and","`","x_0","`","is","the","reference","point","of","integration","(","`","m","`","is","an","arbitrary","parameter","selected","automatically",")",".","With","`","n","=","1","`",",","this","is","just","the","standard","derivative","`","f","'","(","x",")","`",";","with","`","n","=","2","`",",","the","second","derivative","`","f","''","(","x",")","`",",","etc",".","With","`","n","=","-1","`",",","it","gives","`","\\int_","{","x_0","}","^x","f","(","t",")","dt","`",",","with","`","n","=","-2","`","it","gives","`","\\int_","{","x_0","}","^x","\\left","(","\\int_","{","x_0","}","^t","f","(","u",")","du","\\right",")","dt","`",",","etc",".","As","`","n","`","is","permitted","to","be","any","number",",","this","operator","generalizes","iterated","differentiation","and","iterated","integration","to","a","single","operator","with","a","continuous","order","parameter",".","*","*","Examples","*","*","There","is","an","exact","formula","for","the","fractional","derivative","of","a","monomial","`","x^p","`",",","which","may","be","used","as","a","reference",".","For","example",",","the","following","gives","a","half-derivative","(","order","0.5",")",":",":",">",">",">","from","mpmath","import","*",">",">",">","mp.dps","=","15",";","mp.pretty","=","True",">",">",">","x","=","mpf","(","3",")",";","p","=","2",";","n","=","0.5",">",">",">","differint","(","lambda","t",":","t","*","*","p",",","x",",","n",")","7.81764019044672",">",">",">","gamma","(","p+1",")","\/gamma","(","p-n+1",")","*","x","*","*","(","p-n",")","7.81764019044672","Another","useful","test","function","is","the","exponential","function",",","whose","integration","\/","differentiation","formula","easy","generalizes","to","arbitrary","order",".","Here","we","first","compute","a","third","derivative",",","and","then","a","triply","nested","integral",".","(","The","reference","point","`","x_0","`","is","set","to","`","-\\infty","`","to","avoid","nonzero","endpoint","terms",".",")",":",":",">",">",">","differint","(","lambda","x",":","exp","(","pi","*","x",")",",","-1.5",",","3",")","0.278538406900792",">",">",">","exp","(","pi","*","-1.5",")","*","pi","*","*","3","0.278538406900792",">",">",">","differint","(","lambda","x",":","exp","(","pi","*","x",")",",","3.5",",","-3",",","-inf",")","1922.50563031149",">",">",">","exp","(","pi","*","3.5",")","\/","pi","*","*","3","1922.50563031149","However",",","for","noninteger","`","n","`",",","the","differentiation","formula","for","the","exponential","function","must","be","modified","to","give","the","same","result","as","the","Riemann-Liouville","differintegral",":",":",">",">",">","x","=","mpf","(","3.5",")",">",">",">","c","=","pi",">",">",">","n","=","1+2","*","j",">",">",">","differint","(","lambda","x",":","exp","(","c","*","x",")",",","x",",","n",")","(","-123295.005390743","+","140955.117867654j",")",">",">",">","x","*","*","(","-n",")","*"
,"exp","(","c",")","*","*","x","*","(","x","*","c",")","*","*","n","*","gammainc","(","-n",",","0",",","x","*","c",")","\/","gamma","(","-n",")","(","-123295.005390743","+","140955.117867654j",")","``","''","''","m","=","max","(","int","(","ctx.ceil","(","ctx.re","(","n",")",")",")","+","1",",","1",")","r","=","m","-","n","-","1","g","=","lambda","x",":","ctx.quad","(","lambda","t",":","(","x","-","t",")","*","*","r","*","f","(","t",")",",","[","x0",",","x","]",")","return","ctx.diff","(","g",",","x",",","m",")","\/","ctx.gamma","(","m","-","n",")"]
449
519
null
differentiation.py
catboost/contrib/python/mpmath/py3/mpmath/calculus/differentiation.py
from ..libmp.backend import xrange
from .calculus import defun
15
null
2
13
null
null
null
Use image node_id 10 for calling a global function with example usage: differint(ctx, f, x, n, x0) and returns: unknown
119
node_id 10
407,221
diffun
global
null
false
ctx,f,n
null
null
null
null
g,f,ctx
def diffun(ctx, f, n=1, **options):
    r"""
    Given a function `f`, returns a function `g(x)` that evaluates the nth
    derivative `f^{(n)}(x)`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> cos2 = diffun(sin)
        >>> sin2 = diffun(sin, 4)
        >>> cos(1.3), cos2(1.3)
        (0.267498828624587, 0.267498828624587)
        >>> sin(1.3), sin2(1.3)
        (0.963558185417193, 0.963558185417193)

    The function `f` must support arbitrary precision evaluation.
    See :func:`~mpmath.diff` for additional details and supported
    keyword options.
    """
    if n == 0:
        return f

    def g(x):
        return ctx.diff(f, x, n, **options)

    return g
["def","diffun","(","ctx",",","f",",","n=1",",","*","*","options",")",":","r","''","''","''","Given","a","function","`","f","`",",","returns","a","function","`","g","(","x",")","`","that","evaluates","the","nth","derivative","`","f^","{","(","n",")","}","(","x",")","`",":",":",">",">",">","from","mpmath","import","*",">",">",">","mp.dps","=","15",";","mp.pretty","=","True",">",">",">","cos2","=","diffun","(","sin",")",">",">",">","sin2","=","diffun","(","sin",",","4",")",">",">",">","cos","(","1.3",")",",","cos2","(","1.3",")","(","0.267498828624587",",","0.267498828624587",")",">",">",">","sin","(","1.3",")",",","sin2","(","1.3",")","(","0.963558185417193",",","0.963558185417193",")","The","function","`","f","`","must","support","arbitrary","precision","evaluation",".","See",":","func",":","`","~mpmath.diff","`","for","additional","details","and","supported","keyword","options.","``","''","''","if","n","==","0",":","return","f","def","g","(","x",")",":","return","ctx.diff","(","f",",","x",",","n",",","*","*","options",")","return","g"]
522
544
null
differentiation.py
catboost/contrib/python/mpmath/py3/mpmath/calculus/differentiation.py
from ..libmp.backend import xrange
from .calculus import defun
15
null
2
13
null
null
null
Use image node_id 11 for calling a global function with example usage: diffun(ctx, f, n) and returns: g, f, ctx
111
node_id 11
407,222
taylor
global
null
false
ctx,f,x,n
null
null
null
null
unknown,unknown
def taylor(ctx, f, x, n, **options):
    r"""
    Produces a degree-`n` Taylor polynomial around the point `x` of the
    given function `f`. The coefficients are returned as a list.

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> nprint(chop(taylor(sin, 0, 5)))
        [0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333]

    The coefficients are computed using high-order numerical
    differentiation. The function must be possible to evaluate to
    arbitrary precision. See :func:`~mpmath.diff` for additional details
    and supported keyword options.

    Note that to evaluate the Taylor polynomial as an approximation of `f`,
    e.g. with :func:`~mpmath.polyval`, the coefficients must be reversed,
    and the point of the Taylor expansion must be subtracted from
    the argument:

        >>> p = taylor(exp, 2.0, 10)
        >>> polyval(p[::-1], 2.5 - 2.0)
        12.1824939606092
        >>> exp(2.5)
        12.1824939607035

    """
    gen = enumerate(ctx.diffs(f, x, n, **options))
    if options.get("chop", True):
        return [ctx.chop(d) / ctx.factorial(i) for i, d in gen]
    else:
        return [d / ctx.factorial(i) for i, d in gen]
["def","taylor","(","ctx",",","f",",","x",",","n",",","*","*","options",")",":","r","''","''","''","Produces","a","degree-","`","n","`","Taylor","polynomial","around","the","point","`","x","`","of","the","given","function","`","f","`",".","The","coefficients","are","returned","as","a","list",".",">",">",">","from","mpmath","import","*",">",">",">","mp.dps","=","15",";","mp.pretty","=","True",">",">",">","nprint","(","chop","(","taylor","(","sin",",","0",",","5",")",")",")","[","0.0",",","1.0",",","0.0",",","-0.166667",",","0.0",",","0.00833333","]","The","coefficients","are","computed","using","high-order","numerical","differentiation",".","The","function","must","be","possible","to","evaluate","to","arbitrary","precision",".","See",":","func",":","`","~mpmath.diff","`","for","additional","details","and","supported","keyword","options",".","Note","that","to","evaluate","the","Taylor","polynomial","as","an","approximation","of","`","f","`",",","e.g",".","with",":","func",":","`","~mpmath.polyval","`",",","the","coefficients","must","be","reversed",",","and","the","point","of","the","Taylor","expansion","must","be","subtracted","from","the","argument",":",">",">",">","p","=","taylor","(","exp",",","2.0",",","10",")",">",">",">","polyval","(","p","[",":",":-1","]",",","2.5","-","2.0",")","12.1824939606092",">",">",">","exp","(","2.5",")","12.1824939607035","``","''","''","gen","=","enumerate","(","ctx.diffs","(","f",",","x",",","n",",","*","*","options",")",")","if","options.get","(","``","chop","''",",","True",")",":","return","[","ctx.chop","(","d",")","\/","ctx.factorial","(","i",")","for","i",",","d","in","gen","]","else",":","return","[","d","\/","ctx.factorial","(","i",")","for","i",",","d","in","gen","]"]
547
578
null
differentiation.py
catboost/contrib/python/mpmath/py3/mpmath/calculus/differentiation.py
from ..libmp.backend import xrange
from .calculus import defun
15
null
2
13
null
null
null
Use image node_id 12 for calling a global function with example usage: taylor(ctx, f, x, n) and returns: unknown, unknown
121
node_id 12
407,223
__init__
DmsTaskBaseSensor
AwsBaseSensor
true
self,replication_task_arn,target_statuses,termination_statuses
Contains general sensor behavior for DMS task. Subclasses should set ``target_statuses`` and ``termination_statuses`` fields.

:param replication_task_arn: AWS DMS replication task ARN
:param target_statuses: the target statuses, sensor waits until the task reaches any of these states
:param termination_statuses: the termination statuses, sensor fails when the task reaches any of these states
:param aws_conn_id: The Airflow connection used for AWS credentials. If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See: https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
["Contains","general","sensor","behavior","for","DMS","task",".","Subclasses","should","set","``","target_statuses","``","and","``","termination_statuses","``","fields",".",":","param","replication_task_arn",":","AWS","DMS","replication","task","ARN",":","param","target_statuses",":","the","target","statuses",",","sensor","waits","until","the","task","reaches","any","of","these","states",":","param","termination_statuses",":","the","termination","statuses",",","sensor","fails","when","the","task","reaches","any","of","these","states",":","param","aws_conn_id",":","The","Airflow","connection","used","for","AWS","credentials",".","If","this","is","``","None","``","or","empty","then","the","default","boto3","behaviour","is","used",".","If","running","Airflow","in","a","distributed","manner","and","aws_conn_id","is","None","or","empty",",","then","default","boto3","configuration","would","be","used","(","and","must","be","maintained","on","each","worker","node",")",".",":","param","region_name",":","AWS","region_name",".","If","not","specified","then","the","default","boto3","behaviour","is","used",".",":","param","verify",":","Whether","or","not","to","verify","SSL","certificates",".","See",":","https",":","\/\/boto3.amazonaws.com\/v1\/documentation\/api\/latest\/reference\/core\/session.html",":","param","botocore_config",":","Configuration","dictionary","(","key-values",")","for","botocore","client",".","See",":","https",":","\/\/botocore.amazonaws.com\/v1\/documentation\/api\/latest\/reference\/config.html"]
null
null
DmsTaskBaseSensor
def __init__(
    self,
    replication_task_arn: str,
    target_statuses: Iterable[str] | None = None,
    termination_statuses: Iterable[str] | None = None,
    **kwargs,
):
    super().__init__(**kwargs)
    self.replication_task_arn = replication_task_arn
    self.target_statuses: Iterable[str] = target_statuses or []
    self.termination_statuses: Iterable[str] = termination_statuses or []
["def","__init__","(","self",",","replication_task_arn",":","str",",","target_statuses",":","Iterable","[","str","]","|","None","=","None",",","termination_statuses",":","Iterable","[","str","]","|","None","=","None",",","*","*","kwargs",",",")",":","super","(",")",".__init__","(","*","*","kwargs",")","self.replication_task_arn","=","replication_task_arn","self.target_statuses",":","Iterable","[","str","]","=","target_statuses","or","[","]","self.termination_statuses",":","Iterable","[","str","]","=","(","termination_statuses","or","[","]",")"]
59
69
null
dms.py
airflow/airflow/providers/amazon/aws/sensors/dms.py
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable, Sequence
from deprecated import deprecated
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning, AirflowSkipException
from airflow.providers.amazon.aws.hooks.dms import DmsHook
from airflow.providers.amazon.aws.sensors.base_aws import AwsBaseSensor
from airflow.providers.amazon.aws.utils.mixins import aws_template_fields
15
2
7
0
2
3
1
Use image node_id 1 to create a new DmsTaskBaseSensor object from inherited base classes: AwsBaseSensor with example: obj = DmsTaskBaseSensor(replication_task_arn, target_statuses, termination_statuses)
202
node_id 1
248,289
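A hedged subclass sketch showing how the two status fields this __init__ initializes are meant to be filled in by subclasses; the class name, status strings, and ARN below are illustrative, not values taken from the Airflow provider:

from airflow.providers.amazon.aws.sensors.dms import DmsTaskBaseSensor

class DmsTaskStoppedSensor(DmsTaskBaseSensor):  # hypothetical subclass
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Subclasses are expected to set both status lists.
        self.target_statuses = ["stopped"]
        self.termination_statuses = ["deleting", "failed"]

sensor = DmsTaskStoppedSensor(
    task_id="wait_for_dms_task",  # standard Airflow operator argument
    replication_task_arn="arn:aws:dms:us-east-1:111111111111:task:example",
)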
get_hook
DmsTaskBaseSensor
AwsBaseSensor
true
self
Contains general sensor behavior for DMS task. Subclasses should set ``target_statuses`` and ``termination_statuses`` fields.

:param replication_task_arn: AWS DMS replication task ARN
:param target_statuses: the target statuses, sensor waits until the task reaches any of these states
:param termination_statuses: the termination statuses, sensor fails when the task reaches any of these states
:param aws_conn_id: The Airflow connection used for AWS credentials. If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See: https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
["Contains","general","sensor","behavior","for","DMS","task",".","Subclasses","should","set","``","target_statuses","``","and","``","termination_statuses","``","fields",".",":","param","replication_task_arn",":","AWS","DMS","replication","task","ARN",":","param","target_statuses",":","the","target","statuses",",","sensor","waits","until","the","task","reaches","any","of","these","states",":","param","termination_statuses",":","the","termination","statuses",",","sensor","fails","when","the","task","reaches","any","of","these","states",":","param","aws_conn_id",":","The","Airflow","connection","used","for","AWS","credentials",".","If","this","is","``","None","``","or","empty","then","the","default","boto3","behaviour","is","used",".","If","running","Airflow","in","a","distributed","manner","and","aws_conn_id","is","None","or","empty",",","then","default","boto3","configuration","would","be","used","(","and","must","be","maintained","on","each","worker","node",")",".",":","param","region_name",":","AWS","region_name",".","If","not","specified","then","the","default","boto3","behaviour","is","used",".",":","param","verify",":","Whether","or","not","to","verify","SSL","certificates",".","See",":","https",":","\/\/boto3.amazonaws.com\/v1\/documentation\/api\/latest\/reference\/core\/session.html",":","param","botocore_config",":","Configuration","dictionary","(","key-values",")","for","botocore","client",".","See",":","https",":","\/\/botocore.amazonaws.com\/v1\/documentation\/api\/latest\/reference\/config.html"]
Get DmsHook.
["Get","DmsHook","."]
self
def get_hook(self) -> DmsHook:
    """Get DmsHook."""
    return self.hook
["def","get_hook","(","self",")","-",">","DmsHook",":","``","''","''","Get","DmsHook",".","''","''","''","return","self.hook"]
72
74
null
dms.py
airflow/airflow/providers/amazon/aws/sensors/dms.py
from __future__ import annotations
from typing import TYPE_CHECKING, Iterable, Sequence
from deprecated import deprecated
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning, AirflowSkipException
from airflow.providers.amazon.aws.hooks.dms import DmsHook
from airflow.providers.amazon.aws.sensors.base_aws import AwsBaseSensor
from airflow.providers.amazon.aws.utils.mixins import aws_template_fields
15
2
7
0
2
3
1
Use image node_id 2 for calling the DmsTaskBaseSensor obj's underlying member method code with example usage: obj.get_hook() and returns: self
142
node_id 2
248,290
pade
global
null
false
ctx,a,L,M
null
null
null
null
p, q,list, list,a, list
def pade(ctx, a, L, M):
    r"""
    Computes a Pade approximation of degree `(L, M)` to a function.
    Given at least `L+M+1` Taylor coefficients `a` approximating
    a function `A(x)`, :func:`~mpmath.pade` returns coefficients of
    polynomials `P, Q` satisfying

    .. math ::

        P = \sum_{k=0}^L p_k x^k

        Q = \sum_{k=0}^M q_k x^k

        Q_0 = 1

        A(x) Q(x) = P(x) + O(x^{L+M+1})

    `P(x)/Q(x)` can provide a good approximation to an analytic function
    beyond the radius of convergence of its Taylor series (example from
    G.A. Baker 'Essentials of Pade Approximants' Academic Press, Ch.1A)::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> one = mpf(1)
        >>> def f(x):
        ...     return sqrt((one + 2*x)/(one + x))
        ...
        >>> a = taylor(f, 0, 6)
        >>> p, q = pade(a, 3, 3)
        >>> x = 10
        >>> polyval(p[::-1], x)/polyval(q[::-1], x)
        1.38169105566806
        >>> f(x)
        1.38169855941551

    """
    # To determine L+1 coefficients of P and M coefficients of Q,
    # L+M+1 coefficients of A must be provided
    if len(a) < L + M + 1:
        raise ValueError("L+M+1 Coefficients should be provided")

    if M == 0:
        if L == 0:
            return [ctx.one], [ctx.one]
        else:
            return a[: L + 1], [ctx.one]

    # Solve first
    # a[L]*q[1] + ... + a[L-M+1]*q[M] = -a[L+1]
    # ...
    # a[L+M-1]*q[1] + ... + a[L]*q[M] = -a[L+M]
    A = ctx.matrix(M)
    for j in range(M):
        for i in range(min(M, L + j + 1)):
            A[j, i] = a[L + j - i]
    v = -ctx.matrix(a[(L + 1) : (L + M + 1)])
    x = ctx.lu_solve(A, v)
    q = [ctx.one] + list(x)
    # compute p
    p = [0] * (L + 1)
    for i in range(L + 1):
        s = a[i]
        for j in range(1, min(M, i) + 1):
            s += q[j] * a[i - j]
        p[i] = s
    return p, q
["def","pade","(","ctx",",","a",",","L",",","M",")",":","r","''","''","''","Computes","a","Pade","approximation","of","degree","`","(","L",",","M",")","`","to","a","function",".","Given","at","least","`","L+M+1","`","Taylor","coefficients","`","a","`","approximating","a","function","`","A","(","x",")","`",",",":","func",":","`","~mpmath.pade","`","returns","coefficients","of","polynomials","`","P",",","Q","`","satisfying","..","math",":",":","P","=","\\sum_","{","k=0","}","^L","p_k","x^k","Q","=","\\sum_","{","k=0","}","^M","q_k","x^k","Q_0","=","1","A","(","x",")","Q","(","x",")","=","P","(","x",")","+","O","(","x^","{","L+M+1","}",")","`","P","(","x",")","\/Q","(","x",")","`","can","provide","a","good","approximation","to","an","analytic","function","beyond","the","radius","of","convergence","of","its","Taylor","series","(","example","from","G.A",".","Baker","'Essentials","of","Pade","Approximants","'","Academic","Press",",","Ch.1A",")",":",":",">",">",">","from","mpmath","import","*",">",">",">","mp.dps","=","15",";","mp.pretty","=","True",">",">",">","one","=","mpf","(","1",")",">",">",">","def","f","(","x",")",":","...","return","sqrt","(","(","one","+","2","*","x",")","\/","(","one","+","x",")",")","...",">",">",">","a","=","taylor","(","f",",","0",",","6",")",">",">",">","p",",","q","=","pade","(","a",",","3",",","3",")",">",">",">","x","=","10",">",">",">","polyval","(","p","[",":",":-1","]",",","x",")","\/polyval","(","q","[",":",":-1","]",",","x",")","1.38169105566806",">",">",">","f","(","x",")","1.38169855941551","``","''","''","#","To","determine","L+1","coefficients","of","P","and","M","coefficients","of","Q","#","L+M+1","coefficients","of","A","must","be","provided","if","len","(","a",")","<","L","+","M","+","1",":","raise","ValueError","(","``","L+M+1","Coefficients","should","be","provided","''",")","if","M","==","0",":","if","L","==","0",":","return","[","ctx.one","]",",","[","ctx.one","]","else",":","return","a","[",":","L","+","1","]",",","[","ctx.one","]","#","Solve","first","#","a","[","L","]","*","q","[","1","]","+","...","+","a","[","L-M+1","]","*","q","[","M","]","=","-a","[","L+1","]","#","...","#","a","[","L+M-1","]","*","q","[","1","]","+","...","+","a","[","L","]","*","q","[","M","]","=","-a","[","L+M","]","A","=","ctx.matrix","(","M",")","for","j","in","range","(","M",")",":","for","i","in","range","(","min","(","M",",","L","+","j","+","1",")",")",":","A","[","j",",","i","]","=","a","[","L","+","j","-","i","]","v","=","-ctx.matrix","(","a","[","(","L","+","1",")",":","(","L","+","M","+","1",")","]",")","x","=","ctx.lu_solve","(","A",",","v",")","q","=","[","ctx.one","]","+","list","(","x",")","#","compute","p","p","=","[","0","]","*","(","L","+","1",")","for","i","in","range","(","L","+","1",")",":","s","=","a","[","i","]","for","j","in","range","(","1",",","min","(","M",",","i",")","+","1",")",":","s","+=","q","[","j","]","*","a","[","i","-","j","]","p","[","i","]","=","s","return","p",",","q"]
581
647
null
differentiation.py
catboost/contrib/python/mpmath/py3/mpmath/calculus/differentiation.py
from ..libmp.backend import xrange
from .calculus import defun
15
null
2
13
null
null
null
Use image node_id 13 for calling a global function with example usage: pade(ctx, a, L, M) and returns: p, q, list, list, a, list
131
node_id 13
407,224
run
TfxRunner
null
true
self,pipeline,run_options
Base runner class for TFX. This is the base class for every TFX runner.
["Base","runner","class","for","TFX",".","This","is","the","base","class","for","every","TFX","runner","."]
Runs a TFX pipeline on a specific platform.

Args:
  pipeline: a pipeline.Pipeline instance representing a pipeline definition.
  run_options: an Optional pipeline.RunOptions object. See the class definition pipeline.RunOptions for details. If None, runs the full pipeline.
  **kwargs: extra orchestrator backend-specific keyword arguments.

Returns:
  Optional platform-specific object.
["Runs","a","TFX","pipeline","on","a","specific","platform",".","Args",":","pipeline",":","a","pipeline.Pipeline","instance","representing","a","pipeline","definition",".","run_options",":","an","Optional","pipeline.RunOptions","object",".","See","the","class","definition","pipeline.RunOptions","for","details",".","If","None",",","runs","the","full","pipeline",".","*","*","kwargs",":","extra","orchestrator","backend-specific","keyword","arguments",".","Returns",":","Optional","platform-specific","object","."]
null
def run(
    self,
    pipeline: pipeline_py.Pipeline,
    run_options: Optional[pipeline_py.RunOptions] = None,
    **kwargs: Any,
) -> Optional[Any]:
    """Runs a TFX pipeline on a specific platform.

    Args:
      pipeline: a pipeline.Pipeline instance representing a pipeline
        definition.
      run_options: an Optional pipeline.RunOptions object. See the class
        definition pipeline.RunOptions for details. If None, runs the
        full pipeline.
      **kwargs: extra orchestrator backend-specific keyword arguments.

    Returns:
      Optional platform-specific object.
    """
    pass
["def","run","(","self",",","pipeline",":","pipeline_py.Pipeline",",","run_options",":","Optional","[","pipeline_py.RunOptions","]","=","None",",","*","*","kwargs",":","Any",",",")","-",">","Optional","[","Any","]",":","``","''","''","Runs","a","TFX","pipeline","on","a","specific","platform",".","Args",":","pipeline",":","a","pipeline.Pipeline","instance","representing","a","pipeline","definition",".","run_options",":","an","Optional","pipeline.RunOptions","object",".","See","the","class","definition","pipeline.RunOptions","for","details",".","If","None",",","runs","the","full","pipeline",".","*","*","kwargs",":","extra","orchestrator","backend-specific","keyword","arguments",".","Returns",":","Optional","platform-specific","object.","``","''","''","pass"]
33
51
null
tfx_runner.py
tfx/tfx/orchestration/portable/tfx_runner.py
import abc
from typing import Any, Optional
from tfx.dsl.compiler import compiler
from tfx.dsl.components.base import base_component
from tfx.orchestration import pipeline
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import doc_controls
15
2
7
2
1
1
null
Use image node_id 1 for calling the TfxRunner obj's underlying member method code with example usage: obj.run(pipeline, run_options) without return types
153
node_id 1
2,199,013
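A hedged sketch of the subclass contract this abstract run() implies; EchoRunner and its print body are illustrative rather than a real TFX orchestrator backend, and the pipeline_info.pipeline_name attribute access is an assumption about the pipeline object:

from typing import Any, Optional

from tfx.orchestration import pipeline as pipeline_py
from tfx.orchestration.portable.tfx_runner import TfxRunner

class EchoRunner(TfxRunner):
    def run(
        self,
        pipeline: pipeline_py.Pipeline,
        run_options: Optional[pipeline_py.RunOptions] = None,
        **kwargs: Any,
    ) -> Optional[Any]:
        # A real backend would compile and dispatch the pipeline here.
        print("would run:", pipeline.pipeline_info.pipeline_name)
        return None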
test_split_text_to_chunks_raises_on_invalid_chunk_mode
TestRetrieveUtils
null
true
self
null
null
null
null
null
def test_split_text_to_chunks_raises_on_invalid_chunk_mode(self):
    with pytest.raises(AssertionError):
        split_text_to_chunks("A" * 10000, chunk_mode="bogus_chunk_mode")
["def","test_split_text_to_chunks_raises_on_invalid_chunk_mode","(","self",")",":","with","pytest.raises","(","AssertionError",")",":","split_text_to_chunks","(","``","A","''","*","10000",",","chunk_mode=","''","bogus_chunk_mode","''",")"]
45
47
null
test_retrieve_utils.py
autogen/test/test_retrieve_utils.py
import pytest
import os
15
1
2
0
0
12
null
Use image node_id 2 for calling the TestRetrieveUtils obj's underlying member method code with example usage: obj.test_split_text_to_chunks_raises_on_invalid_chunk_mode() without return types
191
node_id 2
319,447
run_with_ir
IrBasedRunner
TfxRunner
true
self,pipeline,run_options
Base class for IR-based TFX runners.
["Base","class","for","IR-based","TFX","runners","."]
Runs a TFX pipeline on a specific platform.

Args:
  pipeline: a pipeline_pb2.Pipeline instance representing a pipeline definition.
  run_options: Optional args for the run.
  **kwargs: extra orchestrator backend-specific keyword arguments.

Returns:
  Optional platform-specific object.
["Runs","a","TFX","pipeline","on","a","specific","platform",".","Args",":","pipeline",":","a","pipeline_pb2.Pipeline","instance","representing","a","pipeline","definition",".","run_options",":","Optional","args","for","the","run",".","*","*","kwargs",":","extra","orchestrator","backend-specific","keyword","arguments",".","Returns",":","Optional","platform-specific","object","."]
null
def run_with_ir(
    self,
    pipeline: pipeline_pb2.Pipeline,
    run_options: Optional[pipeline_pb2.RunOptions] = None,
    **kwargs: Any,
) -> Optional[Any]:
    """Runs a TFX pipeline on a specific platform.

    Args:
      pipeline: a pipeline_pb2.Pipeline instance representing a pipeline
        definition.
      run_options: Optional args for the run.
      **kwargs: extra orchestrator backend-specific keyword arguments.

    Returns:
      Optional platform-specific object.
    """
    pass
["def","run_with_ir","(","self",",","pipeline",":","pipeline_pb2.Pipeline",",","run_options",":","Optional","[","pipeline_pb2.RunOptions","]","=","None",",","*","*","kwargs",":","Any",",",")","-",">","Optional","[","Any","]",":","``","''","''","Runs","a","TFX","pipeline","on","a","specific","platform",".","Args",":","pipeline",":","a","pipeline_pb2.Pipeline","instance","representing","a","pipeline","definition",".","run_options",":","Optional","args","for","the","run",".","*","*","kwargs",":","extra","orchestrator","backend-specific","keyword","arguments",".","Returns",":","Optional","platform-specific","object.","``","''","''","pass"]
93
110
null
tfx_runner.py
tfx/tfx/orchestration/portable/tfx_runner.py
import abc
from typing import Any, Optional
from tfx.dsl.compiler import compiler
from tfx.dsl.components.base import base_component
from tfx.orchestration import pipeline
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import doc_controls
15
2
7
2
1
2
1
Use image node_id 1 for calling the IrBasedRunner obj's underlying member method code with example usage: obj.run_with_ir(pipeline, run_options) without return types
165
node_id 1
2,199,014
run
IrBasedRunner
TfxRunner
true
self,pipeline,run_options
Base class for IR-based TFX runners.
["Base","class","for","IR-based","TFX","runners","."]
See TfxRunner.
["See","TfxRunner","."]
self
def run(
    self,
    pipeline: pipeline_py.Pipeline,
    run_options: Optional[pipeline_py.RunOptions] = None,
    **kwargs: Any,
) -> Optional[Any]:
    """See TfxRunner."""
    pipeline_pb = _make_pipeline_proto(pipeline)
    if run_options:
        run_options_pb = _run_opts_to_proto(run_options)
    else:
        run_options_pb = None
    return self.run_with_ir(pipeline_pb, run_options=run_options_pb, **kwargs)
["def","run","(","self",",","pipeline",":","pipeline_py.Pipeline",",","run_options",":","Optional","[","pipeline_py.RunOptions","]","=","None",",","*","*","kwargs",":","Any",",",")","-",">","Optional","[","Any","]",":","``","''","''","See","TfxRunner",".","''","''","''","pipeline_pb","=","_make_pipeline_proto","(","pipeline",")","if","run_options",":","run_options_pb","=","_run_opts_to_proto","(","run_options",")","else",":","run_options_pb","=","None","return","self.run_with_ir","(","pipeline_pb",",","run_options=run_options_pb",",","*","*","kwargs",")"]
112
124
null
tfx_runner.py
tfx/tfx/orchestration/portable/tfx_runner.py
import abc
from typing import Any, Optional
from tfx.dsl.compiler import compiler
from tfx.dsl.components.base import base_component
from tfx.orchestration import pipeline
from tfx.proto.orchestration import pipeline_pb2
from tfx.utils import doc_controls
15
2
7
2
1
2
1
Use image node_id 2 for calling the IrBasedRunner obj's underlying member method code with example usage: obj.run(pipeline, run_options) and returns: self
154
node_id 2
2,199,015
get_param_names
Ridge
BaseEstimator,SyncFitMixinLinearModel,DelayedPredictionMixin
true
self
Ridge extends LinearRegression by providing L2 regularization on the coefficients when predicting response y with a linear combination of the predictors in X. It can reduce the variance of the predictors and improves the conditioning of the problem.

cuML's dask Ridge (multi-node multi-GPU) expects dask cuDF DataFrames and provides an eigendecomposition-based algorithm, Eig, to fit a linear model. (SVD, which is more stable than Eig, will be added in an upcoming version.) The Eig algorithm is usually preferred when X is a tall and skinny matrix. As the number of features in X increases, the accuracy of the Eig algorithm drops.

This is an experimental implementation of dask Ridge Regression. It supports input X that has more than one column. Single-column input X will be supported after the SVD algorithm is added in an upcoming version.

Parameters
----------
alpha : float (default = 1.0)
    Regularization strength; must be a positive float. Larger values specify stronger regularization. Array input will be supported later.
solver : {'eig'}
    Eig uses an eigendecomposition of the covariance matrix and is much faster. Other solvers will be supported in the future.
fit_intercept : boolean (default = True)
    If True, Ridge adds an additional term c to correct for the global mean of y, modeling the response as "x * beta + c". If False, the model expects that you have centered the data.
normalize : boolean (default = False)
    If True, the predictors in X will be normalized by dividing by their L2 norm. If False, no scaling will be done.

Attributes
----------
coef_ : array, shape (n_features)
    The estimated coefficients for the linear regression model.
intercept_ : array
    The independent term. If `fit_intercept` is False, will be 0.
["Ridge","extends","LinearRegression","by","providing","L2","regularization","on","the","coefficients","when","predicting","response","y","with","a","linear","combination","of","the","predictors","in","X",".","It","can","reduce","the","variance","of","the","predictors",",","and","improves","the","conditioning","of","the","problem",".","cuML","'s","dask","Ridge","(","multi-node","multi-gpu",")","expects","dask","cuDF","DataFrame","and","provides","an","algorithms",",","Eig",",","to","fit","a","linear","model",".","And","provides","an","eigendecomposition-based","algorithm","to","fit","a","linear","model",".","(","SVD",",","which","is","more","stable","than","eig",",","will","be","added","in","an","upcoming","version",")","Eig","algorithm","is","usually","preferred","when","the","X","is","a","tall","and","skinny","matrix",".","As","the","number","of","features","in","X","increases",",","the","accuracy","of","Eig","algorithm","drops",".","This","is","an","experimental","implementation","of","dask","Ridge","Regression",".","It","supports","input","X","that","has","more","than","one","column",".","Single","column","input","X","will","be","supported","after","SVD","algorithm","is","added","in","an","upcoming","version",".","Parameters","--","--","--","--","--","alpha",":","float","(","default","=","1.0",")","Regularization","strength","-","must","be","a","positive","float",".","Larger","values","specify","stronger","regularization",".","Array","input","will","be","supported","later",".","solver",":","{","'eig","'","}","Eig","uses","a","eigendecomposition","of","the","covariance","matrix",",","and","is","much","faster",".","Other","solvers","will","be","supported","in","the","future",".","fit_intercept",":","boolean","(","default","=","True",")","If","True",",","Ridge","adds","an","additional","term","c","to","correct","for","the","global","mean","of","y",",","modeling","the","response","as","``","x","*","beta","+","c","''",".","If","False",",","the","model","expects","that","you","have","centered","the","data",".","normalize",":","boolean","(","default","=","False",")","If","True",",","the","predictors","in","X","will","be","normalized","by","dividing","by","it","'s","L2","norm",".","If","False",",","no","scaling","will","be","done",".","Attributes","--","--","--","--","--","coef_",":","array",",","shape","(","n_features",")","The","estimated","coefficients","for","the","linear","regression","model",".","intercept_",":","array","The","independent","term",".","If","`","fit_intercept","`","is","False",",","will","be","0","."]
null
null
list
def get_param_names(self):
    return list(self.kwargs.keys())
["def","get_param_names","(","self",")",":","return","list","(","self.kwargs.keys","(",")",")"]
117
118
null
ridge.py
cuml/python/cuml/dask/linear_model/ridge.py
from cuml.dask.common.base import BaseEstimator
from cuml.dask.common.base import DelayedPredictionMixin
from cuml.dask.common.base import mnmg_import
from cuml.dask.common.base import SyncFitMixinLinearModel
from raft_dask.common.comms import get_raft_comm_state
from dask.distributed import get_worker
15
1
6
0
3
5
3
Use image node_id 4 for calling the Ridge obj's underlying member method code with example usage: obj.get_param_names() and returns: list
137
node_id 4
688,429
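A hedged usage sketch for get_param_names(); it assumes the constructor keyword arguments land in self.kwargs as the one-line body implies, and that cuML with a Dask cluster is available at import time:

from cuml.dask.linear_model import Ridge

model = Ridge(alpha=1.0, fit_intercept=True, normalize=False)
# get_param_names() simply echoes the stored constructor kwargs:
print(model.get_param_names())  # expected: ['alpha', 'fit_intercept', 'normalize']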
__call__
ClipToTensor
object
true
self,clip
Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255] to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0]
["Convert","a","list","of","m","(","H","x","W","x","C",")","numpy.ndarrays","in","the","range","[","0",",","255","]","to","a","torch.FloatTensor","of","shape","(","C","x","m","x","H","x","W",")","in","the","range","[","0",",","1.0","]"]
Args: clip (list of numpy.ndarray): clip (list of images) to be converted to tensor.
["Args",":","clip","(","list","of","numpy.ndarray",")",":","clip","(","list","of","images",")","to","be","converted","to","tensor","."]
np_clip,tensor_clip
def __call__(self, clip):
    """
    Args:
        clip (list of numpy.ndarray): clip (list of images)
        to be converted to tensor.
    """
    # Retrieve shape
    if isinstance(clip[0], np.ndarray):
        h, w, ch = clip[0].shape
        assert ch == self.channel_nb, "Got {0} instead of 3 channels".format(ch)
    elif isinstance(clip[0], Image.Image):
        w, h = clip[0].size
    else:
        raise TypeError(
            "Expected numpy.ndarray or PIL.Image\
            but got list of {0}".format(type(clip[0]))
        )

    np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)])

    # Convert
    for img_idx, img in enumerate(clip):
        if isinstance(img, np.ndarray):
            pass
        elif isinstance(img, Image.Image):
            img = np.array(img, copy=False)
        else:
            raise TypeError(
                "Expected numpy.ndarray or PIL.Image\
                but got list of {0}".format(type(clip[0]))
            )
        img = imageutils.convert_img(img)
        np_clip[:, img_idx, :, :] = img
    if self.numpy:
        if self.div_255:
            np_clip = np_clip / 255.0
        return np_clip
    else:
        tensor_clip = torch.from_numpy(np_clip)
        if not isinstance(tensor_clip, torch.FloatTensor):
            tensor_clip = tensor_clip.float()
        if self.div_255:
            tensor_clip = torch.div(tensor_clip, 255)
        return tensor_clip
["def","__call__","(","self",",","clip",")",":","``","''","''","Args",":","clip","(","list","of","numpy.ndarray",")",":","clip","(","list","of","images",")","to","be","converted","to","tensor.","``","''","''","#","Retrieve","shape","if","isinstance","(","clip","[","0","]",",","np.ndarray",")",":","h",",","w",",","ch","=","clip","[","0","]",".shape","assert","(","ch","==","self.channel_nb",")",",","``","Got","{","0","}","instead","of","3","channels","''",".format","(","ch",")","elif","isinstance","(","clip","[","0","]",",","Image.Image",")",":","w",",","h","=","clip","[","0","]",".size","else",":","raise","TypeError","(","``","Expected","numpy.ndarray","or","PIL.Image\\","but","got","list","of","{","0","}","''",".format","(","type","(","clip","[","0","]",")",")",")","np_clip","=","np.zeros","(","[","self.channel_nb",",","len","(","clip",")",",","int","(","h",")",",","int","(","w",")","]",")","#","Convert","for","img_idx",",","img","in","enumerate","(","clip",")",":","if","isinstance","(","img",",","np.ndarray",")",":","pass","elif","isinstance","(","img",",","Image.Image",")",":","img","=","np.array","(","img",",","copy=False",")","else",":","raise","TypeError","(","``","Expected","numpy.ndarray","or","PIL.Image\\","but","got","list","of","{","0","}","''",".format","(","type","(","clip","[","0","]",")",")",")","img","=","imageutils.convert_img","(","img",")","np_clip","[",":",",","img_idx",",",":",",",":","]","=","img","if","self.numpy",":","if","self.div_255",":","np_clip","=","np_clip","\/","255.0","return","np_clip","else",":","tensor_clip","=","torch.from_numpy","(","np_clip",")","if","not","isinstance","(","tensor_clip",",","torch.FloatTensor",")",":","tensor_clip","=","tensor_clip.float","(",")","if","self.div_255",":","tensor_clip","=","torch.div","(","tensor_clip",",","255",")","return","tensor_clip"]
19
60
null
volume_transforms.py
gluon-cv/gluoncv/torch/data/transforms/videotransforms/volume_transforms.py
import numpy as np from PIL import Image import torch from .utils import images as imageutils
15
3
4
0
3
2
1
Use image node_id 2 for calling the ClipToTensor obj's underlying member method code with example usage: obj.__call__(clip) and returns: np_clip, tensor_clip
157
node_id 2
1,095,764
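A minimal usage sketch for the ClipToTensor record above. The constructor defaults (channel_nb=3, div_255=True, numpy=False) are assumed to mirror the sibling ClipToTensor_K.__init__ shown below, and the frame count and size are illustrative, not taken from the source.
import numpy as np
from gluoncv.torch.data.transforms.videotransforms.volume_transforms import ClipToTensor

# Fake clip: 4 RGB frames as H x W x C uint8 arrays in [0, 255].
clip = [np.random.randint(0, 256, size=(112, 112, 3), dtype=np.uint8) for _ in range(4)]
to_tensor = ClipToTensor()     # assumed defaults: channel_nb=3, div_255=True, numpy=False
tensor_clip = to_tensor(clip)  # torch.FloatTensor of shape (3, 4, 112, 112), values in [0, 1]
print(tensor_clip.shape, float(tensor_clip.max()))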
__init__
ClipToTensor_K
object
true
self,channel_nb,div_255,numpy
Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255] to a torch.FloatTensor of shape (C x m x H x W) in the range [-1.0, 1.0]
["Convert","a","list","of","m","(","H","x","W","x","C",")","numpy.ndarrays","in","the","range","[","0",",","255","]","to","a","torch.FloatTensor","of","shape","(","C","x","m","x","H","x","W",")","in","the","range","[","-1.0",",","1.0","]"]
null
null
ClipToTensor_K
def __init__(self, channel_nb=3, div_255=True, numpy=False): self.channel_nb = channel_nb self.div_255 = div_255 self.numpy = numpy
["def","__init__","(","self",",","channel_nb=3",",","div_255=True",",","numpy=False",")",":","self.channel_nb","=","channel_nb","self.div_255","=","div_255","self.numpy","=","numpy"]
69
72
null
volume_transforms.py
gluon-cv/gluoncv/torch/data/transforms/videotransforms/volume_transforms.py
import numpy as np from PIL import Image import torch from .utils import images as imageutils
15
3
4
0
3
2
1
Use image node_id 1 to create a new ClipToTensor_K object from inherited base classes: object with example: obj = ClipToTensor_K(channel_nb, div_255, numpy)
156
node_id 1
1,095,765
__call__
ClipToTensor_K
object
true
self,clip
Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255] to a torch.FloatTensor of shape (C x m x H x W) in the range [-1.0, 1.0]
["Convert","a","list","of","m","(","H","x","W","x","C",")","numpy.ndarrays","in","the","range","[","0",",","255","]","to","a","torch.FloatTensor","of","shape","(","C","x","m","x","H","x","W",")","in","the","range","[","-1.0",",","1.0","]"]
Args: clip (list of numpy.ndarray): clip (list of images) to be converted to tensor.
["Args",":","clip","(","list","of","numpy.ndarray",")",":","clip","(","list","of","images",")","to","be","converted","to","tensor","."]
np_clip,tensor_clip
def __call__(self, clip): """ Args: clip (list of numpy.ndarray): clip (list of images) to be converted to tensor. """ # Retrieve shape if isinstance(clip[0], np.ndarray): h, w, ch = clip[0].shape assert ( ch == self.channel_nb ), "Got {0} instead of 3 channels".format(ch) elif isinstance(clip[0], Image.Image): w, h = clip[0].size else: raise TypeError( "Expected numpy.ndarray or PIL.Image\ but got list of {0}".format( type(clip[0]) ) ) np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)]) # Convert for img_idx, img in enumerate(clip): if isinstance(img, np.ndarray): pass elif isinstance(img, Image.Image): img = np.array(img, copy=False) else: raise TypeError( "Expected numpy.ndarray or PIL.Image\ but got list of {0}".format( type(clip[0]) ) ) img = imageutils.convert_img(img) np_clip[:, img_idx, :, :] = img if self.numpy: if self.div_255: np_clip = (np_clip - 127.5) / 127.5 return np_clip else: tensor_clip = torch.from_numpy(np_clip) if not isinstance(tensor_clip, torch.FloatTensor): tensor_clip = tensor_clip.float() if self.div_255: tensor_clip = torch.div( torch.sub(tensor_clip, 127.5), 127.5 ) return tensor_clip
["def","__call__","(","self",",","clip",")",":","``","''","''","Args",":","clip","(","list","of","numpy.ndarray",")",":","clip","(","list","of","images",")","to","be","converted","to","tensor.","``","''","''","#","Retrieve","shape","if","isinstance","(","clip","[","0","]",",","np.ndarray",")",":","h",",","w",",","ch","=","clip","[","0","]",".shape","assert","(","ch","==","self.channel_nb",")",",","``","Got","{","0","}","instead","of","3","channels","''",".format","(","ch",")","elif","isinstance","(","clip","[","0","]",",","Image.Image",")",":","w",",","h","=","clip","[","0","]",".size","else",":","raise","TypeError","(","``","Expected","numpy.ndarray","or","PIL.Image\\","but","got","list","of","{","0","}","''",".format","(","type","(","clip","[","0","]",")",")",")","np_clip","=","np.zeros","(","[","self.channel_nb",",","len","(","clip",")",",","int","(","h",")",",","int","(","w",")","]",")","#","Convert","for","img_idx",",","img","in","enumerate","(","clip",")",":","if","isinstance","(","img",",","np.ndarray",")",":","pass","elif","isinstance","(","img",",","Image.Image",")",":","img","=","np.array","(","img",",","copy=False",")","else",":","raise","TypeError","(","``","Expected","numpy.ndarray","or","PIL.Image\\","but","got","list","of","{","0","}","''",".format","(","type","(","clip","[","0","]",")",")",")","img","=","imageutils.convert_img","(","img",")","np_clip","[",":",",","img_idx",",",":",",",":","]","=","img","if","self.numpy",":","if","self.div_255",":","np_clip","=","(","np_clip","-","127.5",")","\/","127.5","return","np_clip","else",":","tensor_clip","=","torch.from_numpy","(","np_clip",")","if","not","isinstance","(","tensor_clip",",","torch.FloatTensor",")",":","tensor_clip","=","tensor_clip.float","(",")","if","self.div_255",":","tensor_clip","=","torch.div","(","torch.sub","(","tensor_clip",",","127.5",")",",","127.5",")","return","tensor_clip"]
74
115
null
volume_transforms.py
gluon-cv/gluoncv/torch/data/transforms/videotransforms/volume_transforms.py
import numpy as np from PIL import Image import torch from .utils import images as imageutils
15
3
4
0
3
2
1
Use image node_id 2 for calling the ClipToTensor_K obj's underlying member method code with example usage: obj.__call__(clip) and returns: np_clip, tensor_clip
159
node_id 2
1,095,766
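The _K variant differs from ClipToTensor only in its scaling: with div_255=True it maps every pixel through (x - 127.5) / 127.5, so outputs land in roughly [-1, 1] rather than [0, 1]. A short sketch, reusing the same illustrative fake clip as above:
import numpy as np
from gluoncv.torch.data.transforms.videotransforms.volume_transforms import ClipToTensor_K

clip = [np.random.randint(0, 256, size=(112, 112, 3), dtype=np.uint8) for _ in range(4)]
to_tensor = ClipToTensor_K(numpy=True)  # return an ndarray instead of a torch tensor
np_clip = to_tensor(clip)               # shape (3, 4, 112, 112), values in about [-1, 1]
print(np_clip.shape, np_clip.min(), np_clip.max())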
__call__
ToTensor
object
true
self,array
Converts numpy array to tensor
["Converts","numpy","array","to","tensor"]
null
null
tensor
def __call__(self, array): tensor = torch.from_numpy(array) return tensor
["def","__call__","(","self",",","array",")",":","tensor","=","torch.from_numpy","(","array",")","return","tensor"]
122
124
null
volume_transforms.py
gluon-cv/gluoncv/torch/data/transforms/videotransforms/volume_transforms.py
import numpy as np from PIL import Image import torch from .utils import images as imageutils
15
3
4
0
3
1
1
Use image node_id 1 for calling the ToTensor obj's underlying member method code with example usage: obj.__call__(array) and returns: tensor
140
node_id 1
1,095,767
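ToTensor.__call__ is a bare torch.from_numpy wrapper, so the resulting tensor shares memory with the input array. A small demonstration of that zero-copy behavior:
import numpy as np
import torch

arr = np.arange(6, dtype=np.float32).reshape(2, 3)
tensor = torch.from_numpy(arr)      # identical to ToTensor()(arr)
arr[0, 0] = 42.0                    # mutate the source array ...
assert tensor[0, 0].item() == 42.0  # ... and the tensor sees it: shared buffer, no copy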
__init__
VideoClsDataset
Dataset
true
self,anno_path,data_path,mode,clip_len,frame_sample_rate,crop_size,short_side_size,new_height,new_width,keep_aspect_ratio,num_segment,num_crop,test_num_segment,test_num_crop,use_multigrid
Load your own video classification dataset.
["Load","your","own","video","classification","dataset","."]
null
null
VideoClsDataset
def __init__( self, anno_path, data_path, mode="train", clip_len=8, frame_sample_rate=2, crop_size=224, short_side_size=256, new_height=256, new_width=340, keep_aspect_ratio=False, num_segment=1, num_crop=1, test_num_segment=10, test_num_crop=3, use_multigrid=False, ): self.anno_path = anno_path self.data_path = data_path self.mode = mode self.clip_len = clip_len self.frame_sample_rate = frame_sample_rate self.crop_size = crop_size self.short_side_size = short_side_size self.new_height = new_height self.new_width = new_width self.keep_aspect_ratio = keep_aspect_ratio self.num_segment = num_segment self.test_num_segment = test_num_segment self.num_crop = num_crop self.test_num_crop = test_num_crop self.use_multigrid = use_multigrid and (mode == "train") if VideoReader is None: raise ImportError( "Unable to import `decord` which is required to read videos." ) import pandas as pd cleaned = pd.read_csv(self.anno_path, header=None, delimiter=" ") self.dataset_samples = list(cleaned.values[:, 0]) self.label_array = list(cleaned.values[:, 2]) if mode == "train": if self.use_multigrid: self.mg_helper = multiGridHelper() self.data_transform = [] for alpha in range(self.mg_helper.mod_long): tmp = [] for beta in range(self.mg_helper.mod_short): info = self.mg_helper.get_resize(alpha, beta) scale_s = info[1] tmp.append( video_transforms.Compose( [ video_transforms.Resize( int( self.short_side_size / scale_s ), interpolation="bilinear", ), # TODO: multiscale corner cropping video_transforms.RandomResize( ratio=(1, 1.25), interpolation="bilinear", ), video_transforms.RandomCrop( size=( int(self.crop_size / scale_s), int(self.crop_size / scale_s), ) ), ] ) ) self.data_transform.append(tmp) else: self.data_transform = video_transforms.Compose( [ video_transforms.Resize( int(self.short_side_size), interpolation="bilinear", ), video_transforms.RandomResize( ratio=(1, 1.25), interpolation="bilinear" ), video_transforms.RandomCrop( size=( int(self.crop_size), int(self.crop_size), ) ), ] ) self.data_transform_after = video_transforms.Compose( [ video_transforms.RandomHorizontalFlip(), volume_transforms.ClipToTensor(), video_transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], ), ] ) elif mode == "validation": self.data_transform = video_transforms.Compose( [ video_transforms.Resize( self.short_side_size, interpolation="bilinear" ), video_transforms.CenterCrop( size=(self.crop_size, self.crop_size) ), volume_transforms.ClipToTensor(), video_transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], ), ] ) elif mode == "test": self.data_resize = video_transforms.Compose( [ video_transforms.Resize( size=(short_side_size), interpolation="bilinear" ) ] ) self.data_transform = video_transforms.Compose( [ volume_transforms.ClipToTensor(), video_transforms.Normalize( mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], ), ] ) self.test_seg = [] self.test_dataset = [] self.test_label_array = [] for ck in range(self.test_num_segment): for cp in range(self.test_num_crop): for idx in range(len(self.label_array)): sample_label = self.label_array[idx] self.test_label_array.append(sample_label) self.test_dataset.append( self.dataset_samples[idx] ) self.test_seg.append((ck, cp))
["def","__init__","(","self",",","anno_path",",","data_path",",","mode=","''","train","''",",","clip_len=8",",","frame_sample_rate=2",",","crop_size=224",",","short_side_size=256",",","new_height=256",",","new_width=340",",","keep_aspect_ratio=False",",","num_segment=1",",","num_crop=1",",","test_num_segment=10",",","test_num_crop=3",",","use_multigrid=False",",",")",":","self.anno_path","=","anno_path","self.data_path","=","data_path","self.mode","=","mode","self.clip_len","=","clip_len","self.frame_sample_rate","=","frame_sample_rate","self.crop_size","=","crop_size","self.short_side_size","=","short_side_size","self.new_height","=","new_height","self.new_width","=","new_width","self.keep_aspect_ratio","=","keep_aspect_ratio","self.num_segment","=","num_segment","self.test_num_segment","=","test_num_segment","self.num_crop","=","num_crop","self.test_num_crop","=","test_num_crop","self.use_multigrid","=","use_multigrid","and","(","mode","==","``","train","''",")","if","VideoReader","is","None",":","raise","ImportError","(","``","Unable","to","import","`","decord","`","which","is","required","to","read","videos",".","''",")","import","pandas","as","pd","cleaned","=","pd.read_csv","(","self.anno_path",",","header=None",",","delimiter=","''","``",")","self.dataset_samples","=","list","(","cleaned.values","[",":",",","0","]",")","self.label_array","=","list","(","cleaned.values","[",":",",","2","]",")","if","mode","==","``","train","''",":","if","self.use_multigrid",":","self.mg_helper","=","multiGridHelper","(",")","self.data_transform","=","[","]","for","alpha","in","range","(","self.mg_helper.mod_long",")",":","tmp","=","[","]","for","beta","in","range","(","self.mg_helper.mod_short",")",":","info","=","self.mg_helper.get_resize","(","alpha",",","beta",")","scale_s","=","info","[","1","]","tmp.append","(","video_transforms.Compose","(","[","video_transforms.Resize","(","int","(","self.short_side_size","\/","scale_s",")",",","interpolation=","''","bilinear","''",",",")",",","#","TODO",":","multiscale","corner","cropping","video_transforms.RandomResize","(","ratio=","(","1",",","1.25",")",",","interpolation=","''","bilinear","''",",",")",",","video_transforms.RandomCrop","(","size=","(","int","(","self.crop_size","\/","scale_s",")",",","int","(","self.crop_size","\/","scale_s",")",",",")",")",",","]",")",")","self.data_transform.append","(","tmp",")","else",":","self.data_transform","=","video_transforms.Compose","(","[","video_transforms.Resize","(","int","(","self.short_side_size",")",",","interpolation=","''","bilinear","''",",",")",",","video_transforms.RandomResize","(","ratio=","(","1",",","1.25",")",",","interpolation=","''","bilinear","''",")",",","video_transforms.RandomCrop","(","size=","(","int","(","self.crop_size",")",",","int","(","self.crop_size",")",",",")",")",",","]",")","self.data_transform_after","=","video_transforms.Compose","(","[","video_transforms.RandomHorizontalFlip","(",")",",","volume_transforms.ClipToTensor","(",")",",","video_transforms.Normalize","(","mean=","[","0.485",",","0.456",",","0.406","]",",","std=","[","0.229",",","0.224",",","0.225","]",",",")",",","]",")","elif","mode","==","``","validation","''",":","self.data_transform","=","video_transforms.Compose","(","[","video_transforms.Resize","(","self.short_side_size",",","interpolation=","''","bilinear","''",")",",","video_transforms.CenterCrop","(","size=","(","self.crop_size",",","self.crop_size",")",")",",","volume_transforms.ClipToTensor","(",")",",","video_transforms.Normalize","(","mean=","[","0.48
5",",","0.456",",","0.406","]",",","std=","[","0.229",",","0.224",",","0.225","]",",",")",",","]",")","elif","mode","==","``","test","''",":","self.data_resize","=","video_transforms.Compose","(","[","video_transforms.Resize","(","size=","(","short_side_size",")",",","interpolation=","''","bilinear","''",")","]",")","self.data_transform","=","video_transforms.Compose","(","[","volume_transforms.ClipToTensor","(",")",",","video_transforms.Normalize","(","mean=","[","0.485",",","0.456",",","0.406","]",",","std=","[","0.229",",","0.224",",","0.225","]",",",")",",","]",")","self.test_seg","=","[","]","self.test_dataset","=","[","]","self.test_label_array","=","[","]","for","ck","in","range","(","self.test_num_segment",")",":","for","cp","in","range","(","self.test_num_crop",")",":","for","idx","in","range","(","len","(","self.label_array",")",")",":","sample_label","=","self.label_array","[","idx","]","self.test_label_array.append","(","sample_label",")","self.test_dataset.append","(","self.dataset_samples","[","idx","]",")","self.test_seg.append","(","(","ck",",","cp",")",")"]
24
111
null
dataset_classification.py
gluon-cv/gluoncv/torch/data/video_cls/dataset_classification.py
import os import warnings import numpy import torch from torch.utils.data import Dataset from ..transforms.videotransforms import video_transforms, volume_transforms from .multigrid_helper import multiGridHelper, MultiGridBatchSampler
15
1
7
2
1
4
1
Use image node_id 1 to create a new VideoClsDataset object from inherited base classes: Dataset with example: obj = VideoClsDataset(anno_path, data_path, mode, clip_len, frame_sample_rate, crop_size, short_side_size, new_height, new_width, keep_aspect_ratio, num_segment, num_crop, test_num_segment, test_num_crop, use_multigrid)
329
node_id 1
1,095,768
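A construction sketch for VideoClsDataset, assuming decord is installed (the constructor raises ImportError when VideoReader is None) and an annotation file whose space-separated rows carry the video path in column 0 and the label in column 2, as implied by the pd.read_csv call; the paths below are hypothetical.
from gluoncv.torch.data.video_cls.dataset_classification import VideoClsDataset

dataset = VideoClsDataset(
    anno_path="train_anno.txt",  # hypothetical: "<video_path> <unused> <label>" per line
    data_path="/data/videos",    # hypothetical root directory
    mode="train",
    clip_len=8,
    crop_size=224,
)
print(len(dataset.dataset_samples), dataset.label_array[:3])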
on_startup
global
null
false
null
null
null
null
null
def on_startup(): create_db_and_tables()
["def","on_startup","(",")",":","create_db_and_tables","(",")"]
38
39
null
tutorial001_py310.py
sqlmodel/docs_src/tutorial/fastapi/read_one/tutorial001_py310.py
from fastapi import FastAPI, HTTPException from sqlmodel import Field, Session, SQLModel, create_engine, select
15
null
2
5
null
null
null
Use image node_id 2 for calling a global function with example usage: on_startup() without return types
103
node_id 2
1,989,840
create_hero
global
null
false
hero
null
null
null
null
db_hero
def create_hero(hero: HeroCreate): with Session(engine) as session: db_hero = Hero.model_validate(hero) session.add(db_hero) session.commit() session.refresh(db_hero) return db_hero
["def","create_hero","(","hero",":","HeroCreate",")",":","with","Session","(","engine",")","as","session",":","db_hero","=","Hero.model_validate","(","hero",")","session.add","(","db_hero",")","session.commit","(",")","session.refresh","(","db_hero",")","return","db_hero"]
43
49
null
tutorial001_py310.py
sqlmodel/docs_src/tutorial/fastapi/read_one/tutorial001_py310.py
from fastapi import FastAPI, HTTPException from sqlmodel import Field, Session, SQLModel, create_engine, select
15
null
2
5
null
null
null
Use image node_id 3 for calling a global function with example usage: create_hero(hero) and returns: db_hero
108
node_id 3
1,989,841
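A hedged end-to-end sketch exercising the two tutorial endpoints above with FastAPI's TestClient. The /heroes/ route and the Hero fields (name, secret_name) follow the usual sqlmodel tutorial layout and are assumptions here, since the route decorators are not shown in these records.
from fastapi.testclient import TestClient
from docs_src.tutorial.fastapi.read_one.tutorial001_py310 import app

client = TestClient(app)
with client:  # entering the client runs startup events, so create_db_and_tables() fires
    response = client.post(
        "/heroes/",  # assumed route for create_hero
        json={"name": "Deadpond", "secret_name": "Dive Wilson"},
    )
    print(response.status_code, response.json())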
transform_observation_features
SearchSpaceToChoice
Transform
true
self,observation_features
Replaces the search space with a single choice parameter, whose values are the signatures of the arms observed in the data. This transform is meant to be used with ThompsonSampler. Choice parameter will be unordered unless config["use_ordered"] specifies otherwise. Transform is done in-place.
["Replaces","the","search","space","with","a","single","choice","parameter",",","whose","values","are","the","signatures","of","the","arms","observed","in","the","data",".","This","transform","is","meant","to","be","used","with","ThompsonSampler",".","Choice","parameter","will","be","unordered","unless","config","[","``","use_ordered","''","]","specifies","otherwise",".","Transform","is","done","in-place","."]
null
null
observation_features
def transform_observation_features( self, observation_features: List[ObservationFeatures] ) -> List[ObservationFeatures]: for obsf in observation_features: obsf.parameters = { self.parameter_name: Arm( parameters=obsf.parameters ).signature } return observation_features
["def","transform_observation_features","(","self",",","observation_features",":","List","[","ObservationFeatures","]",")","-",">","List","[","ObservationFeatures","]",":","for","obsf","in","observation_features",":","obsf.parameters","=","{","self.parameter_name",":","Arm","(","parameters=obsf.parameters",")",".signature","}","return","observation_features"]
83
90
null
search_space_to_choice.py
Ax/ax/modelbridge/transforms/search_space_to_choice.py
from typing import List, Optional, TYPE_CHECKING from ax.core.arm import Arm from ax.core.observation import Observation, ObservationFeatures from ax.core.parameter import ChoiceParameter, FixedParameter, ParameterType from ax.core.search_space import RobustSearchSpace, SearchSpace from ax.exceptions.core import UnsupportedError from ax.modelbridge.transforms.base import Transform from ax.models.types import TConfig from ax.utils.common.typeutils import checked_cast
15
1
9
0
1
4
1
Use image node_id 3 for calling the SearchSpaceToChoice obj's underlying member method code with example usage: obj.transform_observation_features(observation_features) and returns: observation_features
202
node_id 3
9,098
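The loop body above is the whole trick: every parameterization is collapsed to its Arm signature. A minimal sketch using only classes from this record's import list; the "choice_param" key stands in for self.parameter_name, whose actual value is not shown in this record, and the parameter values are illustrative.
from ax.core.arm import Arm
from ax.core.observation import ObservationFeatures

obsf = ObservationFeatures(parameters={"x1": 0.5, "x2": 3})
# Replace all parameters with one choice value: the arm's signature,
# a stable hash string identifying this exact parameterization.
obsf.parameters = {"choice_param": Arm(parameters=obsf.parameters).signature}
print(obsf.parameters)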
_create_model
Ridge
BaseEstimator,SyncFitMixinLinearModel,DelayedPredictionMixin
true
sessionId,datatype
Ridge extends LinearRegression by providing L2 regularization on the coefficients when predicting response y with a linear combination of the predictors in X. It can reduce the variance of the predictors, and improves the conditioning of the problem. cuML's dask Ridge (multi-node multi-gpu) expects dask cuDF DataFrames and provides an eigendecomposition-based algorithm, Eig, to fit a linear model. (SVD, which is more stable than Eig, will be added in an upcoming version.) The Eig algorithm is usually preferred when X is a tall and skinny matrix. As the number of features in X increases, the accuracy of the Eig algorithm drops. This is an experimental implementation of dask Ridge Regression. It supports input X that has more than one column. Single column input X will be supported after the SVD algorithm is added in an upcoming version. Parameters ---------- alpha : float (default = 1.0) Regularization strength - must be a positive float. Larger values specify stronger regularization. Array input will be supported later. solver : {'eig'} Eig uses an eigendecomposition of the covariance matrix, and is much faster. Other solvers will be supported in the future. fit_intercept : boolean (default = True) If True, Ridge adds an additional term c to correct for the global mean of y, modeling the response as "x * beta + c". If False, the model expects that you have centered the data. normalize : boolean (default = False) If True, each predictor in X will be normalized by dividing by its L2 norm. If False, no scaling will be done. Attributes ---------- coef_ : array, shape (n_features) The estimated coefficients for the linear regression model. intercept_ : array The independent term. If `fit_intercept` is False, will be 0.
["Ridge","extends","LinearRegression","by","providing","L2","regularization","on","the","coefficients","when","predicting","response","y","with","a","linear","combination","of","the","predictors","in","X",".","It","can","reduce","the","variance","of","the","predictors",",","and","improves","the","conditioning","of","the","problem",".","cuML","'s","dask","Ridge","(","multi-node","multi-gpu",")","expects","dask","cuDF","DataFrame","and","provides","an","algorithms",",","Eig",",","to","fit","a","linear","model",".","And","provides","an","eigendecomposition-based","algorithm","to","fit","a","linear","model",".","(","SVD",",","which","is","more","stable","than","eig",",","will","be","added","in","an","upcoming","version",")","Eig","algorithm","is","usually","preferred","when","the","X","is","a","tall","and","skinny","matrix",".","As","the","number","of","features","in","X","increases",",","the","accuracy","of","Eig","algorithm","drops",".","This","is","an","experimental","implementation","of","dask","Ridge","Regression",".","It","supports","input","X","that","has","more","than","one","column",".","Single","column","input","X","will","be","supported","after","SVD","algorithm","is","added","in","an","upcoming","version",".","Parameters","--","--","--","--","--","alpha",":","float","(","default","=","1.0",")","Regularization","strength","-","must","be","a","positive","float",".","Larger","values","specify","stronger","regularization",".","Array","input","will","be","supported","later",".","solver",":","{","'eig","'","}","Eig","uses","a","eigendecomposition","of","the","covariance","matrix",",","and","is","much","faster",".","Other","solvers","will","be","supported","in","the","future",".","fit_intercept",":","boolean","(","default","=","True",")","If","True",",","Ridge","adds","an","additional","term","c","to","correct","for","the","global","mean","of","y",",","modeling","the","response","as","``","x","*","beta","+","c","''",".","If","False",",","the","model","expects","that","you","have","centered","the","data",".","normalize",":","boolean","(","default","=","False",")","If","True",",","the","predictors","in","X","will","be","normalized","by","dividing","by","it","'s","L2","norm",".","If","False",",","no","scaling","will","be","done",".","Attributes","--","--","--","--","--","coef_",":","array",",","shape","(","n_features",")","The","estimated","coefficients","for","the","linear","regression","model",".","intercept_",":","array","The","independent","term",".","If","`","fit_intercept","`","is","False",",","will","be","0","."]
null
null
RidgeMG
def _create_model(sessionId, datatype, **kwargs): from cuml.linear_model.ridge_mg import RidgeMG handle = get_raft_comm_state(sessionId, get_worker())["handle"] return RidgeMG(handle=handle, output_type=datatype, **kwargs)
["def","_create_model","(","sessionId",",","datatype",",","*","*","kwargs",")",":","from","cuml.linear_model.ridge_mg","import","RidgeMG","handle","=","get_raft_comm_state","(","sessionId",",","get_worker","(",")",")","[","``","handle","''","]","return","RidgeMG","(","handle=handle",",","output_type=datatype",",","*","*","kwargs",")"]
122
126
null
ridge.py
cuml/python/cuml/dask/linear_model/ridge.py
from cuml.dask.common.base import BaseEstimator from cuml.dask.common.base import DelayedPredictionMixin from cuml.dask.common.base import mnmg_import from cuml.dask.common.base import SyncFitMixinLinearModel from raft_dask.common.comms import get_raft_comm_state from dask.distributed import get_worker
15
1
6
0
3
5
3
Use image node_id 5 for calling the Ridge obj's underlying member method code with example usage: obj._create_model(sessionId, datatype) and returns: RidgeMG
157
node_id 5
688,430
init_states
GPT2Decoder
BaseStepDecoder
true
self,batch_size,ctx
null
null
null
null
self
def init_states(self, batch_size, ctx): return self._gpt2_lm_model.init_states(batch_size, ctx)
["def","init_states","(","self",",","batch_size",",","ctx",")",":","return","self._gpt2_lm_model.init_states","(","batch_size",",","ctx",")"]
51
52
null
interactive_conditional_gpt2_samples.py
gluon-nlp/scripts/generation/interactive_conditional_gpt2_samples.py
import os import mxnet import argparse from gluonnlp.utils import set_seed from gluonnlp.sequence_sampler import BeamSearchSampler, BaseStepDecoder from gluonnlp.models.gpt2 import GPT2ForLM, list_pretrained_gpt2, get_pretrained_gpt2
15
1
6
2
1
5
1
Use image node_id 4 for calling the GPT2Decoder obj's underlying member method code with example usage: obj.init_states(batch_size, ctx) and returns: self
154
node_id 4
1,097,717
diffs_exp
global
null
false
ctx,fdiffs
null
null
null
null
null
def diffs_exp(ctx, fdiffs): r""" Given an iterable or generator yielding `f(x), f'(x), f''(x), \ldots` generate `g(x), g'(x), g''(x), \ldots` where `g(x) = \exp(f(x))`. At high precision and for large orders, this is typically more efficient than numerical differentiation if the derivatives of `f(x)` admit direct computation. Note: This function does not increase the working precision internally, so guard digits may have to be added externally for full accuracy. **Examples** The derivatives of the gamma function can be computed using logarithmic differentiation:: >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> >>> def diffs_loggamma(x): ... yield loggamma(x) ... i = 0 ... while 1: ... yield psi(i,x) ... i += 1 ... >>> u = diffs_exp(diffs_loggamma(3)) >>> v = diffs(gamma, 3) >>> next(u); next(v) 2.0 2.0 >>> next(u); next(v) 1.84556867019693 1.84556867019693 >>> next(u); next(v) 2.49292999190269 2.49292999190269 >>> next(u); next(v) 3.44996501352367 3.44996501352367 """ fn = iterable_to_function(fdiffs) f0 = ctx.exp(fn(0)) yield f0 i = 1 while 1: s = ctx.mpf(0) for powers, c in iteritems(dpoly(i)): s += c * ctx.fprod( fn(k + 1) ** p for (k, p) in enumerate(powers) if p ) yield s * f0 i += 1
["def","diffs_exp","(","ctx",",","fdiffs",")",":","r","''","''","''","Given","an","iterable","or","generator","yielding","`","f","(","x",")",",","f","'","(","x",")",",","f","''","(","x",")",",","\\ldots","`","generate","`","g","(","x",")",",","g","'","(","x",")",",","g","''","(","x",")",",","\\ldots","`","where","`","g","(","x",")","=","\\exp","(","f","(","x",")",")","`",".","At","high","precision","and","for","large","orders",",","this","is","typically","more","efficient","than","numerical","differentiation","if","the","derivatives","of","`","f","(","x",")","`","admit","direct","computation",".","Note",":","This","function","does","not","increase","the","working","precision","internally",",","so","guard","digits","may","have","to","be","added","externally","for","full","accuracy",".","*","*","Examples","*","*","The","derivatives","of","the","gamma","function","can","be","computed","using","logarithmic","differentiation",":",":",">",">",">","from","mpmath","import","*",">",">",">","mp.dps","=","15",";","mp.pretty","=","True",">",">",">",">",">",">","def","diffs_loggamma","(","x",")",":","...","yield","loggamma","(","x",")","...","i","=","0","...","while","1",":","...","yield","psi","(","i",",","x",")","...","i","+=","1","...",">",">",">","u","=","diffs_exp","(","diffs_loggamma","(","3",")",")",">",">",">","v","=","diffs","(","gamma",",","3",")",">",">",">","next","(","u",")",";","next","(","v",")","2.0","2.0",">",">",">","next","(","u",")",";","next","(","v",")","1.84556867019693","1.84556867019693",">",">",">","next","(","u",")",";","next","(","v",")","2.49292999190269","2.49292999190269",">",">",">","next","(","u",")",";","next","(","v",")","3.44996501352367","3.44996501352367","``","''","''","fn","=","iterable_to_function","(","fdiffs",")","f0","=","ctx.exp","(","fn","(","0",")",")","yield","f0","i","=","1","while","1",":","s","=","ctx.mpf","(","0",")","for","powers",",","c","in","iteritems","(","dpoly","(","i",")",")",":","s","+=","c","*","ctx.fprod","(","fn","(","k","+","1",")","*","*","p","for","(","k",",","p",")","in","enumerate","(","powers",")","if","p",")","yield","s","*","f0","i","+=","1"]
394
446
null
differentiation.py
catboost/contrib/python/mpmath/py3/mpmath/calculus/differentiation.py
from ..libmp.backend import xrange from .calculus import defun
15
null
2
13
null
null
null
Use image node_id 9 for calling a global function with example usage: diffs_exp(ctx, fdiffs) without return types
113
node_id 9
407,220
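The doctest inside diffs_exp condenses to a short runnable script; it cross-checks logarithmic differentiation of gamma against direct numerical differentiation:
from mpmath import mp, diffs, diffs_exp, loggamma, psi, gamma

mp.dps = 15

def diffs_loggamma(x):
    # derivatives of log(gamma): loggamma(x), psi(0, x), psi(1, x), ...
    yield loggamma(x)
    i = 0
    while True:
        yield psi(i, x)
        i += 1

u = diffs_exp(diffs_loggamma(3))  # derivatives of exp(loggamma) == gamma
v = diffs(gamma, 3)               # reference values via numerical differentiation
for _ in range(4):
    print(next(u), next(v))       # pairs should agree, e.g. 2.0 2.0 on the first line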
test_split_files_to_chunks
TestRetrieveUtils
null
true
self
null
null
null
null
null
def test_split_files_to_chunks(self): pdf_file_path = os.path.join(test_dir, "example.pdf") txt_file_path = os.path.join(test_dir, "example.txt") chunks = split_files_to_chunks([pdf_file_path, txt_file_path]) assert all( isinstance(chunk, str) and "AutoGen is an advanced tool designed to assist developers" in chunk.strip() for chunk in chunks )
["def","test_split_files_to_chunks","(","self",")",":","pdf_file_path","=","os.path.join","(","test_dir",",","``","example.pdf","''",")","txt_file_path","=","os.path.join","(","test_dir",",","``","example.txt","''",")","chunks","=","split_files_to_chunks","(","[","pdf_file_path",",","txt_file_path","]",")","assert","all","(","isinstance","(","chunk",",","str",")","and","``","AutoGen","is","an","advanced","tool","designed","to","assist","developers","''","in","chunk.strip","(",")","for","chunk","in","chunks",")"]
53
60
null
test_retrieve_utils.py
autogen/test/test_retrieve_utils.py
import pytest import os
15
1
2
0
0
12
null
Use image node_id 4 for calling the TestRetrieveUtils obj's underlying member method code with example usage: obj.test_split_files_to_chunks() without return types
163
node_id 4
319,449
main
global
null
false
null
null
null
null
null
def main(): """Convert standard rttm to sample-based result""" args = get_parser().parse_args() # logging info if args.verbose > 1: logging.basicConfig( level=logging.DEBUG, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) elif args.verbose > 0: logging.basicConfig( level=logging.INFO, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) else: logging.basicConfig( level=logging.WARN, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s", ) logging.warning("Skip DEBUG/INFO messages") sampling_rate = humanfriendly.parse_size(args.sampling_rate) convert_rttm_text( args.rttm, args.wavscp, sampling_rate, args.output_path ) logging.info("Successfully finished RTTM converting.")
["def","main","(",")",":","``","''","''","Convert","standard","rttm","to","sample-based","result","''","''","''","args","=","get_parser","(",")",".parse_args","(",")","#","logging","info","if","args.verbose",">","1",":","logging.basicConfig","(","level=logging.DEBUG",",","format=","''","%","(","asctime",")","s","(","%","(","module",")","s",":","%","(","lineno",")","d",")","%","(","levelname",")","s",":","%","(","message",")","s","''",",",")","elif","args.verbose",">","0",":","logging.basicConfig","(","level=logging.INFO",",","format=","''","%","(","asctime",")","s","(","%","(","module",")","s",":","%","(","lineno",")","d",")","%","(","levelname",")","s",":","%","(","message",")","s","''",",",")","else",":","logging.basicConfig","(","level=logging.WARN",",","format=","''","%","(","asctime",")","s","(","%","(","module",")","s",":","%","(","lineno",")","d",")","%","(","levelname",")","s",":","%","(","message",")","s","''",",",")","logging.warning","(","``","Skip","DEBUG\/INFO","messages","''",")","sampling_rate","=","humanfriendly.parse_size","(","args.sampling_rate",")","convert_rttm_text","(","args.rttm",",","args.wavscp",",","sampling_rate",",","args.output_path",")","logging.info","(","``","Successfully","finished","RTTM","converting",".","''",")"]
111
136
null
convert_rttm.py
espnet/egs2/wsj0_2mix/mixit_enh1/pyscripts/utils/convert_rttm.py
import argparse import collections.abc import logging import os import re from pathlib import Path from typing import Union import humanfriendly import numpy import soundfile from typeguard import check_argument_types from espnet2.utils.types import str_or_int
15
null
12
3
null
null
null
Use image node_id 3 for calling a global function with example usage: main() without return types
97
node_id 3
998,881
test_extract_text_from_pdf
TestRetrieveUtils
null
true
self
null
null
null
null
null
def test_extract_text_from_pdf(self): pdf_file_path = os.path.join(test_dir, "example.pdf") assert "".join(expected_text.split()) == "".join( extract_text_from_pdf(pdf_file_path).strip().split() )
["def","test_extract_text_from_pdf","(","self",")",":","pdf_file_path","=","os.path.join","(","test_dir",",","``","example.pdf","''",")","assert","``","''",".join","(","expected_text.split","(",")",")","==","``","''",".join","(","extract_text_from_pdf","(","pdf_file_path",")",".strip","(",")",".split","(",")",")"]
49
51
null
test_retrieve_utils.py
autogen/test/test_retrieve_utils.py
import pytest import os
15
1
2
0
0
12
null
Use image node_id 3 for calling the TestRetrieveUtils obj's underlying member method code with example usage: obj.test_extract_text_from_pdf() without return types
163
node_id 3
319,448
_to_request_dict
FileSource
object
true
self
Accepts file source parameters for conversion to request dict.
["Accepts","file","source","parameters","for","conversion","to","request","dict","."]
Generates a request dictionary using the parameters provided to the class.
["Generates","a","request","dictionary","using","the","parameters","provided","to","the","class","."]
file_source_request
def _to_request_dict(self): """Generates a request dictionary using the parameters provided to the class.""" file_source_request = {"S3Uri": self.s3_uri} if self.content_digest is not None: file_source_request["ContentDigest"] = self.content_digest if self.content_type is not None: file_source_request["ContentType"] = self.content_type return file_source_request
["def","_to_request_dict","(","self",")",":","``","''","''","Generates","a","request","dictionary","using","the","parameters","provided","to","the","class",".","''","''","''","file_source_request","=","{","``","S3Uri","''",":","self.s3_uri","}","if","self.content_digest","is","not","None",":","file_source_request","[","``","ContentDigest","''","]","=","self.content_digest","if","self.content_type","is","not","None",":","file_source_request","[","``","ContentType","''","]","=","self.content_type","return","file_source_request"]
153
160
null
model_metrics.py
sagemaker-python-sdk/src/sagemaker/model_metrics.py
from __future__ import absolute_import from typing import Optional, Union from sagemaker.workflow.entities import PipelineVariable
15
3
3
0
3
2
1
Use image node_id 2 for calling the FileSource obj's underlying member method code with example usage: obj._to_request_dict() and returns: file_source_request
158
node_id 2
1,845,785
__init__
FileSource
object
true
self,s3_uri,content_digest,content_type
Accepts file source parameters for conversion to request dict.
["Accepts","file","source","parameters","for","conversion","to","request","dict","."]
Initialize a ``FileSource`` instance and turn parameters into dict. Args: s3_uri (str or PipelineVariable): The S3 URI of the metric content_digest (str or PipelineVariable): The digest of the metric (default: None) content_type (str or PipelineVariable): Specifies the type of content in S3 URI (default: None)
["Initialize","a","``","FileSource","``","instance","and","turn","parameters","into","dict",".","Args",":","s3_uri","(","str","or","PipelineVariable",")",":","The","S3","URI","of","the","metric","content_digest","(","str","or","PipelineVariable",")",":","The","digest","of","the","metric","(","default",":","None",")","content_type","(","str","or","PipelineVariable",")",":","Specifies","the","type","of","content","in","S3","URI","(","default",":","None",")"]
FileSource
def __init__( self, s3_uri: Union[str, PipelineVariable], content_digest: Optional[Union[str, PipelineVariable]] = None, content_type: Optional[Union[str, PipelineVariable]] = None, ): """Initialize a ``FileSource`` instance and turn parameters into dict. Args: s3_uri (str or PipelineVariable): The S3 URI of the metric content_digest (str or PipelineVariable): The digest of the metric (default: None) content_type (str or PipelineVariable): Specifies the type of content in S3 URI (default: None) """ self.content_type = content_type self.s3_uri = s3_uri self.content_digest = content_digest
["def","__init__","(","self",",","s3_uri",":","Union","[","str",",","PipelineVariable","]",",","content_digest",":","Optional","[","Union","[","str",",","PipelineVariable","]","]","=","None",",","content_type",":","Optional","[","Union","[","str",",","PipelineVariable","]","]","=","None",",",")",":","``","''","''","Initialize","a","``","FileSource","``","instance","and","turn","parameters","into","dict",".","Args",":","s3_uri","(","str","or","PipelineVariable",")",":","The","S3","URI","of","the","metric","content_digest","(","str","or","PipelineVariable",")",":","The","digest","of","the","metric","(","default",":","None",")","content_type","(","str","or","PipelineVariable",")",":","Specifies","the","type","of","content","in","S3","URI","(","default",":","None",")","``","''","''","self.content_type","=","content_type","self.s3_uri","=","s3_uri","self.content_digest","=","content_digest"]
134
151
null
model_metrics.py
sagemaker-python-sdk/src/sagemaker/model_metrics.py
from __future__ import absolute_import from typing import Optional, Union from sagemaker.workflow.entities import PipelineVariable
15
3
3
0
3
2
1
Use image node_id 1 to create a new FileSource object from inherited base classes: object with example: obj = FileSource(s3_uri, content_digest, content_type)
158
node_id 1
1,845,784
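Combining the two FileSource records above, a quick sketch of the request dict they produce; the S3 URI is illustrative.
from sagemaker.model_metrics import FileSource

source = FileSource(
    s3_uri="s3://my-bucket/metrics/constraints.json",  # illustrative URI
    content_type="application/json",
)
print(source._to_request_dict())
# {'S3Uri': 's3://my-bucket/metrics/constraints.json', 'ContentType': 'application/json'}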
diffs
global
null
false
ctx,f,x,n
null
null
null
null
null
def diffs(ctx, f, x, n=None, **options): r""" Returns a generator that yields the sequence of derivatives .. math :: f(x), f'(x), f''(x), \ldots, f^{(k)}(x), \ldots With ``method='step'``, :func:`~mpmath.diffs` uses only `O(k)` function evaluations to generate the first `k` derivatives, rather than the roughly `O(k^2)` evaluations required if one calls :func:`~mpmath.diff` `k` separate times. With `n < \infty`, the generator stops as soon as the `n`-th derivative has been generated. If the exact number of needed derivatives is known in advance, this is further slightly more efficient. Options are the same as for :func:`~mpmath.diff`. **Examples** >>> from mpmath import * >>> mp.dps = 15 >>> nprint(list(diffs(cos, 1, 5))) [0.540302, -0.841471, -0.540302, 0.841471, 0.540302, -0.841471] >>> for i, d in zip(range(6), diffs(cos, 1)): ... print("%s %s" % (i, d)) ... 0 0.54030230586814 1 -0.841470984807897 2 -0.54030230586814 3 0.841470984807897 4 0.54030230586814 5 -0.841470984807897 """ if n is None: n = ctx.inf else: n = int(n) if options.get("method", "step") != "step": k = 0 while k < n + 1: yield ctx.diff(f, x, k, **options) k += 1 return singular = options.get("singular") if singular: yield ctx.diff(f, x, 0, singular=True) else: yield f(ctx.convert(x)) if n < 1: return if n == ctx.inf: A, B = 1, 2 else: A, B = 1, n + 1 while 1: callprec = ctx.prec y, norm, workprec = hsteps(ctx, f, x, B, callprec, **options) for k in xrange(A, B): try: ctx.prec = workprec d = ctx.difference(y, k) / norm**k finally: ctx.prec = callprec yield +d if k >= n: return A, B = B, int(A * 1.4 + 1) B = min(B, n)
["def","diffs","(","ctx",",","f",",","x",",","n=None",",","*","*","options",")",":","r","''","''","''","Returns","a","generator","that","yields","the","sequence","of","derivatives","..","math",":",":","f","(","x",")",",","f","'","(","x",")",",","f","''","(","x",")",",","\\ldots",",","f^","{","(","k",")","}","(","x",")",",","\\ldots","With","``","method='step","'","``",",",":","func",":","`","~mpmath.diffs","`","uses","only","`","O","(","k",")","`","function","evaluations","to","generate","the","first","`","k","`","derivatives",",","rather","than","the","roughly","`","O","(","k^2",")","`","evaluations","required","if","one","calls",":","func",":","`","~mpmath.diff","`","`","k","`","separate","times",".","With","`","n","<","\\infty","`",",","the","generator","stops","as","soon","as","the","`","n","`","-th","derivative","has","been","generated",".","If","the","exact","number","of","needed","derivatives","is","known","in","advance",",","this","is","further","slightly","more","efficient",".","Options","are","the","same","as","for",":","func",":","`","~mpmath.diff","`",".","*","*","Examples","*","*",">",">",">","from","mpmath","import","*",">",">",">","mp.dps","=","15",">",">",">","nprint","(","list","(","diffs","(","cos",",","1",",","5",")",")",")","[","0.540302",",","-0.841471",",","-0.540302",",","0.841471",",","0.540302",",","-0.841471","]",">",">",">","for","i",",","d","in","zip","(","range","(","6",")",",","diffs","(","cos",",","1",")",")",":","...","print","(","``","%","s","%","s","''","%","(","i",",","d",")",")","...","0","0.54030230586814","1","-0.841470984807897","2","-0.54030230586814","3","0.841470984807897","4","0.54030230586814","5","-0.841470984807897","``","''","''","if","n","is","None",":","n","=","ctx.inf","else",":","n","=","int","(","n",")","if","options.get","(","``","method","''",",","``","step","''",")","!","=","``","step","''",":","k","=","0","while","k","<","n","+","1",":","yield","ctx.diff","(","f",",","x",",","k",",","*","*","options",")","k","+=","1","return","singular","=","options.get","(","``","singular","''",")","if","singular",":","yield","ctx.diff","(","f",",","x",",","0",",","singular=True",")","else",":","yield","f","(","ctx.convert","(","x",")",")","if","n","<","1",":","return","if","n","==","ctx.inf",":","A",",","B","=","1",",","2","else",":","A",",","B","=","1",",","n","+","1","while","1",":","callprec","=","ctx.prec","y",",","norm",",","workprec","=","hsteps","(","ctx",",","f",",","x",",","B",",","callprec",",","*","*","options",")","for","k","in","xrange","(","A",",","B",")",":","try",":","ctx.prec","=","workprec","d","=","ctx.difference","(","y",",","k",")","\/","norm","*","*","k","finally",":","ctx.prec","=","callprec","yield","+d","if","k",">","=","n",":","return","A",",","B","=","B",",","int","(","A","*","1.4","+","1",")","B","=","min","(","B",",","n",")"]
224
295
null
differentiation.py
catboost/contrib/python/mpmath/py3/mpmath/calculus/differentiation.py
from ..libmp.backend import xrange from .calculus import defun
15
null
2
13
null
null
null
Use image node_id 5 for calling a global function with example usage: diffs(ctx, f, x, n) without return types
110
node_id 5
407,216
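The docstring example for diffs as a standalone script; with the default method='step', the first k derivatives cost O(k) function evaluations rather than O(k^2):
from mpmath import mp, diffs, nprint, cos

mp.dps = 15
nprint(list(diffs(cos, 1, 5)))  # first six derivatives of cos at x = 1
# [0.540302, -0.841471, -0.540302, 0.841471, 0.540302, -0.841471]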
iterable_to_function
global
null
false
gen
null
null
null
null
f,data
def iterable_to_function(gen): gen = iter(gen) data = [] def f(k): for i in xrange(len(data), k + 1): data.append(next(gen)) return data[k] return f
["def","iterable_to_function","(","gen",")",":","gen","=","iter","(","gen",")","data","=","[","]","def","f","(","k",")",":","for","i","in","xrange","(","len","(","data",")",",","k","+","1",")",":","data.append","(","next","(","gen",")",")","return","data","[","k","]","return","f"]
297
304
null
differentiation.py
catboost/contrib/python/mpmath/py3/mpmath/calculus/differentiation.py
from ..libmp.backend import xrange from .calculus import defun
15
null
2
13
null
null
null
Use image node_id 6 for calling a global function with example usage: iterable_to_function(gen) and returns: f, data
116
node_id 6
407,217
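iterable_to_function turns a one-shot generator into a memoized random-access function. A standalone re-statement, using the built-in range in place of the backend xrange:
def iterable_to_function(gen):
    gen = iter(gen)
    data = []                       # cache of values drawn from gen so far
    def f(k):
        for _ in range(len(data), k + 1):
            data.append(next(gen))  # advance the generator just far enough
        return data[k]
    return f

squares = iterable_to_function(n * n for n in range(100))
print(squares(4), squares(2), squares(4))  # 16 4 16 -- repeat and out-of-order calls hit the cache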
diffs_prod
global
null
false
ctx,factors
null
null
null
null
null
def diffs_prod(ctx, factors): r""" Given a list of `N` iterables or generators yielding `f_k(x), f'_k(x), f''_k(x), \ldots` for `k = 1, \ldots, N`, generate `g(x), g'(x), g''(x), \ldots` where `g(x) = f_1(x) f_2(x) \cdots f_N(x)`. At high precision and for large orders, this is typically more efficient than numerical differentiation if the derivatives of each `f_k(x)` admit direct computation. Note: This function does not increase the working precision internally, so guard digits may have to be added externally for full accuracy. **Examples** >>> from mpmath import * >>> mp.dps = 15; mp.pretty = True >>> f = lambda x: exp(x)*cos(x)*sin(x) >>> u = diffs(f, 1) >>> v = mp.diffs_prod([diffs(exp,1), diffs(cos,1), diffs(sin,1)]) >>> next(u); next(v) 1.23586333600241 1.23586333600241 >>> next(u); next(v) 0.104658952245596 0.104658952245596 >>> next(u); next(v) -5.96999877552086 -5.96999877552086 >>> next(u); next(v) -12.4632923122697 -12.4632923122697 """ N = len(factors) if N == 1: for c in factors[0]: yield c else: u = iterable_to_function(ctx.diffs_prod(factors[: N // 2])) v = iterable_to_function(ctx.diffs_prod(factors[N // 2 :])) n = 0 while 1: # yield sum(binomial(n,k)*u(n-k)*v(k) for k in xrange(n+1)) s = u(n) * v(0) a = 1 for k in xrange(1, n + 1): a = a * (n - k + 1) // k s += a * u(n - k) * v(k) yield s n += 1
["def","diffs_prod","(","ctx",",","factors",")",":","r","''","''","''","Given","a","list","of","`","N","`","iterables","or","generators","yielding","`","f_k","(","x",")",",","f'_k","(","x",")",",","f","''","_k","(","x",")",",","\\ldots","`","for","`","k","=","1",",","\\ldots",",","N","`",",","generate","`","g","(","x",")",",","g","'","(","x",")",",","g","''","(","x",")",",","\\ldots","`","where","`","g","(","x",")","=","f_1","(","x",")","f_2","(","x",")","\\cdots","f_N","(","x",")","`",".","At","high","precision","and","for","large","orders",",","this","is","typically","more","efficient","than","numerical","differentiation","if","the","derivatives","of","each","`","f_k","(","x",")","`","admit","direct","computation",".","Note",":","This","function","does","not","increase","the","working","precision","internally",",","so","guard","digits","may","have","to","be","added","externally","for","full","accuracy",".","*","*","Examples","*","*",">",">",">","from","mpmath","import","*",">",">",">","mp.dps","=","15",";","mp.pretty","=","True",">",">",">","f","=","lambda","x",":","exp","(","x",")","*","cos","(","x",")","*","sin","(","x",")",">",">",">","u","=","diffs","(","f",",","1",")",">",">",">","v","=","mp.diffs_prod","(","[","diffs","(","exp,1",")",",","diffs","(","cos,1",")",",","diffs","(","sin,1",")","]",")",">",">",">","next","(","u",")",";","next","(","v",")","1.23586333600241","1.23586333600241",">",">",">","next","(","u",")",";","next","(","v",")","0.104658952245596","0.104658952245596",">",">",">","next","(","u",")",";","next","(","v",")","-5.96999877552086","-5.96999877552086",">",">",">","next","(","u",")",";","next","(","v",")","-12.4632923122697","-12.4632923122697","``","''","''","N","=","len","(","factors",")","if","N","==","1",":","for","c","in","factors","[","0","]",":","yield","c","else",":","u","=","iterable_to_function","(","ctx.diffs_prod","(","factors","[",":","N","\/\/","2","]",")",")","v","=","iterable_to_function","(","ctx.diffs_prod","(","factors","[","N","\/\/","2",":","]",")",")","n","=","0","while","1",":","#","yield","sum","(","binomial","(","n",",","k",")","*","u","(","n-k",")","*","v","(","k",")","for","k","in","xrange","(","n+1",")",")","s","=","u","(","n",")","*","v","(","0",")","a","=","1","for","k","in","xrange","(","1",",","n","+","1",")",":","a","=","a","*","(","n","-","k","+","1",")","\/\/","k","s","+=","a","*","u","(","n","-","k",")","*","v","(","k",")","yield","s","n","+=","1"]
307
358
null
differentiation.py
catboost/contrib/python/mpmath/py3/mpmath/calculus/differentiation.py
from ..libmp.backend import xrange from .calculus import defun
15
null
2
13
null
null
null
Use image node_id 7 for calling a global function with example usage: diffs_prod(ctx, factors) without return types
115
node_id 7
407,218
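The diffs_prod doctest as a runnable script: derivatives of a product assembled from per-factor derivative streams, checked against direct differentiation.
from mpmath import mp, diffs, exp, cos, sin

mp.dps = 15
f = lambda x: exp(x) * cos(x) * sin(x)
u = diffs(f, 1)                                                   # direct numerical derivatives
v = mp.diffs_prod([diffs(exp, 1), diffs(cos, 1), diffs(sin, 1)])  # Leibniz-rule combination
for _ in range(4):
    print(next(u), next(v))  # e.g. 1.23586333600241 twice on the first line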
test_ascent
TestDatasets
null
true
self
null
null
null
null
null
def test_ascent(self): assert_equal(ascent().shape, (512, 512)) # hash check assert _has_hash( os.path.join(data_dir, "ascent.dat"), registry["ascent.dat"] )
["def","test_ascent","(","self",")",":","assert_equal","(","ascent","(",")",".shape",",","(","512",",","512",")",")","#","hash","check","assert","_has_hash","(","os.path.join","(","data_dir",",","``","ascent.dat","''",")",",","registry","[","``","ascent.dat","''","]",")"]
41
46
null
test_data.py
scipy/scipy/datasets/tests/test_data.py
from scipy.datasets._registry import registry from scipy.datasets._fetchers import data_fetcher from scipy.datasets._utils import _clear_cache from scipy.datasets import ascent, face, electrocardiogram, download_all from numpy.testing import assert_equal, assert_almost_equal import os import pytest
15
1
7
2
0
5
null
Use image node_id 3 for calling the TestDatasets obj's underlying member method code with example usage: obj.test_ascent() without return types
143
node_id 3
1,884,889
_to_request_dict
MetricsSource
object
true
self
Accepts metrics source parameters for conversion to request dict.
["Accepts","metrics","source","parameters","for","conversion","to","request","dict","."]
Generates a request dictionary using the parameters provided to the class.
["Generates","a","request","dictionary","using","the","parameters","provided","to","the","class","."]
metrics_source_request
def _to_request_dict(self): """Generates a request dictionary using the parameters provided to the class.""" metrics_source_request = { "ContentType": self.content_type, "S3Uri": self.s3_uri, } if self.content_digest is not None: metrics_source_request["ContentDigest"] = self.content_digest return metrics_source_request
["def","_to_request_dict","(","self",")",":","``","''","''","Generates","a","request","dictionary","using","the","parameters","provided","to","the","class",".","''","''","''","metrics_source_request","=","{","``","ContentType","''",":","self.content_type",",","``","S3Uri","''",":","self.s3_uri",",","}","if","self.content_digest","is","not","None",":","metrics_source_request","[","``","ContentDigest","''","]","=","self.content_digest","return","metrics_source_request"]
123
128
null
model_metrics.py
sagemaker-python-sdk/src/sagemaker/model_metrics.py
from __future__ import absolute_import from typing import Optional, Union from sagemaker.workflow.entities import PipelineVariable
15
3
3
0
3
2
1
Use image node_id 2 for calling the MetricsSource obj's underlying member method code with example usage: obj._to_request_dict() and returns: metrics_source_request
164
node_id 2
1,845,783
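A sketch of the dict MetricsSource._to_request_dict emits. The MetricsSource constructor is not shown in this record, so the keyword arguments below, mirroring FileSource's, are an assumption; the S3 URI is illustrative.
from sagemaker.model_metrics import MetricsSource

source = MetricsSource(
    content_type="application/json",
    s3_uri="s3://my-bucket/metrics/statistics.json",  # kwargs assumed, URI illustrative
)
print(source._to_request_dict())
# {'ContentType': 'application/json', 'S3Uri': 's3://my-bucket/metrics/statistics.json'}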
leiden
global
null
false
input_graph,max_iter,resolution,random_state,theta
null
null
null
null
ddf, mod_score
def leiden( input_graph: Graph, max_iter: int = 100, resolution: float = 1.0, random_state: int = None, theta: float = 1.0, ) -> Tuple[dask_cudf.DataFrame, float]: """ Compute the modularity optimizing partition of the input graph using the Leiden method Traag, V. A., Waltman, L., & van Eck, N. J. (2019). From Louvain to Leiden: guaranteeing well-connected communities. Scientific reports, 9(1), 5233. doi: 10.1038/s41598-019-41695-z Parameters ---------- G : cugraph.Graph The graph descriptor should contain the connectivity information and weights. The adjacency list will be computed if not already present. The current implementation only supports undirected graphs. max_iter : integer, optional (default=100) This controls the maximum number of levels/iterations of the Leiden algorithm. When specified the algorithm will terminate after no more than the specified number of iterations. No error occurs when the algorithm terminates early in this manner. resolution: float, optional (default=1.0) Called gamma in the modularity formula, this changes the size of the communities. Higher resolutions lead to more smaller communities, lower resolutions lead to fewer larger communities. Defaults to 1. random_state: int, optional(default=None) Random state to use when generating samples. Optional argument, defaults to a hash of process id, time, and hostname. theta: float, optional (default=1.0) Called theta in the Leiden algorithm, this is used to scale modularity gain in Leiden refinement phase, to compute the probability of joining a random leiden community. Returns ------- parts : dask_cudf.DataFrame GPU data frame of size V containing two columns the vertex id and the partition id it is assigned to. ddf['vertex'] : cudf.Series Contains the vertex identifiers ddf['partition'] : cudf.Series Contains the partition assigned to the vertices modularity_score : float a floating point number containing the global modularity score of the partitioning. Examples -------- >>> import cugraph.dask as dcg >>> import dask_cudf >>> # ... Init a DASK Cluster >>> # see https://docs.rapids.ai/api/cugraph/stable/dask-cugraph.html >>> # Download dataset from https://github.com/rapidsai/cugraph/datasets/.. >>> chunksize = dcg.get_chunksize(datasets_path / "karate.csv") >>> ddf = dask_cudf.read_csv(datasets_path / "karate.csv", ... chunksize=chunksize, delimiter=" ", ... names=["src", "dst", "value"], ... dtype=["int32", "int32", "float32"]) >>> dg = cugraph.Graph() >>> dg.from_dask_cudf_edgelist(ddf, source='src', destination='dst') >>> parts, modularity_score = dcg.leiden(dg) """ if input_graph.is_directed(): raise ValueError("input graph must be undirected") # Return a client if one has started client = default_client() do_expensive_check = False result = [ client.submit( _call_plc_leiden, Comms.get_session_id(), input_graph._plc_graph[w], max_iter, resolution, random_state, theta, do_expensive_check, workers=[w], allow_other_workers=False, ) for w in Comms.get_workers() ] wait(result) part_mod_score = [ client.submit(convert_to_cudf, r) for r in result ] wait(part_mod_score) vertex_dtype = input_graph.edgelist.edgelist_df.dtypes[0] empty_df = cudf.DataFrame( { "vertex": numpy.empty(shape=0, dtype=vertex_dtype), "partition": numpy.empty(shape=0, dtype="int32"), } ) part_mod_score = [ delayed(lambda x: x, nout=2)(r) for r in part_mod_score ] ddf = dask_cudf.from_delayed( [r[0] for r in part_mod_score], meta=empty_df, verify_meta=False, ).persist() mod_score = dask.array.from_delayed( part_mod_score[0][1], shape=(1,), dtype=float ).compute() wait(ddf) wait(mod_score) wait([r.release() for r in part_mod_score]) if input_graph.renumbered: ddf = input_graph.unrenumber(ddf, "vertex") return ddf, mod_score
["def","leiden","(","input_graph",":","Graph",",","max_iter",":","int","=","100",",","resolution",":","int","=","1.0",",","random_state",":","int","=","None",",","theta",":","int","=","1.0",",",")","-",">","Tuple","[","dask_cudf.DataFrame",",","float","]",":","``","''","''","Compute","the","modularity","optimizing","partition","of","the","input","graph","using","the","Leiden","method","Traag",",","V.","A.",",","Waltman",",","L.",",","&","van","Eck",",","N.","J",".","(","2019",")",".","From","Louvain","to","Leiden",":","guaranteeing","well-connected","communities",".","Scientific","reports",",","9","(","1",")",",","5233.","doi",":","10.1038\/s41598-019-41695-z","Parameters","--","--","--","--","--","G",":","cugraph.Graph","The","graph","descriptor","should","contain","the","connectivity","information","and","weights",".","The","adjacency","list","will","be","computed","if","not","already","present",".","The","current","implementation","only","supports","undirected","graphs",".","max_iter",":","integer",",","optional","(","default=100",")","This","controls","the","maximum","number","of","levels\/iterations","of","the","Leiden","algorithm",".","When","specified","the","algorithm","will","terminate","after","no","more","than","the","specified","number","of","iterations",".","No","error","occurs","when","the","algorithm","terminates","early","in","this","manner",".","resolution",":","float",",","optional","(","default=1.0",")","Called","gamma","in","the","modularity","formula",",","this","changes","the","size","of","the","communities",".","Higher","resolutions","lead","to","more","smaller","communities",",","lower","resolutions","lead","to","fewer","larger","communities",".","Defaults","to","1.","random_state",":","int",",","optional","(","default=None",")","Random","state","to","use","when","generating","samples",".","Optional","argument",",","defaults","to","a","hash","of","process","id",",","time",",","and","hostname",".","theta",":","float",",","optional","(","default=1.0",")","Called","theta","in","the","Leiden","algorithm",",","this","is","used","to","scale","modularity","gain","in","Leiden","refinement","phase",",","to","compute","the","probability","of","joining","a","random","leiden","community",".","Returns","--","--","--","-","parts",":","dask_cudf.DataFrame","GPU","data","frame","of","size","V","containing","two","columns","the","vertex","id","and","the","partition","id","it","is","assigned","to",".","ddf","[","'vertex","'","]",":","cudf.Series","Contains","the","vertex","identifiers","ddf","[","'partition","'","]",":","cudf.Series","Contains","the","partition","assigned","to","the","vertices","modularity_score",":","float","a","floating","point","number","containing","the","global","modularity","score","of","the","partitioning",".","Examples","--","--","--","--",">",">",">","import","cugraph.dask","as","dcg",">",">",">","import","dask_cudf",">",">",">","#","...","Init","a","DASK","Cluster",">",">",">","#","see","https",":","\/\/docs.rapids.ai\/api\/cugraph\/stable\/dask-cugraph.html",">",">",">","#","Download","dataset","from","https",":","\/\/github.com\/rapidsai\/cugraph\/datasets\/","..",">",">",">","chunksize","=","dcg.get_chunksize","(","datasets_path","\/","``","karate.csv","''",")",">",">",">","ddf","=","dask_cudf.read_csv","(","datasets_path","\/","``","karate.csv","''",",","...","chunksize=chunksize",",","delimiter=","''","``",",","...","names=","[","``","src","''",",","``","dst","''",",","``","value","''","]",",","...","dtype=","[","``","int32","''",",","``","int32","''","
,","``","float32","''","]",")",">",">",">","dg","=","cugraph.Graph","(",")",">",">",">","dg.from_dask_cudf_edgelist","(","ddf",",","source='src","'",",","destination='dst","'",")",">",">",">","parts",",","modularity_score","=","dcg.leiden","(","dg",")","``","''","''","if","input_graph.is_directed","(",")",":","raise","ValueError","(","``","input","graph","must","be","undirected","''",")","#","Return","a","client","if","one","has","started","client","=","default_client","(",")","do_expensive_check","=","False","result","=","[","client.submit","(","_call_plc_leiden",",","Comms.get_session_id","(",")",",","input_graph._plc_graph","[","w","]",",","max_iter",",","resolution",",","random_state",",","theta",",","do_expensive_check",",","workers=","[","w","]",",","allow_other_workers=False",",",")","for","w","in","Comms.get_workers","(",")","]","wait","(","result",")","part_mod_score","=","[","client.submit","(","convert_to_cudf",",","r",")","for","r","in","result","]","wait","(","part_mod_score",")","vertex_dtype","=","input_graph.edgelist.edgelist_df.dtypes","[","0","]","empty_df","=","cudf.DataFrame","(","{","``","vertex","''",":","numpy.empty","(","shape=0",",","dtype=vertex_dtype",")",",","``","partition","''",":","numpy.empty","(","shape=0",",","dtype=","''","int32","''",")",",","}",")","part_mod_score","=","[","delayed","(","lambda","x",":","x",",","nout=2",")","(","r",")","for","r","in","part_mod_score","]","ddf","=","dask_cudf.from_delayed","(","[","r","[","0","]","for","r","in","part_mod_score","]",",","meta=empty_df",",","verify_meta=False",",",")",".persist","(",")","mod_score","=","dask.array.from_delayed","(","part_mod_score","[","0","]","[","1","]",",","shape=","(","1",",",")",",","dtype=float",")",".compute","(",")","wait","(","ddf",")","wait","(","mod_score",")","wait","(","[","r.release","(",")","for","r","in","part_mod_score","]",")","if","input_graph.renumbered",":","ddf","=","input_graph.unrenumber","(","ddf",",","``","vertex","''",")","return","ddf",",","mod_score"]
67
199
null
leiden.py
cugraph/python/cugraph/cugraph/dask/community/leiden.py
from __future__ import annotations from dask.distributed import wait, default_client import cugraph.dask.comms.comms import dask_cudf import dask from dask import delayed import cudf from pylibcugraph import ResourceHandle from pylibcugraph import leiden import numpy import cupy from typing import Tuple, TYPE_CHECKING
15
null
12
3
null
null
null
Use image node_id 3 for calling a global function with example usage: leiden(input_graph, max_iter, resolution, random_state, theta) and returns: ddf, mod_score
161
node_id 3
686,170
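The record above documents cuGraph's distributed Leiden entry point. As a hedged sketch only (it extends the record's own karate docstring example, so the Dask cluster and the graph `dg` are assumed from there, and the groupby call is illustrative dask_cudf usage, not part of the record):

import cugraph.dask as dcg

# "dg" is the distributed graph built in the record's docstring example.
parts, modularity_score = dcg.leiden(dg)
print(f"global modularity: {modularity_score:.4f}")

# parts is a dask_cudf.DataFrame with "vertex" and "partition" columns;
# community sizes fall out of a groupby on the partition id.
sizes = parts.groupby("partition")["vertex"].count().compute()
print(sizes.sort_values(ascending=False).head())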
test_face
TestDatasets
null
true
self
null
null
null
null
null
def test_face(self): assert_equal(face().shape, (768, 1024, 3)) # hash check assert _has_hash( os.path.join(data_dir, "face.dat"), registry["face.dat"] )
["def","test_face","(","self",")",":","assert_equal","(","face","(",")",".shape",",","(","768",",","1024",",","3",")",")","#","hash","check","assert","_has_hash","(","os.path.join","(","data_dir",",","``","face.dat","''",")",",","registry","[","``","face.dat","''","]",")"]
48
53
null
test_data.py
scipy/scipy/datasets/tests/test_data.py
from scipy.datasets._registry import registry from scipy.datasets._fetchers import data_fetcher from scipy.datasets._utils import _clear_cache from scipy.datasets import ascent, face, electrocardiogram, download_all from numpy.testing import assert_equal, assert_almost_equal import os import pytest
15
1
7
2
0
5
null
Use image node_id 4 for calling the TestDatasets obj's underlying member method code with example usage: obj.test_face() without return types
141
node_id 4
1,884,890
_no_op
global
null
false
null
null
null
null
args, kwargs
def _no_op(*args, **kwargs) -> Any: """no_op A function that returns its arguments :return: whatever was passed in :rtype: Any """ return args, kwargs
["def","_no_op","(","*","args",",","*","*","kwargs",")","-",">","Any",":","``","''","''","no_op","A","function","that","returns","its","arguments",":","return",":","whatever","was","passed","in",":","rtype",":","Any","``","''","''","return","args",",","kwargs"]
38
44
null
test_consume.py
airflow/tests/providers/apache/kafka/operators/test_consume.py
from __future__ import annotations import json import logging from typing import Any from unittest import mock import pytest from airflow.models import Connection from airflow.providers.apache.kafka.operators.consume import ConsumeFromTopicOperator from airflow.utils import db
15
null
9
1
null
null
null
Use image node_id 1 for calling a global function with example usage: _no_op() and returns: args, kwargs
105
node_id 1
263,603
create_db_and_tables
global
null
false
null
null
null
null
null
def create_db_and_tables(): SQLModel.metadata.create_all(engine)
["def","create_db_and_tables","(",")",":","SQLModel.metadata.create_all","(","engine",")"]
30
31
null
tutorial001_py310.py
sqlmodel/docs_src/tutorial/fastapi/read_one/tutorial001_py310.py
from fastapi import FastAPI, HTTPException from sqlmodel import Field, Session, SQLModel, create_engine, select
15
null
2
5
null
null
null
Use image node_id 1 for calling a global function with example usage: create_db_and_tables() without return types
113
node_id 1
1,989,839
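create_db_and_tables only makes sense next to a model and an engine. A minimal, hedged sketch of the surrounding tutorial context (the Hero model and the SQLite URL are illustrative, reconstructed from the record's imports, not taken from the file):

from sqlmodel import Field, SQLModel, create_engine

class Hero(SQLModel, table=True):
    id: int | None = Field(default=None, primary_key=True)
    name: str

engine = create_engine("sqlite:///database.db")  # illustrative URL

def create_db_and_tables():
    SQLModel.metadata.create_all(engine)

create_db_and_tables()  # emits CREATE TABLE for every table=True model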
_to_request_dict
ModelMetrics
object
true
self
Accepts model metrics parameters for conversion to request dict.
["Accepts","model","metrics","parameters","for","conversion","to","request","dict","."]
Generates a request dictionary using the parameters provided to the class.
["Generates","a","request","dictionary","using","the","parameters","provided","to","the","class","."]
model_metrics_request
def _to_request_dict(self): """Generates a request dictionary using the parameters provided to the class.""" model_metrics_request = {} model_quality = {} if self.model_statistics is not None: model_quality[ "Statistics" ] = self.model_statistics._to_request_dict() if self.model_constraints is not None: model_quality[ "Constraints" ] = self.model_constraints._to_request_dict() if model_quality: model_metrics_request["ModelQuality"] = model_quality model_data_quality = {} if self.model_data_statistics is not None: model_data_quality[ "Statistics" ] = self.model_data_statistics._to_request_dict() if self.model_data_constraints is not None: model_data_quality[ "Constraints" ] = self.model_data_constraints._to_request_dict() if model_data_quality: model_metrics_request["ModelDataQuality"] = model_data_quality bias = {} if self.bias is not None: bias["Report"] = self.bias._to_request_dict() if self.bias_pre_training is not None: bias[ "PreTrainingReport" ] = self.bias_pre_training._to_request_dict() if self.bias_post_training is not None: bias[ "PostTrainingReport" ] = self.bias_post_training._to_request_dict() model_metrics_request["Bias"] = bias explainability = {} if self.explainability is not None: explainability[ "Report" ] = self.explainability._to_request_dict() model_metrics_request["Explainability"] = explainability return model_metrics_request
["def","_to_request_dict","(","self",")",":","``","''","''","Generates","a","request","dictionary","using","the","parameters","provided","to","the","class",".","''","''","''","model_metrics_request","=","{","}","model_quality","=","{","}","if","self.model_statistics","is","not","None",":","model_quality","[","``","Statistics","''","]","=","self.model_statistics._to_request_dict","(",")","if","self.model_constraints","is","not","None",":","model_quality","[","``","Constraints","''","]","=","self.model_constraints._to_request_dict","(",")","if","model_quality",":","model_metrics_request","[","``","ModelQuality","''","]","=","model_quality","model_data_quality","=","{","}","if","self.model_data_statistics","is","not","None",":","model_data_quality","[","``","Statistics","''","]","=","self.model_data_statistics._to_request_dict","(",")","if","self.model_data_constraints","is","not","None",":","model_data_quality","[","``","Constraints","''","]","=","self.model_data_constraints._to_request_dict","(",")","if","model_data_quality",":","model_metrics_request","[","``","ModelDataQuality","''","]","=","model_data_quality","bias","=","{","}","if","self.bias","is","not","None",":","bias","[","``","Report","''","]","=","self.bias._to_request_dict","(",")","if","self.bias_pre_training","is","not","None",":","bias","[","``","PreTrainingReport","''","]","=","self.bias_pre_training._to_request_dict","(",")","if","self.bias_post_training","is","not","None",":","bias","[","``","PostTrainingReport","''","]","=","self.bias_post_training._to_request_dict","(",")","model_metrics_request","[","``","Bias","''","]","=","bias","explainability","=","{","}","if","self.explainability","is","not","None",":","explainability","[","``","Report","''","]","=","self.explainability._to_request_dict","(",")","model_metrics_request","[","``","Explainability","''","]","=","explainability","return","model_metrics_request"]
64
98
null
model_metrics.py
sagemaker-python-sdk/src/sagemaker/model_metrics.py
from __future__ import absolute_import from typing import Optional, Union from sagemaker.workflow.entities import PipelineVariable
15
3
3
0
3
2
1
Use image node_id 2 for calling the ModelMetrics obj's underlying member method code with example usage: obj._to_request_dict() and returns: model_metrics_request
162
node_id 2
1,845,781
__init__
ModelMetrics
object
true
self,model_statistics,model_constraints,model_data_statistics,model_data_constraints,bias,explainability,bias_pre_training,bias_post_training
Accepts model metrics parameters for conversion to request dict.
["Accepts","model","metrics","parameters","for","conversion","to","request","dict","."]
Initialize a ``ModelMetrics`` instance and turn parameters into dict. Args: model_statistics (MetricsSource): A metric source object that represents model statistics (default: None). model_constraints (MetricsSource): A metric source object that represents model constraints (default: None). model_data_statistics (MetricsSource): A metric source object that represents model data statistics (default: None). model_data_constraints (MetricsSource): A metric source object that represents model data constraints (default: None). bias (MetricsSource): A metric source object that represents bias report (default: None). explainability (MetricsSource): A metric source object that represents explainability report (default: None). bias_pre_training (MetricsSource): A metric source object that represents Pre-training report (default: None). bias_post_training (MetricsSource): A metric source object that represents Post-training report (default: None).
["Initialize","a","``","ModelMetrics","``","instance","and","turn","parameters","into","dict",".","Args",":","model_statistics","(","MetricsSource",")",":","A","metric","source","object","that","represents","model","statistics","(","default",":","None",")",".","model_constraints","(","MetricsSource",")",":","A","metric","source","object","that","represents","model","constraints","(","default",":","None",")",".","model_data_statistics","(","MetricsSource",")",":","A","metric","source","object","that","represents","model","data","statistics","(","default",":","None",")",".","model_data_constraints","(","MetricsSource",")",":","A","metric","source","object","that","represents","model","data","constraints","(","default",":","None",")",".","bias","(","MetricsSource",")",":","A","metric","source","object","that","represents","bias","report","(","default",":","None",")",".","explainability","(","MetricsSource",")",":","A","metric","source","object","that","represents","explainability","report","(","default",":","None",")",".","bias_pre_training","(","MetricsSource",")",":","A","metric","source","object","that","represents","Pre-training","report","(","default",":","None",")",".","bias_post_training","(","MetricsSource",")",":","A","metric","source","object","that","represents","Post-training","report","(","default",":","None",")","."]
ModelMetrics
def __init__( self, model_statistics: Optional["MetricsSource"] = None, model_constraints: Optional["MetricsSource"] = None, model_data_statistics: Optional["MetricsSource"] = None, model_data_constraints: Optional["MetricsSource"] = None, bias: Optional["MetricsSource"] = None, explainability: Optional["MetricsSource"] = None, bias_pre_training: Optional["MetricsSource"] = None, bias_post_training: Optional["MetricsSource"] = None, ): """Initialize a ``ModelMetrics`` instance and turn parameters into dict. Args: model_statistics (MetricsSource): A metric source object that represents model statistics (default: None). model_constraints (MetricsSource): A metric source object that represents model constraints (default: None). model_data_statistics (MetricsSource): A metric source object that represents model data statistics (default: None). model_data_constraints (MetricsSource): A metric source object that represents model data constraints (default: None). bias (MetricsSource): A metric source object that represents bias report (default: None). explainability (MetricsSource): A metric source object that represents explainability report (default: None). bias_pre_training (MetricsSource): A metric source object that represents Pre-training report (default: None). bias_post_training (MetricsSource): A metric source object that represents Post-training report (default: None). """ self.model_statistics = model_statistics self.model_constraints = model_constraints self.model_data_statistics = model_data_statistics self.model_data_constraints = model_data_constraints self.bias = bias self.bias_pre_training = bias_pre_training self.bias_post_training = bias_post_training self.explainability = explainability
["def","__init__","(","self",",","model_statistics",":","Optional","[","``","MetricsSource","''","]","=","None",",","model_constraints",":","Optional","[","``","MetricsSource","''","]","=","None",",","model_data_statistics",":","Optional","[","``","MetricsSource","''","]","=","None",",","model_data_constraints",":","Optional","[","``","MetricsSource","''","]","=","None",",","bias",":","Optional","[","``","MetricsSource","''","]","=","None",",","explainability",":","Optional","[","``","MetricsSource","''","]","=","None",",","bias_pre_training",":","Optional","[","``","MetricsSource","''","]","=","None",",","bias_post_training",":","Optional","[","``","MetricsSource","''","]","=","None",",",")",":","``","''","''","Initialize","a","``","ModelMetrics","``","instance","and","turn","parameters","into","dict",".","Args",":","model_statistics","(","MetricsSource",")",":","A","metric","source","object","that","represents","model","statistics","(","default",":","None",")",".","model_constraints","(","MetricsSource",")",":","A","metric","source","object","that","represents","model","constraints","(","default",":","None",")",".","model_data_statistics","(","MetricsSource",")",":","A","metric","source","object","that","represents","model","data","statistics","(","default",":","None",")",".","model_data_constraints","(","MetricsSource",")",":","A","metric","source","object","that","represents","model","data","constraints","(","default",":","None",")",".","bias","(","MetricsSource",")",":","A","metric","source","object","that","represents","bias","report","(","default",":","None",")",".","explainability","(","MetricsSource",")",":","A","metric","source","object","that","represents","explainability","report","(","default",":","None",")",".","bias_pre_training","(","MetricsSource",")",":","A","metric","source","object","that","represents","Pre-training","report","(","default",":","None",")",".","bias_post_training","(","MetricsSource",")",":","A","metric","source","object","that","represents","Post-training","report","(","default",":","None",")",".","``","''","''","self.model_statistics","=","model_statistics","self.model_constraints","=","model_constraints","self.model_data_statistics","=","model_data_statistics","self.model_data_constraints","=","model_data_constraints","self.bias","=","bias","self.bias_pre_training","=","bias_pre_training","self.bias_post_training","=","bias_post_training","self.explainability","=","explainability"]
24
62
null
model_metrics.py
sagemaker-python-sdk/src/sagemaker/model_metrics.py
from __future__ import absolute_import from typing import Optional, Union from sagemaker.workflow.entities import PipelineVariable
15
3
3
0
3
2
1
Use image node_id 1 to create a new ModelMetrics object from inherited base classes: object with example: obj = ModelMetrics(model_statistics, model_constraints, model_data_statistics, model_data_constraints, bias, explainability, bias_pre_training, bias_post_training)
269
node_id 1
1,845,780
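Taken together with the MetricsSource record further below, the two classes compose as in this hedged sketch (the S3 URI is illustrative, and the nested dict shape assumes MetricsSource serializes its content type and URI, which this document does not show):

from sagemaker.model_metrics import MetricsSource, ModelMetrics

stats = MetricsSource(
    content_type="application/json",
    s3_uri="s3://my-bucket/model/statistics.json",  # illustrative URI
)
metrics = ModelMetrics(model_statistics=stats)
request = metrics._to_request_dict()
# -> {"ModelQuality": {"Statistics": {...}}, "Bias": {}, "Explainability": {}}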
untransform_observation_features
SearchSpaceToChoice
Transform
true
self,observation_features
Replaces the search space with a single choice parameter, whose values are the signatures of the arms observed in the data. This transform is meant to be used with ThompsonSampler. Choice parameter will be unordered unless config["use_ordered"] specifies otherwise. Transform is done in-place.
["Replaces","the","search","space","with","a","single","choice","parameter",",","whose","values","are","the","signatures","of","the","arms","observed","in","the","data",".","This","transform","is","meant","to","be","used","with","ThompsonSampler",".","Choice","parameter","will","be","unordered","unless","config","[","``","use_ordered","''","]","specifies","otherwise",".","Transform","is","done","in-place","."]
null
null
observation_features
def untransform_observation_features( self, observation_features: List[ObservationFeatures] ) -> List[ObservationFeatures]: for obsf in observation_features: signature = obsf.parameters[self.parameter_name] obsf.parameters = self.signature_to_parameterization[ signature ] return observation_features
["def","untransform_observation_features","(","self",",","observation_features",":","List","[","ObservationFeatures","]",")","-",">","List","[","ObservationFeatures","]",":","for","obsf","in","observation_features",":","signature","=","obsf.parameters","[","self.parameter_name","]","obsf.parameters","=","self.signature_to_parameterization","[","signature","]","return","observation_features"]
92
98
null
search_space_to_choice.py
Ax/ax/modelbridge/transforms/search_space_to_choice.py
from typing import List, Optional, TYPE_CHECKING from ax.core.arm import Arm from ax.core.observation import Observation, ObservationFeatures from ax.core.parameter import ChoiceParameter, FixedParameter, ParameterType from ax.core.search_space import RobustSearchSpace, SearchSpace from ax.exceptions.core import UnsupportedError from ax.modelbridge.transforms.base import Transform from ax.models.types import TConfig from ax.utils.common.typeutils import checked_cast
15
1
9
0
1
4
1
Use image node_id 4 for calling the SearchSpaceToChoice obj's underlying member method code with example usage: obj.untransform_observation_features(observation_features) and returns: observation_features
204
node_id 4
9,099
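The untransform step is a plain dictionary lookup from arm signature back to the full parameterization. A hedged, dependency-free illustration (the parameter name "arms" and all values here are hypothetical):

# self.parameter_name holds the single choice parameter; its value per
# observation is an arm signature, which maps back to real parameters.
signature_to_parameterization = {
    "0_0": {"x": 0.1, "y": 2},
    "0_1": {"x": 0.7, "y": 5},
}
observed = {"arms": "0_1"}
restored = signature_to_parameterization[observed["arms"]]
assert restored == {"x": 0.7, "y": 5}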
get_parser
global
null
false
null
null
null
null
parser
def get_parser() -> argparse.Namespace:
    """Get argument parser."""
    parser = argparse.ArgumentParser(
        description="Convert standard rttm file to ESPnet format"
    )
    parser.add_argument(
        "--rttm", required=True, type=str, help="Path of rttm file"
    )
    parser.add_argument(
        "--wavscp",
        required=True,
        type=str,
        help="Path of corresponding scp file",
    )
    parser.add_argument(
        "--output_path",
        required=True,
        type=str,
        help="Output directory to store espnet_rttm",
    )
    parser.add_argument(
        "--sampling_rate",
        type=str_or_int,
        default=16000,
        help="Sampling rate of the audio",
    )
    parser.add_argument(
        "--verbose",
        default=1,
        type=int,
        help="Verbosity level. Higher is more logging.",
    )
    return parser
["def","get_parser","(",")","-",">","argparse.Namespace",":","``","''","''","Get","argument","parser",".","''","''","''","parser","=","argparse.ArgumentParser","(","description=","''","Convert","standard","rttm","file","to","ESPnet","format","''",")","parser.add_argument","(","``","--","rttm","''",",","required=True",",","type=str",",","help=","''","Path","of","rttm","file","''",")","parser.add_argument","(","``","--","wavscp","''",",","required=True",",","type=str",",","help=","''","Path","of","corresponding","scp","file","''",",",")","parser.add_argument","(","``","--","output_path","''",",","required=True",",","type=str",",","help=","''","Output","directory","to","storry","espnet_rttm","''",",",")","parser.add_argument","(","``","--","sampling_rate","''",",","type=str_or_int",",","default=16000",",","help=","''","Sampling","rate","of","the","audio","''",",",")","parser.add_argument","(","``","--","verbose","''",",","default=1",",","type=int",",","help=","''","Verbosity","level",".","Higher","is","more","logging",".","``",",",")","return","parser"]
78
108
null
convert_rttm.py
espnet/egs2/wsj0_2mix/mixit_enh1/pyscripts/utils/convert_rttm.py
import argparse import collections.abc import logging import os import re from pathlib import Path from typing import Union import humanfriendly import numpy import soundfile from typeguard import check_argument_types from espnet2.utils.types import str_or_int
15
null
12
3
null
null
null
Use image node_id 2 for calling a global function with example usage: get_parser() and returns: parser
102
node_id 2
998,880
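A hedged sketch of driving the parser, e.g. from a __main__ block (the file names are illustrative; get_parser is the function from the record):

parser = get_parser()
args = parser.parse_args(
    ["--rttm", "ref.rttm", "--wavscp", "wav.scp", "--output_path", "out"]
)
print(args.sampling_rate)  # 16000 unless --sampling_rate overrides it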
convert_rttm_text
global
null
false
path,wavscp_path,sampling_rate,output_path
null
null
null
null
null
def convert_rttm_text(
    path: Union[Path, str],
    wavscp_path: Union[Path, str],
    sampling_rate: int,
    output_path: Union[Path, str],
) -> None:
    """Convert a RTTM file

    Note: only supports speaker information for now
    """

    output_handler = Path(
        os.path.join(output_path, "espnet_rttm")
    ).open("w", encoding="utf-8")

    assert check_argument_types()

    utt_ids = set()
    with Path(path).open("r", encoding="utf-8") as f:
        for linenum, line in enumerate(f, 1):
            sps = re.split(" +", line.rstrip())

            # RTTM format must have exactly 9 fields
            assert (
                len(sps) == 9
            ), "{} does not have exactly 9 fields".format(path)
            (
                label_type,
                utt_id,
                channel,
                start,
                duration,
                _,
                _,
                spk_id,
                _,
            ) = sps

            # Only speaker labels are supported for now
            assert label_type == "SPEAKER"
            utt_ids.add(utt_id)

            start = int(np.rint(float(start) * sampling_rate))
            end = start + int(
                np.rint(float(duration) * sampling_rate)
            )
            output_handler.write(
                "{} {} {} {} {} <NA> <NA> {} <NA>\n".format(
                    label_type, utt_id, channel, start, end, spk_id
                )
            )

    with Path(wavscp_path).open("r", encoding="utf-8") as f:
        for linenum, line in enumerate(f, 1):
            sps = re.split("[ \t]+", line.rstrip())
            utt_id, wav_path = sps

            assert (
                utt_id in utt_ids
            ), "{} is not in corresponding rttm {}".format(
                utt_id, path
            )
            sf = soundfile.SoundFile(wav_path)
            assert sf.samplerate == sampling_rate

            output_handler.write(
                "{} {} <NA> <NA> {} <NA> <NA> <NA> <NA>\n".format(
                    "END", utt_id, sf.frames
                )
            )

    output_handler.close()
["def","convert_rttm_text","(","path",":","Union","[","Path",",","str","]",",","wavscp_path",":","Union","[","Path",",","str","]",",","sampling_rate",":","int",",","output_path",":","Union","[","Path",",","str","]",",",")","-",">","None",":","``","''","''","Convert","a","RTTM","file","Note",":","only","support","speaker","information","now","``","''","''","output_handler","=","Path","(","os.path.join","(","output_path",",","``","espnet_rttm","''",")",")",".open","(","``","w","''",",","encoding=","''","utf-8","''",")","assert","check_argument_types","(",")","utt_ids","=","set","(",")","with","Path","(","path",")",".open","(","``","r","''",",","encoding=","''","utf-8","''",")","as","f",":","for","linenum",",","line","in","enumerate","(","f",",","1",")",":","sps","=","re.split","(","``","+","''",",","line.rstrip","(",")",")","#","RTTM","format","must","have","exactly","9","fields","assert","(","len","(","sps",")","==","9",")",",","``","{","}","does","not","have","exactly","9","fields","''",".format","(","path",")","(","label_type",",","utt_id",",","channel",",","start",",","duration",",","_",",","_",",","spk_id",",","_",",",")","=","sps","#","Only","support","speaker","label","now","assert","label_type","==","``","SPEAKER","''","utt_ids.add","(","utt_id",")","start","=","int","(","np.rint","(","float","(","start",")","*","sampling_rate",")",")","end","=","start","+","int","(","np.rint","(","float","(","duration",")","*","sampling_rate",")",")","output_handler.write","(","``","{","}","{","}","{","}","{","}","{","}","<","NA",">","<","NA",">","{","}","<","NA",">","\\n","''",".format","(","label_type",",","utt_id",",","channel",",","start",",","end",",","spk_id",")",")","with","Path","(","wavscp_path",")",".open","(","``","r","''",",","encoding=","''","utf-8","''",")","as","f",":","for","linenum",",","line","in","enumerate","(","f",",","1",")",":","sps","=","re.split","(","``","[","\\t","]","+","''",",","line.rstrip","(",")",")","utt_id",",","wav_path","=","sps","assert","(","utt_id","in","utt_ids",")",",","``","{","}","is","not","in","corresponding","rttm","{","}","''",".foramt","(","utt_id",",","path",")","sf","=","soundfile.SoundFile","(","wav_path",")","assert","sf.samplerate","==","sampling_rate","output_handler.write","(","(","``","{","}","{","}","<","NA",">","<","NA",">","{","}","<","NA",">","<","NA",">","<","NA",">","<","NA",">","\\n","''",".format","(","``","END","''",",","utt_id",",","sf.frames",")",")",")","output_handler.close","(",")"]
19
75
null
convert_rttm.py
espnet/egs2/wsj0_2mix/mixit_enh1/pyscripts/utils/convert_rttm.py
import argparse import collections.abc import logging import os import re from pathlib import Path from typing import Union import humanfriendly import numpy import soundfile from typeguard import check_argument_types from espnet2.utils.types import str_or_int
15
null
12
3
null
null
null
Use image node_id 1 for calling a global function with example usage: convert_rttm_text(path, wavscp_path, sampling_rate, output_path) without return types
155
node_id 1
998,879
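A hedged worked example of the sample-index arithmetic inside convert_rttm_text (the RTTM line is made up; numpy is imported as np to match the code's usage):

import numpy as np

# RTTM fields: type, utt, channel, start(s), duration(s), ..., speaker.
# "SPEAKER utt1 1 0.25 1.50 <NA> <NA> spk1 <NA>" at 16 kHz becomes:
sampling_rate = 16000
start = int(np.rint(0.25 * sampling_rate))        # 4000
end = start + int(np.rint(1.50 * sampling_rate))  # 4000 + 24000 = 28000
print("SPEAKER utt1 1 {} {} <NA> <NA> spk1 <NA>".format(start, end))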
data_batch_axis
GPT2Decoder
BaseStepDecoder
true
self
null
null
null
null
unknown
def data_batch_axis(self): return 0 if self._layout == "NT" else 1
["def","data_batch_axis","(","self",")",":","return","0","if","self._layout","==","``","NT","''","else","1"]
48
49
null
interactive_conditional_gpt2_samples.py
gluon-nlp/scripts/generation/interactive_conditional_gpt2_samples.py
import os import mxnet import argparse from gluonnlp.utils import set_seed from gluonnlp.sequence_sampler import BeamSearchSampler, BaseStepDecoder from gluonnlp.models.gpt2 import GPT2ForLM, list_pretrained_gpt2, get_pretrained_gpt2
15
1
6
2
1
5
1
Use image node_id 3 for calling the GPT2Decoder obj's underlying member method code with example usage: obj.data_batch_axis() and returns: unknown
146
node_id 3
1,097,716
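The one-liner encodes a layout convention; a trivial runnable restatement of it:

# "NT" = (batch, time) puts the batch on axis 0; the time-major "TN"
# layout puts it on axis 1 -- exactly what data_batch_axis returns.
for layout, expected in [("NT", 0), ("TN", 1)]:
    assert (0 if layout == "NT" else 1) == expected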
dpoly
global
null
false
n,_cache
null
null
null
null
_cache,_cache
def dpoly(n, _cache={}): """ nth differentiation polynomial for exp (Faa di Bruno's formula). TODO: most exponents are zero, so maybe a sparse representation would be better. """ if n in _cache: return _cache[n] if not _cache: _cache[0] = {(0,): 1} R = dpoly(n - 1) R = dict((c + (0,), v) for (c, v) in iteritems(R)) Ra = {} for powers, count in iteritems(R): powers1 = (powers[0] + 1,) + powers[1:] if powers1 in Ra: Ra[powers1] += count else: Ra[powers1] = count for powers, count in iteritems(R): if not sum(powers): continue for k, p in enumerate(powers): if p: powers2 = ( powers[:k] + (p - 1, powers[k + 1] + 1) + powers[k + 2 :] ) if powers2 in Ra: Ra[powers2] += p * count else: Ra[powers2] = p * count _cache[n] = Ra return _cache[n]
["def","dpoly","(","n",",","_cache=","{","}",")",":","``","''","''","nth","differentiation","polynomial","for","exp","(","Faa","di","Bruno","'s","formula",")",".","TODO",":","most","exponents","are","zero",",","so","maybe","a","sparse","representation","would","be","better.","``","''","''","if","n","in","_cache",":","return","_cache","[","n","]","if","not","_cache",":","_cache","[","0","]","=","{","(","0",",",")",":","1","}","R","=","dpoly","(","n","-","1",")","R","=","dict","(","(","c","+","(","0",",",")",",","v",")","for","(","c",",","v",")","in","iteritems","(","R",")",")","Ra","=","{","}","for","powers",",","count","in","iteritems","(","R",")",":","powers1","=","(","powers","[","0","]","+","1",",",")","+","powers","[","1",":","]","if","powers1","in","Ra",":","Ra","[","powers1","]","+=","count","else",":","Ra","[","powers1","]","=","count","for","powers",",","count","in","iteritems","(","R",")",":","if","not","sum","(","powers",")",":","continue","for","k",",","p","in","enumerate","(","powers",")",":","if","p",":","powers2","=","(","powers","[",":","k","]","+","(","p","-","1",",","powers","[","k","+","1","]","+","1",")","+","powers","[","k","+","2",":","]",")","if","powers2","in","Ra",":","Ra","[","powers2","]","+=","p","*","count","else",":","Ra","[","powers2","]","=","p","*","count","_cache","[","n","]","=","Ra","return","_cache","[","n","]"]
360
391
null
differentiation.py
catboost/contrib/python/mpmath/py3/mpmath/calculus/differentiation.py
from ..libmp.backend import xrange from .calculus import defun
15
null
2
13
null
null
null
Use image node_id 8 for calling a global function with example usage: dpoly(n, _cache) and returns: _cache, _cache
114
node_id 8
407,219
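The returned dict maps exponent tuples over (f', f'', f''', ...) to coefficients. A hedged spot-check of the first two polynomials, assuming dpoly and its iteritems helper are in scope as in mpmath; these match d/dx e^f = e^f f' and d^2/dx^2 e^f = e^f ((f')^2 + f''):

assert dpoly(1) == {(1, 0): 1}                   # e^f * f'
assert dpoly(2) == {(2, 0, 0): 1, (0, 1, 0): 1}  # e^f * ((f')**2 + f'')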
test_extract_errors
TestRedshiftDbEngineSpec
TestDbEngineSpec
true
self
null
null
Test that custom error messages are extracted correctly.
["Test","that","custom","error","messages","are","extracted","correctly","."]
null
def test_extract_errors(self): """ Test that custom error messages are extracted correctly. """ msg = ( 'FATAL: password authentication failed for user "wronguser"' ) result = RedshiftEngineSpec.extract_errors(Exception(msg)) assert result == [ SupersetError( error_type=SupersetErrorType.CONNECTION_ACCESS_DENIED_ERROR, message='Either the username "wronguser" or the password is incorrect.', level=ErrorLevel.ERROR, extra={ "invalid": ["username", "password"], "engine_name": "Amazon Redshift", "issue_codes": [ { "code": 1014, "message": "Issue 1014 - Either the username " "or the password is wrong.", }, { "code": 1015, "message": "Issue 1015 - Either the database is " "spelled incorrectly or does not exist.", }, ], }, ) ] msg = ( 'redshift: error: could not translate host name "badhost" ' "to address: nodename nor servname provided, or not known" ) result = RedshiftEngineSpec.extract_errors(Exception(msg)) assert result == [ SupersetError( error_type=SupersetErrorType.CONNECTION_INVALID_HOSTNAME_ERROR, message='The hostname "badhost" cannot be resolved.', level=ErrorLevel.ERROR, extra={ "invalid": ["host"], "engine_name": "Amazon Redshift", "issue_codes": [ { "code": 1007, "message": "Issue 1007 - The hostname provided " "can't be resolved.", } ], }, ) ] msg = dedent( """ psql: error: could not connect to server: Connection refused Is the server running on host "localhost" (::1) and accepting TCP/IP connections on port 12345? could not connect to server: Connection refused Is the server running on host "localhost" (127.0.0.1) and accepting TCP/IP connections on port 12345? """ ) result = RedshiftEngineSpec.extract_errors(Exception(msg)) assert result == [ SupersetError( error_type=SupersetErrorType.CONNECTION_PORT_CLOSED_ERROR, message='Port 12345 on hostname "localhost" refused the connection.', level=ErrorLevel.ERROR, extra={ "invalid": ["host", "port"], "engine_name": "Amazon Redshift", "issue_codes": [ { "code": 1008, "message": "Issue 1008 - The port is closed.", } ], }, ) ] msg = dedent( """ psql: error: could not connect to server: Operation timed out Is the server running on host "example.com" (93.184.216.34) and accepting TCP/IP connections on port 12345? """ ) result = RedshiftEngineSpec.extract_errors(Exception(msg)) assert result == [ SupersetError( error_type=SupersetErrorType.CONNECTION_HOST_DOWN_ERROR, message=( 'The host "example.com" might be down, ' "and can't be reached on port 12345." ), level=ErrorLevel.ERROR, extra={ "engine_name": "Amazon Redshift", "issue_codes": [ { "code": 1009, "message": "Issue 1009 - The host might be down, " "and can't be reached on the provided port.", } ], "invalid": ["host", "port"], }, ) ] # response with IP only msg = dedent( """ psql: error: could not connect to server: Operation timed out Is the server running on host "93.184.216.34" and accepting TCP/IP connections on port 12345? """ ) result = RedshiftEngineSpec.extract_errors(Exception(msg)) assert result == [ SupersetError( error_type=SupersetErrorType.CONNECTION_HOST_DOWN_ERROR, message=( 'The host "93.184.216.34" might be down, ' "and can't be reached on port 12345." 
                ),
                level=ErrorLevel.ERROR,
                extra={
                    "engine_name": "Amazon Redshift",
                    "issue_codes": [
                        {
                            "code": 1009,
                            "message": "Issue 1009 - The host might be down, "
                            "and can't be reached on the provided port.",
                        }
                    ],
                    "invalid": ["host", "port"],
                },
            )
        ]

        msg = 'database "badDB" does not exist'
        result = RedshiftEngineSpec.extract_errors(Exception(msg))
        assert result == [
            SupersetError(
                error_type=SupersetErrorType.CONNECTION_UNKNOWN_DATABASE_ERROR,
                message='We were unable to connect to your database named "badDB".'
                " Please verify your database name and try again.",
                level=ErrorLevel.ERROR,
                extra={
                    "engine_name": "Amazon Redshift",
                    "issue_codes": [
                        {
                            "code": 1015,
                            "message": "Issue 1015 - Either the database is "
                            "spelled incorrectly or does not exist.",
                        }
                    ],
                    "invalid": ["database"],
                },
            )
        ]
["def","test_extract_errors","(","self",")",":","``","''","''","Test","that","custom","error","messages","are","extracted","correctly.","``","''","''","msg","=","(","'FATAL",":","password","authentication","failed","for","user","``","wronguser","''","'",")","result","=","RedshiftEngineSpec.extract_errors","(","Exception","(","msg",")",")","assert","result","==","[","SupersetError","(","error_type=SupersetErrorType.CONNECTION_ACCESS_DENIED_ERROR",",","message='Either","the","username","``","wronguser","''","or","the","password","is","incorrect",".","'",",","level=ErrorLevel.ERROR",",","extra=","{","``","invalid","''",":","[","``","username","''",",","``","password","''","]",",","``","engine_name","''",":","``","Amazon","Redshift","''",",","``","issue_codes","''",":","[","{","``","code","''",":","1014",",","``","message","''",":","``","Issue","1014","-","Either","the","username","``","``","or","the","password","is","wrong",".","``",",","}",",","{","``","code","''",":","1015",",","``","message","''",":","``","Issue","1015","-","Either","the","database","is","``","``","spelled","incorrectly","or","does","not","exist",".","``",",","}",",","]",",","}",",",")","]","msg","=","(","'redshift",":","error",":","could","not","translate","host","name","``","badhost","''","'","``","to","address",":","nodename","nor","servname","provided",",","or","not","known","''",")","result","=","RedshiftEngineSpec.extract_errors","(","Exception","(","msg",")",")","assert","result","==","[","SupersetError","(","error_type=SupersetErrorType.CONNECTION_INVALID_HOSTNAME_ERROR",",","message='The","hostname","``","badhost","''","can","not","be","resolved",".","'",",","level=ErrorLevel.ERROR",",","extra=","{","``","invalid","''",":","[","``","host","''","]",",","``","engine_name","''",":","``","Amazon","Redshift","''",",","``","issue_codes","''",":","[","{","``","code","''",":","1007",",","``","message","''",":","``","Issue","1007","-","The","hostname","provided","``","``","ca","n't","be","resolved",".","``",",","}","]",",","}",",",")","]","msg","=","dedent","(","``","''","''","psql",":","error",":","could","not","connect","to","server",":","Connection","refused","Is","the","server","running","on","host","``","localhost","''","(",":",":1",")","and","accepting","TCP\/IP","connections","on","port","12345","?","could","not","connect","to","server",":","Connection","refused","Is","the","server","running","on","host","``","localhost","''","(","127.0.0.1",")","and","accepting","TCP\/IP","connections","on","port","12345","?","``","''","''",")","result","=","RedshiftEngineSpec.extract_errors","(","Exception","(","msg",")",")","assert","result","==","[","SupersetError","(","error_type=SupersetErrorType.CONNECTION_PORT_CLOSED_ERROR",",","message='Port","12345","on","hostname","``","localhost","''","refused","the","connection",".","'",",","level=ErrorLevel.ERROR",",","extra=","{","``","invalid","''",":","[","``","host","''",",","``","port","''","]",",","``","engine_name","''",":","``","Amazon","Redshift","''",",","``","issue_codes","''",":","[","{","``","code","''",":","1008",",","``","message","''",":","``","Issue","1008","-","The","port","is","closed",".","``",",","}","]",",","}",",",")","]","msg","=","dedent","(","``","''","''","psql",":","error",":","could","not","connect","to","server",":","Operation","timed","out","Is","the","server","running","on","host","``","example.com","''","(","93.184.216.34",")","and","accepting","TCP\/IP","connections","on","port","12345","?","``","''","''",")","result","=","RedshiftEngineSpec.extract_err
ors","(","Exception","(","msg",")",")","assert","result","==","[","SupersetError","(","error_type=SupersetErrorType.CONNECTION_HOST_DOWN_ERROR",",","message=","(","'The","host","``","example.com","''","might","be","down",",","'","``","and","ca","n't","be","reached","on","port","12345",".","''",")",",","level=ErrorLevel.ERROR",",","extra=","{","``","engine_name","''",":","``","Amazon","Redshift","''",",","``","issue_codes","''",":","[","{","``","code","''",":","1009",",","``","message","''",":","``","Issue","1009","-","The","host","might","be","down",",","``","``","and","ca","n't","be","reached","on","the","provided","port",".","``",",","}","]",",","``","invalid","''",":","[","``","host","''",",","``","port","''","]",",","}",",",")","]","#","response","with","IP","only","msg","=","dedent","(","``","''","''","psql",":","error",":","could","not","connect","to","server",":","Operation","timed","out","Is","the","server","running","on","host","``","93.184.216.34","''","and","accepting","TCP\/IP","connections","on","port","12345","?","``","''","''",")","result","=","RedshiftEngineSpec.extract_errors","(","Exception","(","msg",")",")","assert","result","==","[","SupersetError","(","error_type=SupersetErrorType.CONNECTION_HOST_DOWN_ERROR",",","message=","(","'The","host","``","93.184.216.34","''","might","be","down",",","'","``","and","ca","n't","be","reached","on","port","12345",".","''",")",",","level=ErrorLevel.ERROR",",","extra=","{","``","engine_name","''",":","``","Amazon","Redshift","''",",","``","issue_codes","''",":","[","{","``","code","''",":","1009",",","``","message","''",":","``","Issue","1009","-","The","host","might","be","down",",","``","``","and","ca","n't","be","reached","on","the","provided","port",".","``",",","}","]",",","``","invalid","''",":","[","``","host","''",",","``","port","''","]",",","}",",",")","]","msg","=","'database","``","badDB","''","does","not","exist'","result","=","RedshiftEngineSpec.extract_errors","(","Exception","(","msg",")",")","assert","result","==","[","SupersetError","(","error_type=SupersetErrorType.CONNECTION_UNKNOWN_DATABASE_ERROR",",","message='We","were","unable","to","connect","to","your","database","named","``","badDB","''",".","'","``","Please","verify","your","database","name","and","try","again",".","``",",","level=ErrorLevel.ERROR",",","extra=","{","``","engine_name","''",":","``","Amazon","Redshift","''",",","``","issue_codes","''",":","[","{","``","code","''",":","10015",",","``","message","''",":","``","Issue","1015","-","Either","the","database","is","``","``","spelled","incorrectly","or","does","not","exist",".","``",",","}","]",",","``","invalid","''",":","[","``","database","''","]",",","}",",",")","]"]
32
192
null
redshift_tests.py
superset/tests/integration_tests/db_engine_specs/redshift_tests.py
import unittest.mock from textwrap import dedent import numpy import pandas from sqlalchemy.types import NVARCHAR from superset.db_engine_specs.redshift import RedshiftEngineSpec from superset.errors import ErrorLevel, SupersetError, SupersetErrorType from superset.sql_parse import Table from tests.integration_tests.db_engine_specs.base_tests import TestDbEngineSpec from tests.integration_tests.test_app import app
15
1
10
0
1
3
1
Use image node_id 1 for calling the TestRedshiftDbEngineSpec obj's underlying member method code with example usage: obj.test_extract_errors() without return types
163
node_id 1
2,027,364
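The pattern this test exercises is a classmethod that turns raw driver messages into structured errors. A hedged sketch of calling it directly (the user name in the message is made up):

from superset.db_engine_specs.redshift import RedshiftEngineSpec

errors = RedshiftEngineSpec.extract_errors(
    Exception('FATAL: password authentication failed for user "alice"')
)
print(errors[0].error_type, errors[0].message)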
__init__
MetricsSource
object
true
self,content_type,s3_uri,content_digest
Accepts metrics source parameters for conversion to request dict.
["Accepts","metrics","source","parameters","for","conversion","to","request","dict","."]
Initialize a ``MetricsSource`` instance and turn parameters into dict. Args: content_type (str or PipelineVariable): Specifies the type of content in S3 URI s3_uri (str or PipelineVariable): The S3 URI of the metric content_digest (str or PipelineVariable): The digest of the metric (default: None)
["Initialize","a","``","MetricsSource","``","instance","and","turn","parameters","into","dict",".","Args",":","content_type","(","str","or","PipelineVariable",")",":","Specifies","the","type","of","content","in","S3","URI","s3_uri","(","str","or","PipelineVariable",")",":","The","S3","URI","of","the","metric","content_digest","(","str","or","PipelineVariable",")",":","The","digest","of","the","metric","(","default",":","None",")"]
MetricsSource
def __init__( self, content_type: Union[str, PipelineVariable], s3_uri: Union[str, PipelineVariable], content_digest: Optional[Union[str, PipelineVariable]] = None, ): """Initialize a ``MetricsSource`` instance and turn parameters into dict. Args: content_type (str or PipelineVariable): Specifies the type of content in S3 URI s3_uri (str or PipelineVariable): The S3 URI of the metric content_digest (str or PipelineVariable): The digest of the metric (default: None) """ self.content_type = content_type self.s3_uri = s3_uri self.content_digest = content_digest
["def","__init__","(","self",",","content_type",":","Union","[","str",",","PipelineVariable","]",",","s3_uri",":","Union","[","str",",","PipelineVariable","]",",","content_digest",":","Optional","[","Union","[","str",",","PipelineVariable","]","]","=","None",",",")",":","``","''","''","Initialize","a","``","MetricsSource","``","instance","and","turn","parameters","into","dict",".","Args",":","content_type","(","str","or","PipelineVariable",")",":","Specifies","the","type","of","content","in","S3","URI","s3_uri","(","str","or","PipelineVariable",")",":","The","S3","URI","of","the","metric","content_digest","(","str","or","PipelineVariable",")",":","The","digest","of","the","metric","(","default",":","None",")","``","''","''","self.content_type","=","content_type","self.s3_uri","=","s3_uri","self.content_digest","=","content_digest"]
104
121
null
model_metrics.py
sagemaker-python-sdk/src/sagemaker/model_metrics.py
from __future__ import absolute_import from typing import Optional, Union from sagemaker.workflow.entities import PipelineVariable
15
3
3
0
3
2
1
Use image node_id 1 to create a new MetricsSource object from inherited base classes: object with example: obj = MetricsSource(content_type, s3_uri, content_digest)
164
node_id 1
1,845,782
ihilbert
global
null
false
x
null
null
null
null
unknown
def ihilbert(x): """ Return inverse Hilbert transform of a periodic sequence x. If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = -sqrt(-1)*sign(j) * x_j y_0 = 0 """ return -hilbert(x)
["def","ihilbert","(","x",")",":","``","''","''","Return","inverse","Hilbert","transform","of","a","periodic","sequence","x",".","If","``","x_j","``","and","``","y_j","``","are","Fourier","coefficients","of","periodic","functions","x","and","y",",","respectively",",","then",":",":","y_j","=","-sqrt","(","-1",")","*","sign","(","j",")","*","x_j","y_0","=","0","``","''","''","return","-hilbert","(","x",")"]
265
276
null
pseudo_diffs.py
catboost/contrib/python/scipy/py2/scipy/fftpack/pseudo_diffs.py
from __future__ import division, print_function, absolute_import from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj from .None import convolve from scipy.fftpack.basic import _datacopied import atexit
15
null
5
10
null
null
null
Use image node_id 5 for calling a global function with example usage: ihilbert(x) and returns: unknown
102
node_id 5
523,401
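Since ihilbert is just the negated hilbert multiplier, the two round-trip on zero-mean periodic input: (-i*sign(j)) * (i*sign(j)) = 1 for j != 0, and the j = 0 (mean) coefficient is zeroed either way. A hedged numeric check using the equivalent public scipy.fftpack API (the record itself is a vendored copy inside catboost):

import numpy as np
from scipy.fftpack import hilbert, ihilbert

x = np.sin(2 * np.pi * np.arange(16) / 16)  # zero-mean periodic sequence
np.testing.assert_allclose(ihilbert(hilbert(x)), x, atol=1e-10)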
__init__
NullLogger
null
true
self
null
null
null
null
NullLogger
def __init__(self): self.indent_ = ""
["def","__init__","(","self",")",":","self.indent_","=","``","''"]
8
9
null
logger.py
turicreate/src/external/boost/boost_1_68_0/tools/build/src/util/logger.py
import sys
15
2
1
0
1
7
null
Use image node_id 1 to create a new NullLogger object with example: obj = NullLogger()
87
node_id 1
2,276,647
update_already_pruned
SensitivityAnalysis
null
true
self,layername,ratio
null
null
Set the already pruned ratio for the target layer.
["Set","the","already","pruned","ratio","for","the","target","layer","."]
null
def update_already_pruned(self, layername, ratio): """ Set the already pruned ratio for the target layer. """ self.already_pruned[layername] = ratio
["def","update_already_pruned","(","self",",","layername",",","ratio",")",":","``","''","''","Set","the","already","pruned","ratio","for","the","target","layer.","``","''","''","self.already_pruned","[","layername","]","=","ratio"]
240
244
null
sensitivity_analysis.py
auptimizer/src/aup/compression/torch/utils/sensitivity_analysis.py
import copy import csv import logging from collections import OrderedDict import numpy import torch.nn
15
1
6
0
0
8
null
Use image node_id 7 for calling the SensitivityAnalysis obj's underlying member method code with example usage: obj.update_already_pruned(layername, ratio) without return types
176
node_id 7
315,483
setup
global
null
false
app
null
null
null
null
null
def setup(app): app.add_autodocumenter(HasTraitsDocumenter) app.add_autodocumenter(TraitDocumenter)
["def","setup","(","app",")",":","app.add_autodocumenter","(","HasTraitsDocumenter",")","app.add_autodocumenter","(","TraitDocumenter",")"]
99
101
null
autodoc_traits.py
pythreejs/docs/sphinxext/autodoc_traits.py
from collections import OrderedDict from traitlets import TraitType, Undefined, Container, Dict, Any, HasTraits from sphinx.ext.autodoc import ClassDocumenter, AttributeDocumenter
15
null
3
3
null
null
null
Use image node_id 3 for calling a global function with example usage: setup(app) without return types
101
node_id 3
1,691,047
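A hedged sketch of wiring this extension into a Sphinx conf.py (the extension module name is illustrative; Sphinx invokes setup(app) when it loads the entry, which registers both documenters):

extensions = [
    "sphinx.ext.autodoc",
    "autodoc_traits",  # illustrative module name on sys.path
]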
get_retro_decoder_block_spec
global
null
false
config,use_transformer_engine
null
null
null
null
block_spec
def get_retro_decoder_block_spec(
    config: RetroConfig, use_transformer_engine: bool
) -> TransformerBlockSubmodules:
    """Retro decoder block spec.

    Retro decoder block implementation details:
    - The retro decoder block consists of interleaved GPT layers
      and customized Retro decoder layers.
    - The Retro decoder layers are spaced three layers apart,
      and start on layer 6 or 9 (depending on the total number of layers).
    - The first decoder layer instantiates an encoder block,
      and it therefore passes in an encoder_block_spec.

    Arguments:
      config (RetroConfig): Retro config.
      use_transformer_engine (bool): If True, use Transformer Engine
      (instead of local modules).
    """

    # Num layers.
    assert (
        parallel_state.get_pipeline_model_parallel_world_size() == 1
    ), "retro does not currently support pipeline parallelism."
    assert (
        parallel_state.get_virtual_pipeline_model_parallel_world_size()
        is None
    ), "retro does not currently support virtual pipeline parallelism."
    num_layers = get_num_layers_to_build(config)

    # Retro layer numbers.
    retro_layer_start = 6 if num_layers <= 15 else 9
    retro_layer_numbers = list(
        range(retro_layer_start, num_layers + 1, 3)
    )

    # Layer specs.
    gpt_layer_spec = (
        get_gpt_layer_with_transformer_engine_spec()
        if use_transformer_engine
        else get_gpt_layer_local_spec()
    )
    get_retro_decoder_layer_spec = (
        get_retro_decoder_layer_te_spec
        if use_transformer_engine
        else get_retro_decoder_layer_local_spec
    )
    retro_layer_spec = get_retro_decoder_layer_spec()
    retro_layer_spec_with_retriever = get_retro_decoder_layer_spec(
        get_retro_encoder_block_spec(config, use_transformer_engine)
    )

    layer_specs = []
    for layer_number in range(1, num_layers + 1):
        if layer_number == retro_layer_numbers[0]:
            layer_specs.append(retro_layer_spec_with_retriever)
        elif layer_number in retro_layer_numbers:
            layer_specs.append(retro_layer_spec)
        else:
            layer_specs.append(gpt_layer_spec)

    # Block spec.
    block_spec = TransformerBlockSubmodules(layer_specs=layer_specs)

    return block_spec
["def","get_retro_decoder_block_spec","(","config",":","RetroConfig",",","use_transformer_engine",":","bool",")","-",">","TransformerBlockSubmodules",":","``","''","''","Retro","decoder","block","spec",".","Retro","decoder","block","implementation","details",":","-","The","retro","decoder","block","consists","of","interleaved","GPT","layers","and","customized","Retro","decoder","layers",".","-","The","Retro","decoder","layers","are","spaced","three","layers","apart",",","and","start","on","layer","6","or","9","(","depending","on","the","total","number","of","layers",")",".","-","The","first","decoder","layer","instantiates","an","encoder","block",",","and","it","therefore","passes","in","an","encoder_block_spec",".","Arguments",":","config","(","RetroConfig",")",":","Retro","config",".","use_transformer_engine","(","bool",")",":","If","True",",","use","Transformer","Engine","(","instead","of","local","modules.","``","''","''","#","Num","layers",".","assert","(","parallel_state.get_pipeline_model_parallel_world_size","(",")","==","1",")",",","``","retro","does","not","currently","support","pipeline","parallelism",".","''","assert","(","parallel_state.get_virtual_pipeline_model_parallel_world_size","(",")","is","None",")",",","``","retro","does","not","currently","support","virtual","pipeline","parallelism",".","''","num_layers","=","get_num_layers_to_build","(","config",")","#","Retro","layer","numbers",".","retro_layer_start","=","6","if","num_layers","<","=","15","else","9","retro_layer_numbers","=","list","(","range","(","retro_layer_start",",","num_layers","+","1",",","3",")",")","#","Layer","specs",".","gpt_layer_spec","=","(","get_gpt_layer_with_transformer_engine_spec","(",")","if","use_transformer_engine","else","get_gpt_layer_local_spec","(",")",")","get_retro_decoder_layer_spec","=","(","get_retro_decoder_layer_te_spec","if","use_transformer_engine","else","get_retro_decoder_layer_local_spec",")","retro_layer_spec","=","get_retro_decoder_layer_spec","(",")","retro_layer_spec_with_retriever","=","get_retro_decoder_layer_spec","(","get_retro_encoder_block_spec","(","config",",","use_transformer_engine",")",")","layer_specs","=","[","]","for","layer_number","in","range","(","1",",","num_layers","+","1",")",":","if","layer_number","==","retro_layer_numbers","[","0","]",":","layer_specs.append","(","retro_layer_spec_with_retriever",")","elif","layer_number","in","retro_layer_numbers",":","layer_specs.append","(","retro_layer_spec",")","else",":","layer_specs.append","(","gpt_layer_spec",")","#","Block","spec",".","block_spec","=","TransformerBlockSubmodules","(","layer_specs=layer_specs",")","return","block_spec"]
89
152
null
decoder_spec.py
megatron-lm/megatron/core/models/retro/decoder_spec.py
from megatron.core import parallel_state from megatron.core.fusions.fused_layer_norm import FusedLayerNorm from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_local_spec, get_gpt_layer_with_transformer_engine_spec from megatron.core.models.retro.config import RetroConfig from megatron.core.models.retro.decoder_attention import RetroDecoderBiasDropoutAdd, RetroDecoderCrossAttention from megatron.core.models.retro.encoder_spec import get_retro_encoder_block_spec from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear from megatron.core.transformer import ModuleSpec from megatron.core.transformer.attention import CrossAttentionSubmodules from megatron.core.transformer.custom_layers.transformer_engine import TEColumnParallelLinear, TEDotProductAttention, TENorm, TERowParallelLinear from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.transformer_block import TransformerBlockSubmodules, get_num_layers_to_build
15
null
12
3
null
null
null
Use image node_id 3 for calling a global function with example usage: get_retro_decoder_block_spec(config, use_transformer_engine) and returns: block_spec
154
node_id 3
1,324,188
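The layer-placement rule in the function body is easy to check in isolation. A hedged restatement with two worked cases (the helper name is hypothetical):

def retro_layer_numbers(num_layers):
    # start on layer 6 for shallow stacks, 9 otherwise, then every 3rd layer
    start = 6 if num_layers <= 15 else 9
    return list(range(start, num_layers + 1, 3))

assert retro_layer_numbers(12) == [6, 9, 12]
assert retro_layer_numbers(24) == [9, 12, 15, 18, 21, 24]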
time_map_coordinates
NdimageInterpolation
Benchmark
true
self,shape,order,mode
null
null
null
null
null
def time_map_coordinates(self, shape, order, mode): coords = np.meshgrid( *[np.arange(0, s, 2) + 0.3 for s in self.x.shape] ) map_coordinates(self.x, coords, order=order, mode=mode)
["def","time_map_coordinates","(","self",",","shape",",","order",",","mode",")",":","coords","=","np.meshgrid","(","*","[","np.arange","(","0",",","s",",","2",")","+","0.3","for","s","in","self.x.shape","]",")","map_coordinates","(","self.x",",","coords",",","order=order",",","mode=mode",")"]
60
62
null
ndimage_interpolation.py
scipy/benchmarks/benchmarks/ndimage_interpolation.py
import numpy from .common import Benchmark
15
1
2
2
1
9
1
Use image node_id 7 for calling the NdimageInterpolation obj's underlying member method code with example usage: obj.time_map_coordinates(shape, order, mode) without return types
178
node_id 7
1,883,762
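A hedged standalone version of the benchmark body above (self.x is replaced by a small random array, and order/mode are fixed, so the snippet runs outside the asv harness):

import numpy as np
from scipy.ndimage import map_coordinates

x = np.random.default_rng(0).standard_normal((64, 64))
# sample every other pixel, offset by 0.3, forcing interpolation
coords = np.meshgrid(*[np.arange(0, s, 2) + 0.3 for s in x.shape])
out = map_coordinates(x, coords, order=1, mode="mirror")
print(out.shape)  # (32, 32)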
time_geometric_transform_mapping
NdimageInterpolation
Benchmark
true
self,shape,order,mode
null
null
null
null
null
def time_geometric_transform_mapping(self, shape, order, mode): if self.x.ndim == 2: mapping = shift_func_2d if self.x.ndim == 3: mapping = shift_func_3d geometric_transform(self.x, mapping, order=order, mode=mode)
["def","time_geometric_transform_mapping","(","self",",","shape",",","order",",","mode",")",":","if","self.x.ndim","==","2",":","mapping","=","shift_func_2d","if","self.x.ndim","==","3",":","mapping","=","shift_func_3d","geometric_transform","(","self.x",",","mapping",",","order=order",",","mode=mode",")"]
53
58
null
ndimage_interpolation.py
scipy/benchmarks/benchmarks/ndimage_interpolation.py
import numpy from .common import Benchmark
15
1
2
2
1
9
1
Use image node_id 6 for calling the NdimageInterpolation obj's underlying member method code with example usage: obj.time_geometric_transform_mapping(shape, order, mode) without return types
190
node_id 6
1,883,761
time_zoom
NdimageInterpolation
Benchmark
true
self,shape,order,mode
null
null
null
null
null
def time_zoom(self, shape, order, mode): zoom(self.x, (1.3,) * self.x.ndim, order=order, mode=mode)
["def","time_zoom","(","self",",","shape",",","order",",","mode",")",":","zoom","(","self.x",",","(","1.3",",",")","*","self.x.ndim",",","order=order",",","mode=mode",")"]
50
51
null
ndimage_interpolation.py
scipy/benchmarks/benchmarks/ndimage_interpolation.py
import numpy from .common import Benchmark
15
1
2
2
1
9
1
Use image node_id 5 for calling the NdimageInterpolation obj's underlying member method code with example usage: obj.time_zoom(shape, order, mode) without return types
167
node_id 5
1,883,760
time_shift
NdimageInterpolation
Benchmark
true
self,shape,order,mode
null
null
null
null
null
def time_shift(self, shape, order, mode): shift(self.x, (-2.5,) * self.x.ndim, order=order, mode=mode)
["def","time_shift","(","self",",","shape",",","order",",","mode",")",":","shift","(","self.x",",","(","-2.5",",",")","*","self.x.ndim",",","order=order",",","mode=mode",")"]
47
48
null
ndimage_interpolation.py
scipy/benchmarks/benchmarks/ndimage_interpolation.py
import numpy from .common import Benchmark
15
1
2
2
1
9
1
Use image node_id 4 for calling the NdimageInterpolation obj's underlying member method code with example usage: obj.time_shift(shape, order, mode) without return types
168
node_id 4
1,883,759
time_rotate
NdimageInterpolation
Benchmark
true
self,shape,order,mode
null
null
null
null
null
def time_rotate(self, shape, order, mode): rotate(self.x, 15, order=order, mode=mode)
["def","time_rotate","(","self",",","shape",",","order",",","mode",")",":","rotate","(","self.x",",","15",",","order=order",",","mode=mode",")"]
44
45
null
ndimage_interpolation.py
scipy/benchmarks/benchmarks/ndimage_interpolation.py
import numpy from .common import Benchmark
15
1
2
2
1
9
1
Use image node_id 3 for calling the NdimageInterpolation obj's underlying member method code with example usage: obj.time_rotate(shape, order, mode) without return types
169
node_id 3
1,883,758
resample_poly
global
null
false
x,up,down,axis,window,padtype,cval
null
null
null
null
y,x
def resample_poly( x, up, down, axis=0, window=("kaiser", 5.0), padtype="constant", cval=None, ): """ Resample `x` along the given axis using polyphase filtering. The signal `x` is upsampled by the factor `up`, a zero-phase low-pass FIR filter is applied, and then it is downsampled by the factor `down`. The resulting sample rate is ``up / down`` times the original sample rate. Values beyond the boundary of the signal are assumed to be zero during the filtering step. Parameters ---------- x : array_like The data to be resampled. up : int The upsampling factor. down : int The downsampling factor. axis : int, optional The axis of `x` that is resampled. Default is 0. window : string, tuple, or array_like, optional Desired window to use to design the low-pass filter, or the FIR filter coefficients to employ. See below for details. padtype : string, optional `constant`, `line`, `mean`, `median`, `maximum`, `minimum` or any of the other signal extension modes supported by `cupyx.scipy.signal.upfirdn`. Changes assumptions on values beyond the boundary. If `constant`, assumed to be `cval` (default zero). If `line` assumed to continue a linear trend defined by the first and last points. `mean`, `median`, `maximum` and `minimum` work as in `cupy.pad` and assume that the values beyond the boundary are the mean, median, maximum or minimum respectively of the array along the axis. cval : float, optional Value to use if `padtype='constant'`. Default is zero. Returns ------- resampled_x : array The resampled array. See Also -------- decimate : Downsample the signal after applying an FIR or IIR filter. resample : Resample up or down using the FFT method. Notes ----- This polyphase method will likely be faster than the Fourier method in `cusignal.resample` when the number of samples is large and prime, or when the number of samples is large and `up` and `down` share a large greatest common denominator. The length of the FIR filter used will depend on ``max(up, down) // gcd(up, down)``, and the number of operations during polyphase filtering will depend on the filter length and `down` (see `cusignal.upfirdn` for details). The argument `window` specifies the FIR low-pass filter design. If `window` is an array_like it is assumed to be the FIR filter coefficients. Note that the FIR filter is applied after the upsampling step, so it should be designed to operate on a signal at a sampling frequency higher than the original by a factor of `up//gcd(up, down)`. This function's output will be centered with respect to this array, so it is best to pass a symmetric filter with an odd number of samples if, as is usually the case, a zero-phase filter is desired. For any other type of `window`, the functions `cusignal.get_window` and `cusignal.firwin` are called to generate the appropriate filter coefficients. The first sample of the returned vector is the same as the first sample of the input vector. The spacing between samples is changed from ``dx`` to ``dx * down / float(up)``. 
Examples -------- Note that the end of the resampled data rises to meet the first sample of the next cycle for the FFT method, and gets closer to zero for the polyphase method: >>> import cupy >>> from cupyx.scipy.signal import resample, resample_poly >>> x = cupy.linspace(0, 10, 20, endpoint=False) >>> y = cupy.cos(-x**2/6.0) >>> f_fft = resample(y, 100) >>> f_poly = resample_poly(y, 100, 20) >>> xnew = cupy.linspace(0, 10, 100, endpoint=False) >>> import matplotlib.pyplot as plt >>> plt.plot(cupy.asnumpy(xnew), cupy.asnumpy(f_fft), 'b.-', \ cupy.asnumpy(xnew), cupy.asnumpy(f_poly), 'r.-') >>> plt.plot(cupy.asnumpy(x), cupy.asnumpy(y), 'ko-') >>> plt.plot(10, cupy.asnumpy(y[0]), 'bo', 10, 0., 'ro') # boundaries >>> plt.legend(['resample', 'resamp_poly', 'data'], loc='best') >>> plt.show() """ if padtype != "constant" or cval is not None: raise ValueError( "padtype and cval arguments are not supported by upfirdn" ) x = cupy.asarray(x) up = int(up) down = int(down) if up < 1 or down < 1: raise ValueError("up and down must be >= 1") # Determine our up and down factors # Use a rational approximation to save computation time on really long # signals g_ = gcd(up, down) up //= g_ down //= g_ if up == down == 1: return x.copy() n_out = x.shape[axis] * up n_out = n_out // down + bool(n_out % down) if isinstance(window, (list, cupy.ndarray)): window = cupy.asarray(window) if window.ndim > 1: raise ValueError("window must be 1-D") half_len = (window.size - 1) // 2 h = up * window else: half_len = 10 * max(up, down) h = up * _design_resample_poly(up, down, window) # Zero-pad our filter to put the output samples at the center n_pre_pad = down - half_len % down n_post_pad = 0 n_pre_remove = (half_len + n_pre_pad) // down # We should rarely need to do this given our filter lengths... while ( _output_len( len(h) + n_pre_pad + n_post_pad, x.shape[axis], up, down ) < n_out + n_pre_remove ): n_post_pad += 1 h = cupy.concatenate( ( cupy.zeros(n_pre_pad, h.dtype), h, cupy.zeros(n_post_pad, h.dtype), ) ) n_pre_remove_end = n_pre_remove + n_out # filter then remove excess y = upfirdn(h, x, up, down, axis) keep = [slice(None)] * x.ndim keep[axis] = slice(n_pre_remove, n_pre_remove_end) return y[tuple(keep)]
["def","resample_poly","(","x",",","up",",","down",",","axis=0",",","window=","(","``","kaiser","''",",","5.0",")",",","padtype=","''","constant","''",",","cval=None",",",")",":","``","''","''","Resample","`","x","`","along","the","given","axis","using","polyphase","filtering",".","The","signal","`","x","`","is","upsampled","by","the","factor","`","up","`",",","a","zero-phase","low-pass","FIR","filter","is","applied",",","and","then","it","is","downsampled","by","the","factor","`","down","`",".","The","resulting","sample","rate","is","``","up","\/","down","``","times","the","original","sample","rate",".","Values","beyond","the","boundary","of","the","signal","are","assumed","to","be","zero","during","the","filtering","step",".","Parameters","--","--","--","--","--","x",":","array_like","The","data","to","be","resampled",".","up",":","int","The","upsampling","factor",".","down",":","int","The","downsampling","factor",".","axis",":","int",",","optional","The","axis","of","`","x","`","that","is","resampled",".","Default","is","0.","window",":","string",",","tuple",",","or","array_like",",","optional","Desired","window","to","use","to","design","the","low-pass","filter",",","or","the","FIR","filter","coefficients","to","employ",".","See","below","for","details",".","padtype",":","string",",","optional","`","constant","`",",","`","line","`",",","`","mean","`",",","`","median","`",",","`","maximum","`",",","`","minimum","`","or","any","of","the","other","signal","extension","modes","supported","by","`","cupyx.scipy.signal.upfirdn","`",".","Changes","assumptions","on","values","beyond","the","boundary",".","If","`","constant","`",",","assumed","to","be","`","cval","`","(","default","zero",")",".","If","`","line","`","assumed","to","continue","a","linear","trend","defined","by","the","first","and","last","points",".","`","mean","`",",","`","median","`",",","`","maximum","`","and","`","minimum","`","work","as","in","`","cupy.pad","`","and","assume","that","the","values","beyond","the","boundary","are","the","mean",",","median",",","maximum","or","minimum","respectively","of","the","array","along","the","axis",".","cval",":","float",",","optional","Value","to","use","if","`","padtype='constant","'","`",".","Default","is","zero",".","Returns","--","--","--","-","resampled_x",":","array","The","resampled","array",".","See","Also","--","--","--","--","decimate",":","Downsample","the","signal","after","applying","an","FIR","or","IIR","filter",".","resample",":","Resample","up","or","down","using","the","FFT","method",".","Notes","--","--","-","This","polyphase","method","will","likely","be","faster","than","the","Fourier","method","in","`","cusignal.resample","`","when","the","number","of","samples","is","large","and","prime",",","or","when","the","number","of","samples","is","large","and","`","up","`","and","`","down","`","share","a","large","greatest","common","denominator",".","The","length","of","the","FIR","filter","used","will","depend","on","``","max","(","up",",","down",")","\/\/","gcd","(","up",",","down",")","``",",","and","the","number","of","operations","during","polyphase","filtering","will","depend","on","the","filter","length","and","`","down","`","(","see","`","cusignal.upfirdn","`","for","details",")",".","The","argument","`","window","`","specifies","the","FIR","low-pass","filter","design",".","If","`","window","`","is","an","array_like","it","is","assumed","to","be","the","FIR","filter","coefficients",".","Note","that","the","FIR","filter","is","applied","after","the","upsampling","ste
p",",","so","it","should","be","designed","to","operate","on","a","signal","at","a","sampling","frequency","higher","than","the","original","by","a","factor","of","`","up\/\/gcd","(","up",",","down",")","`",".","This","function","'s","output","will","be","centered","with","respect","to","this","array",",","so","it","is","best","to","pass","a","symmetric","filter","with","an","odd","number","of","samples","if",",","as","is","usually","the","case",",","a","zero-phase","filter","is","desired",".","For","any","other","type","of","`","window","`",",","the","functions","`","cusignal.get_window","`","and","`","cusignal.firwin","`","are","called","to","generate","the","appropriate","filter","coefficients",".","The","first","sample","of","the","returned","vector","is","the","same","as","the","first","sample","of","the","input","vector",".","The","spacing","between","samples","is","changed","from","``","dx","``","to","``","dx","*","down","\/","float","(","up",")","``",".","Examples","--","--","--","--","Note","that","the","end","of","the","resampled","data","rises","to","meet","the","first","sample","of","the","next","cycle","for","the","FFT","method",",","and","gets","closer","to","zero","for","the","polyphase","method",":",">",">",">","import","cupy",">",">",">","import","cupyx.scipy.signal","import","resample",",","resample_poly",">",">",">","x","=","cupy.linspace","(","0",",","10",",","20",",","endpoint=False",")",">",">",">","y","=","cupy.cos","(","-x","*","*","2\/6.0",")",">",">",">","f_fft","=","resample","(","y",",","100",")",">",">",">","f_poly","=","resample_poly","(","y",",","100",",","20",")",">",">",">","xnew","=","cupy.linspace","(","0",",","10",",","100",",","endpoint=False",")",">",">",">","import","matplotlib.pyplot","as","plt",">",">",">","plt.plot","(","cupy.asnumpy","(","xnew",")",",","cupy.asnumpy","(","f_fft",")",",","'","b.-","'",",","\\","cupy.asnumpy","(","xnew",")",",","cupy.asnumpy","(","f_poly",")",",","'","r.-","'",")",">",">",">","plt.plot","(","cupy.asnumpy","(","x",")",",","cupy.asnumpy","(","y",")",",","'ko-","'",")",">",">",">","plt.plot","(","10",",","cupy.asnumpy","(","y","[","0","]",")",",","'bo","'",",","10",",","0.",",","'ro","'",")","#","boundaries",">",">",">","plt.legend","(","[","'resample","'",",","'resamp_poly","'",",","'data","'","]",",","loc='best","'",")",">",">",">","plt.show","(",")","``","''","''","if","padtype","!","=","``","constant","''","or","cval","is","not","None",":","raise","ValueError","(","``","padtype","and","cval","arguments","are","not","supported","by","upfirdn","''",")","x","=","cupy.asarray","(","x",")","up","=","int","(","up",")","down","=","int","(","down",")","if","up","<","1","or","down","<","1",":","raise","ValueError","(","``","up","and","down","must","be",">","=","1","''",")","#","Determine","our","up","and","down","factors","#","Use","a","rational","approimation","to","save","computation","time","on","really","long","#","signals","g_","=","gcd","(","up",",","down",")","up","\/\/=","g_","down","\/\/=","g_","if","up","==","down","==","1",":","return","x.copy","(",")","n_out","=","x.shape","[","axis","]","*","up","n_out","=","n_out","\/\/","down","+","bool","(","n_out","%","down",")","if","isinstance","(","window",",","(","list",",","cupy.ndarray",")",")",":","window","=","cupy.asarray","(","window",")","if","window.ndim",">","1",":","raise","ValueError","(","``","window","must","be","1-D","''",")","half_len","=","(","window.size","-","1",")","\/\/","2","h","=","up","*","window","else",":","half_len","=","10","*","max","(","up","
,","down",")","h","=","up","*","_design_resample_poly","(","up",",","down",",","window",")","#","Zero-pad","our","filter","to","put","the","output","samples","at","the","center","n_pre_pad","=","down","-","half_len","%","down","n_post_pad","=","0","n_pre_remove","=","(","half_len","+","n_pre_pad",")","\/\/","down","#","We","should","rarely","need","to","do","this","given","our","filter","lengths","...","while","(","_output_len","(","len","(","h",")","+","n_pre_pad","+","n_post_pad",",","x.shape","[","axis","]",",","up",",","down",")","<","n_out","+","n_pre_remove",")",":","n_post_pad","+=","1","h","=","cupy.concatenate","(","(","cupy.zeros","(","n_pre_pad",",","h.dtype",")",",","h",",","cupy.zeros","(","n_post_pad",",","h.dtype",")",",",")",")","n_pre_remove_end","=","n_pre_remove","+","n_out","#","filter","then","remove","excess","y","=","upfirdn","(","h",",","x",",","up",",","down",",","axis",")","keep","=","[","slice","(","None",")","]","*","x.ndim","keep","[","axis","]","=","slice","(","n_pre_remove",",","n_pre_remove_end",")","return","y","[","tuple","(","keep",")","]"]
407
556
null
_resample.py
cupy/cupyx/scipy/signal/_resample.py
import operator from math import gcd import cupy from cupyx.scipy.fft import fft, rfft, fftfreq, ifft, irfft, ifftshift from cupyx.scipy.signal._iir_filter_design import cheby1 from cupyx.scipy.signal._fir_filter_design import firwin from cupyx.scipy.signal._iir_filter_conversions import zpk2sos from cupyx.scipy.signal._ltisys import dlti from cupyx.scipy.signal._upfirdn import upfirdn, _output_len from cupyx.scipy.signal._signaltools import sosfiltfilt, filtfilt, sosfilt, lfilter from cupyx.scipy.signal.windows._windows import get_window
15
null
11
4
null
null
null
Use image node_id 4 for calling a global function with example usage: resample_poly(x, up, down, axis, window, padtype, cval) and returns: y, x
143
node_id 4
692,584
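A short usage sketch mirroring the docstring's own example; it upsamples a 20-sample signal to 100 samples via the polyphase path:

import cupy
from cupyx.scipy.signal import resample_poly

x = cupy.linspace(0, 10, 20, endpoint=False)
y = cupy.cos(-x**2 / 6.0)
f_poly = resample_poly(y, 100, 20)  # up=100, down=20 reduces to 5/1 internally via gcd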
global_gc
global
null
false
null
null
null
null
null
def global_gc(): """Trigger gc.collect() on all workers in the cluster.""" worker = ray._private.worker.global_worker worker.core_worker.global_gc()
["def","global_gc","(",")",":","``","''","''","Trigger","gc.collect","(",")","on","all","workers","in","the","cluster",".","''","''","''","worker","=","ray._private.worker.global_worker","worker.core_worker.global_gc","(",")"]
14
18
null
internal_api.py
ray/python/ray/_private/internal_api.py
import ray import ray._private.profiling import ray._private.services import ray._private.utils import ray._private.worker from ray._private import ray_constants from ray._private.state import GlobalState from ray._raylet import GcsClientOptions
15
null
8
7
null
null
null
Use image node_id 1 for calling a global function with example usage: global_gc() without return types
102
node_id 1
1,801,716
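A minimal sketch of calling this private helper; it assumes an initialized Ray runtime, and the ray._private namespace is internal and subject to change:

import ray
from ray._private.internal_api import global_gc

ray.init()
global_gc()  # asks every worker in the cluster to run gc.collect()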
get_state_from_address
global
null
false
address
null
null
null
null
state
def get_state_from_address(address=None): address = services.canonicalize_bootstrap_address_or_die(address) state = GlobalState() options = GcsClientOptions.from_gcs_address(address) state._initialize_global_state(options) return state
["def","get_state_from_address","(","address=None",")",":","address","=","services.canonicalize_bootstrap_address_or_die","(","address",")","state","=","GlobalState","(",")","options","=","GcsClientOptions.from_gcs_address","(","address",")","state._initialize_global_state","(","options",")","return","state"]
21
27
null
internal_api.py
ray/python/ray/_private/internal_api.py
import ray import ray._private.profiling import ray._private.services import ray._private.utils import ray._private.worker from ray._private import ray_constants from ray._private.state import GlobalState from ray._raylet import GcsClientOptions
15
null
8
7
null
null
null
Use image node_id 2 for calling a global function with example usage: get_state_from_address(address) and returns: state
120
node_id 2
1,801,717
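A sketch under the assumption that a Ray cluster is already running and discoverable; the returned GlobalState exposes, for example, the node table used elsewhere in this module:

from ray._private.internal_api import get_state_from_address

state = get_state_from_address()  # address=None autodetects the bootstrap address or dies
alive = [n for n in state.node_table() if n["Alive"]]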
resample
global
null
false
x,num,t,axis,window,domain
null
null
null
null
y,y, new_t
def resample(x, num, t=None, axis=0, window=None, domain="time"): """ Resample `x` to `num` samples using Fourier method along the given axis. The resampled signal starts at the same value as `x` but is sampled with a spacing of ``len(x) / num * (spacing of x)``. Because a Fourier method is used, the signal is assumed to be periodic. Parameters ---------- x : array_like The data to be resampled. num : int The number of samples in the resampled signal. t : array_like, optional If `t` is given, it is assumed to be the sample positions associated with the signal data in `x`. axis : int, optional The axis of `x` that is resampled. Default is 0. window : array_like, callable, string, float, or tuple, optional Specifies the window applied to the signal in the Fourier domain. See below for details. domain : string, optional A string indicating the domain of the input `x`: ``time`` Consider the input `x` as time-domain. (Default) ``freq`` Consider the input `x` as frequency-domain. Returns ------- resampled_x or (resampled_x, resampled_t) Either the resampled array, or, if `t` was given, a tuple containing the resampled array and the corresponding resampled positions. See Also -------- decimate : Downsample the signal after applying an FIR or IIR filter. resample_poly : Resample using polyphase filtering and an FIR filter. Notes ----- The argument `window` controls a Fourier-domain window that tapers the Fourier spectrum before zero-padding to alleviate ringing in the resampled values for sampled signals you didn't intend to be interpreted as band-limited. If `window` is a function, then it is called with a vector of inputs indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ). If `window` is an array of the same length as `x.shape[axis]` it is assumed to be the window to be applied directly in the Fourier domain (with dc and low-frequency first). For any other type of `window`, the function `cusignal.get_window` is called to generate the window. The first sample of the returned vector is the same as the first sample of the input vector. The spacing between samples is changed from ``dx`` to ``dx * len(x) / num``. If `t` is not None, then it represents the old sample positions, and the new sample positions will be returned as well as the new samples. As noted, `resample` uses FFT transformations, which can be very slow if the number of input or output samples is large and prime; see `scipy.fftpack.fft`. 
Examples -------- Note that the end of the resampled data rises to meet the first sample of the next cycle: >>> import cupy >>> from cupyx.scipy.signal import resample >>> x = cupy.linspace(0, 10, 20, endpoint=False) >>> y = cupy.cos(-x**2/6.0) >>> f = resample(y, 100) >>> xnew = cupy.linspace(0, 10, 100, endpoint=False) >>> import matplotlib.pyplot as plt >>> plt.plot(cupy.asnumpy(x), cupy.asnumpy(y), 'go-', cupy.asnumpy(xnew), \ cupy.asnumpy(f), '.-', 10, cupy.asnumpy(y[0]), 'ro') >>> plt.legend(['data', 'resampled'], loc='best') >>> plt.show() """ if domain not in ("time", "freq"): raise ValueError( "Acceptable domain flags are 'time' or" " 'freq', not domain={}".format(domain) ) x = cupy.asarray(x) Nx = x.shape[axis] # Check if we can use faster real FFT real_input = cupy.isrealobj(x) if domain == "time": # Forward transform if real_input: X = rfft(x, axis=axis) else: # Full complex FFT X = fft(x, axis=axis) else: # domain == 'freq' X = x # Apply window to spectrum if window is not None: if callable(window): W = window(fftfreq(Nx)) elif isinstance(window, cupy.ndarray): if window.shape != (Nx,): raise ValueError( "window must have the same length as data" ) W = window else: W = ifftshift(get_window(window, Nx)) newshape_W = [1] * x.ndim newshape_W[axis] = X.shape[axis] if real_input: # Fold the window back on itself to mimic complex behavior W_real = W.copy() W_real[1:] += W_real[-1:0:-1] W_real[1:] *= 0.5 X *= W_real[: newshape_W[axis]].reshape(newshape_W) else: X *= W.reshape(newshape_W) # Copy each half of the original spectrum to the output spectrum, either # truncating high frequencies (downsampling) or zero-padding them # (upsampling) # Placeholder array for output spectrum newshape = list(x.shape) if real_input: newshape[axis] = num // 2 + 1 else: newshape[axis] = num Y = cupy.zeros(newshape, X.dtype) # Copy positive frequency components (and Nyquist, if present) N = min(num, Nx) nyq = N // 2 + 1 # Slice index that includes Nyquist if present sl = [slice(None)] * x.ndim sl[axis] = slice(0, nyq) Y[tuple(sl)] = X[tuple(sl)] if not real_input: # Copy negative frequency components if ( N > 2 ): # (slice expression doesn't collapse to empty array) sl[axis] = slice(nyq - N, None) Y[tuple(sl)] = X[tuple(sl)] # Split/join Nyquist component(s) if present # So far we have set Y[+N/2]=X[+N/2] if N % 2 == 0: if num < Nx: # downsampling if real_input: sl[axis] = slice(N // 2, N // 2 + 1) Y[tuple(sl)] *= 2.0 else: # select the component of Y at frequency +N/2, # add the component of X at -N/2 sl[axis] = slice(-N // 2, -N // 2 + 1) Y[tuple(sl)] += X[tuple(sl)] elif Nx < num: # upsampling # select the component at frequency +N/2 and halve it sl[axis] = slice(N // 2, N // 2 + 1) Y[tuple(sl)] *= 0.5 if not real_input: temp = Y[tuple(sl)] # set the component at -N/2 equal to the component at +N/2 sl[axis] = slice(num - N // 2, num - N // 2 + 1) Y[tuple(sl)] = temp # Inverse transform if real_input: y = irfft(Y, num, axis=axis) else: y = ifft(Y, axis=axis, overwrite_x=True) y *= float(num) / float(Nx) if t is None: return y else: new_t = ( cupy.arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0] ) return y, new_t
["def","resample","(","x",",","num",",","t=None",",","axis=0",",","window=None",",","domain=","''","time","''",")",":","``","''","''","Resample","`","x","`","to","`","num","`","samples","using","Fourier","method","along","the","given","axis",".","The","resampled","signal","starts","at","the","same","value","as","`","x","`","but","is","sampled","with","a","spacing","of","``","len","(","x",")","\/","num","*","(","spacing","of","x",")","``",".","Because","a","Fourier","method","is","used",",","the","signal","is","assumed","to","be","periodic",".","Parameters","--","--","--","--","--","x",":","array_like","The","data","to","be","resampled",".","num",":","int","The","number","of","samples","in","the","resampled","signal",".","t",":","array_like",",","optional","If","`","t","`","is","given",",","it","is","assumed","to","be","the","sample","positions","associated","with","the","signal","data","in","`","x","`",".","axis",":","int",",","optional","The","axis","of","`","x","`","that","is","resampled",".","Default","is","0.","window",":","array_like",",","callable",",","string",",","float",",","or","tuple",",","optional","Specifies","the","window","applied","to","the","signal","in","the","Fourier","domain",".","See","below","for","details",".","domain",":","string",",","optional","A","string","indicating","the","domain","of","the","input","`","x","`",":","``","time","``","Consider","the","input","`","x","`","as","time-domain",".","(","Default",")","``","freq","``","Consider","the","input","`","x","`","as","frequency-domain",".","Returns","--","--","--","-","resampled_x","or","(","resampled_x",",","resampled_t",")","Either","the","resampled","array",",","or",",","if","`","t","`","was","given",",","a","tuple","containing","the","resampled","array","and","the","corresponding","resampled","positions",".","See","Also","--","--","--","--","decimate",":","Downsample","the","signal","after","applying","an","FIR","or","IIR","filter",".","resample_poly",":","Resample","using","polyphase","filtering","and","an","FIR","filter",".","Notes","--","--","-","The","argument","`","window","`","controls","a","Fourier-domain","window","that","tapers","the","Fourier","spectrum","before","zero-padding","to","alleviate","ringing","in","the","resampled","values","for","sampled","signals","you","did","n't","intend","to","be","interpreted","as","band-limited",".","If","`","window","`","is","a","function",",","then","it","is","called","with","a","vector","of","inputs","indicating","the","frequency","bins","(","i.e",".","fftfreq","(","x.shape","[","axis","]",")",")",".","If","`","window","`","is","an","array","of","the","same","length","as","`","x.shape","[","axis","]","`","it","is","assumed","to","be","the","window","to","be","applied","directly","in","the","Fourier","domain","(","with","dc","and","low-frequency","first",")",".","For","any","other","type","of","`","window","`",",","the","function","`","cusignal.get_window","`","is","called","to","generate","the","window",".","The","first","sample","of","the","returned","vector","is","the","same","as","the","first","sample","of","the","input","vector",".","The","spacing","between","samples","is","changed","from","``","dx","``","to","``","dx","*","len","(","x",")","\/","num","``",".","If","`","t","`","is","not","None",",","then","it","represents","the","old","sample","positions",",","and","the","new","sample","positions","will","be","returned","as","well","as","the","new","samples",".","As","noted",",","`","resample","`","uses","FFT","transformations",",","which","can","be","very"
,"slow","if","the","number","of","input","or","output","samples","is","large","and","prime",";","see","`","scipy.fftpack.fft","`",".","Examples","--","--","--","--","Note","that","the","end","of","the","resampled","data","rises","to","meet","the","first","sample","of","the","next","cycle",":",">",">",">","import","cupy","as","cp",">",">",">","import","cupyx.scipy.signal","import","resample",">",">",">","x","=","cupy.linspace","(","0",",","10",",","20",",","endpoint=False",")",">",">",">","y","=","cupy.cos","(","-x","*","*","2\/6.0",")",">",">",">","f","=","resample","(","y",",","100",")",">",">",">","xnew","=","cupy.linspace","(","0",",","10",",","100",",","endpoint=False",")",">",">",">","import","matplotlib.pyplot","as","plt",">",">",">","plt.plot","(","cupy.asnumpy","(","x",")",",","cupy.asnumpy","(","y",")",",","'go-","'",",","cupy.asnumpy","(","xnew",")",",","\\","cupy.asnumpy","(","f",")",",","'.-","'",",","10",",","cupy.asnumpy","(","y","[","0","]",")",",","'ro","'",")",">",">",">","plt.legend","(","[","'data","'",",","'resampled","'","]",",","loc='best","'",")",">",">",">","plt.show","(",")","``","''","''","if","domain","not","in","(","``","time","''",",","``","freq","''",")",":","raise","ValueError","(","``","Acceptable","domain","flags","are","'time","'","or","''","``","'freq","'",",","not","domain=","{","}","''",".format","(","domain",")",")","x","=","cupy.asarray","(","x",")","Nx","=","x.shape","[","axis","]","#","Check","if","we","can","use","faster","real","FFT","real_input","=","cupy.isrealobj","(","x",")","if","domain","==","``","time","''",":","#","Forward","transform","if","real_input",":","X","=","rfft","(","x",",","axis=axis",")","else",":","#","Full","complex","FFT","X","=","fft","(","x",",","axis=axis",")","else",":","#","domain","==","'freq'","X","=","x","#","Apply","window","to","spectrum","if","window","is","not","None",":","if","callable","(","window",")",":","W","=","window","(","fftfreq","(","Nx",")",")","elif","isinstance","(","window",",","cupy.ndarray",")",":","if","window.shape","!","=","(","Nx",",",")",":","raise","ValueError","(","``","window","must","have","the","same","length","as","data","''",")","W","=","window","else",":","W","=","ifftshift","(","get_window","(","window",",","Nx",")",")","newshape_W","=","[","1","]","*","x.ndim","newshape_W","[","axis","]","=","X.shape","[","axis","]","if","real_input",":","#","Fold","the","window","back","on","itself","to","mimic","complex","behavior","W_real","=","W.copy","(",")","W_real","[","1",":","]","+=","W_real","[","-1:0",":","-1","]","W_real","[","1",":","]","*","=","0.5","X","*","=","W_real","[",":","newshape_W","[","axis","]","]",".reshape","(","newshape_W",")","else",":","X","*","=","W.reshape","(","newshape_W",")","#","Copy","each","half","of","the","original","spectrum","to","the","output","spectrum",",","either","#","truncating","high","frequences","(","downsampling",")","or","zero-padding","them","#","(","upsampling",")","#","Placeholder","array","for","output","spectrum","newshape","=","list","(","x.shape",")","if","real_input",":","newshape","[","axis","]","=","num","\/\/","2","+","1","else",":","newshape","[","axis","]","=","num","Y","=","cupy.zeros","(","newshape",",","X.dtype",")","#","Copy","positive","frequency","components","(","and","Nyquist",",","if","present",")","N","=","min","(","num",",","Nx",")","nyq","=","N","\/\/","2","+","1","#","Slice","index","that","includes","Nyquist","if","present","sl","=","[","slice","(","None",")","]","*","x.ndim","sl","[","axis","]","=","slice","(","0",",","n
yq",")","Y","[","tuple","(","sl",")","]","=","X","[","tuple","(","sl",")","]","if","not","real_input",":","#","Copy","negative","frequency","components","if","(","N",">","2",")",":","#","(","slice","expression","does","n't","collapse","to","empty","array",")","sl","[","axis","]","=","slice","(","nyq","-","N",",","None",")","Y","[","tuple","(","sl",")","]","=","X","[","tuple","(","sl",")","]","#","Split\/join","Nyquist","component","(","s",")","if","present","#","So","far","we","have","set","Y","[","+N\/2","]","=X","[","+N\/2","]","if","N","%","2","==","0",":","if","num","<","Nx",":","#","downsampling","if","real_input",":","sl","[","axis","]","=","slice","(","N","\/\/","2",",","N","\/\/","2","+","1",")","Y","[","tuple","(","sl",")","]","*","=","2.0","else",":","#","select","the","component","of","Y","at","frequency","+N\/2",",","#","add","the","component","of","X","at","-N\/2","sl","[","axis","]","=","slice","(","-N","\/\/","2",",","-N","\/\/","2","+","1",")","Y","[","tuple","(","sl",")","]","+=","X","[","tuple","(","sl",")","]","elif","Nx","<","num",":","#","upsampling","#","select","the","component","at","frequency","+N\/2","and","halve","it","sl","[","axis","]","=","slice","(","N","\/\/","2",",","N","\/\/","2","+","1",")","Y","[","tuple","(","sl",")","]","*","=","0.5","if","not","real_input",":","temp","=","Y","[","tuple","(","sl",")","]","#","set","the","component","at","-N\/2","equal","to","the","component","at","+N\/2","sl","[","axis","]","=","slice","(","num","-","N","\/\/","2",",","num","-","N","\/\/","2","+","1",")","Y","[","tuple","(","sl",")","]","=","temp","#","Inverse","transform","if","real_input",":","y","=","irfft","(","Y",",","num",",","axis=axis",")","else",":","y","=","ifft","(","Y",",","axis=axis",",","overwrite_x=True",")","y","*","=","float","(","num",")","\/","float","(","Nx",")","if","t","is","None",":","return","y","else",":","new_t","=","(","cupy.arange","(","0",",","num",")","*","(","t","[","1","]","-","t","[","0","]",")","*","Nx","\/","float","(","num",")","+","t","[","0","]",")","return","y",",","new_t"]
215
404
null
_resample.py
cupy/cupyx/scipy/signal/_resample.py
import operator from math import gcd import cupy from cupyx.scipy.fft import fft, rfft, fftfreq, ifft, irfft, ifftshift from cupyx.scipy.signal._iir_filter_design import cheby1 from cupyx.scipy.signal._fir_filter_design import firwin from cupyx.scipy.signal._iir_filter_conversions import zpk2sos from cupyx.scipy.signal._ltisys import dlti from cupyx.scipy.signal._upfirdn import upfirdn, _output_len from cupyx.scipy.signal._signaltools import sosfiltfilt, filtfilt, sosfilt, lfilter from cupyx.scipy.signal.windows._windows import get_window
15
null
11
4
null
null
null
Use image node_id 3 for calling a global function with example usage: resample(x, num, t, axis, window, domain) and returns: y, y, new_t
137
node_id 3
692,583
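A usage sketch following the docstring's example, including the optional return of resampled positions when `t` is given:

import cupy
from cupyx.scipy.signal import resample

x = cupy.linspace(0, 10, 20, endpoint=False)
y = cupy.cos(-x**2 / 6.0)
f = resample(y, 100)            # resampled values only
f2, t2 = resample(y, 100, t=x)  # values plus the new sample positions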
time_affine_transform
NdimageInterpolation
Benchmark
true
self,shape,order,mode
null
null
null
null
null
def time_affine_transform(self, shape, order, mode): if self.x.ndim == 2: matrix = self.matrix_2d else: matrix = self.matrix_3d affine_transform(self.x, matrix, order=order, mode=mode)
["def","time_affine_transform","(","self",",","shape",",","order",",","mode",")",":","if","self.x.ndim","==","2",":","matrix","=","self.matrix_2d","else",":","matrix","=","self.matrix_3d","affine_transform","(","self.x",",","matrix",",","order=order",",","mode=mode",")"]
37
42
null
ndimage_interpolation.py
scipy/benchmarks/benchmarks/ndimage_interpolation.py
import numpy from .common import Benchmark
15
1
2
2
1
9
1
Use image node_id 2 for calling the NdimageInterpolation obj's underlying member method code with example usage: obj.time_affine_transform(shape, order, mode) without return types
179
node_id 2
1,883,757
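A standalone sketch of the 2-D branch this benchmark times; both the array and the matrix are hypothetical stand-ins for the `self.x` / `self.matrix_2d` that a setup() method would provide:

import numpy as np
from scipy.ndimage import affine_transform

x = np.random.default_rng(0).random((256, 256))
matrix = np.array([[1.0, 0.1], [0.05, 1.0]])  # illustrative 2x2 transform
affine_transform(x, matrix, order=1, mode='constant')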
get_retro_encoder_layer_te_spec
global
null
false
null
null
null
null
spec
def get_retro_encoder_layer_te_spec() -> ModuleSpec: """Retro encoder TE spec (uses Transformer Engine components). A Retro encoder layer uses custom attention, bias-dropout-add, and layernorm operators to encode neighboring chunks that are retrieved from the chunk database. Each operator is responsible for iterating the retrieved chunks and processing them individually. """ spec = get_gpt_layer_with_transformer_engine_spec() spec.submodules.pre_cross_attn_layernorm = TENorm spec.submodules.cross_attention = ModuleSpec( module=RetroEncoderCrossAttention, params={ "attn_mask_type": AttnMaskType.padding, }, submodules=CrossAttentionSubmodules( linear_q=TEColumnParallelLinear, linear_kv=TEColumnParallelLinear, core_attention=TEDotProductAttention, linear_proj=TERowParallelLinear, ), ) spec.submodules.cross_attn_bda = ModuleSpec( module=RetroEncoderBiasDropoutAdd ) spec.submodules.pre_mlp_layernorm = ModuleSpec( module=RetroEncoderLayerNorm, submodules=TENorm, ) spec.submodules.mlp = ModuleSpec( module=MLP, submodules=MLPSubmodules( linear_fc1=TEColumnParallelLinear, linear_fc2=TERowParallelLinear, ), ) return spec
["def","get_retro_encoder_layer_te_spec","(",")","-",">","ModuleSpec",":","``","''","''","Retro","encoder","TE","spec","(","uses","Transformer","Engine","components",")",".","A","Retro","encoder","layer","uses","custom","attention",",","bias-dropout-add",",","and","layernorm","operators","to","encode","neighboring","chunks","that","are","retrieved","from","the","chunk","database",".","Each","operator","is","responsible","for","iterating","the","retrieved","chunks","and","processing","them","individually.","``","''","''","spec","=","get_gpt_layer_with_transformer_engine_spec","(",")","spec.submodules.pre_cross_attn_layernorm","=","TENorm","spec.submodules.cross_attention","=","ModuleSpec","(","module=RetroEncoderCrossAttention",",","params=","{","``","attn_mask_type","''",":","AttnMaskType.padding",",","}",",","submodules=CrossAttentionSubmodules","(","linear_q=TEColumnParallelLinear",",","linear_kv=TEColumnParallelLinear",",","core_attention=TEDotProductAttention",",","linear_proj=TERowParallelLinear",",",")",",",")","spec.submodules.cross_attn_bda","=","ModuleSpec","(","module=RetroEncoderBiasDropoutAdd",")","spec.submodules.pre_mlp_layernorm","=","ModuleSpec","(","module=RetroEncoderLayerNorm",",","submodules=TENorm",",",")","spec.submodules.mlp","=","ModuleSpec","(","module=MLP",",","submodules=MLPSubmodules","(","linear_fc1=TEColumnParallelLinear",",","linear_fc2=TERowParallelLinear",",",")",",",")","return","spec"]
29
57
null
encoder_spec.py
megatron-lm/megatron/core/models/retro/encoder_spec.py
from megatron.core.fusions.fused_layer_norm import FusedLayerNorm from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_local_spec, get_gpt_layer_with_transformer_engine_spec from megatron.core.models.retro.config import RetroConfig from megatron.core.models.retro.encoder_attention import RetroEncoderBiasDropoutAdd, RetroEncoderCrossAttention, RetroEncoderLayerNorm from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear from megatron.core.transformer import ModuleSpec from megatron.core.transformer.attention import CrossAttentionSubmodules from megatron.core.transformer.custom_layers.transformer_engine import TEColumnParallelLinear, TEDotProductAttention, TENorm, TERowParallelLinear from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.mlp import MLP, MLPSubmodules from megatron.core.transformer.transformer_block import TransformerBlockSubmodules
15
null
12
3
null
null
null
Use image node_id 1 for calling a global function with example usage: get_retro_encoder_layer_te_spec() and returns: spec
121
node_id 1
1,324,195
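A minimal sketch of consuming the returned spec; it assumes Transformer Engine is installed, since the spec wires in TE modules:

from megatron.core.models.retro.encoder_spec import get_retro_encoder_layer_te_spec

spec = get_retro_encoder_layer_te_spec()
# Inspect the customized cross-attention wiring (RetroEncoderCrossAttention + TE linears).
print(spec.submodules.cross_attention.module)
print(spec.submodules.cross_attention.submodules.linear_q)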
get_retro_encoder_layer_local_spec
global
null
false
null
null
null
null
spec
def get_retro_encoder_layer_local_spec() -> ModuleSpec: """Retro encoder local spec (uses Megatron-Core components). A Retro encoder layer uses custom attention, bias-dropout-add, and layernorm operators to encode neighboring chunks that are retrieved from the chunk database. Each operator is responsible for iterating the retrieved chunks and processing them individually. """ spec = get_gpt_layer_local_spec() spec.submodules.pre_cross_attn_layernorm = FusedLayerNorm spec.submodules.cross_attention = ModuleSpec( module=RetroEncoderCrossAttention, params={ "attn_mask_type": AttnMaskType.padding, }, submodules=CrossAttentionSubmodules( linear_q=ColumnParallelLinear, linear_kv=ColumnParallelLinear, core_attention=DotProductAttention, linear_proj=RowParallelLinear, ), ) spec.submodules.cross_attn_bda = ModuleSpec( module=RetroEncoderBiasDropoutAdd ) spec.submodules.pre_mlp_layernorm = ModuleSpec( module=RetroEncoderLayerNorm, submodules=FusedLayerNorm, ) spec.submodules.mlp = ModuleSpec( module=MLP, submodules=MLPSubmodules( linear_fc1=ColumnParallelLinear, linear_fc2=RowParallelLinear, ), ) return spec
["def","get_retro_encoder_layer_local_spec","(",")","-",">","ModuleSpec",":","``","''","''","Retro","encoder","local","spec","(","uses","Megatron-Core","components",")",".","A","Retro","encoder","layer","uses","custom","attention",",","bias-dropout-add",",","and","layernorm","operators","to","encode","neighboring","chunks","that","are","retrieved","from","the","chunk","database",".","Each","operator","is","responsible","for","iterating","the","retrieved","chunks","and","processing","them","individually.","``","''","''","spec","=","get_gpt_layer_local_spec","(",")","spec.submodules.pre_cross_attn_layernorm","=","FusedLayerNorm","spec.submodules.cross_attention","=","ModuleSpec","(","module=RetroEncoderCrossAttention",",","params=","{","``","attn_mask_type","''",":","AttnMaskType.padding",",","}",",","submodules=CrossAttentionSubmodules","(","linear_q=ColumnParallelLinear",",","linear_kv=ColumnParallelLinear",",","core_attention=DotProductAttention",",","linear_proj=RowParallelLinear",",",")",",",")","spec.submodules.cross_attn_bda","=","ModuleSpec","(","module=RetroEncoderBiasDropoutAdd",")","spec.submodules.pre_mlp_layernorm","=","ModuleSpec","(","module=RetroEncoderLayerNorm",",","submodules=FusedLayerNorm",",",")","spec.submodules.mlp","=","ModuleSpec","(","module=MLP",",","submodules=MLPSubmodules","(","linear_fc1=ColumnParallelLinear",",","linear_fc2=RowParallelLinear",",",")",",",")","return","spec"]
60
88
null
encoder_spec.py
megatron-lm/megatron/core/models/retro/encoder_spec.py
from megatron.core.fusions.fused_layer_norm import FusedLayerNorm from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_local_spec, get_gpt_layer_with_transformer_engine_spec from megatron.core.models.retro.config import RetroConfig from megatron.core.models.retro.encoder_attention import RetroEncoderBiasDropoutAdd, RetroEncoderCrossAttention, RetroEncoderLayerNorm from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear from megatron.core.transformer import ModuleSpec from megatron.core.transformer.attention import CrossAttentionSubmodules from megatron.core.transformer.custom_layers.transformer_engine import TEColumnParallelLinear, TEDotProductAttention, TENorm, TERowParallelLinear from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.mlp import MLP, MLPSubmodules from megatron.core.transformer.transformer_block import TransformerBlockSubmodules
15
null
12
3
null
null
null
Use image node_id 2 for calling a global function with example usage: get_retro_encoder_layer_local_spec() and returns: spec
124
node_id 2
1,324,196
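The local variant can be inspected the same way; no Transformer Engine dependency is needed here:

from megatron.core.models.retro.encoder_spec import get_retro_encoder_layer_local_spec

spec = get_retro_encoder_layer_local_spec()
print(spec.submodules.mlp.submodules.linear_fc1)  # ColumnParallelLinear
print(spec.submodules.pre_cross_attn_layernorm)   # FusedLayerNorm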
get_retro_encoder_block_spec
global
null
false
config,use_transformer_engine
null
null
null
null
block_spec
def get_retro_encoder_block_spec( config: RetroConfig, use_transformer_engine: bool ) -> TransformerBlockSubmodules: """Retro encoder block spec. The retro encoder block consists of one customized Retro encoder layer (layer 1), and all of the following layers are standard GPT layers. Arguments: config (RetroConfig): Retro config. use_transformer_engine (bool): If True, use Transformer Engine (instead of local modules). """ # Num layers. num_layers = config.retro_encoder_num_layers retro_layer_numbers = [1] # Layer specs. gpt_layer_spec = ( get_gpt_layer_with_transformer_engine_spec() if use_transformer_engine else get_gpt_layer_local_spec() ) get_retro_encoder_layer_spec = ( get_retro_encoder_layer_te_spec if use_transformer_engine else get_retro_encoder_layer_local_spec ) retro_layer_spec = get_retro_encoder_layer_spec() for spec in (gpt_layer_spec, retro_layer_spec): spec.params[ "hidden_dropout" ] = config.retro_encoder_hidden_dropout spec.submodules.self_attention.params[ "attn_mask_type" ] = AttnMaskType.padding spec.submodules.self_attention.submodules.core_attention = ModuleSpec( module=TEDotProductAttention if use_transformer_engine else DotProductAttention, params={ "attention_dropout": config.retro_encoder_attention_dropout, }, ) layer_specs = [] for layer_number in range(1, num_layers + 1): if layer_number in retro_layer_numbers: layer_specs.append(retro_layer_spec) else: layer_specs.append(gpt_layer_spec) # Block spec. block_spec = TransformerBlockSubmodules(layer_specs=layer_specs) return block_spec
["def","get_retro_encoder_block_spec","(","config",":","RetroConfig",",","use_transformer_engine",":","bool",")","-",">","TransformerBlockSubmodules",":","``","''","''","Retro","encoder","block","spec",".","The","retro","encoder","block","consists","of","one","customized","Retro","encoder","layer","(","layer","1",")",",","and","all","of","the","following","layers","are","standard","GPT","layers",".","Arguments",":","config","(","RetroConfig",")",":","Retro","config",".","use_transformer_engine","(","bool",")",":","If","True",",","use","Transformer","Engine","(","instead","of","local","modules.","``","''","''","#","Num","layers",".","num_layers","=","config.retro_encoder_num_layers","retro_layer_numbers","=","[","1","]","#","Layer","specs",".","gpt_layer_spec","=","(","get_gpt_layer_with_transformer_engine_spec","(",")","if","use_transformer_engine","else","get_gpt_layer_local_spec","(",")",")","get_retro_encoder_layer_spec","=","(","get_retro_encoder_layer_te_spec","if","use_transformer_engine","else","get_retro_encoder_layer_local_spec",")","retro_layer_spec","=","get_retro_encoder_layer_spec","(",")","for","spec","in","(","gpt_layer_spec",",","retro_layer_spec",")",":","spec.params","[","``","hidden_dropout","''","]","=","config.retro_encoder_hidden_dropout","spec.submodules.self_attention.params","[","``","attn_mask_type","''","]","=","AttnMaskType.padding","spec.submodules.self_attention.submodules.core_attention","=","ModuleSpec","(","module=TEDotProductAttention","if","use_transformer_engine","else","DotProductAttention",",","params=","{","``","attention_dropout","''",":","config.retro_encoder_attention_dropout",",","}",",",")","layer_specs","=","[","]","for","layer_number","in","range","(","1",",","num_layers","+","1",")",":","if","layer_number","in","retro_layer_numbers",":","layer_specs.append","(","retro_layer_spec",")","else",":","layer_specs.append","(","gpt_layer_spec",")","#","Block","spec",".","block_spec","=","TransformerBlockSubmodules","(","layer_specs=layer_specs",")","return","block_spec"]
91
141
null
encoder_spec.py
megatron-lm/megatron/core/models/retro/encoder_spec.py
from megatron.core.fusions.fused_layer_norm import FusedLayerNorm from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_local_spec, get_gpt_layer_with_transformer_engine_spec from megatron.core.models.retro.config import RetroConfig from megatron.core.models.retro.encoder_attention import RetroEncoderBiasDropoutAdd, RetroEncoderCrossAttention, RetroEncoderLayerNorm from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear from megatron.core.transformer import ModuleSpec from megatron.core.transformer.attention import CrossAttentionSubmodules from megatron.core.transformer.custom_layers.transformer_engine import TEColumnParallelLinear, TEDotProductAttention, TENorm, TERowParallelLinear from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.enums import AttnMaskType from megatron.core.transformer.mlp import MLP, MLPSubmodules from megatron.core.transformer.transformer_block import TransformerBlockSubmodules
15
null
12
3
null
null
null
Use image node_id 3 for calling a global function with example usage: get_retro_encoder_block_spec(config, use_transformer_engine) and returns: block_spec
154
node_id 3
1,324,197
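A hedged sketch of building the encoder block spec; the RetroConfig constructor arguments below are illustrative placeholders, not a verified minimal configuration:

from megatron.core.models.retro.config import RetroConfig
from megatron.core.models.retro.encoder_spec import get_retro_encoder_block_spec

config = RetroConfig(num_layers=12, hidden_size=768, num_attention_heads=12)  # assumed fields
block_spec = get_retro_encoder_block_spec(config, use_transformer_engine=False)
# Layer 1 is the customized Retro layer; the remaining encoder layers are standard GPT layers.
assert len(block_spec.layer_specs) == config.retro_encoder_num_layers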
memory_summary
global
null
false
address,redis_password,group_by,sort_by,units,line_wrap,stats_only,num_entries
null
null
null
null
unknown,store_stats_summary
def memory_summary( address=None, redis_password=ray_constants.REDIS_DEFAULT_PASSWORD, group_by="NODE_ADDRESS", sort_by="OBJECT_SIZE", units="B", line_wrap=True, stats_only=False, num_entries=None, ): from ray.dashboard.memory_utils import memory_summary state = get_state_from_address(address) reply = get_memory_info_reply(state) if stats_only: return store_stats_summary(reply) return memory_summary( state, group_by, sort_by, line_wrap, units, num_entries ) + store_stats_summary(reply)
["def","memory_summary","(","address=None",",","redis_password=ray_constants.REDIS_DEFAULT_PASSWORD",",","group_by=","''","NODE_ADDRESS","''",",","sort_by=","''","OBJECT_SIZE","''",",","units=","''","B","''",",","line_wrap=True",",","stats_only=False",",","num_entries=None",",",")",":","from","ray.dashboard.memory_utils","import","memory_summary","state","=","get_state_from_address","(","address",")","reply","=","get_memory_info_reply","(","state",")","if","stats_only",":","return","store_stats_summary","(","reply",")","return","memory_summary","(","state",",","group_by",",","sort_by",",","line_wrap",",","units",",","num_entries",")","+","store_stats_summary","(","reply",")"]
30
49
null
internal_api.py
ray/python/ray/_private/internal_api.py
import ray import ray._private.profiling import ray._private.services import ray._private.utils import ray._private.worker from ray._private import ray_constants from ray._private.state import GlobalState from ray._raylet import GcsClientOptions
15
null
8
7
null
null
null
Use image node_id 3 for calling a global function with example usage: memory_summary(address, redis_password, group_by, sort_by, units, line_wrap, stats_only, num_entries) and returns: unknown, store_stats_summary
213
node_id 3
1,801,718
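A sketch against a running cluster; address=None autodetects, and stats_only=True returns only the aggregate object-store stats:

from ray._private.internal_api import memory_summary

print(memory_summary(stats_only=True))         # aggregate object-store stats only
print(memory_summary(group_by="STACK_TRACE"))  # alternative grouping accepted by the dashboard utils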
get_memory_info_reply
global
null
false
state,node_manager_address,node_manager_port
null
null
null
null
reply
def get_memory_info_reply( state, node_manager_address=None, node_manager_port=None ): """Returns global memory info.""" from ray.core.generated import ( node_manager_pb2, node_manager_pb2_grpc, ) # We can ask any Raylet for the global memory info; that Raylet internally # asks all nodes in the cluster for memory stats. if node_manager_address is None or node_manager_port is None: # We should ask for a raylet that is alive. raylet = None for node in state.node_table(): if node["Alive"]: raylet = node break assert raylet is not None, "Every raylet is dead" raylet_address = "{}:{}".format( raylet["NodeManagerAddress"], raylet["NodeManagerPort"] ) else: raylet_address = "{}:{}".format( node_manager_address, node_manager_port ) channel = utils.init_grpc_channel( raylet_address, options=[ ("grpc.max_send_message_length", MAX_MESSAGE_LENGTH), ("grpc.max_receive_message_length", MAX_MESSAGE_LENGTH), ], ) stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel) reply = stub.FormatGlobalMemoryInfo( node_manager_pb2.FormatGlobalMemoryInfoRequest( include_memory_info=False ), timeout=60.0, ) return reply
["def","get_memory_info_reply","(","state",",","node_manager_address=None",",","node_manager_port=None",")",":","``","''","''","Returns","global","memory","info",".","''","''","''","from","ray.core.generated","import","(","node_manager_pb2",",","node_manager_pb2_grpc",",",")","#","We","can","ask","any","Raylet","for","the","global","memory","info",",","that","Raylet","internally","#","asks","all","nodes","in","the","cluster","for","memory","stats",".","if","node_manager_address","is","None","or","node_manager_port","is","None",":","#","We","should","ask","for","a","raylet","that","is","alive",".","raylet","=","None","for","node","in","state.node_table","(",")",":","if","node","[","``","Alive","''","]",":","raylet","=","node","break","assert","raylet","is","not","None",",","``","Every","raylet","is","dead","''","raylet_address","=","``","{","}",":","{","}","''",".format","(","raylet","[","``","NodeManagerAddress","''","]",",","raylet","[","``","NodeManagerPort","''","]",")","else",":","raylet_address","=","``","{","}",":","{","}","''",".format","(","node_manager_address",",","node_manager_port",")","channel","=","utils.init_grpc_channel","(","raylet_address",",","options=","[","(","``","grpc.max_send_message_length","''",",","MAX_MESSAGE_LENGTH",")",",","(","``","grpc.max_receive_message_length","''",",","MAX_MESSAGE_LENGTH",")",",","]",",",")","stub","=","node_manager_pb2_grpc.NodeManagerServiceStub","(","channel",")","reply","=","stub.FormatGlobalMemoryInfo","(","node_manager_pb2.FormatGlobalMemoryInfoRequest","(","include_memory_info=False",")",",","timeout=60.0",",",")","return","reply"]
52
86
null
internal_api.py
ray/python/ray/_private/internal_api.py
import ray import ray._private.profiling import ray._private.services import ray._private.utils import ray._private.worker from ray._private import ray_constants from ray._private.state import GlobalState from ray._raylet import GcsClientOptions
15
null
8
7
null
null
null
Use image node_id 4 for calling a global function with example usage: get_memory_info_reply(state, node_manager_address, node_manager_port) and returns: reply
158
node_id 4
1,801,719
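A sketch chaining this with the helpers defined alongside it; it assumes a reachable cluster with at least one alive raylet:

from ray._private.internal_api import (
    get_state_from_address,
    get_memory_info_reply,
    store_stats_summary,
)

state = get_state_from_address()
reply = get_memory_info_reply(state)  # any alive raylet aggregates stats for the whole cluster
print(store_stats_summary(reply))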
node_stats
global
null
false
node_manager_address,node_manager_port,include_memory_info
null
null
null
null
node_stats
def node_stats( node_manager_address=None, node_manager_port=None, include_memory_info=True, ): """Returns NodeStats object describing memory usage in the cluster.""" from ray.core.generated import ( node_manager_pb2, node_manager_pb2_grpc, ) # We can ask any Raylet for the global memory info. assert ( node_manager_address is not None and node_manager_port is not None ) raylet_address = "{}:{}".format( node_manager_address, node_manager_port ) channel = utils.init_grpc_channel( raylet_address, options=[ ("grpc.max_send_message_length", MAX_MESSAGE_LENGTH), ("grpc.max_receive_message_length", MAX_MESSAGE_LENGTH), ], ) stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel) node_stats = stub.GetNodeStats( node_manager_pb2.GetNodeStatsRequest( include_memory_info=include_memory_info ), timeout=30.0, ) return node_stats
["def","node_stats","(","node_manager_address=None",",","node_manager_port=None",",","include_memory_info=True",",",")",":","``","''","''","Returns","NodeStats","object","describing","memory","usage","in","the","cluster",".","''","''","''","from","ray.core.generated","import","(","node_manager_pb2",",","node_manager_pb2_grpc",",",")","#","We","can","ask","any","Raylet","for","the","global","memory","info",".","assert","(","node_manager_address","is","not","None","and","node_manager_port","is","not","None",")","raylet_address","=","``","{","}",":","{","}","''",".format","(","node_manager_address",",","node_manager_port",")","channel","=","utils.init_grpc_channel","(","raylet_address",",","options=","[","(","``","grpc.max_send_message_length","''",",","MAX_MESSAGE_LENGTH",")",",","(","``","grpc.max_receive_message_length","''",",","MAX_MESSAGE_LENGTH",")",",","]",",",")","stub","=","node_manager_pb2_grpc.NodeManagerServiceStub","(","channel",")","node_stats","=","stub.GetNodeStats","(","node_manager_pb2.GetNodeStatsRequest","(","include_memory_info=include_memory_info",")",",","timeout=30.0",",",")","return","node_stats"]
89
112
null
internal_api.py
ray/python/ray/_private/internal_api.py
import ray import ray._private.profiling import ray._private.services import ray._private.utils import ray._private.worker from ray._private import ray_constants from ray._private.state import GlobalState from ray._raylet import GcsClientOptions
15
null
8
7
null
null
null
Use image node_id 5 for calling a global function with example usage: node_stats(node_manager_address, node_manager_port, include_memory_info) and returns: node_stats
166
node_id 5
1,801,720
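Here both address parts are mandatory (unlike get_memory_info_reply, there is no alive-raylet fallback); the values below are placeholders:

from ray._private.internal_api import node_stats

stats = node_stats(
    node_manager_address="127.0.0.1",  # hypothetical raylet address
    node_manager_port=62365,           # hypothetical raylet port
    include_memory_info=False,
)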
store_stats_summary
global
null
false
reply
null
null
null
null
store_summary
def store_stats_summary(reply): """Returns formatted string describing object store stats in all nodes.""" store_summary = ( "--- Aggregate object store stats across all nodes ---\n" ) # TODO(ekl) it would be nice if we could provide a full memory usage # breakdown by type (e.g., pinned by worker, primary, etc.) store_summary += ( "Plasma memory usage {} MiB, {} objects, {}% full, {}% " "needed\n".format( int( reply.store_stats.object_store_bytes_used / (1024 * 1024) ), reply.store_stats.num_local_objects, round( 100 * reply.store_stats.object_store_bytes_used / reply.store_stats.object_store_bytes_avail, 2, ), round( 100 * reply.store_stats.object_store_bytes_primary_copy / reply.store_stats.object_store_bytes_avail, 2, ), ) ) if reply.store_stats.object_store_bytes_fallback > 0: store_summary += ( "Plasma filesystem mmap usage: {} MiB\n".format( int( reply.store_stats.object_store_bytes_fallback / (1024 * 1024) ) ) ) if reply.store_stats.spill_time_total_s > 0: store_summary += "Spilled {} MiB, {} objects, avg write throughput {} MiB/s\n".format( int( reply.store_stats.spilled_bytes_total / (1024 * 1024) ), reply.store_stats.spilled_objects_total, int( reply.store_stats.spilled_bytes_total / (1024 * 1024) / reply.store_stats.spill_time_total_s ), ) if reply.store_stats.restore_time_total_s > 0: store_summary += "Restored {} MiB, {} objects, avg read throughput {} MiB/s\n".format( int( reply.store_stats.restored_bytes_total / (1024 * 1024) ), reply.store_stats.restored_objects_total, int( reply.store_stats.restored_bytes_total / (1024 * 1024) / reply.store_stats.restore_time_total_s ), ) if reply.store_stats.consumed_bytes > 0: store_summary += ( "Objects consumed by Ray tasks: {} MiB.\n".format( int(reply.store_stats.consumed_bytes / (1024 * 1024)) ) ) if reply.store_stats.object_pulls_queued: store_summary += ( "Object fetches queued, waiting for available memory." ) return store_summary
["def","store_stats_summary","(","reply",")",":","``","''","''","Returns","formatted","string","describing","object","store","stats","in","all","nodes",".","''","''","''","store_summary","=","(","``","--","-","Aggregate","object","store","stats","across","all","nodes","--","-\\n","''",")","#","TODO","(","ekl",")","it","would","be","nice","if","we","could","provide","a","full","memory","usage","#","breakdown","by","type","(","e.g.",",","pinned","by","worker",",","primary",",","etc",".",")","store_summary","+=","(","``","Plasma","memory","usage","{","}","MiB",",","{","}","objects",",","{","}","%","full",",","{","}","%","``","``","needed\\n","''",".format","(","int","(","reply.store_stats.object_store_bytes_used","\/","(","1024","*","1024",")",")",",","reply.store_stats.num_local_objects",",","round","(","100","*","reply.store_stats.object_store_bytes_used","\/","reply.store_stats.object_store_bytes_avail",",","2",",",")",",","round","(","100","*","reply.store_stats.object_store_bytes_primary_copy","\/","reply.store_stats.object_store_bytes_avail",",","2",",",")",",",")",")","if","reply.store_stats.object_store_bytes_fallback",">","0",":","store_summary","+=","(","``","Plasma","filesystem","mmap","usage",":","{","}","MiB\\n","''",".format","(","int","(","reply.store_stats.object_store_bytes_fallback","\/","(","1024","*","1024",")",")",")",")","if","reply.store_stats.spill_time_total_s",">","0",":","store_summary","+=","``","Spilled","{","}","MiB",",","{","}","objects",",","avg","write","throughput","{","}","MiB\/s\\n","''",".format","(","int","(","reply.store_stats.spilled_bytes_total","\/","(","1024","*","1024",")",")",",","reply.store_stats.spilled_objects_total",",","int","(","reply.store_stats.spilled_bytes_total","\/","(","1024","*","1024",")","\/","reply.store_stats.spill_time_total_s",")",",",")","if","reply.store_stats.restore_time_total_s",">","0",":","store_summary","+=","``","Restored","{","}","MiB",",","{","}","objects",",","avg","read","throughput","{","}","MiB\/s\\n","''",".format","(","int","(","reply.store_stats.restored_bytes_total","\/","(","1024","*","1024",")",")",",","reply.store_stats.restored_objects_total",",","int","(","reply.store_stats.restored_bytes_total","\/","(","1024","*","1024",")","\/","reply.store_stats.restore_time_total_s",")",",",")","if","reply.store_stats.consumed_bytes",">","0",":","store_summary","+=","(","``","Objects","consumed","by","Ray","tasks",":","{","}","MiB.\\n","''",".format","(","int","(","reply.store_stats.consumed_bytes","\/","(","1024","*","1024",")",")",")",")","if","reply.store_stats.object_pulls_queued",":","store_summary","+=","(","``","Object","fetches","queued",",","waiting","for","available","memory",".","''",")","return","store_summary"]
115
174
null
internal_api.py
ray/python/ray/_private/internal_api.py
import ray import ray._private.profiling import ray._private.services import ray._private.utils import ray._private.worker from ray._private import ray_constants from ray._private.state import GlobalState from ray._raylet import GcsClientOptions
15
null
8
7
null
null
null
Use image node_id 6 for calling a global function with example usage: store_stats_summary(reply) and returns: store_summary
123
node_id 6
1,801,721
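store_stats_summary only formats an existing reply object; a short sketch reusing the reply from the previous example:

summary = store_stats_summary(reply)  # `reply` as returned by get_memory_info_reply(state)
print(summary)  # begins with "--- Aggregate object store stats across all nodes ---"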
free
global
null
false
object_refs,local_only
null
null
null
null
null
def free(object_refs: list, local_only: bool = False): """Free a list of IDs from the in-process and plasma object stores. This function is a low-level API which should be used in restricted scenarios. If local_only is false, the request will be send to all object stores. This method will not return any value to indicate whether the deletion is successful or not. This function is an instruction to the object store. If some of the objects are in use, the object stores will delete them later when the ref count is down to 0. Examples: .. testcode:: import ray @ray.remote def f(): return 0 obj_ref = f.remote() ray.get(obj_ref) # wait for object to be created first free([obj_ref]) # unpin & delete object globally Args: object_refs (List[ObjectRef]): List of object refs to delete. local_only: Whether only deleting the list of objects in local object store or all object stores. """ worker = ray._private.worker.global_worker if isinstance(object_refs, ray.ObjectRef): object_refs = [object_refs] if not isinstance(object_refs, list): raise TypeError( "free() expects a list of ObjectRef, got {}".format( type(object_refs) ) ) # Make sure that the values are object refs. for object_ref in object_refs: if not isinstance(object_ref, ray.ObjectRef): raise TypeError( "Attempting to call `free` on the value {}, " "which is not an ray.ObjectRef.".format(object_ref) ) worker.check_connected() with profiling.profile("ray.free"): if len(object_refs) == 0: return worker.core_worker.free_objects(object_refs, local_only)
["def","free","(","object_refs",":","list",",","local_only",":","bool","=","False",")",":","``","''","''","Free","a","list","of","IDs","from","the","in-process","and","plasma","object","stores",".","This","function","is","a","low-level","API","which","should","be","used","in","restricted","scenarios",".","If","local_only","is","false",",","the","request","will","be","send","to","all","object","stores",".","This","method","will","not","return","any","value","to","indicate","whether","the","deletion","is","successful","or","not",".","This","function","is","an","instruction","to","the","object","store",".","If","some","of","the","objects","are","in","use",",","the","object","stores","will","delete","them","later","when","the","ref","count","is","down","to","0",".","Examples",":","..","testcode",":",":","import","ray","@","ray.remote","def","f","(",")",":","return","0","obj_ref","=","f.remote","(",")","ray.get","(","obj_ref",")","#","wait","for","object","to","be","created","first","free","(","[","obj_ref","]",")","#","unpin","&","delete","object","globally","Args",":","object_refs","(","List","[","ObjectRef","]",")",":","List","of","object","refs","to","delete",".","local_only",":","Whether","only","deleting","the","list","of","objects","in","local","object","store","or","all","object","stores.","``","''","''","worker","=","ray._private.worker.global_worker","if","isinstance","(","object_refs",",","ray.ObjectRef",")",":","object_refs","=","[","object_refs","]","if","not","isinstance","(","object_refs",",","list",")",":","raise","TypeError","(","``","free","(",")","expects","a","list","of","ObjectRef",",","got","{","}","''",".format","(","type","(","object_refs",")",")",")","#","Make","sure","that","the","values","are","object","refs",".","for","object_ref","in","object_refs",":","if","not","isinstance","(","object_ref",",","ray.ObjectRef",")",":","raise","TypeError","(","``","Attempting","to","call","`","free","`","on","the","value","{","}",",","``","``","which","is","not","an","ray.ObjectRef",".","``",".format","(","object_ref",")",")","worker.check_connected","(",")","with","profiling.profile","(","``","ray.free","''",")",":","if","len","(","object_refs",")","==","0",":","return","worker.core_worker.free_objects","(","object_refs",",","local_only",")"]
177
232
null
internal_api.py
ray/python/ray/_private/internal_api.py
import ray import ray._private.profiling import ray._private.services import ray._private.utils import ray._private.worker from ray._private import ray_constants from ray._private.state import GlobalState from ray._raylet import GcsClientOptions
15
null
8
7
null
null
null
Use image node_id 7 for calling a global function with example usage: free(object_refs, local_only) without return types
120
node_id 7
1,801,722
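An illustrative call sequence for this record, mirroring the docstring's testcode (assumes a running Ray instance; the import path follows this record's file_path):

import ray
from ray._private.internal_api import free  # internal, unstable API

ray.init()

@ray.remote
def f():
    return 0

obj_ref = f.remote()
ray.get(obj_ref)  # make sure the object exists before freeing it
free([obj_ref])   # unpin & delete the object in all object stores;
                  # pass local_only=True to restrict deletion to the local store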
drop_block_fast_2d
global
null
false
x,drop_prob,block_size,gamma_scale,with_noise,inplace
null
null
null
null
x
def drop_block_fast_2d(
    x: torch.Tensor,
    drop_prob: float = 0.1,
    block_size: int = 7,
    gamma_scale: float = 1.0,
    with_noise: bool = False,
    inplace: bool = False,
):
    """DropBlock. See https://arxiv.org/pdf/1810.12890.pdf

    DropBlock with an experimental gaussian noise option. Simplified from above
    without concern for valid block mask at edges.
    """
    B, C, H, W = x.shape
    total_size = W * H
    clipped_block_size = min(block_size, min(W, H))
    gamma = (
        gamma_scale
        * drop_prob
        * total_size
        / clipped_block_size**2
        / ((W - block_size + 1) * (H - block_size + 1))
    )

    block_mask = torch.empty_like(x).bernoulli_(gamma)
    block_mask = F.max_pool2d(
        block_mask.to(x.dtype),
        kernel_size=clipped_block_size,
        stride=1,
        padding=clipped_block_size // 2,
    )

    if with_noise:
        normal_noise = torch.empty_like(x).normal_()
        if inplace:
            x.mul_(1.0 - block_mask).add_(normal_noise * block_mask)
        else:
            x = x * (1.0 - block_mask) + normal_noise * block_mask
    else:
        block_mask = 1 - block_mask
        normalize_scale = (
            block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-6)
        ).to(dtype=x.dtype)
        if inplace:
            x.mul_(block_mask * normalize_scale)
        else:
            x = x * block_mask * normalize_scale
    return x
["def","drop_block_fast_2d","(","x",":","torch.Tensor",",","drop_prob",":","float","=","0.1",",","block_size",":","int","=","7",",","gamma_scale",":","float","=","1.0",",","with_noise",":","bool","=","False",",","inplace",":","bool","=","False",",",")",":","``","''","''","DropBlock",".","See","https",":","\/\/arxiv.org\/pdf\/1810.12890.pdf","DropBlock","with","an","experimental","gaussian","noise","option",".","Simplied","from","above","without","concern","for","valid","block","mask","at","edges.","``","''","''","B",",","C",",","H",",","W","=","x.shape","total_size","=","W","*","H","clipped_block_size","=","min","(","block_size",",","min","(","W",",","H",")",")","gamma","=","(","gamma_scale","*","drop_prob","*","total_size","\/","clipped_block_size","*","*","2","\/","(","(","W","-","block_size","+","1",")","*","(","H","-","block_size","+","1",")",")",")","block_mask","=","torch.empty_like","(","x",")",".bernoulli_","(","gamma",")","block_mask","=","F.max_pool2d","(","block_mask.to","(","x.dtype",")",",","kernel_size=clipped_block_size",",","stride=1",",","padding=clipped_block_size","\/\/","2",",",")","if","with_noise",":","normal_noise","=","torch.empty_like","(","x",")",".normal_","(",")","if","inplace",":","x.mul_","(","1.0","-","block_mask",")",".add_","(","normal_noise","*","block_mask",")","else",":","x","=","x","*","(","1.0","-","block_mask",")","+","normal_noise","*","block_mask","else",":","block_mask","=","1","-","block_mask","normalize_scale","=","(","block_mask.numel","(",")","\/","block_mask.to","(","dtype=torch.float32",")",".sum","(",")",".add","(","1e-6",")",")",".to","(","dtype=x.dtype",")","if","inplace",":","x.mul_","(","block_mask","*","normalize_scale",")","else",":","x","=","x","*","block_mask","*","normalize_scale","return","x"]
70
101
null
drop.py
pytorch-image-models/timm/layers/drop.py
import torch import torch.nn import torch.nn.functional
15
null
3
3
null
null
null
Use image node_id 2 for calling a global function with example usage: drop_block_fast_2d(x, drop_prob, block_size, gamma_scale, with_noise, inplace) and returns: x
163
node_id 2
1,692,292
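A short usage sketch; the import path is inferred from this record's file_path and may differ across timm versions:

import torch
from timm.layers.drop import drop_block_fast_2d  # assumed import path

x = torch.randn(2, 8, 16, 16)  # (B, C, H, W)
y = drop_block_fast_2d(x, drop_prob=0.2, block_size=5)
assert y.shape == x.shape      # blocks are zeroed (and rescaled); shape is preserved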
extended_trait_info
global
null
false
trait
null
null
null
null
trait,dict_info,str,str
def extended_trait_info(trait): if isinstance(trait, Dict): return dict_info(trait) elif isinstance(trait, Container): if trait._trait is None: return "{} of any type".format(trait.info()) return "{} with values that are: {}".format( trait.info(), trait._trait.info() ) return trait.info()
["def","extended_trait_info","(","trait",")",":","if","isinstance","(","trait",",","Dict",")",":","return","dict_info","(","trait",")","elif","isinstance","(","trait",",","Container",")",":","if","trait._trait","is","None",":","return","``","{","}","of","any","type","''",".format","(","trait.info","(",")",")","return","``","{","}","with","values","that","are",":","{","}","''",".format","(","trait.info","(",")",",","trait._trait.info","(",")",")","return","trait.info","(",")"]
32
39
null
autodoc_traits.py
pythreejs/docs/sphinxext/autodoc_traits.py
from collections import OrderedDict from traitlets import TraitType, Undefined, Container, Dict, Any, HasTraits from sphinx.ext.autodoc import ClassDocumenter, AttributeDocumenter
15
null
3
3
null
null
null
Use image node_id 2 for calling a global function with example usage: extended_trait_info(trait) and returns: trait, dict_info, str, str
136
node_id 2
1,691,046
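An illustrative sketch, assuming extended_trait_info is in scope (it lives in a Sphinx extension module, not an installed package); the exact info() strings vary with the traitlets version:

from traitlets import Int, List, Dict

print(extended_trait_info(Int()))        # e.g. "an int"
print(extended_trait_info(List(Int())))  # container case: element type reported too
print(extended_trait_info(List()))       # untyped container: "... of any type"
print(extended_trait_info(Dict()))       # delegates to dict_info (see that record below)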
decimate
global
null
false
x,q,n,ftype,axis,zero_phase
null
null
null
null
y
def decimate(x, q, n=None, ftype="iir", axis=-1, zero_phase=True):
    """
    Downsample the signal after applying an anti-aliasing filter.

    By default, an order 8 Chebyshev type I filter is used. A 30 point FIR
    filter with Hamming window is used if `ftype` is 'fir'.

    Parameters
    ----------
    x : array_like
        The signal to be downsampled, as an N-dimensional array.
    q : int
        The downsampling factor. When using IIR downsampling, it is
        recommended to call `decimate` multiple times for downsampling
        factors higher than 13.
    n : int, optional
        The order of the filter (1 less than the length for 'fir').
        Defaults to 8 for 'iir' and 20 times the downsampling factor for
        'fir'.
    ftype : str {'iir', 'fir'} or ``dlti`` instance, optional
        If 'iir' or 'fir', specifies the type of lowpass filter. If an
        instance of a `dlti` object, uses that object to filter before
        downsampling.
    axis : int, optional
        The axis along which to decimate.
    zero_phase : bool, optional
        Prevent phase shift by filtering with `filtfilt` instead of
        `lfilter` when using an IIR filter, and shifting the outputs back
        by the filter's group delay when using an FIR filter. The default
        value of ``True`` is recommended, since a phase shift is generally
        not desired.

    Returns
    -------
    y : ndarray
        The down-sampled signal.

    See Also
    --------
    resample : Resample up or down using the FFT method.
    resample_poly : Resample using polyphase filtering and an FIR filter.
    """
    x = cupy.asarray(x)
    q = operator.index(q)

    if n is not None:
        n = operator.index(n)

    result_type = x.dtype
    if (
        not cupy.issubdtype(result_type, cupy.inexact)
        or result_type.type == cupy.float16
    ):
        # upcast integers and float16 to float64
        result_type = cupy.float64

    if ftype == "fir":
        if n is None:
            half_len = 10 * q  # reasonable cutoff for our sinc-like function
            n = 2 * half_len
        b, a = firwin(n + 1, 1.0 / q, window="hamming"), 1.0
        b = cupy.asarray(b, dtype=result_type)
        a = cupy.asarray(a, dtype=result_type)
    elif ftype == "iir":
        iir_use_sos = True
        if n is None:
            n = 8
        sos = cheby1(n, 0.05, 0.8 / q, output="sos")
        sos = cupy.asarray(sos, dtype=result_type)
    elif isinstance(ftype, dlti):
        system = ftype._as_zpk()
        if system.poles.shape[0] == 0:
            # FIR
            system = ftype._as_tf()
            b, a = system.num, system.den
            ftype = "fir"
        elif (
            any(cupy.iscomplex(system.poles))
            or any(cupy.iscomplex(system.zeros))
            or cupy.iscomplex(system.gain)
        ):
            # sosfilt & sosfiltfilt don't handle complex coeffs
            iir_use_sos = False
            system = ftype._as_tf()
            b, a = system.num, system.den
        else:
            iir_use_sos = True
            sos = zpk2sos(system.zeros, system.poles, system.gain)
            sos = cupy.asarray(sos, dtype=result_type)
    else:
        raise ValueError("invalid ftype")

    sl = [slice(None)] * x.ndim

    if ftype == "fir":
        b = b / a
        if zero_phase:
            y = resample_poly(x, 1, q, axis=axis, window=b)
        else:
            # upfirdn is generally faster than lfilter by a factor equal to the
            # downsampling factor, since it only calculates the needed outputs
            n_out = x.shape[axis] // q + bool(x.shape[axis] % q)
            y = upfirdn(b, x, up=1, down=q, axis=axis)
            sl[axis] = slice(None, n_out, None)
    else:  # IIR case
        if zero_phase:
            if iir_use_sos:
                y = sosfiltfilt(sos, x, axis=axis)
            else:
                y = filtfilt(b, a, x, axis=axis)
        else:
            if iir_use_sos:
                y = sosfilt(sos, x, axis=axis)
            else:
                y = lfilter(b, a, x, axis=axis)
        sl[axis] = slice(None, None, q)

    return y[tuple(sl)]
["def","decimate","(","x",",","q",",","n=None",",","ftype=","''","iir","''",",","axis=-1",",","zero_phase=True",")",":","``","''","''","Downsample","the","signal","after","applying","an","anti-aliasing","filter",".","By","default",",","an","order","8","Chebyshev","type","I","filter","is","used",".","A","30","point","FIR","filter","with","Hamming","window","is","used","if","`","ftype","`","is","'fir","'",".","Parameters","--","--","--","--","--","x",":","array_like","The","signal","to","be","downsampled",",","as","an","N-dimensional","array",".","q",":","int","The","downsampling","factor",".","When","using","IIR","downsampling",",","it","is","recommended","to","call","`","decimate","`","multiple","times","for","downsampling","factors","higher","than","13.","n",":","int",",","optional","The","order","of","the","filter","(","1","less","than","the","length","for","'fir","'",")",".","Defaults","to","8","for","'iir","'","and","20","times","the","downsampling","factor","for","'fir","'",".","ftype",":","str","{","'iir","'",",","'fir","'","}","or","``","dlti","``","instance",",","optional","If","'iir","'","or","'fir","'",",","specifies","the","type","of","lowpass","filter",".","If","an","instance","of","an","`","dlti","`","object",",","uses","that","object","to","filter","before","downsampling",".","axis",":","int",",","optional","The","axis","along","which","to","decimate",".","zero_phase",":","bool",",","optional","Prevent","phase","shift","by","filtering","with","`","filtfilt","`","instead","of","`","lfilter","`","when","using","an","IIR","filter",",","and","shifting","the","outputs","back","by","the","filter's","group","delay","when","using","an","FIR","filter",".","The","default","value","of","``","True","``","is","recommended",",","since","a","phase","shift","is","generally","not","desired",".","Returns","--","--","--","-","y",":","ndarray","The","down-sampled","signal",".","See","Also","--","--","--","--","resample",":","Resample","up","or","down","using","the","FFT","method",".","resample_poly",":","Resample","using","polyphase","filtering","and","an","FIR","filter.","``","''","''","x","=","cupy.asarray","(","x",")","q","=","operator.index","(","q",")","if","n","is","not","None",":","n","=","operator.index","(","n",")","result_type","=","x.dtype","if","(","not","cupy.issubdtype","(","result_type",",","cupy.inexact",")","or","result_type.type","==","cupy.float16",")",":","#","upcast","integers","and","float16","to","float64","result_type","=","cupy.float64","if","ftype","==","``","fir","''",":","if","n","is","None",":","half_len","=","(","10","*","q",")","#","reasonable","cutoff","for","our","sinc-like","function","n","=","2","*","half_len","b",",","a","=","firwin","(","n","+","1",",","1.0","\/","q",",","window=","''","hamming","''",")",",","1.0","b","=","cupy.asarray","(","b",",","dtype=result_type",")","a","=","cupy.asarray","(","a",",","dtype=result_type",")","elif","ftype","==","``","iir","''",":","iir_use_sos","=","True","if","n","is","None",":","n","=","8","sos","=","cheby1","(","n",",","0.05",",","0.8","\/","q",",","output=","''","sos","''",")","sos","=","cupy.asarray","(","sos",",","dtype=result_type",")","elif","isinstance","(","ftype",",","dlti",")",":","system","=","ftype._as_zpk","(",")","if","system.poles.shape","[","0","]","==","0",":","#","FIR","system","=","ftype._as_tf","(",")","b",",","a","=","system.num",",","system.den","ftype","=","``","fir","''","elif","(","any","(","cupy.iscomplex","(","system.poles",")",")","or","any","(","cupy.iscomplex","(","system.poles",")",")","or
","cupy.iscomplex","(","system.gain",")",")",":","#","sosfilt","&","sosfiltfilt","do","n't","handle","complex","coeffs","iir_use_sos","=","False","system","=","ftype._as_tf","(",")","b",",","a","=","system.num",",","system.den","else",":","iir_use_sos","=","True","sos","=","zpk2sos","(","system.zeros",",","system.poles",",","system.gain",")","sos","=","cupy.asarray","(","sos",",","dtype=result_type",")","else",":","raise","ValueError","(","``","invalid","ftype","''",")","sl","=","[","slice","(","None",")","]","*","x.ndim","if","ftype","==","``","fir","''",":","b","=","b","\/","a","if","zero_phase",":","y","=","resample_poly","(","x",",","1",",","q",",","axis=axis",",","window=b",")","else",":","#","upfirdn","is","generally","faster","than","lfilter","by","a","factor","equal","to","the","#","downsampling","factor",",","since","it","only","calculates","the","needed","outputs","n_out","=","x.shape","[","axis","]","\/\/","q","+","bool","(","x.shape","[","axis","]","%","q",")","y","=","upfirdn","(","b",",","x",",","up=1",",","down=q",",","axis=axis",")","sl","[","axis","]","=","slice","(","None",",","n_out",",","None",")","else",":","#","IIR","case","if","zero_phase",":","if","iir_use_sos",":","y","=","sosfiltfilt","(","sos",",","x",",","axis=axis",")","else",":","y","=","filtfilt","(","b",",","a",",","x",",","axis=axis",")","else",":","if","iir_use_sos",":","y","=","sosfilt","(","sos",",","x",",","axis=axis",")","else",":","y","=","lfilter","(","b",",","a",",","x",",","axis=axis",")","sl","[","axis","]","=","slice","(","None",",","None",",","q",")","return","y","[","tuple","(","sl",")","]"]
99
212
null
_resample.py
cupy/cupyx/scipy/signal/_resample.py
import operator from math import gcd import cupy from cupyx.scipy.fft import fft, rfft, fftfreq, ifft, irfft, ifftshift from cupyx.scipy.signal._iir_filter_design import cheby1 from cupyx.scipy.signal._fir_filter_design import firwin from cupyx.scipy.signal._iir_filter_conversions import zpk2sos from cupyx.scipy.signal._ltisys import dlti from cupyx.scipy.signal._upfirdn import upfirdn, _output_len from cupyx.scipy.signal._signaltools import sosfiltfilt, filtfilt, sosfilt, lfilter from cupyx.scipy.signal.windows._windows import get_window
15
null
11
4
null
null
null
Use image node_id 2 for calling a global function with example usage: decimate(x, q, n, ftype, axis, zero_phase) and returns: y
127
node_id 2
692,582
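A runnable sketch against the public entry point for this record's function (requires a CUDA device):

import cupy as cp
from cupyx.scipy.signal import decimate

t = cp.linspace(0, 1, 8000, endpoint=False)
x = cp.sin(2 * cp.pi * 50 * t) + 0.1 * cp.sin(2 * cp.pi * 900 * t)
y = decimate(x, 4)   # default: order-8 Chebyshev-I IIR with zero_phase=True
print(y.shape)       # (2000,): every 4th sample after lowpass filtering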
dict_info
global
null
false
trait
null
null
null
null
str
def dict_info(trait): try: trait_base = trait._value_trait except AttributeError: trait_base = trait._trait try: traits = trait._per_key_traits except AttributeError: traits = trait._traits if traits is None and ( trait_base is None or isinstance(trait_base, Any) ): value_string = "elements of any type" else: parts = [] if traits: parts.append( "the following types: %r" % {k: v.info() for k, v in traits} ) if trait_base: parts.append("values that are: %s" % trait_base.info()) value_string = "elements with " + ", and ".join(parts) return "{} with {}".format(trait.info(), value_string)
["def","dict_info","(","trait",")",":","try",":","trait_base","=","trait._value_trait","except","AttributeError",":","trait_base","=","trait._trait","try",":","traits","=","trait._per_key_traits","except","AttributeError",":","traits","=","trait._traits","if","traits","is","None","and","(","trait_base","is","None","or","isinstance","(","trait_base",",","Any",")",")",":","value_string","=","``","elements","of","any","type","''","else",":","parts","=","[","]","if","traits",":","parts.append","(","``","the","following","types",":","%","r","''","%","{","k",":","v.info","(",")","for","k",",","v","in","traits","}",")","if","trait_base",":","parts.append","(","``","values","that","are",":","%","s","''","%","trait_base.info","(",")",")","value_string","=","``","elements","with","``","+","``",",","and","``",".join","(","parts",")","return","``","{","}","with","{","}","''",".format","(","trait.info","(",")",",","value_string",")"]
9
29
null
autodoc_traits.py
pythreejs/docs/sphinxext/autodoc_traits.py
from collections import OrderedDict from traitlets import TraitType, Undefined, Container, Dict, Any, HasTraits from sphinx.ext.autodoc import ClassDocumenter, AttributeDocumenter
15
null
3
3
null
null
null
Use image node_id 1 for calling a global function with example usage: dict_info(trait) and returns: str
103
node_id 1
1,691,045
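A sketch assuming dict_info is in scope; the Dict keyword shown here is the traitlets >= 5 spelling (older versions used trait=/traits=), and the exact info() strings vary by version:

from traitlets import Dict, Int

d = Dict(value_trait=Int())
print(dict_info(d))  # e.g. "a dict with elements with values that are: an int"
print(dict_info(Dict()))  # untyped: "... with elements of any type"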
format_name
TraitDocumenter
AttributeDocumenter
true
self
null
null
null
null
self
def format_name(self): return self.objpath[-1]
["def","format_name","(","self",")",":","return","self.objpath","[","-1","]"]
83
84
null
autodoc_traits.py
pythreejs/docs/sphinxext/autodoc_traits.py
from collections import OrderedDict from traitlets import TraitType, Undefined, Container, Dict, Any, HasTraits from sphinx.ext.autodoc import ClassDocumenter, AttributeDocumenter
15
2
3
3
2
3
1
Use image node_id 2 for calling the TraitDocumenter obj's underlying member method code with example usage: obj.format_name() and returns: self
143
node_id 2
1,691,043
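The override only reads objpath, so its effect is easy to show with a stub (a stand-in object, not the real Sphinx documenter; assumes TraitDocumenter from this extension module is importable): the rendered name is the bare trait name instead of the dotted path.

class _Stub:
    objpath = ["Widget", "position"]

print(TraitDocumenter.format_name(_Stub()))  # -> "position"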
_fetch_lfw_people
global
null
false
data_folder_path,slice_,color,resize,min_faces_per_person
null
null
null
null
faces, target, target_names
def _fetch_lfw_people(
    data_folder_path, slice_=None, color=False, resize=None, min_faces_per_person=0
):
    """Perform the actual data loading for the lfw people dataset

    This operation is meant to be cached by a joblib wrapper.
    """
    # scan the data folder content to retain people with more than
    # `min_faces_per_person` face pictures
    person_names, file_paths = [], []
    for person_name in sorted(listdir(data_folder_path)):
        folder_path = join(data_folder_path, person_name)
        if not isdir(folder_path):
            continue
        paths = [join(folder_path, f) for f in sorted(listdir(folder_path))]
        n_pictures = len(paths)
        if n_pictures >= min_faces_per_person:
            person_name = person_name.replace("_", " ")
            person_names.extend([person_name] * n_pictures)
            file_paths.extend(paths)

    n_faces = len(file_paths)
    if n_faces == 0:
        raise ValueError(
            "min_faces_per_person=%d is too restrictive" % min_faces_per_person
        )

    target_names = np.unique(person_names)
    target = np.searchsorted(target_names, person_names)

    faces = _load_imgs(file_paths, slice_, color, resize)

    # shuffle the faces with a deterministic RNG scheme to avoid having
    # all faces of the same person in a row, as it would break some
    # cross validation and learning algorithms such as SGD and online
    # k-means that make an IID assumption
    indices = np.arange(n_faces)
    np.random.RandomState(42).shuffle(indices)
    faces, target = faces[indices], target[indices]
    return faces, target, target_names
["def","_fetch_lfw_people","(","data_folder_path",",","slice_=None",",","color=False",",","resize=None",",","min_faces_per_person=0",",",")",":","``","''","''","Perform","the","actual","data","loading","for","the","lfw","people","dataset","This","operation","is","meant","to","be","cached","by","a","joblib","wrapper.","``","''","''","#","scan","the","data","folder","content","to","retain","people","with","more","that","#","`","min_faces_per_person","`","face","pictures","person_names",",","file_paths","=","[","]",",","[","]","for","person_name","in","sorted","(","listdir","(","data_folder_path",")",")",":","folder_path","=","join","(","data_folder_path",",","person_name",")","if","not","isdir","(","folder_path",")",":","continue","paths","=","[","join","(","folder_path",",","f",")","for","f","in","sorted","(","listdir","(","folder_path",")",")","]","n_pictures","=","len","(","paths",")","if","n_pictures",">","=","min_faces_per_person",":","person_name","=","person_name.replace","(","``","_","''",",","``","``",")","person_names.extend","(","[","person_name","]","*","n_pictures",")","file_paths.extend","(","paths",")","n_faces","=","len","(","file_paths",")","if","n_faces","==","0",":","raise","ValueError","(","``","min_faces_per_person=","%","d","is","too","restrictive","''","%","min_faces_per_person",")","target_names","=","np.unique","(","person_names",")","target","=","np.searchsorted","(","target_names",",","person_names",")","faces","=","_load_imgs","(","file_paths",",","slice_",",","color",",","resize",")","#","shuffle","the","faces","with","a","deterministic","RNG","scheme","to","avoid","having","#","all","faces","of","the","same","person","in","a","row",",","as","it","would","break","some","#","cross","validation","and","learning","algorithms","such","as","SGD","and","online","#","k-means","that","make","an","IID","assumption","indices","=","np.arange","(","n_faces",")","np.random.RandomState","(","42",")",".shuffle","(","indices",")","faces",",","target","=","faces","[","indices","]",",","target","[","indices","]","return","faces",",","target",",","target_names"]
192
232
null
_lfw.py
catboost/contrib/python/scikit-learn/py3/sklearn/datasets/_lfw.py
import logging from numbers import Integral, Real from os import PathLike, listdir, makedirs, remove from os.path import exists, isdir, join import numpy from joblib import Memory from ..utils import Bunch from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params from ._base import RemoteFileMetadata, _fetch_remote, get_data_home, load_descr
15
null
9
6
null
null
null
Use image node_id 3 for calling a global function with example usage: _fetch_lfw_people(data_folder_path, slice_, color, resize, min_faces_per_person) and returns: faces, target, target_names
193
node_id 3
520,101
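An illustrative call, assuming an already-downloaded and extracted LFW folder (the path below is hypothetical); the public wrapper sklearn.datasets.fetch_lfw_people handles download and joblib caching around this helper:

faces, target, target_names = _fetch_lfw_people(
    "/tmp/lfw_home/lfw_funneled",  # hypothetical extracted dataset directory
    resize=0.5,
    min_faces_per_person=70,
)
print(faces.shape, target_names)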
__init__
AudioToSlowFastFusionBuilder
null
true
self,slowfast_channel_reduction_ratio,slowfast_audio_reduction_ratio,conv_fusion_channel_ratio,conv_kernel_size,conv_kernel_size_a,conv_stride,conv_stride_a,conv_fusion_channel_interm_dim,conv_num_a,norm,norm_eps,norm_momentum,activation,max_stage_idx
null
null
Given a list of two tensors from Slow pathway and Fast pathway, fusion information from the Fast pathway to the Slow on through a convolution followed by a concatenation, then return the fused list of tensors from Slow and Fast pathway in order. Args: slowfast_channel_reduction_ratio (int): Reduction ratio from the stage dimension. Used to compute conv_dim_in = fusion_dim_in // slowfast_channel_reduction_ratio slowfast_audio_reduction_ratio (int): Audio Reduction ratio from the stage dimension. Used to compute conv_dim_in_a = fusion_dim_in // slowfast_audio_reduction_ratio conv_fusion_channel_ratio (int): channel ratio for the convolution used to fuse from Fast pathway to Slow pathway. conv_kernel_size (int): kernel size of the convolution used to fuse from Fast pathway to Slow pathway. conv_kernel_size_a (int): kernel size of the convolution used to fuse from Audio pathway to FastSlow pathway. conv_stride (int): stride size of the convolution used to fuse from Fast pathway to Slow pathway. Optionally indexed by stage. conv_stride_a (int): stride size of the convolution used to fuse from Audio pathway to FastSlow pathway. Optionally indexed by stage. conv_fusion_channel_interm_dim (Union[int, float]): When conv_num_a > 1 this value controls the dimensions of the intermediate conv conv_num_a (int): Number of intermediate conv for audio channel norm (callable): a callable that constructs normalization layer, examples include nn.BatchNorm3d, None (not performing normalization). norm_eps (float): normalization epsilon. norm_momentum (float): normalization momentum. activation (callable): a callable that constructs activation layer, examples include: nn.ReLU, nn.Softmax, nn.Sigmoid, and None (not performing activation). max_stage_idx (int): Returns identity module if we exceed this
["Given","a","list","of","two","tensors","from","Slow","pathway","and","Fast","pathway",",","fusion","information","from","the","Fast","pathway","to","the","Slow","on","through","a","convolution","followed","by","a","concatenation",",","then","return","the","fused","list","of","tensors","from","Slow","and","Fast","pathway","in","order",".","Args",":","slowfast_channel_reduction_ratio","(","int",")",":","Reduction","ratio","from","the","stage","dimension",".","Used","to","compute","conv_dim_in","=","fusion_dim_in","\/\/","slowfast_channel_reduction_ratio","slowfast_audio_reduction_ratio","(","int",")",":","Audio","Reduction","ratio","from","the","stage","dimension",".","Used","to","compute","conv_dim_in_a","=","fusion_dim_in","\/\/","slowfast_audio_reduction_ratio","conv_fusion_channel_ratio","(","int",")",":","channel","ratio","for","the","convolution","used","to","fuse","from","Fast","pathway","to","Slow","pathway",".","conv_kernel_size","(","int",")",":","kernel","size","of","the","convolution","used","to","fuse","from","Fast","pathway","to","Slow","pathway",".","conv_kernel_size_a","(","int",")",":","kernel","size","of","the","convolution","used","to","fuse","from","Audio","pathway","to","FastSlow","pathway",".","conv_stride","(","int",")",":","stride","size","of","the","convolution","used","to","fuse","from","Fast","pathway","to","Slow","pathway",".","Optionally","indexed","by","stage",".","conv_stride_a","(","int",")",":","stride","size","of","the","convolution","used","to","fuse","from","Audio","pathway","to","FastSlow","pathway",".","Optionally","indexed","by","stage",".","conv_fusion_channel_interm_dim","(","Union","[","int",",","float","]",")",":","When","conv_num_a",">","1","this","value","controls","the","dimensions","of","the","intermediate","conv","conv_num_a","(","int",")",":","Number","of","intermediate","conv","for","audio","channel","norm","(","callable",")",":","a","callable","that","constructs","normalization","layer",",","examples","include","nn.BatchNorm3d",",","None","(","not","performing","normalization",")",".","norm_eps","(","float",")",":","normalization","epsilon",".","norm_momentum","(","float",")",":","normalization","momentum",".","activation","(","callable",")",":","a","callable","that","constructs","activation","layer",",","examples","include",":","nn.ReLU",",","nn.Softmax",",","nn.Sigmoid",",","and","None","(","not","performing","activation",")",".","max_stage_idx","(","int",")",":","Returns","identity","module","if","we","exceed","this"]
AudioToSlowFastFusionBuilder
def __init__( self, slowfast_channel_reduction_ratio: int, slowfast_audio_reduction_ratio: int, conv_fusion_channel_ratio: float, conv_kernel_size: Tuple[int], conv_kernel_size_a: Tuple[int], conv_stride: Union[Tuple[int], Tuple[Tuple[int]]], conv_stride_a: Union[Tuple[int], Tuple[Tuple[int]]], conv_fusion_channel_interm_dim: Union[ int, float ] = 0.25, # also, 64 conv_num_a: int = 2, norm: Callable = nn.BatchNorm3d, norm_eps: float = 1e-5, norm_momentum: float = 0.1, activation: Callable = nn.ReLU, max_stage_idx: int = 3, ) -> None: """ Given a list of two tensors from Slow pathway and Fast pathway, fusion information from the Fast pathway to the Slow on through a convolution followed by a concatenation, then return the fused list of tensors from Slow and Fast pathway in order. Args: slowfast_channel_reduction_ratio (int): Reduction ratio from the stage dimension. Used to compute conv_dim_in = fusion_dim_in // slowfast_channel_reduction_ratio slowfast_audio_reduction_ratio (int): Audio Reduction ratio from the stage dimension. Used to compute conv_dim_in_a = fusion_dim_in // slowfast_audio_reduction_ratio conv_fusion_channel_ratio (int): channel ratio for the convolution used to fuse from Fast pathway to Slow pathway. conv_kernel_size (int): kernel size of the convolution used to fuse from Fast pathway to Slow pathway. conv_kernel_size_a (int): kernel size of the convolution used to fuse from Audio pathway to FastSlow pathway. conv_stride (int): stride size of the convolution used to fuse from Fast pathway to Slow pathway. Optionally indexed by stage. conv_stride_a (int): stride size of the convolution used to fuse from Audio pathway to FastSlow pathway. Optionally indexed by stage. conv_fusion_channel_interm_dim (Union[int, float]): When conv_num_a > 1 this value controls the dimensions of the intermediate conv conv_num_a (int): Number of intermediate conv for audio channel norm (callable): a callable that constructs normalization layer, examples include nn.BatchNorm3d, None (not performing normalization). norm_eps (float): normalization epsilon. norm_momentum (float): normalization momentum. activation (callable): a callable that constructs activation layer, examples include: nn.ReLU, nn.Softmax, nn.Sigmoid, and None (not performing activation). max_stage_idx (int): Returns identity module if we exceed this """ set_attributes(self, locals())
["def","__init__","(","self",",","slowfast_channel_reduction_ratio",":","int",",","slowfast_audio_reduction_ratio",":","int",",","conv_fusion_channel_ratio",":","float",",","conv_kernel_size",":","Tuple","[","int","]",",","conv_kernel_size_a",":","Tuple","[","int","]",",","conv_stride",":","Union","[","Tuple","[","int","]",",","Tuple","[","Tuple","[","int","]","]","]",",","conv_stride_a",":","Union","[","Tuple","[","int","]",",","Tuple","[","Tuple","[","int","]","]","]",",","conv_fusion_channel_interm_dim",":","Union","[","int",",","float","]","=","0.25",",","#","also",",","64","conv_num_a",":","int","=","2",",","norm",":","Callable","=","nn.BatchNorm3d",",","norm_eps",":","float","=","1e-5",",","norm_momentum",":","float","=","0.1",",","activation",":","Callable","=","nn.ReLU",",","max_stage_idx",":","int","=","3",",",")","-",">","None",":","``","''","''","Given","a","list","of","two","tensors","from","Slow","pathway","and","Fast","pathway",",","fusion","information","from","the","Fast","pathway","to","the","Slow","on","through","a","convolution","followed","by","a","concatenation",",","then","return","the","fused","list","of","tensors","from","Slow","and","Fast","pathway","in","order",".","Args",":","slowfast_channel_reduction_ratio","(","int",")",":","Reduction","ratio","from","the","stage","dimension",".","Used","to","compute","conv_dim_in","=","fusion_dim_in","\/\/","slowfast_channel_reduction_ratio","slowfast_audio_reduction_ratio","(","int",")",":","Audio","Reduction","ratio","from","the","stage","dimension",".","Used","to","compute","conv_dim_in_a","=","fusion_dim_in","\/\/","slowfast_audio_reduction_ratio","conv_fusion_channel_ratio","(","int",")",":","channel","ratio","for","the","convolution","used","to","fuse","from","Fast","pathway","to","Slow","pathway",".","conv_kernel_size","(","int",")",":","kernel","size","of","the","convolution","used","to","fuse","from","Fast","pathway","to","Slow","pathway",".","conv_kernel_size_a","(","int",")",":","kernel","size","of","the","convolution","used","to","fuse","from","Audio","pathway","to","FastSlow","pathway",".","conv_stride","(","int",")",":","stride","size","of","the","convolution","used","to","fuse","from","Fast","pathway","to","Slow","pathway",".","Optionally","indexed","by","stage",".","conv_stride_a","(","int",")",":","stride","size","of","the","convolution","used","to","fuse","from","Audio","pathway","to","FastSlow","pathway",".","Optionally","indexed","by","stage",".","conv_fusion_channel_interm_dim","(","Union","[","int",",","float","]",")",":","When","conv_num_a",">","1","this","value","controls","the","dimensions","of","the","intermediate","conv","conv_num_a","(","int",")",":","Number","of","intermediate","conv","for","audio","channel","norm","(","callable",")",":","a","callable","that","constructs","normalization","layer",",","examples","include","nn.BatchNorm3d",",","None","(","not","performing","normalization",")",".","norm_eps","(","float",")",":","normalization","epsilon",".","norm_momentum","(","float",")",":","normalization","momentum",".","activation","(","callable",")",":","a","callable","that","constructs","activation","layer",",","examples","include",":","nn.ReLU",",","nn.Softmax",",","nn.Sigmoid",",","and","None","(","not","performing","activation",")",".","max_stage_idx","(","int",")",":","Returns","identity","module","if","we","exceed","this","``","''","''","set_attributes","(","self",",","locals","(",")",")"]
241
290
null
audio_visual_slowfast.py
pytorchvideo/pytorchvideo/models/audio_visual_slowfast.py
from typing import Callable, Tuple, Union import torch import torch.nn from pytorchvideo.layers.utils import set_attributes from pytorchvideo.models.resnet import create_acoustic_bottleneck_block, create_bottleneck_block from pytorchvideo.models.slowfast import create_slowfast from pytorchvideo.models.stem import create_acoustic_res_basic_stem, create_res_basic_stem
15
2
7
1
1
2
null
Use image node_id 1 to create a new AudioToSlowFastFusionBuilder object with example: obj = AudioToSlowFastFusionBuilder(slowfast_channel_reduction_ratio, slowfast_audio_reduction_ratio, conv_fusion_channel_ratio, conv_kernel_size, conv_kernel_size_a, conv_stride, conv_stride_a, conv_fusion_channel_interm_dim, conv_num_a, norm, norm_eps, norm_momentum, activation, max_stage_idx)
382
node_id 1
1,777,572
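A construction sketch; the numeric values are illustrative guesses, not known-good defaults (the constructor only stores its arguments via set_attributes):

from pytorchvideo.models.audio_visual_slowfast import AudioToSlowFastFusionBuilder

builder = AudioToSlowFastFusionBuilder(
    slowfast_channel_reduction_ratio=8,
    slowfast_audio_reduction_ratio=16,
    conv_fusion_channel_ratio=2.0,
    conv_kernel_size=(7, 1, 1),
    conv_kernel_size_a=(5, 1, 1),
    conv_stride=(4, 1, 1),
    conv_stride_a=(8, 1, 1),
)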
_load_imgs
global
null
false
file_paths,slice_,color,resize
null
null
null
null
faces
def _load_imgs(file_paths, slice_, color, resize): """Internally used to load images""" try: from PIL import Image except ImportError: raise ImportError( "The Python Imaging Library (PIL) is required to load data " "from jpeg files. Please refer to " "https://pillow.readthedocs.io/en/stable/installation.html " "for installing PIL." ) # compute the portion of the images to load to respect the slice_ parameter # given by the caller default_slice = (slice(0, 250), slice(0, 250)) if slice_ is None: slice_ = default_slice else: slice_ = tuple( s or ds for s, ds in zip(slice_, default_slice) ) h_slice, w_slice = slice_ h = (h_slice.stop - h_slice.start) // (h_slice.step or 1) w = (w_slice.stop - w_slice.start) // (w_slice.step or 1) if resize is not None: resize = float(resize) h = int(resize * h) w = int(resize * w) # allocate some contiguous memory to host the decoded image slices n_faces = len(file_paths) if not color: faces = np.zeros((n_faces, h, w), dtype=np.float32) else: faces = np.zeros((n_faces, h, w, 3), dtype=np.float32) # iterate over the collected file path to load the jpeg files as numpy # arrays for i, file_path in enumerate(file_paths): if i % 1000 == 0: logger.debug("Loading face #%05d / %05d", i + 1, n_faces) # Checks if jpeg reading worked. Refer to issue #3594 for more # details. pil_img = Image.open(file_path) pil_img = pil_img.crop( (w_slice.start, h_slice.start, w_slice.stop, h_slice.stop) ) if resize is not None: pil_img = pil_img.resize((w, h)) face = np.asarray(pil_img, dtype=np.float32) if face.ndim == 0: raise RuntimeError( "Failed to read the image file %s, " "Please make sure that libjpeg is installed" % file_path ) face /= ( 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats ) if not color: # average the color channels to compute a gray levels # representation face = face.mean(axis=2) faces[i, ...] = face return faces
["def","_load_imgs","(","file_paths",",","slice_",",","color",",","resize",")",":","``","''","''","Internally","used","to","load","images","''","''","''","try",":","from","PIL","import","Image","except","ImportError",":","raise","ImportError","(","``","The","Python","Imaging","Library","(","PIL",")","is","required","to","load","data","``","``","from","jpeg","files",".","Please","refer","to","``","``","https",":","\/\/pillow.readthedocs.io\/en\/stable\/installation.html","``","``","for","installing","PIL",".","''",")","#","compute","the","portion","of","the","images","to","load","to","respect","the","slice_","parameter","#","given","by","the","caller","default_slice","=","(","slice","(","0",",","250",")",",","slice","(","0",",","250",")",")","if","slice_","is","None",":","slice_","=","default_slice","else",":","slice_","=","tuple","(","s","or","ds","for","s",",","ds","in","zip","(","slice_",",","default_slice",")",")","h_slice",",","w_slice","=","slice_","h","=","(","h_slice.stop","-","h_slice.start",")","\/\/","(","h_slice.step","or","1",")","w","=","(","w_slice.stop","-","w_slice.start",")","\/\/","(","w_slice.step","or","1",")","if","resize","is","not","None",":","resize","=","float","(","resize",")","h","=","int","(","resize","*","h",")","w","=","int","(","resize","*","w",")","#","allocate","some","contiguous","memory","to","host","the","decoded","image","slices","n_faces","=","len","(","file_paths",")","if","not","color",":","faces","=","np.zeros","(","(","n_faces",",","h",",","w",")",",","dtype=np.float32",")","else",":","faces","=","np.zeros","(","(","n_faces",",","h",",","w",",","3",")",",","dtype=np.float32",")","#","iterate","over","the","collected","file","path","to","load","the","jpeg","files","as","numpy","#","arrays","for","i",",","file_path","in","enumerate","(","file_paths",")",":","if","i","%","1000","==","0",":","logger.debug","(","``","Loading","face","#","%","05d","\/","%","05d","''",",","i","+","1",",","n_faces",")","#","Checks","if","jpeg","reading","worked",".","Refer","to","issue","#","3594","for","more","#","details",".","pil_img","=","Image.open","(","file_path",")","pil_img","=","pil_img.crop","(","(","w_slice.start",",","h_slice.start",",","w_slice.stop",",","h_slice.stop",")",")","if","resize","is","not","None",":","pil_img","=","pil_img.resize","(","(","w",",","h",")",")","face","=","np.asarray","(","pil_img",",","dtype=np.float32",")","if","face.ndim","==","0",":","raise","RuntimeError","(","``","Failed","to","read","the","image","file","%","s",",","``","``","Please","make","sure","that","libjpeg","is","installed","''","%","file_path",")","face","\/=","(","255.0","#","scale","uint8","coded","colors","to","the","[","0.0",",","1.0","]","floats",")","if","not","color",":","#","average","the","color","channels","to","compute","a","gray","levels","#","representation","face","=","face.mean","(","axis=2",")","faces","[","i",",","...","]","=","face","return","faces"]
118
184
null
_lfw.py
catboost/contrib/python/scikit-learn/py3/sklearn/datasets/_lfw.py
import logging from numbers import Integral, Real from os import PathLike, listdir, makedirs, remove from os.path import exists, isdir, join import numpy from joblib import Memory from ..utils import Bunch from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params from ._base import RemoteFileMetadata, _fetch_remote, get_data_home, load_descr
15
null
9
6
null
null
null
Use image node_id 2 for calling a global function with example usage: _load_imgs(file_paths, slice_, color, resize) and returns: faces
134
node_id 2
520,100
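A hedged sketch (requires Pillow and real JPEG files at the listed paths, which are hypothetical here):

paths = ["/tmp/faces/a.jpg", "/tmp/faces/b.jpg"]  # hypothetical files
faces = _load_imgs(
    paths,
    slice_=(slice(70, 195), slice(78, 172)),  # crop window; None means (0, 250)
    color=False,                              # gray: average the RGB channels
    resize=0.5,
)
print(faces.shape)  # (2, 62, 47) for this crop and resize factor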
itilbert
global
null
false
x,h,period,_cache
null
null
null
null
convolve,unknown,int,unknown
def itilbert(x, h, period=None, _cache=_cache): """ Return inverse h-Tilbert transform of a periodic sequence x. If ``x_j`` and ``y_j`` are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = -sqrt(-1)*tanh(j*h*2*pi/period) * x_j y_0 = 0 For more details, see `tilbert`. """ tmp = asarray(x) if iscomplexobj(tmp): return itilbert(tmp.real, h, period) + 1j * itilbert( tmp.imag, h, period ) if period is not None: h = h * 2 * pi / period n = len(x) omega = _cache.get((n, h)) if omega is None: if len(_cache) > 20: while _cache: _cache.popitem() def kernel(k, h=h): if k: return -tanh(h * k) return 0 omega = convolve.init_convolution_kernel(n, kernel, d=1) _cache[(n, h)] = omega overwrite_x = _datacopied(tmp, x) return convolve.convolve( tmp, omega, swap_real_imag=1, overwrite_x=overwrite_x )
["def","itilbert","(","x",",","h",",","period=None",",","_cache=_cache",")",":","``","''","''","Return","inverse","h-Tilbert","transform","of","a","periodic","sequence","x",".","If","``","x_j","``","and","``","y_j","``","are","Fourier","coefficients","of","periodic","functions","x","and","y",",","respectively",",","then",":",":","y_j","=","-sqrt","(","-1",")","*","tanh","(","j","*","h","*","2","*","pi\/period",")","*","x_j","y_0","=","0","For","more","details",",","see","`","tilbert","`",".","``","''","''","tmp","=","asarray","(","x",")","if","iscomplexobj","(","tmp",")",":","return","itilbert","(","tmp.real",",","h",",","period",")","+","1j","*","itilbert","(","tmp.imag",",","h",",","period",")","if","period","is","not","None",":","h","=","h","*","2","*","pi","\/","period","n","=","len","(","x",")","omega","=","_cache.get","(","(","n",",","h",")",")","if","omega","is","None",":","if","len","(","_cache",")",">","20",":","while","_cache",":","_cache.popitem","(",")","def","kernel","(","k",",","h=h",")",":","if","k",":","return","-tanh","(","h","*","k",")","return","0","omega","=","convolve.init_convolution_kernel","(","n",",","kernel",",","d=1",")","_cache","[","(","n",",","h",")","]","=","omega","overwrite_x","=","_datacopied","(","tmp",",","x",")","return","convolve.convolve","(","tmp",",","omega",",","swap_real_imag=1",",","overwrite_x=overwrite_x",")"]
159
192
null
pseudo_diffs.py
catboost/contrib/python/scipy/py2/scipy/fftpack/pseudo_diffs.py
from __future__ import division, print_function, absolute_import from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj from . import convolve from scipy.fftpack.basic import _datacopied import atexit
15
null
5
10
null
null
null
Use image node_id 3 for calling a global function with example usage: itilbert(x, h, period, _cache) and returns: convolve, unknown, int, unknown
145
node_id 3
523,399
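A round-trip sketch: tilbert multiplies Fourier mode j by sqrt(-1)*coth(j*h) and itilbert by -sqrt(-1)*tanh(j*h), so they invert each other on zero-sum input of odd length (the docstrings' caveat):

import numpy as np
from scipy.fftpack import tilbert, itilbert

n = 65  # odd length, so no Nyquist mode is zeroed
x = np.sin(2 * np.pi * np.arange(n) / n)  # zero-sum periodic sequence
assert np.allclose(itilbert(tilbert(x, h=0.5), h=0.5), x)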
tilbert
global
null
false
x,h,period,_cache
null
null
null
null
convolve,unknown,int,unknown
def tilbert(x, h, period=None, _cache=_cache): """ Return h-Tilbert transform of a periodic sequence x. If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = sqrt(-1)*coth(j*h*2*pi/period) * x_j y_0 = 0 Parameters ---------- x : array_like The input array to transform. h : float Defines the parameter of the Tilbert transform. period : float, optional The assumed period of the sequence. Default period is ``2*pi``. Returns ------- tilbert : ndarray The result of the transform. Notes ----- If ``sum(x, axis=0) == 0`` and ``n = len(x)`` is odd then ``tilbert(itilbert(x)) == x``. If ``2 * pi * h / period`` is approximately 10 or larger, then numerically ``tilbert == hilbert`` (theoretically oo-Tilbert == Hilbert). For even ``len(x)``, the Nyquist mode of ``x`` is taken zero. """ tmp = asarray(x) if iscomplexobj(tmp): return tilbert(tmp.real, h, period) + 1j * tilbert( tmp.imag, h, period ) if period is not None: h = h * 2 * pi / period n = len(x) omega = _cache.get((n, h)) if omega is None: if len(_cache) > 20: while _cache: _cache.popitem() def kernel(k, h=h): if k: return 1.0 / tanh(h * k) return 0 omega = convolve.init_convolution_kernel(n, kernel, d=1) _cache[(n, h)] = omega overwrite_x = _datacopied(tmp, x) return convolve.convolve( tmp, omega, swap_real_imag=1, overwrite_x=overwrite_x )
["def","tilbert","(","x",",","h",",","period=None",",","_cache=_cache",")",":","``","''","''","Return","h-Tilbert","transform","of","a","periodic","sequence","x",".","If","x_j","and","y_j","are","Fourier","coefficients","of","periodic","functions","x","and","y",",","respectively",",","then",":",":","y_j","=","sqrt","(","-1",")","*","coth","(","j","*","h","*","2","*","pi\/period",")","*","x_j","y_0","=","0","Parameters","--","--","--","--","--","x",":","array_like","The","input","array","to","transform",".","h",":","float","Defines","the","parameter","of","the","Tilbert","transform",".","period",":","float",",","optional","The","assumed","period","of","the","sequence",".","Default","period","is","``","2","*","pi","``",".","Returns","--","--","--","-","tilbert",":","ndarray","The","result","of","the","transform",".","Notes","--","--","-","If","``","sum","(","x",",","axis=0",")","==","0","``","and","``","n","=","len","(","x",")","``","is","odd","then","``","tilbert","(","itilbert","(","x",")",")","==","x","``",".","If","``","2","*","pi","*","h","\/","period","``","is","approximately","10","or","larger",",","then","numerically","``","tilbert","==","hilbert","``","(","theoretically","oo-Tilbert","==","Hilbert",")",".","For","even","``","len","(","x",")","``",",","the","Nyquist","mode","of","``","x","``","is","taken","zero.","``","''","''","tmp","=","asarray","(","x",")","if","iscomplexobj","(","tmp",")",":","return","tilbert","(","tmp.real",",","h",",","period",")","+","1j","*","tilbert","(","tmp.imag",",","h",",","period",")","if","period","is","not","None",":","h","=","h","*","2","*","pi","\/","period","n","=","len","(","x",")","omega","=","_cache.get","(","(","n",",","h",")",")","if","omega","is","None",":","if","len","(","_cache",")",">","20",":","while","_cache",":","_cache.popitem","(",")","def","kernel","(","k",",","h=h",")",":","if","k",":","return","1.0","\/","tanh","(","h","*","k",")","return","0","omega","=","convolve.init_convolution_kernel","(","n",",","kernel",",","d=1",")","_cache","[","(","n",",","h",")","]","=","omega","overwrite_x","=","_datacopied","(","tmp",",","x",")","return","convolve.convolve","(","tmp",",","omega",",","swap_real_imag=1",",","overwrite_x=overwrite_x",")"]
89
150
null
pseudo_diffs.py
catboost/contrib/python/scipy/py2/scipy/fftpack/pseudo_diffs.py
from __future__ import division, print_function, absolute_import from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj from . import convolve from scipy.fftpack.basic import _datacopied import atexit
15
null
5
10
null
null
null
Use image node_id 2 for calling a global function with example usage: tilbert(x, h, period, _cache) and returns: convolve, unknown, int, unknown
144
node_id 2
523,398
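Per the Notes, for large 2*pi*h/period the coth(h*j) weights approach sign(j) and tilbert converges to fftpack's hilbert; for a pure sine that is a cosine:

import numpy as np
from scipy.fftpack import tilbert, hilbert

n = 64
x = np.sin(2 * np.pi * np.arange(n) / n)
assert np.allclose(tilbert(x, h=10.0), hilbert(x), atol=1e-6)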
diff
global
null
false
x,order,period,_cache
null
null
null
null
convolve,tmp,unknown,int,pow
def diff(x, order=1, period=None, _cache=_cache): """ Return k-th derivative (or integral) of a periodic sequence x. If x_j and y_j are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = pow(sqrt(-1)*j*2*pi/period, order) * x_j y_0 = 0 if order is not 0. Parameters ---------- x : array_like Input array. order : int, optional The order of differentiation. Default order is 1. If order is negative, then integration is carried out under the assumption that ``x_0 == 0``. period : float, optional The assumed period of the sequence. Default is ``2*pi``. Notes ----- If ``sum(x, axis=0) = 0`` then ``diff(diff(x, k), -k) == x`` (within numerical accuracy). For odd order and even ``len(x)``, the Nyquist mode is taken zero. """ tmp = asarray(x) if order == 0: return tmp if iscomplexobj(tmp): return diff(tmp.real, order, period) + 1j * diff( tmp.imag, order, period ) if period is not None: c = 2 * pi / period else: c = 1.0 n = len(x) omega = _cache.get((n, order, c)) if omega is None: if len(_cache) > 20: while _cache: _cache.popitem() def kernel(k, order=order, c=c): if k: return pow(c * k, order) return 0 omega = convolve.init_convolution_kernel( n, kernel, d=order, zero_nyquist=1 ) _cache[(n, order, c)] = omega overwrite_x = _datacopied(tmp, x) return convolve.convolve( tmp, omega, swap_real_imag=order % 2, overwrite_x=overwrite_x )
["def","diff","(","x",",","order=1",",","period=None",",","_cache=_cache",")",":","``","''","''","Return","k-th","derivative","(","or","integral",")","of","a","periodic","sequence","x",".","If","x_j","and","y_j","are","Fourier","coefficients","of","periodic","functions","x","and","y",",","respectively",",","then",":",":","y_j","=","pow","(","sqrt","(","-1",")","*","j","*","2","*","pi\/period",",","order",")","*","x_j","y_0","=","0","if","order","is","not","0",".","Parameters","--","--","--","--","--","x",":","array_like","Input","array",".","order",":","int",",","optional","The","order","of","differentiation",".","Default","order","is","1",".","If","order","is","negative",",","then","integration","is","carried","out","under","the","assumption","that","``","x_0","==","0","``",".","period",":","float",",","optional","The","assumed","period","of","the","sequence",".","Default","is","``","2","*","pi","``",".","Notes","--","--","-","If","``","sum","(","x",",","axis=0",")","=","0","``","then","``","diff","(","diff","(","x",",","k",")",",","-k",")","==","x","``","(","within","numerical","accuracy",")",".","For","odd","order","and","even","``","len","(","x",")","``",",","the","Nyquist","mode","is","taken","zero.","``","''","''","tmp","=","asarray","(","x",")","if","order","==","0",":","return","tmp","if","iscomplexobj","(","tmp",")",":","return","diff","(","tmp.real",",","order",",","period",")","+","1j","*","diff","(","tmp.imag",",","order",",","period",")","if","period","is","not","None",":","c","=","2","*","pi","\/","period","else",":","c","=","1.0","n","=","len","(","x",")","omega","=","_cache.get","(","(","n",",","order",",","c",")",")","if","omega","is","None",":","if","len","(","_cache",")",">","20",":","while","_cache",":","_cache.popitem","(",")","def","kernel","(","k",",","order=order",",","c=c",")",":","if","k",":","return","pow","(","c","*","k",",","order",")","return","0","omega","=","convolve.init_convolution_kernel","(","n",",","kernel",",","d=order",",","zero_nyquist=1",")","_cache","[","(","n",",","order",",","c",")","]","=","omega","overwrite_x","=","_datacopied","(","tmp",",","x",")","return","convolve.convolve","(","tmp",",","omega",",","swap_real_imag=order","%","2",",","overwrite_x=overwrite_x",")"]
26
80
null
pseudo_diffs.py
catboost/contrib/python/scipy/py2/scipy/fftpack/pseudo_diffs.py
from __future__ import division, print_function, absolute_import from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj from . import convolve from scipy.fftpack.basic import _datacopied import atexit
15
null
5
10
null
null
null
Use image node_id 1 for calling a global function with example usage: diff(x, order, period, _cache) and returns: convolve, tmp, unknown, int, pow
146
node_id 1
523,397
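A quick check of the spectral derivative on a full period (default period is 2*pi): differentiating sin gives cos, and order=-1 integrates a zero-mean sequence back:

import numpy as np
from scipy.fftpack import diff

t = np.linspace(0, 2 * np.pi, 64, endpoint=False)
assert np.allclose(diff(np.sin(t)), np.cos(t))            # k-th derivative, k=1
assert np.allclose(diff(np.cos(t), order=-1), np.sin(t))  # integration, y_0 = 0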
betweenness_centrality
global
null
false
input_graph,k,normalized,weight,endpoints,random_state
null
null
null
null
ddf,input_graph
def betweenness_centrality(
    input_graph,
    k: Union[
        int,
        list,
        cudf.Series,
        cudf.DataFrame,
        dask_cudf.Series,
        dask_cudf.DataFrame,
    ] = None,
    normalized: bool = True,
    weight: cudf.DataFrame = None,
    endpoints: bool = False,
    random_state: int = None,
) -> dask_cudf.DataFrame:
    """
    Compute the betweenness centrality for all vertices of the graph G.
    Betweenness centrality is a measure of the number of shortest paths
    that pass through a vertex. A vertex with a high betweenness centrality
    score has more paths passing through it and is therefore believed to be
    more important.

    To improve performance, rather than doing an all-pair shortest path,
    a sample of k starting vertices can be used.

    CuGraph does not currently support 'weight' parameters.

    Parameters
    ----------
    input_graph: cuGraph.Graph
        The graph can be either directed (Graph(directed=True)) or undirected.
        The current implementation uses a parallel variation of the Brandes
        Algorithm (2001) to compute exact or approximate betweenness.
        If weights are provided in the edgelist, they will not be used.

    k : int, list or (dask)cudf object or None, optional (default=None)
        If k is not None, use k node samples to estimate betweenness. Higher
        values give better approximation.
        If k is either a list, a cudf DataFrame, or a dask_cudf DataFrame,
        then its contents are assumed to be vertex identifiers to be used
        for estimation.
        If k is None (the default), all the vertices are used to estimate
        betweenness. Vertices obtained through sampling or defined as a list
        will be used as sources for traversals inside the algorithm.

    normalized : bool, optional (default=True)
        If True, normalize the resulting betweenness centrality values by
        __2 / ((n - 1) * (n - 2))__ for undirected Graphs, and
        __1 / ((n - 1) * (n - 2))__ for directed Graphs
        where n is the number of nodes in G.
        Normalization will ensure that values are in [0, 1],
        this normalization scales for the highest possible value where one
        node is crossed by every single shortest path.

    weight : (dask)cudf.DataFrame, optional (default=None)
        Specifies the weights to be used for each edge.
        Should contain a mapping between
        edges and weights.
        (Not Supported)

    endpoints : bool, optional (default=False)
        If true, include the endpoints in the shortest path counts.

    random_state : int, optional (default=None)
        if k is specified and k is an integer, use random_state to initialize
        the random number generator.
        Using None defaults to a hash of process id, time, and hostname.
        If k is either None or list or cudf objects: random_state parameter
        is ignored.

    Returns
    -------
    betweenness_centrality : dask_cudf.DataFrame
        GPU distributed data frame containing two dask_cudf.Series of size V:
        the vertex identifiers and the corresponding betweenness centrality
        values.

        ddf['vertex'] : dask_cudf.Series
            Contains the vertex identifiers
        ddf['betweenness_centrality'] : dask_cudf.Series
            Contains the betweenness centrality of vertices

    Examples
    --------
    >>> import cugraph.dask as dcg
    >>> import dask_cudf
    >>> # ... Init a DASK Cluster
    >>> #    see https://docs.rapids.ai/api/cugraph/stable/dask-cugraph.html
    >>> # Download dataset from https://github.com/rapidsai/cugraph/datasets/..
    >>> chunksize = dcg.get_chunksize(datasets_path / "karate.csv")
    >>> ddf = dask_cudf.read_csv(datasets_path / "karate.csv",
    ...                          chunksize=chunksize, delimiter=" ",
    ...                          names=["src", "dst", "value"],
    ...                          dtype=["int32", "int32", "float32"])
    >>> dg = cugraph.Graph(directed=True)
    >>> dg.from_dask_cudf_edgelist(ddf, source='src', destination='dst')
    >>> pr = dcg.betweenness_centrality(dg)

    """
    if input_graph.store_transposed is True:
        warning_msg = (
            "Betweenness centrality expects the 'store_transposed' flag "
            "to be set to 'False' for optimal performance during "
            "the graph creation"
        )
        warnings.warn(warning_msg, UserWarning)

    if weight is not None:
        raise NotImplementedError(
            "weighted implementation of betweenness "
            "centrality not currently supported"
        )

    if not isinstance(k, (dask_cudf.DataFrame, dask_cudf.Series)):
        if isinstance(k, (cudf.DataFrame, cudf.Series, list)):
            if isinstance(k, list):
                k_dtype = input_graph.nodes().dtype
                k = cudf.Series(k, dtype=k_dtype)

        if isinstance(k, (cudf.Series, cudf.DataFrame)):
            splits = cp.array_split(cp.arange(len(k)), len(Comms.get_workers()))
            k = {w: [k.iloc[splits[i]]] for i, w in enumerate(Comms.get_workers())}

    else:
        if k is not None:
            k = get_distributed_data(k)
            wait(k)
            k = k.worker_to_parts

    if input_graph.renumbered:
        if isinstance(k, dask_cudf.DataFrame):
            tmp_col_names = k.columns

        elif isinstance(k, dask_cudf.Series):
            tmp_col_names = None

        if isinstance(k, (dask_cudf.DataFrame, dask_cudf.Series)):
            k = input_graph.lookup_internal_vertex_id(k, tmp_col_names)

    # FIXME: should we add this parameter as an option?
    do_expensive_check = False

    client = get_client()

    ddf = _mg_call_plc_betweenness_centrality(
        input_graph=input_graph,
        client=client,
        sID=Comms.get_session_id(),
        k=k,
        random_state=random_state,
        normalized=normalized,
        endpoints=endpoints,
        do_expensive_check=do_expensive_check,
    )

    if input_graph.renumbered:
        return input_graph.unrenumber(ddf, "vertex")

    return ddf
["def","betweenness_centrality","(","input_graph",",","k",":","Union","[","int",",","list",",","cudf.Series",",","cudf.DataFrame",",","dask_cudf.Series",",","dask_cudf.DataFrame",",","]","=","None",",","normalized",":","bool","=","True",",","weight",":","cudf.DataFrame","=","None",",","endpoints",":","bool","=","False",",","random_state",":","int","=","None",",",")","-",">","dask_cudf.DataFrame",":","``","''","''","Compute","the","betweenness","centrality","for","all","vertices","of","the","graph","G.","Betweenness","centrality","is","a","measure","of","the","number","of","shortest","paths","that","pass","through","a","vertex",".","A","vertex","with","a","high","betweenness","centrality","score","has","more","paths","passing","through","it","and","is","therefore","believed","to","be","more","important",".","To","improve","performance",".","rather","than","doing","an","all-pair","shortest","path",",","a","sample","of","k","starting","vertices","can","be","used",".","CuGraph","does","not","currently","support","'weight","'","parameters",".","Parameters","--","--","--","--","--","input_graph",":","cuGraph.Graph","The","graph","can","be","either","directed","(","Graph","(","directed=True",")",")","or","undirected",".","The","current","implementation","uses","a","parallel","variation","of","the","Brandes","Algorithm","(","2001",")","to","compute","exact","or","approximate","betweenness",".","If","weights","are","provided","in","the","edgelist",",","they","will","not","be","used",".","k",":","int",",","list","or","(","dask",")","cudf","object","or","None",",","optional","(","default=None",")","If","k","is","not","None",",","use","k","node","samples","to","estimate","betweenness",".","Higher","values","give","better","approximation",".","If","k","is","either","a","list",",","a","cudf","DataFrame",",","or","a","dask_cudf","DataFrame",",","then","its","contents","are","assumed","to","be","vertex","identifiers","to","be","used","for","estimation",".","If","k","is","None","(","the","default",")",",","all","the","vertices","are","used","to","estimate","betweenness",".","Vertices","obtained","through","sampling","or","defined","as","a","list","will","be","used","as","sources","for","traversals","inside","the","algorithm",".","normalized",":","bool",",","optional","(","default=True",")","If","True",",","normalize","the","resulting","betweenness","centrality","values","by","__2","\/","(","(","n","-","1",")","*","(","n","-","2",")",")","__","for","undirected","Graphs",",","and","__1","\/","(","(","n","-","1",")","*","(","n","-","2",")",")","__","for","directed","Graphs","where","n","is","the","number","of","nodes","in","G.","Normalization","will","ensure","that","values","are","in","[","0",",","1","]",",","this","normalization","scales","for","the","highest","possible","value","where","one","node","is","crossed","by","every","single","shortest","path",".","weight",":","(","dask",")","cudf.DataFrame",",","optional","(","default=None",")","Specifies","the","weights","to","be","used","for","each","edge",".","Should","contain","a","mapping","between","edges","and","weights",".","(","Not","Supported",")","endpoints",":","bool",",","optional","(","default=False",")","If","true",",","include","the","endpoints","in","the","shortest","path","counts",".","random_state",":","int",",","optional","(","default=None",")","if","k","is","specified","and","k","is","an","integer",",","use","random_state","to","initialize","the","random","number","generator",".","Using","None","defaults","to","a","hash","of","process","id",",
","time",",","and","hostname","If","k","is","either","None","or","list","or","cudf","objects",":","random_state","parameter","is","ignored",".","Returns","--","--","--","-","betweenness_centrality",":","dask_cudf.DataFrame","GPU","distributed","data","frame","containing","two","dask_cudf.Series","of","size","V",":","the","vertex","identifiers","and","the","corresponding","betweenness","centrality","values",".","ddf","[","'vertex","'","]",":","dask_cudf.Series","Contains","the","vertex","identifiers","ddf","[","'betweenness_centrality","'","]",":","dask_cudf.Series","Contains","the","betweenness","centrality","of","vertices","Examples","--","--","--","--",">",">",">","import","cugraph.dask","as","dcg",">",">",">","import","dask_cudf",">",">",">","#","...","Init","a","DASK","Cluster",">",">",">","#","see","https",":","\/\/docs.rapids.ai\/api\/cugraph\/stable\/dask-cugraph.html",">",">",">","#","Download","dataset","from","https",":","\/\/github.com\/rapidsai\/cugraph\/datasets\/","..",">",">",">","chunksize","=","dcg.get_chunksize","(","datasets_path","\/","``","karate.csv","''",")",">",">",">","ddf","=","dask_cudf.read_csv","(","datasets_path","\/","``","karate.csv","''",",","...","chunksize=chunksize",",","delimiter=","''","``",",","...","names=","[","``","src","''",",","``","dst","''",",","``","value","''","]",",","...","dtype=","[","``","int32","''",",","``","int32","''",",","``","float32","''","]",")",">",">",">","dg","=","cugraph.Graph","(","directed=True",")",">",">",">","dg.from_dask_cudf_edgelist","(","ddf",",","source='src","'",",","destination='dst","'",")",">",">",">","pr","=","dcg.betweenness_centrality","(","dg",")","``","''","''","if","input_graph.store_transposed","is","True",":","warning_msg","=","(","``","Betweenness","centrality","expects","the","'store_transposed","'","flag","``","``","to","be","set","to","'False","'","for","optimal","performance","during","``","``","the","graph","creation","''",")","warnings.warn","(","warning_msg",",","UserWarning",")","if","weight","is","not","None",":","raise","NotImplementedError","(","``","weighted","implementation","of","betweenness","``","``","centrality","not","currently","supported","''",")","if","not","isinstance","(","k",",","(","dask_cudf.DataFrame",",","dask_cudf.Series",")",")",":","if","isinstance","(","k",",","(","cudf.DataFrame",",","cudf.Series",",","list",")",")",":","if","isinstance","(","k",",","list",")",":","k_dtype","=","input_graph.nodes","(",")",".dtype","k","=","cudf.Series","(","k",",","dtype=k_dtype",")","if","isinstance","(","k",",","(","cudf.Series",",","cudf.DataFrame",")",")",":","splits","=","cp.array_split","(","cp.arange","(","len","(","k",")",")",",","len","(","Comms.get_workers","(",")",")",")","k","=","{","w",":","[","k.iloc","[","splits","[","i","]","]","]","for","i",",","w","in","enumerate","(","Comms.get_workers","(",")",")","}","else",":","if","k","is","not","None",":","k","=","get_distributed_data","(","k",")","wait","(","k",")","k","=","k.worker_to_parts","if","input_graph.renumbered",":","if","isinstance","(","k",",","dask_cudf.DataFrame",")",":","tmp_col_names","=","k.columns","elif","isinstance","(","k",",","dask_cudf.Series",")",":","tmp_col_names","=","None","if","isinstance","(","k",",","(","dask_cudf.DataFrame",",","dask_cudf.Series",")",")",":","k","=","input_graph.lookup_internal_vertex_id","(","k",",","tmp_col_names",")","#","FIXME",":","should","we","add","this","parameter","as","an","option","?","do_expensive_check","=","False","client","=","get_client","(",")","ddf","=","_mg_call_p
lc_betweenness_centrality","(","input_graph=input_graph",",","client=client",",","sID=Comms.get_session_id","(",")",",","k=k",",","random_state=random_state",",","normalized=normalized",",","endpoints=endpoints",",","do_expensive_check=do_expensive_check",",",")","if","input_graph.renumbered",":","return","input_graph.unrenumber","(","ddf",",","``","vertex","''",")","return","ddf"]
123
275
null
betweenness_centrality.py
cugraph/python/cugraph/cugraph/dask/centrality/betweenness_centrality.py
from dask.distributed import wait, get_client from pylibcugraph import ResourceHandle, betweenness_centrality, edge_betweenness_centrality import cugraph.dask.comms.comms from cugraph.dask.common.input_utils import get_distributed_data import dask_cudf import cudf import cupy import warnings import dask from typing import Union
15
null
10
5
null
null
null
Use image node_id 4 for calling a global function with example usage: betweenness_centrality(input_graph, k, normalized, weight, endpoints, random_state) and returns: ddf, input_graph
183
node_id 4
686,101
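A minimal usage sketch for the betweenness_centrality record above, covering the sampled (approximate) mode that the docstring example does not show. The running Dask cluster, initialized cugraph Comms, and datasets_path location are assumptions:

import cugraph
import cugraph.dask as dcg
import dask_cudf

# Assumes a Dask cluster and cugraph.dask.comms are already initialized.
chunksize = dcg.get_chunksize(datasets_path / "karate.csv")
ddf = dask_cudf.read_csv(datasets_path / "karate.csv", chunksize=chunksize,
                         delimiter=" ", names=["src", "dst", "value"],
                         dtype=["int32", "int32", "float32"])
dg = cugraph.Graph(directed=True)
dg.from_dask_cudf_edgelist(ddf, source="src", destination="dst")
# Estimate betweenness from 8 sampled source vertices; random_state makes
# the sampling reproducible.
bc = dcg.betweenness_centrality(dg, k=8, random_state=42)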
edge_betweenness_centrality
global
null
false
input_graph,k,normalized,weight,random_state
null
null
null
null
ddf,input_graph
def edge_betweenness_centrality(
    input_graph,
    k: Union[
        int, list, cudf.Series, cudf.DataFrame, dask_cudf.Series, dask_cudf.DataFrame
    ] = None,
    normalized: bool = True,
    weight: cudf.DataFrame = None,
    random_state: int = None,
) -> dask_cudf.DataFrame:
    """
    Compute the edge betweenness centrality for all edges of the graph G.
    Betweenness centrality is a measure of the number of shortest paths
    that pass over an edge. An edge with a high betweenness centrality
    score has more paths passing over it and is therefore believed to be
    more important.

    To improve performance, rather than doing an all-pairs shortest path,
    a sample of k starting vertices can be used.

    CuGraph does not currently support the 'weight' parameter.

    Parameters
    ----------
    input_graph: cuGraph.Graph
        The graph can be either directed (Graph(directed=True)) or undirected.
        The current implementation uses a parallel variation of the Brandes
        Algorithm (2001) to compute exact or approximate betweenness.
        If weights are provided in the edgelist, they will not be used.

    k : int, list or (dask)cudf object or None, optional (default=None)
        If k is not None, use k node samples to estimate betweenness. Higher
        values give better approximation. If k is either a list, a cudf
        DataFrame, or a dask_cudf DataFrame, then its contents are assumed
        to be vertex identifiers to be used for estimation. If k is None
        (the default), all the vertices are used to estimate betweenness.
        Vertices obtained through sampling or defined as a list will be used
        as sources for traversals inside the algorithm.

    normalized : bool, optional (default=True)
        If True, normalize the resulting betweenness centrality values by
        __2 / (n * (n - 1))__ for undirected Graphs, and
        __1 / (n * (n - 1))__ for directed Graphs
        where n is the number of nodes in G.
        Normalization will ensure that values are in [0, 1], this
        normalization scales for the highest possible value where one edge
        is crossed by every single shortest path.

    weight : (dask)cudf.DataFrame, optional (default=None)
        Specifies the weights to be used for each edge.
        Should contain a mapping between edges and weights.
        (Not Supported)

    random_state : int, optional (default=None)
        if k is specified and k is an integer, use random_state to
        initialize the random number generator.
        Using None defaults to a hash of process id, time, and hostname.
        If k is either None or list or cudf objects: random_state parameter
        is ignored.

    Returns
    -------
    betweenness_centrality : dask_cudf.DataFrame
        GPU distributed data frame containing two dask_cudf.Series of size V:
        the vertex identifiers and the corresponding betweenness centrality
        values.

        ddf['src'] : dask_cudf.Series
            Contains the vertex identifiers of the source of each edge
        ddf['dst'] : dask_cudf.Series
            Contains the vertex identifiers of the destination of each edge
        ddf['betweenness_centrality'] : dask_cudf.Series
            Contains the betweenness centrality of edges
        ddf["edge_id"] : dask_cudf.Series
            Contains the edge ids of edges if present.

    Examples
    --------
    >>> import cugraph.dask as dcg
    >>> import dask_cudf
    >>> # ... Init a DASK Cluster
    >>> # see https://docs.rapids.ai/api/cugraph/stable/dask-cugraph.html
    >>> # Download dataset from https://github.com/rapidsai/cugraph/datasets/..
    >>> chunksize = dcg.get_chunksize(datasets_path / "karate.csv")
    >>> ddf = dask_cudf.read_csv(datasets_path / "karate.csv",
    ...                          chunksize=chunksize, delimiter=" ",
    ...                          names=["src", "dst", "value"],
    ...
dtype=["int32", "int32", "float32"])
    >>> dg = cugraph.Graph(directed=True)
    >>> dg.from_dask_cudf_edgelist(ddf, source='src', destination='dst')
    >>> pr = dcg.edge_betweenness_centrality(dg)

    """
    if input_graph.store_transposed is True:
        warning_msg = (
            "Betweenness centrality expects the 'store_transposed' flag "
            "to be set to 'False' for optimal performance during "
            "the graph creation"
        )
        warnings.warn(warning_msg, UserWarning)

    if weight is not None:
        raise NotImplementedError(
            "weighted implementation of edge betweenness "
            "centrality not currently supported"
        )

    if not isinstance(k, (dask_cudf.DataFrame, dask_cudf.Series)):
        if isinstance(k, (cudf.DataFrame, cudf.Series, list)):
            if isinstance(k, list):
                k_dtype = input_graph.nodes().dtype
                k = cudf.Series(k, dtype=k_dtype)

        if isinstance(k, (cudf.Series, cudf.DataFrame)):
            splits = cp.array_split(cp.arange(len(k)), len(Comms.get_workers()))
            k = {
                w: [k.iloc[splits[i]]] for i, w in enumerate(Comms.get_workers())
            }

        else:
            if k is not None:
                k = get_distributed_data(k)
                wait(k)
                k = k.worker_to_parts

    if input_graph.renumbered:
        if isinstance(k, dask_cudf.DataFrame):
            tmp_col_names = k.columns

        elif isinstance(k, dask_cudf.Series):
            tmp_col_names = None

        if isinstance(k, (dask_cudf.DataFrame, dask_cudf.Series)):
            k = input_graph.lookup_internal_vertex_id(k, tmp_col_names)

    # FIXME: should we add this parameter as an option?
    do_expensive_check = False

    client = get_client()

    ddf = _mg_call_plc_betweenness_centrality(
        input_graph=input_graph,
        client=client,
        sID=Comms.get_session_id(),
        k=k,
        random_state=random_state,
        normalized=normalized,
        do_expensive_check=do_expensive_check,
        edge_bc=True,
    )

    if input_graph.renumbered:
        return input_graph.unrenumber(ddf, "vertex")

    if input_graph.is_directed() is False:
        # swap the src and dst vertices for the lower triangle only. Because
        # this is a symmetrized graph, this operation results in a df with
        # multiple src/dst entries.
        ddf["src"], ddf["dst"] = ddf[["src", "dst"]].min(axis=1), ddf[
            ["src", "dst"]
        ].max(axis=1)
        # overwrite the df with the sum of the values for all alike src/dst
        # vertex pairs, resulting in half the edges of the original df from
        # the symmetrized graph.
        ddf = ddf.groupby(by=["src", "dst"]).sum().reset_index()

    return ddf
["def","edge_betweenness_centrality","(","input_graph",",","k",":","Union","[","int",",","list",",","cudf.Series",",","cudf.DataFrame",",","dask_cudf.Series",",","dask_cudf.DataFrame",",","]","=","None",",","normalized",":","bool","=","True",",","weight",":","cudf.DataFrame","=","None",",","random_state",":","int","=","None",",",")","-",">","dask_cudf.DataFrame",":","``","''","''","Compute","the","edge","betweenness","centrality","for","all","edges","of","the","graph","G.","Betweenness","centrality","is","a","measure","of","the","number","of","shortest","paths","that","pass","over","an","edge",".","An","edge","with","a","high","betweenness","centrality","score","has","more","paths","passing","over","it","and","is","therefore","believed","to","be","more","important",".","To","improve","performance",".","rather","than","doing","an","all-pair","shortest","path",",","a","sample","of","k","starting","vertices","can","be","used",".","CuGraph","does","not","currently","support","the","'weight","'","parameter",".","Parameters","--","--","--","--","--","input_graph",":","cuGraph.Graph","The","graph","can","be","either","directed","(","Graph","(","directed=True",")",")","or","undirected",".","The","current","implementation","uses","a","parallel","variation","of","the","Brandes","Algorithm","(","2001",")","to","compute","exact","or","approximate","betweenness",".","If","weights","are","provided","in","the","edgelist",",","they","will","not","be","used",".","k",":","int",",","list","or","(","dask",")","cudf","object","or","None",",","optional","(","default=None",")","If","k","is","not","None",",","use","k","node","samples","to","estimate","betweenness",".","Higher","values","give","better","approximation",".","If","k","is","either","a","list",",","a","cudf","DataFrame",",","or","a","dask_cudf","DataFrame",",","then","its","contents","are","assumed","to","be","vertex","identifiers","to","be","used","for","estimation",".","If","k","is","None","(","the","default",")",",","all","the","vertices","are","used","to","estimate","betweenness",".","Vertices","obtained","through","sampling","or","defined","as","a","list","will","be","used","as","sources","for","traversals","inside","the","algorithm",".","normalized",":","bool",",","optional","(","default=True",")","If","True",",","normalize","the","resulting","betweenness","centrality","values","by","__2","\/","(","n","*","(","n","-","1",")",")","__","for","undirected","Graphs",",","and","__1","\/","(","n","*","(","n","-","1",")",")","__","for","directed","Graphs","where","n","is","the","number","of","nodes","in","G.","Normalization","will","ensure","that","values","are","in","[","0",",","1","]",",","this","normalization","scales","for","the","highest","possible","value","where","one","edge","is","crossed","by","every","single","shortest","path",".","weight",":","(","dask",")","cudf.DataFrame",",","optional","(","default=None",")","Specifies","the","weights","to","be","used","for","each","edge",".","Should","contain","a","mapping","between","edges","and","weights",".","(","Not","Supported",")","random_state",":","int",",","optional","(","default=None",")","if","k","is","specified","and","k","is","an","integer",",","use","random_state","to","initialize","the","random","number","generator",".","Using","None","defaults","to","a","hash","of","process","id",",","time",",","and","hostname","If","k","is","either","None","or","list","or","cudf","objects",":","random_state","parameter","is","ignored",".","Returns","--","--","--","-","betweenness_centrality",":","dask_cudf.
DataFrame","GPU","distributed","data","frame","containing","two","dask_cudf.Series","of","size","V",":","the","vertex","identifiers","and","the","corresponding","betweenness","centrality","values",".","ddf","[","'src","'","]",":","dask_cudf.Series","Contains","the","vertex","identifiers","of","the","source","of","each","edge","ddf","[","'dst","'","]",":","dask_cudf.Series","Contains","the","vertex","identifiers","of","the","destination","of","each","edge","ddf","[","'betweenness_centrality","'","]",":","dask_cudf.Series","Contains","the","betweenness","centrality","of","edges","ddf","[","``","edge_id","''","]",":","dask_cudf.Series","Contains","the","edge","ids","of","edges","if","present",".","Examples","--","--","--","--",">",">",">","import","cugraph.dask","as","dcg",">",">",">","import","dask_cudf",">",">",">","#","...","Init","a","DASK","Cluster",">",">",">","#","see","https",":","\/\/docs.rapids.ai\/api\/cugraph\/stable\/dask-cugraph.html",">",">",">","#","Download","dataset","from","https",":","\/\/github.com\/rapidsai\/cugraph\/datasets\/","..",">",">",">","chunksize","=","dcg.get_chunksize","(","datasets_path","\/","``","karate.csv","''",")",">",">",">","ddf","=","dask_cudf.read_csv","(","datasets_path","\/","``","karate.csv","''",",","...","chunksize=chunksize",",","delimiter=","''","``",",","...","names=","[","``","src","''",",","``","dst","''",",","``","value","''","]",",","...","dtype=","[","``","int32","''",",","``","int32","''",",","``","float32","''","]",")",">",">",">","dg","=","cugraph.Graph","(","directed=True",")",">",">",">","dg.from_dask_cudf_edgelist","(","ddf",",","source='src","'",",","destination='dst","'",")",">",">",">","pr","=","dcg.edge_betweenness_centrality","(","dg",")","``","''","''","if","input_graph.store_transposed","is","True",":","warning_msg","=","(","``","Betweenness","centrality","expects","the","'store_transposed","'","flag","``","``","to","be","set","to","'False","'","for","optimal","performance","during","``","``","the","graph","creation","''",")","warnings.warn","(","warning_msg",",","UserWarning",")","if","weight","is","not","None",":","raise","NotImplementedError","(","``","weighted","implementation","of","edge","betweenness","``","``","centrality","not","currently","supported","''",")","if","not","isinstance","(","k",",","(","dask_cudf.DataFrame",",","dask_cudf.Series",")",")",":","if","isinstance","(","k",",","(","cudf.DataFrame",",","cudf.Series",",","list",")",")",":","if","isinstance","(","k",",","list",")",":","k_dtype","=","input_graph.nodes","(",")",".dtype","k","=","cudf.Series","(","k",",","dtype=k_dtype",")","if","isinstance","(","k",",","(","cudf.Series",",","cudf.DataFrame",")",")",":","splits","=","cp.array_split","(","cp.arange","(","len","(","k",")",")",",","len","(","Comms.get_workers","(",")",")",")","k","=","{","w",":","[","k.iloc","[","splits","[","i","]","]","]","for","i",",","w","in","enumerate","(","Comms.get_workers","(",")",")","}","else",":","if","k","is","not","None",":","k","=","get_distributed_data","(","k",")","wait","(","k",")","k","=","k.worker_to_parts","if","input_graph.renumbered",":","if","isinstance","(","k",",","dask_cudf.DataFrame",")",":","tmp_col_names","=","k.columns","elif","isinstance","(","k",",","dask_cudf.Series",")",":","tmp_col_names","=","None","if","isinstance","(","k",",","(","dask_cudf.DataFrame",",","dask_cudf.Series",")",")",":","k","=","input_graph.lookup_internal_vertex_id","(","k",",","tmp_col_names",")","#","FIXME",":","should","we","add","this","parameter","as","an","option","?","do_e
xpensive_check","=","False","client","=","get_client","(",")","ddf","=","_mg_call_plc_betweenness_centrality","(","input_graph=input_graph",",","client=client",",","sID=Comms.get_session_id","(",")",",","k=k",",","random_state=random_state",",","normalized=normalized",",","do_expensive_check=do_expensive_check",",","edge_bc=True",",",")","if","input_graph.renumbered",":","return","input_graph.unrenumber","(","ddf",",","``","vertex","''",")","if","input_graph.is_directed","(",")","is","False",":","#","swap","the","src","and","dst","vertices","for","the","lower","triangle","only",".","Because","#","this","is","a","symmeterized","graph",",","this","operation","results","in","a","df","with","#","multiple","src\/dst","entries",".","ddf","[","``","src","''","]",",","ddf","[","``","dst","''","]","=","ddf","[","[","``","src","''",",","``","dst","''","]","]",".min","(","axis=1",")",",","ddf","[","[","``","src","''",",","``","dst","''","]","]",".max","(","axis=1",")","#","overwrite","the","df","with","the","sum","of","the","values","for","all","alike","src\/dst","#","vertex","pairs",",","resulting","in","half","the","edges","of","the","original","df","from","the","#","symmeterized","graph",".","ddf","=","ddf.groupby","(","by=","[","``","src","''",",","``","dst","''","]",")",".sum","(",")",".reset_index","(",")","return","ddf"]
278
445
null
betweenness_centrality.py
cugraph/python/cugraph/cugraph/dask/centrality/betweenness_centrality.py
from dask.distributed import wait, get_client from pylibcugraph import ResourceHandle, betweenness_centrality, edge_betweenness_centrality import cugraph.dask.comms.comms from cugraph.dask.common.input_utils import get_distributed_data import dask_cudf import cudf import cupy import warnings import dask from typing import Union
15
null
10
5
null
null
null
Use image node_id 5 for calling a global function with example usage: edge_betweenness_centrality(input_graph, k, normalized, weight, random_state) and returns: ddf, input_graph
177
node_id 5
686,102
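A usage sketch for the edge_betweenness_centrality record above, on an undirected graph so the symmetrized-edge aggregation at the end of the function applies. The cluster setup and the ddf edgelist (read as in the docstring example) are assumptions:

import cugraph
import cugraph.dask as dcg

# Assumes an initialized Dask cluster plus cugraph Comms, and that ddf was
# read as in the docstring example above.
ug = cugraph.Graph(directed=False)
ug.from_dask_cudf_edgelist(ddf, source="src", destination="dst")
# With an undirected graph, the tail of the function folds the symmetrized
# edges back together, so each src/dst pair appears once in the result.
# k may also be a list of vertex identifiers to use as traversal sources.
ebc = dcg.edge_betweenness_centrality(ug, k=[0, 1, 2])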
_check_fetch_lfw
global
null
false
data_home,funneled,download_if_missing
null
null
null
null
lfw_home, data_folder_path
def _check_fetch_lfw( data_home=None, funneled=True, download_if_missing=True ): """Helper function to download any missing LFW data""" data_home = get_data_home(data_home=data_home) lfw_home = join(data_home, "lfw_home") if not exists(lfw_home): makedirs(lfw_home) for target in TARGETS: target_filepath = join(lfw_home, target.filename) if not exists(target_filepath): if download_if_missing: logger.info( "Downloading LFW metadata: %s", target.url ) _fetch_remote(target, dirname=lfw_home) else: raise OSError("%s is missing" % target_filepath) if funneled: data_folder_path = join(lfw_home, "lfw_funneled") archive = FUNNELED_ARCHIVE else: data_folder_path = join(lfw_home, "lfw") archive = ARCHIVE if not exists(data_folder_path): archive_path = join(lfw_home, archive.filename) if not exists(archive_path): if download_if_missing: logger.info( "Downloading LFW data (~200MB): %s", archive.url ) _fetch_remote(archive, dirname=lfw_home) else: raise OSError("%s is missing" % archive_path) import tarfile logger.debug( "Decompressing the data archive to %s", data_folder_path ) tarfile.open(archive_path, "r:gz").extractall(path=lfw_home) remove(archive_path) return lfw_home, data_folder_path
["def","_check_fetch_lfw","(","data_home=None",",","funneled=True",",","download_if_missing=True",")",":","``","''","''","Helper","function","to","download","any","missing","LFW","data","''","''","''","data_home","=","get_data_home","(","data_home=data_home",")","lfw_home","=","join","(","data_home",",","``","lfw_home","''",")","if","not","exists","(","lfw_home",")",":","makedirs","(","lfw_home",")","for","target","in","TARGETS",":","target_filepath","=","join","(","lfw_home",",","target.filename",")","if","not","exists","(","target_filepath",")",":","if","download_if_missing",":","logger.info","(","``","Downloading","LFW","metadata",":","%","s","''",",","target.url",")","_fetch_remote","(","target",",","dirname=lfw_home",")","else",":","raise","OSError","(","``","%","s","is","missing","''","%","target_filepath",")","if","funneled",":","data_folder_path","=","join","(","lfw_home",",","``","lfw_funneled","''",")","archive","=","FUNNELED_ARCHIVE","else",":","data_folder_path","=","join","(","lfw_home",",","``","lfw","''",")","archive","=","ARCHIVE","if","not","exists","(","data_folder_path",")",":","archive_path","=","join","(","lfw_home",",","archive.filename",")","if","not","exists","(","archive_path",")",":","if","download_if_missing",":","logger.info","(","``","Downloading","LFW","data","(","~200MB",")",":","%","s","''",",","archive.url",")","_fetch_remote","(","archive",",","dirname=lfw_home",")","else",":","raise","OSError","(","``","%","s","is","missing","''","%","archive_path",")","import","tarfile","logger.debug","(","``","Decompressing","the","data","archive","to","%","s","''",",","data_folder_path",")","tarfile.open","(","archive_path",",","``","r",":","gz","''",")",".extractall","(","path=lfw_home",")","remove","(","archive_path",")","return","lfw_home",",","data_folder_path"]
75
115
null
_lfw.py
catboost/contrib/python/scikit-learn/py3/sklearn/datasets/_lfw.py
import logging from numbers import Integral, Real from os import PathLike, listdir, makedirs, remove from os.path import exists, isdir, join import numpy from joblib import Memory from ..utils import Bunch from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params from ._base import RemoteFileMetadata, _fetch_remote, get_data_home, load_descr
15
null
9
6
null
null
null
Use image node_id 1 for calling a global function with example usage: _check_fetch_lfw(data_home, funneled, download_if_missing) and returns: lfw_home, data_folder_path
169
node_id 1
520,099
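A sketch of calling the _check_fetch_lfw record above directly. The helper is module-private and is normally reached through sklearn.datasets.fetch_lfw_people / fetch_lfw_pairs, so importing it this way is an assumption about internals:

from sklearn.datasets._lfw import _check_fetch_lfw

# First call downloads the metadata and the ~200MB image archive into
# <data_home>/lfw_home, then decompresses it; later calls just return paths.
lfw_home, data_folder_path = _check_fetch_lfw(funneled=True,
                                              download_if_missing=True)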
_random_samples
global
null
false
lb,ub,npts
null
null
null
null
pts
def _random_samples(lb, ub, npts=10000): """ generate npts random samples between given lb & ub Inputs: lower bounds -- a list of the lower bounds upper bounds -- a list of the upper bounds npts -- number of sample points [default = 10000]""" from mystic.tools import random_state dim = len(lb) pts = random_state(module="numpy.random").rand(dim, npts) for i in range(dim): pts[i] = (pts[i] * abs(ub[i] - lb[i])) + lb[i] return pts
["def","_random_samples","(","lb",",","ub",",","npts=10000",")",":","``","''","''","generate","npts","random","samples","between","given","lb","&","ub","Inputs",":","lower","bounds","--","a","list","of","the","lower","bounds","upper","bounds","--","a","list","of","the","upper","bounds","npts","--","number","of","sample","points","[","default","=","10000","]","''","''","''","from","mystic.tools","import","random_state","dim","=","len","(","lb",")","pts","=","random_state","(","module=","''","numpy.random","''",")",".rand","(","dim",",","npts",")","for","i","in","range","(","dim",")",":","pts","[","i","]","=","(","pts","[","i","]","*","abs","(","ub","[","i","]","-","lb","[","i","]",")",")","+","lb","[","i","]","return","pts"]
17
31
null
samples.py
mystic/mystic/math/samples.py
15
null
0
15
null
null
null
Use image node_id 1 for calling a global function with example usage: _random_samples(lb, ub, npts) and returns: pts
116
node_id 1
1,407,021
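A minimal sketch of the _random_samples record above; note the (dim, npts) output shape, with one row per dimension rather than one row per point:

import numpy as np
from mystic.math.samples import _random_samples

# 5 uniform samples in the box [0, 1] x [0, 2].
pts = _random_samples(lb=[0.0, 0.0], ub=[1.0, 2.0], npts=5)
assert np.asarray(pts).shape == (2, 5)  # (dim, npts)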
random_samples
global
null
false
lb,ub,npts,dist,clip
null
null
null
null
pts,_random_samples,pts
def random_samples(lb, ub, npts=10000, dist=None, clip=False): """ generate npts samples from the given distribution between given lb & ub Inputs: dist -- a mystic.tools.Distribution instance (or list of Distributions) lower bounds -- a list of the lower bounds upper bounds -- a list of the upper bounds npts -- number of sample points [default = 10000] clip -- if True, clip at bounds, else resample [default = False] """ if dist is None: return _random_samples(lb, ub, npts) import numpy as np if hasattr(dist, "__len__"): # FIXME: isiterable pts = np.array(tuple(di(npts) for di in dist)).T else: pts = dist((npts, len(lb))) # transpose of desired shape dist = (dist,) * len(lb) pts = np.clip(pts, lb, ub).T if clip: return pts # XXX: returns a numpy.array bad = ((pts.T == lb) + (pts.T == ub)).T new = bad.sum(-1) _n, n = 1, 1000 # FIXME: fixed number of max tries while any(new): if _n == n: # XXX: slows the while loop... raise RuntimeError( "bounds could not be applied in %s iterations" % n ) for i, inew in enumerate( new ): # XXX: slows... but enables iterable dist if inew: pts[i][bad[i]] = dist[i](inew) pts = np.clip(pts.T, lb, ub).T bad = ((pts.T == lb) + (pts.T == ub)).T new = bad.sum(-1) _n += 1 return pts
["def","random_samples","(","lb",",","ub",",","npts=10000",",","dist=None",",","clip=False",")",":","``","''","''","generate","npts","samples","from","the","given","distribution","between","given","lb","&","ub","Inputs",":","dist","--","a","mystic.tools.Distribution","instance","(","or","list","of","Distributions",")","lower","bounds","--","a","list","of","the","lower","bounds","upper","bounds","--","a","list","of","the","upper","bounds","npts","--","number","of","sample","points","[","default","=","10000","]","clip","--","if","True",",","clip","at","bounds",",","else","resample","[","default","=","False","]","``","''","''","if","dist","is","None",":","return","_random_samples","(","lb",",","ub",",","npts",")","import","numpy","as","np","if","hasattr","(","dist",",","``","__len__","''",")",":","#","FIXME",":","isiterable","pts","=","np.array","(","tuple","(","di","(","npts",")","for","di","in","dist",")",")",".T","else",":","pts","=","dist","(","(","npts",",","len","(","lb",")",")",")","#","transpose","of","desired","shape","dist","=","(","dist",",",")","*","len","(","lb",")","pts","=","np.clip","(","pts",",","lb",",","ub",")",".T","if","clip",":","return","pts","#","XXX",":","returns","a","numpy.array","bad","=","(","(","pts.T","==","lb",")","+","(","pts.T","==","ub",")",")",".T","new","=","bad.sum","(","-1",")","_n",",","n","=","1",",","1000","#","FIXME",":","fixed","number","of","max","tries","while","any","(","new",")",":","if","_n","==","n",":","#","XXX",":","slows","the","while","loop","...","raise","RuntimeError","(","``","bounds","could","not","be","applied","in","%","s","iterations","''","%","n",")","for","i",",","inew","in","enumerate","(","new",")",":","#","XXX",":","slows","...","but","enables","iterable","dist","if","inew",":","pts","[","i","]","[","bad","[","i","]","]","=","dist","[","i","]","(","inew",")","pts","=","np.clip","(","pts.T",",","lb",",","ub",")",".T","bad","=","(","(","pts.T","==","lb",")","+","(","pts.T","==","ub",")",")",".T","new","=","bad.sum","(","-1",")","_n","+=","1","return","pts"]
35
68
null
samples.py
mystic/mystic/math/samples.py
15
null
0
15
null
null
null
Use image node_id 2 for calling a global function with example usage: random_samples(lb, ub, npts, dist, clip) and returns: pts, _random_samples, pts
149
node_id 2
1,407,022
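A sketch of the dist/clip path of the random_samples record above. The import path for Distribution follows the docstring's "mystic.tools.Distribution" reference, and the generator arguments are assumptions:

import numpy.random as npr
from mystic.math.samples import random_samples
from mystic.tools import Distribution  # path taken from the docstring above

# Draw from N(0.5, 0.1) in each dimension; with clip=False, points that land
# exactly on a bound are redrawn instead of clipped.
dist = Distribution(npr.normal, 0.5, 0.1)
pts = random_samples([0.0], [1.0], npts=100, dist=dist, clip=False)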
sample
global
null
false
f,lb,ub,npts,map
null
null
null
null
failure, success
def sample(f, lb, ub, npts=10000, map=None): """ return number of failures and successes for some boolean function f Inputs: f -- a function that returns True for 'success' and False for 'failure' lb -- a list of lower bounds ub -- a list of upper bounds npts -- the number of points to sample [Default is npts=10000] map -- the mapping function [Default is builtins.map]""" if map is None: from builtins import map from numpy import transpose, atleast_2d pts = _random_samples(lb, ub, npts) results = list(map(f, atleast_2d(transpose(pts)).tolist())) failure = results.count(False) success = len(results) - failure return failure, success
["def","sample","(","f",",","lb",",","ub",",","npts=10000",",","map=None",")",":","``","''","''","return","number","of","failures","and","successes","for","some","boolean","function","f","Inputs",":","f","--","a","function","that","returns","True","for","'success","'","and","False","for","'failure'","lb","--","a","list","of","lower","bounds","ub","--","a","list","of","upper","bounds","npts","--","the","number","of","points","to","sample","[","Default","is","npts=10000","]","map","--","the","mapping","function","[","Default","is","builtins.map","]","''","''","''","if","map","is","None",":","from","builtins","import","map","from","numpy","import","transpose",",","atleast_2d","pts","=","_random_samples","(","lb",",","ub",",","npts",")","results","=","list","(","map","(","f",",","atleast_2d","(","transpose","(","pts",")",")",".tolist","(",")",")",")","failure","=","results.count","(","False",")","success","=","len","(","results",")","-","failure","return","failure",",","success"]
71
90
null
samples.py
mystic/mystic/math/samples.py
15
null
0
15
null
null
null
Use image node_id 3 for calling a global function with example usage: sample(f, lb, ub, npts, map) and returns: failure, success
129
node_id 3
1,407,023
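A worked sketch of the sample record above: a Monte Carlo estimate of pi/4 via the fraction of points landing inside the unit circle. The predicate receives each sample point as a list of coordinates:

from mystic.math.samples import sample

def in_unit_circle(x):
    # x is one sample point, e.g. [x0, x1]
    return x[0]**2 + x[1]**2 <= 1.0

failures, successes = sample(in_unit_circle, [-1.0, -1.0], [1.0, 1.0],
                             npts=10000)
# successes / (failures + successes) approximates pi/4 (~0.785).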
__init__
MockTorchCSCTensor
null
true
self,edge_index,edge_attr,size
null
null
null
null
MockTorchCSCTensor
def __init__( self, edge_index: Tensor, edge_attr: Optional[Tensor] = None, size: Optional[Union[int, Tuple[int, int]]] = None, ): self.edge_index = edge_index self.edge_attr = edge_attr self.size = size
["def","__init__","(","self",",","edge_index",":","Tensor",",","edge_attr",":","Optional","[","Tensor","]","=","None",",","size",":","Optional","[","Union","[","int",",","Tuple","[","int",",","int","]","]","]","=","None",",",")",":","self.edge_index","=","edge_index","self.edge_attr","=","edge_attr","self.size","=","size"]
253
261
null
typing.py
pytorch_geometric/torch_geometric/typing.py
import inspect import os import sys import warnings from typing import Any, Dict, List, Optional, Tuple, Union import numpy import torch from torch import Tensor
15
2
8
0
1
2
null
Use image node_id 1 to create a new MockTorchCSCTensor object with example: obj = MockTorchCSCTensor(edge_index, edge_attr, size)
130
node_id 1
1,775,551
t
MockTorchCSCTensor
null
true
self
null
null
null
null
to_torch_csr_tensor
def t(self) -> Tensor: # Only support accessing its transpose: from torch_geometric.utils import to_torch_csr_tensor size = self.size return to_torch_csr_tensor( self.edge_index.flip([0]), self.edge_attr, size[::-1] if isinstance(size, (tuple, list)) else size, )
["def","t","(","self",")","-",">","Tensor",":","#","Only","support","accessing","its","transpose",":","from","torch_geometric.utils","import","to_torch_csr_tensor","size","=","self.size","return","to_torch_csr_tensor","(","self.edge_index.flip","(","[","0","]",")",",","self.edge_attr",",","size","[",":",":-1","]","if","isinstance","(","size",",","(","tuple",",","list",")",")","else","size",",",")"]
263
270
null
typing.py
pytorch_geometric/torch_geometric/typing.py
import inspect import os import sys import warnings from typing import Any, Dict, List, Optional, Tuple, Union import numpy import torch from torch import Tensor
15
2
8
0
1
2
null
Use image node_id 2 for calling the MockTorchCSCTensor obj's underlying member method code with example usage: obj.t() and returns: to_torch_csr_tensor
151
node_id 2
1,775,552
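A combined sketch for the two MockTorchCSCTensor records above (constructor plus t()). The class is an internal fallback shim in torch_geometric.typing, so whether it is active depends on the installed torch/torch_geometric versions, which is an assumption here:

import torch
from torch_geometric.typing import MockTorchCSCTensor

edge_index = torch.tensor([[0, 1, 1], [1, 0, 2]])
csc = MockTorchCSCTensor(edge_index, size=(3, 3))
# Only the transpose is supported: it materializes a CSR tensor from the
# flipped (dst, src) edges, with the size tuple reversed.
adj_t = csc.t()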
__call__
_Fitness
object
true
self
A metric to measure the fitness of a program.

    This object can be called with NumPy vectorized arguments and returns a
    floating point score quantifying the quality of the program's
    representation of the true relationship.

    Parameters
    ----------
    function : callable
        A function with signature function(y, y_pred, sample_weight) that
        returns a floating point number, where `y` is the input target y
        vector, `y_pred` is the predicted values from the genetic program,
        and sample_weight is the sample_weight vector.

    greater_is_better : bool
        Whether a higher value from `function` indicates a better fit. In
        general this would be False for metrics indicating the magnitude of
        the error, and True for metrics indicating the quality of fit.
["A","metric","to","measure","the","fitness","of","a","program",".","This","object","is","able","to","be","called","with","NumPy","vectorized","arguments","and","return","a","resulting","floating","point","score","quantifying","the","quality","of","the","program's","representation","of","the","true","relationship",".","Parameters","--","--","--","--","--","function",":","callable","A","function","with","signature","function","(","y",",","y_pred",",","sample_weight",")","that","returns","a","floating","point","number",".","Where","`","y","`","is","the","input","target","y","vector",",","`","y_pred","`","is","the","predicted","values","from","the","genetic","program",",","and","sample_weight","is","the","sample_weight","vector",".","greater_is_better",":","bool","Whether","a","higher","value","from","`","function","`","indicates","a","better","fit",".","In","general","this","would","be","False","for","metrics","indicating","the","magnitude","of","the","error",",","and","True","for","metrics","indicating","the","quality","of","fit","."]
null
null
self
def __call__(self, *args): return self.function(*args)
["def","__call__","(","self",",","*","args",")",":","return","self.function","(","*","args",")"]
48
49
null
fitness.py
gplearn/gplearn/fitness.py
import numbers import numpy from joblib import wrap_non_picklable_objects from scipy.stats import rankdata
15
1
4
7
1
2
1
Use image node_id 2 for calling the _Fitness obj's underlying member method code with example usage: obj.__call__() and returns: self
133
node_id 2
1,106,029
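A sketch of the _Fitness.__call__ record above, wrapping a mean-absolute-error metric directly. The _mae helper is hypothetical; in normal use gplearn's public make_fitness factory builds the _Fitness object instead:

import numpy as np
from gplearn.fitness import _Fitness

def _mae(y, y_pred, w):
    # hypothetical metric with the required (y, y_pred, sample_weight) shape
    return np.average(np.abs(y_pred - y), weights=w)

mae = _Fitness(function=_mae, greater_is_better=False)
# __call__ just forwards its arguments to the wrapped function.
score = mae(np.array([1.0, 2.0]), np.array([1.5, 2.5]), np.ones(2))  # 0.5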
peakmem_rotate
NdimageInterpolation
Benchmark
true
self,shape,order,mode
null
null
null
null
null
def peakmem_rotate(self, shape, order, mode): rotate(self.x, 15, order=order, mode=mode)
["def","peakmem_rotate","(","self",",","shape",",","order",",","mode",")",":","rotate","(","self.x",",","15",",","order=order",",","mode=mode",")"]
64
65
null
ndimage_interpolation.py
scipy/benchmarks/benchmarks/ndimage_interpolation.py
import numpy from .common import Benchmark
15
1
2
2
1
9
1
Use image node_id 8 for calling the NdimageInterpolation obj's underlying member method code with example usage: obj.peakmem_rotate(shape, order, mode) without return types
172
node_id 8
1,883,763
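The peakmem_rotate record above measures peak memory of a single scipy.ndimage.rotate call; asv drives it through the benchmark's parameter grid. The equivalent direct call, with an assumed array shape and parameter choice, looks like this:

import numpy as np
from scipy.ndimage import rotate

x = np.random.rand(512, 512)  # stand-in for the self.x built in setup()
rotate(x, 15, order=1, mode='constant')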
get_retro_decoder_layer_te_spec
global
null
false
encoder_block_spec
null
null
null
null
spec
def get_retro_decoder_layer_te_spec( encoder_block_spec: ModuleSpec = None, ) -> ModuleSpec: """Retro decoder TE spec (uses Transformer Engine components). A Retro decoder layer uses custom attention and bias-dropout-add operators to perform chunked-cross attention. Additionally, the first Retro decoder layer instantiates an entire encoder transformer block. As such, the decoder cross attention module takes an optional encoder block spec, which is only provided for the first Retro decoder layer. Arguments: encoder_block_spec (ModuleSpec): Retro encoder block spec, to be provided for the first Retro decoder layer. """ spec = get_gpt_layer_with_transformer_engine_spec() spec.submodules.pre_cross_attn_layernorm = TENorm spec.submodules.cross_attention = ModuleSpec( module=RetroDecoderCrossAttention, params={ "encoder_block_spec": encoder_block_spec, }, submodules=CrossAttentionSubmodules( linear_q=TEColumnParallelLinear, linear_kv=TEColumnParallelLinear, core_attention=TEDotProductAttention, linear_proj=TERowParallelLinear, ), ) spec.submodules.cross_attn_bda = ModuleSpec( module=RetroDecoderBiasDropoutAdd ) return spec
["def","get_retro_decoder_layer_te_spec","(","encoder_block_spec",":","ModuleSpec","=","None",",",")","-",">","ModuleSpec",":","``","''","''","Retro","decoder","TE","spec","(","uses","Transformer","Engine","components",")",".","A","Retro","decoder","layer","uses","custom","attention","and","bias-dropout-add","operators","to","perform","chunked-cross","attention",".","Additionally",",","the","first","Retro","decoder","layer","instantiates","an","entire","encoder","transformer","block",".","As","such",",","the","decoder","cross","attention","module","takes","an","optional","encoder","block","spec",",","which","is","only","provided","for","the","first","Retro","decoder","layer",".","Arguments",":","encoder_block_spec","(","ModuleSpec",")",":","Retro","encoder","block","spec",",","to","be","provided","for","the","first","Retro","decoder","layer.","``","''","''","spec","=","get_gpt_layer_with_transformer_engine_spec","(",")","spec.submodules.pre_cross_attn_layernorm","=","TENorm","spec.submodules.cross_attention","=","ModuleSpec","(","module=RetroDecoderCrossAttention",",","params=","{","``","encoder_block_spec","''",":","encoder_block_spec",",","}",",","submodules=CrossAttentionSubmodules","(","linear_q=TEColumnParallelLinear",",","linear_kv=TEColumnParallelLinear",",","core_attention=TEDotProductAttention",",","linear_proj=TERowParallelLinear",",",")",",",")","spec.submodules.cross_attn_bda","=","ModuleSpec","(","module=RetroDecoderBiasDropoutAdd",")","return","spec"]
31
57
null
decoder_spec.py
megatron-lm/megatron/core/models/retro/decoder_spec.py
from megatron.core import parallel_state from megatron.core.fusions.fused_layer_norm import FusedLayerNorm from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_local_spec, get_gpt_layer_with_transformer_engine_spec from megatron.core.models.retro.config import RetroConfig from megatron.core.models.retro.decoder_attention import RetroDecoderBiasDropoutAdd, RetroDecoderCrossAttention from megatron.core.models.retro.encoder_spec import get_retro_encoder_block_spec from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear from megatron.core.transformer import ModuleSpec from megatron.core.transformer.attention import CrossAttentionSubmodules from megatron.core.transformer.custom_layers.transformer_engine import TEColumnParallelLinear, TEDotProductAttention, TENorm, TERowParallelLinear from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.transformer_block import TransformerBlockSubmodules, get_num_layers_to_build
15
null
12
3
null
null
null
Use image node_id 1 for calling a global function with example usage: get_retro_decoder_layer_te_spec(encoder_block_spec) and returns: spec
139
node_id 1
1,324,186
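A sketch of the get_retro_decoder_layer_te_spec record above. Only the first Retro decoder layer receives an encoder block spec; the get_retro_encoder_block_spec call, its use_transformer_engine flag, and the preexisting RetroConfig `config` are assumptions inferred from this module's imports:

from megatron.core.models.retro.decoder_spec import get_retro_decoder_layer_te_spec
from megatron.core.models.retro.encoder_spec import get_retro_encoder_block_spec

# Assumed: `config` is a RetroConfig built elsewhere.
encoder_block_spec = get_retro_encoder_block_spec(config, use_transformer_engine=True)
first_layer_spec = get_retro_decoder_layer_te_spec(encoder_block_spec)
other_layer_spec = get_retro_decoder_layer_te_spec()  # no embedded encoder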
get_retro_decoder_layer_local_spec
global
null
false
encoder_block_spec
null
null
null
null
spec
def get_retro_decoder_layer_local_spec( encoder_block_spec: ModuleSpec = None, ) -> ModuleSpec: """Retro decoder local spec (uses Megatron-Core components). A Retro decoder layer uses custom attention and bias-dropout-add operators to perform chunked-cross attention. Additionally, the first Retro decoder layer instantiates an entire encoder transformer block. As such, the decoder cross attention module takes an optional encoder block spec, which is only provided for the first Retro decoder layer. Arguments: encoder_block_spec (ModuleSpec): Retro encoder block spec, to be provided for the first Retro decoder layer. """ spec = get_gpt_layer_local_spec() spec.submodules.pre_cross_attn_layernorm = FusedLayerNorm spec.submodules.cross_attention = ModuleSpec( module=RetroDecoderCrossAttention, params={ "encoder_block_spec": encoder_block_spec, }, submodules=CrossAttentionSubmodules( linear_q=ColumnParallelLinear, linear_kv=ColumnParallelLinear, core_attention=DotProductAttention, linear_proj=RowParallelLinear, ), ) spec.submodules.cross_attn_bda = ModuleSpec( module=RetroDecoderBiasDropoutAdd ) return spec
["def","get_retro_decoder_layer_local_spec","(","encoder_block_spec",":","ModuleSpec","=","None",",",")","-",">","ModuleSpec",":","``","''","''","Retro","decoder","local","spec","(","uses","Megatron-Core","components",")",".","A","Retro","decoder","layer","uses","custom","attention","and","bias-dropout-add","operators","to","perform","chunked-cross","attention",".","Additionally",",","the","first","Retro","decoder","layer","instantiates","an","entire","encoder","transformer","block",".","As","such",",","the","decoder","cross","attention","module","takes","an","optional","encoder","block","spec",",","which","is","only","provided","for","the","first","Retro","decoder","layer",".","Arguments",":","encoder_block_spec","(","ModuleSpec",")",":","Retro","encoder","block","spec",",","to","be","provided","for","the","first","Retro","decoder","layer.","``","''","''","spec","=","get_gpt_layer_local_spec","(",")","spec.submodules.pre_cross_attn_layernorm","=","FusedLayerNorm","spec.submodules.cross_attention","=","ModuleSpec","(","module=RetroDecoderCrossAttention",",","params=","{","``","encoder_block_spec","''",":","encoder_block_spec",",","}",",","submodules=CrossAttentionSubmodules","(","linear_q=ColumnParallelLinear",",","linear_kv=ColumnParallelLinear",",","core_attention=DotProductAttention",",","linear_proj=RowParallelLinear",",",")",",",")","spec.submodules.cross_attn_bda","=","ModuleSpec","(","module=RetroDecoderBiasDropoutAdd",")","return","spec"]
60
86
null
decoder_spec.py
megatron-lm/megatron/core/models/retro/decoder_spec.py
from megatron.core import parallel_state from megatron.core.fusions.fused_layer_norm import FusedLayerNorm from megatron.core.models.gpt.gpt_layer_specs import get_gpt_layer_local_spec, get_gpt_layer_with_transformer_engine_spec from megatron.core.models.retro.config import RetroConfig from megatron.core.models.retro.decoder_attention import RetroDecoderBiasDropoutAdd, RetroDecoderCrossAttention from megatron.core.models.retro.encoder_spec import get_retro_encoder_block_spec from megatron.core.tensor_parallel.layers import ColumnParallelLinear, RowParallelLinear from megatron.core.transformer import ModuleSpec from megatron.core.transformer.attention import CrossAttentionSubmodules from megatron.core.transformer.custom_layers.transformer_engine import TEColumnParallelLinear, TEDotProductAttention, TENorm, TERowParallelLinear from megatron.core.transformer.dot_product_attention import DotProductAttention from megatron.core.transformer.transformer_block import TransformerBlockSubmodules, get_num_layers_to_build
15
null
12
3
null
null
null
Use image node_id 2 for calling a global function with example usage: get_retro_decoder_layer_local_spec(encoder_block_spec) and returns: spec
142
node_id 2
1,324,187
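The local variant in the record above follows the same pattern as the TE spec but swaps in Megatron-Core-local modules (FusedLayerNorm, ColumnParallelLinear, DotProductAttention, RowParallelLinear). A minimal sketch:

from megatron.core.models.retro.decoder_attention import RetroDecoderBiasDropoutAdd
from megatron.core.models.retro.decoder_spec import get_retro_decoder_layer_local_spec

spec = get_retro_decoder_layer_local_spec()
# The returned ModuleSpec has the Retro bias-dropout-add wired in.
assert spec.submodules.cross_attn_bda.module is RetroDecoderBiasDropoutAdd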
can_document_member
HasTraitsDocumenter
ClassDocumenter
true
cls,member,membername,isattr,parent
Specialized Documenter subclass for traits
["Specialized","Documenter","subclass","for","traits"]
null
null
isinstance
def can_document_member(cls, member, membername, isattr, parent): return isinstance(member, HasTraits)
["def","can_document_member","(","cls",",","member",",","membername",",","isattr",",","parent",")",":","return","isinstance","(","member",",","HasTraits",")"]
48
49
null
autodoc_traits.py
pythreejs/docs/sphinxext/autodoc_traits.py
from collections import OrderedDict from traitlets import TraitType, Undefined, Container, Dict, Any, HasTraits from sphinx.ext.autodoc import ClassDocumenter, AttributeDocumenter
15
2
3
3
2
2
1
Use image node_id 1 for calling the HasTraitsDocumenter obj's underlying member method code with example usage: obj.can_document_member(cls, member, membername, isattr, parent) and returns: isinstance
200
node_id 1
1,691,040
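A sketch of the can_document_member record above. Since the first parameter is cls, the method is presumably a classmethod (as in sphinx's Documenter API), so cls is bound automatically; importing HasTraitsDocumenter from the docs/sphinxext module shown above is an assumption:

from traitlets import HasTraits, Int

class Position(HasTraits):
    x = Int()

# The documenter only claims members that are HasTraits instances.
HasTraitsDocumenter.can_document_member(Position(), 'pos', False, None)  # True
HasTraitsDocumenter.can_document_member(3.14, 'val', False, None)        # False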