Dataset columns (name, dtype, observed range):

    name                 stringlengths   1 - 94
    class_name           stringlengths   1 - 56
    class_bases          stringlengths   0 - 73
    is_member            bool            2 classes
    args                 stringlengths   0 - 1.04k
    class_docstr         stringlengths   0 - 13k
    class_docstr_tok     stringlengths   2 - 18.2k
    docstr               stringlengths   0 - 6.72k
    docstr_tok           stringlengths   13 - 9.01k
    returns              stringlengths   1 - 279
    code                 stringlengths   20 - 27.8k
    code_tok             stringlengths   32 - 38.4k
    lstart               int64           1 - 1.03k
    lend                 int64           2 - 1.18k
    raises               stringclasses   9 values
    filename             stringlengths   5 - 81
    file_path            stringlengths   14 - 129
    imports              stringlengths   0 - 786
    total_objects        int64           7 - 10
    num_classes          float64         1 - 4
    num_imports          int64           0 - 9
    num_all_bases        float64         0 - 4
    num_bases            float64         1 - 4
    num_functions        int64           0 - 10
    num_methods          float64         1 - 8
    label_desc           stringlengths   70 - 1.25k
    label_desc_len       int64           70 - 1.25k
    label_id             stringclasses   10 values
    __index_level_0__    int64           0 - 514k
mark_step
global
null
false
device,wait
null
null
null
null
null
def mark_step(device: str = "", wait=False):
    """Triggers a mark step, which amounts to
    - collecting a group of 'live' lazy tensors to index into the compilation cache
      (lowering/compiling their IR graphs if not cached)
    - kicking off execution of the compiled function
    - (optionally, wait=True) waiting for cpu-side execution to complete (does not sync the accelerator)
    """
    # TODO(whc) expand this to include backend hooks and align with XLA backend needs
    torch._C._lazy._mark_step(device, [], wait=wait)
    run_step_closures()
["def","mark_step","(","device",":","str","=","``","''",",","wait=False",")",":","``","''","''","Triggers","a","mark","step",",","which","amounts","to","-","collecting","a","group","of","'live","'","lazy","tensors","to","index","into","the","compilation","cache","(","lowering\/compiling","their","IR","graphs","if","not","cached",")","-","kicking","off","execution","of","the","compiled","function","-","(","optionally",",","wait=True",")","waiting","for","cpu-side","execution","to","complete","(","does","not","sync","the","accelerator",")","``","''","''","#","TODO","(","whc",")","expand","this","to","include","backend","hooks","and","align","with","XLA","backend","needs","torch._C._lazy._mark_step","(","device",",","[","]",",","wait=wait",")","run_step_closures","(",")"]
9
19
null
__init__.py
pytorch/torch/_lazy/__init__.py
import threading
import torch._C._lazy
from torch.utils._pytree import tree_flatten, tree_unflatten
from .closure import add_step_closure, run_step_closures
10
null
4
null
null
6
null
Use image node_id 1 for calling a global function with example usage: mark_step(device, wait) without return types
114
node_id 1
310,258
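A hedged usage sketch for the mark_step record above. It assumes a PyTorch build that ships the TorchScript-based lazy backend; the `torch._lazy.ts_backend.init()` call is an assumption about how that backend is enabled, not something stated in the record.

import torch
import torch._lazy
import torch._lazy.ts_backend

torch._lazy.ts_backend.init()          # assumption: enables the "lazy" device

x = torch.randn(2, 2, device="lazy")   # lazy tensors only record IR
y = (x + x).sum()                      # still unexecuted, just traced

torch._lazy.mark_step(wait=True)       # compile + run the recorded graph,
                                       # blocking until cpu-side work finishes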
indexing_dtype_strength_reduction
global
null
false
loop_body
null
null
null
null
null
def indexing_dtype_strength_reduction(loop_body: LoopBody):
    """
    Performs Value Range Analysis on LoopBody's fx graph to reduce precision of
    intermediaries from int64 to int32
    """
    bv = loop_body.bounds()

    int64_dtype_nodes = [
        node
        for node in loop_body.get_nodes()
        if (
            node.target == "to_dtype"
            and node.args[2] == torch.int64
            and node not in bv.unbounded_vars
        )
    ]
    if not int64_dtype_nodes:
        return

    bounds = bv.get_bounds()

    # TODO - if dominated node of one to_dtype is not expressible in int32,
    # we should short circuit another to_dtype node if that node also dominates
    for node in int64_dtype_nodes:
        try_to_reduce_precision(
            node,
            bounds,
            loop_body.indirect_vars,
            loop_body.indexing_exprs,
            bv.replacement_vals,
        )
["def","indexing_dtype_strength_reduction","(","loop_body",":","LoopBody",")",":","``","''","''","Performs","Value","Range","Analysis","on","LoopBody","'s","fx","graph","to","reduce","precision","of","intermediaries","from","int64","to","int32","``","''","''","bv","=","loop_body.bounds","(",")","int64_dtype_nodes","=","[","node","for","node","in","loop_body.get_nodes","(",")","if","(","node.target","==","``","to_dtype","''","and","node.args","[","2","]","==","torch.int64","and","node","not","in","bv.unbounded_vars",")","]","if","not","int64_dtype_nodes",":","return","bounds","=","bv.get_bounds","(",")","#","TODO","-","if","dominated","node","of","one","to_dtype","is","not","expressible","in","int32",",","#","we","should","short","circuit","another","to_dtype","node","if","that","node","also","dominates","for","node","in","int64_dtype_nodes",":","try_to_reduce_precision","(","node",",","bounds",",","loop_body.indirect_vars",",","loop_body.indexing_exprs",",","bv.replacement_vals",",",")"]
88
118
null
optimize_indexing.py
pytorch/torch/_inductor/optimize_indexing.py
import math
import sympy
import torch
from torch.utils._sympy.value_ranges import ValueRanges
from .ir import LoopBody
from .utils import dominated_nodes
10
null
6
null
null
4
null
Use image node_id 4 for calling a global function with example usage: indexing_dtype_strength_reduction(loop_body) without return types
135
node_id 4
309,662
try_to_reduce_precision
global
null
false
node,bounds,indirect_vars,indices,replacement_vals
null
null
null
null
unknown
def try_to_reduce_precision(
    node, bounds, indirect_vars, indices, replacement_vals
):
    # if a downstream use of a node explicitly converts to int32, or float16/float32/float64,
    # then it's precision is set for that chain of uses, and we don't need to consider those
    # dominated values
    def skip_filter(node):
        return node.target == "to_dtype" and node.args[2] in (
            torch.int32,
            torch.float32,
            torch.float64,
        )

    # TODO - there are dominated uses whose dtype does not depend on whether
    # we reduce the precision here, e.g. add(int64, int64) one of the args can be reduced to
    # int32 without changing the output precision of the node. this case hasn't shown up
    for dominated in dominated_nodes([node], skip_filter):
        if dominated.target in ["store", "output"]:
            continue

        if (
            isinstance(dominated.target, str)
            and "set_indirect" in dominated.target
        ):
            idx = int(dominated.target[len("set_indirect") :])
            indirect_var = indirect_vars[idx]

            # We check that we can compute all the indices it's involved in with int32
            for index, expr in indices.items():
                if indirect_var in expr.free_symbols:
                    index_val = replacement_vals[index]

                    if math.isinf(index_val.lower) or math.isinf(
                        index_val.upper
                    ):
                        return

                    # all indices are integers, so make sure that we
                    # use the bounds of integers instead of floats.
                    # TODO - not sure if we should be doing int/float casts while tracing,
                    # might interfere with sympy.
                    index_val_int = ValueRanges(
                        int(index_val.lower), int(index_val.upper)
                    )
                    if not range_expressable_in_32_bits(index_val_int):
                        return

        if not range_expressable_in_32_bits(bounds[dominated]):
            return

    args = list(node.args)
    args[2] = torch.int32
    node.args = tuple(args)
["def","try_to_reduce_precision","(","node",",","bounds",",","indirect_vars",",","indices",",","replacement_vals",")",":","#","if","a","downstream","use","of","a","node","explicitly","converts","to","int32",",","or","float16\/float32\/float64",",","#","then","it","'s","precision","is","set","for","that","chain","of","uses",",","and","we","do","n't","need","to","consider","those","#","dominated","values","def","skip_filter","(","node",")",":","return","node.target","==","``","to_dtype","''","and","node.args","[","2","]","in","(","torch.int32",",","torch.float32",",","torch.float64",",",")","#","TODO","-","there","are","dominated","uses","whose","dtype","does","not","depend","on","whether","#","we","reduce","the","precision","here",",","e.g",".","add","(","int64",",","int64",")","one","of","the","args","can","be","reduced","to","#","int32","without","changing","the","output","precision","of","the","node",".","this","case","has","n't","shown","up","for","dominated","in","dominated_nodes","(","[","node","]",",","skip_filter",")",":","if","dominated.target","in","[","``","store","''",",","``","output","''","]",":","continue","if","(","isinstance","(","dominated.target",",","str",")","and","``","set_indirect","''","in","dominated.target",")",":","idx","=","int","(","dominated.target","[","len","(","``","set_indirect","''",")",":","]",")","indirect_var","=","indirect_vars","[","idx","]","#","We","check","that","we","can","compute","all","the","indices","it","'s","involved","in","with","int32","for","index",",","expr","in","indices.items","(",")",":","if","indirect_var","in","expr.free_symbols",":","index_val","=","replacement_vals","[","index","]","if","math.isinf","(","index_val.lower",")","or","math.isinf","(","index_val.upper",")",":","return","#","all","indices","are","integers",",","so","make","sure","that","we","#","use","the","bounds","of","integers","instead","of","floats",".","#","TODO","-","not","sure","if","we","should","be","doing","int\/float","casts","while","tracing",",","#","might","interfere","with","sympy",".","index_val_int","=","ValueRanges","(","int","(","index_val.lower",")",",","int","(","index_val.upper",")",")","if","not","range_expressable_in_32_bits","(","index_val_int",")",":","return","if","not","range_expressable_in_32_bits","(","bounds","[","dominated","]",")",":","return","args","=","list","(","node.args",")","args","[","2","]","=","torch.int32","node.args","=","tuple","(","args",")"]
39
85
null
optimize_indexing.py
pytorch/torch/_inductor/optimize_indexing.py
import math
import sympy
import torch
from torch.utils._sympy.value_ranges import ValueRanges
from .ir import LoopBody
from .utils import dominated_nodes
10
null
6
null
null
4
null
Use image node_id 3 for calling a global function with example usage: try_to_reduce_precision(node, bounds, indirect_vars, indices, replacement_vals) and returns: unknown
170
node_id 3
309,661
setup_method
TestTrigger
null
true
self
null
null
null
null
null
def setup_method(self):
    for num in [1]:
        db.merge_conn(
            Connection(
                conn_id=f"trigger.await_message.test.integration.test_{num}",
                conn_type="kafka",
                extra=json.dumps(
                    {
                        "socket.timeout.ms": 10,
                        "bootstrap.servers": "broker:29092",
                        "group.id": f"trigger.await_message.test.integration.test_{num}",
                        "enable.auto.commit": False,
                        "auto.offset.reset": "beginning",
                    }
                ),
            )
        )
["def","setup_method","(","self",")",":","for","num","in","[","1","]",":","db.merge_conn","(","Connection","(","conn_id=f","''","trigger.await_message.test.integration.test_","{","num","}","''",",","conn_type=","''","kafka","''",",","extra=json.dumps","(","{","``","socket.timeout.ms","''",":","10",",","``","bootstrap.servers","''",":","``","broker:29092","''",",","``","group.id","''",":","f","''","trigger.await_message.test.integration.test_","{","num","}","''",",","``","enable.auto.commit","''",":","False",",","``","auto.offset.reset","''",":","``","beginning","''",",","}",")",",",")",")"]
39
55
null
test_await_message.py
airflow/tests/integration/providers/apache/kafka/triggers/test_await_message.py
from __future__ import annotations
import json
import pytest
from confluent_kafka import Producer
from airflow.models import Connection
from airflow.providers.apache.kafka.triggers.await_message import AwaitMessageTrigger
from airflow.utils import db
10
1
7
0
null
1
1
Use image node_id 1 for calling the TestTrigger obj's underlying member method code with example usage: obj.setup_method() without return types
143
node_id 1
16,399
_apply_function
global
null
false
message
null
null
null
null
message
def _apply_function(message):
    if message.value() == bytes(TOPIC, "utf-8"):
        return message
["def","_apply_function","(","message",")",":","if","message.value","(",")","==","bytes","(","TOPIC",",","``","utf-8","''",")",":","return","message"]
32
34
null
test_await_message.py
airflow/tests/integration/providers/apache/kafka/triggers/test_await_message.py
from __future__ import annotations
import json
import pytest
from confluent_kafka import Producer
from airflow.models import Connection
from airflow.providers.apache.kafka.triggers.await_message import AwaitMessageTrigger
from airflow.utils import db
10
null
7
null
null
1
null
Use image node_id 1 for calling a global function with example usage: _apply_function(message) and returns: message
115
node_id 1
16,400
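Because `_apply_function` only needs an object with a `.value()` method, its behaviour can be sketched without a live Kafka broker. Hedged sketch: `TOPIC` and `StubMessage` are illustrative stand-ins invented here; the real test passes confluent_kafka messages.

# Illustrative stand-ins, not part of the dataset record.
TOPIC = "trigger.await_message.test.integration.test_1"

class StubMessage:
    def __init__(self, payload: bytes):
        self._payload = payload

    def value(self) -> bytes:
        return self._payload

def _apply_function(message):
    if message.value() == bytes(TOPIC, "utf-8"):
        return message

assert _apply_function(StubMessage(TOPIC.encode())) is not None  # matching payload passes through
assert _apply_function(StubMessage(b"other")) is None            # everything else is filtered out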
_find_text_in_file
global
null
false
filename,start_prompt,end_prompt
null
null
null
null
str, start_index, end_index, lines
def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Find the text in `filename` between a line beginning with `start_prompt` and
    before `end_prompt`, removing empty lines.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return (
        "".join(lines[start_index:end_index]),
        start_index,
        end_index,
        lines,
    )
["def","_find_text_in_file","(","filename",",","start_prompt",",","end_prompt",")",":","``","''","''","Find","the","text","in","`","filename","`","between","a","line","beginning","with","`","start_prompt","`","and","before","`","end_prompt","`",",","removing","empty","lines.","``","''","''","with","open","(","filename",",","``","r","''",",","encoding=","''","utf-8","''",",","newline=","''","\\n","''",")","as","f",":","lines","=","f.readlines","(",")","#","Find","the","start","prompt",".","start_index","=","0","while","not","lines","[","start_index","]",".startswith","(","start_prompt",")",":","start_index","+=","1","start_index","+=","1","end_index","=","start_index","while","not","lines","[","end_index","]",".startswith","(","end_prompt",")",":","end_index","+=","1","end_index","-=","1","while","len","(","lines","[","start_index","]",")","<","=","1",":","start_index","+=","1","while","len","(","lines","[","end_index","]",")","<","=","1",":","end_index","-=","1","end_index","+=","1","return","(","``","''",".join","(","lines","[","start_index",":","end_index","]",")",",","start_index",",","end_index",",","lines",",",")"]
30
53
null
check_table.py
llmga/llmga/diffusers/utils/check_table.py
import argparse
import collections
import importlib.util
import os
import re
10
null
5
null
null
5
null
Use image node_id 1 for calling a global function with example usage: _find_text_in_file(filename, start_prompt, end_prompt) and returns: str, start_index, end_index, lines
175
node_id 1
175,217
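A minimal sketch of how `_find_text_in_file` behaves, assuming the function from the record above is in scope. The file content and prompt markers are invented for illustration.

import tempfile

# Invented sample content: extract the lines between the two prompts.
content = "<!--start-->\n\n| a | b |\n| 1 | 2 |\n\n<!--end-->\n"
with tempfile.NamedTemporaryFile("w", suffix=".md", delete=False) as f:
    f.write(content)
    path = f.name

text, start, end, lines = _find_text_in_file(path, "<!--start-->", "<!--end-->")
print(text)   # "| a | b |\n| 1 | 2 |\n" - the empty edge lines are trimmed
print(start, end)  # 2 4 - indices into `lines` bracketing the kept block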
test_abstract
TestABC
null
true
self
null
null
null
null
null
def test_abstract(self):
    assert_(issubclass(np.number, numbers.Number))

    assert_(issubclass(np.inexact, numbers.Complex))
    assert_(issubclass(np.complexfloating, numbers.Complex))
    assert_(issubclass(np.floating, numbers.Real))

    assert_(issubclass(np.integer, numbers.Integral))
    assert_(issubclass(np.signedinteger, numbers.Integral))
    assert_(issubclass(np.unsignedinteger, numbers.Integral))
["def","test_abstract","(","self",")",":","assert_","(","issubclass","(","np.number",",","numbers.Number",")",")","assert_","(","issubclass","(","np.inexact",",","numbers.Complex",")",")","assert_","(","issubclass","(","np.complexfloating",",","numbers.Complex",")",")","assert_","(","issubclass","(","np.floating",",","numbers.Real",")",")","assert_","(","issubclass","(","np.integer",",","numbers.Integral",")",")","assert_","(","issubclass","(","np.signedinteger",",","numbers.Integral",")",")","assert_","(","issubclass","(","np.unsignedinteger",",","numbers.Integral",")",")"]
9
18
null
test_abc.py
numpy/numpy/_core/tests/test_abc.py
from numpy.testing import assert_
import numbers
import numpy
from numpy._core.numerictypes import sctypes
10
1
4
0
null
0
5
Use image node_id 1 for calling the TestABC obj's underlying member method code with example usage: obj.test_abstract() without return types
140
node_id 1
203,773
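The same ABC relationships the test above asserts can be checked interactively. A short sketch relying only on the fact that NumPy registers its scalar types against Python's `numbers` tower:

import numbers
import numpy as np

# NumPy abstract scalar types slot into the numeric ABC hierarchy.
assert issubclass(np.floating, numbers.Real)
assert issubclass(np.integer, numbers.Integral)

# Concrete scalars behave accordingly.
assert isinstance(np.float64(1.5), numbers.Real)
assert not isinstance(np.float64(1.5), numbers.Rational)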
test_floats
TestABC
null
true
self
null
null
null
null
null
def test_floats(self):
    for t in sctypes["float"]:
        assert_(
            isinstance(t(), numbers.Real),
            f"{t.__name__} is not instance of Real",
        )
        assert_(
            issubclass(t, numbers.Real),
            f"{t.__name__} is not subclass of Real",
        )
        assert_(
            not isinstance(t(), numbers.Rational),
            f"{t.__name__} is instance of Rational",
        )
        assert_(
            not issubclass(t, numbers.Rational),
            f"{t.__name__} is subclass of Rational",
        )
["def","test_floats","(","self",")",":","for","t","in","sctypes","[","``","float","''","]",":","assert_","(","isinstance","(","t","(",")",",","numbers.Real",")",",","f","''","{","t.__name__","}","is","not","instance","of","Real","''",",",")","assert_","(","issubclass","(","t",",","numbers.Real",")",",","f","''","{","t.__name__","}","is","not","subclass","of","Real","''",",",")","assert_","(","not","isinstance","(","t","(",")",",","numbers.Rational",")",",","f","''","{","t.__name__","}","is","instance","of","Rational","''",",",")","assert_","(","not","issubclass","(","t",",","numbers.Rational",")",",","f","''","{","t.__name__","}","is","subclass","of","Rational","''",",",")"]
20
29
null
test_abc.py
numpy/numpy/_core/tests/test_abc.py
from numpy.testing import assert_
import numbers
import numpy
from numpy._core.numerictypes import sctypes
10
1
4
0
null
0
5
Use image node_id 2 for calling the TestABC obj's underlying member method code with example usage: obj.test_floats() without return types
138
node_id 2
203,774
test_complex
TestABC
null
true
self
null
null
null
null
null
def test_complex(self):
    for t in sctypes["complex"]:
        assert_(
            isinstance(t(), numbers.Complex),
            f"{t.__name__} is not instance of Complex",
        )
        assert_(
            issubclass(t, numbers.Complex),
            f"{t.__name__} is not subclass of Complex",
        )
        assert_(
            not isinstance(t(), numbers.Real),
            f"{t.__name__} is instance of Real",
        )
        assert_(
            not issubclass(t, numbers.Real),
            f"{t.__name__} is subclass of Real",
        )
["def","test_complex","(","self",")",":","for","t","in","sctypes","[","``","complex","''","]",":","assert_","(","isinstance","(","t","(",")",",","numbers.Complex",")",",","f","''","{","t.__name__","}","is","not","instance","of","Complex","''",",",")","assert_","(","issubclass","(","t",",","numbers.Complex",")",",","f","''","{","t.__name__","}","is","not","subclass","of","Complex","''",",",")","assert_","(","not","isinstance","(","t","(",")",",","numbers.Real",")",",","f","''","{","t.__name__","}","is","instance","of","Real","''",",",")","assert_","(","not","issubclass","(","t",",","numbers.Real",")",",","f","''","{","t.__name__","}","is","subclass","of","Real","''",",",")"]
31
40
null
test_abc.py
numpy/numpy/_core/tests/test_abc.py
from numpy.testing import assert_
import numbers
import numpy
from numpy._core.numerictypes import sctypes
10
1
4
0
null
0
5
Use image node_id 3 for calling the TestABC obj's underlying member method code with example usage: obj.test_complex() without return types
139
node_id 3
203,775
openxla_eval_boxed
global
null
false
model,fake_tensor_inputs
null
null
null
null
xla_backend_helper
def openxla_eval_boxed(model, fake_tensor_inputs):
    return xla_backend_helper(model, fake_tensor_inputs, boxed=True)
["def","openxla_eval_boxed","(","model",",","fake_tensor_inputs",")",":","return","xla_backend_helper","(","model",",","fake_tensor_inputs",",","boxed=True",")"]
31
32
null
torchxla.py
pytorch/torch/_dynamo/backends/torchxla.py
import logging
import warnings
from functorch.compile import make_boxed_func
from ..backends.common import aot_autograd
from .registry import register_backend, register_experimental_backend
10
null
5
null
null
5
null
Use image node_id 4 for calling a global function with example usage: openxla_eval_boxed(model, fake_tensor_inputs) and returns: xla_backend_helper
147
node_id 4
304,545
openxla_eval
global
null
false
model,fake_tensor_inputs
null
null
null
null
xla_backend_helper
def openxla_eval(model, fake_tensor_inputs):
    return xla_backend_helper(model, fake_tensor_inputs, boxed=False)
["def","openxla_eval","(","model",",","fake_tensor_inputs",")",":","return","xla_backend_helper","(","model",",","fake_tensor_inputs",",","boxed=False",")"]
27
28
null
torchxla.py
pytorch/torch/_dynamo/backends/torchxla.py
import logging
import warnings
from functorch.compile import make_boxed_func
from ..backends.common import aot_autograd
from .registry import register_backend, register_experimental_backend
10
null
5
null
null
5
null
Use image node_id 3 for calling a global function with example usage: openxla_eval(model, fake_tensor_inputs) and returns: xla_backend_helper
141
node_id 3
304,544
torchxla_trace_once
global
null
false
model,fake_tensor_inputs
null
null
null
null
xla_backend_helper
def torchxla_trace_once(model, fake_tensor_inputs):
    warnings.warn(
        "This backend will be deprecated in 2.2, please use `openxla` backend instead"
    )
    return xla_backend_helper(model, fake_tensor_inputs)
["def","torchxla_trace_once","(","model",",","fake_tensor_inputs",")",":","warnings.warn","(","``","This","backend","will","be","deprecated","in","2.2",",","please","use","`","openxla","`","backend","instead","''",")","return","xla_backend_helper","(","model",",","fake_tensor_inputs",")"]
18
23
null
torchxla.py
pytorch/torch/_dynamo/backends/torchxla.py
import logging
import warnings
from functorch.compile import make_boxed_func
from ..backends.common import aot_autograd
from .registry import register_backend, register_experimental_backend
10
null
5
null
null
5
null
Use image node_id 2 for calling a global function with example usage: torchxla_trace_once(model, fake_tensor_inputs) and returns: xla_backend_helper
148
node_id 2
304,543
torchxla_trivial
global
null
false
gm,fake_tensor_inputs
null
null
null
null
gm
def torchxla_trivial(gm, fake_tensor_inputs):
    return gm
["def","torchxla_trivial","(","gm",",","fake_tensor_inputs",")",":","return","gm"]
13
14
null
torchxla.py
pytorch/torch/_dynamo/backends/torchxla.py
import logging
import warnings
from functorch.compile import make_boxed_func
from ..backends.common import aot_autograd
from .registry import register_backend, register_experimental_backend
10
null
5
null
null
5
null
Use image node_id 1 for calling a global function with example usage: torchxla_trivial(gm, fake_tensor_inputs) and returns: gm
126
node_id 1
304,542
reset
global
null
false
null
null
null
null
null
def reset() -> None:
    """Clear all compile caches and restore initial state"""
    with eval_frame.compile_lock:
        for weak_code in (
            convert_frame.input_codes.seen + convert_frame.output_codes.seen
        ):
            code = weak_code()
            if code:
                reset_code(code)
        convert_frame.input_codes.clear()
        convert_frame.output_codes.clear()
        orig_code_map.clear()
        guard_failures.clear()
        graph_break_reasons.clear()
        resume_execution.ContinueExecutionCache.cache.clear()
        _reset_guarded_backend_cache()
        reset_frame_count()
        torch._C._dynamo.compiled_autograd.clear_cache()
        code_context.clear()
["def","reset","(",")","-",">","None",":","``","''","''","Clear","all","compile","caches","and","restore","initial","state","''","''","''","with","eval_frame.compile_lock",":","for","weak_code","in","(","convert_frame.input_codes.seen","+","convert_frame.output_codes.seen",")",":","code","=","weak_code","(",")","if","code",":","reset_code","(","code",")","convert_frame.input_codes.clear","(",")","convert_frame.output_codes.clear","(",")","orig_code_map.clear","(",")","guard_failures.clear","(",")","graph_break_reasons.clear","(",")","resume_execution.ContinueExecutionCache.cache.clear","(",")","_reset_guarded_backend_cache","(",")","reset_frame_count","(",")","torch._C._dynamo.compiled_autograd.clear_cache","(",")","code_context.clear","(",")"]
67
85
null
__init__.py
pytorch/torch/_dynamo/__init__.py
import torch
from . import convert_frame, eval_frame, resume_execution
from .backends.registry import list_backends, lookup_backend, register_backend
from .code_context import code_context
from .convert_frame import replay
from .decorators import allow_in_graph, assume_constant_result, disable, disallow_in_graph, forbid_in_graph, graph_break, mark_dynamic, mark_static, mark_static_address, maybe_mark_dynamic, run
from .eval_frame import _reset_guarded_backend_cache, explain, export, is_dynamo_supported, optimize, optimize_assert, OptimizedModule, reset_code
from .external_utils import is_compiling
from .utils import graph_break_reasons, guard_failures, orig_code_map, reset_frame_count
10
null
9
null
null
1
null
Use image node_id 1 for calling a global function with example usage: reset() without return types
98
node_id 1
304,494
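A hedged sketch of when `torch._dynamo.reset` from the record above is typically called: between independent `torch.compile` experiments, so stale compile caches and guards do not leak from one run into the next.

import torch
import torch._dynamo

def f(x):
    return torch.sin(x) + 1

compiled = torch.compile(f)
compiled(torch.randn(4))

torch._dynamo.reset()   # drop compile caches, guards, and frame counters

# Fresh start with a different backend after the reset.
compiled_again = torch.compile(f, backend="eager")
compiled_again(torch.randn(4))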
initialize_options
build
old_build
true
self
null
null
null
null
null
def initialize_options(self):
    old_build.initialize_options(self)
    self.fcompiler = None
    self.warn_error = False
    self.cpu_baseline = "min"
    self.cpu_dispatch = (
        "max -xop -fma4"  # drop AMD legacy features by default
    )
    self.disable_optimization = False
    """
    the '_simd' module is a very large. Adding more dispatched features
    will increase binary size and compile time. By default we minimize
    the targeted features to those most commonly used by the NumPy SIMD interface(NPYV),
    NOTE: any specified features will be ignored if they're:
        - part of the baseline(--cpu-baseline)
        - not part of dispatch-able features(--cpu-dispatch)
        - not supported by compiler or platform
    """
    self.simd_test = (
        "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F "
        "AVX512_SKX VSX VSX2 VSX3 VSX4 NEON ASIMD VX VXE VXE2"
    )
["def","initialize_options","(","self",")",":","old_build.initialize_options","(","self",")","self.fcompiler","=","None","self.warn_error","=","False","self.cpu_baseline","=","``","min","''","self.cpu_dispatch","=","(","``","max","-xop","-fma4","''","#","drop","AMD","legacy","features","by","default",")","self.disable_optimization","=","False","``","''","''","the","'_simd","'","module","is","a","very","large",".","Adding","more","dispatched","features","will","increase","binary","size","and","compile","time",".","By","default","we","minimize","the","targeted","features","to","those","most","commonly","used","by","the","NumPy","SIMD","interface","(","NPYV",")",",","NOTE",":","any","specified","features","will","be","ignored","if","they","'re",":","-","part","of","the","baseline","(","--","cpu-baseline",")","-","not","part","of","dispatch-able","features","(","--","cpu-dispatch",")","-","not","supported","by","compiler","or","platform","``","''","''","self.simd_test","=","(","``","BASELINE","SSE2","SSE42","XOP","FMA4","(","FMA3","AVX2",")","AVX512F","``","``","AVX512_SKX","VSX","VSX2","VSX3","VSX4","NEON","ASIMD","VX","VXE","VXE2","''",")"]
34
51
null
build.py
numpy/numpy/distutils/command/build.py
import os
import sys
from distutils.command.build import build
from distutils.util import get_platform
from numpy.distutils.command.config_compiler import show_fortran_compilers
10
1
5
1
1
0
3
Use image node_id 1 for calling the build obj's underlying member method code with example usage: obj.initialize_options() without return types
143
node_id 1
207,518
finalize_options
build
old_build
true
self
null
null
null
null
null
def finalize_options(self):
    build_scripts = self.build_scripts
    old_build.finalize_options(self)
    plat_specifier = ".{}-{}.{}".format(
        get_platform(), *sys.version_info[:2]
    )
    if build_scripts is None:
        self.build_scripts = os.path.join(
            self.build_base, "scripts" + plat_specifier
        )
["def","finalize_options","(","self",")",":","build_scripts","=","self.build_scripts","old_build.finalize_options","(","self",")","plat_specifier","=","``",".","{","}","-","{","}",".","{","}","''",".format","(","get_platform","(",")",",","*","sys.version_info","[",":2","]",")","if","build_scripts","is","None",":","self.build_scripts","=","os.path.join","(","self.build_base",",","``","scripts","''","+","plat_specifier",")"]
53
59
null
build.py
numpy/numpy/distutils/command/build.py
import os
import sys
from distutils.command.build import build
from distutils.util import get_platform
from numpy.distutils.command.config_compiler import show_fortran_compilers
10
1
5
1
1
0
3
Use image node_id 2 for calling the build obj's underlying member method code with example usage: obj.finalize_options() without return types
141
node_id 2
207,519
run
build
old_build
true
self
null
null
null
null
null
def run(self):
    old_build.run(self)
["def","run","(","self",")",":","old_build.run","(","self",")"]
61
62
null
build.py
numpy/numpy/distutils/command/build.py
import os
import sys
from distutils.command.build import build
from distutils.util import get_platform
from numpy.distutils.command.config_compiler import show_fortran_compilers
10
1
5
1
1
0
3
Use image node_id 3 for calling the build obj's underlying member method code with example usage: obj.run() without return types
128
node_id 3
207,520
__call__
KarrasVePipeline
DiffusionPipeline
true
self,batch_size,num_inference_steps,generator,output_type,return_dict
Pipeline for unconditional image generation.

Parameters:
    unet ([`UNet2DModel`]):
        A `UNet2DModel` to denoise the encoded image.
    scheduler ([`KarrasVeScheduler`]):
        A scheduler to be used in combination with `unet` to denoise the encoded image.
["Pipeline","for","unconditional","image","generation",".","Parameters",":","unet","(","[","`","UNet2DModel","`","]",")",":","A","`","UNet2DModel","`","to","denoise","the","encoded","image",".","scheduler","(","[","`","KarrasVeScheduler","`","]",")",":","A","scheduler","to","be","used","in","combination","with","`","unet","`","to","denoise","the","encoded","image","."]
The call function to the pipeline for generation.

Args:
    batch_size (`int`, *optional*, defaults to 1):
        The number of images to generate.
    generator (`torch.Generator`, *optional*):
        A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
        generation deterministic.
    num_inference_steps (`int`, *optional*, defaults to 50):
        The number of denoising steps. More denoising steps usually lead to a higher quality image at the
        expense of slower inference.
    output_type (`str`, *optional*, defaults to `"pil"`):
        The output format of the generated image. Choose between `PIL.Image` or `np.array`.
    return_dict (`bool`, *optional*, defaults to `True`):
        Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple.

Example:

Returns:
    [`~pipelines.ImagePipelineOutput`] or `tuple`:
        If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
        returned where the first element is a list with the generated images.
["The","call","function","to","the","pipeline","for","generation",".","Args",":","batch_size","(","`","int","`",",","*","optional","*",",","defaults","to","1",")",":","The","number","of","images","to","generate",".","generator","(","`","torch.Generator","`",",","*","optional","*",")",":","A","[","`","torch.Generator","`","]","(","https",":","\/\/pytorch.org\/docs\/stable\/generated\/torch.Generator.html",")","to","make","generation","deterministic",".","num_inference_steps","(","`","int","`",",","*","optional","*",",","defaults","to","50",")",":","The","number","of","denoising","steps",".","More","denoising","steps","usually","lead","to","a","higher","quality","image","at","the","expense","of","slower","inference",".","output_type","(","`","str","`",",","*","optional","*",",","defaults","to","`","``","pil","''","`",")",":","The","output","format","of","the","generated","image",".","Choose","between","`","PIL.Image","`","or","`","np.array","`",".","return_dict","(","`","bool","`",",","*","optional","*",",","defaults","to","`","True","`",")",":","Whether","or","not","to","return","a","[","`","ImagePipelineOutput","`","]","instead","of","a","plain","tuple",".","Example",":","Returns",":","[","`","~pipelines.ImagePipelineOutput","`","]","or","`","tuple","`",":","If","`","return_dict","`","is","`","True","`",",","[","`","~pipelines.ImagePipelineOutput","`","]","is","returned",",","otherwise","a","`","tuple","`","is","returned","where","the","first","element","is","a","list","with","the","generated","images","."]
ImagePipelineOutput,image
def __call__(
    self,
    batch_size: int = 1,
    num_inference_steps: int = 50,
    generator: Optional[
        Union[torch.Generator, List[torch.Generator]]
    ] = None,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
    **kwargs,
) -> Union[Tuple, ImagePipelineOutput]:
    r"""
    The call function to the pipeline for generation.

    Args:
        batch_size (`int`, *optional*, defaults to 1):
            The number of images to generate.
        generator (`torch.Generator`, *optional*):
            A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
            generation deterministic.
        num_inference_steps (`int`, *optional*, defaults to 50):
            The number of denoising steps. More denoising steps usually lead to a higher quality image at the
            expense of slower inference.
        output_type (`str`, *optional*, defaults to `"pil"`):
            The output format of the generated image. Choose between `PIL.Image` or `np.array`.
        return_dict (`bool`, *optional*, defaults to `True`):
            Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple.

    Example:

    Returns:
        [`~pipelines.ImagePipelineOutput`] or `tuple`:
            If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
            returned where the first element is a list with the generated images.
    """
    img_size = self.unet.config.sample_size
    shape = (batch_size, 3, img_size, img_size)

    model = self.unet

    # sample x_0 ~ N(0, sigma_0^2 * I)
    sample = (
        randn_tensor(shape, generator=generator, device=self.device)
        * self.scheduler.init_noise_sigma
    )

    self.scheduler.set_timesteps(num_inference_steps)

    for t in self.progress_bar(self.scheduler.timesteps):
        # here sigma_t == t_i from the paper
        sigma = self.scheduler.schedule[t]
        sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

        # 1. Select temporarily increased noise level sigma_hat
        # 2. Add new noise to move from sample_i to sample_hat
        sample_hat, sigma_hat = self.scheduler.add_noise_to_input(
            sample, sigma, generator=generator
        )

        # 3. Predict the noise residual given the noise magnitude `sigma_hat`
        # The model inputs and output are adjusted by following eq. (213) in [1].
        model_output = (sigma_hat / 2) * model(
            (sample_hat + 1) / 2, sigma_hat / 2
        ).sample

        # 4. Evaluate dx/dt at sigma_hat
        # 5. Take Euler step from sigma to sigma_prev
        step_output = self.scheduler.step(
            model_output, sigma_hat, sigma_prev, sample_hat
        )

        if sigma_prev != 0:
            # 6. Apply 2nd order correction
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_prev / 2) * model(
                (step_output.prev_sample + 1) / 2, sigma_prev / 2
            ).sample
            step_output = self.scheduler.step_correct(
                model_output,
                sigma_hat,
                sigma_prev,
                sample_hat,
                step_output.prev_sample,
                step_output["derivative"],
            )
        sample = step_output.prev_sample

    sample = (sample / 2 + 0.5).clamp(0, 1)
    image = sample.cpu().permute(0, 2, 3, 1).numpy()

    if output_type == "pil":
        image = self.numpy_to_pil(image)

    if not return_dict:
        return (image,)

    return ImagePipelineOutput(images=image)
["def","__call__","(","self",",","batch_size",":","int","=","1",",","num_inference_steps",":","int","=","50",",","generator",":","Optional","[","Union","[","torch.Generator",",","List","[","torch.Generator","]","]","]","=","None",",","output_type",":","Optional","[","str","]","=","``","pil","''",",","return_dict",":","bool","=","True",",","*","*","kwargs",",",")","-",">","Union","[","Tuple",",","ImagePipelineOutput","]",":","r","''","''","''","The","call","function","to","the","pipeline","for","generation",".","Args",":","batch_size","(","`","int","`",",","*","optional","*",",","defaults","to","1",")",":","The","number","of","images","to","generate",".","generator","(","`","torch.Generator","`",",","*","optional","*",")",":","A","[","`","torch.Generator","`","]","(","https",":","\/\/pytorch.org\/docs\/stable\/generated\/torch.Generator.html",")","to","make","generation","deterministic",".","num_inference_steps","(","`","int","`",",","*","optional","*",",","defaults","to","50",")",":","The","number","of","denoising","steps",".","More","denoising","steps","usually","lead","to","a","higher","quality","image","at","the","expense","of","slower","inference",".","output_type","(","`","str","`",",","*","optional","*",",","defaults","to","`","``","pil","''","`",")",":","The","output","format","of","the","generated","image",".","Choose","between","`","PIL.Image","`","or","`","np.array","`",".","return_dict","(","`","bool","`",",","*","optional","*",",","defaults","to","`","True","`",")",":","Whether","or","not","to","return","a","[","`","ImagePipelineOutput","`","]","instead","of","a","plain","tuple",".","Example",":","Returns",":","[","`","~pipelines.ImagePipelineOutput","`","]","or","`","tuple","`",":","If","`","return_dict","`","is","`","True","`",",","[","`","~pipelines.ImagePipelineOutput","`","]","is","returned",",","otherwise","a","`","tuple","`","is","returned","where","the","first","element","is","a","list","with","the","generated","images.","``","''","''","img_size","=","self.unet.config.sample_size","shape","=","(","batch_size",",","3",",","img_size",",","img_size",")","model","=","self.unet","#","sample","x_0","~","N","(","0",",","sigma_0^2","*","I",")","sample","=","(","randn_tensor","(","shape",",","generator=generator",",","device=self.device",")","*","self.scheduler.init_noise_sigma",")","self.scheduler.set_timesteps","(","num_inference_steps",")","for","t","in","self.progress_bar","(","self.scheduler.timesteps",")",":","#","here","sigma_t","==","t_i","from","the","paper","sigma","=","self.scheduler.schedule","[","t","]","sigma_prev","=","self.scheduler.schedule","[","t","-","1","]","if","t",">","0","else","0","#","1",".","Select","temporarily","increased","noise","level","sigma_hat","#","2",".","Add","new","noise","to","move","from","sample_i","to","sample_hat","sample_hat",",","sigma_hat","=","self.scheduler.add_noise_to_input","(","sample",",","sigma",",","generator=generator",")","#","3",".","Predict","the","noise","residual","given","the","noise","magnitude","`","sigma_hat","`","#","The","model","inputs","and","output","are","adjusted","by","following","eq",".","(","213",")","in","[","1","]",".","model_output","=","(","sigma_hat","\/","2",")","*","model","(","(","sample_hat","+","1",")","\/","2",",","sigma_hat","\/","2",")",".sample","#","4",".","Evaluate","dx\/dt","at","sigma_hat","#","5",".","Take","Euler","step","from","sigma","to","sigma_prev","step_output","=","self.scheduler.step","(","model_output",",","sigma_hat",",","sigma_prev",",","sample_hat",")","if","sigma_prev","!"
,"=","0",":","#","6",".","Apply","2nd","order","correction","#","The","model","inputs","and","output","are","adjusted","by","following","eq",".","(","213",")","in","[","1","]",".","model_output","=","(","sigma_prev","\/","2",")","*","model","(","(","step_output.prev_sample","+","1",")","\/","2",",","sigma_prev","\/","2",")",".sample","step_output","=","self.scheduler.step_correct","(","model_output",",","sigma_hat",",","sigma_prev",",","sample_hat",",","step_output.prev_sample",",","step_output","[","``","derivative","''","]",",",")","sample","=","step_output.prev_sample","sample","=","(","sample","\/","2","+","0.5",")",".clamp","(","0",",","1",")","image","=","sample.cpu","(",")",".permute","(","0",",","2",",","3",",","1",")",".numpy","(",")","if","output_type","==","``","pil","''",":","image","=","self.numpy_to_pil","(","image",")","if","not","return_dict",":","return","(","image",",",")","return","ImagePipelineOutput","(","images=image",")"]
45
128
null
pipeline_stochastic_karras_ve.py
llmga/llmga/diffusers/src/diffusers/pipelines/stochastic_karras_ve/pipeline_stochastic_karras_ve.py
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
10
1
6
1
1
0
2
Use image node_id 2 for calling the KarrasVePipeline obj's underlying member method code with example usage: obj.__call__(batch_size, num_inference_steps, generator, output_type, return_dict) and returns: ImagePipelineOutput, image
231
node_id 2
171,183
__init__
KarrasVePipeline
DiffusionPipeline
true
self,unet,scheduler
Pipeline for unconditional image generation.

Parameters:
    unet ([`UNet2DModel`]):
        A `UNet2DModel` to denoise the encoded image.
    scheduler ([`KarrasVeScheduler`]):
        A scheduler to be used in combination with `unet` to denoise the encoded image.
["Pipeline","for","unconditional","image","generation",".","Parameters",":","unet","(","[","`","UNet2DModel","`","]",")",":","A","`","UNet2DModel","`","to","denoise","the","encoded","image",".","scheduler","(","[","`","KarrasVeScheduler","`","]",")",":","A","scheduler","to","be","used","in","combination","with","`","unet","`","to","denoise","the","encoded","image","."]
null
null
KarrasVePipeline
def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
    super().__init__()
    self.register_modules(unet=unet, scheduler=scheduler)
["def","__init__","(","self",",","unet",":","UNet2DModel",",","scheduler",":","KarrasVeScheduler",")",":","super","(",")",".__init__","(",")","self.register_modules","(","unet=unet",",","scheduler=scheduler",")"]
40
42
null
pipeline_stochastic_karras_ve.py
llmga/llmga/diffusers/src/diffusers/pipelines/stochastic_karras_ve/pipeline_stochastic_karras_ve.py
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
10
1
6
1
1
0
2
Use image node_id 1 to create a new KarrasVePipeline object from inherited base classes: DiffusionPipeline with example: obj = KarrasVePipeline(unet, scheduler)
160
node_id 1
171,182
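A hedged usage sketch for the two KarrasVePipeline records above. It assumes a diffusers version that still ships this pipeline, and the checkpoint name is illustrative; any UNet2DModel/KarrasVeScheduler pair trained for this pipeline would do.

import torch
from diffusers import KarrasVePipeline  # assumes the pipeline is still exported

# "google/ncsnpp-celebahq-256" is an illustrative checkpoint name.
pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")

generator = torch.Generator().manual_seed(0)
image = pipe(
    batch_size=1, num_inference_steps=50, generator=generator
).images[0]
image.save("sample.png")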
xla_backend_helper
global
null
false
model,fake_tensor_inputs,boxed
null
null
null
null
unknown,compiled_graph
def xla_backend_helper(model, fake_tensor_inputs, boxed=False):
    try:
        import torch_xla.core.dynamo_bridge as bridge
    except ImportError as e:
        raise ImportError(
            "Please follow the instruction in https://github.com/pytorch/xla#pytorchxla to install torch_xla"
        ) from e

    compiled_graph = None

    def fwd(*args):
        nonlocal model
        nonlocal compiled_graph
        if compiled_graph is None:
            compiled_graph = bridge.extract_compiled_graph(model, args)
            del model
        return compiled_graph(*args)

    return make_boxed_func(fwd) if boxed else fwd
["def","xla_backend_helper","(","model",",","fake_tensor_inputs",",","boxed=False",")",":","try",":","import","torch_xla.core.dynamo_bridge","as","bridge","except","ImportError","as","e",":","raise","ImportError","(","``","Please","follow","the","instruction","in","https",":","\/\/github.com\/pytorch\/xla","#","pytorchxla","to","install","torch_xla","''",")","from","e","compiled_graph","=","None","def","fwd","(","*","args",")",":","nonlocal","model","nonlocal","compiled_graph","if","compiled_graph","is","None",":","compiled_graph","=","bridge.extract_compiled_graph","(","model",",","args",")","del","model","return","compiled_graph","(","*","args",")","return","make_boxed_func","(","fwd",")","if","boxed","else","fwd"]
35
53
null
torchxla.py
pytorch/torch/_dynamo/backends/torchxla.py
import logging
import warnings
from functorch.compile import make_boxed_func
from ..backends.common import aot_autograd
from .registry import register_backend, register_experimental_backend
10
null
5
null
null
5
null
Use image node_id 5 for calling a global function with example usage: xla_backend_helper(model, fake_tensor_inputs, boxed) and returns: unknown, compiled_graph
159
node_id 5
304,546
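Taken together, the five records from `torchxla.py` describe thin wrappers that Dynamo registers as compile backends, all funneling into `xla_backend_helper`. A hedged sketch of how they are selected; it assumes a working `torch_xla` installation, and the backend strings are an assumption about the names these wrappers were registered under in this era of PyTorch.

import torch

def f(x):
    return x * 2 + 1

# Inference-only variant (eval graph, no aot_autograd wrapping).
compiled = torch.compile(f, backend="openxla_eval")

# Training-capable variant wrapped in aot_autograd.
# compiled = torch.compile(f, backend="openxla")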
_get_modified_tests
global
null
false
null
null
null
null
python_test_file_to_test_name,set
def _get_modified_tests() -> Set[str]:
    try:
        changed_files = query_changed_files()
    except Exception as e:
        warn(f"Can't query changed test files due to {e}")
        # If unable to get changed files from git, quit without doing any sorting
        return set()

    return python_test_file_to_test_name(set(changed_files))
["def","_get_modified_tests","(",")","-",">","Set","[","str","]",":","try",":","changed_files","=","query_changed_files","(",")","except","Exception","as","e",":","warn","(","f","''","Ca","n't","query","changed","test","files","due","to","{","e","}","''",")","#","If","unable","to","get","changed","files","from","git",",","quit","without","doing","any","sorting","return","set","(",")","return","python_test_file_to_test_name","(","set","(","changed_files",")",")"]
33
41
null
edited_by_pr.py
pytorch/tools/testing/target_determination/heuristics/edited_by_pr.py
from typing import Any, Dict, List, Set
from warnings import warn
from tools.testing.target_determination.heuristics.interface import HeuristicInterface, TestPrioritizations
from tools.testing.target_determination.heuristics.utils import python_test_file_to_test_name, query_changed_files
10
null
4
null
null
1
null
Use image node_id 1 for calling a global function with example usage: _get_modified_tests() and returns: python_test_file_to_test_name, set
139
node_id 1
303,933
finalize_options
install_clib
Command
true
self
null
null
null
null
null
def finalize_options(self):
    self.set_undefined_options(
        "install", ("install_lib", "install_dir")
    )
["def","finalize_options","(","self",")",":","self.set_undefined_options","(","``","install","''",",","(","``","install_lib","''",",","``","install_dir","''",")",")"]
15
16
null
install_clib.py
numpy/numpy/distutils/command/install_clib.py
import os
from distutils.core import Command
from distutils.ccompiler import new_compiler
from numpy.distutils.misc_util import get_cmd
10
1
4
1
1
0
4
Use image node_id 2 for calling the install_clib obj's underlying member method code with example usage: obj.finalize_options() without return types
148
node_id 2
207,607
get_prediction_confidence
EditedByPR
HeuristicInterface
true
self,tests
null
null
null
null
unknown
def get_prediction_confidence(
    self, tests: List[str]
) -> Dict[str, float]:
    critical_tests = _get_modified_tests()
    return {test: 1 for test in critical_tests if test in tests}
["def","get_prediction_confidence","(","self",",","tests",":","List","[","str","]",")","-",">","Dict","[","str",",","float","]",":","critical_tests","=","_get_modified_tests","(",")","return","{","test",":","1","for","test","in","critical_tests","if","test","in","tests","}"]
28
30
null
edited_by_pr.py
pytorch/tools/testing/target_determination/heuristics/edited_by_pr.py
from typing import Any, Dict, List, Set
from warnings import warn
from tools.testing.target_determination.heuristics.interface import HeuristicInterface, TestPrioritizations
from tools.testing.target_determination.heuristics.utils import python_test_file_to_test_name, query_changed_files
10
1
4
1
1
1
3
Use image node_id 3 for calling the EditedByPR obj's underlying member method code with example usage: obj.get_prediction_confidence(tests) and returns: unknown
160
node_id 3
303,932
get_test_priorities
EditedByPR
HeuristicInterface
true
self,tests
null
null
null
null
test_rankings
def get_test_priorities(
    self, tests: List[str]
) -> TestPrioritizations:
    # Tests must always be returned in a deterministic order.
    # Otherwise it breaks our test sharding logic
    critical_tests = sorted(_get_modified_tests())

    test_rankings = TestPrioritizations(
        tests_being_ranked=tests, high_relevance=critical_tests
    )
    return test_rankings
["def","get_test_priorities","(","self",",","tests",":","List","[","str","]",")","-",">","TestPrioritizations",":","#","Tests","must","always","be","returned","in","a","deterministic","order",".","#","Otherwise","it","breaks","our","test","sharding","logic","critical_tests","=","sorted","(","_get_modified_tests","(",")",")","test_rankings","=","TestPrioritizations","(","tests_being_ranked=tests",",","high_relevance=critical_tests",")","return","test_rankings"]
18
26
null
edited_by_pr.py
pytorch/tools/testing/target_determination/heuristics/edited_by_pr.py
from typing import Any, Dict, List, Set
from warnings import warn
from tools.testing.target_determination.heuristics.interface import HeuristicInterface, TestPrioritizations
from tools.testing.target_determination.heuristics.utils import python_test_file_to_test_name, query_changed_files
10
1
4
1
1
1
3
Use image node_id 2 for calling the EditedByPR obj's underlying member method code with example usage: obj.get_test_priorities(tests) and returns: test_rankings
160
node_id 2
303,931
__init__
EditedByPR
HeuristicInterface
true
self
null
null
null
null
EditedByPR
def __init__(self, **kwargs: Dict[str, Any]):
    super().__init__(**kwargs)
["def","__init__","(","self",",","*","*","kwargs",":","Dict","[","str",",","Any","]",")",":","super","(",")",".__init__","(","*","*","kwargs",")"]
15
16
null
edited_by_pr.py
pytorch/tools/testing/target_determination/heuristics/edited_by_pr.py
from typing import Any, Dict, List, Set
from warnings import warn
from tools.testing.target_determination.heuristics.interface import HeuristicInterface, TestPrioritizations
from tools.testing.target_determination.heuristics.utils import python_test_file_to_test_name, query_changed_files
10
1
4
1
1
1
3
Use image node_id 1 to create a new EditedByPR object from inherited base classes: HeuristicInterface with example: obj = EditedByPR()
134
node_id 1
303,930
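A hedged sketch of driving the EditedByPR heuristic from the three records above; it assumes a PyTorch checkout where `tools.testing` is importable, and the test names are invented.

# Sketch only: run from a PyTorch checkout; test names are invented.
from tools.testing.target_determination.heuristics.edited_by_pr import EditedByPR

heuristic = EditedByPR()
tests = ["test_torch", "test_autograd"]

prioritized = heuristic.get_test_priorities(tests)   # TestPrioritizations, deterministic order
confidence = heuristic.get_prediction_confidence(tests)  # {test: 1} for PR-edited tests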
commit_ci
global
null
false
files,message
null
null
null
null
null
def commit_ci(files: List[str], message: str) -> None:
    # Check that there are no other modified files than the ones edited by this
    # tool
    stdout = subprocess.run(
        ["git", "status", "--porcelain"], stdout=subprocess.PIPE
    ).stdout.decode()
    for line in stdout.split("\n"):
        if line == "":
            continue
        if line[0] != " ":
            raise RuntimeError(
                f"Refusing to commit while other changes are already staged: {line}"
            )

    # Make the commit
    subprocess.run(["git", "add"] + files)
    subprocess.run(["git", "commit", "-m", message])
["def","commit_ci","(","files",":","List","[","str","]",",","message",":","str",")","-",">","None",":","#","Check","that","there","are","no","other","modified","files","than","the","ones","edited","by","this","#","tool","stdout","=","subprocess.run","(","[","``","git","''",",","``","status","''",",","``","--","porcelain","''","]",",","stdout=subprocess.PIPE",")",".stdout.decode","(",")","for","line","in","stdout.split","(","``","\\n","''",")",":","if","line","==","``","''",":","continue","if","line","[","0","]","!","=","``","``",":","raise","RuntimeError","(","f","''","Refusing","to","commit","while","other","changes","are","already","staged",":","{","line","}","''",")","#","Make","the","commit","subprocess.run","(","[","``","git","''",",","``","add","''","]","+","files",")","subprocess.run","(","[","``","git","''",",","``","commit","''",",","``","-m","''",",","message","]",")"]
95
111
null
explicit_ci_jobs.py
pytorch/tools/testing/explicit_ci_jobs.py
import argparse
import fnmatch
import pathlib
import subprocess
import textwrap
from typing import Any, Dict, List
import yaml
10
null
7
null
null
3
null
Use image node_id 3 for calling a global function with example usage: commit_ci(files, message) without return types
116
node_id 3
303,918
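A hedged calling sketch for `commit_ci` above; the file list and message are invented. Note from the code that it raises RuntimeError if `git status --porcelain` reports any other staged changes.

# Sketch only: file names and message are illustrative.
commit_ci(
    files=[".circleci/config.yml"],
    message="Update CI config to run only the requested jobs",
)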
get_filtered_circleci_config
global
null
false
workflows,relevant_jobs
null
null
null
null
new_workflows
def get_filtered_circleci_config(
    workflows: Dict[str, Any], relevant_jobs: List[str]
) -> Dict[str, Any]:
    """
    Given an existing CircleCI config, remove every job that's not listed in
    'relevant_jobs'
    """
    new_workflows: Dict[str, Any] = {}
    past_jobs: Dict[str, Any] = {}
    for workflow_name, workflow in workflows.items():
        if workflow_name not in WORKFLOWS_TO_CHECK:
            # Don't care about this workflow, skip it entirely
            continue

        for job_dict in workflow["jobs"]:
            for type, job in job_dict.items():
                if "name" not in job:
                    # Job doesn't have a name so it can't be handled
                    print("Skipping", type)
                else:
                    if job["name"] in relevant_jobs:
                        # Found a job that was specified at the CLI, add it to
                        # the new result
                        add_job(
                            new_workflows,
                            workflow_name,
                            type,
                            job,
                            past_jobs,
                        )

                    # Record the job in case it's needed as a dependency later
                    past_jobs[job["name"]] = {
                        "workflow_name": workflow_name,
                        "type": type,
                        "job": job,
                    }

    return new_workflows
["def","get_filtered_circleci_config","(","workflows",":","Dict","[","str",",","Any","]",",","relevant_jobs",":","List","[","str","]",")","-",">","Dict","[","str",",","Any","]",":","``","''","''","Given","an","existing","CircleCI","config",",","remove","every","job","that","'s","not","listed","in","'relevant_jobs'","``","''","''","new_workflows",":","Dict","[","str",",","Any","]","=","{","}","past_jobs",":","Dict","[","str",",","Any","]","=","{","}","for","workflow_name",",","workflow","in","workflows.items","(",")",":","if","workflow_name","not","in","WORKFLOWS_TO_CHECK",":","#","Do","n't","care","about","this","workflow",",","skip","it","entirely","continue","for","job_dict","in","workflow","[","``","jobs","''","]",":","for","type",",","job","in","job_dict.items","(",")",":","if","``","name","''","not","in","job",":","#","Job","does","n't","have","a","name","so","it","ca","n't","be","handled","print","(","``","Skipping","''",",","type",")","else",":","if","job","[","``","name","''","]","in","relevant_jobs",":","#","Found","a","job","that","was","specified","at","the","CLI",",","add","it","to","#","the","new","result","add_job","(","new_workflows",",","workflow_name",",","type",",","job",",","past_jobs",",",")","#","Record","the","job","in","case","it","'s","needed","as","a","dependency","later","past_jobs","[","job","[","``","name","''","]","]","=","{","``","workflow_name","''",":","workflow_name",",","``","type","''",":","type",",","``","job","''",":","job",",","}","return","new_workflows"]
60
92
null
explicit_ci_jobs.py
pytorch/tools/testing/explicit_ci_jobs.py
import argparse
import fnmatch
import pathlib
import subprocess
import textwrap
from typing import Any, Dict, List
import yaml
10
null
7
null
null
3
null
Use image node_id 2 for calling a global function with example usage: get_filtered_circleci_config(workflows, relevant_jobs) and returns: new_workflows
151
node_id 2
303,917
add_job
global
null
false
workflows,workflow_name,type,job,past_jobs
null
null
null
null
null
def add_job(
    workflows: Dict[str, Any],
    workflow_name: str,
    type: str,
    job: Dict[str, Any],
    past_jobs: Dict[str, Any],
) -> None:
    """
    Add job 'job' under 'type' and 'workflow_name' to 'workflow' in place.

    Also add any dependencies (they must already be in 'past_jobs')
    """
    if workflow_name not in workflows:
        workflows[workflow_name] = {"when": "always", "jobs": []}

    requires = job.get("requires", None)
    if requires is not None:
        for requirement in requires:
            dependency = past_jobs[requirement]
            add_job(
                workflows,
                dependency["workflow_name"],
                dependency["type"],
                dependency["job"],
                past_jobs,
            )

    workflows[workflow_name]["jobs"].append({type: job})
["def","add_job","(","workflows",":","Dict","[","str",",","Any","]",",","workflow_name",":","str",",","type",":","str",",","job",":","Dict","[","str",",","Any","]",",","past_jobs",":","Dict","[","str",",","Any","]",",",")","-",">","None",":","``","''","''","Add","job","'job","'","under","'type","'","and","'workflow_name","'","to","'workflow","'","in","place",".","Also","add","any","dependencies","(","they","must","already","be","in","'past_jobs","'",")","``","''","''","if","workflow_name","not","in","workflows",":","workflows","[","workflow_name","]","=","{","``","when","''",":","``","always","''",",","``","jobs","''",":","[","]","}","requires","=","job.get","(","``","requires","''",",","None",")","if","requires","is","not","None",":","for","requirement","in","requires",":","dependency","=","past_jobs","[","requirement","]","add_job","(","workflows",",","dependency","[","``","workflow_name","''","]",",","dependency","[","``","type","''","]",",","dependency","[","``","job","''","]",",","past_jobs",",",")","workflows","[","workflow_name","]","[","``","jobs","''","]",".append","(","{","type",":","job","}",")"]
31
57
null
explicit_ci_jobs.py
pytorch/tools/testing/explicit_ci_jobs.py
import argparse
import fnmatch
import pathlib
import subprocess
import textwrap
from typing import Any, Dict, List
import yaml
10
null
7
null
null
3
null
Use image node_id 1 for calling a global function with example usage: add_job(workflows, workflow_name, type, job, past_jobs) without return types
146
node_id 1
303,916
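A hedged end-to-end sketch tying `get_filtered_circleci_config` and `add_job` together; the config path and job name are illustrative, and it assumes both functions (plus the module's WORKFLOWS_TO_CHECK global) are in scope.

import yaml

# Illustrative path and job name.
with open(".circleci/config.yml") as f:
    config = yaml.safe_load(f)

filtered = get_filtered_circleci_config(
    config["workflows"], relevant_jobs=["pytorch_linux_build"]
)
config["workflows"] = filtered  # only the requested job and its dependencies remain
print(yaml.dump(config))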
test_runtime_signature_declaration_correct
ExecutorchCppSignatureTest
unittest
true
self
null
null
null
null
null
def test_runtime_signature_declaration_correct(self) -> None:
    with parametrize(
        use_const_ref_for_mutable_tensors=False,
        use_ilistref_for_tensor_lists=False,
    ):
        decl = self.sig.decl(include_context=True)
        self.assertEqual(
            decl,
            (
                "torch::executor::Tensor & foo_outf("
                "torch::executor::KernelRuntimeContext & context, "
                "const torch::executor::Tensor & input, "
                "torch::executor::Tensor & out)"
            ),
        )
        no_context_decl = self.sig.decl(include_context=False)
        self.assertEqual(
            no_context_decl,
            (
                "torch::executor::Tensor & foo_outf("
                "const torch::executor::Tensor & input, "
                "torch::executor::Tensor & out)"
            ),
        )
["def","test_runtime_signature_declaration_correct","(","self",")","-",">","None",":","with","parametrize","(","use_const_ref_for_mutable_tensors=False",",","use_ilistref_for_tensor_lists=False",",",")",":","decl","=","self.sig.decl","(","include_context=True",")","self.assertEqual","(","decl",",","(","``","torch",":",":executor",":",":Tensor","&","foo_outf","(","``","``","torch",":",":executor",":",":KernelRuntimeContext","&","context",",","``","``","const","torch",":",":executor",":",":Tensor","&","input",",","``","``","torch",":",":executor",":",":Tensor","&","out",")","''",")",",",")","no_context_decl","=","self.sig.decl","(","include_context=False",")","self.assertEqual","(","no_context_decl",",","(","``","torch",":",":executor",":",":Tensor","&","foo_outf","(","``","``","const","torch",":",":executor",":",":Tensor","&","input",",","``","``","torch",":",":executor",":",":Tensor","&","out",")","''",")",",",")"]
36
58
null
test_executorch_signatures.py
pytorch/tools/test/test_executorch_signatures.py
import unittest from torchgen.executorch.api.types import ExecutorchCppSignature from torchgen.local import parametrize from torchgen.model import Location, NativeFunction
10
1
4
1
1
0
4
Use image node_id 4 for calling the ExecutorchCppSignatureTest obj's underlying member method code with example usage: obj.test_runtime_signature_declaration_correct() without return types
188
node_id 4
303,814
test_runtime_signature_does_not_contain_runtime_context
ExecutorchCppSignatureTest
unittest
true
self
null
null
null
null
null
def test_runtime_signature_does_not_contain_runtime_context( self, ) -> None: # test if `KernelRuntimeContext` argument is missing in `RuntimeSignature` with parametrize( use_const_ref_for_mutable_tensors=False, use_ilistref_for_tensor_lists=False, ): args = self.sig.arguments(include_context=False) self.assertEqual(len(args), 2) self.assertFalse(any(a.name == "context" for a in args))
["def","test_runtime_signature_does_not_contain_runtime_context","(","self",",",")","-",">","None",":","#","test","if","`","KernelRuntimeContext","`","argument","is","missing","in","`","RuntimeSignature","`","with","parametrize","(","use_const_ref_for_mutable_tensors=False",",","use_ilistref_for_tensor_lists=False",",",")",":","args","=","self.sig.arguments","(","include_context=False",")","self.assertEqual","(","len","(","args",")",",","2",")","self.assertFalse","(","any","(","a.name","==","``","context","''","for","a","in","args",")",")"]
27
34
null
test_executorch_signatures.py
pytorch/tools/test/test_executorch_signatures.py
import unittest from torchgen.executorch.api.types import ExecutorchCppSignature from torchgen.local import parametrize from torchgen.model import Location, NativeFunction
10
1
4
1
1
0
4
Use image node_id 3 for calling the ExecutorchCppSignatureTest obj's underlying member method code with example usage: obj.test_runtime_signature_does_not_contain_runtime_context() without return types
201
node_id 3
303,813
test_runtime_signature_contains_runtime_context
ExecutorchCppSignatureTest
unittest
true
self
null
null
null
null
null
def test_runtime_signature_contains_runtime_context(self) -> None: # test if `KernelRuntimeContext` argument exists in `RuntimeSignature` with parametrize( use_const_ref_for_mutable_tensors=False, use_ilistref_for_tensor_lists=False, ): args = self.sig.arguments(include_context=True) self.assertEqual(len(args), 3) self.assertTrue(any(a.name == "context" for a in args))
["def","test_runtime_signature_contains_runtime_context","(","self",")","-",">","None",":","#","test","if","`","KernelRuntimeContext","`","argument","exists","in","`","RuntimeSignature","`","with","parametrize","(","use_const_ref_for_mutable_tensors=False",",","use_ilistref_for_tensor_lists=False",",",")",":","args","=","self.sig.arguments","(","include_context=True",")","self.assertEqual","(","len","(","args",")",",","3",")","self.assertTrue","(","any","(","a.name","==","``","context","''","for","a","in","args",")",")"]
18
25
null
test_executorch_signatures.py
pytorch/tools/test/test_executorch_signatures.py
import unittest from torchgen.executorch.api.types import ExecutorchCppSignature from torchgen.local import parametrize from torchgen.model import Location, NativeFunction
10
1
4
1
1
0
4
Use image node_id 2 for calling the ExecutorchCppSignatureTest obj's underlying member method code with example usage: obj.test_runtime_signature_contains_runtime_context() without return types
193
node_id 2
303,812
initialize_options
install_clib
Command
true
self
null
null
null
null
null
def initialize_options(self): self.install_dir = None self.outfiles = []
["def","initialize_options","(","self",")",":","self.install_dir","=","None","self.outfiles","=","[","]"]
11
13
null
install_clib.py
numpy/numpy/distutils/command/install_clib.py
import os from distutils.core import Command from distutils.ccompiler import new_compiler from numpy.distutils.misc_util import get_cmd
10
1
4
1
1
0
4
Use image node_id 1 for calling the install_clib obj's underlying member method code with example usage: obj.initialize_options() without return types
150
node_id 1
207,606
is_fbcode
global
null
false
null
null
null
null
unknown
def is_fbcode(): return not hasattr(torch.version, "git_version")
["def","is_fbcode","(",")",":","return","not","hasattr","(","torch.version",",","``","git_version","''",")"]
283
284
null
config.py
pytorch/torch/_dynamo/config.py
import inspect import os import re import sys import tempfile from os.path import abspath, dirname from typing import Any, Dict, Set, Type, TYPE_CHECKING import torch from torch.utils._config_module import install_config_module
10
null
9
null
null
1
null
Use image node_id 1 for calling a global function with example usage: is_fbcode() and returns: unknown
102
node_id 1
304,705
__init__
FakeContext
null
true
self,saved_tensors
null
null
null
null
FakeContext
def __init__(self, saved_tensors): # this will cache the results of saved_tensors # and will no longer call into c++ binding self.saved_tensors = saved_tensors
["def","__init__","(","self",",","saved_tensors",")",":","#","this","will","cache","the","results","of","saved_tensors","#","and","will","no","longer","call","into","c++","binding","self.saved_tensors","=","saved_tensors"]
59
62
null
external_utils.py
pytorch/torch/_dynamo/external_utils.py
import functools import torch import torch.utils._pytree
10
1
3
0
null
5
1
Use image node_id 1 to create a new FakeContext object with example: obj = FakeContext(saved_tensors)
102
node_id 1
304,850
is_compiling
global
null
false
null
null
null
null
False
def is_compiling() -> bool: return False
["def","is_compiling","(",")","-",">","bool",":","return","False"]
14
15
null
external_utils.py
pytorch/torch/_dynamo/external_utils.py
import functools import torch import torch.utils._pytree
10
null
3
null
null
5
null
Use image node_id 1 for calling a global function with example usage: is_compiling() and returns: False
103
node_id 1
304,851
test_gelu
ActivationsTests
unittest
true
self
null
null
null
null
null
def test_gelu(self): act = get_activation("gelu") self.assertIsInstance(act, nn.GELU) self.assertEqual( act(torch.tensor(-100, dtype=torch.float32)).item(), 0 ) self.assertNotEqual( act(torch.tensor(-1, dtype=torch.float32)).item(), 0 ) self.assertEqual( act(torch.tensor(0, dtype=torch.float32)).item(), 0 ) self.assertEqual( act(torch.tensor(20, dtype=torch.float32)).item(), 20 )
["def","test_gelu","(","self",")",":","act","=","get_activation","(","``","gelu","''",")","self.assertIsInstance","(","act",",","nn.GELU",")","self.assertEqual","(","act","(","torch.tensor","(","-100",",","dtype=torch.float32",")",")",".item","(",")",",","0",")","self.assertNotEqual","(","act","(","torch.tensor","(","-1",",","dtype=torch.float32",")",")",".item","(",")",",","0",")","self.assertEqual","(","act","(","torch.tensor","(","0",",","dtype=torch.float32",")",")",".item","(",")",",","0",")","self.assertEqual","(","act","(","torch.tensor","(","20",",","dtype=torch.float32",")",")",".item","(",")",",","20",")"]
40
48
null
test_activations.py
llmga/llmga/diffusers/tests/models/test_activations.py
import unittest import torch from torch import nn from diffusers.models.activations import get_activation
10
1
4
1
1
0
4
Use image node_id 4 for calling the ActivationsTests obj's underlying member method code with example usage: obj.test_gelu() without return types
145
node_id 4
172,742
test_mish
ActivationsTests
unittest
true
self
null
null
null
null
null
def test_mish(self): act = get_activation("mish") self.assertIsInstance(act, nn.Mish) self.assertEqual( act(torch.tensor(-200, dtype=torch.float32)).item(), 0 ) self.assertNotEqual( act(torch.tensor(-1, dtype=torch.float32)).item(), 0 ) self.assertEqual( act(torch.tensor(0, dtype=torch.float32)).item(), 0 ) self.assertEqual( act(torch.tensor(20, dtype=torch.float32)).item(), 20 )
["def","test_mish","(","self",")",":","act","=","get_activation","(","``","mish","''",")","self.assertIsInstance","(","act",",","nn.Mish",")","self.assertEqual","(","act","(","torch.tensor","(","-200",",","dtype=torch.float32",")",")",".item","(",")",",","0",")","self.assertNotEqual","(","act","(","torch.tensor","(","-1",",","dtype=torch.float32",")",")",".item","(",")",",","0",")","self.assertEqual","(","act","(","torch.tensor","(","0",",","dtype=torch.float32",")",")",".item","(",")",",","0",")","self.assertEqual","(","act","(","torch.tensor","(","20",",","dtype=torch.float32",")",")",".item","(",")",",","20",")"]
30
38
null
test_activations.py
llmga/llmga/diffusers/tests/models/test_activations.py
import unittest import torch from torch import nn from diffusers.models.activations import get_activation
10
1
4
1
1
0
4
Use image node_id 3 for calling the ActivationsTests obj's underlying member method code with example usage: obj.test_mish() without return types
145
node_id 3
172,741
test_silu
ActivationsTests
unittest
true
self
null
null
null
null
null
def test_silu(self): act = get_activation("silu") self.assertIsInstance(act, nn.SiLU) self.assertEqual( act(torch.tensor(-100, dtype=torch.float32)).item(), 0 ) self.assertNotEqual( act(torch.tensor(-1, dtype=torch.float32)).item(), 0 ) self.assertEqual( act(torch.tensor(0, dtype=torch.float32)).item(), 0 ) self.assertEqual( act(torch.tensor(20, dtype=torch.float32)).item(), 20 )
["def","test_silu","(","self",")",":","act","=","get_activation","(","``","silu","''",")","self.assertIsInstance","(","act",",","nn.SiLU",")","self.assertEqual","(","act","(","torch.tensor","(","-100",",","dtype=torch.float32",")",")",".item","(",")",",","0",")","self.assertNotEqual","(","act","(","torch.tensor","(","-1",",","dtype=torch.float32",")",")",".item","(",")",",","0",")","self.assertEqual","(","act","(","torch.tensor","(","0",",","dtype=torch.float32",")",")",".item","(",")",",","0",")","self.assertEqual","(","act","(","torch.tensor","(","20",",","dtype=torch.float32",")",")",".item","(",")",",","20",")"]
20
28
null
test_activations.py
llmga/llmga/diffusers/tests/models/test_activations.py
import unittest import torch from torch import nn from diffusers.models.activations import get_activation
10
1
4
1
1
0
4
Use image node_id 2 for calling the ActivationsTests obj's underlying member method code with example usage: obj.test_silu() without return types
145
node_id 2
172,740
test_swish
ActivationsTests
unittest
true
self
null
null
null
null
null
def test_swish(self): act = get_activation("swish") self.assertIsInstance(act, nn.SiLU) self.assertEqual( act(torch.tensor(-100, dtype=torch.float32)).item(), 0 ) self.assertNotEqual( act(torch.tensor(-1, dtype=torch.float32)).item(), 0 ) self.assertEqual( act(torch.tensor(0, dtype=torch.float32)).item(), 0 ) self.assertEqual( act(torch.tensor(20, dtype=torch.float32)).item(), 20 )
["def","test_swish","(","self",")",":","act","=","get_activation","(","``","swish","''",")","self.assertIsInstance","(","act",",","nn.SiLU",")","self.assertEqual","(","act","(","torch.tensor","(","-100",",","dtype=torch.float32",")",")",".item","(",")",",","0",")","self.assertNotEqual","(","act","(","torch.tensor","(","-1",",","dtype=torch.float32",")",")",".item","(",")",",","0",")","self.assertEqual","(","act","(","torch.tensor","(","0",",","dtype=torch.float32",")",")",".item","(",")",",","0",")","self.assertEqual","(","act","(","torch.tensor","(","20",",","dtype=torch.float32",")",")",".item","(",")",",","20",")"]
10
18
null
test_activations.py
llmga/llmga/diffusers/tests/models/test_activations.py
import unittest import torch from torch import nn from diffusers.models.activations import get_activation
10
1
4
1
1
0
4
Use image node_id 1 for calling the ActivationsTests obj's underlying member method code with example usage: obj.test_swish() without return types
146
node_id 1
172,739
test_int
TestABC
null
true
self
null
null
null
null
null
def test_int(self): for t in sctypes["int"]: assert_( isinstance(t(), numbers.Integral), f"{t.__name__} is not instance of Integral", ) assert_( issubclass(t, numbers.Integral), f"{t.__name__} is not subclass of Integral", )
["def","test_int","(","self",")",":","for","t","in","sctypes","[","``","int","''","]",":","assert_","(","isinstance","(","t","(",")",",","numbers.Integral",")",",","f","''","{","t.__name__","}","is","not","instance","of","Integral","''",",",")","assert_","(","issubclass","(","t",",","numbers.Integral",")",",","f","''","{","t.__name__","}","is","not","subclass","of","Integral","''",",",")"]
42
47
null
test_abc.py
numpy/numpy/_core/tests/test_abc.py
from numpy.testing import assert_ import numbers import numpy from numpy._core.numerictypes import sctypes
10
1
4
0
null
0
5
Use image node_id 4 for calling the TestABC obj's underlying member method code with example usage: obj.test_int() without return types
135
node_id 4
203,776
test_uint
TestABC
null
true
self
null
null
null
null
null
def test_uint(self): for t in sctypes["uint"]: assert_( isinstance(t(), numbers.Integral), f"{t.__name__} is not instance of Integral", ) assert_( issubclass(t, numbers.Integral), f"{t.__name__} is not subclass of Integral", )
["def","test_uint","(","self",")",":","for","t","in","sctypes","[","``","uint","''","]",":","assert_","(","isinstance","(","t","(",")",",","numbers.Integral",")",",","f","''","{","t.__name__","}","is","not","instance","of","Integral","''",",",")","assert_","(","issubclass","(","t",",","numbers.Integral",")",",","f","''","{","t.__name__","}","is","not","subclass","of","Integral","''",",",")"]
49
54
null
test_abc.py
numpy/numpy/_core/tests/test_abc.py
from numpy.testing import assert_ import numbers import numpy from numpy._core.numerictypes import sctypes
10
1
4
0
null
0
5
Use image node_id 5 for calling the TestABC obj's underlying member method code with example usage: obj.test_uint() without return types
136
node_id 5
203,777
__iter__
ProxyValue
null
true
self
null
null
null
null
null
def __iter__(self): yield from self.data
["def","__iter__","(","self",")",":","yield","from","self.data"]
37
38
null
proxy_value.py
pytorch/torch/_export/pass_infra/proxy_value.py
from typing import Union import torch
10
1
2
0
null
0
7
Use image node_id 6 for calling the ProxyValue obj's underlying member method code with example usage: obj.__iter__() without return types
138
node_id 6
306,398
is_tensor
ProxyValue
null
true
self
null
null
null
null
isinstance
def is_tensor(self) -> bool: return isinstance(self.data, torch.Tensor)
["def","is_tensor","(","self",")","-",">","bool",":","return","isinstance","(","self.data",",","torch.Tensor",")"]
33
34
null
proxy_value.py
pytorch/torch/_export/pass_infra/proxy_value.py
from typing import Union import torch
10
1
2
0
null
0
7
Use image node_id 5 for calling the ProxyValue obj's underlying member method code with example usage: obj.is_tensor() and returns: isinstance
142
node_id 5
306,397
to_tensor
ProxyValue
null
true
self
null
null
null
null
self
def to_tensor(self) -> torch.Tensor: assert isinstance(self.data, torch.Tensor) return self.data
["def","to_tensor","(","self",")","-",">","torch.Tensor",":","assert","isinstance","(","self.data",",","torch.Tensor",")","return","self.data"]
29
31
null
proxy_value.py
pytorch/torch/_export/pass_infra/proxy_value.py
from typing import Union import torch
10
1
2
0
null
0
7
Use image node_id 4 for calling the ProxyValue obj's underlying member method code with example usage: obj.to_tensor() and returns: self
136
node_id 4
306,396
proxy
ProxyValue
null
true
self
null
null
null
null
self
def proxy(self) -> torch.fx.Proxy: if not isinstance(self.proxy_or_node, torch.fx.Proxy): raise RuntimeError( f"ProxyValue doesn't have attached Proxy object. Node: {self.proxy_or_node.format_node()}" ) return self.proxy_or_node
["def","proxy","(","self",")","-",">","torch.fx.Proxy",":","if","not","isinstance","(","self.proxy_or_node",",","torch.fx.Proxy",")",":","raise","RuntimeError","(","f","''","ProxyValue","does","n't","have","attached","Proxy","object",".","Node",":","{","self.proxy_or_node.format_node","(",")","}","''",")","return","self.proxy_or_node"]
22
27
null
proxy_value.py
pytorch/torch/_export/pass_infra/proxy_value.py
from typing import Union import torch
10
1
2
0
null
0
7
Use image node_id 3 for calling the ProxyValue obj's underlying member method code with example usage: obj.proxy() and returns: self
132
node_id 3
306,395
node
ProxyValue
null
true
self
null
null
null
null
self,self
def node(self) -> torch.fx.Node: if isinstance(self.proxy_or_node, torch.fx.Node): return self.proxy_or_node assert isinstance(self.proxy_or_node, torch.fx.Proxy) return self.proxy_or_node.node
["def","node","(","self",")","-",">","torch.fx.Node",":","if","isinstance","(","self.proxy_or_node",",","torch.fx.Node",")",":","return","self.proxy_or_node","assert","isinstance","(","self.proxy_or_node",",","torch.fx.Proxy",")","return","self.proxy_or_node.node"]
15
19
null
proxy_value.py
pytorch/torch/_export/pass_infra/proxy_value.py
from typing import Union import torch
10
1
2
0
null
0
7
Use image node_id 2 for calling the ProxyValue obj's underlying member method code with example usage: obj.node() and returns: self, self
137
node_id 2
306,394
__init__
ProxyValue
null
true
self,data,proxy
null
null
null
null
ProxyValue
def __init__(self, data, proxy: Union[torch.fx.Proxy, torch.fx.Node]): # pyre-ignore self.data = data self.proxy_or_node = proxy
["def","__init__","(","self",",","data",",","proxy",":","Union","[","torch.fx.Proxy",",","torch.fx.Node","]",")",":","#","pyre-ignore","self.data","=","data","self.proxy_or_node","=","proxy"]
9
12
null
proxy_value.py
pytorch/torch/_export/pass_infra/proxy_value.py
from typing import Union import torch
10
1
2
0
null
0
7
Use image node_id 1 to create a new ProxyValue object with example: obj = ProxyValue(data, proxy)
98
node_id 1
306,393
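A usage sketch for the ProxyValue records above, assuming a torch.fx-traced function to supply a Node (illustrative only; node/proxy are called as defined in the records, though upstream may expose them as properties):
import torch
import torch.fx

def f(x):
    return x + 1

placeholder = next(iter(torch.fx.symbolic_trace(f).graph.nodes))  # an fx.Node

val = ProxyValue(torch.randn(2), placeholder)
assert val.is_tensor()
assert val.to_tensor().shape == (2,)
assert val.node() is placeholder
try:
    val.proxy()   # only a Node is attached, so this raises RuntimeError
except RuntimeError:
    pass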
_create_graph_module_for_export
global
null
false
root,graph
null
null
null
null
gm
def _create_graph_module_for_export(root, graph): try: gm = torch.fx.GraphModule(root, graph) except SyntaxError: # If custom objects stored in memory are being used in the graph, # the generated python code will result in a syntax error on the custom # object, since it is unable to parse the in-memory object. However # we can still run the graph eagerly through torch.fx.Interpreter, # so we will bypass this error. warnings.warn( "Unable to execute the generated python source code from " "the graph. The graph module will no longer be directly callable, " "but you can still run the ExportedProgram, and if needed, you can " "run the graph module eagerly using torch.fx.Interpreter." ) gm = torch.fx.GraphModule(root, torch.fx.Graph()) gm._graph = graph return gm
["def","_create_graph_module_for_export","(","root",",","graph",")",":","try",":","gm","=","torch.fx.GraphModule","(","root",",","graph",")","except","SyntaxError",":","#","If","custom","objects","stored","in","memory","are","being","used","in","the","graph",",","#","the","generated","python","code","will","result","in","a","syntax","error","on","the","custom","#","object",",","since","it","is","unable","to","parse","the","in-memory","object",".","However","#","we","can","still","run","the","graph","eagerly","through","torch.fx.Interpreter",",","#","so","we","will","bypass","this","error",".","warnings.warn","(","``","Unable","to","execute","the","generated","python","source","code","from","``","``","the","graph",".","The","graph","module","will","no","longer","be","directly","callable",",","``","``","but","you","can","still","run","the","ExportedProgram",",","and","if","needed",",","you","can","``","``","run","the","graph","module","eagerly","using","torch.fx.Interpreter",".","''",")","gm","=","torch.fx.GraphModule","(","root",",","torch.fx.Graph","(",")",")","gm._graph","=","graph","return","gm"]
42
60
null
exported_program.py
pytorch/torch/_export/exported_program.py
import dataclasses from typing import Optional import warnings import torch import torch.fx import torch.utils._pytree from torch.export.graph_signature import ExportBackwardSignature, ExportGraphSignature from torch.export.exported_program import ExportedProgram, ModuleCallEntry, ModuleCallSignature
10
null
8
null
null
1
null
Use image node_id 1 for calling a global function with example usage: _create_graph_module_for_export(root, graph) and returns: gm
130
node_id 1
306,369
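A sketch of the happy path (hypothetical module; the SyntaxError fallback only triggers for graphs holding in-memory custom objects):
import torch
import torch.fx

class M(torch.nn.Module):
    def forward(self, x):
        return x * 2

graph = torch.fx.symbolic_trace(M()).graph
gm = _create_graph_module_for_export(M(), graph)
print(gm(torch.ones(3)))   # tensor([2., 2., 2.])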
internal_assert
global
null
false
pred,assert_msg
null
null
null
null
null
def internal_assert(pred: bool, assert_msg: str) -> None: """ This is exir's custom assert method. It internally just throws InternalError. Note that the sole purpose is to throw our own error while maintaining similar syntax as python assert. """ if not pred: raise InternalError(assert_msg)
["def","internal_assert","(","pred",":","bool",",","assert_msg",":","str",")","-",">","None",":","``","''","''","This","is","exir","'s","custom","assert","method",".","It","internally","just","throws","InternalError",".","Note","that","the","sole","purpose","is","to","throw","our","own","error","while","maintaining","similar","syntax","as","python","assert.","``","''","''","if","not","pred",":","raise","InternalError","(","assert_msg",")"]
25
33
null
error.py
pytorch/torch/_export/error.py
from enum import Enum
10
null
1
null
null
1
null
Use image node_id 1 for calling a global function with example usage: internal_assert(pred, assert_msg) without return types
124
node_id 1
306,368
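Call-pattern sketch — internal_assert reads like a plain assert but raises the module's InternalError:
internal_assert(1 + 1 == 2, "arithmetic holds")        # no-op on success
try:
    internal_assert(False, "graph invariant violated")
except InternalError as e:
    print(e)   # graph invariant violated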
__init__
ExportError
Exception
true
self,error_code,message
This type of exception is raised for errors that are directly caused by the user code. In general, user errors happen during model authoring, tracing, using our public facing APIs, and writing graph passes.
["This","type","of","exception","is","raised","for","errors","that","are","directly","caused","by","the","user","code",".","In","general",",","user","errors","happen","during","model","authoring",",","tracing",",","using","our","public","facing","APIs",",","and","writing","graph","passes","."]
null
null
ExportError
def __init__(self, error_code: ExportErrorType, message: str) -> None: prefix = f"[{error_code}]: " super().__init__(prefix + message)
["def","__init__","(","self",",","error_code",":","ExportErrorType",",","message",":","str",")","-",">","None",":","prefix","=","f","''","[","{","error_code","}","]",":","``","super","(",")",".__init__","(","prefix","+","message",")"]
54
56
null
error.py
pytorch/torch/_export/error.py
from enum import Enum
10
3
1
3
1
1
1
Use image node_id 1 to create a new ExportError object from inherited base classes: Exception with example: obj = ExportError(error_code, message)
146
node_id 1
306,367
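Usage sketch; ExportErrorType is the Enum from the same error.py module, and the member name NOT_SUPPORTED is assumed here for illustration:
try:
    raise ExportError(ExportErrorType.NOT_SUPPORTED, "dynamic control flow")
except ExportError as e:
    print(e)   # roughly: [ExportErrorType.NOT_SUPPORTED]: dynamic control flow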
__init__
InternalError
Exception
true
self,message
Raised when an internal invariance is violated in EXIR stack. Should hint users to report a bug to dev and expose the original error message.
["Raised","when","an","internal","invariance","is","violated","in","EXIR","stack",".","Should","hint","users","to","report","a","bug","to","dev","and","expose","the","original","error","message","."]
null
null
InternalError
def __init__(self, message: str) -> None: super().__init__(message)
["def","__init__","(","self",",","message",":","str",")","-",">","None",":","super","(",")",".__init__","(","message",")"]
43
44
null
error.py
pytorch/torch/_export/error.py
from enum import Enum
10
3
1
3
1
1
1
Use image node_id 1 to create a new InternalError object from inherited base classes: Exception with example: obj = InternalError(message)
138
node_id 1
306,366
__init__
Perplexity
Metric
true
self
Perplexity measures how well a language model predicts a text sample. It's calculated as the average number of bits per word a model needs to represent the sample. Args: kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Examples: >>> import torch >>> preds = torch.rand(2, 8, 5, generator=torch.manual_seed(22)) >>> target = torch.randint(5, (2, 8), generator=torch.manual_seed(22)) >>> target[0, 6:] = -100 >>> metric = Perplexity(ignore_index=-100) >>> metric(preds, target) tensor(5.2545)
["Perplexity","measures","how","well","a","language","model","predicts","a","text","sample",".","It","'s","calculated","as","the","average","number","of","bits","per","word","a","model","needs","to","represent","the","sample",".","Args",":","kwargs",":","Additional","keyword","arguments",",","see",":","ref",":","`","Metric","kwargs","`","for","more","info",".","Examples",":",">",">",">","import","torch",">",">",">","preds","=","torch.rand","(","2",",","8",",","5",",","generator=torch.manual_seed","(","22",")",")",">",">",">","target","=","torch.randint","(","5",",","(","2",",","8",")",",","generator=torch.manual_seed","(","22",")",")",">",">",">","target","[","0",",","6",":","]","=","-100",">",">",">","metric","=","Perplexity","(","ignore_index=-100",")",">",">",">","metric","(","preds",",","target",")","tensor","(","5.2545",")"]
null
null
Perplexity
def __init__(self, **kwargs: Dict[str, Any]): super().__init__(**kwargs) self.add_state( "total_log_probs", default=torch.tensor(0.0, dtype=torch.float64), dist_reduce_fx="sum", ) self.add_state( "count", default=torch.tensor(0, dtype=torch.int64), dist_reduce_fx="sum", ) self.loss_fn = CrossEntropyLoss()
["def","__init__","(","self",",","*","*","kwargs",":","Dict","[","str",",","Any","]",")",":","super","(",")",".__init__","(","*","*","kwargs",")","self.add_state","(","``","total_log_probs","''",",","default=torch.tensor","(","0.0",",","dtype=torch.float64",")",",","dist_reduce_fx=","''","sum","''",",",")","self.add_state","(","``","count","''",",","default=torch.tensor","(","0",",","dtype=torch.int64",")",",","dist_reduce_fx=","''","sum","''",",",")","self.loss_fn","=","CrossEntropyLoss","(",")"]
43
49
null
perplexity.py
flash-attention/training/src/metrics/perplexity.py
from typing import Any, Dict, Optional import torch import torch.nn.functional from torch import Tensor from torchmetrics import Metric
10
1
5
1
1
0
3
Use image node_id 1 to create a new Perplexity object from inherited base classes: Metric with example: obj = Perplexity()
122
node_id 1
139,890
update
Perplexity
Metric
true
self,preds,target,loss
Perplexity measures how well a language model predicts a text sample. It's calculated as the average number of bits per word a model needs to represent the sample. Args: kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Examples: >>> import torch >>> preds = torch.rand(2, 8, 5, generator=torch.manual_seed(22)) >>> target = torch.randint(5, (2, 8), generator=torch.manual_seed(22)) >>> target[0, 6:] = -100 >>> metric = Perplexity(ignore_index=-100) >>> metric(preds, target) tensor(5.2545)
["Perplexity","measures","how","well","a","language","model","predicts","a","text","sample",".","It","'s","calculated","as","the","average","number","of","bits","per","word","a","model","needs","to","represent","the","sample",".","Args",":","kwargs",":","Additional","keyword","arguments",",","see",":","ref",":","`","Metric","kwargs","`","for","more","info",".","Examples",":",">",">",">","import","torch",">",">",">","preds","=","torch.rand","(","2",",","8",",","5",",","generator=torch.manual_seed","(","22",")",")",">",">",">","target","=","torch.randint","(","5",",","(","2",",","8",")",",","generator=torch.manual_seed","(","22",")",")",">",">",">","target","[","0",",","6",":","]","=","-100",">",">",">","metric","=","Perplexity","(","ignore_index=-100",")",">",">",">","metric","(","preds",",","target",")","tensor","(","5.2545",")"]
Compute and store intermediate statistics for Perplexity. Args: preds: Probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size]. target: Ground truth values with a shape [batch_size, seq_len].
["Compute","and","store","intermediate","statistics","for","Perplexity",".","Args",":","preds",":","Probabilities","assigned","to","each","token","in","a","sequence","with","shape","[","batch_size",",","seq_len",",","vocab_size","]",".","target",":","Ground","truth","values","with","a","shape","[","batch_size",",","seq_len","]","."]
null
def update(self, preds: Tensor, target: Tensor, loss: Optional[Tensor] = None) -> None: # type: ignore """Compute and store intermediate statistics for Perplexity. Args: preds: Probabilities assigned to each token in a sequence with shape [batch_size, seq_len, vocab_size]. target: Ground truth values with a shape [batch_size, seq_len]. """ count = target.numel() if loss is None: loss = self.loss_fn(preds, target) self.total_log_probs += loss.double() * count self.count += count
["def","update","(","self",",","preds",":","Tensor",",","target",":","Tensor",",","loss",":","Optional","[","Tensor","]","=","None",")","-",">","None",":","#","type",":","ignore","``","''","''","Compute","and","store","intermediate","statistics","for","Perplexity",".","Args",":","preds",":","Probabilities","assigned","to","each","token","in","a","sequence","with","shape","[","batch_size",",","seq_len",",","vocab_size","]",".","target",":","Ground","truth","values","with","a","shape","[","batch_size",",","seq_len","]",".","``","''","''","count","=","target.numel","(",")","if","loss","is","None",":","loss","=","self.loss_fn","(","preds",",","target",")","self.total_log_probs","+=","loss.double","(",")","*","count","self.count","+=","count"]
51
63
null
perplexity.py
flash-attention/training/src/metrics/perplexity.py
from typing import Any, Dict, Optional import torch import torch.nn.functional from torch import Tensor from torchmetrics import Metric
10
1
5
1
1
0
3
Use image node_id 2 for calling the Perplexity obj's underlying member method code with example usage: obj.update(preds, target, loss) without return types
155
node_id 2
139,891
compute
Perplexity
Metric
true
self
Perplexity measures how well a language model predicts a text sample. It's calculated as the average number of bits per word a model needs to represent the sample. Args: kwargs: Additional keyword arguments, see :ref:`Metric kwargs` for more info. Examples: >>> import torch >>> preds = torch.rand(2, 8, 5, generator=torch.manual_seed(22)) >>> target = torch.randint(5, (2, 8), generator=torch.manual_seed(22)) >>> target[0, 6:] = -100 >>> metric = Perplexity(ignore_index=-100) >>> metric(preds, target) tensor(5.2545)
["Perplexity","measures","how","well","a","language","model","predicts","a","text","sample",".","It","'s","calculated","as","the","average","number","of","bits","per","word","a","model","needs","to","represent","the","sample",".","Args",":","kwargs",":","Additional","keyword","arguments",",","see",":","ref",":","`","Metric","kwargs","`","for","more","info",".","Examples",":",">",">",">","import","torch",">",">",">","preds","=","torch.rand","(","2",",","8",",","5",",","generator=torch.manual_seed","(","22",")",")",">",">",">","target","=","torch.randint","(","5",",","(","2",",","8",")",",","generator=torch.manual_seed","(","22",")",")",">",">",">","target","[","0",",","6",":","]","=","-100",">",">",">","metric","=","Perplexity","(","ignore_index=-100",")",">",">",">","metric","(","preds",",","target",")","tensor","(","5.2545",")"]
Compute the Perplexity. Returns: Perplexity
["Compute","the","Perplexity",".","Returns",":","Perplexity"]
torch
def compute(self) -> Tensor: """Compute the Perplexity. Returns: Perplexity """ return torch.exp(self.total_log_probs / self.count)
["def","compute","(","self",")","-",">","Tensor",":","``","''","''","Compute","the","Perplexity",".","Returns",":","Perplexity","``","''","''","return","torch.exp","(","self.total_log_probs","\/","self.count",")"]
65
70
null
perplexity.py
flash-attention/training/src/metrics/perplexity.py
from typing import Any, Dict, Optional import torch import torch.nn.functional from torch import Tensor from torchmetrics import Metric
10
1
5
1
1
0
3
Use image node_id 3 for calling the Perplexity obj's underlying member method code with example usage: obj.compute() and returns: torch
135
node_id 3
139,892
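An evaluation-loop sketch for the Perplexity records above (illustrative, random data). The loss is passed explicitly to sidestep the shape conventions of the internal loss_fn; note the CrossEntropyLoss used in __init__ comes from an import the record's imports field does not show:
import torch

metric = Perplexity()
for _ in range(3):
    preds = torch.randn(2, 8, 5)               # [batch, seq_len, vocab]
    target = torch.randint(5, (2, 8))          # [batch, seq_len]
    loss = torch.nn.functional.cross_entropy(  # mean NLL over all tokens
        preds.reshape(-1, 5), target.reshape(-1)
    )
    metric.update(preds, target, loss=loss)
print(metric.compute())                        # exp(total_log_probs / count)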
call_backward
global
null
false
backward_fn,saved_tensors
null
null
null
null
grads
def call_backward(backward_fn, saved_tensors, *args): grads = backward_fn(FakeContext(saved_tensors), *args) # in eager, we wrap in a tuple when there's only one grad output if type(grads) is not tuple: grads = (grads,) return grads
["def","call_backward","(","backward_fn",",","saved_tensors",",","*","args",")",":","grads","=","backward_fn","(","FakeContext","(","saved_tensors",")",",","*","args",")","#","in","eager",",","we","wrap","in","a","tuple","when","there","'s","only","one","grad","output","if","type","(","grads",")","is","not","tuple",":","grads","=","(","grads",",",")","return","grads"]
65
72
null
external_utils.py
pytorch/torch/_dynamo/external_utils.py
import functools import torch import torch.utils._pytree
10
null
3
null
null
5
null
Use image node_id 5 for calling a global function with example usage: call_backward(backward_fn, saved_tensors) and returns: grads
130
node_id 5
304,855
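A sketch tying call_backward to the FakeContext record above — replaying a custom Function's backward outside autograd (illustrative):
import torch

class Square(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x * x

    @staticmethod
    def backward(ctx, grad_out):
        (x,) = ctx.saved_tensors
        return 2 * x * grad_out

x = torch.tensor(3.0)
grads = call_backward(Square.backward, (x,), torch.tensor(1.0))
print(grads)   # (tensor(6.),) -- a single grad gets wrapped in a tuple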
wrap_numpy
global
null
false
f
null
null
null
null
wrap,f,pytree
def wrap_numpy(f): r"""Decorator that turns a function from ``np.ndarray``s to ``np.ndarray``s into a function from ``torch.Tensor``s to ``torch.Tensor``s. """ if not np: return f @functools.wraps(f) def wrap(*args, **kwargs): args, kwargs = pytree.tree_map_only( torch.Tensor, lambda x: x.numpy(), (args, kwargs) ) out = f(*args, **kwargs) return pytree.tree_map_only( np.ndarray, lambda x: torch.as_tensor(x), out ) return wrap
["def","wrap_numpy","(","f",")",":","r","''","''","''","Decorator","that","turns","a","function","from","``","np.ndarray","``","s","to","``","np.ndarray","``","s","into","a","function","from","``","torch.Tensor","``","s","to","``","torch.Tensor","``","s.","``","''","''","if","not","np",":","return","f","@","functools.wraps","(","f",")","def","wrap","(","*","args",",","*","*","kwargs",")",":","args",",","kwargs","=","pytree.tree_map_only","(","torch.Tensor",",","lambda","x",":","x.numpy","(",")",",","(","args",",","kwargs",")",")","out","=","f","(","*","args",",","*","*","kwargs",")","return","pytree.tree_map_only","(","np.ndarray",",","lambda","x",":","torch.as_tensor","(","x",")",",","out",")","return","wrap"]
40
55
null
external_utils.py
pytorch/torch/_dynamo/external_utils.py
import functools import torch import torch.utils._pytree
10
null
3
null
null
5
null
Use image node_id 4 for calling a global function with example usage: wrap_numpy(f) and returns: wrap, f, pytree
112
node_id 4
304,854
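Decorator sketch for wrap_numpy — Tensors in, Tensors out; assumes numpy is importable as np in the module (the record's imports field omits the guarded numpy import in the source):
import numpy as np
import torch

@wrap_numpy
def np_clip(x):
    return np.clip(x, 0.0, 1.0)   # a plain ndarray -> ndarray function

out = np_clip(torch.tensor([-0.5, 0.5, 1.5]))
print(out)   # tensor([0.0000, 0.5000, 1.0000])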
call_hook
global
null
false
hook
null
null
null
null
result,args
def call_hook(hook, *args): """ Used by compiled autograd to handle hook returning None """ result = hook(*args) if result is None: return args[0] return result
["def","call_hook","(","hook",",","*","args",")",":","``","''","''","Used","by","compiled","autograd","to","handle","hook","returning","None","``","''","''","result","=","hook","(","*","args",")","if","result","is","None",":","return","args","[","0","]","return","result"]
30
37
null
external_utils.py
pytorch/torch/_dynamo/external_utils.py
import functools import torch import torch.utils._pytree
10
null
3
null
null
5
null
Use image node_id 3 for calling a global function with example usage: call_hook(hook) and returns: result, args
111
node_id 3
304,853
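A two-line behavior sketch: a hook returning None falls back to the original gradient, otherwise its result wins:
import torch

grad = torch.ones(2)
assert torch.equal(call_hook(lambda g: None, grad), grad)
assert torch.equal(call_hook(lambda g: 2 * g, grad), 2 * grad)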
wrap_inline
global
null
false
fn
null
null
null
null
inner,fn
def wrap_inline(fn): """ Create an extra frame around fn that is not in skipfiles """ @functools.wraps(fn) def inner(*args, **kwargs): return fn(*args, **kwargs) return inner
["def","wrap_inline","(","fn",")",":","``","''","''","Create","an","extra","frame","around","fn","that","is","not","in","skipfiles","``","''","''","@","functools.wraps","(","fn",")","def","inner","(","*","args",",","*","*","kwargs",")",":","return","fn","(","*","args",",","*","*","kwargs",")","return","inner"]
18
27
null
external_utils.py
pytorch/torch/_dynamo/external_utils.py
import functools import torch import torch.utils._pytree
10
null
3
null
null
5
null
Use image node_id 2 for calling a global function with example usage: wrap_inline(fn) and returns: inner, fn
108
node_id 2
304,852
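Behavior sketch: the wrapper is call-transparent and keeps the function's metadata via functools.wraps:
def add(a, b):
    return a + b

wrapped = wrap_inline(add)
assert wrapped(1, 2) == 3
assert wrapped.__name__ == "add"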
cosine_distance
global
null
false
image_embeds,text_embeds
null
null
null
null
torch
def cosine_distance(image_embeds, text_embeds): normalized_image_embeds = nn.functional.normalize(image_embeds) normalized_text_embeds = nn.functional.normalize(text_embeds) return torch.mm( normalized_image_embeds, normalized_text_embeds.t() )
["def","cosine_distance","(","image_embeds",",","text_embeds",")",":","normalized_image_embeds","=","nn.functional.normalize","(","image_embeds",")","normalized_text_embeds","=","nn.functional.normalize","(","text_embeds",")","return","torch.mm","(","normalized_image_embeds",",","normalized_text_embeds.t","(",")",")"]
25
28
null
safety_checker.py
llmga/llmga/diffusers/src/diffusers/pipelines/stable_diffusion_safe/safety_checker.py
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging
10
null
4
null
null
1
null
Use image node_id 1 for calling a global function with example usage: cosine_distance(image_embeds, text_embeds) and returns: torch
131
node_id 1
171,105
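Shape sketch — despite the name, this returns pairwise cosine similarities between the two embedding sets (random stand-ins for CLIP embeddings):
import torch

image_embeds = torch.randn(4, 512)   # hypothetical image embeddings
text_embeds = torch.randn(3, 512)    # hypothetical concept embeddings
sim = cosine_distance(image_embeds, text_embeds)
print(sim.shape)   # torch.Size([4, 3]); each entry lies in [-1, 1]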
view_windows
global
null
false
filepath
null
null
null
null
null
def view_windows( filepath: typing.Union[os.PathLike, str], *, quiet: bool ) -> None: """Start filepath with its associated application (windows).""" # TODO: implement quiet=True filepath = os.path.normpath(filepath) log.debug("view: %r", filepath) os.startfile(filepath)
["def","view_windows","(","filepath",":","typing.Union","[","os.PathLike",",","str","]",",","*",",","quiet",":","bool",")","-",">","None",":","``","''","''","Start","filepath","with","its","associated","application","(","windows",")",".","''","''","''","#","TODO",":","implement","quiet=True","filepath","=","os.path.normpath","(","filepath",")","log.debug","(","``","view",":","%","r","''",",","filepath",")","os.startfile","(","filepath",")"]
65
71
null
viewing.py
graphviz/graphviz/backend/viewing.py
import logging import os import platform import subprocess import typing from .. import _tools
10
null
6
null
null
4
null
Use image node_id 4 for calling a global function with example usage: view_windows(filepath) without return types
113
node_id 4
147,163
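Call sketch (Windows-only, since os.startfile exists on no other platform; the filename is hypothetical):
view_windows("diagram.pdf", quiet=False)   # opens in the associated viewer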
Init
TableInC
object
true
self,buf,pos
null
null
null
null
null
def Init(self, buf, pos): self._tab = flatbuffers.table.Table(buf, pos)
["def","Init","(","self",",","buf",",","pos",")",":","self._tab","=","flatbuffers.table.Table","(","buf",",","pos",")"]
11
12
null
TableInC.py
flatbuffers/java/src/test/java/NamespaceA/TableInC.py
import flatbuffers
10
1
1
1
1
4
3
Use image node_id 1 for calling the TableInC obj's underlying member method code with example usage: obj.Init(buf, pos) without return types
140
node_id 1
142,890
ReferToA2
TableInC
object
true
self
null
null
null
null
None,obj
def ReferToA2(self): o = flatbuffers.number_types.UOffsetTFlags.py_type( self._tab.Offset(6) ) if o != 0: x = self._tab.Indirect(o + self._tab.Pos) from .SecondTableInA import SecondTableInA obj = SecondTableInA() obj.Init(self._tab.Bytes, x) return obj return None
["def","ReferToA2","(","self",")",":","o","=","flatbuffers.number_types.UOffsetTFlags.py_type","(","self._tab.Offset","(","6",")",")","if","o","!","=","0",":","x","=","self._tab.Indirect","(","o","+","self._tab.Pos",")","from",".SecondTableInA","import","SecondTableInA","obj","=","SecondTableInA","(",")","obj.Init","(","self._tab.Bytes",",","x",")","return","obj","return","None"]
26
34
null
TableInC.py
flatbuffers/java/src/test/java/NamespaceA/TableInC.py
import flatbuffers
10
1
1
1
1
4
3
Use image node_id 3 for calling the TableInC obj's underlying member method code with example usage: obj.ReferToA2() and returns: None, obj
139
node_id 3
142,892
test_version_1_point_10
global
null
false
null
null
null
null
null
def test_version_1_point_10(): # regression test for gh-2998. assert_(NumpyVersion("1.9.0") < "1.10.0") assert_(NumpyVersion("1.11.0") < "1.11.1") assert_(NumpyVersion("1.11.0") == "1.11.0") assert_(NumpyVersion("1.99.11") < "1.99.12")
["def","test_version_1_point_10","(",")",":","#","regression","test","for","gh-2998",".","assert_","(","NumpyVersion","(","``","1.9.0","''",")","<","``","1.10.0","''",")","assert_","(","NumpyVersion","(","``","1.11.0","''",")","<","``","1.11.1","''",")","assert_","(","NumpyVersion","(","``","1.11.0","''",")","==","``","1.11.0","''",")","assert_","(","NumpyVersion","(","``","1.99.11","''",")","<","``","1.99.12","''",")"]
17
22
null
test__version.py
numpy/numpy/lib/tests/test__version.py
from numpy.testing import assert_, assert_raises from numpy.lib import NumpyVersion
10
null
2
null
null
8
null
Use image node_id 2 for calling a global function with example usage: test_version_1_point_10() without return types
116
node_id 2
209,830
default_cache_dir
global
null
false
null
null
null
null
os
def default_cache_dir() -> str: return os.path.join( os.path.abspath(os.getcwd()), "glide_model_cache" )
["def","default_cache_dir","(",")","-",">","str",":","return","os.path.join","(","os.path.abspath","(","os.getcwd","(",")",")",",","``","glide_model_cache","''",")"]
21
22
null
download.py
glide-text2im/glide_text2im/download.py
import os from functools import lru_cache from typing import Dict, Optional import requests import torch from filelock import FileLock from tqdm.auto import tqdm
10
null
7
null
null
3
null
Use image node_id 1 for calling a global function with example usage: default_cache_dir() and returns: os
105
node_id 1
146,646
fetch_file_cached
global
null
false
url,progress,cache_dir,chunk_size
null
null
null
null
local_path,local_path
def fetch_file_cached( url: str, progress: bool = True, cache_dir: Optional[str] = None, chunk_size: int = 4096, ) -> str: """ Download the file at the given URL into a local file and return the path. If cache_dir is specified, it will be used to download the files. Otherwise, default_cache_dir() is used. """ if cache_dir is None: cache_dir = default_cache_dir() os.makedirs(cache_dir, exist_ok=True) local_path = os.path.join(cache_dir, url.split("/")[-1]) if os.path.exists(local_path): return local_path response = requests.get(url, stream=True) size = int(response.headers.get("content-length", "0")) with FileLock(local_path + ".lock"): if progress: pbar = tqdm(total=size, unit="iB", unit_scale=True) tmp_path = local_path + ".tmp" with open(tmp_path, "wb") as f: for chunk in response.iter_content(chunk_size): if progress: pbar.update(len(chunk)) f.write(chunk) os.rename(tmp_path, local_path) if progress: pbar.close() return local_path
["def","fetch_file_cached","(","url",":","str",",","progress",":","bool","=","True",",","cache_dir",":","Optional","[","str","]","=","None",",","chunk_size",":","int","=","4096",",",")","-",">","str",":","``","''","''","Download","the","file","at","the","given","URL","into","a","local","file","and","return","the","path",".","If","cache_dir","is","specified",",","it","will","be","used","to","download","the","files",".","Otherwise",",","default_cache_dir","(",")","is","used.","``","''","''","if","cache_dir","is","None",":","cache_dir","=","default_cache_dir","(",")","os.makedirs","(","cache_dir",",","exist_ok=True",")","local_path","=","os.path.join","(","cache_dir",",","url.split","(","``","\/","''",")","[","-1","]",")","if","os.path.exists","(","local_path",")",":","return","local_path","response","=","requests.get","(","url",",","stream=True",")","size","=","int","(","response.headers.get","(","``","content-length","''",",","``","0","''",")",")","with","FileLock","(","local_path","+","``",".lock","''",")",":","if","progress",":","pbar","=","tqdm","(","total=size",",","unit=","''","iB","''",",","unit_scale=True",")","tmp_path","=","local_path","+","``",".tmp","''","with","open","(","tmp_path",",","``","wb","''",")","as","f",":","for","chunk","in","response.iter_content","(","chunk_size",")",":","if","progress",":","pbar.update","(","len","(","chunk",")",")","f.write","(","chunk",")","os.rename","(","tmp_path",",","local_path",")","if","progress",":","pbar.close","(",")","return","local_path"]
25
54
null
download.py
glide-text2im/glide_text2im/download.py
import os from functools import lru_cache from typing import Dict, Optional import requests import torch from filelock import FileLock from tqdm.auto import tqdm
10
null
7
null
null
3
null
Use image node_id 2 for calling a global function with example usage: fetch_file_cached(url, progress, cache_dir, chunk_size) and returns: local_path, local_path
161
node_id 2
146,647
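Usage sketch with a hypothetical URL and cache directory (a real network fetch happens on first call; later calls return the cached path immediately):
path = fetch_file_cached(
    "https://example.com/checkpoints/model.pt",   # hypothetical URL
    progress=True,
    cache_dir="/tmp/glide_cache",
)
print(path)   # /tmp/glide_cache/model.pt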
test_alpha_beta_rc
global
null
false
null
null
null
null
null
def test_alpha_beta_rc(): assert_(NumpyVersion("1.8.0rc1") == "1.8.0rc1") for ver in ["1.8.0", "1.8.0rc2"]: assert_(NumpyVersion("1.8.0rc1") < ver) for ver in ["1.8.0a2", "1.8.0b3", "1.7.2rc4"]: assert_(NumpyVersion("1.8.0rc1") > ver) assert_(NumpyVersion("1.8.0b1") > "1.8.0a2")
["def","test_alpha_beta_rc","(",")",":","assert_","(","NumpyVersion","(","``","1.8.0rc1","''",")","==","``","1.8.0rc1","''",")","for","ver","in","[","``","1.8.0","''",",","``","1.8.0rc2","''","]",":","assert_","(","NumpyVersion","(","``","1.8.0rc1","''",")","<","ver",")","for","ver","in","[","``","1.8.0a2","''",",","``","1.8.0b3","''",",","``","1.7.2rc4","''","]",":","assert_","(","NumpyVersion","(","``","1.8.0rc1","''",")",">","ver",")","assert_","(","NumpyVersion","(","``","1.8.0b1","''",")",">","``","1.8.0a2","''",")"]
25
33
null
test__version.py
numpy/numpy/lib/tests/test__version.py
from numpy.testing import assert_, assert_raises from numpy.lib import NumpyVersion
10
null
2
null
null
8
null
Use image node_id 3 for calling a global function with example usage: test_alpha_beta_rc() without return types
111
node_id 3
209,831
test_dev_version
global
null
false
null
null
null
null
null
def test_dev_version(): assert_(NumpyVersion("1.9.0.dev-Unknown") < "1.9.0") for ver in [ "1.9.0", "1.9.0a1", "1.9.0b2", "1.9.0b2.dev-ffffffff", ]: assert_(NumpyVersion("1.9.0.dev-f16acvda") < ver) assert_( NumpyVersion("1.9.0.dev-f16acvda") == "1.9.0.dev-11111111" )
["def","test_dev_version","(",")",":","assert_","(","NumpyVersion","(","``","1.9.0.dev-Unknown","''",")","<","``","1.9.0","''",")","for","ver","in","[","``","1.9.0","''",",","``","1.9.0a1","''",",","``","1.9.0b2","''",",","``","1.9.0b2.dev-ffffffff","''",",","]",":","assert_","(","NumpyVersion","(","``","1.9.0.dev-f16acvda","''",")","<","ver",")","assert_","(","NumpyVersion","(","``","1.9.0.dev-f16acvda","''",")","==","``","1.9.0.dev-11111111","''",")"]
36
41
null
test__version.py
numpy/numpy/lib/tests/test__version.py
from numpy.testing import assert_, assert_raises from numpy.lib import NumpyVersion
10
null
2
null
null
8
null
Use image node_id 4 for calling a global function with example usage: test_dev_version() without return types
109
node_id 4
209,832
test_dev_a_b_rc_mixed
global
null
false
null
null
null
null
null
def test_dev_a_b_rc_mixed(): assert_( NumpyVersion("1.9.0a2.dev-f16acvda") == "1.9.0a2.dev-11111111" ) assert_(NumpyVersion("1.9.0a2.dev-6acvda54") < "1.9.0a2")
["def","test_dev_a_b_rc_mixed","(",")",":","assert_","(","NumpyVersion","(","``","1.9.0a2.dev-f16acvda","''",")","==","``","1.9.0a2.dev-11111111","''",")","assert_","(","NumpyVersion","(","``","1.9.0a2.dev-6acvda54","''",")","<","``","1.9.0a2","''",")"]
44
46
null
test__version.py
numpy/numpy/lib/tests/test__version.py
from numpy.testing import assert_, assert_raises from numpy.lib import NumpyVersion
10
null
2
null
null
8
null
Use image node_id 5 for calling a global function with example usage: test_dev_a_b_rc_mixed() without return types
114
node_id 5
209,833
test_dev0_version
global
null
false
null
null
null
null
null
def test_dev0_version(): assert_(NumpyVersion("1.9.0.dev0+Unknown") < "1.9.0") for ver in [ "1.9.0", "1.9.0a1", "1.9.0b2", "1.9.0b2.dev0+ffffffff", ]: assert_(NumpyVersion("1.9.0.dev0+f16acvda") < ver) assert_( NumpyVersion("1.9.0.dev0+f16acvda") == "1.9.0.dev0+11111111" )
["def","test_dev0_version","(",")",":","assert_","(","NumpyVersion","(","``","1.9.0.dev0+Unknown","''",")","<","``","1.9.0","''",")","for","ver","in","[","``","1.9.0","''",",","``","1.9.0a1","''",",","``","1.9.0b2","''",",","``","1.9.0b2.dev0+ffffffff","''",",","]",":","assert_","(","NumpyVersion","(","``","1.9.0.dev0+f16acvda","''",")","<","ver",")","assert_","(","NumpyVersion","(","``","1.9.0.dev0+f16acvda","''",")","==","``","1.9.0.dev0+11111111","''",")"]
49
54
null
test__version.py
numpy/numpy/lib/tests/test__version.py
from numpy.testing import assert_, assert_raises from numpy.lib import NumpyVersion
10
null
2
null
null
8
null
Use image node_id 6 for calling a global function with example usage: test_dev0_version() without return types
110
node_id 6
209,834
test_dev0_a_b_rc_mixed
global
null
false
null
null
null
null
null
def test_dev0_a_b_rc_mixed(): assert_( NumpyVersion("1.9.0a2.dev0+f16acvda") == "1.9.0a2.dev0+11111111" ) assert_(NumpyVersion("1.9.0a2.dev0+6acvda54") < "1.9.0a2")
["def","test_dev0_a_b_rc_mixed","(",")",":","assert_","(","NumpyVersion","(","``","1.9.0a2.dev0+f16acvda","''",")","==","``","1.9.0a2.dev0+11111111","''",")","assert_","(","NumpyVersion","(","``","1.9.0a2.dev0+6acvda54","''",")","<","``","1.9.0a2","''",")"]
57
59
null
test__version.py
numpy/numpy/lib/tests/test__version.py
from numpy.testing import assert_, assert_raises from numpy.lib import NumpyVersion
10
null
2
null
null
8
null
Use image node_id 7 for calling a global function with example usage: test_dev0_a_b_rc_mixed() without return types
115
node_id 7
209,835
test_raises
global
null
false
null
null
null
null
null
def test_raises(): for ver in ["1.9", "1,9.0", "1.7.x"]: assert_raises(ValueError, NumpyVersion, ver)
["def","test_raises","(",")",":","for","ver","in","[","``","1.9","''",",","``","1,9.0","''",",","``","1.7.x","''","]",":","assert_raises","(","ValueError",",","NumpyVersion",",","ver",")"]
62
64
null
test__version.py
numpy/numpy/lib/tests/test__version.py
from numpy.testing import assert_, assert_raises from numpy.lib import NumpyVersion
10
null
2
null
null
8
null
Use image node_id 8 for calling a global function with example usage: test_raises() without return types
104
node_id 8
209,836
__init__
GenericObject
null
true
self,v
null
null
null
null
GenericObject
def __init__(self, v): self.v = v
["def","__init__","(","self",",","v",")",":","self.v","=","v"]
11
12
null
print_coercion_tables.py
numpy/numpy/testing/print_coercion_tables.py
import numpy from numpy._core.numerictypes import obj2sctype from collections import namedtuple
10
1
3
0
null
3
3
Use image node_id 1 to create a new GenericObject object with example: obj = GenericObject(v)
94
node_id 1
213,860
test_main_versions
global
null
false
null
null
null
null
null
def test_main_versions(): assert_(NumpyVersion("1.8.0") == "1.8.0") for ver in ["1.9.0", "2.0.0", "1.8.1", "10.0.1"]: assert_(NumpyVersion("1.8.0") < ver) for ver in ["1.7.0", "1.7.1", "0.9.9"]: assert_(NumpyVersion("1.8.0") > ver)
["def","test_main_versions","(",")",":","assert_","(","NumpyVersion","(","``","1.8.0","''",")","==","``","1.8.0","''",")","for","ver","in","[","``","1.9.0","''",",","``","2.0.0","''",",","``","1.8.1","''",",","``","10.0.1","''","]",":","assert_","(","NumpyVersion","(","``","1.8.0","''",")","<","ver",")","for","ver","in","[","``","1.7.0","''",",","``","1.7.1","''",",","``","0.9.9","''","]",":","assert_","(","NumpyVersion","(","``","1.8.0","''",")",">","ver",")"]
8
14
null
test__version.py
numpy/numpy/lib/tests/test__version.py
from numpy.testing import assert_, assert_raises from numpy.lib import NumpyVersion
10
null
2
null
null
8
null
Use image node_id 1 for calling a global function with example usage: test_main_versions() without return types
111
node_id 1
209,829
load_checkpoint
global
null
false
checkpoint_name,device,progress,cache_dir,chunk_size
null
null
null
null
th
def load_checkpoint( checkpoint_name: str, device: th.device, progress: bool = True, cache_dir: Optional[str] = None, chunk_size: int = 4096, ) -> Dict[str, th.Tensor]: if checkpoint_name not in MODEL_PATHS: raise ValueError( f"Unknown checkpoint name {checkpoint_name}. Known names are: {MODEL_PATHS.keys()}." ) path = fetch_file_cached( MODEL_PATHS[checkpoint_name], progress=progress, cache_dir=cache_dir, chunk_size=chunk_size, ) return th.load(path, map_location=device)
["def","load_checkpoint","(","checkpoint_name",":","str",",","device",":","th.device",",","progress",":","bool","=","True",",","cache_dir",":","Optional","[","str","]","=","None",",","chunk_size",":","int","=","4096",",",")","-",">","Dict","[","str",",","th.Tensor","]",":","if","checkpoint_name","not","in","MODEL_PATHS",":","raise","ValueError","(","f","''","Unknown","checkpoint","name","{","checkpoint_name","}",".","Known","names","are",":","{","MODEL_PATHS.keys","(",")","}",".","''",")","path","=","fetch_file_cached","(","MODEL_PATHS","[","checkpoint_name","]",",","progress=progress",",","cache_dir=cache_dir",",","chunk_size=chunk_size",",",")","return","th.load","(","path",",","map_location=device",")"]
57
71
null
download.py
glide-text2im/glide_text2im/download.py
import os from functools import lru_cache from typing import Dict, Optional import requests import torch from filelock import FileLock from tqdm.auto import tqdm
10
null
7
null
null
3
null
Use image node_id 3 for calling a global function with example usage: load_checkpoint(checkpoint_name, device, progress, cache_dir, chunk_size) and returns: th
159
node_id 3
146,648
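Usage sketch; valid names come from the module-level MODEL_PATHS dict, which this record does not show — "base" is assumed here:
import torch as th

state_dict = load_checkpoint("base", device=th.device("cpu"))
print(len(state_dict))   # number of tensors in the checkpoint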
get_compiled_module
TestTritonWrapper
TestCase
true
self
null
null
null
null
compiled_module
def get_compiled_module(self): compiled_module = None for v in PyCodeCache.cache.values(): if hasattr(v, "benchmark_compiled_module"): self.assertTrue( compiled_module is None, "Found multiple compiled modules", ) compiled_module = v self.assertTrue(compiled_module is not None) return compiled_module
["def","get_compiled_module","(","self",")",":","compiled_module","=","None","for","v","in","PyCodeCache.cache.values","(",")",":","if","hasattr","(","v",",","``","benchmark_compiled_module","''",")",":","self.assertTrue","(","compiled_module","is","None",",","``","Found","multiple","compiled","modules","''",",",")","compiled_module","=","v","self.assertTrue","(","compiled_module","is","not","None",")","return","compiled_module"]
13
23
null
test_triton_wrapper.py
pytorch/test/inductor/test_triton_wrapper.py
import subprocess import sys import torch from torch._dynamo.test_case import run_tests, TestCase from torch._inductor.codecache import PyCodeCache from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU
10
1
6
1
1
0
2
Use image node_id 1 for calling the TestTritonWrapper obj's underlying member method code with example usage: obj.get_compiled_module() and returns: compiled_module
164
node_id 1
286,601
test_triton_config
TestTritonHeuristics
TestCase
true
self
null
null
Make sure block size does not exceed the maximum defined in inductor config.
["Make","sure","block","size","does","not","exceed","the","maximum","defined","in","inductor","config","."]
null
def test_triton_config(self): """ Make sure block size does not exceed the maximum defined in inductor config. """ cfg = triton_config([2048, 2], 64, 64) for label in "XYZ": key = f"{label}BLOCK" if key not in cfg.kwargs: continue self.assertTrue( cfg.kwargs[key] <= config.triton.max_block[label] )
["def","test_triton_config","(","self",")",":","``","''","''","Make","sure","block","size","does","not","exceed","the","maximum","defined","in","inductor","config.","``","''","''","cfg","=","triton_config","(","[","2048",",","2","]",",","64",",","64",")","for","label","in","``","XYZ","''",":","key","=","f","''","{","label","}","BLOCK","''","if","key","not","in","cfg.kwargs",":","continue","self.assertTrue","(","cfg.kwargs","[","key","]","<","=","config.triton.max_block","[","label","]",")"]
22
31
null
test_triton_heuristics.py
pytorch/test/inductor/test_triton_heuristics.py
import sys import unittest from torch.testing._internal.common_utils import IS_LINUX from torch.testing._internal.inductor_utils import HAS_GPU from torch._dynamo.test_case import run_tests, TestCase from torch._inductor import config from torch._inductor.triton_heuristics import triton_config
10
1
7
1
1
0
1
Use image node_id 1 for calling the TestTritonHeuristics obj's underlying member method code with example usage: obj.test_triton_config() without return types
158
node_id 1
286,600
__add__
GenericObject
null
true
self,other
null
null
null
null
self
def __add__(self, other): return self
["def","__add__","(","self",",","other",")",":","return","self"]
14
15
null
print_coercion_tables.py
numpy/numpy/testing/print_coercion_tables.py
import numpy from numpy._core.numerictypes import obj2sctype from collections import namedtuple
10
1
3
0
null
3
3
Use image node_id 2 for calling the GenericObject obj's underlying member method code with example usage: obj.__add__(other) and returns: self
142
node_id 2
213,861
__radd__
GenericObject
null
true
self,other
null
null
null
null
self
def __radd__(self, other): return self
["def","__radd__","(","self",",","other",")",":","return","self"]
17
18
null
print_coercion_tables.py
numpy/numpy/testing/print_coercion_tables.py
import numpy from numpy._core.numerictypes import obj2sctype from collections import namedtuple
10
1
3
0
null
3
3
Use image node_id 3 for calling the GenericObject obj's underlying member method code with example usage: obj.__radd__(other) and returns: self
143
node_id 3
213,862
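A sketch of why __add__ and __radd__ both return self: the sentinel survives np.add unchanged, which is how the coercion table ends up printing an object entry for 'O' operands. The class below restates the one in this file; the np.add call is illustrative.

import numpy as np

class GenericObject:
    def __init__(self, v):
        self.v = v
    def __add__(self, other):
        return self          # absorb the other operand
    def __radd__(self, other):
        return self

obj = GenericObject(1)
out = np.add(obj, np.float64(2.0))
print(out is obj)            # True: the object "wins" the coercion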
print_cancast_table
global
null
false
ntypes
null
null
null
null
null
def print_cancast_table(ntypes): print("X", end=" ") for char in ntypes: print(char, end=" ") print() for row in ntypes: print(row, end=" ") for col in ntypes: if np.can_cast(row, col, "equiv"): cast = "#" elif np.can_cast(row, col, "safe"): cast = "=" elif np.can_cast(row, col, "same_kind"): cast = "~" elif np.can_cast(row, col, "unsafe"): cast = "." else: cast = " " print(cast, end=" ") print()
["def","print_cancast_table","(","ntypes",")",":","print","(","``","X","''",",","end=","''","``",")","for","char","in","ntypes",":","print","(","char",",","end=","''","``",")","print","(",")","for","row","in","ntypes",":","print","(","row",",","end=","''","``",")","for","col","in","ntypes",":","if","np.can_cast","(","row",",","col",",","``","equiv","''",")",":","cast","=","``","#","''","elif","np.can_cast","(","row",",","col",",","``","safe","''",")",":","cast","=","``","=","''","elif","np.can_cast","(","row",",","col",",","``","same_kind","''",")",":","cast","=","``","~","''","elif","np.can_cast","(","row",",","col",",","``","unsafe","''",")",":","cast","=","``",".","''","else",":","cast","=","``","``","print","(","cast",",","end=","''","``",")","print","(",")"]
22
41
null
print_coercion_tables.py
numpy/numpy/testing/print_coercion_tables.py
import numpy from numpy._core.numerictypes import obj2sctype from collections import namedtuple
10
null
3
null
null
3
null
Use image node_id 1 for calling a global function with example usage: print_cancast_table(ntypes) without return types
118
node_id 1
213,863
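A small usage sketch, assuming the script's module path (numpy/testing/print_coercion_tables.py) is importable; np.typecodes supplies the type-character string. Legend from the function body: '#' equiv, '=' safe, '~' same_kind, '.' unsafe, blank for no cast.

import numpy as np
from numpy.testing.print_coercion_tables import print_cancast_table

print_cancast_table(np.typecodes["All"])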
print_coercion_table
global
null
false
ntypes,inputfirstvalue,inputsecondvalue,firstarray,use_promote_types
null
null
null
null
null
def print_coercion_table( ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False, ): print("+", end=" ") for char in ntypes: print(char, end=" ") print() for row in ntypes: if row == "O": rowtype = GenericObject else: rowtype = obj2sctype(row) print(row, end=" ") for col in ntypes: if col == "O": coltype = GenericObject else: coltype = obj2sctype(col) try: if firstarray: rowvalue = np.array( [rowtype(inputfirstvalue)], dtype=rowtype ) else: rowvalue = rowtype(inputfirstvalue) colvalue = coltype(inputsecondvalue) if use_promote_types: char = np.promote_types( rowvalue.dtype, colvalue.dtype ).char else: value = np.add(rowvalue, colvalue) if isinstance(value, np.ndarray): char = value.dtype.char else: char = np.dtype(type(value)).char except ValueError: char = "!" except OverflowError: char = "@" except TypeError: char = "#" print(char, end=" ") print()
["def","print_coercion_table","(","ntypes",",","inputfirstvalue",",","inputsecondvalue",",","firstarray",",","use_promote_types=False",",",")",":","print","(","``","+","''",",","end=","''","``",")","for","char","in","ntypes",":","print","(","char",",","end=","''","``",")","print","(",")","for","row","in","ntypes",":","if","row","==","``","O","''",":","rowtype","=","GenericObject","else",":","rowtype","=","obj2sctype","(","row",")","print","(","row",",","end=","''","``",")","for","col","in","ntypes",":","if","col","==","``","O","''",":","coltype","=","GenericObject","else",":","coltype","=","obj2sctype","(","col",")","try",":","if","firstarray",":","rowvalue","=","np.array","(","[","rowtype","(","inputfirstvalue",")","]",",","dtype=rowtype",")","else",":","rowvalue","=","rowtype","(","inputfirstvalue",")","colvalue","=","coltype","(","inputsecondvalue",")","if","use_promote_types",":","char","=","np.promote_types","(","rowvalue.dtype",",","colvalue.dtype",")",".char","else",":","value","=","np.add","(","rowvalue",",","colvalue",")","if","isinstance","(","value",",","np.ndarray",")",":","char","=","value.dtype.char","else",":","char","=","np.dtype","(","type","(","value",")",")",".char","except","ValueError",":","char","=","``","!","''","except","OverflowError",":","char","=","``","@","''","except","TypeError",":","char","=","``","#","''","print","(","char",",","end=","''","``",")","print","(",")"]
43
81
null
print_coercion_tables.py
numpy/numpy/testing/print_coercion_tables.py
import numpy from numpy._core.numerictypes import obj2sctype from collections import namedtuple
10
null
3
null
null
3
null
Use image node_id 2 for calling a global function with example usage: print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types) without return types
185
node_id 2
213,864
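A companion sketch for the coercion table; the zero scalars and flags are one plausible invocation rather than the script's exact driver. In the output, '!' marks ValueError, '@' OverflowError, and '#' TypeError cells.

import numpy as np
from numpy.testing.print_coercion_tables import print_coercion_table

# scalar + scalar promotion observed through np.add
print_coercion_table(np.typecodes["All"], 0, 0, firstarray=False)
# dtype-only promotion via np.promote_types
print_coercion_table(np.typecodes["All"], 0, 0, firstarray=False,
                     use_promote_types=True)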
main
global
null
false
null
null
null
null
null
def main(): """ Main function to control the flow of the program. """ # Parse arguments. args = parse_args() if args.num_chunks > 1: pred_contents = [] for _idx in range(args.num_chunks): file = os.path.join( args.pred_path, f"{args.num_chunks}_{_idx}.json" ) try: pred_contents += json.load(open(file)) except: pred_contents += [ json.loads(line) for line in open(file) ] else: file = open(args.pred_path) pred_contents = [json.loads(line) for line in file] # Dictionary to store the count of occurrences for each video_id video_id_counts = {} new_pred_contents = [] # Iterate through each sample in pred_contents for sample in pred_contents: # import pdb; pdb.set_trace() video_id = sample["id"] if video_id in video_id_counts: video_id_counts[video_id] += 1 else: video_id_counts[video_id] = 0 # Create a new sample with the modified key new_sample = sample new_sample["id"] = f"{video_id}_{video_id_counts[video_id]}" new_pred_contents.append(new_sample) # Generating list of id's and corresponding files id_list = [x["id"] for x in new_pred_contents] caption_files = [f"{id}.json" for id in id_list] output_dir = args.output_dir # Generate output directory if not exists. if not os.path.exists(output_dir): os.makedirs(output_dir) # Preparing dictionary of question-answer sets prediction_set = {} for sample in new_pred_contents: id = sample["id"] question = sample["question"] answer = sample["answer"] pred = sample["pred"] qa_set = {"q": question, "a": answer, "pred": pred} prediction_set[id] = qa_set # Set the OpenAI API key. openai.api_key = args.api_key # Your API key here if args.api_base: openai.api_base = args.api_base # Your API base here num_tasks = args.num_tasks # While loop to ensure that all captions are processed. while True: try: # Files that have not been processed yet. completed_files = os.listdir(output_dir) print(f"completed_files: {len(completed_files)}") # Files that have not been processed yet. incomplete_files = [ f for f in caption_files if f not in completed_files ] print(f"incomplete_files: {len(incomplete_files)}") # Break the loop when there are no incomplete files if len(incomplete_files) == 0: break if len(incomplete_files) <= num_tasks: num_tasks = 1 # Split tasks into parts. part_len = len(incomplete_files) // num_tasks all_parts = [ incomplete_files[i : i + part_len] for i in range(0, len(incomplete_files), part_len) ] task_args = [ (prediction_set, part, args.output_dir) for part in all_parts ] # Use a pool of workers to process the files in parallel. with Pool() as pool: pool.starmap(annotate, task_args) except Exception as e: print(f"Error: {e}") # Combine all the processed files into one combined_contents = {} json_path = args.output_json # Iterate through json files for file_name in os.listdir(output_dir): if file_name.endswith(".json"): file_path = os.path.join(output_dir, file_name) with open(file_path, "r") as json_file: content = json.load(json_file) combined_contents[file_name[:-5]] = content # Write combined content to a json file with open(json_path, "w") as json_file: json.dump(combined_contents, json_file) print("All evaluation completed!") # Calculate average score and accuracy score_sum = 0 count = 0 yes_count = 0 no_count = 0 for key, result in combined_contents.items(): # Computing score count += 1 score_match = result[0]["score"] score = int(score_match) score_sum += score try: # Computing accuracy pred = result[0]["pred"] except: pred = result[0]["predicted"] if "yes" in pred.lower(): yes_count += 1 elif "no" in pred.lower(): no_count += 1 else: raise Exception("Invalid prediction") average_score = score_sum / count accuracy = yes_count / (yes_count + no_count) print("Yes count:", yes_count) print("No count:", no_count) print("Accuracy:", accuracy) print("Average score:", average_score)
["def","main","(",")",":","``","''","''","Main","function","to","control","the","flow","of","the","program.","``","''","''","#","Parse","arguments",".","args","=","parse_args","(",")","if","args.num_chunks",">","1",":","pred_contents","=","[","]","for","_idx","in","range","(","args.num_chunks",")",":","file","=","os.path.join","(","args.pred_path",",","f","''","{","args.num_chunks","}","_","{","_idx","}",".json","''",")","try",":","pred_contents","+=","json.load","(","open","(","file",")",")","except",":","pred_contents","+=","[","json.loads","(","line",")","for","line","in","open","(","file",")","]","else",":","file","=","open","(","args.pred_path",")","pred_contents","=","[","json.loads","(","line",")","for","line","in","file","]","#","Dictionary","to","store","the","count","of","occurrences","for","each","video_id","video_id_counts","=","{","}","new_pred_contents","=","[","]","#","Iterate","through","each","sample","in","pred_contents","for","sample","in","pred_contents",":","#","import","pdb",";","pdb.set_trace","(",")","video_id","=","sample","[","``","id","''","]","if","video_id","in","video_id_counts",":","video_id_counts","[","video_id","]","+=","1","else",":","video_id_counts","[","video_id","]","=","0","#","Create","a","new","sample","with","the","modified","key","new_sample","=","sample","new_sample","[","``","id","''","]","=","f","''","{","video_id","}","_","{","video_id_counts","[","video_id","]","}","''","new_pred_contents.append","(","new_sample",")","#","Generating","list","of","id","'s","and","corresponding","files","id_list","=","[","x","[","``","id","''","]","for","x","in","new_pred_contents","]","caption_files","=","[","f","''","{","id","}",".json","''","for","id","in","id_list","]","output_dir","=","args.output_dir","#","Generate","output","directory","if","not","exists",".","if","not","os.path.exists","(","output_dir",")",":","os.makedirs","(","output_dir",")","#","Preparing","dictionary","of","question-answer","sets","prediction_set","=","{","}","for","sample","in","new_pred_contents",":","id","=","sample","[","``","id","''","]","question","=","sample","[","``","question","''","]","answer","=","sample","[","``","answer","''","]","pred","=","sample","[","``","pred","''","]","qa_set","=","{","``","q","''",":","question",",","``","a","''",":","answer",",","``","pred","''",":","pred","}","prediction_set","[","id","]","=","qa_set","#","Set","the","OpenAI","API","key",".","openai.api_key","=","args.api_key","#","Your","API","key","here","if","args.api_base",":","openai.api_base","=","args.api_base","#","Your","API","base","here","num_tasks","=","args.num_tasks","#","While","loop","to","ensure","that","all","captions","are","processed",".","while","True",":","try",":","#","Files","that","have","not","been","processed","yet",".","completed_files","=","os.listdir","(","output_dir",")","print","(","f","''","completed_files",":","{","len","(","completed_files",")","}","''",")","#","Files","that","have","not","been","processed","yet",".","incomplete_files","=","[","f","for","f","in","caption_files","if","f","not","in","completed_files","]","print","(","f","''","incomplete_files",":","{","len","(","incomplete_files",")","}","''",")","#","Break","the","loop","when","there","are","no","incomplete","files","if","len","(","incomplete_files",")","==","0",":","break","if","len","(","incomplete_files",")","<","=","num_tasks",":","num_tasks","=","1","#","Split","tasks","into","parts",".","part_len","=","len","(","incomplete_files",")","\/\/","num_tasks","all_parts","=","[","incomplete_fil
es","[","i",":","i","+","part_len","]","for","i","in","range","(","0",",","len","(","incomplete_files",")",",","part_len",")","]","task_args","=","[","(","prediction_set",",","part",",","args.output_dir",")","for","part","in","all_parts","]","#","Use","a","pool","of","workers","to","process","the","files","in","parallel",".","with","Pool","(",")","as","pool",":","pool.starmap","(","annotate",",","task_args",")","except","Exception","as","e",":","print","(","f","''","Error",":","{","e","}","''",")","#","Combine","all","the","processed","files","into","one","combined_contents","=","{","}","json_path","=","args.output_json","#","Iterate","through","json","files","for","file_name","in","os.listdir","(","output_dir",")",":","if","file_name.endswith","(","``",".json","''",")",":","file_path","=","os.path.join","(","output_dir",",","file_name",")","with","open","(","file_path",",","``","r","''",")","as","json_file",":","content","=","json.load","(","json_file",")","combined_contents","[","file_name","[",":","-5","]","]","=","content","#","Write","combined","content","to","a","json","file","with","open","(","json_path",",","``","w","''",")","as","json_file",":","json.dump","(","combined_contents",",","json_file",")","print","(","``","All","evaluation","completed","!","''",")","#","Calculate","average","score","and","accuracy","score_sum","=","0","count","=","0","yes_count","=","0","no_count","=","0","for","key",",","result","in","combined_contents.items","(",")",":","#","Computing","score","count","+=","1","score_match","=","result","[","0","]","[","``","score","''","]","score","=","int","(","score_match",")","score_sum","+=","score","try",":","#","Computing","accuracy","pred","=","result","[","0","]","[","``","pred","''","]","except",":","pred","=","result","[","0","]","[","``","predicted","''","]","if","``","yes","''","in","pred.lower","(",")",":","yes_count","+=","1","elif","``","no","''","in","pred.lower","(",")",":","no_count","+=","1","else",":","raise","Exception","(","``","Invalid","prediction","''",")","average_score","=","score_sum","\/","count","accuracy","=","yes_count","\/","(","yes_count","+","no_count",")","print","(","``","Yes","count",":","''",",","yes_count",")","print","(","``","No","count",":","''",",","no_count",")","print","(","``","Accuracy",":","''",",","accuracy",")","print","(","``","Average","score",":","''",",","average_score",")"]
77
215
null
eval_msvd_qa.py
llama-vid/llamavid/eval/eval_msvd_qa.py
import openai import os import argparse import json import ast from multiprocessing.pool import Pool from tqdm import tqdm
10
null
7
null
null
3
null
Use image node_id 3 for calling a global function with example usage: main() without return types
97
node_id 3
167,044
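A sketch of the JSONL input main() expects at --pred_path when num_chunks is 1. The key names come straight from the fields main() reads; the file name is hypothetical.

import json

sample = {
    "id": "vid1234",                      # hypothetical video id
    "question": "what is the boy doing?",
    "answer": "running",
    "pred": "the boy is running",
}
with open("preds.jsonl", "w") as f:       # pass this path as --pred_path
    f.write(json.dumps(sample) + "\n")    # one prediction per line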
annotate
global
null
false
prediction_set,caption_files,output_dir
null
null
null
null
null
def annotate(prediction_set, caption_files, output_dir): """ Evaluates question and answer pairs using GPT-3 Returns a score for correctness. """ for file in tqdm(caption_files): key = file[:-5] # Strip file extension qa_set = prediction_set[key] question = qa_set["q"] answer = qa_set["a"] pred = qa_set["pred"] try: # Compute the correctness score completion = openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=[ { "role": "system", "content": "You are an intelligent chatbot designed for evaluating the correctness of generative outputs for question-answer pairs. " "Your task is to compare the predicted answer with the correct answer and determine if they match meaningfully. Here's how you can accomplish the task:" "------" "##INSTRUCTIONS: " "- Focus on the meaningful match between the predicted answer and the correct answer.\n" "- Consider synonyms or paraphrases as valid matches.\n" "- Evaluate the correctness of the prediction compared to the answer.", }, { "role": "user", "content": "Please evaluate the following video-based question-answer pair:\n\n" f"Question: {question}\n" f"Correct Answer: {answer}\n" f"Predicted Answer: {pred}\n\n" "Provide your evaluation only as a yes/no and score where the score is an integer value between 0 and 5, with 5 indicating the highest meaningful match. " "Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING." "DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. " "For example, your response should look like this: {'pred': 'yes', 'score': 4.8}.", }, ], ) # Convert response to a Python dictionary. response_message = completion["choices"][0]["message"][ "content" ] response_dict = ast.literal_eval(response_message) result_qa_pair = [response_dict, qa_set] # Save the question-answer pairs to a json file. with open(f"{output_dir}/{key}.json", "w") as f: json.dump(result_qa_pair, f) except Exception as e: print(f"Error processing file '{key}': {e}")
["def","annotate","(","prediction_set",",","caption_files",",","output_dir",")",":","``","''","''","Evaluates","question","and","answer","pairs","using","GPT-3","Returns","a","score","for","correctness.","``","''","''","for","file","in","tqdm","(","caption_files",")",":","key","=","file","[",":","-5","]","#","Strip","file","extension","qa_set","=","prediction_set","[","key","]","question","=","qa_set","[","``","q","''","]","answer","=","qa_set","[","``","a","''","]","pred","=","qa_set","[","``","pred","''","]","try",":","#","Compute","the","correctness","score","completion","=","openai.ChatCompletion.create","(","model=","''","gpt-3.5-turbo","''",",","messages=","[","{","``","role","''",":","``","system","''",",","``","content","''",":","``","You","are","an","intelligent","chatbot","designed","for","evaluating","the","correctness","of","generative","outputs","for","question-answer","pairs.","``","``","Your","task","is","to","compare","the","predicted","answer","with","the","correct","answer","and","determine","if","they","match","meaningfully",".","Here","'s","how","you","can","accomplish","the","task",":","''","``","--","--","--","''","``","#","#","INSTRUCTIONS",":","``","``","-","Focus","on","the","meaningful","match","between","the","predicted","answer","and","the","correct","answer.\\n","''","``","-","Consider","synonyms","or","paraphrases","as","valid","matches.\\n","''","``","-","Evaluate","the","correctness","of","the","prediction","compared","to","the","answer",".","``",",","}",",","{","``","role","''",":","``","user","''",",","``","content","''",":","``","Please","evaluate","the","following","video-based","question-answer","pair",":","\\n\\n","''","f","''","Question",":","{","question","}","\\n","''","f","''","Correct","Answer",":","{","answer","}","\\n","''","f","''","Predicted","Answer",":","{","pred","}","\\n\\n","''","``","Provide","your","evaluation","only","as","a","yes\/no","and","score","where","the","score","is","an","integer","value","between","0","and","5",",","with","5","indicating","the","highest","meaningful","match.","``","``","Please","generate","the","response","in","the","form","of","a","Python","dictionary","string","with","keys","'pred","'","and","'score","'",",","where","value","of","'pred","'","is","a","string","of","'yes","'","or","'no","'","and","value","of","'score","'","is","in","INTEGER",",","not","STRING",".","''","``","DO","NOT","PROVIDE","ANY","OTHER","OUTPUT","TEXT","OR","EXPLANATION",".","Only","provide","the","Python","dictionary","string.","``","``","For","example",",","your","response","should","look","like","this",":","{","'pred","'",":","'yes","'",",","'score","'",":","4.8","}",".","``",",","}",",","]",",",")","#","Convert","response","to","a","Python","dictionary",".","response_message","=","completion","[","``","choices","''","]","[","0","]","[","``","message","''","]","[","``","content","''","]","response_dict","=","ast.literal_eval","(","response_message",")","result_qa_pair","=","[","response_dict",",","qa_set","]","#","Save","the","question-answer","pairs","to","a","json","file",".","with","open","(","f","''","{","output_dir","}","\/","{","key","}",".json","''",",","``","w","''",")","as","f",":","json.dump","(","result_qa_pair",",","f",")","except","Exception","as","e",":","print","(","f","''","Error","processing","file","'","{","key","}","'",":","{","e","}","''",")"]
23
74
null
eval_msvd_qa.py
llama-vid/llamavid/eval/eval_msvd_qa.py
import openai import os import argparse import json import ast from multiprocessing.pool import Pool from tqdm import tqdm
10
null
7
null
null
3
null
Use image node_id 2 for calling a global function with example usage: annotate(prediction_set, caption_files, output_dir) without return types
142
node_id 2
167,043
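A hedged sketch of calling annotate directly; the import path, key, and file names are hypothetical, and the call issues live gpt-3.5-turbo requests, so a real API key is required.

import os
import openai
from llamavid.eval.eval_msvd_qa import annotate  # hypothetical import path

openai.api_key = "sk-..."                 # your real key
os.makedirs("eval_out", exist_ok=True)    # annotate writes one json per key here
prediction_set = {
    "vid1234_0": {"q": "what is the boy doing?",
                  "a": "running",
                  "pred": "the boy is running"},
}
# annotate strips the ".json" extension from each file name to recover the key.
annotate(prediction_set, ["vid1234_0.json"], "eval_out")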
parse_args
global
null
false
null
null
null
null
args
def parse_args(): parser = argparse.ArgumentParser( description="question-answer-generation-using-gpt-3" ) parser.add_argument( "--pred_path", required=True, help="The path to file containing prediction.", ) parser.add_argument( "--output_dir", required=True, help="The path to save annotation json files.", ) parser.add_argument( "--output_json", required=True, help="The path to save annotation final combined json file.", ) parser.add_argument( "--num_tasks", required=True, type=int, help="Number of splits.", ) parser.add_argument( "--num_chunks", default=1, type=int, help="Result splits" ) parser.add_argument( "--api_key", required=True, type=str, help="OpenAI API key" ) parser.add_argument( "--api_base", default=None, type=str, help="OpenAI API base" ) args = parser.parse_args() return args
["def","parse_args","(",")",":","parser","=","argparse.ArgumentParser","(","description=","''","question-answer-generation-using-gpt-3","''",")","parser.add_argument","(","``","--","pred_path","''",",","required=True",",","help=","''","The","path","to","file","containing","prediction",".","``",",",")","parser.add_argument","(","``","--","output_dir","''",",","required=True",",","help=","''","The","path","to","save","annotation","json","files",".","``",",",")","parser.add_argument","(","``","--","output_json","''",",","required=True",",","help=","''","The","path","to","save","annotation","final","combined","json","file",".","``",",",")","parser.add_argument","(","``","--","num_tasks","''",",","required=True",",","type=int",",","help=","''","Number","of","splits",".","``",",",")","parser.add_argument","(","``","--","num_chunks","''",",","default=1",",","type=int",",","help=","''","Result","splits","''",")","parser.add_argument","(","``","--","api_key","''",",","required=True",",","type=str",",","help=","''","OpenAI","API","key","''",")","parser.add_argument","(","``","--","api_base","''",",","default=None",",","type=str",",","help=","''","OpenAI","API","base","''",")","args","=","parser.parse_args","(",")","return","args"]
10
20
null
eval_msvd_qa.py
llama-vid/llamavid/eval/eval_msvd_qa.py
import openai import os import argparse import json import ast from multiprocessing.pool import Pool from tqdm import tqdm
10
null
7
null
null
3
null
Use image node_id 1 for calling a global function with example usage: parse_args() and returns: args
100
node_id 1
167,042
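An invocation sketch matching parse_args; the paths and key are placeholders, and the script path follows this record's file_path. --num_chunks and --api_base are optional.

import subprocess

subprocess.run([
    "python", "llamavid/eval/eval_msvd_qa.py",
    "--pred_path", "preds.jsonl",
    "--output_dir", "eval_out",
    "--output_json", "eval_out/combined.json",
    "--num_tasks", "4",
    "--api_key", "sk-...",                # your real key
], check=True)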
main
global
null
false
null
null
null
null
null
def main(): """ Main function to control the flow of the program. """ # Parse arguments. args = parse_args() if args.num_chunks > 1: pred_contents = [] for _idx in range(args.num_chunks): file = os.path.join( args.pred_path, f"{args.num_chunks}_{_idx}.json" ) pred_contents += [json.loads(line) for line in open(file)] else: pred_contents = [ json.loads(line) for line in open(args.pred_path) ] # Dictionary to store the count of occurrences for each video_id video_id_counts = {} new_pred_contents = [] # Iterate through each sample in pred_contents for sample in pred_contents: video_id = sample["id"] if video_id in video_id_counts: video_id_counts[video_id] += 1 else: video_id_counts[video_id] = 0 # Create a new sample with the modified key new_sample = sample new_sample["id"] = f"{video_id}_{video_id_counts[video_id]}" new_pred_contents.append(new_sample) # Generating list of id's and corresponding files id_list = [x["id"] for x in new_pred_contents] caption_files = [f"{id}.json" for id in id_list] output_dir = args.output_dir # Generate output directory if not exists. if not os.path.exists(output_dir): os.makedirs(output_dir) # Preparing dictionary of question-answer sets prediction_set = {} for sample in new_pred_contents: id = sample["id"] question = sample["question"] answer = sample["answer"] pred = sample["pred"] qa_set = {"q": question, "a": answer, "pred": pred} prediction_set[id] = qa_set # Set the OpenAI API key. openai.api_key = args.api_key # Your API key here if args.api_base: openai.api_base = args.api_base # Your API base here num_tasks = args.num_tasks # While loop to ensure that all captions are processed. while True: try: # Files that have not been processed yet. completed_files = os.listdir(output_dir) print(f"completed_files: {len(completed_files)}") # Files that have not been processed yet. incomplete_files = [ f for f in caption_files if f not in completed_files ] print(f"incomplete_files: {len(incomplete_files)}") # Break the loop when there are no incomplete files if len(incomplete_files) == 0: break if len(incomplete_files) <= num_tasks: num_tasks = 1 # Split tasks into parts. part_len = len(incomplete_files) // num_tasks all_parts = [ incomplete_files[i : i + part_len] for i in range(0, len(incomplete_files), part_len) ] task_args = [ (prediction_set, part, args.output_dir) for part in all_parts ] # Use a pool of workers to process the files in parallel. with Pool() as pool: pool.starmap(annotate, task_args) except Exception as e: print(f"Error: {e}") # Combine all the processed files into one combined_contents = {} json_path = args.output_json # Iterate through json files for file_name in os.listdir(output_dir): if file_name.endswith(".json"): file_path = os.path.join(output_dir, file_name) with open(file_path, "r") as json_file: content = json.load(json_file) combined_contents[file_name[:-5]] = content # Write combined content to a json file with open(json_path, "w") as json_file: json.dump(combined_contents, json_file) print("All evaluation completed!") # Calculate average score and accuracy score_sum = 0 count = 0 yes_count = 0 no_count = 0 for key, result in combined_contents.items(): # Computing score count += 1 score_match = result[0]["score"] score = int(score_match) score_sum += score # Computing accuracy pred = result[0]["pred"] if "yes" in pred.lower(): yes_count += 1 elif "no" in pred.lower(): no_count += 1 average_score = score_sum / count accuracy = yes_count / (yes_count + no_count) print("Yes count:", yes_count) print("No count:", no_count) print("Accuracy:", accuracy) print("Average score:", average_score)
["def","main","(",")",":","``","''","''","Main","function","to","control","the","flow","of","the","program.","``","''","''","#","Parse","arguments",".","args","=","parse_args","(",")","if","args.num_chunks",">","1",":","pred_contents","=","[","]","for","_idx","in","range","(","args.num_chunks",")",":","file","=","os.path.join","(","args.pred_path",",","f","''","{","args.num_chunks","}","_","{","_idx","}",".json","''",")","pred_contents","+=","[","json.loads","(","line",")","for","line","in","open","(","file",")","]","else",":","pred_contents","=","[","json.loads","(","line",")","for","line","in","open","(","args.pred_path",")","]","#","Dictionary","to","store","the","count","of","occurrences","for","each","video_id","video_id_counts","=","{","}","new_pred_contents","=","[","]","#","Iterate","through","each","sample","in","pred_contents","for","sample","in","pred_contents",":","video_id","=","sample","[","``","id","''","]","if","video_id","in","video_id_counts",":","video_id_counts","[","video_id","]","+=","1","else",":","video_id_counts","[","video_id","]","=","0","#","Create","a","new","sample","with","the","modified","key","new_sample","=","sample","new_sample","[","``","id","''","]","=","f","''","{","video_id","}","_","{","video_id_counts","[","video_id","]","}","''","new_pred_contents.append","(","new_sample",")","#","Generating","list","of","id","'s","and","corresponding","files","id_list","=","[","x","[","``","id","''","]","for","x","in","new_pred_contents","]","caption_files","=","[","f","''","{","id","}",".json","''","for","id","in","id_list","]","output_dir","=","args.output_dir","#","Generate","output","directory","if","not","exists",".","if","not","os.path.exists","(","output_dir",")",":","os.makedirs","(","output_dir",")","#","Preparing","dictionary","of","question-answer","sets","prediction_set","=","{","}","for","sample","in","new_pred_contents",":","id","=","sample","[","``","id","''","]","question","=","sample","[","``","question","''","]","answer","=","sample","[","``","answer","''","]","pred","=","sample","[","``","pred","''","]","qa_set","=","{","``","q","''",":","question",",","``","a","''",":","answer",",","``","pred","''",":","pred","}","prediction_set","[","id","]","=","qa_set","#","Set","the","OpenAI","API","key",".","openai.api_key","=","args.api_key","#","Your","API","key","here","if","args.api_base",":","openai.api_base","=","args.api_base","#","Your","API","base","here","num_tasks","=","args.num_tasks","#","While","loop","to","ensure","that","all","captions","are","processed",".","while","True",":","try",":","#","Files","that","have","not","been","processed","yet",".","completed_files","=","os.listdir","(","output_dir",")","print","(","f","''","completed_files",":","{","len","(","completed_files",")","}","''",")","#","Files","that","have","not","been","processed","yet",".","incomplete_files","=","[","f","for","f","in","caption_files","if","f","not","in","completed_files","]","print","(","f","''","incomplete_files",":","{","len","(","incomplete_files",")","}","''",")","#","Break","the","loop","when","there","are","no","incomplete","files","if","len","(","incomplete_files",")","==","0",":","break","if","len","(","incomplete_files",")","<","=","num_tasks",":","num_tasks","=","1","#","Split","tasks","into","parts",".","part_len","=","len","(","incomplete_files",")","\/\/","num_tasks","all_parts","=","[","incomplete_files","[","i",":","i","+","part_len","]","for","i","in","range","(","0",",","len","(","incomplete_files",")",",","part_len",")","]","task_args","=","[","
(","prediction_set",",","part",",","args.output_dir",")","for","part","in","all_parts","]","#","Use","a","pool","of","workers","to","process","the","files","in","parallel",".","with","Pool","(",")","as","pool",":","pool.starmap","(","annotate",",","task_args",")","except","Exception","as","e",":","print","(","f","''","Error",":","{","e","}","''",")","#","Combine","all","the","processed","files","into","one","combined_contents","=","{","}","json_path","=","args.output_json","#","Iterate","through","json","files","for","file_name","in","os.listdir","(","output_dir",")",":","if","file_name.endswith","(","``",".json","''",")",":","file_path","=","os.path.join","(","output_dir",",","file_name",")","with","open","(","file_path",",","``","r","''",")","as","json_file",":","content","=","json.load","(","json_file",")","combined_contents","[","file_name","[",":","-5","]","]","=","content","#","Write","combined","content","to","a","json","file","with","open","(","json_path",",","``","w","''",")","as","json_file",":","json.dump","(","combined_contents",",","json_file",")","print","(","``","All","evaluation","completed","!","''",")","#","Calculate","average","score","and","accuracy","score_sum","=","0","count","=","0","yes_count","=","0","no_count","=","0","for","key",",","result","in","combined_contents.items","(",")",":","#","Computing","score","count","+=","1","score_match","=","result","[","0","]","[","``","score","''","]","score","=","int","(","score_match",")","score_sum","+=","score","#","Computing","accuracy","pred","=","result","[","0","]","[","``","pred","''","]","if","``","yes","''","in","pred.lower","(",")",":","yes_count","+=","1","elif","``","no","''","in","pred.lower","(",")",":","no_count","+=","1","average_score","=","score_sum","\/","count","accuracy","=","yes_count","\/","(","yes_count","+","no_count",")","print","(","``","Yes","count",":","''",",","yes_count",")","print","(","``","No","count",":","''",",","no_count",")","print","(","``","Accuracy",":","''",",","accuracy",")","print","(","``","Average","score",":","''",",","average_score",")"]
77
205
null
eval_activitynet_qa.py
llama-vid/llamavid/eval/eval_activitynet_qa.py
import openai import os import argparse import json import ast from multiprocessing.pool import Pool from tqdm import tqdm
10
null
7
null
null
3
null
Use image node_id 3 for calling a global function with example usage: main() without return types
97
node_id 3
167,041
test_wrapper_using_gpu_seed
TestTritonWrapper
TestCase
true
self
null
null
Make sure the subprocess.check_output does not throw.
["Make","sure","the","subprocess.check_output","does","not","throw","."]
unknown
def test_wrapper_using_gpu_seed(self): """ Make sure the subprocess.check_output does not throw. """ @torch.compile def f(x, y): # dropout will result in usage of cuda_seed z = torch.nn.functional.dropout(x, 0.5) return z + y N = 10 x = torch.rand(N).to(device=GPU_TYPE) y = torch.rand(N).to(device=GPU_TYPE) out = f(x, y) compiled_module = self.get_compiled_module() # now run the compiled module in subprocess and check its output bench_out = subprocess.check_output( f"{sys.executable} {compiled_module.__file__}".split(), stderr=subprocess.STDOUT, ).decode() self.assertTrue(len(bench_out) > 0)
["def","test_wrapper_using_gpu_seed","(","self",")",":","``","''","''","Make","sure","the","subprocess.check_output","does","not","throw.","``","''","''","@","torch.compile","def","f","(","x",",","y",")",":","#","dropout","will","result","in","usage","of","cuda_seed","z","=","torch.nn.functional.dropout","(","x",",","0.5",")","return","z","+","y","N","=","10","x","=","torch.rand","(","N",")",".to","(","device=GPU_TYPE",")","y","=","torch.rand","(","N",")",".to","(","device=GPU_TYPE",")","out","=","f","(","x",",","y",")","compiled_module","=","self.get_compiled_module","(",")","#","now","run","the","compiled","module","in","subprocess","and","check","its","output","bench_out","=","subprocess.check_output","(","f","''","{","sys.executable","}","{","compiled_module.__file__","}","''",".split","(",")",",","stderr=subprocess.STDOUT",",",")",".decode","(",")","self.assertTrue","(","len","(","bench_out",")",">","0",")"]
25
48
null
test_triton_wrapper.py
pytorch/test/inductor/test_triton_wrapper.py
import subprocess import sys import torch from torch._dynamo.test_case import run_tests, TestCase from torch._inductor.codecache import PyCodeCache from torch.testing._internal.inductor_utils import GPU_TYPE, HAS_GPU
10
1
6
1
1
0
2
Use image node_id 2 for calling the TestTritonWrapper obj's underlying member method code with example usage: obj.test_wrapper_using_gpu_seed() and returns: unknown
164
node_id 2
286,602
annotate
global
null
false
prediction_set,caption_files,output_dir
null
null
null
null
null
def annotate(prediction_set, caption_files, output_dir): """ Evaluates question and answer pairs using GPT-3 Returns a score for correctness. """ for file in tqdm(caption_files): key = file[:-5] # Strip file extension qa_set = prediction_set[key] question = qa_set["q"] answer = qa_set["a"] pred = qa_set["pred"] try: # Compute the correctness score completion = openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=[ { "role": "system", "content": "You are an intelligent chatbot designed for evaluating the correctness of generative outputs for question-answer pairs. " "Your task is to compare the predicted answer with the correct answer and determine if they match meaningfully. Here's how you can accomplish the task:" "------" "##INSTRUCTIONS: " "- Focus on the meaningful match between the predicted answer and the correct answer.\n" "- Consider synonyms or paraphrases as valid matches.\n" "- Evaluate the correctness of the prediction compared to the answer.", }, { "role": "user", "content": "Please evaluate the following video-based question-answer pair:\n\n" f"Question: {question}\n" f"Correct Answer: {answer}\n" f"Predicted Answer: {pred}\n\n" "Provide your evaluation only as a yes/no and score where the score is an integer value between 0 and 5, with 5 indicating the highest meaningful match. " "Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING." "DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. " "For example, your response should look like this: {'pred': 'yes', 'score': 4.8}.", }, ], ) # Convert response to a Python dictionary. response_message = completion["choices"][0]["message"][ "content" ] response_dict = ast.literal_eval(response_message) result_qa_pair = [response_dict, qa_set] # Save the question-answer pairs to a json file. with open(f"{output_dir}/{key}.json", "w") as f: json.dump(result_qa_pair, f) except Exception as e: print(f"Error processing file '{key}': {e}")
["def","annotate","(","prediction_set",",","caption_files",",","output_dir",")",":","``","''","''","Evaluates","question","and","answer","pairs","using","GPT-3","Returns","a","score","for","correctness.","``","''","''","for","file","in","tqdm","(","caption_files",")",":","key","=","file","[",":","-5","]","#","Strip","file","extension","qa_set","=","prediction_set","[","key","]","question","=","qa_set","[","``","q","''","]","answer","=","qa_set","[","``","a","''","]","pred","=","qa_set","[","``","pred","''","]","try",":","#","Compute","the","correctness","score","completion","=","openai.ChatCompletion.create","(","model=","''","gpt-3.5-turbo","''",",","messages=","[","{","``","role","''",":","``","system","''",",","``","content","''",":","``","You","are","an","intelligent","chatbot","designed","for","evaluating","the","correctness","of","generative","outputs","for","question-answer","pairs.","``","``","Your","task","is","to","compare","the","predicted","answer","with","the","correct","answer","and","determine","if","they","match","meaningfully",".","Here","'s","how","you","can","accomplish","the","task",":","''","``","--","--","--","''","``","#","#","INSTRUCTIONS",":","``","``","-","Focus","on","the","meaningful","match","between","the","predicted","answer","and","the","correct","answer.\\n","''","``","-","Consider","synonyms","or","paraphrases","as","valid","matches.\\n","''","``","-","Evaluate","the","correctness","of","the","prediction","compared","to","the","answer",".","``",",","}",",","{","``","role","''",":","``","user","''",",","``","content","''",":","``","Please","evaluate","the","following","video-based","question-answer","pair",":","\\n\\n","''","f","''","Question",":","{","question","}","\\n","''","f","''","Correct","Answer",":","{","answer","}","\\n","''","f","''","Predicted","Answer",":","{","pred","}","\\n\\n","''","``","Provide","your","evaluation","only","as","a","yes\/no","and","score","where","the","score","is","an","integer","value","between","0","and","5",",","with","5","indicating","the","highest","meaningful","match.","``","``","Please","generate","the","response","in","the","form","of","a","Python","dictionary","string","with","keys","'pred","'","and","'score","'",",","where","value","of","'pred","'","is","a","string","of","'yes","'","or","'no","'","and","value","of","'score","'","is","in","INTEGER",",","not","STRING",".","''","``","DO","NOT","PROVIDE","ANY","OTHER","OUTPUT","TEXT","OR","EXPLANATION",".","Only","provide","the","Python","dictionary","string.","``","``","For","example",",","your","response","should","look","like","this",":","{","'pred","'",":","'yes","'",",","'score","'",":","4.8","}",".","``",",","}",",","]",",",")","#","Convert","response","to","a","Python","dictionary",".","response_message","=","completion","[","``","choices","''","]","[","0","]","[","``","message","''","]","[","``","content","''","]","response_dict","=","ast.literal_eval","(","response_message",")","result_qa_pair","=","[","response_dict",",","qa_set","]","#","Save","the","question-answer","pairs","to","a","json","file",".","with","open","(","f","''","{","output_dir","}","\/","{","key","}",".json","''",",","``","w","''",")","as","f",":","json.dump","(","result_qa_pair",",","f",")","except","Exception","as","e",":","print","(","f","''","Error","processing","file","'","{","key","}","'",":","{","e","}","''",")"]
23
74
null
eval_activitynet_qa.py
llama-vid/llamavid/eval/eval_activitynet_qa.py
import openai import os import argparse import json import ast from multiprocessing.pool import Pool from tqdm import tqdm
10
null
7
null
null
3
null
Use image node_id 2 for calling a global function with example usage: annotate(prediction_set, caption_files, output_dir) without return types
142
node_id 2
167,040
__iter__
Arrayterator
null
true
self
Buffered iterator for big arrays. `Arrayterator` creates a buffered iterator for reading big arrays in small contiguous blocks. The class is useful for objects stored in the file system. It allows iteration over the object *without* reading everything in memory; instead, small blocks are read and iterated over. `Arrayterator` can be used with any object that supports multidimensional slices. This includes NumPy arrays, but also variables from Scientific.IO.NetCDF or pynetcdf for example. Parameters ---------- var : array_like The object to iterate over. buf_size : int, optional The buffer size. If `buf_size` is supplied, the maximum amount of data that will be read into memory is `buf_size` elements. Default is None, which will read as many elements as possible into memory. Attributes ---------- var buf_size start stop step shape flat See Also -------- numpy.ndenumerate : Multidimensional array iterator. numpy.flatiter : Flat array iterator. numpy.memmap : Create a memory-map to an array stored in a binary file on disk. Notes ----- The algorithm works by first finding a "running dimension", along which the blocks will be extracted. Given an array of dimensions ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the first dimension will be used. If, on the other hand, ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on. Blocks are extracted along this dimension, and when the last block is returned the process continues from the next dimension, until all elements have been read. Examples -------- >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) >>> a_itor = np.lib.Arrayterator(a, 2) >>> a_itor.shape (3, 4, 5, 6) Now we can iterate over ``a_itor``, and it will return arrays of size two. Since `buf_size` was smaller than any dimension, the first dimension will be iterated over first: >>> for subarr in a_itor: ... if not subarr.all(): ... print(subarr, subarr.shape) # doctest: +SKIP >>> # [[[[0 1]]]] (1, 1, 1, 2)
["Buffered","iterator","for","big","arrays",".","`","Arrayterator","`","creates","a","buffered","iterator","for","reading","big","arrays","in","small","contiguous","blocks",".","The","class","is","useful","for","objects","stored","in","the","file","system",".","It","allows","iteration","over","the","object","*","without","*","reading","everything","in","memory",";","instead",",","small","blocks","are","read","and","iterated","over",".","`","Arrayterator","`","can","be","used","with","any","object","that","supports","multidimensional","slices",".","This","includes","NumPy","arrays",",","but","also","variables","from","Scientific.IO.NetCDF","or","pynetcdf","for","example",".","Parameters","--","--","--","--","--","var",":","array_like","The","object","to","iterate","over",".","buf_size",":","int",",","optional","The","buffer","size",".","If","`","buf_size","`","is","supplied",",","the","maximum","amount","of","data","that","will","be","read","into","memory","is","`","buf_size","`","elements",".","Default","is","None",",","which","will","read","as","many","element","as","possible","into","memory",".","Attributes","--","--","--","--","--","var","buf_size","start","stop","step","shape","flat","See","Also","--","--","--","--","numpy.ndenumerate",":","Multidimensional","array","iterator",".","numpy.flatiter",":","Flat","array","iterator",".","numpy.memmap",":","Create","a","memory-map","to","an","array","stored","in","a","binary","file","on","disk",".","Notes","--","--","-","The","algorithm","works","by","first","finding","a","``","running","dimension","''",",","along","which","the","blocks","will","be","extracted",".","Given","an","array","of","dimensions","``","(","d1",",","d2",",","...",",","dn",")","``",",","e.g",".","if","`","buf_size","`","is","smaller","than","``","d1","``",",","the","first","dimension","will","be","used",".","If",",","on","the","other","hand",",","``","d1","<","buf_size","<","d1","*","d2","``","the","second","dimension","will","be","used",",","and","so","on",".","Blocks","are","extracted","along","this","dimension",",","and","when","the","last","block","is","returned","the","process","continues","from","the","next","dimension",",","until","all","elements","have","been","read",".","Examples","--","--","--","--",">",">",">","a","=","np.arange","(","3","*","4","*","5","*","6",")",".reshape","(","3",",","4",",","5",",","6",")",">",">",">","a_itor","=","np.lib.Arrayterator","(","a",",","2",")",">",">",">","a_itor.shape","(","3",",","4",",","5",",","6",")","Now","we","can","iterate","over","``","a_itor","``",",","and","it","will","return","arrays","of","size","two",".","Since","`","buf_size","`","was","smaller","than","any","dimension",",","the","first","dimension","will","be","iterated","over","first",":",">",">",">","for","subarr","in","a_itor",":","...","if","not","subarr.all","(",")",":","...","print","(","subarr",",","subarr.shape",")","#","doctest",":","+SKIP",">",">",">","#","[","[","[","[","0","1","]","]","]","]","(","1",",","1",",","1",",","2",")"]
null
null
null
def __iter__(self): # Skip arrays with degenerate dimensions if [dim for dim in self.shape if dim <= 0]: return start = self.start[:] stop = self.stop[:] step = self.step[:] ndims = self.var.ndim while True: count = self.buf_size or reduce(mul, self.shape) # iterate over each dimension, looking for the # running dimension (ie, the dimension along which # the blocks will be built from) rundim = 0 for i in range(ndims - 1, -1, -1): # if count is zero we ran out of elements to read # along higher dimensions, so we read only a single position if count == 0: stop[i] = start[i] + 1 elif count <= self.shape[i]: # limit along this dimension stop[i] = start[i] + count * step[i] rundim = i else: # read everything along this dimension stop[i] = self.stop[i] stop[i] = min(self.stop[i], stop[i]) count = count // self.shape[i] # yield a block slice_ = tuple(slice(*t) for t in zip(start, stop, step)) yield self.var[slice_] # Update start position, taking care of overflow to # other dimensions start[rundim] = stop[rundim] # start where we stopped for i in range(ndims - 1, 0, -1): if start[i] >= self.stop[i]: start[i] = self.start[i] start[i - 1] += self.step[i - 1] if start[0] >= self.stop[0]: return
["def","__iter__","(","self",")",":","#","Skip","arrays","with","degenerate","dimensions","if","[","dim","for","dim","in","self.shape","if","dim","<","=","0","]",":","return","start","=","self.start","[",":","]","stop","=","self.stop","[",":","]","step","=","self.step","[",":","]","ndims","=","self.var.ndim","while","True",":","count","=","self.buf_size","or","reduce","(","mul",",","self.shape",")","#","iterate","over","each","dimension",",","looking","for","the","#","running","dimension","(","ie",",","the","dimension","along","which","#","the","blocks","will","be","built","from",")","rundim","=","0","for","i","in","range","(","ndims","-","1",",","-1",",","-1",")",":","#","if","count","is","zero","we","ran","out","of","elements","to","read","#","along","higher","dimensions",",","so","we","read","only","a","single","position","if","count","==","0",":","stop","[","i","]","=","start","[","i","]","+","1","elif","count","<","=","self.shape","[","i","]",":","#","limit","along","this","dimension","stop","[","i","]","=","start","[","i","]","+","count","*","step","[","i","]","rundim","=","i","else",":","#","read","everything","along","this","dimension","stop","[","i","]","=","self.stop","[","i","]","stop","[","i","]","=","min","(","self.stop","[","i","]",",","stop","[","i","]",")","count","=","count","\/\/","self.shape","[","i","]","#","yield","a","block","slice_","=","tuple","(","slice","(","*","t",")","for","t","in","zip","(","start",",","stop",",","step",")",")","yield","self.var","[","slice_","]","#","Update","start","position",",","taking","care","of","overflow","to","#","other","dimensions","start","[","rundim","]","=","stop","[","rundim","]","#","start","where","we","stopped","for","i","in","range","(","ndims","-","1",",","0",",","-1",")",":","if","start","[","i","]",">","=","self.stop","[","i","]",":","start","[","i","]","=","self.start","[","i","]","start","[","i","-","1","]","+=","self.step","[","i","-","1","]","if","start","[","0","]",">","=","self.stop","[","0","]",":","return"]
177
221
null
_arrayterator_impl.py
numpy/numpy/lib/_arrayterator_impl.py
from operator import mul from functools import reduce
10
1
2
0
null
0
7
Use image node_id 7 for calling the Arrayterator obj's underlying member method code with example usage: obj.__iter__() without return types
140
node_id 7
209,167
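A sketch of block iteration, adapted from the class docstring's own example; each yielded block holds at most buf_size elements.

import numpy as np

a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
a_itor = np.lib.Arrayterator(a, 2)   # buf_size=2
for subarr in a_itor:
    assert subarr.size <= 2          # blocks never exceed the buffer
print(a_itor.shape)                  # (3, 4, 5, 6)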
flat
Arrayterator
null
true
self
Buffered iterator for big arrays. `Arrayterator` creates a buffered iterator for reading big arrays in small contiguous blocks. The class is useful for objects stored in the file system. It allows iteration over the object *without* reading everything in memory; instead, small blocks are read and iterated over. `Arrayterator` can be used with any object that supports multidimensional slices. This includes NumPy arrays, but also variables from Scientific.IO.NetCDF or pynetcdf for example. Parameters ---------- var : array_like The object to iterate over. buf_size : int, optional The buffer size. If `buf_size` is supplied, the maximum amount of data that will be read into memory is `buf_size` elements. Default is None, which will read as many elements as possible into memory. Attributes ---------- var buf_size start stop step shape flat See Also -------- numpy.ndenumerate : Multidimensional array iterator. numpy.flatiter : Flat array iterator. numpy.memmap : Create a memory-map to an array stored in a binary file on disk. Notes ----- The algorithm works by first finding a "running dimension", along which the blocks will be extracted. Given an array of dimensions ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the first dimension will be used. If, on the other hand, ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on. Blocks are extracted along this dimension, and when the last block is returned the process continues from the next dimension, until all elements have been read. Examples -------- >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) >>> a_itor = np.lib.Arrayterator(a, 2) >>> a_itor.shape (3, 4, 5, 6) Now we can iterate over ``a_itor``, and it will return arrays of size two. Since `buf_size` was smaller than any dimension, the first dimension will be iterated over first: >>> for subarr in a_itor: ... if not subarr.all(): ... print(subarr, subarr.shape) # doctest: +SKIP >>> # [[[[0 1]]]] (1, 1, 1, 2)
["Buffered","iterator","for","big","arrays",".","`","Arrayterator","`","creates","a","buffered","iterator","for","reading","big","arrays","in","small","contiguous","blocks",".","The","class","is","useful","for","objects","stored","in","the","file","system",".","It","allows","iteration","over","the","object","*","without","*","reading","everything","in","memory",";","instead",",","small","blocks","are","read","and","iterated","over",".","`","Arrayterator","`","can","be","used","with","any","object","that","supports","multidimensional","slices",".","This","includes","NumPy","arrays",",","but","also","variables","from","Scientific.IO.NetCDF","or","pynetcdf","for","example",".","Parameters","--","--","--","--","--","var",":","array_like","The","object","to","iterate","over",".","buf_size",":","int",",","optional","The","buffer","size",".","If","`","buf_size","`","is","supplied",",","the","maximum","amount","of","data","that","will","be","read","into","memory","is","`","buf_size","`","elements",".","Default","is","None",",","which","will","read","as","many","element","as","possible","into","memory",".","Attributes","--","--","--","--","--","var","buf_size","start","stop","step","shape","flat","See","Also","--","--","--","--","numpy.ndenumerate",":","Multidimensional","array","iterator",".","numpy.flatiter",":","Flat","array","iterator",".","numpy.memmap",":","Create","a","memory-map","to","an","array","stored","in","a","binary","file","on","disk",".","Notes","--","--","-","The","algorithm","works","by","first","finding","a","``","running","dimension","''",",","along","which","the","blocks","will","be","extracted",".","Given","an","array","of","dimensions","``","(","d1",",","d2",",","...",",","dn",")","``",",","e.g",".","if","`","buf_size","`","is","smaller","than","``","d1","``",",","the","first","dimension","will","be","used",".","If",",","on","the","other","hand",",","``","d1","<","buf_size","<","d1","*","d2","``","the","second","dimension","will","be","used",",","and","so","on",".","Blocks","are","extracted","along","this","dimension",",","and","when","the","last","block","is","returned","the","process","continues","from","the","next","dimension",",","until","all","elements","have","been","read",".","Examples","--","--","--","--",">",">",">","a","=","np.arange","(","3","*","4","*","5","*","6",")",".reshape","(","3",",","4",",","5",",","6",")",">",">",">","a_itor","=","np.lib.Arrayterator","(","a",",","2",")",">",">",">","a_itor.shape","(","3",",","4",",","5",",","6",")","Now","we","can","iterate","over","``","a_itor","``",",","and","it","will","return","arrays","of","size","two",".","Since","`","buf_size","`","was","smaller","than","any","dimension",",","the","first","dimension","will","be","iterated","over","first",":",">",">",">","for","subarr","in","a_itor",":","...","if","not","subarr.all","(",")",":","...","print","(","subarr",",","subarr.shape",")","#","doctest",":","+SKIP",">",">",">","#","[","[","[","[","0","1","]","]","]","]","(","1",",","1",",","1",",","2",")"]
A 1-D flat iterator for Arrayterator objects. This iterator returns elements of the array to be iterated over in `~lib.Arrayterator` one by one. It is similar to `flatiter`. See Also -------- lib.Arrayterator flatiter Examples -------- >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) >>> a_itor = np.lib.Arrayterator(a, 2) >>> for subarr in a_itor.flat: ... if not subarr: ... print(subarr, type(subarr)) ... 0 <class 'numpy.int64'>
["A","1-D","flat","iterator","for","Arrayterator","objects",".","This","iterator","returns","elements","of","the","array","to","be","iterated","over","in","`","~lib.Arrayterator","`","one","by","one",".","It","is","similar","to","`","flatiter","`",".","See","Also","--","--","--","--","lib.Arrayterator","flatiter","Examples","--","--","--","--",">",">",">","a","=","np.arange","(","3","*","4","*","5","*","6",")",".reshape","(","3",",","4",",","5",",","6",")",">",">",">","a_itor","=","np.lib.Arrayterator","(","a",",","2",")",">",">",">","for","subarr","in","a_itor.flat",":","...","if","not","subarr",":","...","print","(","subarr",",","type","(","subarr",")",")","...","0","<","class","'numpy.int64","'",">"]
null
def flat(self): """ A 1-D flat iterator for Arrayterator objects. This iterator returns elements of the array to be iterated over in `~lib.Arrayterator` one by one. It is similar to `flatiter`. See Also -------- lib.Arrayterator flatiter Examples -------- >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) >>> a_itor = np.lib.Arrayterator(a, 2) >>> for subarr in a_itor.flat: ... if not subarr: ... print(subarr, type(subarr)) ... 0 <class 'numpy.int64'> """ for block in self: yield from block.flat
["def","flat","(","self",")",":","``","''","''","A","1-D","flat","iterator","for","Arrayterator","objects",".","This","iterator","returns","elements","of","the","array","to","be","iterated","over","in","`","~lib.Arrayterator","`","one","by","one",".","It","is","similar","to","`","flatiter","`",".","See","Also","--","--","--","--","lib.Arrayterator","flatiter","Examples","--","--","--","--",">",">",">","a","=","np.arange","(","3","*","4","*","5","*","6",")",".reshape","(","3",",","4",",","5",",","6",")",">",">",">","a_itor","=","np.lib.Arrayterator","(","a",",","2",")",">",">",">","for","subarr","in","a_itor.flat",":","...","if","not","subarr",":","...","print","(","subarr",",","type","(","subarr",")",")","...","0","<","class","'numpy.int64","'",">","``","''","''","for","block","in","self",":","yield","from","block.flat"]
138
164
null
_arrayterator_impl.py
numpy/numpy/lib/_arrayterator_impl.py
from operator import mul from functools import reduce
10
1
2
0
null
0
7
Use image node_id 5 for calling the Arrayterator obj's underlying member method code with example usage: obj.flat() without return types
136
node_id 5
209,165
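A hedged usage sketch for the flat iterator documented in this record; it assumes a standard NumPy install where np.lib.Arrayterator is importable, and mirrors the doctest above.

import numpy as np

a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
a_itor = np.lib.Arrayterator(a, 2)
# flat yields scalar elements one at a time while reading only small blocks
first = next(iter(a_itor.flat))
print(first, type(first))  # 0 <class 'numpy.int64'>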
test_xnnpack_lowering
TestXNNPackBackend
unittest
true
self
null
null
null
null
unknown
def test_xnnpack_lowering(self): class Module(torch.nn.Module): def forward(self, x): return x + x scripted_module = torch.jit.script(Module()) faulty_compile_spec = { "backward": { "inputs": [torch.zeros(1)], "outputs": [torch.zeros(1)], } } error_msg = ( 'method_compile_spec does not contain the "forward" key.' ) with self.assertRaisesRegex( RuntimeError, error_msg, ): _ = torch._C._jit_to_backend( "xnnpack", scripted_module, faulty_compile_spec, ) mismatch_compile_spec = { "forward": { "inputs": [torch.zeros(1), torch.zeros(1)], "outputs": [torch.zeros(1)], } } error_msg = "method_compile_spec inputs do not match expected number of forward inputs" with self.assertRaisesRegex( RuntimeError, error_msg, ): _ = torch._C._jit_to_backend( "xnnpack", scripted_module, mismatch_compile_spec ) lowered = torch._C._jit_to_backend( "xnnpack", scripted_module, { "forward": { "inputs": [torch.zeros(1)], "outputs": [torch.zeros(1)], } }, ) lowered(torch.zeros(1))
["def","test_xnnpack_lowering","(","self",")",":","class","Module","(","torch.nn.Module",")",":","def","forward","(","self",",","x",")",":","return","x","+","x","scripted_module","=","torch.jit.script","(","Module","(",")",")","faulty_compile_spec","=","{","``","backward","''",":","{","``","inputs","''",":","[","torch.zeros","(","1",")","]",",","``","outputs","''",":","[","torch.zeros","(","1",")","]",",","}","}","error_msg","=","(","'method_compile_spec","does","not","contain","the","``","forward","''","key",".","'",")","with","self.assertRaisesRegex","(","RuntimeError",",","error_msg",",",")",":","_","=","torch._C._jit_to_backend","(","``","xnnpack","''",",","scripted_module",",","faulty_compile_spec",",",")","mismatch_compile_spec","=","{","``","forward","''",":","{","``","inputs","''",":","[","torch.zeros","(","1",")",",","torch.zeros","(","1",")","]",",","``","outputs","''",":","[","torch.zeros","(","1",")","]",",","}","}","error_msg","=","``","method_compile_spec","inputs","do","not","match","expected","number","of","forward","inputs","''","with","self.assertRaisesRegex","(","RuntimeError",",","error_msg",",",")",":","_","=","torch._C._jit_to_backend","(","``","xnnpack","''",",","scripted_module",",","mismatch_compile_spec",")","lowered","=","torch._C._jit_to_backend","(","``","xnnpack","''",",","scripted_module",",","{","``","forward","''",":","{","``","inputs","''",":","[","torch.zeros","(","1",")","]",",","``","outputs","''",":","[","torch.zeros","(","1",")","]",",","}","}",",",")","lowered","(","torch.zeros","(","1",")",")"]
39
94
null
test_xnnpack_delegate.py
pytorch/test/jit/xnnpack/test_xnnpack_delegate.py
import unittest import torch import torch._C
10
1
3
1
1
0
5
Use image node_id 2 for calling the TestXNNPackBackend obj's underlying member method code with example usage: obj.test_xnnpack_lowering() and returns: unknown
159
node_id 2
288,133
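A minimal sketch of the successful lowering path this test finishes with; AddSelf is a hypothetical module name, and the snippet assumes a PyTorch build with the experimental XNNPACK JIT delegate compiled in and registered.

import torch

class AddSelf(torch.nn.Module):  # hypothetical stand-in for the test's Module
    def forward(self, x):
        return x + x

scripted = torch.jit.script(AddSelf())
# method_compile_spec must contain a "forward" key whose inputs match the arity
lowered = torch._C._jit_to_backend(
    "xnnpack",
    scripted,
    {"forward": {"inputs": [torch.zeros(1)], "outputs": [torch.zeros(1)]}},
)
print(lowered(torch.zeros(1)))  # tensor([0.])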
test_xnnpack_constant_data
TestXNNPackBackend
unittest
true
self
null
null
null
null
unknown
def test_xnnpack_constant_data(self): class Module(torch.nn.Module): def __init__(self): super().__init__() self._constant = torch.ones(4, 4, 4) def forward(self, x): return x + self._constant scripted_module = torch.jit.script(Module()) lowered_module = torch._C._jit_to_backend( "xnnpack", scripted_module, { "forward": { "inputs": [torch.randn(4, 4, 4)], "outputs": [torch.randn(4, 4, 4)], } }, ) for i in range(0, 20): sample_input = torch.randn(4, 4, 4) actual_output = scripted_module(sample_input) expected_output = lowered_module(sample_input) self.assertTrue( torch.allclose( actual_output, expected_output, atol=1e-03, rtol=1e-03 ) )
["def","test_xnnpack_constant_data","(","self",")",":","class","Module","(","torch.nn.Module",")",":","def","__init__","(","self",")",":","super","(",")",".__init__","(",")","self._constant","=","torch.ones","(","4",",","4",",","4",")","def","forward","(","self",",","x",")",":","return","x","+","self._constant","scripted_module","=","torch.jit.script","(","Module","(",")",")","lowered_module","=","torch._C._jit_to_backend","(","``","xnnpack","''",",","scripted_module",",","{","``","forward","''",":","{","``","inputs","''",":","[","torch.randn","(","4",",","4",",","4",")","]",",","``","outputs","''",":","[","torch.randn","(","4",",","4",",","4",")","]",",","}","}",",",")","for","i","in","range","(","0",",","20",")",":","sample_input","=","torch.randn","(","4",",","4",",","4",")","actual_output","=","scripted_module","(","sample_input",")","expected_output","=","lowered_module","(","sample_input",")","self.assertTrue","(","torch.allclose","(","actual_output",",","expected_output",",","atol=1e-03",",","rtol=1e-03",")",")"]
11
37
null
test_xnnpack_delegate.py
pytorch/test/jit/xnnpack/test_xnnpack_delegate.py
import unittest import torch import torch._C
10
1
3
1
1
0
5
Use image node_id 1 for calling the TestXNNPackBackend obj's underlying member method code with example usage: obj.test_xnnpack_constant_data() and returns: unknown
164
node_id 1
288,132
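A hedged condensation of the numerical check above: a lowered module with captured constant data should match the eager scripted module within loose float tolerances. AddConst is a hypothetical name; the XNNPACK delegate must be available.

import torch

class AddConst(torch.nn.Module):  # hypothetical name
    def __init__(self):
        super().__init__()
        self._constant = torch.ones(4, 4, 4)

    def forward(self, x):
        return x + self._constant

scripted = torch.jit.script(AddConst())
lowered = torch._C._jit_to_backend(
    "xnnpack",
    scripted,
    {"forward": {"inputs": [torch.randn(4, 4, 4)],
                 "outputs": [torch.randn(4, 4, 4)]}},
)
sample = torch.randn(4, 4, 4)
# constant weights baked into the delegate should not change the result
assert torch.allclose(scripted(sample), lowered(sample), atol=1e-03, rtol=1e-03)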
validate_var_name
global
null
false
var_name
null
null
null
null
null
def validate_var_name(var_name: str) -> None: """Validates that the variable name is a valid identifier.""" if not var_name.isidentifier(): raise ValueError( 'Invalid Python variable name, got "{}"'.format(var_name) ) if keyword.iskeyword(var_name): raise ValueError( 'Cannot use Python keywords, got "{}"'.format(var_name) )
["def","validate_var_name","(","var_name",":","str",")","-",">","None",":","``","''","''","Validates","that","the","variable","name","is","a","valid","identifier",".","''","''","''","if","not","var_name.isidentifier","(",")",":","raise","ValueError","(","'Invalid","Python","variable","name",",","got","``","{","}","''","'.format","(","var_name",")",")","if","keyword.iskeyword","(","var_name",")",":","raise","ValueError","(","'","Can","not","use","Python","keywords",",","got","``","{","}","''","'.format","(","var_name",")",")"]
24
29
null
py_utils.py
generative-ai-python/google/generativeai/notebook/py_utils.py
from __future__ import annotations import builtins import keyword import sys from typing import Any
10
null
5
null
null
5
null
Use image node_id 1 for calling a global function with example usage: validate_var_name(var_name) without return types
118
node_id 1
146,202
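A hedged usage sketch exercising both failure branches of validate_var_name shown above.

validate_var_name("my_var")       # passes silently: valid, non-keyword identifier
for bad in ("1bad", "class"):
    try:
        validate_var_name(bad)
    except ValueError as err:
        print(err)                # invalid identifier, then Python keyword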
get_main_module
global
null
false
null
null
null
null
null
sys
def get_main_module(): return sys.modules["__main__"]
["def","get_main_module","(",")",":","return","sys.modules","[","``","__main__","''","]"]
32
33
null
py_utils.py
generative-ai-python/google/generativeai/notebook/py_utils.py
from __future__ import annotations import builtins import keyword import sys from typing import Any
10
null
5
null
null
5
null
Use image node_id 2 for calling a global function with example usage: get_main_module() and returns: sys
104
node_id 2
146,203
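A trivial hedged check of get_main_module, which simply resolves the interpreter's __main__ entry from sys.modules.

main_mod = get_main_module()
print(main_mod.__name__)  # '__main__'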
get_py_var
global
null
false
var_name
null
null
null
null
g_vars,vars
def get_py_var(var_name: str) -> Any: """Retrieves the value of `var_name` from the global environment.""" validate_var_name(var_name) g_vars = vars(get_main_module()) if var_name in g_vars: return g_vars[var_name] elif var_name in vars(builtins): return vars(builtins)[var_name] raise NameError('"{}" not found'.format(var_name))
["def","get_py_var","(","var_name",":","str",")","-",">","Any",":","``","''","''","Retrieves","the","value","of","`","var_name","`","from","the","global","environment",".","''","''","''","validate_var_name","(","var_name",")","g_vars","=","vars","(","get_main_module","(",")",")","if","var_name","in","g_vars",":","return","g_vars","[","var_name","]","elif","var_name","in","vars","(","builtins",")",":","return","vars","(","builtins",")","[","var_name","]","raise","NameError","(","'","''","{","}","''","not","found'.format","(","var_name",")",")"]
36
44
null
py_utils.py
generative-ai-python/google/generativeai/notebook/py_utils.py
from __future__ import annotations import builtins import keyword import sys from typing import Any
10
null
5
null
null
5
null
Use image node_id 3 for calling a global function with example usage: get_py_var(var_name) and returns: g_vars, vars
116
node_id 3
146,204
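A hedged sketch of get_py_var's two-step lookup; `answer` is a hypothetical variable assumed to be defined at __main__ scope, e.g. in a notebook cell.

answer = 42                   # hypothetical: lives in __main__'s globals
print(get_py_var("answer"))   # 42, found in the main module first
print(get_py_var("len"))      # <built-in function len>, via the builtins fallback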
has_py_var
global
null
false
var_name
null
null
null
null
True,False,False
def has_py_var(var_name: str) -> bool: """Returns true if `var_name` is defined in the global environment.""" try: validate_var_name(var_name) _ = get_py_var(var_name) except ValueError: return False except NameError: return False return True
["def","has_py_var","(","var_name",":","str",")","-",">","bool",":","``","''","''","Returns","true","if","`","var_name","`","is","defined","in","the","global","environment",".","''","''","''","try",":","validate_var_name","(","var_name",")","_","=","get_py_var","(","var_name",")","except","ValueError",":","return","False","except","NameError",":","return","False","return","True"]
47
57
null
py_utils.py
generative-ai-python/google/generativeai/notebook/py_utils.py
from __future__ import annotations import builtins import keyword import sys from typing import Any
10
null
5
null
null
5
null
Use image node_id 4 for calling a global function with example usage: has_py_var(var_name) and returns: True, False, False
122
node_id 4
146,205
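A hedged sketch of has_py_var, which converts both validation and lookup failures into False.

print(has_py_var("len"))      # True: resolvable builtin
print(has_py_var("missing"))  # False: NameError from get_py_var is swallowed
print(has_py_var("class"))    # False: keyword fails validation (ValueError)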
set_py_var
global
null
false
var_name,val
null
null
null
null
null
def set_py_var(var_name: str, val: Any) -> None: """Sets the value of `var_name` in the global environment.""" validate_var_name(var_name) g_vars = vars(get_main_module()) g_vars[var_name] = val
["def","set_py_var","(","var_name",":","str",",","val",":","Any",")","-",">","None",":","``","''","''","Sets","the","value","of","`","var_name","`","in","the","global","environment",".","''","''","''","validate_var_name","(","var_name",")","g_vars","=","vars","(","get_main_module","(",")",")","g_vars","[","var_name","]","=","val"]
60
64
null
py_utils.py
generative-ai-python/google/generativeai/notebook/py_utils.py
from __future__ import annotations import builtins import keyword import sys from typing import Any
10
null
5
null
null
5
null
Use image node_id 5 for calling a global function with example usage: set_py_var(var_name, val) without return types
116
node_id 5
146,206
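A hedged round-trip sketch pairing set_py_var with get_py_var; it assumes both functions above are in scope.

set_py_var("greeting", "hello")
print(get_py_var("greeting"))  # 'hello', now visible in __main__'s globals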
test_run_without_schema
TestNeo4jHookConn
null
true
self,mock_graph_database
null
null
null
null
null
def test_run_without_schema(self, mock_graph_database): connection = Connection( conn_type="neo4j", login="login", password="password", host="host", schema=None, ) mock_sql = mock.MagicMock(name="sql") # Use the environment variable mocking to test saving the configuration as a URI and # to avoid mocking Airflow models class with mock.patch.dict( "os.environ", AIRFLOW_CONN_NEO4J_DEFAULT=connection.get_uri() ): neo4j_hook = Neo4jHook() op_result = neo4j_hook.run(mock_sql) mock_graph_database.assert_has_calls( [ mock.call.driver( "bolt://host:7687", auth=("login", "password"), encrypted=False, ), mock.call.driver().session(), mock.call.driver().session().__enter__(), mock.call.driver() .session() .__enter__() .run(mock_sql), mock.call.driver().session().__enter__().run().data(), mock.call.driver() .session() .__exit__(None, None, None), ] ) session = ( mock_graph_database.driver.return_value.session.return_value.__enter__.return_value ) assert op_result == session.run.return_value.data.return_value
["def","test_run_without_schema","(","self",",","mock_graph_database",")",":","connection","=","Connection","(","conn_type=","''","neo4j","''",",","login=","''","login","''",",","password=","''","password","''",",","host=","''","host","''",",","schema=None",",",")","mock_sql","=","mock.MagicMock","(","name=","''","sql","''",")","#","Use","the","environment","variable","mocking","to","test","saving","the","configuration","as","a","URI","and","#","to","avoid","mocking","Airflow","models","class","with","mock.patch.dict","(","``","os.environ","''",",","AIRFLOW_CONN_NEO4J_DEFAULT=connection.get_uri","(",")",")",":","neo4j_hook","=","Neo4jHook","(",")","op_result","=","neo4j_hook.run","(","mock_sql",")","mock_graph_database.assert_has_calls","(","[","mock.call.driver","(","``","bolt",":","\/\/host:7687","''",",","auth=","(","``","login","''",",","``","password","''",")",",","encrypted=False",",",")",",","mock.call.driver","(",")",".session","(",")",",","mock.call.driver","(",")",".session","(",")",".__enter__","(",")",",","mock.call.driver","(",")",".session","(",")",".__enter__","(",")",".run","(","mock_sql",")",",","mock.call.driver","(",")",".session","(",")",".__enter__","(",")",".run","(",")",".data","(",")",",","mock.call.driver","(",")",".session","(",")",".__exit__","(","None",",","None",",","None",")",",","]",")","session","=","(","mock_graph_database.driver.return_value.session.return_value.__enter__.return_value",")","assert","op_result","==","session.run.return_value.data.return_value"]
83
105
null
test_neo4j.py
airflow/tests/providers/neo4j/hooks/test_neo4j.py
from __future__ import annotations from unittest import mock import pytest from airflow.models import Connection from airflow.providers.neo4j.hooks.neo4j import Neo4jHook
10
1
5
0
null
0
4
Use image node_id 3 for calling the TestNeo4jHookConn obj's underlying member method code with example usage: obj.test_run_without_schema(mock_graph_database) without return types
179
node_id 3
25,713
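A hedged sketch of the real-world pattern this mock-based test verifies; it assumes apache-airflow-providers-neo4j is installed and, unlike the test, an actual Neo4j server reachable at `host`.

import os
from airflow.providers.neo4j.hooks.neo4j import Neo4jHook

# connection supplied as a URI; with no schema, the hook targets bolt://host:7687
os.environ["AIRFLOW_CONN_NEO4J_DEFAULT"] = "neo4j://login:password@host"
hook = Neo4jHook()
rows = hook.run("RETURN 1 AS one")  # list of dicts from session.run(...).data()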
set_doc
global
null
false
doc
null
null
null
null
inner,f
def set_doc(doc): """A decorator to set the docstring of a function.""" def inner(f): f.__doc__ = doc return f return inner
["def","set_doc","(","doc",")",":","``","''","''","A","decorator","to","set","the","docstring","of","a","function",".","''","''","''","def","inner","(","f",")",":","f.__doc__","=","doc","return","f","return","inner"]
24
31
null
string_utils.py
generative-ai-python/google/generativeai/string_utils.py
from __future__ import annotations import dataclasses import pprint import re import reprlib import textwrap
10
null
6
null
null
4
null
Use image node_id 1 for calling a global function with example usage: set_doc(doc) and returns: inner, f
104
node_id 1
146,243
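A hedged usage sketch of the set_doc decorator; `add` is a hypothetical function.

@set_doc("Add two numbers.")
def add(a, b):
    return a + b

print(add.__doc__)  # 'Add two numbers.'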
strip_oneof
global
null
false
docstring
null
null
null
null
str
def strip_oneof(docstring): lines = docstring.splitlines() lines = [line for line in lines if ".. _oneof:" not in line] lines = [ line for line in lines if "This field is a member of `oneof`_" not in line ] return "\n".join(lines)
["def","strip_oneof","(","docstring",")",":","lines","=","docstring.splitlines","(",")","lines","=","[","line","for","line","in","lines","if","``","..","_oneof",":","''","not","in","line","]","lines","=","[","line","for","line","in","lines","if","``","This","field","is","a","member","of","`","oneof","`","_","''","not","in","line","]","return","``","\\n","''",".join","(","lines",")"]
34
38
null
string_utils.py
generative-ai-python/google/generativeai/string_utils.py
from __future__ import annotations import dataclasses import pprint import re import reprlib import textwrap
10
null
6
null
null
4
null
Use image node_id 2 for calling a global function with example usage: strip_oneof(docstring) and returns: str
109
node_id 2
146,244
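A hedged sketch of strip_oneof on a proto-plus-style docstring fragment; the input text is illustrative.

doc = (
    "weight:\n"
    "    .. _oneof: https://proto-plus-python.readthedocs.io\n"
    "    This field is a member of `oneof`_ ``value``.\n"
    "    The weight value."
)
print(strip_oneof(doc))
# weight:
#     The weight value.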
shape
Arrayterator
null
true
self
Buffered iterator for big arrays. `Arrayterator` creates a buffered iterator for reading big arrays in small contiguous blocks. The class is useful for objects stored in the file system. It allows iteration over the object *without* reading everything in memory; instead, small blocks are read and iterated over. `Arrayterator` can be used with any object that supports multidimensional slices. This includes NumPy arrays, but also variables from Scientific.IO.NetCDF or pynetcdf for example. Parameters ---------- var : array_like The object to iterate over. buf_size : int, optional The buffer size. If `buf_size` is supplied, the maximum amount of data that will be read into memory is `buf_size` elements. Default is None, which will read as many element as possible into memory. Attributes ---------- var buf_size start stop step shape flat See Also -------- numpy.ndenumerate : Multidimensional array iterator. numpy.flatiter : Flat array iterator. numpy.memmap : Create a memory-map to an array stored in a binary file on disk. Notes ----- The algorithm works by first finding a "running dimension", along which the blocks will be extracted. Given an array of dimensions ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the first dimension will be used. If, on the other hand, ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on. Blocks are extracted along this dimension, and when the last block is returned the process continues from the next dimension, until all elements have been read. Examples -------- >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) >>> a_itor = np.lib.Arrayterator(a, 2) >>> a_itor.shape (3, 4, 5, 6) Now we can iterate over ``a_itor``, and it will return arrays of size two. Since `buf_size` was smaller than any dimension, the first dimension will be iterated over first: >>> for subarr in a_itor: ... if not subarr.all(): ... print(subarr, subarr.shape) # doctest: +SKIP >>> # [[[[0 1]]]] (1, 1, 1, 2)
["Buffered","iterator","for","big","arrays",".","`","Arrayterator","`","creates","a","buffered","iterator","for","reading","big","arrays","in","small","contiguous","blocks",".","The","class","is","useful","for","objects","stored","in","the","file","system",".","It","allows","iteration","over","the","object","*","without","*","reading","everything","in","memory",";","instead",",","small","blocks","are","read","and","iterated","over",".","`","Arrayterator","`","can","be","used","with","any","object","that","supports","multidimensional","slices",".","This","includes","NumPy","arrays",",","but","also","variables","from","Scientific.IO.NetCDF","or","pynetcdf","for","example",".","Parameters","--","--","--","--","--","var",":","array_like","The","object","to","iterate","over",".","buf_size",":","int",",","optional","The","buffer","size",".","If","`","buf_size","`","is","supplied",",","the","maximum","amount","of","data","that","will","be","read","into","memory","is","`","buf_size","`","elements",".","Default","is","None",",","which","will","read","as","many","element","as","possible","into","memory",".","Attributes","--","--","--","--","--","var","buf_size","start","stop","step","shape","flat","See","Also","--","--","--","--","numpy.ndenumerate",":","Multidimensional","array","iterator",".","numpy.flatiter",":","Flat","array","iterator",".","numpy.memmap",":","Create","a","memory-map","to","an","array","stored","in","a","binary","file","on","disk",".","Notes","--","--","-","The","algorithm","works","by","first","finding","a","``","running","dimension","''",",","along","which","the","blocks","will","be","extracted",".","Given","an","array","of","dimensions","``","(","d1",",","d2",",","...",",","dn",")","``",",","e.g",".","if","`","buf_size","`","is","smaller","than","``","d1","``",",","the","first","dimension","will","be","used",".","If",",","on","the","other","hand",",","``","d1","<","buf_size","<","d1","*","d2","``","the","second","dimension","will","be","used",",","and","so","on",".","Blocks","are","extracted","along","this","dimension",",","and","when","the","last","block","is","returned","the","process","continues","from","the","next","dimension",",","until","all","elements","have","been","read",".","Examples","--","--","--","--",">",">",">","a","=","np.arange","(","3","*","4","*","5","*","6",")",".reshape","(","3",",","4",",","5",",","6",")",">",">",">","a_itor","=","np.lib.Arrayterator","(","a",",","2",")",">",">",">","a_itor.shape","(","3",",","4",",","5",",","6",")","Now","we","can","iterate","over","``","a_itor","``",",","and","it","will","return","arrays","of","size","two",".","Since","`","buf_size","`","was","smaller","than","any","dimension",",","the","first","dimension","will","be","iterated","over","first",":",">",">",">","for","subarr","in","a_itor",":","...","if","not","subarr.all","(",")",":","...","print","(","subarr",",","subarr.shape",")","#","doctest",":","+SKIP",">",">",">","#","[","[","[","[","0","1","]","]","]","]","(","1",",","1",",","1",",","2",")"]
The shape of the array to be iterated over. For an example, see `Arrayterator`.
["The","shape","of","the","array","to","be","iterated","over",".","For","an","example",",","see","`","Arrayterator","`","."]
tuple
def shape(self): """ The shape of the array to be iterated over. For an example, see `Arrayterator`. """ return tuple( ((stop - start - 1) // step + 1) for start, stop, step in zip(self.start, self.stop, self.step) )
["def","shape","(","self",")",":","``","''","''","The","shape","of","the","array","to","be","iterated","over",".","For","an","example",",","see","`","Arrayterator","`",".","``","''","''","return","tuple","(","(","(","stop","-","start","-","1",")","\/\/","step","+","1",")","for","start",",","stop",",","step","in","zip","(","self.start",",","self.stop",",","self.step",")",")"]
167
175
null
_arrayterator_impl.py
numpy/numpy/lib/_arrayterator_impl.py
from operator import mul from functools import reduce
10
1
2
0
null
0
7
Use image node_id 6 for calling the Arrayterator obj's underlying member method code with example usage: obj.shape() and returns: tuple
135
node_id 6
209,166
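A hedged sketch of the shape property; note it is derived from the iterator's start/stop/step bookkeeping rather than read from the underlying array.

import numpy as np

a_itor = np.lib.Arrayterator(np.zeros((3, 4, 5, 6)), 2)
print(a_itor.shape)        # (3, 4, 5, 6)
print(a_itor[::2].shape)   # (2, 4, 5, 6): (stop - start - 1) // step + 1 per axis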
prettyprint
global
null
false
cls
null
null
null
null
cls
def prettyprint(cls): cls.__str__ = _prettyprint cls.__repr__ = _prettyprint return cls
["def","prettyprint","(","cls",")",":","cls.__str__","=","_prettyprint","cls.__repr__","=","_prettyprint","return","cls"]
41
44
null
string_utils.py
generative-ai-python/google/generativeai/string_utils.py
from __future__ import annotations import dataclasses import pprint import re import reprlib import textwrap
10
null
6
null
null
4
null
Use image node_id 3 for calling a global function with example usage: prettyprint(cls) and returns: cls
103
node_id 3
146,245
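A hedged sketch of prettyprint as a class decorator; it assumes the module-private _prettyprint helper defined alongside it in string_utils.py.

import dataclasses

@prettyprint
@dataclasses.dataclass
class Point:  # hypothetical example class
    x: int
    y: int

p = Point(1, 2)
print(str(p) == repr(p))  # True: both dunders now route through _prettyprint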