Dataset schema (one record per Python function; lengths are min–max over the corpus):

  column             | type     | lengths / values
  -------------------+----------+------------------
  nwo                | string   | 5 – 91
  sha                | string   | 40
  path               | string   | 5 – 174
  language           | class    | 1 value ("python")
  identifier         | string   | 1 – 120
  parameters         | string   | 0 – 3.15k
  argument_list      | class    | 1 value
  return_statement   | string   | 0 – 24.1k
  docstring          | string   | 0 – 27.3k
  docstring_summary  | string   | 0 – 13.8k
  docstring_tokens   | sequence |
  function           | string   | 22 – 139k
  function_tokens    | sequence |
  url                | string   | 87 – 283
hzlzh/AlfredWorkflow.com
7055f14f6922c80ea5943839eb0caff11ae57255
Sources/Workflows/Alfred-Time-Keeper/PyAl/Request/requests/packages/oauthlib/oauth1/rfc5849/parameters.py
python
prepare_request_uri_query
(oauth_params, uri)
return urlunparse((sch, net, path, par, query, fra))
Prepare the Request URI Query.
[ "Prepare", "the", "Request", "URI", "Query", "." ]
def prepare_request_uri_query(oauth_params, uri):
    """Prepare the Request URI Query.

    Per `section 3.5.3`_ of the spec.

    .. _`section 3.5.3`: http://tools.ietf.org/html/rfc5849#section-3.5.3

    """
    # append OAuth params to the existing set of query components
    sch, net, path, par, query, fra = urlparse(uri)
    query = urlencode(_append_params(oauth_params, extract_params(query) or []))
    return urlunparse((sch, net, path, par, query, fra))
[ "def", "prepare_request_uri_query", "(", "oauth_params", ",", "uri", ")", ":", "# append OAuth params to the existing set of query components", "sch", ",", "net", ",", "path", ",", "par", ",", "query", ",", "fra", "=", "urlparse", "(", "uri", ")", "query", "=", "urlencode", "(", "_append_params", "(", "oauth_params", ",", "extract_params", "(", "query", ")", "or", "[", "]", ")", ")", "return", "urlunparse", "(", "(", "sch", ",", "net", ",", "path", ",", "par", ",", "query", ",", "fra", ")", ")" ]
https://github.com/hzlzh/AlfredWorkflow.com/blob/7055f14f6922c80ea5943839eb0caff11ae57255/Sources/Workflows/Alfred-Time-Keeper/PyAl/Request/requests/packages/oauthlib/oauth1/rfc5849/parameters.py#L123-L134
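A quick usage sketch for the function above. The parameter values are made up, and the exact ordering of the merged query string depends on `_append_params`, so treat the output as illustrative:

# Illustrative only: names/values are invented, not from the oauthlib sources.
oauth_params = [('oauth_consumer_key', 'key'), ('oauth_nonce', 'n0nce')]
uri = 'https://example.com/resource?page=2'
signed_uri = prepare_request_uri_query(oauth_params, uri)
# e.g. 'https://example.com/resource?oauth_consumer_key=key&oauth_nonce=n0nce&page=2'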
uber/fiber
ad6faf02b8e94dee498990e9fd9c588234666725
fiber/socket.py
python
NNGDevice._start_process
(self)
self._proc = mp.Process( target=self._run, args=(child_conn,), daemon=True )
[ "self", ".", "_proc", "=", "mp", ".", "Process", "(", "target", "=", "self", ".", "_run", "args", "=", "(", "child_conn", ")", "daemon", "=", "True", ")" ]
def _start_process(self):
    parent_conn, child_conn = mp.Pipe()
    self._proc = threading.Thread(
        target=self._run, args=(child_conn,), daemon=True
    )
    '''
    self._proc = mp.Process(
        target=self._run, args=(child_conn,), daemon=True
    )
    '''
    self._proc.start()
    #child_conn.close()
    self.conn = parent_conn
[ "def", "_start_process", "(", "self", ")", ":", "parent_conn", ",", "child_conn", "=", "mp", ".", "Pipe", "(", ")", "self", ".", "_proc", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "_run", ",", "args", "=", "(", "child_conn", ",", ")", ",", "daemon", "=", "True", ")", "self", ".", "_proc", ".", "start", "(", ")", "#child_conn.close()", "self", ".", "conn", "=", "parent_conn" ]
https://github.com/uber/fiber/blob/ad6faf02b8e94dee498990e9fd9c588234666725/fiber/socket.py#L156-L169
mypaint/mypaint
90b36dbc7b8bd2f323383f7edf608a5e0a3a1a33
lib/layer/data.py
python
FileBackedLayer._load_surface_from_oradir_member
(self, oradir, cache_dir, src, progress, x, y)
Loads the surface from a file in an OpenRaster-like folder
[ "Loads", "the", "surface", "from", "a", "file", "in", "an", "OpenRaster", "-", "like", "folder" ]
def _load_surface_from_oradir_member(self, oradir, cache_dir,
                                     src, progress, x, y):
    """Loads the surface from a file in an OpenRaster-like folder

    This override makes a managed copy of the original file in
    the REVISIONS_SUBDIR of the cache folder.

    """
    # Load the displayed surface tiles
    super(FileBackedLayer, self)._load_surface_from_oradir_member(
        oradir, cache_dir,
        src, progress,
        x, y,
    )
    # Copy it to the revisions subdir, and manage it there.
    revisions_dir = os.path.join(cache_dir, self.REVISIONS_SUBDIR)
    if not os.path.isdir(revisions_dir):
        os.makedirs(revisions_dir)
    self._workfile = _ManagedFile(
        unicode(os.path.join(oradir, src)),
        copy=True,
        dir=revisions_dir,
    )
    # Record its loaded position
    self._x = x
    self._y = y
[ "def", "_load_surface_from_oradir_member", "(", "self", ",", "oradir", ",", "cache_dir", ",", "src", ",", "progress", ",", "x", ",", "y", ")", ":", "# Load the displayed surface tiles", "super", "(", "FileBackedLayer", ",", "self", ")", ".", "_load_surface_from_oradir_member", "(", "oradir", ",", "cache_dir", ",", "src", ",", "progress", ",", "x", ",", "y", ",", ")", "# Copy it to the revisions subdir, and manage it there.", "revisions_dir", "=", "os", ".", "path", ".", "join", "(", "cache_dir", ",", "self", ".", "REVISIONS_SUBDIR", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "revisions_dir", ")", ":", "os", ".", "makedirs", "(", "revisions_dir", ")", "self", ".", "_workfile", "=", "_ManagedFile", "(", "unicode", "(", "os", ".", "path", ".", "join", "(", "oradir", ",", "src", ")", ")", ",", "copy", "=", "True", ",", "dir", "=", "revisions_dir", ",", ")", "# Record its loaded position", "self", ".", "_x", "=", "x", "self", ".", "_y", "=", "y" ]
https://github.com/mypaint/mypaint/blob/90b36dbc7b8bd2f323383f7edf608a5e0a3a1a33/lib/layer/data.py#L693-L718
googleads/google-ads-python
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
examples/remarketing/upload_store_sales_transactions.py
python
_create_offline_user_data_job
( client, offline_user_data_job_service, customer_id, offline_user_data_job_type, external_id, advertiser_upload_date_time, bridge_map_version_id, partner_id, custom_key, )
return offline_user_data_job_resource_name
Creates an offline user data job for uploading store sales transactions.
[ "Creates", "an", "offline", "user", "data", "job", "for", "uploading", "store", "sales", "transactions", "." ]
def _create_offline_user_data_job(
    client,
    offline_user_data_job_service,
    customer_id,
    offline_user_data_job_type,
    external_id,
    advertiser_upload_date_time,
    bridge_map_version_id,
    partner_id,
    custom_key,
):
    """Creates an offline user data job for uploading store sales transactions.

    Args:
        client: An initialized Google Ads API client.
        offline_user_data_job_service: The offline user data job service
            client.
        customer_id: The Google Ads customer ID.
        offline_user_data_job_type: Optional type of offline user data in the
            job (first party or third party).
        external_id: Optional external ID for the offline user data job.
        advertiser_upload_date_time: Optional date and time the advertiser
            uploaded data to the partner. Only required for third party
            uploads.
        bridge_map_version_id: Optional version of partner IDs to be used for
            uploads. Only required for third party uploads.
        partner_id: Optional ID of the third party partner. Only required for
            third party uploads.
        custom_key: A custom key str to segment store sales conversions. Only
            required after creating a custom key and custom values in the
            account.

    Returns:
        The string resource name of the created job.
    """
    # TIP: If you are migrating from the AdWords API, please note that Google
    # Ads API uses the term "fraction" instead of "rate". For example,
    # loyalty_rate in the AdWords API is called loyalty_fraction in the Google
    # Ads API.
    # Create a new offline user data job.
    offline_user_data_job = client.get_type("OfflineUserDataJob")
    offline_user_data_job.type_ = offline_user_data_job_type
    if external_id is not None:
        offline_user_data_job.external_id = external_id

    # Please refer to https://support.google.com/google-ads/answer/7506124 for
    # additional details.
    store_sales_metadata = offline_user_data_job.store_sales_metadata
    # Set the fraction of your overall sales that you (or the advertiser,
    # in the third party case) can associate with a customer (email, phone
    # number, address, etc.) in your database or loyalty program.
    # For example, set this to 0.7 if you have 100 transactions over 30
    # days, and out of those 100 transactions, you can identify 70 by an
    # email address or phone number.
    store_sales_metadata.loyalty_fraction = 0.7
    # Set the fraction of sales you're uploading out of the overall sales
    # that you (or the advertiser, in the third party case) can associate
    # with a customer. In most cases, you will set this to 1.0.
    # Continuing the example above for loyalty fraction, a value of 1.0 here
    # indicates that you are uploading all 70 of the transactions that can
    # be identified by an email address or phone number.
    store_sales_metadata.transaction_upload_fraction = 1.0

    if custom_key:
        store_sales_metadata.custom_key = custom_key

    if (
        offline_user_data_job_type
        == client.enums.OfflineUserDataJobTypeEnum.STORE_SALES_UPLOAD_THIRD_PARTY
    ):
        # Create additional metadata required for uploading third party data.
        store_sales_third_party_metadata = (
            store_sales_metadata.third_party_metadata
        )
        # The date/time must be in the format "yyyy-MM-dd hh:mm:ss".
        store_sales_third_party_metadata.advertiser_upload_date_time = (
            advertiser_upload_date_time
        )
        # Set the fraction of transactions you received from the advertiser
        # that have valid formatting and values. This captures any transactions
        # the advertiser provided to you but which you are unable to upload to
        # Google due to formatting errors or missing data.
        # In most cases, you will set this to 1.0.
        store_sales_third_party_metadata.valid_transaction_fraction = 1.0
        # Set the fraction of valid transactions (as defined above) you
        # received from the advertiser that you (the third party) have matched
        # to an external user ID on your side.
        # In most cases, you will set this to 1.0.
        store_sales_third_party_metadata.partner_match_fraction = 1.0
        # Set the fraction of transactions you (the third party) are uploading
        # out of the transactions you received from the advertiser that meet
        # both of the following criteria:
        # 1. Are valid in terms of formatting and values. See valid transaction
        #    fraction above.
        # 2. You matched to an external user ID on your side. See partner match
        #    fraction above.
        # In most cases, you will set this to 1.0.
        store_sales_third_party_metadata.partner_upload_fraction = 1.0
        # Set the version of partner IDs to be used for uploads.
        # Please speak with your Google representative to get the values to use
        # for the bridge map version and partner IDs.
        store_sales_third_party_metadata.bridge_map_version_id = (
            bridge_map_version_id
        )
        # Set the third party partner ID uploading the transactions.
        store_sales_third_party_metadata.partner_id = partner_id

    create_offline_user_data_job_response = (
        offline_user_data_job_service.create_offline_user_data_job(
            customer_id=customer_id, job=offline_user_data_job
        )
    )
    offline_user_data_job_resource_name = (
        create_offline_user_data_job_response.resource_name
    )
    print(
        "Created an offline user data job with resource name "
        f"'{offline_user_data_job_resource_name}'."
    )
    return offline_user_data_job_resource_name
[ "def", "_create_offline_user_data_job", "(", "client", ",", "offline_user_data_job_service", ",", "customer_id", ",", "offline_user_data_job_type", ",", "external_id", ",", "advertiser_upload_date_time", ",", "bridge_map_version_id", ",", "partner_id", ",", "custom_key", ",", ")", ":", "# TIP: If you are migrating from the AdWords API, please note that Google", "# Ads API uses the term \"fraction\" instead of \"rate\". For example,", "# loyalty_rate in the AdWords API is called loyalty_fraction in the Google", "# Ads API.", "# Create a new offline user data job.", "offline_user_data_job", "=", "client", ".", "get_type", "(", "\"OfflineUserDataJob\"", ")", "offline_user_data_job", ".", "type_", "=", "offline_user_data_job_type", "if", "external_id", "is", "not", "None", ":", "offline_user_data_job", ".", "external_id", "=", "external_id", "# Please refer to https://support.google.com/google-ads/answer/7506124 for", "# additional details.", "store_sales_metadata", "=", "offline_user_data_job", ".", "store_sales_metadata", "# Set the fraction of your overall sales that you (or the advertiser,", "# in the third party case) can associate with a customer (email, phone", "# number, address, etc.) in your database or loyalty program.", "# For example, set this to 0.7 if you have 100 transactions over 30", "# days, and out of those 100 transactions, you can identify 70 by an", "# email address or phone number.", "store_sales_metadata", ".", "loyalty_fraction", "=", "0.7", "# Set the fraction of sales you're uploading out of the overall sales", "# that you (or the advertiser, in the third party case) can associate", "# with a customer. In most cases, you will set this to 1.0.", "# Continuing the example above for loyalty fraction, a value of 1.0 here", "# indicates that you are uploading all 70 of the transactions that can", "# be identified by an email address or phone number.", "store_sales_metadata", ".", "transaction_upload_fraction", "=", "1.0", "if", "custom_key", ":", "store_sales_metadata", ".", "custom_key", "=", "custom_key", "if", "(", "offline_user_data_job_type", "==", "client", ".", "enums", ".", "OfflineUserDataJobTypeEnum", ".", "STORE_SALES_UPLOAD_THIRD_PARTY", ")", ":", "# Create additional metadata required for uploading third party data.", "store_sales_third_party_metadata", "=", "(", "store_sales_metadata", ".", "third_party_metadata", ")", "# The date/time must be in the format \"yyyy-MM-dd hh:mm:ss\".", "store_sales_third_party_metadata", ".", "advertiser_upload_date_time", "=", "(", "advertiser_upload_date_time", ")", "# Set the fraction of transactions you received from the advertiser", "# that have valid formatting and values. This captures any transactions", "# the advertiser provided to you but which you are unable to upload to", "# Google due to formatting errors or missing data.", "# In most cases, you will set this to 1.0.", "store_sales_third_party_metadata", ".", "valid_transaction_fraction", "=", "1.0", "# Set the fraction of valid transactions (as defined above) you", "# received from the advertiser that you (the third party) have matched", "# to an external user ID on your side.", "# In most cases, you will set this to 1.0.", "store_sales_third_party_metadata", ".", "partner_match_fraction", "=", "1.0", "# Set the fraction of transactions you (the third party) are uploading", "# out of the transactions you received from the advertiser that meet", "# both of the following criteria:", "# 1. Are valid in terms of formatting and values. 
See valid transaction", "# fraction above.", "# 2. You matched to an external user ID on your side. See partner match", "# fraction above.", "# In most cases, you will set this to 1.0.", "store_sales_third_party_metadata", ".", "partner_upload_fraction", "=", "1.0", "# Set the version of partner IDs to be used for uploads.", "# Please speak with your Google representative to get the values to use", "# for the bridge map version and partner IDs.", "store_sales_third_party_metadata", ".", "bridge_map_version_id", "=", "(", "bridge_map_version_id", ")", "# Set the third party partner ID uploading the transactions.", "store_sales_third_party_metadata", ".", "partner_id", "=", "partner_id", "create_offline_user_data_job_response", "=", "offline_user_data_job_service", ".", "create_offline_user_data_job", "(", "customer_id", "=", "customer_id", ",", "job", "=", "offline_user_data_job", ")", "offline_user_data_job_resource_name", "=", "(", "create_offline_user_data_job_response", ".", "resource_name", ")", "print", "(", "\"Created an offline user data job with resource name \"", "f\"'{offline_user_data_job_resource_name}'.\"", ")", "return", "offline_user_data_job_resource_name" ]
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/examples/remarketing/upload_store_sales_transactions.py#L127-L243
res0nat0r/gitosis
faaf96ff7ea943bb29cf912fdbf2999ec8904900
gitosis/gitweb.py
python
generate_project_list
(config, path)
Generate projects list for ``gitweb``.
[ "Generate", "projects", "list", "for", "gitweb", "." ]
def generate_project_list(config, path):
    """
    Generate projects list for ``gitweb``.

    :param config: configuration to read projects from
    :type config: RawConfigParser

    :param path: path to write projects list to
    :type path: str
    """
    tmp = '%s.%d.tmp' % (path, os.getpid())

    f = file(tmp, 'w')
    try:
        generate_project_list_fp(config=config, fp=f)
    finally:
        f.close()

    os.rename(tmp, path)
[ "def", "generate_project_list", "(", "config", ",", "path", ")", ":", "tmp", "=", "'%s.%d.tmp'", "%", "(", "path", ",", "os", ".", "getpid", "(", ")", ")", "f", "=", "file", "(", "tmp", ",", "'w'", ")", "try", ":", "generate_project_list_fp", "(", "config", "=", "config", ",", "fp", "=", "f", ")", "finally", ":", "f", ".", "close", "(", ")", "os", ".", "rename", "(", "tmp", ",", "path", ")" ]
https://github.com/res0nat0r/gitosis/blob/faaf96ff7ea943bb29cf912fdbf2999ec8904900/gitosis/gitweb.py#L115-L133
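The function above is a compact example of the atomic-write pattern: write to a sibling temp file, then rename over the destination (`file()` there is the Python 2 built-in). A generic Python 3 sketch of the same idea, not taken from the gitosis sources:

import os

def atomic_write(path, data):
    # Write to a sibling temp file first so readers never see a partial file.
    tmp = '%s.%d.tmp' % (path, os.getpid())
    with open(tmp, 'w') as f:
        f.write(data)
    # rename() is atomic on POSIX when tmp and path share a filesystem.
    os.rename(tmp, path)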
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
scripts/monitoring/cron-send-fluentd-depth.py
python
OpenshiftFluentdQueueCheck.check_fluentd_queues
(self)
return oldest_age
Check oldest buffer file in Fluentd pods
[ "Check", "oldest", "buffer", "file", "in", "Fluentd", "pods" ]
def check_fluentd_queues(self):
    """ Check oldest buffer file in Fluentd pods """
    # Get timestamps of files in /var/lib/fluentd from each pod
    buffer_list = []
    for pod in self.fluentd_pods:
        pod_name = pod['metadata']['name']
        find_ts = "exec " + pod_name + " -- find /var/lib/fluentd -type f -name \*.log ! -name '*output_ops_tag*' -printf '%T+\n'"
        buffer_ts = self.oc.run_user_cmd(find_ts)
        timestamps = buffer_ts.split("\n")
        timestamps.pop()  # Removes empty newline
        timestamps.sort()
        if len(timestamps) > 0:
            buffer_list.append(timestamps[0])
            logger.info("Found files in fluentd queue on " + pod_name + " with timestamp(s): %s", str(timestamps))
        else:
            logger.info("No files found in fluentd queue on " + pod_name)

    # Convert timestamps to age in seconds
    age_list = []
    for ts in buffer_list:
        if "+" in ts:
            ts = ts.replace("+", " ")
        ts = parser.parse(ts)
        ts = ts.replace(tzinfo=None)
        buffer_age = (datetime.now() - ts).total_seconds()
        age_list.append(buffer_age)

    oldest_age = int(math.ceil(max(age_list or [0])))
    logger.info("Oldest fluentd queue file is %s seconds old.", str(oldest_age))
    return oldest_age
[ "def", "check_fluentd_queues", "(", "self", ")", ":", "# Get timestamps of files in /var/lib/fluentd from each pod", "buffer_list", "=", "[", "]", "for", "pod", "in", "self", ".", "fluentd_pods", ":", "pod_name", "=", "pod", "[", "'metadata'", "]", "[", "'name'", "]", "find_ts", "=", "\"exec \"", "+", "pod_name", "+", "\" -- find /var/lib/fluentd -type f -name \\*.log ! -name '*output_ops_tag*' -printf '%T+\\n'\"", "buffer_ts", "=", "self", ".", "oc", ".", "run_user_cmd", "(", "find_ts", ")", "timestamps", "=", "buffer_ts", ".", "split", "(", "\"\\n\"", ")", "timestamps", ".", "pop", "(", ")", "# Removes empty newline", "timestamps", ".", "sort", "(", ")", "if", "len", "(", "timestamps", ")", ">", "0", ":", "buffer_list", ".", "append", "(", "timestamps", "[", "0", "]", ")", "logger", ".", "info", "(", "\"Found files in fluentd queue on \"", "+", "pod_name", "+", "\" with timestamp(s): %s\"", ",", "str", "(", "timestamps", ")", ")", "else", ":", "logger", ".", "info", "(", "\"No files found in fluentd queue on \"", "+", "pod_name", ")", "# Convert timestamps to age in seconds ", "age_list", "=", "[", "]", "for", "ts", "in", "buffer_list", ":", "if", "\"+\"", "in", "ts", ":", "ts", "=", "ts", ".", "replace", "(", "\"+\"", ",", "\" \"", ")", "ts", "=", "parser", ".", "parse", "(", "ts", ")", "ts", "=", "ts", ".", "replace", "(", "tzinfo", "=", "None", ")", "buffer_age", "=", "(", "datetime", ".", "now", "(", ")", "-", "ts", ")", ".", "total_seconds", "(", ")", "age_list", ".", "append", "(", "buffer_age", ")", "oldest_age", "=", "int", "(", "math", ".", "ceil", "(", "max", "(", "age_list", "or", "[", "0", "]", ")", ")", ")", "logger", ".", "info", "(", "\"Oldest fluentd queue file is %s seconds old.\"", ",", "str", "(", "oldest_age", ")", ")", "return", "oldest_age" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/scripts/monitoring/cron-send-fluentd-depth.py#L64-L92
IronLanguages/main
a949455434b1fda8c783289e897e78a9a0caabb5
External.LCA_RESTRICTED/Languages/IronPython/27/Lib/pdb.py
python
Pdb.user_call
(self, frame, argument_list)
This method is called when there is the remote possibility that we ever need to stop in this function.
[ "This", "method", "is", "called", "when", "there", "is", "the", "remote", "possibility", "that", "we", "ever", "need", "to", "stop", "in", "this", "function", "." ]
def user_call(self, frame, argument_list):
    """This method is called when there is the remote possibility
    that we ever need to stop in this function."""
    if self._wait_for_mainpyfile:
        return
    if self.stop_here(frame):
        print >>self.stdout, '--Call--'
        self.interaction(frame, None)
[ "def", "user_call", "(", "self", ",", "frame", ",", "argument_list", ")", ":", "if", "self", ".", "_wait_for_mainpyfile", ":", "return", "if", "self", ".", "stop_here", "(", "frame", ")", ":", "print", ">>", "self", ".", "stdout", ",", "'--Call--'", "self", ".", "interaction", "(", "frame", ",", "None", ")" ]
https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/External.LCA_RESTRICTED/Languages/IronPython/27/Lib/pdb.py#L141-L148
cloudera/hue
23f02102d4547c17c32bd5ea0eb24e9eadd657a4
desktop/core/ext-py/Django-1.11.29/django/contrib/admin/options.py
python
BaseModelAdmin.to_field_allowed
(self, request, to_field)
return False
Returns True if the model associated with this admin should be allowed to be referenced by the specified field.
[ "Returns", "True", "if", "the", "model", "associated", "with", "this", "admin", "should", "be", "allowed", "to", "be", "referenced", "by", "the", "specified", "field", "." ]
def to_field_allowed(self, request, to_field):
    """
    Returns True if the model associated with this admin should be
    allowed to be referenced by the specified field.
    """
    opts = self.model._meta

    try:
        field = opts.get_field(to_field)
    except FieldDoesNotExist:
        return False

    # Always allow referencing the primary key since it's already possible
    # to get this information from the change view URL.
    if field.primary_key:
        return True

    # Allow reverse relationships to models defining m2m fields if they
    # target the specified field.
    for many_to_many in opts.many_to_many:
        if many_to_many.m2m_target_field_name() == to_field:
            return True

    # Make sure at least one of the models registered for this site
    # references this field through a FK or a M2M relationship.
    registered_models = set()
    for model, admin in self.admin_site._registry.items():
        registered_models.add(model)
        for inline in admin.inlines:
            registered_models.add(inline.model)

    related_objects = (
        f for f in opts.get_fields(include_hidden=True)
        if (f.auto_created and not f.concrete)
    )
    for related_object in related_objects:
        related_model = related_object.related_model
        remote_field = related_object.field.remote_field
        if (any(issubclass(model, related_model) for model in registered_models) and
                hasattr(remote_field, 'get_related_field') and
                remote_field.get_related_field() == field):
            return True

    return False
[ "def", "to_field_allowed", "(", "self", ",", "request", ",", "to_field", ")", ":", "opts", "=", "self", ".", "model", ".", "_meta", "try", ":", "field", "=", "opts", ".", "get_field", "(", "to_field", ")", "except", "FieldDoesNotExist", ":", "return", "False", "# Always allow referencing the primary key since it's already possible", "# to get this information from the change view URL.", "if", "field", ".", "primary_key", ":", "return", "True", "# Allow reverse relationships to models defining m2m fields if they", "# target the specified field.", "for", "many_to_many", "in", "opts", ".", "many_to_many", ":", "if", "many_to_many", ".", "m2m_target_field_name", "(", ")", "==", "to_field", ":", "return", "True", "# Make sure at least one of the models registered for this site", "# references this field through a FK or a M2M relationship.", "registered_models", "=", "set", "(", ")", "for", "model", ",", "admin", "in", "self", ".", "admin_site", ".", "_registry", ".", "items", "(", ")", ":", "registered_models", ".", "add", "(", "model", ")", "for", "inline", "in", "admin", ".", "inlines", ":", "registered_models", ".", "add", "(", "inline", ".", "model", ")", "related_objects", "=", "(", "f", "for", "f", "in", "opts", ".", "get_fields", "(", "include_hidden", "=", "True", ")", "if", "(", "f", ".", "auto_created", "and", "not", "f", ".", "concrete", ")", ")", "for", "related_object", "in", "related_objects", ":", "related_model", "=", "related_object", ".", "related_model", "remote_field", "=", "related_object", ".", "field", ".", "remote_field", "if", "(", "any", "(", "issubclass", "(", "model", ",", "related_model", ")", "for", "model", "in", "registered_models", ")", "and", "hasattr", "(", "remote_field", ",", "'get_related_field'", ")", "and", "remote_field", ".", "get_related_field", "(", ")", "==", "field", ")", ":", "return", "True", "return", "False" ]
https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/Django-1.11.29/django/contrib/admin/options.py#L388-L431
zlsdu/Word-Embedding
d874ae195f1ab4905e40d3e43220d303578e7de3
elmo/elmo_for_classification.py
python
ElmoModel_for_Classification.elmo_model
(self)
build model
[ "build", "model" ]
def elmo_model(self):
    '''
    build model
    '''
    tf.reset_default_graph()
    self.input_x = tf.placeholder(tf.string, [None, 1], name='input_x')
    self.input_y = tf.placeholder(tf.int32, [None, 2], name='input_y')
    self.global_step = tf.Variable(0, trainable=False, name='global_step')
    self.elmo = hub.Module('https://tfhub.dev/google/elmo/2', trainable=True)
    self.embedding = self.elmo(tf.squeeze(tf.cast(self.input_x, tf.string), axis=1),
                               as_dict=True,
                               signature='default')

    # The LSTM cell's hidden size is tied to the embedding_size of the layer above
    with tf.name_scope('rnn'):
        cell = tf.contrib.rnn.BasicLSTMCell(self.embedding_size, state_is_tuple=True)
        cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob)
        cells = [cell for _ in range(self.num_layers)]
        rnn_cells = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
        # dynamic_rnn raises an error without an explicit dtype (needed for initialization)
        outputs, _ = tf.nn.dynamic_rnn(cell=rnn_cells, inputs=self.embedding['elmo'], dtype=tf.float32)
        last_outputs = outputs[:, -1, :]

    with tf.name_scope('hidden'):
        fc = tf.layers.dense(last_outputs, self.hidden_size, name='fc1')
        fc = tf.contrib.layers.dropout(fc, self.keep_prob)
        fc = tf.nn.relu(fc)

    with tf.name_scope('logits'):
        # tf.math.argmax
        self.logits = tf.layers.dense(fc, self.class_num, name='fc2')
        self.y_pred_cls = tf.arg_max(tf.nn.softmax(self.logits), 1)

    with tf.name_scope("optimize"):
        # Loss function: cross-entropy
        # tf.nn.softmax_cross_entropy_with_logits_v2
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits,
                                                                labels=self.input_y)
        self.loss = tf.reduce_mean(cross_entropy)
        self.optim = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(self.loss, global_step=self.global_step)
[ "def", "elmo_model", "(", "self", ")", ":", "tf", ".", "reset_default_graph", "(", ")", "self", ".", "input_x", "=", "tf", ".", "placeholder", "(", "tf", ".", "string", ",", "[", "None", ",", "1", "]", ",", "name", "=", "'input_x'", ")", "self", ".", "input_y", "=", "tf", ".", "placeholder", "(", "tf", ".", "int32", ",", "[", "None", ",", "2", "]", ",", "name", "=", "'input_y'", ")", "self", ".", "global_step", "=", "tf", ".", "Variable", "(", "0", ",", "trainable", "=", "False", ",", "name", "=", "'global_step'", ")", "self", ".", "elmo", "=", "hub", ".", "Module", "(", "'https://tfhub.dev/google/elmo/2'", ",", "trainable", "=", "True", ")", "self", ".", "embedding", "=", "self", ".", "elmo", "(", "tf", ".", "squeeze", "(", "tf", ".", "cast", "(", "self", ".", "input_x", ",", "tf", ".", "string", ")", ",", "axis", "=", "1", ")", ",", "as_dict", "=", "True", ",", "signature", "=", "'default'", ")", "# LstmCell单元的隐层数取决于上一层embedding_size的大小", "with", "tf", ".", "name_scope", "(", "'rnn'", ")", ":", "cell", "=", "tf", ".", "contrib", ".", "rnn", ".", "BasicLSTMCell", "(", "self", ".", "embedding_size", ",", "state_is_tuple", "=", "True", ")", "cell", "=", "tf", ".", "contrib", ".", "rnn", ".", "DropoutWrapper", "(", "cell", ",", "output_keep_prob", "=", "self", ".", "keep_prob", ")", "cells", "=", "[", "cell", "for", "_", "in", "range", "(", "self", ".", "num_layers", ")", "]", "rnn_cells", "=", "tf", ".", "contrib", ".", "rnn", ".", "MultiRNNCell", "(", "cells", ",", "state_is_tuple", "=", "True", ")", "# 下面不使用dtype还会报错,需要初始化", "outputs", ",", "_", "=", "tf", ".", "nn", ".", "dynamic_rnn", "(", "cell", "=", "rnn_cells", ",", "inputs", "=", "self", ".", "embedding", "[", "'elmo'", "]", ",", "dtype", "=", "tf", ".", "float32", ")", "last_outputs", "=", "outputs", "[", ":", ",", "-", "1", ",", ":", "]", "with", "tf", ".", "name_scope", "(", "'hidden'", ")", ":", "fc", "=", "tf", ".", "layers", ".", "dense", "(", "last_outputs", ",", "self", ".", "hidden_size", ",", "name", "=", "'fc1'", ")", "fc", "=", "tf", ".", "contrib", ".", "layers", ".", "dropout", "(", "fc", ",", "self", ".", "keep_prob", ")", "fc", "=", "tf", ".", "nn", ".", "relu", "(", "fc", ")", "with", "tf", ".", "name_scope", "(", "'logits'", ")", ":", "# tf.math.argmax", "self", ".", "logits", "=", "tf", ".", "layers", ".", "dense", "(", "fc", ",", "self", ".", "class_num", ",", "name", "=", "'fc2'", ")", "self", ".", "y_pred_cls", "=", "tf", ".", "arg_max", "(", "tf", ".", "nn", ".", "softmax", "(", "self", ".", "logits", ")", ",", "1", ")", "with", "tf", ".", "name_scope", "(", "\"optimize\"", ")", ":", "# 损失函数,交叉熵", "# tf.nn.softmax_cross_entropy_with_logits_v2", "cross_entropy", "=", "tf", ".", "nn", ".", "softmax_cross_entropy_with_logits", "(", "logits", "=", "self", ".", "logits", ",", "labels", "=", "self", ".", "input_y", ")", "self", ".", "loss", "=", "tf", ".", "reduce_mean", "(", "cross_entropy", ")", "self", ".", "optim", "=", "tf", ".", "train", ".", "GradientDescentOptimizer", "(", "learning_rate", "=", "self", ".", "learning_rate", ")", ".", "minimize", "(", "self", ".", "loss", ",", "global_step", "=", "self", ".", "global_step", ")" ]
https://github.com/zlsdu/Word-Embedding/blob/d874ae195f1ab4905e40d3e43220d303578e7de3/elmo/elmo_for_classification.py#L95-L132
Wookai/paper-tips-and-tricks
af57d2312dad82ed220b88964bc6fb2bba9947a1
src/python/plot_utils.py
python
get_fig_size
(fig_width_cm, fig_height_cm=None)
return map(lambda x: x/2.54, size_cm)
Convert dimensions in centimeters to inches. If no height is given, it is computed using the golden ratio.
[ "Convert", "dimensions", "in", "centimeters", "to", "inches", ".", "If", "no", "height", "is", "given", "it", "is", "computed", "using", "the", "golden", "ratio", "." ]
def get_fig_size(fig_width_cm, fig_height_cm=None):
    """Convert dimensions in centimeters to inches.

    If no height is given, it is computed using the golden ratio.
    """
    if not fig_height_cm:
        golden_ratio = (1 + math.sqrt(5))/2
        fig_height_cm = fig_width_cm / golden_ratio

    size_cm = (fig_width_cm, fig_height_cm)
    return map(lambda x: x/2.54, size_cm)
[ "def", "get_fig_size", "(", "fig_width_cm", ",", "fig_height_cm", "=", "None", ")", ":", "if", "not", "fig_height_cm", ":", "golden_ratio", "=", "(", "1", "+", "math", ".", "sqrt", "(", "5", ")", ")", "/", "2", "fig_height_cm", "=", "fig_width_cm", "/", "golden_ratio", "size_cm", "=", "(", "fig_width_cm", ",", "fig_height_cm", ")", "return", "map", "(", "lambda", "x", ":", "x", "/", "2.54", ",", "size_cm", ")" ]
https://github.com/Wookai/paper-tips-and-tricks/blob/af57d2312dad82ed220b88964bc6fb2bba9947a1/src/python/plot_utils.py#L8-L17
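Typical use with matplotlib (assumed here, not part of the record). Note the function returns a `map` object on Python 3, so unpack it or wrap it in `tuple()`:

import matplotlib.pyplot as plt

# 12 cm wide, height derived from the golden ratio; unpacking consumes the map.
width_in, height_in = get_fig_size(12)
fig = plt.figure(figsize=(width_in, height_in))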
Nuitka/Nuitka
39262276993757fa4e299f497654065600453fc9
nuitka/utils/Importing.py
python
importFilePy2
(filename)
return imp.load_source(basename, filename)
Import a file for Python version 2.
[ "Import", "a", "file", "for", "Python", "version", "2", "." ]
def importFilePy2(filename):
    """Import a file for Python version 2."""
    import imp

    basename = os.path.splitext(os.path.basename(filename))[0]
    return imp.load_source(basename, filename)
[ "def", "importFilePy2", "(", "filename", ")", ":", "import", "imp", "basename", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "filename", ")", ")", "[", "0", "]", "return", "imp", ".", "load_source", "(", "basename", ",", "filename", ")" ]
https://github.com/Nuitka/Nuitka/blob/39262276993757fa4e299f497654065600453fc9/nuitka/utils/Importing.py#L52-L57
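The `imp` module used above was removed in Python 3.12; a Python 3 counterpart of the same idea would use `importlib`. This is a sketch, not taken from the Nuitka sources:

import importlib.util
import os

def import_file_py3(filename):
    # Derive a module name from the file name, as importFilePy2 does.
    basename = os.path.splitext(os.path.basename(filename))[0]
    spec = importlib.util.spec_from_file_location(basename, filename)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module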
SheffieldML/GPy
bb1bc5088671f9316bc92a46d356734e34c2d5c0
GPy/kern/src/linear.py
python
LinearFull.K
(self, X, X2=None)
return np.einsum('ij,jk,lk->il', X, P, X if X2 is None else X2)
def K(self, X, X2=None):
    P = np.dot(self.W, self.W.T) + np.diag(self.kappa)
    return np.einsum('ij,jk,lk->il', X, P, X if X2 is None else X2)
[ "def", "K", "(", "self", ",", "X", ",", "X2", "=", "None", ")", ":", "P", "=", "np", ".", "dot", "(", "self", ".", "W", ",", "self", ".", "W", ".", "T", ")", "+", "np", ".", "diag", "(", "self", ".", "kappa", ")", "return", "np", ".", "einsum", "(", "'ij,jk,lk->il'", ",", "X", ",", "P", ",", "X", "if", "X2", "is", "None", "else", "X2", ")" ]
https://github.com/SheffieldML/GPy/blob/bb1bc5088671f9316bc92a46d356734e34c2d5c0/GPy/kern/src/linear.py#L197-L199
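The einsum above computes the kernel K = X (W Wᵀ + diag(κ)) X2ᵀ. A quick numeric check of that reading, with illustrative shapes:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 3))
W = rng.normal(size=(3, 2))
kappa = np.ones(3)

P = W @ W.T + np.diag(kappa)
# 'ij,jk,lk->il' contracts X against P and the (transposed) second input.
assert np.allclose(np.einsum('ij,jk,lk->il', X, P, X), X @ P @ X.T)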
nosmokingbandit/Watcher3
0217e75158b563bdefc8e01c3be7620008cf3977
lib/sqlalchemy/engine/base.py
python
Connection.begin_twophase
(self, xid=None)
return self.__transaction
Begin a two-phase or XA transaction and return a transaction handle.
[ "Begin", "a", "two", "-", "phase", "or", "XA", "transaction", "and", "return", "a", "transaction", "handle", "." ]
def begin_twophase(self, xid=None):
    """Begin a two-phase or XA transaction and return a transaction
    handle.

    The returned object is an instance of :class:`.TwoPhaseTransaction`,
    which in addition to the methods provided by
    :class:`.Transaction`, also provides a
    :meth:`~.TwoPhaseTransaction.prepare` method.

    :param xid: the two phase transaction id.  If not supplied, a
      random id will be generated.

    See also :meth:`.Connection.begin`,
    :meth:`.Connection.begin_twophase`.

    """
    if self.__branch_from:
        return self.__branch_from.begin_twophase(xid=xid)

    if self.__transaction is not None:
        raise exc.InvalidRequestError(
            "Cannot start a two phase transaction when a transaction "
            "is already in progress.")

    if xid is None:
        xid = self.engine.dialect.create_xid()

    self.__transaction = TwoPhaseTransaction(self, xid)
    return self.__transaction
[ "def", "begin_twophase", "(", "self", ",", "xid", "=", "None", ")", ":", "if", "self", ".", "__branch_from", ":", "return", "self", ".", "__branch_from", ".", "begin_twophase", "(", "xid", "=", "xid", ")", "if", "self", ".", "__transaction", "is", "not", "None", ":", "raise", "exc", ".", "InvalidRequestError", "(", "\"Cannot start a two phase transaction when a transaction \"", "\"is already in progress.\"", ")", "if", "xid", "is", "None", ":", "xid", "=", "self", ".", "engine", ".", "dialect", ".", "create_xid", "(", ")", "self", ".", "__transaction", "=", "TwoPhaseTransaction", "(", "self", ",", "xid", ")", "return", "self", ".", "__transaction" ]
https://github.com/nosmokingbandit/Watcher3/blob/0217e75158b563bdefc8e01c3be7620008cf3977/lib/sqlalchemy/engine/base.py#L633-L660
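A sketch of the two-phase flow this method starts. The `engine` and `my_table` objects are assumptions (a connected SQLAlchemy Engine and a Table), not part of the record; the returned `TwoPhaseTransaction` adds `prepare()` between the two phases:

conn = engine.connect()          # engine: assumed SQLAlchemy Engine
xact = conn.begin_twophase()     # optionally pass an explicit xid
conn.execute(my_table.insert(), {"name": "example"})
xact.prepare()                   # phase one: persist the prepared state
xact.commit()                    # phase two: commit the prepared transaction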
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_vendored_deps/library/oc_scale.py
python
Yedit.update
(self, path, value, index=None, curr_value=None)
return (False, self.yaml_dict)
put path, value into a dict
[ "put", "path", "value", "into", "a", "dict" ]
def update(self, path, value, index=None, curr_value=None):
    ''' put path, value into a dict '''
    try:
        entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
    except KeyError:
        entry = None

    if isinstance(entry, dict):
        # AUDIT:maybe-no-member makes sense due to fuzzy types
        # pylint: disable=maybe-no-member
        if not isinstance(value, dict):
            raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
                                 'value=[{}] type=[{}]'.format(value, type(value)))

        entry.update(value)
        return (True, self.yaml_dict)

    elif isinstance(entry, list):
        # AUDIT:maybe-no-member makes sense due to fuzzy types
        # pylint: disable=maybe-no-member
        ind = None
        if curr_value:
            try:
                ind = entry.index(curr_value)
            except ValueError:
                return (False, self.yaml_dict)

        elif index is not None:
            ind = index

        if ind is not None and entry[ind] != value:
            entry[ind] = value
            return (True, self.yaml_dict)

        # see if it exists in the list
        try:
            ind = entry.index(value)
        except ValueError:
            # doesn't exist, append it
            entry.append(value)
            return (True, self.yaml_dict)

        # already exists, return
        if ind is not None:
            return (False, self.yaml_dict)

    return (False, self.yaml_dict)
[ "def", "update", "(", "self", ",", "path", ",", "value", ",", "index", "=", "None", ",", "curr_value", "=", "None", ")", ":", "try", ":", "entry", "=", "Yedit", ".", "get_entry", "(", "self", ".", "yaml_dict", ",", "path", ",", "self", ".", "separator", ")", "except", "KeyError", ":", "entry", "=", "None", "if", "isinstance", "(", "entry", ",", "dict", ")", ":", "# AUDIT:maybe-no-member makes sense due to fuzzy types", "# pylint: disable=maybe-no-member", "if", "not", "isinstance", "(", "value", ",", "dict", ")", ":", "raise", "YeditException", "(", "'Cannot replace key, value entry in dict with non-dict type. '", "+", "'value=[{}] type=[{}]'", ".", "format", "(", "value", ",", "type", "(", "value", ")", ")", ")", "entry", ".", "update", "(", "value", ")", "return", "(", "True", ",", "self", ".", "yaml_dict", ")", "elif", "isinstance", "(", "entry", ",", "list", ")", ":", "# AUDIT:maybe-no-member makes sense due to fuzzy types", "# pylint: disable=maybe-no-member", "ind", "=", "None", "if", "curr_value", ":", "try", ":", "ind", "=", "entry", ".", "index", "(", "curr_value", ")", "except", "ValueError", ":", "return", "(", "False", ",", "self", ".", "yaml_dict", ")", "elif", "index", "is", "not", "None", ":", "ind", "=", "index", "if", "ind", "is", "not", "None", "and", "entry", "[", "ind", "]", "!=", "value", ":", "entry", "[", "ind", "]", "=", "value", "return", "(", "True", ",", "self", ".", "yaml_dict", ")", "# see if it exists in the list", "try", ":", "ind", "=", "entry", ".", "index", "(", "value", ")", "except", "ValueError", ":", "# doesn't exist, append it", "entry", ".", "append", "(", "value", ")", "return", "(", "True", ",", "self", ".", "yaml_dict", ")", "# already exists, return", "if", "ind", "is", "not", "None", ":", "return", "(", "False", ",", "self", ".", "yaml_dict", ")", "return", "(", "False", ",", "self", ".", "yaml_dict", ")" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_vendored_deps/library/oc_scale.py#L537-L582
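A hedged usage sketch. The `content=` constructor argument and the default `.` separator follow the openshift-ansible Yedit conventions; treat both as assumptions rather than a documented API:

yed = Yedit(content={'spec': {'replicas': 1, 'ports': [80]}})
# curr_value locates the list element; the tuple is (changed?, resulting dict).
changed, doc = yed.update('spec.ports', 443, curr_value=80)
# -> (True, {'spec': {'replicas': 1, 'ports': [443]}})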
open-io/oio-sds
16041950b6056a55d5ce7ca77795defe6dfa6c61
oio/directory/admin.py
python
AdminClient.service_balance_elections
(self, svc_id, max_ops=0, inactivity=0, **kwargs)
return _resp.status, body
Balance elections to get an acceptable slave/master ratio.
[ "Balance", "elections", "to", "get", "an", "acceptable", "slave", "/", "master", "ratio", "." ]
def service_balance_elections(self, svc_id, max_ops=0, inactivity=0,
                              **kwargs):
    """
    Balance elections to get an acceptable slave/master ratio.

    :param svc_id: id of the service that should balance its elections.
    :param max_ops: maximum number of balancing operations.
    :param inactivity: avoid expiring election whose last activity is
        younger than the specified value.
    """
    params = {'inactivity': int(inactivity),
              'max': int(max_ops),
              'id': svc_id}
    _resp, body = self.forwarder._request(
        'POST', '/balance-masters', params=params, **kwargs)
    return _resp.status, body
[ "def", "service_balance_elections", "(", "self", ",", "svc_id", ",", "max_ops", "=", "0", ",", "inactivity", "=", "0", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'inactivity'", ":", "int", "(", "inactivity", ")", ",", "'max'", ":", "int", "(", "max_ops", ")", ",", "'id'", ":", "svc_id", "}", "_resp", ",", "body", "=", "self", ".", "forwarder", ".", "_request", "(", "'POST'", ",", "'/balance-masters'", ",", "params", "=", "params", ",", "*", "*", "kwargs", ")", "return", "_resp", ".", "status", ",", "body" ]
https://github.com/open-io/oio-sds/blob/16041950b6056a55d5ce7ca77795defe6dfa6c61/oio/directory/admin.py#L363-L378
keras-team/keras
5caa668b6a415675064a730f5eb46ecc08e40f65
keras/engine/data_adapter.py
python
single_batch_iterator
(strategy, x, y=None, sample_weight=None, class_weight=None)
return iter(dataset)
Creates a single-batch dataset.
[ "Creates", "a", "single", "-", "batch", "dataset", "." ]
def single_batch_iterator(strategy, x, y=None, sample_weight=None,
                          class_weight=None):
    """Creates a single-batch dataset."""
    x, y, sample_weight = _process_tensorlike((x, y, sample_weight))
    if y is None:
        data = (x,)
    elif sample_weight is None:
        data = (x, y)
    else:
        data = (x, y, sample_weight)

    _check_data_cardinality(data)
    dataset = tf.data.Dataset.from_tensors(data)
    if class_weight:
        dataset = dataset.map(_make_class_weight_map_fn(class_weight))
    dataset = strategy.experimental_distribute_dataset(dataset)
    return iter(dataset)
[ "def", "single_batch_iterator", "(", "strategy", ",", "x", ",", "y", "=", "None", ",", "sample_weight", "=", "None", ",", "class_weight", "=", "None", ")", ":", "x", ",", "y", ",", "sample_weight", "=", "_process_tensorlike", "(", "(", "x", ",", "y", ",", "sample_weight", ")", ")", "if", "y", "is", "None", ":", "data", "=", "(", "x", ",", ")", "elif", "sample_weight", "is", "None", ":", "data", "=", "(", "x", ",", "y", ")", "else", ":", "data", "=", "(", "x", ",", "y", ",", "sample_weight", ")", "_check_data_cardinality", "(", "data", ")", "dataset", "=", "tf", ".", "data", ".", "Dataset", ".", "from_tensors", "(", "data", ")", "if", "class_weight", ":", "dataset", "=", "dataset", ".", "map", "(", "_make_class_weight_map_fn", "(", "class_weight", ")", ")", "dataset", "=", "strategy", ".", "experimental_distribute_dataset", "(", "dataset", ")", "return", "iter", "(", "dataset", ")" ]
https://github.com/keras-team/keras/blob/5caa668b6a415675064a730f5eb46ecc08e40f65/keras/engine/data_adapter.py#L1622-L1641
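A sketch of how the iterator is consumed, using the default (no-op) distribution strategy; the shapes are illustrative:

import numpy as np
import tensorflow as tf

strategy = tf.distribute.get_strategy()   # default strategy outside any scope
x = np.random.rand(8, 4).astype("float32")
y = np.random.randint(0, 2, size=(8, 1))
batch = next(single_batch_iterator(strategy, x, y))  # one (x, y) batch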
AeroPython/PyFME
156fa9f1db097f107c20ad7354c71b1eaee4cbb1
src/pyfme/models/dynamic_system.py
python
AircraftDynamicSystem.time_step
(self, dt)
return self.full_state
Perform an integration time step
[ "Perform", "an", "integration", "time", "step" ]
def time_step(self, dt):
    """Perform an integration time step

    Parameters
    ----------
    dt : float
        Time step for integration

    Returns
    -------
    full_state : AircraftState
        Aircraft's state after integration time step
    """
    super().time_step(dt)
    # Now self.state_vector and state_vector_dot are updated
    self._update_full_system_state_from_state(self.state_vector,
                                              self.state_vector_dot)
    return self.full_state
[ "def", "time_step", "(", "self", ",", "dt", ")", ":", "super", "(", ")", ".", "time_step", "(", "dt", ")", "# Now self.state_vector and state_vector_dot are updated", "self", ".", "_update_full_system_state_from_state", "(", "self", ".", "state_vector", ",", "self", ".", "state_vector_dot", ")", "return", "self", ".", "full_state" ]
https://github.com/AeroPython/PyFME/blob/156fa9f1db097f107c20ad7354c71b1eaee4cbb1/src/pyfme/models/dynamic_system.py#L312-L330
tensorflow/datasets
2e496976d7d45550508395fb2f35cf958c8a3414
tensorflow_datasets/text/scan.py
python
Scan._generate_examples
(self, datapath, splitpath=None, splitname=None)
Yields examples.
[ "Yields", "examples", "." ]
def _generate_examples(self, datapath, splitpath=None, splitname=None):
    """Yields examples."""
    if splitpath:
        all_samples = list(self._read_examples(datapath))
        with tf.io.gfile.GFile(splitpath) as infile:
            split = json.load(infile)
        for idx in split[splitname + 'Idxs']:
            yield all_samples[idx]
    else:
        for example in self._read_examples(datapath):
            yield example
[ "def", "_generate_examples", "(", "self", ",", "datapath", ",", "splitpath", "=", "None", ",", "splitname", "=", "None", ")", ":", "if", "splitpath", ":", "all_samples", "=", "list", "(", "self", ".", "_read_examples", "(", "datapath", ")", ")", "with", "tf", ".", "io", ".", "gfile", ".", "GFile", "(", "splitpath", ")", "as", "infile", ":", "split", "=", "json", ".", "load", "(", "infile", ")", "for", "idx", "in", "split", "[", "splitname", "+", "'Idxs'", "]", ":", "yield", "all_samples", "[", "idx", "]", "else", ":", "for", "example", "in", "self", ".", "_read_examples", "(", "datapath", ")", ":", "yield", "example" ]
https://github.com/tensorflow/datasets/blob/2e496976d7d45550508395fb2f35cf958c8a3414/tensorflow_datasets/text/scan.py#L192-L202
couchbase/couchbase-python-client
58ccfd42af320bde6b733acf094fd5a4cf34e0ad
couchbase/subdocument.py
python
with_expiry
()
return SD.get('$document.exptime', xattr=True)
Fetches the expiry from the xattrs of the doc
[ "Fetches", "the", "expiry", "from", "the", "xattrs", "of", "the", "doc" ]
def with_expiry():
    # type: (...) -> Spec
    """
    Fetches the expiry from the xattrs of the doc

    :return: Spec
    """
    return SD.get('$document.exptime', xattr=True)
[ "def", "with_expiry", "(", ")", ":", "# type: (...) -> Spec", "return", "SD", ".", "get", "(", "'$document.exptime'", ",", "xattr", "=", "True", ")" ]
https://github.com/couchbase/couchbase-python-client/blob/58ccfd42af320bde6b733acf094fd5a4cf34e0ad/couchbase/subdocument.py#L244-L251
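In the SDK 3.x collection API this spec would typically be passed to a sub-document lookup. A hedged sketch; `collection` is an assumed connected Collection object and the key is invented:

import couchbase.subdocument as SD

result = collection.lookup_in('customer::123', [SD.with_expiry()])
expiry = result.content_as[int](0)   # exptime xattr of the document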
google/deepvariant
9cf1c7b0e2342d013180aa153cba3c9331c9aef7
deepvariant/allele_frequency.py
python
add_allele_frequencies_to_candidates
(candidates, population_vcf_reader, ref_reader)
Adds allele frequencies for candidate variants.
[ "Adds", "allele", "frequencies", "for", "candidate", "variants", "." ]
def add_allele_frequencies_to_candidates(candidates, population_vcf_reader,
                                         ref_reader):
    """Adds allele frequencies for candidate variants.

    Args:
        candidates: Iterable of DeepVariantCall protos that are the candidates
            we want to process.
        population_vcf_reader: A VcfReader object that reads the associated
            population VCF file for candidates. None if the contig is not
            found.
        ref_reader: A fasta.IndexedFastaReader.

    Yields:
        DeepVariantCall protos. The same set of input candidates, with field
        allele_frequency filled.
    """
    for candidate in candidates:
        if population_vcf_reader:
            dict_allele_frequency = find_matching_allele_frequency(
                variant=candidate.variant,
                population_vcf_reader=population_vcf_reader,
                ref_reader=ref_reader)
        else:
            # Set ALT frequencies to 0 if population_vcf_reader is None.
            dict_allele_frequency = {}
            dict_allele_frequency[candidate.variant.reference_bases] = 1
            for alt in candidate.variant.alternate_bases:
                dict_allele_frequency[alt] = 0

        yield deepvariant_pb2.DeepVariantCall(
            variant=candidate.variant,
            allele_support=candidate.allele_support,
            allele_frequency=dict_allele_frequency)
[ "def", "add_allele_frequencies_to_candidates", "(", "candidates", ",", "population_vcf_reader", ",", "ref_reader", ")", ":", "for", "candidate", "in", "candidates", ":", "if", "population_vcf_reader", ":", "dict_allele_frequency", "=", "find_matching_allele_frequency", "(", "variant", "=", "candidate", ".", "variant", ",", "population_vcf_reader", "=", "population_vcf_reader", ",", "ref_reader", "=", "ref_reader", ")", "else", ":", "# Set ALT frequencies to 0 if population_vcf_reader is None.", "dict_allele_frequency", "=", "{", "}", "dict_allele_frequency", "[", "candidate", ".", "variant", ".", "reference_bases", "]", "=", "1", "for", "alt", "in", "candidate", ".", "variant", ".", "alternate_bases", ":", "dict_allele_frequency", "[", "alt", "]", "=", "0", "yield", "deepvariant_pb2", ".", "DeepVariantCall", "(", "variant", "=", "candidate", ".", "variant", ",", "allele_support", "=", "candidate", ".", "allele_support", ",", "allele_frequency", "=", "dict_allele_frequency", ")" ]
https://github.com/google/deepvariant/blob/9cf1c7b0e2342d013180aa153cba3c9331c9aef7/deepvariant/allele_frequency.py#L344-L375
kiibohd/kll
b6d997b810006326d31fc570c89d396fd0b70569
kll/common/stage.py
python
OperationClassificationStage.command_line_args
(self, args)
Group parser for command line arguments
[ "Group", "parser", "for", "command", "line", "arguments" ]
def command_line_args(self, args):
    '''
    Group parser for command line arguments

    @param args: Name space of processed arguments
    '''
[ "def", "command_line_args", "(", "self", ",", "args", ")", ":" ]
https://github.com/kiibohd/kll/blob/b6d997b810006326d31fc570c89d396fd0b70569/kll/common/stage.py#L871-L876
SohuTech/essay
1989850b352b18845de5293393e979352595b392
essay/tasks/nginx.py
python
stop
(nginx_bin=None, nginx_conf=None, use_sudo=False)
Stop Nginx
[ "停止Nginx" ]
def stop(nginx_bin=None, nginx_conf=None, use_sudo=False):
    """
    Stop Nginx

    Parameters:
        nginx_bin: path to the nginx executable; taken from env if not provided.
        nginx_conf: path to the nginx config file; taken from env if not provided.
    """
    _nginx_command('stop', nginx_bin, nginx_conf, use_sudo=use_sudo)
[ "def", "stop", "(", "nginx_bin", "=", "None", ",", "nginx_conf", "=", "None", ",", "use_sudo", "=", "False", ")", ":", "_nginx_command", "(", "'stop'", ",", "nginx_bin", ",", "nginx_conf", ",", "use_sudo", "=", "use_sudo", ")" ]
https://github.com/SohuTech/essay/blob/1989850b352b18845de5293393e979352595b392/essay/tasks/nginx.py#L33-L42
mikedh/trimesh
6b1e05616b44e6dd708d9bc748b211656ebb27ec
trimesh/scene/transforms.py
python
SceneGraph.to_gltf
(self, scene, mesh_index=None)
return {'nodes': result}
Export the transforms as the 'nodes' section of the GLTF header dict.
[ "Export", "a", "transforms", "as", "the", "nodes", "section", "of", "the", "GLTF", "header", "dict", "." ]
def to_gltf(self, scene, mesh_index=None):
    """
    Export the transforms as the 'nodes' section of the GLTF header dict.

    Parameters
    ------------
    scene : trimesh.Scene
      Scene with geometry.
    mesh_index : dict or None
      Mapping { key in scene.geometry : int }

    Returns
    --------
    gltf : dict
      With 'nodes' referencing a list of dicts
    """
    if mesh_index is None:
        # geometry is an OrderedDict
        # map mesh name to index: {geometry key : index}
        mesh_index = {name: i for i, name
                      in enumerate(scene.geometry.keys())}

    # get graph information into local scope before loop
    graph = self.transforms
    # get the stored node data
    node_data = graph.node_data
    edge_data = graph.edge_data
    base_frame = self.base_frame

    # list of dict, in gltf format
    # start with base frame as first node index
    result = [{'name': base_frame}]
    # {node name : node index in gltf}
    lookup = {base_frame: 0}

    # collect the nodes in order
    for node in node_data.keys():
        if node == base_frame:
            continue
        # assign the index to the node-name lookup
        lookup[node] = len(result)
        # populate a result at the correct index
        result.append({'name': node})

    # get generated properties outside of loop
    # does the scene have a defined camera to export
    has_camera = scene.has_camera
    children = graph.children

    # then iterate through to collect data
    for info in result:
        # name of the scene node
        node = info['name']

        # get the original node names for children
        childs = children.get(node, [])
        if len(childs) > 0:
            info['children'] = [lookup[k] for k in childs]

        # if we have a mesh store by index
        if 'geometry' in node_data[node]:
            mesh_key = node_data[node]['geometry']
            if mesh_key in mesh_index:
                info['mesh'] = mesh_index[mesh_key]

        # check to see if we have camera node
        if has_camera and node == scene.camera.name:
            info['camera'] = 0

        if node != base_frame:
            parent = graph.parents[node]
            # get the matrix from this edge
            matrix = edge_data[(parent, node)]['matrix']
            # only include if it's not an identity matrix
            if np.abs(matrix - np.eye(4)).max() > 1e-5:
                info['matrix'] = matrix.T.reshape(-1).tolist()

            # if an extra was stored on this edge
            extras = edge_data[(parent, node)].get('extras')
            if extras:
                # convert any numpy arrays to lists
                extras.update(
                    {k: v.tolist() for k, v in extras.items()
                     if hasattr(v, 'tolist')})
                info['extras'] = extras

    return {'nodes': result}
[ "def", "to_gltf", "(", "self", ",", "scene", ",", "mesh_index", "=", "None", ")", ":", "if", "mesh_index", "is", "None", ":", "# geometry is an OrderedDict", "# map mesh name to index: {geometry key : index}", "mesh_index", "=", "{", "name", ":", "i", "for", "i", ",", "name", "in", "enumerate", "(", "scene", ".", "geometry", ".", "keys", "(", ")", ")", "}", "# get graph information into local scope before loop", "graph", "=", "self", ".", "transforms", "# get the stored node data", "node_data", "=", "graph", ".", "node_data", "edge_data", "=", "graph", ".", "edge_data", "base_frame", "=", "self", ".", "base_frame", "# list of dict, in gltf format", "# start with base frame as first node index", "result", "=", "[", "{", "'name'", ":", "base_frame", "}", "]", "# {node name : node index in gltf}", "lookup", "=", "{", "base_frame", ":", "0", "}", "# collect the nodes in order", "for", "node", "in", "node_data", ".", "keys", "(", ")", ":", "if", "node", "==", "base_frame", ":", "continue", "# assign the index to the node-name lookup", "lookup", "[", "node", "]", "=", "len", "(", "result", ")", "# populate a result at the correct index", "result", ".", "append", "(", "{", "'name'", ":", "node", "}", ")", "# get generated properties outside of loop", "# does the scene have a defined camera to export", "has_camera", "=", "scene", ".", "has_camera", "children", "=", "graph", ".", "children", "# then iterate through to collect data", "for", "info", "in", "result", ":", "# name of the scene node", "node", "=", "info", "[", "'name'", "]", "# get the original node names for children", "childs", "=", "children", ".", "get", "(", "node", ",", "[", "]", ")", "if", "len", "(", "childs", ")", ">", "0", ":", "info", "[", "'children'", "]", "=", "[", "lookup", "[", "k", "]", "for", "k", "in", "childs", "]", "# if we have a mesh store by index", "if", "'geometry'", "in", "node_data", "[", "node", "]", ":", "mesh_key", "=", "node_data", "[", "node", "]", "[", "'geometry'", "]", "if", "mesh_key", "in", "mesh_index", ":", "info", "[", "'mesh'", "]", "=", "mesh_index", "[", "mesh_key", "]", "# check to see if we have camera node", "if", "has_camera", "and", "node", "==", "scene", ".", "camera", ".", "name", ":", "info", "[", "'camera'", "]", "=", "0", "if", "node", "!=", "base_frame", ":", "parent", "=", "graph", ".", "parents", "[", "node", "]", "# get the matrix from this edge", "matrix", "=", "edge_data", "[", "(", "parent", ",", "node", ")", "]", "[", "'matrix'", "]", "# only include if it's not an identify matrix", "if", "np", ".", "abs", "(", "matrix", "-", "np", ".", "eye", "(", "4", ")", ")", ".", "max", "(", ")", ">", "1e-5", ":", "info", "[", "'matrix'", "]", "=", "matrix", ".", "T", ".", "reshape", "(", "-", "1", ")", ".", "tolist", "(", ")", "# if an extra was stored on this edge", "extras", "=", "edge_data", "[", "(", "parent", ",", "node", ")", "]", ".", "get", "(", "'extras'", ")", "if", "extras", ":", "# convert any numpy arrays to lists", "extras", ".", "update", "(", "{", "k", ":", "v", ".", "tolist", "(", ")", "for", "k", ",", "v", "in", "extras", ".", "items", "(", ")", "if", "hasattr", "(", "v", ",", "'tolist'", ")", "}", ")", "info", "[", "'extras'", "]", "=", "extras", "return", "{", "'nodes'", ":", "result", "}" ]
https://github.com/mikedh/trimesh/blob/6b1e05616b44e6dd708d9bc748b211656ebb27ec/trimesh/scene/transforms.py#L192-L280
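A minimal standalone sketch of the node-flattening idea used by to_gltf above: build a glTF-style 'nodes' list plus a name-to-index lookup from a parent/child mapping. All names here are hypothetical illustrations, not trimesh API.
# Hypothetical scene graph: child -> parent edges; 'world' is the base frame.
parents = {'body': 'world', 'wheel': 'body'}
base_frame = 'world'

# First pass: assign every node an index, base frame first (as to_gltf does).
nodes = [{'name': base_frame}]
lookup = {base_frame: 0}
for name in parents:
    lookup[name] = len(nodes)
    nodes.append({'name': name})

# Second pass: record children by index, mirroring the gltf 'nodes' layout.
for child, parent in parents.items():
    nodes[lookup[parent]].setdefault('children', []).append(lookup[child])

print(nodes)
# [{'name': 'world', 'children': [1]}, {'name': 'body', 'children': [2]}, {'name': 'wheel'}]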
nipy/nipype
cd4c34d935a43812d1756482fdc4034844e485b8
nipype/interfaces/base/support.py
python
RuntimeContext.__exit__
(self, exc_type, exc_value, exc_tb)
Tear-down interface execution.
Tear-down interface execution.
[ "Tear", "-", "down", "interface", "execution", "." ]
def __exit__(self, exc_type, exc_value, exc_tb): """Tear-down interface execution.""" self._runtime.endTime = dt.isoformat(dt.utcnow()) timediff = parseutc(self._runtime.endTime) - parseutc(self._runtime.startTime) self._runtime.duration = ( timediff.days * 86400 + timediff.seconds + timediff.microseconds / 1e6 ) # Collect monitored data for k, v in self._resmon.stop().items(): setattr(self._runtime, k, v) os.chdir(self._runtime.prevcwd) if exc_type is not None or exc_value is not None or exc_tb is not None: import traceback # Retrieve the maximum info fast self._runtime.traceback = "".join( traceback.format_exception(exc_type, exc_value, exc_tb) ) # Gather up the exception arguments and append nipype info. exc_args = exc_value.args if getattr(exc_value, "args") else tuple() exc_args += ( f"An exception of type {exc_type.__name__} occurred while " f"running interface {self._runtime.interface}.", ) self._runtime.traceback_args = ("\n".join([f"{arg}" for arg in exc_args]),) if self._ignore_exc: return True if hasattr(self._runtime, "cmdline"): retcode = self._runtime.returncode if retcode not in self._runtime.success_codes: self._runtime.traceback = ( f"RuntimeError: subprocess exited with code {retcode}." )
[ "def", "__exit__", "(", "self", ",", "exc_type", ",", "exc_value", ",", "exc_tb", ")", ":", "self", ".", "_runtime", ".", "endTime", "=", "dt", ".", "isoformat", "(", "dt", ".", "utcnow", "(", ")", ")", "timediff", "=", "parseutc", "(", "self", ".", "_runtime", ".", "endTime", ")", "-", "parseutc", "(", "self", ".", "_runtime", ".", "startTime", ")", "self", ".", "_runtime", ".", "duration", "=", "(", "timediff", ".", "days", "*", "86400", "+", "timediff", ".", "seconds", "+", "timediff", ".", "microseconds", "/", "1e6", ")", "# Collect monitored data", "for", "k", ",", "v", "in", "self", ".", "_resmon", ".", "stop", "(", ")", ".", "items", "(", ")", ":", "setattr", "(", "self", ".", "_runtime", ",", "k", ",", "v", ")", "os", ".", "chdir", "(", "self", ".", "_runtime", ".", "prevcwd", ")", "if", "exc_type", "is", "not", "None", "or", "exc_value", "is", "not", "None", "or", "exc_tb", "is", "not", "None", ":", "import", "traceback", "# Retrieve the maximum info fast", "self", ".", "_runtime", ".", "traceback", "=", "\"\"", ".", "join", "(", "traceback", ".", "format_exception", "(", "exc_type", ",", "exc_value", ",", "exc_tb", ")", ")", "# Gather up the exception arguments and append nipype info.", "exc_args", "=", "exc_value", ".", "args", "if", "getattr", "(", "exc_value", ",", "\"args\"", ")", "else", "tuple", "(", ")", "exc_args", "+=", "(", "f\"An exception of type {exc_type.__name__} occurred while \"", "f\"running interface {self._runtime.interface}.\"", ",", ")", "self", ".", "_runtime", ".", "traceback_args", "=", "(", "\"\\n\"", ".", "join", "(", "[", "f\"{arg}\"", "for", "arg", "in", "exc_args", "]", ")", ",", ")", "if", "self", ".", "_ignore_exc", ":", "return", "True", "if", "hasattr", "(", "self", ".", "_runtime", ",", "\"cmdline\"", ")", ":", "retcode", "=", "self", ".", "_runtime", ".", "returncode", "if", "retcode", "not", "in", "self", ".", "_runtime", ".", "success_codes", ":", "self", ".", "_runtime", ".", "traceback", "=", "(", "f\"RuntimeError: subprocess exited with code {retcode}.\"", ")" ]
https://github.com/nipy/nipype/blob/cd4c34d935a43812d1756482fdc4034844e485b8/nipype/interfaces/base/support.py#L82-L118
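A self-contained sketch of the same __exit__ pattern as the nipype method above -- timestamping, computing a duration, and capturing the formatted traceback on error. This is a simplified toy class, not the nipype RuntimeContext itself.
import traceback
from datetime import datetime, timezone

class TimedContext:
    def __enter__(self):
        self.start = datetime.now(timezone.utc)
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.duration = (datetime.now(timezone.utc) - self.start).total_seconds()
        if exc_type is not None:
            # Keep the full formatted traceback, as the method above does.
            self.traceback = "".join(
                traceback.format_exception(exc_type, exc_value, exc_tb))
        # Return False to re-raise; returning True would swallow the error,
        # like the _ignore_exc flag in the method above.
        return False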
AutodeskRoboticsLab/Mimic
85447f0d346be66988303a6a054473d92f1ed6f4
mimic/scripts/extern/pyqtgraph_0_11_0_dev0/pyqtgraph/ptime.py
python
winTime
()
return systime.clock() + START_TIME
Return the current time in seconds with high precision (windows version, use Manager.time() to stay platform independent).
Return the current time in seconds with high precision (windows version, use Manager.time() to stay platform independent).
[ "Return", "the", "current", "time", "in", "seconds", "with", "high", "precision", "(", "windows", "version", "use", "Manager", ".", "time", "()", "to", "stay", "platform", "independent", ")", "." ]
def winTime(): """Return the current time in seconds with high precision (windows version, use Manager.time() to stay platform independent).""" return systime.clock() + START_TIME
[ "def", "winTime", "(", ")", ":", "return", "systime", ".", "clock", "(", ")", "+", "START_TIME" ]
https://github.com/AutodeskRoboticsLab/Mimic/blob/85447f0d346be66988303a6a054473d92f1ed6f4/mimic/scripts/extern/pyqtgraph_0_11_0_dev0/pyqtgraph/ptime.py#L14-L16
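Note that time.clock() was removed in Python 3.8; a platform-independent sketch of the same "high-precision wall time" idea anchors a monotonic counter at an epoch offset, analogous to the START_TIME shift above:
import time

START_TIME = time.time() - time.perf_counter()  # align perf_counter to the epoch

def precise_time():
    # High-resolution monotonic counter shifted to look like time.time()
    return time.perf_counter() + START_TIME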
omz/PythonistaAppTemplate
f560f93f8876d82a21d108977f90583df08d55af
PythonistaAppTemplate/PythonistaKit.framework/pylib/_pyio.py
python
IOBase._checkWritable
(self, msg=None)
Internal: raise an IOError if file is not writable
Internal: raise an IOError if file is not writable
[ "Internal", ":", "raise", "an", "IOError", "if", "file", "is", "not", "writable" ]
def _checkWritable(self, msg=None): """Internal: raise an IOError if file is not writable """ if not self.writable(): raise IOError("File or stream is not writable." if msg is None else msg)
[ "def", "_checkWritable", "(", "self", ",", "msg", "=", "None", ")", ":", "if", "not", "self", ".", "writable", "(", ")", ":", "raise", "IOError", "(", "\"File or stream is not writable.\"", "if", "msg", "is", "None", "else", "msg", ")" ]
https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/_pyio.py#L400-L405
pantsbuild/pex
473c6ac732ed4bc338b4b20a9ec930d1d722c9b4
pex/vendor/_vendored/pip/pip/_vendor/ipaddress.py
python
IPv6Interface.__lt__
(self, other)
[]
def __lt__(self, other): address_less = IPv6Address.__lt__(self, other) if address_less is NotImplemented: return NotImplemented try: return (self.network < other.network or self.network == other.network and address_less) except AttributeError: # We *do* allow addresses and interfaces to be sorted. The # unassociated address is considered less than all interfaces. return False
[ "def", "__lt__", "(", "self", ",", "other", ")", ":", "address_less", "=", "IPv6Address", ".", "__lt__", "(", "self", ",", "other", ")", "if", "address_less", "is", "NotImplemented", ":", "return", "NotImplemented", "try", ":", "return", "(", "self", ".", "network", "<", "other", ".", "network", "or", "self", ".", "network", "==", "other", ".", "network", "and", "address_less", ")", "except", "AttributeError", ":", "# We *do* allow addresses and interfaces to be sorted. The", "# unassociated address is considered less than all interfaces.", "return", "False" ]
https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/vendor/_vendored/pip/pip/_vendor/ipaddress.py#L2219-L2229
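The ordering implemented above is observable with the stdlib ipaddress module: interfaces sort first by network, then by address within the same network.
from ipaddress import ip_interface

a = ip_interface('2001:db8::1/64')
b = ip_interface('2001:db8::2/64')
c = ip_interface('2001:db8:1::1/64')

print(a < b)  # True: same network, so compared by address
print(b < c)  # True: different networks, so compared by network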
crossbario/crossbar
ed350b7ba1c8421f3640b9c2e94a21ed4cfdff64
crossbar/network/_api.py
python
Network.find_domains
(self, include_owners: Optional[list] = None, include_titles: Optional[list] = None, include_descs: Optional[list] = None, include_tags: Optional[list] = None, details: Optional[CallDetails] = None)
Search for XBR Cloud Domains by owner, label, description, tags, etc. :param include_owners: If provided, only return cloud domains owned by any of the owners specified. :param include_titles: If provided, only return cloud domains with a title that contains any of the specified titles. :param include_descs: If provided, only return cloud domains with a description that contains any of the specified descriptions. :param include_tags: If provided, only return cloud domains with a tag that contains any of the specified tags. :param details: Caller details. :type details: :class:`autobahn.wamp.types.CallDetails` :return: List of addresses of cloud domains matching the search criteria.
Search for XBR Cloud Domains by owner, label, description, tags, etc.
[ "Search", "for", "XBR", "Cloud", "Domains", "by", "owner", "label", "description", "tags", "etc", "." ]
def find_domains(self, include_owners: Optional[list] = None, include_titles: Optional[list] = None, include_descs: Optional[list] = None, include_tags: Optional[list] = None, details: Optional[CallDetails] = None): """ Search for XBR Cloud Domains by owner, label, description, tags, etc. :param include_owners: If provided, only return cloud domains owned by any of the owners specified. :param include_titles: If provided, only return cloud domains with a title that contains any of the specified titles. :param include_descs: If provided, only return cloud domains with a description that contains any of the specified descriptions. :param include_tags: If provided, only return cloud domains with a tag that contains any of the specified tags. :param details: Caller details. :type details: :class:`autobahn.wamp.types.CallDetails` :return: List of addresses of cloud domains matching the search criteria. """ assert details is None or isinstance( details, CallDetails), 'details must be `autobahn.wamp.types.CallDetails`, but was `{}`'.format(details) raise NotImplementedError()
[ "def", "find_domains", "(", "self", ",", "include_owners", ":", "Optional", "[", "list", "]", "=", "None", ",", "include_titles", ":", "Optional", "[", "list", "]", "=", "None", ",", "include_descs", ":", "Optional", "[", "list", "]", "=", "None", ",", "include_tags", ":", "Optional", "[", "list", "]", "=", "None", ",", "details", ":", "Optional", "[", "CallDetails", "]", "=", "None", ")", ":", "assert", "details", "is", "None", "or", "isinstance", "(", "details", ",", "CallDetails", ")", ",", "'details must be `autobahn.wamp.types.CallDetails`, but was `{}`'", ".", "format", "(", "details", ")", "raise", "NotImplementedError", "(", ")" ]
https://github.com/crossbario/crossbar/blob/ed350b7ba1c8421f3640b9c2e94a21ed4cfdff64/crossbar/network/_api.py#L2639-L2664
Borda/pyImSegm
7584b40a8d5bba04d3bf46f540f22b5d923e4b03
imsegm/utilities/data_samples.py
python
sample_color_image_rand_segment
(im_size=SAMPLE_SEG_SIZE_2D_NORM, nb_classes=SAMPLE_SEG_NB_CLASSES, rand_seed=None)
return img, seg
create sample image and segmentation

    :param tuple(int,int) im_size:
    :param int nb_classes:
    :param rand_seed:
    :return:

    >>> im, seg = sample_color_image_rand_segment((5, 6), 2, rand_seed=0)
    >>> im.shape
    (5, 6, 3)
    >>> seg
    array([[1, 1, 0, 0, 1, 0],
           [0, 1, 1, 0, 1, 0],
           [0, 1, 0, 0, 0, 1],
           [1, 0, 1, 0, 0, 0],
           [0, 0, 1, 0, 1, 0]])
create sample image and segmentation
[ "create", "sample", "image", "and", "segmentation" ]
def sample_color_image_rand_segment(im_size=SAMPLE_SEG_SIZE_2D_NORM, nb_classes=SAMPLE_SEG_NB_CLASSES, rand_seed=None):
    """ create sample image and segmentation

    :param tuple(int,int) im_size:
    :param int nb_classes:
    :param rand_seed:
    :return:

    >>> im, seg = sample_color_image_rand_segment((5, 6), 2, rand_seed=0)
    >>> im.shape
    (5, 6, 3)
    >>> seg
    array([[1, 1, 0, 0, 1, 0],
           [0, 1, 1, 0, 1, 0],
           [0, 1, 0, 0, 0, 1],
           [1, 0, 1, 0, 0, 0],
           [0, 0, 1, 0, 1, 0]])
    """
    if len(im_size) != 2:
        raise ValueError('required image dimension is 2 to instead %r' % im_size)
    np.random.seed(rand_seed)
    im_size_rgb = (im_size[0], im_size[1], 3)
    img = np.random.random_integers(0, 255, im_size_rgb)
    seg = np.random.random_integers(0, nb_classes - 1, im_size)
    for lb in range(int(nb_classes)):
        val_step = 255 / nb_classes
        im = np.random.random_integers(int(val_step * lb), int(val_step * (lb + 1)), im_size_rgb)
        img[seg == lb] = im[seg == lb]
    # img = Image.fromarray(np.array(im, dtype=np.uint8), 'RGB')
    return img, seg
[ "def", "sample_color_image_rand_segment", "(", "im_size", "=", "SAMPLE_SEG_SIZE_2D_NORM", ",", "nb_classes", "=", "SAMPLE_SEG_NB_CLASSES", ",", "rand_seed", "=", "None", ")", ":", "if", "len", "(", "im_size", ")", "!=", "2", ":", "raise", "ValueError", "(", "'required image dimension is 2 to instead %r'", "%", "im_size", ")", "np", ".", "random", ".", "seed", "(", "rand_seed", ")", "im_size_rgb", "=", "(", "im_size", "[", "0", "]", ",", "im_size", "[", "1", "]", ",", "3", ")", "img", "=", "np", ".", "random", ".", "random_integers", "(", "0", ",", "255", ",", "im_size_rgb", ")", "seg", "=", "np", ".", "random", ".", "random_integers", "(", "0", ",", "nb_classes", "-", "1", ",", "im_size", ")", "for", "lb", "in", "range", "(", "int", "(", "nb_classes", ")", ")", ":", "val_step", "=", "255", "/", "nb_classes", "im", "=", "np", ".", "random", ".", "random_integers", "(", "int", "(", "val_step", "*", "lb", ")", ",", "int", "(", "val_step", "*", "(", "lb", "+", "1", ")", ")", ",", "im_size_rgb", ")", "img", "[", "seg", "==", "lb", "]", "=", "im", "[", "seg", "==", "lb", "]", "# img = Image.fromarray(np.array(im, dtype=np.uint8), 'RGB')", "return", "img", ",", "seg" ]
https://github.com/Borda/pyImSegm/blob/7584b40a8d5bba04d3bf46f540f22b5d923e4b03/imsegm/utilities/data_samples.py#L122-L151
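The np.random.random_integers calls above were deprecated and removed in NumPy 1.17+; a sketch of the same sampling with the supported API (note randint's upper bound is exclusive, so the exact doctest values above will not be reproduced):
import numpy as np

rng = np.random.RandomState(0)           # matches the rand_seed idea above
img = rng.randint(0, 256, (5, 6, 3))     # was random_integers(0, 255, ...)
seg = rng.randint(0, 2, (5, 6))          # labels in {0, 1} for nb_classes=2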
getnikola/nikola
2da876e9322e42a93f8295f950e336465c6a4ee5
nikola/utils.py
python
TemplateHookRegistry.__repr__
(self)
return '<TemplateHookRegistry: {0}>'.format(self.name)
Provide the representation of a registry.
Provide the representation of a registry.
[ "Provide", "the", "representation", "of", "a", "registry", "." ]
def __repr__(self): """Provide the representation of a registry.""" return '<TemplateHookRegistry: {0}>'.format(self.name)
[ "def", "__repr__", "(", "self", ")", ":", "return", "'<TemplateHookRegistry: {0}>'", ".", "format", "(", "self", ".", "name", ")" ]
https://github.com/getnikola/nikola/blob/2da876e9322e42a93f8295f950e336465c6a4ee5/nikola/utils.py#L481-L483
ljvmiranda921/seagull
26828319066b53301170345ba28ae27cb795d1fd
seagull/lifeforms/oscillators.py
python
Toad.__init__
(self)
Initialize the class
Initialize the class
[ "Initialize", "the", "class" ]
def __init__(self): """Initialize the class""" super(Toad, self).__init__()
[ "def", "__init__", "(", "self", ")", ":", "super", "(", "Toad", ",", "self", ")", ".", "__init__", "(", ")" ]
https://github.com/ljvmiranda921/seagull/blob/26828319066b53301170345ba28ae27cb795d1fd/seagull/lifeforms/oscillators.py#L34-L36
realpython/book2-exercises
cde325eac8e6d8cff2316601c2e5b36bb46af7d0
web2py/venv/lib/python2.7/site-packages/pip/_vendor/ipaddress.py
python
IPv6Network.__init__
(self, address, strict=True)
Instantiate a new IPv6 Network object. Args: address: A string or integer representing the IPv6 network or the IP and prefix/netmask. '2001:db8::/128' '2001:db8:0000:0000:0000:0000:0000:0000/128' '2001:db8::' are all functionally the same in IPv6. That is to say, failing to provide a subnetmask will create an object with a mask of /128. Additionally, an integer can be passed, so IPv6Network('2001:db8::') == IPv6Network(42540766411282592856903984951653826560) or, more generally IPv6Network(int(IPv6Network('2001:db8::'))) == IPv6Network('2001:db8::') strict: A boolean. If true, ensure that we have been passed A true network address, eg, 2001:db8::1000/124 and not an IP address on a network, eg, 2001:db8::1/124. Raises: AddressValueError: If address isn't a valid IPv6 address. NetmaskValueError: If the netmask isn't valid for an IPv6 address. ValueError: If strict was True and a network address was not supplied.
Instantiate a new IPv6 Network object.
[ "Instantiate", "a", "new", "IPv6", "Network", "object", "." ]
def __init__(self, address, strict=True): """Instantiate a new IPv6 Network object. Args: address: A string or integer representing the IPv6 network or the IP and prefix/netmask. '2001:db8::/128' '2001:db8:0000:0000:0000:0000:0000:0000/128' '2001:db8::' are all functionally the same in IPv6. That is to say, failing to provide a subnetmask will create an object with a mask of /128. Additionally, an integer can be passed, so IPv6Network('2001:db8::') == IPv6Network(42540766411282592856903984951653826560) or, more generally IPv6Network(int(IPv6Network('2001:db8::'))) == IPv6Network('2001:db8::') strict: A boolean. If true, ensure that we have been passed A true network address, eg, 2001:db8::1000/124 and not an IP address on a network, eg, 2001:db8::1/124. Raises: AddressValueError: If address isn't a valid IPv6 address. NetmaskValueError: If the netmask isn't valid for an IPv6 address. ValueError: If strict was True and a network address was not supplied. """ _BaseNetwork.__init__(self, address) # Efficient constructor from integer or packed address if isinstance(address, (bytes, _compat_int_types)): self.network_address = IPv6Address(address) self.netmask, self._prefixlen = self._make_netmask( self._max_prefixlen) return if isinstance(address, tuple): if len(address) > 1: arg = address[1] else: arg = self._max_prefixlen self.netmask, self._prefixlen = self._make_netmask(arg) self.network_address = IPv6Address(address[0]) packed = int(self.network_address) if packed & int(self.netmask) != packed: if strict: raise ValueError('%s has host bits set' % self) else: self.network_address = IPv6Address(packed & int(self.netmask)) return # Assume input argument to be string or any object representation # which converts into a formatted IP prefix string. addr = _split_optional_netmask(address) self.network_address = IPv6Address(self._ip_int_from_string(addr[0])) if len(addr) == 2: arg = addr[1] else: arg = self._max_prefixlen self.netmask, self._prefixlen = self._make_netmask(arg) if strict: if (IPv6Address(int(self.network_address) & int(self.netmask)) != self.network_address): raise ValueError('%s has host bits set' % self) self.network_address = IPv6Address(int(self.network_address) & int(self.netmask)) if self._prefixlen == (self._max_prefixlen - 1): self.hosts = self.__iter__
[ "def", "__init__", "(", "self", ",", "address", ",", "strict", "=", "True", ")", ":", "_BaseNetwork", ".", "__init__", "(", "self", ",", "address", ")", "# Efficient constructor from integer or packed address", "if", "isinstance", "(", "address", ",", "(", "bytes", ",", "_compat_int_types", ")", ")", ":", "self", ".", "network_address", "=", "IPv6Address", "(", "address", ")", "self", ".", "netmask", ",", "self", ".", "_prefixlen", "=", "self", ".", "_make_netmask", "(", "self", ".", "_max_prefixlen", ")", "return", "if", "isinstance", "(", "address", ",", "tuple", ")", ":", "if", "len", "(", "address", ")", ">", "1", ":", "arg", "=", "address", "[", "1", "]", "else", ":", "arg", "=", "self", ".", "_max_prefixlen", "self", ".", "netmask", ",", "self", ".", "_prefixlen", "=", "self", ".", "_make_netmask", "(", "arg", ")", "self", ".", "network_address", "=", "IPv6Address", "(", "address", "[", "0", "]", ")", "packed", "=", "int", "(", "self", ".", "network_address", ")", "if", "packed", "&", "int", "(", "self", ".", "netmask", ")", "!=", "packed", ":", "if", "strict", ":", "raise", "ValueError", "(", "'%s has host bits set'", "%", "self", ")", "else", ":", "self", ".", "network_address", "=", "IPv6Address", "(", "packed", "&", "int", "(", "self", ".", "netmask", ")", ")", "return", "# Assume input argument to be string or any object representation", "# which converts into a formatted IP prefix string.", "addr", "=", "_split_optional_netmask", "(", "address", ")", "self", ".", "network_address", "=", "IPv6Address", "(", "self", ".", "_ip_int_from_string", "(", "addr", "[", "0", "]", ")", ")", "if", "len", "(", "addr", ")", "==", "2", ":", "arg", "=", "addr", "[", "1", "]", "else", ":", "arg", "=", "self", ".", "_max_prefixlen", "self", ".", "netmask", ",", "self", ".", "_prefixlen", "=", "self", ".", "_make_netmask", "(", "arg", ")", "if", "strict", ":", "if", "(", "IPv6Address", "(", "int", "(", "self", ".", "network_address", ")", "&", "int", "(", "self", ".", "netmask", ")", ")", "!=", "self", ".", "network_address", ")", ":", "raise", "ValueError", "(", "'%s has host bits set'", "%", "self", ")", "self", ".", "network_address", "=", "IPv6Address", "(", "int", "(", "self", ".", "network_address", ")", "&", "int", "(", "self", ".", "netmask", ")", ")", "if", "self", ".", "_prefixlen", "==", "(", "self", ".", "_max_prefixlen", "-", "1", ")", ":", "self", ".", "hosts", "=", "self", ".", "__iter__" ]
https://github.com/realpython/book2-exercises/blob/cde325eac8e6d8cff2316601c2e5b36bb46af7d0/web2py/venv/lib/python2.7/site-packages/pip/_vendor/ipaddress.py#L2285-L2362
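The strict flag documented above is easy to see with the stdlib ipaddress module: host bits raise under strict parsing and are masked off otherwise.
import ipaddress

ipaddress.IPv6Network('2001:db8::/32')                 # fine: a true network address
ipaddress.IPv6Network('2001:db8::1/64', strict=False)  # host bits masked -> 2001:db8::/64
try:
    ipaddress.IPv6Network('2001:db8::1/64')            # strict=True is the default
except ValueError as e:
    print(e)  # "2001:db8::1/64 has host bits set"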
krintoxi/NoobSec-Toolkit
38738541cbc03cedb9a3b3ed13b629f781ad64f6
NoobSecToolkit /tools/sqli/plugins/dbms/firebird/fingerprint.py
python
Fingerprint._sysTablesCheck
(self)
return retVal
[]
def _sysTablesCheck(self): retVal = None table = ( ("1.0", ("EXISTS(SELECT CURRENT_USER FROM RDB$DATABASE)",)), ("1.5", ("NULLIF(%d,%d) IS NULL", "EXISTS(SELECT CURRENT_TRANSACTION FROM RDB$DATABASE)")), ("2.0", ("EXISTS(SELECT CURRENT_TIME(0) FROM RDB$DATABASE)", "BIT_LENGTH(%d)>0", "CHAR_LENGTH(%d)>0")), ("2.1", ("BIN_XOR(%d,%d)=0", "PI()>0.%d", "RAND()<1.%d", "FLOOR(1.%d)>=0")), # TODO: add test for Firebird 2.5 ) for i in xrange(len(table)): version, checks = table[i] failed = False check = checks[randomRange(0, len(checks) - 1)].replace("%d", getUnicode(randomRange(1, 100))) result = inject.checkBooleanExpression(check) if result: retVal = version else: failed = True break if failed: break return retVal
[ "def", "_sysTablesCheck", "(", "self", ")", ":", "retVal", "=", "None", "table", "=", "(", "(", "\"1.0\"", ",", "(", "\"EXISTS(SELECT CURRENT_USER FROM RDB$DATABASE)\"", ",", ")", ")", ",", "(", "\"1.5\"", ",", "(", "\"NULLIF(%d,%d) IS NULL\"", ",", "\"EXISTS(SELECT CURRENT_TRANSACTION FROM RDB$DATABASE)\"", ")", ")", ",", "(", "\"2.0\"", ",", "(", "\"EXISTS(SELECT CURRENT_TIME(0) FROM RDB$DATABASE)\"", ",", "\"BIT_LENGTH(%d)>0\"", ",", "\"CHAR_LENGTH(%d)>0\"", ")", ")", ",", "(", "\"2.1\"", ",", "(", "\"BIN_XOR(%d,%d)=0\"", ",", "\"PI()>0.%d\"", ",", "\"RAND()<1.%d\"", ",", "\"FLOOR(1.%d)>=0\"", ")", ")", ",", "# TODO: add test for Firebird 2.5", ")", "for", "i", "in", "xrange", "(", "len", "(", "table", ")", ")", ":", "version", ",", "checks", "=", "table", "[", "i", "]", "failed", "=", "False", "check", "=", "checks", "[", "randomRange", "(", "0", ",", "len", "(", "checks", ")", "-", "1", ")", "]", ".", "replace", "(", "\"%d\"", ",", "getUnicode", "(", "randomRange", "(", "1", ",", "100", ")", ")", ")", "result", "=", "inject", ".", "checkBooleanExpression", "(", "check", ")", "if", "result", ":", "retVal", "=", "version", "else", ":", "failed", "=", "True", "break", "if", "failed", ":", "break", "return", "retVal" ]
https://github.com/krintoxi/NoobSec-Toolkit/blob/38738541cbc03cedb9a3b3ed13b629f781ad64f6/NoobSecToolkit /tools/sqli/plugins/dbms/firebird/fingerprint.py#L69-L94
nlloyd/SubliminalCollaborator
5c619e17ddbe8acb9eea8996ec038169ddcd50a1
libs/twisted/words/xish/utility.py
python
EventDispatcher.addObserver
(self, event, observerfn, priority=0, *args, **kwargs)
Register an observer for an event.

Each observer will be registered with a certain priority. Higher priority observers get called before lower priority observers.

@param event: Name or XPath query for the event to be monitored. @type event: C{str} or L{xpath.XPathQuery}. @param observerfn: Function to be called when the specified event has been triggered. This callable takes one parameter: the data object that triggered the event. When specified, the C{*args} and C{**kwargs} parameters to addObserver are being used as additional parameters to the registered observer callable. @param priority: (Optional) priority of this observer in relation to other observers that match the same event. Defaults to C{0}. @type priority: C{int}
Register an observer for an event.
[ "Register", "an", "observer", "for", "an", "event", "." ]
def addObserver(self, event, observerfn, priority=0, *args, **kwargs):
    """
    Register an observer for an event.

    Each observer will be registered with a certain priority. Higher
    priority observers get called before lower priority observers.

    @param event: Name or XPath query for the event to be monitored.
    @type event: C{str} or L{xpath.XPathQuery}.
    @param observerfn: Function to be called when the specified event
                       has been triggered. This callable takes
                       one parameter: the data object that triggered
                       the event. When specified, the C{*args} and
                       C{**kwargs} parameters to addObserver are being used
                       as additional parameters to the registered observer
                       callable.
    @param priority: (Optional) priority of this observer in relation to
                     other observers that match the same event. Defaults to
                     C{0}.
    @type priority: C{int}
    """
    self._addObserver(False, event, observerfn, priority, *args, **kwargs)
[ "def", "addObserver", "(", "self", ",", "event", ",", "observerfn", ",", "priority", "=", "0", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_addObserver", "(", "False", ",", "event", ",", "observerfn", ",", "priority", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
https://github.com/nlloyd/SubliminalCollaborator/blob/5c619e17ddbe8acb9eea8996ec038169ddcd50a1/libs/twisted/words/xish/utility.py#L202-L223
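A minimal sketch of the priority behaviour described above -- higher-priority observers fire first. This is a toy registry for illustration, not twisted's implementation.
class Dispatcher:
    def __init__(self):
        self.observers = []  # list of (priority, callable)

    def add_observer(self, fn, priority=0):
        self.observers.append((priority, fn))

    def dispatch(self, data):
        # Higher priority first, mirroring the contract documented above.
        for _, fn in sorted(self.observers, key=lambda pair: -pair[0]):
            fn(data)

d = Dispatcher()
d.add_observer(lambda obj: print('low', obj), priority=0)
d.add_observer(lambda obj: print('high', obj), priority=10)
d.dispatch('event')  # prints 'high event' then 'low event'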
oracle/oci-python-sdk
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
examples/showoci/showoci_to_se.py
python
ShowOCI2SE.__convert_load_balancer_backendset
(self, region_name, load_balance_obj)
[]
def __convert_load_balancer_backendset(self, region_name, load_balance_obj): try: lb = load_balance_obj['details'] backendset = load_balance_obj['backendset'] for bs in backendset: self.LoadBalancerBackendsetId += 1 data = {'class': 'LoadBalancerBackendset' + str(self.LoadBalancerBackendsetId), 'region_name': region_name, 'compartment_name': lb['compartment_name'], 'compartment_id': lb['compartment_id'], 'loadBalancer_id': lb['id'], 'loadBalancer_name': lb['display_name'], 'backendset': backendset } self.outdata.append(data) except Exception as e: self.__print_error("__convert_load_balancer_backendset", e)
[ "def", "__convert_load_balancer_backendset", "(", "self", ",", "region_name", ",", "load_balance_obj", ")", ":", "try", ":", "lb", "=", "load_balance_obj", "[", "'details'", "]", "backendset", "=", "load_balance_obj", "[", "'backendset'", "]", "for", "bs", "in", "backendset", ":", "self", ".", "LoadBalancerBackendsetId", "+=", "1", "data", "=", "{", "'class'", ":", "'LoadBalancerBackendset'", "+", "str", "(", "self", ".", "LoadBalancerBackendsetId", ")", ",", "'region_name'", ":", "region_name", ",", "'compartment_name'", ":", "lb", "[", "'compartment_name'", "]", ",", "'compartment_id'", ":", "lb", "[", "'compartment_id'", "]", ",", "'loadBalancer_id'", ":", "lb", "[", "'id'", "]", ",", "'loadBalancer_name'", ":", "lb", "[", "'display_name'", "]", ",", "'backendset'", ":", "backendset", "}", "self", ".", "outdata", ".", "append", "(", "data", ")", "except", "Exception", "as", "e", ":", "self", ".", "__print_error", "(", "\"__convert_load_balancer_backendset\"", ",", "e", ")" ]
https://github.com/oracle/oci-python-sdk/blob/3c1604e4e212008fb6718e2f68cdb5ef71fd5793/examples/showoci/showoci_to_se.py#L1002-L1022
viblo/pymunk
77647ca037d5ceabd728f20f37d2da8a3bfb73a0
pymunk/space.py
python
Space.reindex_static
(self)
Update the collision detection info for the static shapes in the space. You only need to call this if you move one of the static shapes.
Update the collision detection info for the static shapes in the space. You only need to call this if you move one of the static shapes.
[ "Update", "the", "collision", "detection", "info", "for", "the", "static", "shapes", "in", "the", "space", ".", "You", "only", "need", "to", "call", "this", "if", "you", "move", "one", "of", "the", "static", "shapes", "." ]
def reindex_static(self) -> None: """Update the collision detection info for the static shapes in the space. You only need to call this if you move one of the static shapes. """ cp.cpSpaceReindexStatic(self._space)
[ "def", "reindex_static", "(", "self", ")", "->", "None", ":", "cp", ".", "cpSpaceReindexStatic", "(", "self", ".", "_space", ")" ]
https://github.com/viblo/pymunk/blob/77647ca037d5ceabd728f20f37d2da8a3bfb73a0/pymunk/space.py#L506-L510
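Typical usage, assuming current pymunk: after moving static geometry, call reindex_static() so the spatial index matches the new positions.
import pymunk

space = pymunk.Space()
wall = pymunk.Segment(space.static_body, (0, 0), (100, 0), 1.0)
space.add(wall)

space.static_body.position = (0, 10)  # move the static shape's body
space.reindex_static()                # refresh the collision detection info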
perone/Pyevolve
589b6a9b92ed1fd9ef00987bf4bfe807c4a7b7e0
pyevolve/GTree.py
python
GTreeNodeGP.compare
(self, other)
return -1
Compare this node with other :param other: the other GTreeNodeGP
Compare this node with other
[ "Compare", "this", "node", "with", "other" ]
def compare(self, other): """ Compare this node with other :param other: the other GTreeNodeGP """ if not isinstance(other, GTreeNodeGP): Util.raiseException("The other node used to compare is not a GTreeNodeGP class", TypeError) if other.node_type == self.node_type: if other.node_data == self.node_data: return 0 return -1
[ "def", "compare", "(", "self", ",", "other", ")", ":", "if", "not", "isinstance", "(", "other", ",", "GTreeNodeGP", ")", ":", "Util", ".", "raiseException", "(", "\"The other node used to compare is not a GTreeNodeGP class\"", ",", "TypeError", ")", "if", "other", ".", "node_type", "==", "self", ".", "node_type", ":", "if", "other", ".", "node_data", "==", "self", ".", "node_data", ":", "return", "0", "return", "-", "1" ]
https://github.com/perone/Pyevolve/blob/589b6a9b92ed1fd9ef00987bf4bfe807c4a7b7e0/pyevolve/GTree.py#L254-L265
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/components/isy994/climate.py
python
ISYThermostatEntity.fan_modes
(self)
return [FAN_AUTO, FAN_ON]
Return the list of available fan modes.
Return the list of available fan modes.
[ "Return", "the", "list", "of", "available", "fan", "modes", "." ]
def fan_modes(self): """Return the list of available fan modes.""" return [FAN_AUTO, FAN_ON]
[ "def", "fan_modes", "(", "self", ")", ":", "return", "[", "FAN_AUTO", ",", "FAN_ON", "]" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/isy994/climate.py#L189-L191
pantsbuild/pex
473c6ac732ed4bc338b4b20a9ec930d1d722c9b4
pex/vendor/_vendored/pip/pip/_vendor/ipaddress.py
python
_find_address_range
(addresses)
Find a sequence of sorted deduplicated IPv#Address. Args: addresses: a list of IPv#Address objects. Yields: A tuple containing the first and last IP addresses in the sequence.
Find a sequence of sorted deduplicated IPv#Address.
[ "Find", "a", "sequence", "of", "sorted", "deduplicated", "IPv#Address", "." ]
def _find_address_range(addresses): """Find a sequence of sorted deduplicated IPv#Address. Args: addresses: a list of IPv#Address objects. Yields: A tuple containing the first and last IP addresses in the sequence. """ it = iter(addresses) first = last = next(it) for ip in it: if ip._ip != last._ip + 1: yield first, last first = ip last = ip yield first, last
[ "def", "_find_address_range", "(", "addresses", ")", ":", "it", "=", "iter", "(", "addresses", ")", "first", "=", "last", "=", "next", "(", "it", ")", "for", "ip", "in", "it", ":", "if", "ip", ".", "_ip", "!=", "last", ".", "_ip", "+", "1", ":", "yield", "first", ",", "last", "first", "=", "ip", "last", "=", "ip", "yield", "first", ",", "last" ]
https://github.com/pantsbuild/pex/blob/473c6ac732ed4bc338b4b20a9ec930d1d722c9b4/pex/vendor/_vendored/pip/pip/_vendor/ipaddress.py#L286-L303
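The same range-collapsing idea, runnable against the stdlib (the helper above is private to the vendored module, so this re-implements it under that assumption):
from ipaddress import IPv4Address

def find_ranges(addresses):
    it = iter(addresses)
    first = last = next(it)
    for ip in it:
        if int(ip) != int(last) + 1:  # gap: close the current run
            yield first, last
            first = ip
        last = ip
    yield first, last

addrs = [IPv4Address('192.0.2.%d' % i) for i in (1, 2, 3, 7, 8)]
print(list(find_ranges(addrs)))
# [(IPv4Address('192.0.2.1'), IPv4Address('192.0.2.3')),
#  (IPv4Address('192.0.2.7'), IPv4Address('192.0.2.8'))]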
google/clusterfuzz
f358af24f414daa17a3649b143e71ea71871ef59
src/clusterfuzz/_internal/bot/untrusted_runner/build_setup.py
python
setup_regular_build
(request)
return _build_response(build.setup())
Set up a regular build.
Set up a regular build.
[ "Set", "up", "a", "regular", "build", "." ]
def setup_regular_build(request): """Set up a regular build.""" build = build_manager.RegularBuild(request.base_build_dir, request.revision, request.build_url, request.target_weights, request.build_prefix) return _build_response(build.setup())
[ "def", "setup_regular_build", "(", "request", ")", ":", "build", "=", "build_manager", ".", "RegularBuild", "(", "request", ".", "base_build_dir", ",", "request", ".", "revision", ",", "request", ".", "build_url", ",", "request", ".", "target_weights", ",", "request", ".", "build_prefix", ")", "return", "_build_response", "(", "build", ".", "setup", "(", ")", ")" ]
https://github.com/google/clusterfuzz/blob/f358af24f414daa17a3649b143e71ea71871ef59/src/clusterfuzz/_internal/bot/untrusted_runner/build_setup.py#L36-L41
sagemath/sage
f9b2db94f675ff16963ccdefba4f1a3393b3fe0d
src/sage/rings/polynomial/ore_function_element.py
python
OreFunctionBaseringInjection.section
(self)
return ConstantOreFunctionSection(self.codomain(), self.domain())
r""" Return the canonical homomorphism from the constants of a Ore function filed to its base field. TESTS:: sage: k.<t> = GF(5^3) sage: Frob = k.frobenius_endomorphism() sage: S.<x> = k['x',Frob] sage: K = S.fraction_field() sage: m = K.coerce_map_from(k) sage: m.section() Generic map: From: Ore Function Field in x over Finite Field in t of size 5^3 twisted by t |--> t^5 To: Finite Field in t of size 5^3
r""" Return the canonical homomorphism from the constants of a Ore function filed to its base field.
[ "r", "Return", "the", "canonical", "homomorphism", "from", "the", "constants", "of", "a", "Ore", "function", "filed", "to", "its", "base", "field", "." ]
def section(self): r""" Return the canonical homomorphism from the constants of a Ore function filed to its base field. TESTS:: sage: k.<t> = GF(5^3) sage: Frob = k.frobenius_endomorphism() sage: S.<x> = k['x',Frob] sage: K = S.fraction_field() sage: m = K.coerce_map_from(k) sage: m.section() Generic map: From: Ore Function Field in x over Finite Field in t of size 5^3 twisted by t |--> t^5 To: Finite Field in t of size 5^3 """ return ConstantOreFunctionSection(self.codomain(), self.domain())
[ "def", "section", "(", "self", ")", ":", "return", "ConstantOreFunctionSection", "(", "self", ".", "codomain", "(", ")", ",", "self", ".", "domain", "(", ")", ")" ]
https://github.com/sagemath/sage/blob/f9b2db94f675ff16963ccdefba4f1a3393b3fe0d/src/sage/rings/polynomial/ore_function_element.py#L787-L804
lovelylain/pyctp
fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d
example/ctp/stock/ApiStruct.py
python
QryTraderOffer.__init__
(self, ExchangeID='', ParticipantID='', TraderID='')
[]
def __init__(self, ExchangeID='', ParticipantID='', TraderID=''):
    self.ExchangeID = '' #exchange ID, char[9]
    self.ParticipantID = '' #participant ID, char[11]
    self.TraderID = ''
[ "def", "__init__", "(", "self", ",", "ExchangeID", "=", "''", ",", "ParticipantID", "=", "''", ",", "TraderID", "=", "''", ")", ":", "self", ".", "ExchangeID", "=", "''", "#exchange ID, char[9]", "self", ".", "ParticipantID", "=", "''", "#participant ID, char[11]", "self", ".", "TraderID", "=", "''" ]
https://github.com/lovelylain/pyctp/blob/fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d/example/ctp/stock/ApiStruct.py#L2627-L2630
wucng/TensorExpand
4ea58f64f5c5082b278229b799c9f679536510b7
TensorExpand/Object detection/Mask RCNN/matterport-Mask_RCNN/train_mnist.py
python
ShapesDataset.Image_Processing
(self,image)
return img, img_mask
Resize to 32x32 and convert pixel values to 0 and 1
Resize to 32x32 and convert pixel values to 0 and 1
[ "Resize", "to", "32x32", "and", "convert", "pixel", "values", "to", "0", "and", "1" ]
def Image_Processing(self,image):
    '''Resize to 32x32 and convert pixel values to 0 and 1'''
    image = np.reshape(image, [28, 28])  # [28,28]
    img = cv2.resize(image, (32, 32))  # [32,32]
    img_mask = np.round(img)  # convert to the corresponding mask
    return img, img_mask
[ "def", "Image_Processing", "(", "self", ",", "image", ")", ":", "image", "=", "np", ".", "reshape", "(", "image", ",", "[", "28", ",", "28", "]", ")", "# [28,28]", "img", "=", "cv2", ".", "resize", "(", "image", ",", "(", "32", ",", "32", ")", ")", "# [32,32]", "img_mask", "=", "np", ".", "round", "(", "img", ")", "# convert to the corresponding mask", "return", "img", ",", "img_mask" ]
https://github.com/wucng/TensorExpand/blob/4ea58f64f5c5082b278229b799c9f679536510b7/TensorExpand/Object detection/Mask RCNN/matterport-Mask_RCNN/train_mnist.py#L223-L229
wnhsu/FactorizedHierarchicalVAE
7e3e23aebff70df2bd038f059d38292b864e44c1
src/tools/kaldi/sph_scp_to_wav.py
python
sph_scp_to_wav
(sph_scp, wav_dir, wav_scp)
generate wav files with name %(wav_dir)/%(utt_id).wav and wav_scp file of "%(utt_id) %(wav_path)" format
generate wav files with name %(wav_dir)/%(utt_id).wav and wav_scp file of "%(utt_id) %(wav_path)" format
[ "generate", "wav", "files", "with", "name", "%", "(", "wav_dir", ")", "/", "%", "(", "utt_id", ")", ".", "wav", "and", "wav_scp", "file", "of", "%", "(", "utt_id", ")", "%", "(", "wav_path", ")", "format" ]
def sph_scp_to_wav(sph_scp, wav_dir, wav_scp): """ generate wav files with name %(wav_dir)/%(utt_id).wav and wav_scp file of "%(utt_id) %(wav_path)" format """ with open(sph_scp) as f: lines = [line.rstrip() for line in f] sph_list = [(line.split()[0], line.split()[1:-1]) for line in lines] if len(sph_list[0][1]) == 0: info("%s already contains a list of wav files." % sph_scp) subprocess.check_output(["cp", sph_scp, wav_scp]) return check_and_makedirs(wav_dir) if os.path.dirname(wav_scp): check_and_makedirs(os.path.dirname(wav_scp)) with open(wav_scp, "w") as f: for utt_id, command in sph_list: debug(utt_id, command) wav_path = "%s/%s.wav" % (wav_dir, utt_id) with open(wav_path, "w") as f_wav: subprocess.Popen(command, stdout=f_wav) f.write("%s %s\n" % (utt_id, os.path.abspath(wav_path)))
[ "def", "sph_scp_to_wav", "(", "sph_scp", ",", "wav_dir", ",", "wav_scp", ")", ":", "with", "open", "(", "sph_scp", ")", "as", "f", ":", "lines", "=", "[", "line", ".", "rstrip", "(", ")", "for", "line", "in", "f", "]", "sph_list", "=", "[", "(", "line", ".", "split", "(", ")", "[", "0", "]", ",", "line", ".", "split", "(", ")", "[", "1", ":", "-", "1", "]", ")", "for", "line", "in", "lines", "]", "if", "len", "(", "sph_list", "[", "0", "]", "[", "1", "]", ")", "==", "0", ":", "info", "(", "\"%s already contains a list of wav files.\"", "%", "sph_scp", ")", "subprocess", ".", "check_output", "(", "[", "\"cp\"", ",", "sph_scp", ",", "wav_scp", "]", ")", "return", "check_and_makedirs", "(", "wav_dir", ")", "if", "os", ".", "path", ".", "dirname", "(", "wav_scp", ")", ":", "check_and_makedirs", "(", "os", ".", "path", ".", "dirname", "(", "wav_scp", ")", ")", "with", "open", "(", "wav_scp", ",", "\"w\"", ")", "as", "f", ":", "for", "utt_id", ",", "command", "in", "sph_list", ":", "debug", "(", "utt_id", ",", "command", ")", "wav_path", "=", "\"%s/%s.wav\"", "%", "(", "wav_dir", ",", "utt_id", ")", "with", "open", "(", "wav_path", ",", "\"w\"", ")", "as", "f_wav", ":", "subprocess", ".", "Popen", "(", "command", ",", "stdout", "=", "f_wav", ")", "f", ".", "write", "(", "\"%s %s\\n\"", "%", "(", "utt_id", ",", "os", ".", "path", ".", "abspath", "(", "wav_path", ")", ")", ")" ]
https://github.com/wnhsu/FactorizedHierarchicalVAE/blob/7e3e23aebff70df2bd038f059d38292b864e44c1/src/tools/kaldi/sph_scp_to_wav.py#L8-L31
xme/toolbox
b32be04b6018483f968da1422d68ed6b8b9ee61b
pum.py
python
commitChanges
(fw, key, partial)
return
---------------------------------
---------------------------------
[ "---------------------------------" ]
def commitChanges(fw, key, partial): """ --------------------------------- """ """ Commit the firewall configuration """ """ --------------------------------- """ if not fw or not key or not urlCategory: return if partial: if verbose: sys.stdout.write("+++ Saving firewall changes (partial)\n") req = "https://%s/api/?type=commit&action=partial&key=%s&cmd=<commit><partial><policy-and-objects></policy-and-objects></partial></commit>" % (fw, key) else: if verbose: sys.stdout.write("+++ Saving firewall changes (full)\n") req = "https://%s/api/?type=commit&key=%s&cmd=<commit></commit>" % (fw, key) data = getHTTP(req) if not data: return xml = ET.fromstring(data) # Sample # ------ # <response code="19" status="success"> # <result><msg><line>Commit job enqueued with jobid 4</line></msg><job>4</job></result></response> print ET.tostring(xml) result = xml.find('msg') err_code = xml.get('code') err_status = xml.get('status') for i in xml.getiterator('line'): err_msg = i.text.rstrip() if err_status != "success": sys.stdout.write("ERROR: Cannot commit: %s (code=%s)\n" % (err_msg, err_code)) return
[ "def", "commitChanges", "(", "fw", ",", "key", ",", "partial", ")", ":", "\"\"\" Commit the firewall configuration \"\"\"", "\"\"\" --------------------------------- \"\"\"", "if", "not", "fw", "or", "not", "key", "or", "not", "urlCategory", ":", "return", "if", "partial", ":", "if", "verbose", ":", "sys", ".", "stdout", ".", "write", "(", "\"+++ Saving firewall changes (partial)\\n\"", ")", "req", "=", "\"https://%s/api/?type=commit&action=partial&key=%s&cmd=<commit><partial><policy-and-objects></policy-and-objects></partial></commit>\"", "%", "(", "fw", ",", "key", ")", "else", ":", "if", "verbose", ":", "sys", ".", "stdout", ".", "write", "(", "\"+++ Saving firewall changes (full)\\n\"", ")", "req", "=", "\"https://%s/api/?type=commit&key=%s&cmd=<commit></commit>\"", "%", "(", "fw", ",", "key", ")", "data", "=", "getHTTP", "(", "req", ")", "if", "not", "data", ":", "return", "xml", "=", "ET", ".", "fromstring", "(", "data", ")", "# Sample", "# ------", "# <response code=\"19\" status=\"success\">", "# <result><msg><line>Commit job enqueued with jobid 4</line></msg><job>4</job></result></response>", "print", "ET", ".", "tostring", "(", "xml", ")", "result", "=", "xml", ".", "find", "(", "'msg'", ")", "err_code", "=", "xml", ".", "get", "(", "'code'", ")", "err_status", "=", "xml", ".", "get", "(", "'status'", ")", "for", "i", "in", "xml", ".", "getiterator", "(", "'line'", ")", ":", "err_msg", "=", "i", ".", "text", ".", "rstrip", "(", ")", "if", "err_status", "!=", "\"success\"", ":", "sys", ".", "stdout", ".", "write", "(", "\"ERROR: Cannot commit: %s (code=%s)\\n\"", "%", "(", "err_msg", ",", "err_code", ")", ")", "return" ]
https://github.com/xme/toolbox/blob/b32be04b6018483f968da1422d68ed6b8b9ee61b/pum.py#L62-L91
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/site-packages/sympy/liealgebras/type_b.py
python
TypeB.lie_algebra
(self)
return "so(" + str(2*n) + ")"
Returns the Lie algebra associated with B_n
Returns the Lie algebra associated with B_n
[ "Returns", "the", "Lie", "algebra", "associated", "with", "B_n" ]
def lie_algebra(self):
    """
    Returns the Lie algebra associated with B_n
    """
    n = self.n
    return "so(" + str(2*n+1) + ")"
[ "def", "lie_algebra", "(", "self", ")", ":", "n", "=", "self", ".", "n", "return", "\"so(\"", "+", "str", "(", "2", "*", "n", "+", "1", ")", "+", "\")\"" ]
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/sympy/liealgebras/type_b.py#L163-L169
zynga/jasy
8a2ec2c2ca3f6c0f73cba4306e581c89b30f1b18
jasy/core/Project.py
python
Project.pause
(self)
Pauses the project so that other processes could modify/access it
Pauses the project so that other processes could modify/access it
[ "Pauses", "the", "project", "so", "that", "other", "processes", "could", "modify", "/", "access", "it" ]
def pause(self): """Pauses the project so that other processes could modify/access it""" self.__cache.close()
[ "def", "pause", "(", "self", ")", ":", "self", ".", "__cache", ".", "close", "(", ")" ]
https://github.com/zynga/jasy/blob/8a2ec2c2ca3f6c0f73cba4306e581c89b30f1b18/jasy/core/Project.py#L557-L560
oracle/graalpython
577e02da9755d916056184ec441c26e00b70145c
graalpython/lib-python/3/zipfile.py
python
ZipExtFile._read1
(self, n)
return data
[]
def _read1(self, n): # Read up to n compressed bytes with at most one read() system call, # decrypt and decompress them. if self._eof or n <= 0: return b'' # Read from file. if self._compress_type == ZIP_DEFLATED: ## Handle unconsumed data. data = self._decompressor.unconsumed_tail if n > len(data): data += self._read2(n - len(data)) else: data = self._read2(n) if self._compress_type == ZIP_STORED: self._eof = self._compress_left <= 0 elif self._compress_type == ZIP_DEFLATED: n = max(n, self.MIN_READ_SIZE) data = self._decompressor.decompress(data, n) self._eof = (self._decompressor.eof or self._compress_left <= 0 and not self._decompressor.unconsumed_tail) if self._eof: data += self._decompressor.flush() else: data = self._decompressor.decompress(data) self._eof = self._decompressor.eof or self._compress_left <= 0 data = data[:self._left] self._left -= len(data) if self._left <= 0: self._eof = True self._update_crc(data) return data
[ "def", "_read1", "(", "self", ",", "n", ")", ":", "# Read up to n compressed bytes with at most one read() system call,", "# decrypt and decompress them.", "if", "self", ".", "_eof", "or", "n", "<=", "0", ":", "return", "b''", "# Read from file.", "if", "self", ".", "_compress_type", "==", "ZIP_DEFLATED", ":", "## Handle unconsumed data.", "data", "=", "self", ".", "_decompressor", ".", "unconsumed_tail", "if", "n", ">", "len", "(", "data", ")", ":", "data", "+=", "self", ".", "_read2", "(", "n", "-", "len", "(", "data", ")", ")", "else", ":", "data", "=", "self", ".", "_read2", "(", "n", ")", "if", "self", ".", "_compress_type", "==", "ZIP_STORED", ":", "self", ".", "_eof", "=", "self", ".", "_compress_left", "<=", "0", "elif", "self", ".", "_compress_type", "==", "ZIP_DEFLATED", ":", "n", "=", "max", "(", "n", ",", "self", ".", "MIN_READ_SIZE", ")", "data", "=", "self", ".", "_decompressor", ".", "decompress", "(", "data", ",", "n", ")", "self", ".", "_eof", "=", "(", "self", ".", "_decompressor", ".", "eof", "or", "self", ".", "_compress_left", "<=", "0", "and", "not", "self", ".", "_decompressor", ".", "unconsumed_tail", ")", "if", "self", ".", "_eof", ":", "data", "+=", "self", ".", "_decompressor", ".", "flush", "(", ")", "else", ":", "data", "=", "self", ".", "_decompressor", ".", "decompress", "(", "data", ")", "self", ".", "_eof", "=", "self", ".", "_decompressor", ".", "eof", "or", "self", ".", "_compress_left", "<=", "0", "data", "=", "data", "[", ":", "self", ".", "_left", "]", "self", ".", "_left", "-=", "len", "(", "data", ")", "if", "self", ".", "_left", "<=", "0", ":", "self", ".", "_eof", "=", "True", "self", ".", "_update_crc", "(", "data", ")", "return", "data" ]
https://github.com/oracle/graalpython/blob/577e02da9755d916056184ec441c26e00b70145c/graalpython/lib-python/3/zipfile.py#L997-L1031
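read1 backs the public chunked-read path of the stdlib zipfile module; a typical consumer just streams a member through the file object returned by ZipFile.open (the archive and member names here are placeholders):
import zipfile

with zipfile.ZipFile('archive.zip') as zf:       # hypothetical archive name
    with zf.open('member.txt') as member:
        while True:
            chunk = member.read1(65536)          # at most one underlying read
            if not chunk:
                break
            # ... handle the decompressed bytes in `chunk`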
bheinzerling/pyrouge
08e9cc35d713f718a05b02bf3bb2e29947d436ce
pyrouge/Rouge155.py
python
Rouge155.__get_eval_string
( task_id, system_id, system_dir, system_filename, model_dir, model_filenames)
return eval_string
ROUGE can evaluate several system summaries for a given text against several model summaries, i.e. there is an m-to-n relation between system and model summaries. The system summaries are listed in the <PEERS> tag and the model summaries in the <MODELS> tag. pyrouge currently only supports one system summary per text, i.e. it assumes a 1-to-n relation between system and model summaries.
ROUGE can evaluate several system summaries for a given text against several model summaries, i.e. there is an m-to-n relation between system and model summaries. The system summaries are listed in the <PEERS> tag and the model summaries in the <MODELS> tag. pyrouge currently only supports one system summary per text, i.e. it assumes a 1-to-n relation between system and model summaries.
[ "ROUGE", "can", "evaluate", "several", "system", "summaries", "for", "a", "given", "text", "against", "several", "model", "summaries", "i", ".", "e", ".", "there", "is", "an", "m", "-", "to", "-", "n", "relation", "between", "system", "and", "model", "summaries", ".", "The", "system", "summaries", "are", "listed", "in", "the", "<PEERS", ">", "tag", "and", "the", "model", "summaries", "in", "the", "<MODELS", ">", "tag", ".", "pyrouge", "currently", "only", "supports", "one", "system", "summary", "per", "text", "i", ".", "e", ".", "it", "assumes", "a", "1", "-", "to", "-", "n", "relation", "between", "system", "and", "model", "summaries", "." ]
def __get_eval_string( task_id, system_id, system_dir, system_filename, model_dir, model_filenames): """ ROUGE can evaluate several system summaries for a given text against several model summaries, i.e. there is an m-to-n relation between system and model summaries. The system summaries are listed in the <PEERS> tag and the model summaries in the <MODELS> tag. pyrouge currently only supports one system summary per text, i.e. it assumes a 1-to-n relation between system and model summaries. """ peer_elems = "<P ID=\"{id}\">{name}</P>".format( id=system_id, name=system_filename) model_elems = ["<M ID=\"{id}\">{name}</M>".format( id=chr(65 + i), name=name) for i, name in enumerate(model_filenames)] model_elems = "\n\t\t\t".join(model_elems) eval_string = """ <EVAL ID="{task_id}"> <MODEL-ROOT>{model_root}</MODEL-ROOT> <PEER-ROOT>{peer_root}</PEER-ROOT> <INPUT-FORMAT TYPE="SEE"> </INPUT-FORMAT> <PEERS> {peer_elems} </PEERS> <MODELS> {model_elems} </MODELS> </EVAL> """.format( task_id=task_id, model_root=model_dir, model_elems=model_elems, peer_root=system_dir, peer_elems=peer_elems) return eval_string
[ "def", "__get_eval_string", "(", "task_id", ",", "system_id", ",", "system_dir", ",", "system_filename", ",", "model_dir", ",", "model_filenames", ")", ":", "peer_elems", "=", "\"<P ID=\\\"{id}\\\">{name}</P>\"", ".", "format", "(", "id", "=", "system_id", ",", "name", "=", "system_filename", ")", "model_elems", "=", "[", "\"<M ID=\\\"{id}\\\">{name}</M>\"", ".", "format", "(", "id", "=", "chr", "(", "65", "+", "i", ")", ",", "name", "=", "name", ")", "for", "i", ",", "name", "in", "enumerate", "(", "model_filenames", ")", "]", "model_elems", "=", "\"\\n\\t\\t\\t\"", ".", "join", "(", "model_elems", ")", "eval_string", "=", "\"\"\"\n <EVAL ID=\"{task_id}\">\n <MODEL-ROOT>{model_root}</MODEL-ROOT>\n <PEER-ROOT>{peer_root}</PEER-ROOT>\n <INPUT-FORMAT TYPE=\"SEE\">\n </INPUT-FORMAT>\n <PEERS>\n {peer_elems}\n </PEERS>\n <MODELS>\n {model_elems}\n </MODELS>\n </EVAL>\n\"\"\"", ".", "format", "(", "task_id", "=", "task_id", ",", "model_root", "=", "model_dir", ",", "model_elems", "=", "model_elems", ",", "peer_root", "=", "system_dir", ",", "peer_elems", "=", "peer_elems", ")", "return", "eval_string" ]
https://github.com/bheinzerling/pyrouge/blob/08e9cc35d713f718a05b02bf3bb2e29947d436ce/pyrouge/Rouge155.py#L432-L471
DataDog/datadogpy
c71bd8de53aaeffedc5b1d3dd133354d1fa533b7
datadog/api/graphs.py
python
Graph.create
(cls, **params)
return super(Graph, cls).create(method="GET", **params)
Take a snapshot of a graph, returning the full url to the snapshot. :param metric_query: metric query :type metric_query: string query :param start: query start timestamp :type start: POSIX timestamp :param end: query end timestamp :type end: POSIX timestamp :param event_query: a query that will add event bands to the graph :type event_query: string query :returns: Dictionary representing the API's JSON response
Take a snapshot of a graph, returning the full url to the snapshot.
[ "Take", "a", "snapshot", "of", "a", "graph", "returning", "the", "full", "url", "to", "the", "snapshot", "." ]
def create(cls, **params): """ Take a snapshot of a graph, returning the full url to the snapshot. :param metric_query: metric query :type metric_query: string query :param start: query start timestamp :type start: POSIX timestamp :param end: query end timestamp :type end: POSIX timestamp :param event_query: a query that will add event bands to the graph :type event_query: string query :returns: Dictionary representing the API's JSON response """ return super(Graph, cls).create(method="GET", **params)
[ "def", "create", "(", "cls", ",", "*", "*", "params", ")", ":", "return", "super", "(", "Graph", ",", "cls", ")", ".", "create", "(", "method", "=", "\"GET\"", ",", "*", "*", "params", ")" ]
https://github.com/DataDog/datadogpy/blob/c71bd8de53aaeffedc5b1d3dd133354d1fa533b7/datadog/api/graphs.py#L16-L34
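A usage sketch built from the parameters documented above; it assumes the datadog package is initialized with valid keys, and the credential and timestamp values are placeholders:
from datadog import initialize, api

initialize(api_key='<API_KEY>', app_key='<APP_KEY>')  # placeholder credentials

snap = api.Graph.create(
    metric_query='system.load.1{*}',
    start=1609459200,            # POSIX timestamps, per the docstring
    end=1609462800,
    event_query='sources:nagios',
)
print(snap)  # JSON response containing the snapshot URL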
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
ansible/roles/lib_gcloud/library/gcloud_config.py
python
GcloudCLI._add_project_policy
(self, project, member, role)
return self.gcloud_cmd(cmd, output=True, output_type='json')
add an IAM policy binding to a project
add an IAM policy binding to a project
[ "add", "an", "IAM", "policy", "binding", "to", "a", "project" ]
def _add_project_policy(self, project, member, role):
    '''add an IAM policy binding to a project '''
    cmd = ['projects', 'add-iam-policy-binding', project,
           '--member', member, '--role', role]

    cmd.extend(['--format', 'json'])

    return self.gcloud_cmd(cmd, output=True, output_type='json')
[ "def", "_add_project_policy", "(", "self", ",", "project", ",", "member", ",", "role", ")", ":", "cmd", "=", "[", "'projects'", ",", "'add-iam-policy-binding'", ",", "project", ",", "'--member'", ",", "member", ",", "'--role'", ",", "role", "]", "cmd", ".", "extend", "(", "[", "'--format'", ",", "'json'", "]", ")", "return", "self", ".", "gcloud_cmd", "(", "cmd", ",", "output", "=", "True", ",", "output_type", "=", "'json'", ")" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/ansible/roles/lib_gcloud/library/gcloud_config.py#L323-L329
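The wrapper above shells out to gcloud; an equivalent direct invocation (a sketch with placeholder project, member, and role values) looks like this:
import json
import subprocess

out = subprocess.check_output([
    'gcloud', 'projects', 'add-iam-policy-binding', 'my-project',  # placeholder project
    '--member', 'serviceAccount:sa@my-project.iam.gserviceaccount.com',
    '--role', 'roles/viewer',
    '--format', 'json',
])
policy = json.loads(out)  # updated IAM policy for the project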
iclavera/learning_to_adapt
bd7d99ba402521c96631e7d09714128f549db0f1
learning_to_adapt/mujoco_py/mjtypes.py
python
MjvOptionWrapper.label
(self)
return self._wrapped.contents.label
[]
def label(self): return self._wrapped.contents.label
[ "def", "label", "(", "self", ")", ":", "return", "self", ".", "_wrapped", ".", "contents", ".", "label" ]
https://github.com/iclavera/learning_to_adapt/blob/bd7d99ba402521c96631e7d09714128f549db0f1/learning_to_adapt/mujoco_py/mjtypes.py#L1426-L1427
google/ml-fairness-gym
5b1cd336b844059aa4e4426b54d1f0e6b8c4c7e9
environments/recommenders/restaurant_toy_recsim.py
python
User.create_observation
(self)
return {'user_id': self.user_id}
Returns a user observation. Only the user's ID is visible. Their current state is not.
Returns a user observation.
[ "Returns", "a", "user", "observation", "." ]
def create_observation(self): """Returns a user observation. Only the user's ID is visible. Their current state is not. """ return {'user_id': self.user_id}
[ "def", "create_observation", "(", "self", ")", ":", "return", "{", "'user_id'", ":", "self", ".", "user_id", "}" ]
https://github.com/google/ml-fairness-gym/blob/5b1cd336b844059aa4e4426b54d1f0e6b8c4c7e9/environments/recommenders/restaurant_toy_recsim.py#L225-L230
caiiiac/Machine-Learning-with-Python
1a26c4467da41ca4ebc3d5bd789ea942ef79422f
MachineLearning/venv/lib/python3.5/site-packages/scipy/stats/_multivariate.py
python
_squeeze_output
(out)
return out
Remove single-dimensional entries from array and convert to scalar, if necessary.
Remove single-dimensional entries from array and convert to scalar, if necessary.
[ "Remove", "single", "-", "dimensional", "entries", "from", "array", "and", "convert", "to", "scalar", "if", "necessary", "." ]
def _squeeze_output(out): """ Remove single-dimensional entries from array and convert to scalar, if necessary. """ out = out.squeeze() if out.ndim == 0: out = out[()] return out
[ "def", "_squeeze_output", "(", "out", ")", ":", "out", "=", "out", ".", "squeeze", "(", ")", "if", "out", ".", "ndim", "==", "0", ":", "out", "=", "out", "[", "(", ")", "]", "return", "out" ]
https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/scipy/stats/_multivariate.py#L38-L47
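The 0-d indexing trick above is what turns a squeezed scalar array into a plain scalar value:
import numpy as np

out = np.array([[3.5]])   # shape (1, 1)
out = out.squeeze()       # shape () -- still an ndarray
if out.ndim == 0:
    out = out[()]         # extract the scalar
print(type(out), out)     # <class 'numpy.float64'> 3.5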
openstack/nova
b49b7663e1c3073917d5844b81d38db8e86d05c4
nova/api/wsgi.py
python
Loader.load_app
(self, name)
Return the paste URLMap wrapped WSGI application. :param name: Name of the application to load. :returns: Paste URLMap object wrapping the requested application. :raises: `nova.exception.PasteAppNotFound`
Return the paste URLMap wrapped WSGI application.
[ "Return", "the", "paste", "URLMap", "wrapped", "WSGI", "application", "." ]
def load_app(self, name): """Return the paste URLMap wrapped WSGI application. :param name: Name of the application to load. :returns: Paste URLMap object wrapping the requested application. :raises: `nova.exception.PasteAppNotFound` """ try: LOG.debug("Loading app %(name)s from %(path)s", {'name': name, 'path': self.config_path}) return deploy.loadapp("config:%s" % self.config_path, name=name) except LookupError: LOG.exception("Couldn't lookup app: %s", name) raise exception.PasteAppNotFound(name=name, path=self.config_path)
[ "def", "load_app", "(", "self", ",", "name", ")", ":", "try", ":", "LOG", ".", "debug", "(", "\"Loading app %(name)s from %(path)s\"", ",", "{", "'name'", ":", "name", ",", "'path'", ":", "self", ".", "config_path", "}", ")", "return", "deploy", ".", "loadapp", "(", "\"config:%s\"", "%", "self", ".", "config_path", ",", "name", "=", "name", ")", "except", "LookupError", ":", "LOG", ".", "exception", "(", "\"Couldn't lookup app: %s\"", ",", "name", ")", "raise", "exception", ".", "PasteAppNotFound", "(", "name", "=", "name", ",", "path", "=", "self", ".", "config_path", ")" ]
https://github.com/openstack/nova/blob/b49b7663e1c3073917d5844b81d38db8e86d05c4/nova/api/wsgi.py#L246-L260
paperswithcode/axcell
f4648d8fee1aa8482d007a38828da9dc06630f0e
axcell/models/structure/transfo_experiment.py
python
MnliProcessor.get_labels
(self)
return ["contradiction", "entailment", "neutral"]
See base class.
See base class.
[ "See", "base", "class", "." ]
def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"]
[ "def", "get_labels", "(", "self", ")", ":", "return", "[", "\"contradiction\"", ",", "\"entailment\"", ",", "\"neutral\"", "]" ]
https://github.com/paperswithcode/axcell/blob/f4648d8fee1aa8482d007a38828da9dc06630f0e/axcell/models/structure/transfo_experiment.py#L701-L703
scipy/scipy
e0a749f01e79046642ccfdc419edbf9e7ca141ad
scipy/io/matlab/_miobase.py
python
matfile_version
(file_name, *, appendmat=True)
Return major, minor tuple depending on apparent mat file type Where: #. 0,x -> version 4 format mat files #. 1,x -> version 5 format mat files #. 2,x -> version 7.3 format mat files (HDF format) Parameters ---------- file_name : str Name of the mat file (do not need .mat extension if appendmat==True). Can also pass open file-like object. appendmat : bool, optional True to append the .mat extension to the end of the given filename, if not already present. Default is True. Returns ------- major_version : {0, 1, 2} major MATLAB File format version minor_version : int minor MATLAB file format version Raises ------ MatReadError If the file is empty. ValueError The matfile version is unknown. Notes ----- Has the side effect of setting the file read pointer to 0
Return major, minor tuple depending on apparent mat file type
[ "Return", "major", "minor", "tuple", "depending", "on", "apparent", "mat", "file", "type" ]
def matfile_version(file_name, *, appendmat=True): """ Return major, minor tuple depending on apparent mat file type Where: #. 0,x -> version 4 format mat files #. 1,x -> version 5 format mat files #. 2,x -> version 7.3 format mat files (HDF format) Parameters ---------- file_name : str Name of the mat file (do not need .mat extension if appendmat==True). Can also pass open file-like object. appendmat : bool, optional True to append the .mat extension to the end of the given filename, if not already present. Default is True. Returns ------- major_version : {0, 1, 2} major MATLAB File format version minor_version : int minor MATLAB file format version Raises ------ MatReadError If the file is empty. ValueError The matfile version is unknown. Notes ----- Has the side effect of setting the file read pointer to 0 """ from ._mio import _open_file_context with _open_file_context(file_name, appendmat=appendmat) as fileobj: return _get_matfile_version(fileobj)
[ "def", "matfile_version", "(", "file_name", ",", "*", ",", "appendmat", "=", "True", ")", ":", "from", ".", "_mio", "import", "_open_file_context", "with", "_open_file_context", "(", "file_name", ",", "appendmat", "=", "appendmat", ")", "as", "fileobj", ":", "return", "_get_matfile_version", "(", "fileobj", ")" ]
https://github.com/scipy/scipy/blob/e0a749f01e79046642ccfdc419edbf9e7ca141ad/scipy/io/matlab/_miobase.py#L184-L223
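Hedged usage sketch for matfile_version: write an in-memory mat file and inspect its version. Assumes a SciPy recent enough to export matfile_version from scipy.io.matlab; the BytesIO round-trip is illustrative, not from the record.

from io import BytesIO

from scipy.io import savemat
from scipy.io.matlab import matfile_version

buf = BytesIO()
savemat(buf, {'a': [[1, 2, 3]]})     # the default writer emits version 5 format
major, minor = matfile_version(buf)  # per the docstring, leaves the read pointer at 0
print(major, minor)                  # expected: 1 0, i.e. a v5 mat file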
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/distutils/__init__.py
python
sysconfig_get_config_vars
(*args)
return real_vars
[]
def sysconfig_get_config_vars(*args): real_vars = old_get_config_vars(*args) if sys.platform == 'win32': lib_dir = os.path.join(sys.real_prefix, "libs") if isinstance(real_vars, dict) and 'LIBDIR' not in real_vars: real_vars['LIBDIR'] = lib_dir # asked for all elif isinstance(real_vars, list) and 'LIBDIR' in args: real_vars = real_vars + [lib_dir] # asked for list return real_vars
[ "def", "sysconfig_get_config_vars", "(", "*", "args", ")", ":", "real_vars", "=", "old_get_config_vars", "(", "*", "args", ")", "if", "sys", ".", "platform", "==", "'win32'", ":", "lib_dir", "=", "os", ".", "path", ".", "join", "(", "sys", ".", "real_prefix", ",", "\"libs\"", ")", "if", "isinstance", "(", "real_vars", ",", "dict", ")", "and", "'LIBDIR'", "not", "in", "real_vars", ":", "real_vars", "[", "'LIBDIR'", "]", "=", "lib_dir", "# asked for all", "elif", "isinstance", "(", "real_vars", ",", "list", ")", "and", "'LIBDIR'", "in", "args", ":", "real_vars", "=", "real_vars", "+", "[", "lib_dir", "]", "# asked for list", "return", "real_vars" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/distutils/__init__.py#L91-L99
kovidgoyal/calibre
2b41671370f2a9eb1109b9ae901ccf915f1bd0c8
src/calibre/devices/interface.py
python
BookList.remove_book
(self, book)
Remove a book from the booklist. Correct any device metadata at the same time
Remove a book from the booklist. Correct any device metadata at the same time
[ "Remove", "a", "book", "from", "the", "booklist", ".", "Correct", "any", "device", "metadata", "at", "the", "same", "time" ]
def remove_book(self, book): ''' Remove a book from the booklist. Correct any device metadata at the same time ''' raise NotImplementedError()
[ "def", "remove_book", "(", "self", ",", "book", ")", ":", "raise", "NotImplementedError", "(", ")" ]
https://github.com/kovidgoyal/calibre/blob/2b41671370f2a9eb1109b9ae901ccf915f1bd0c8/src/calibre/devices/interface.py#L746-L751
zeromq/pyzmq
29aded1bf017385866dcbf7b92a954f272360060
zmq/_future.py
python
_AsyncSocket._drop_io_state
(self, state)
Stop poller from watching an io_state.
Stop poller from watching an io_state.
[ "Stop", "poller", "from", "watching", "an", "io_state", "." ]
def _drop_io_state(self, state): """Stop poller from watching an io_state.""" if self._state & state: self._state = self._state & (~state) self._update_handler(self._state)
[ "def", "_drop_io_state", "(", "self", ",", "state", ")", ":", "if", "self", ".", "_state", "&", "state", ":", "self", ".", "_state", "=", "self", ".", "_state", "&", "(", "~", "state", ")", "self", ".", "_update_handler", "(", "self", ".", "_state", ")" ]
https://github.com/zeromq/pyzmq/blob/29aded1bf017385866dcbf7b92a954f272360060/zmq/_future.py#L665-L669
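Minimal sketch of the bitmask bookkeeping behind _drop_io_state, with hypothetical flag values standing in for the poller constants.

POLLIN, POLLOUT = 1, 2        # hypothetical flag values for illustration
state = POLLIN | POLLOUT
if state & POLLIN:            # only clear the bit if it is currently set
    state = state & (~POLLIN)
print(state == POLLOUT)       # -> True: POLLIN dropped, POLLOUT untouched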
triaquae/triaquae
bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9
TriAquae/models/django/core/management/commands/inspectdb.py
python
Command.handle_noargs
(self, **options)
[]
def handle_noargs(self, **options): try: for line in self.handle_inspection(options): self.stdout.write("%s\n" % line) except NotImplementedError: raise CommandError("Database inspection isn't supported for the currently selected database backend.")
[ "def", "handle_noargs", "(", "self", ",", "*", "*", "options", ")", ":", "try", ":", "for", "line", "in", "self", ".", "handle_inspection", "(", "options", ")", ":", "self", ".", "stdout", ".", "write", "(", "\"%s\\n\"", "%", "line", ")", "except", "NotImplementedError", ":", "raise", "CommandError", "(", "\"Database inspection isn't supported for the currently selected database backend.\"", ")" ]
https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/django/core/management/commands/inspectdb.py#L24-L29
pyjs/pyjs
6c4a3d3a67300cd5df7f95a67ca9dcdc06950523
examples/splitpanel/__main__.py
python
translate
()
Translate example, MUST call util.translate().
Translate example, MUST call util.translate().
[ "Translate", "example", "MUST", "call", "util", ".", "translate", "()", "." ]
def translate(): '''Translate example, MUST call util.translate().''' util.translate()
[ "def", "translate", "(", ")", ":", "util", ".", "translate", "(", ")" ]
https://github.com/pyjs/pyjs/blob/6c4a3d3a67300cd5df7f95a67ca9dcdc06950523/examples/splitpanel/__main__.py#L21-L23
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
python
phase_time_delta
(phase)
return delta
Calculate the difference between phase start and end times
Calculate the difference between phase start and end times
[ "Calculate", "the", "difference", "between", "phase", "start", "and", "end", "times" ]
def phase_time_delta(phase): """ Calculate the difference between phase start and end times """ time_format = '%Y%m%d%H%M%SZ' phase_start = datetime.strptime(phase['start'], time_format) if 'end' not in phase: # The phase failed so set the end time to now phase_end = datetime.now() else: phase_end = datetime.strptime(phase['end'], time_format) delta = str(phase_end - phase_start).split(".")[0] # Trim microseconds return delta
[ "def", "phase_time_delta", "(", "phase", ")", ":", "time_format", "=", "'%Y%m%d%H%M%SZ'", "phase_start", "=", "datetime", ".", "strptime", "(", "phase", "[", "'start'", "]", ",", "time_format", ")", "if", "'end'", "not", "in", "phase", ":", "# The phase failed so set the end time to now", "phase_end", "=", "datetime", ".", "now", "(", ")", "else", ":", "phase_end", "=", "datetime", ".", "strptime", "(", "phase", "[", "'end'", "]", ",", "time_format", ")", "delta", "=", "str", "(", "phase_end", "-", "phase_start", ")", ".", "split", "(", "\".\"", ")", "[", "0", "]", "# Trim microseconds", "return", "delta" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py#L157-L168
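Worked example of the timestamp arithmetic in phase_time_delta, with made-up phase timestamps.

from datetime import datetime

time_format = '%Y%m%d%H%M%SZ'
phase = {'start': '20240101120000Z', 'end': '20240101120530Z'}  # hypothetical values
start = datetime.strptime(phase['start'], time_format)
end = datetime.strptime(phase['end'], time_format)
print(str(end - start).split(".")[0])   # -> 0:05:30, microseconds trimmed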
mtgjson/mtgjson
961efcd716a0dbcf67e3072afd34bd20010af2b0
mtgjson5/output_generator.py
python
construct_format_map
( all_printings_path: pathlib.Path = MtgjsonConfig().output_path.joinpath( f"{MtgjsonStructuresObject().all_printings}.json" ), normal_sets_only: bool = True, )
return format_map
For each set in AllPrintings, determine what format(s) the set is legal in and put the set's key into that specific entry in the return value. :param all_printings_path: Path to AllPrintings.json :param normal_sets_only: Should we only handle normal sets :return: Format Map for future identifications
For each set in AllPrintings, determine what format(s) the set is legal in and put the set's key into that specific entry in the return value. :param all_printings_path: Path to AllPrintings.json :param normal_sets_only: Should we only handle normal sets :return: Format Map for future identifications
[ "For", "each", "set", "in", "AllPrintings", "determine", "what", "format", "(", "s", ")", "the", "set", "is", "legal", "in", "and", "put", "the", "set", "s", "key", "into", "that", "specific", "entry", "in", "the", "return", "value", ".", ":", "param", "all_printings_path", ":", "Path", "to", "AllPrintings", ".", "json", ":", "param", "normal_sets_only", ":", "Should", "we", "only", "handle", "normal", "sets", ":", "return", ":", "Format", "Map", "for", "future", "identifications" ]
def construct_format_map( all_printings_path: pathlib.Path = MtgjsonConfig().output_path.joinpath( f"{MtgjsonStructuresObject().all_printings}.json" ), normal_sets_only: bool = True, ) -> Dict[str, List[str]]: """ For each set in AllPrintings, determine what format(s) the set is legal in and put the set's key into that specific entry in the return value. :param all_printings_path: Path to AllPrintings.json :param normal_sets_only: Should we only handle normal sets :return: Format Map for future identifications """ format_map: Dict[str, List[str]] = { magic_format: [] for magic_format in constants.SUPPORTED_FORMAT_OUTPUTS } if not all_printings_path.is_file(): LOGGER.warning(f"{all_printings_path} was not found, skipping format map") return {} with all_printings_path.open(encoding="utf-8") as file: content = json.load(file) for set_code_key, set_code_content in content.get("data", {}).items(): if ( normal_sets_only and set_code_content.get("type") not in constants.SUPPORTED_SET_TYPES ): continue formats_set_legal_in = constants.SUPPORTED_FORMAT_OUTPUTS for card in set_code_content.get("cards"): card_legalities = set(card.get("legalities").keys()) formats_set_legal_in = formats_set_legal_in.intersection(card_legalities) for magic_format in formats_set_legal_in: format_map[magic_format].append(set_code_key) return format_map
[ "def", "construct_format_map", "(", "all_printings_path", ":", "pathlib", ".", "Path", "=", "MtgjsonConfig", "(", ")", ".", "output_path", ".", "joinpath", "(", "f\"{MtgjsonStructuresObject().all_printings}.json\"", ")", ",", "normal_sets_only", ":", "bool", "=", "True", ",", ")", "->", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", ":", "format_map", ":", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "=", "{", "magic_format", ":", "[", "]", "for", "magic_format", "in", "constants", ".", "SUPPORTED_FORMAT_OUTPUTS", "}", "if", "not", "all_printings_path", ".", "is_file", "(", ")", ":", "LOGGER", ".", "warning", "(", "f\"{all_printings_path} was not found, skipping format map\"", ")", "return", "{", "}", "with", "all_printings_path", ".", "open", "(", "encoding", "=", "\"utf-8\"", ")", "as", "file", ":", "content", "=", "json", ".", "load", "(", "file", ")", "for", "set_code_key", ",", "set_code_content", "in", "content", ".", "get", "(", "\"data\"", ",", "{", "}", ")", ".", "items", "(", ")", ":", "if", "(", "normal_sets_only", "and", "set_code_content", ".", "get", "(", "\"type\"", ")", "not", "in", "constants", ".", "SUPPORTED_SET_TYPES", ")", ":", "continue", "formats_set_legal_in", "=", "constants", ".", "SUPPORTED_FORMAT_OUTPUTS", "for", "card", "in", "set_code_content", ".", "get", "(", "\"cards\"", ")", ":", "card_legalities", "=", "set", "(", "card", ".", "get", "(", "\"legalities\"", ")", ".", "keys", "(", ")", ")", "formats_set_legal_in", "=", "formats_set_legal_in", ".", "intersection", "(", "card_legalities", ")", "for", "magic_format", "in", "formats_set_legal_in", ":", "format_map", "[", "magic_format", "]", ".", "append", "(", "set_code_key", ")", "return", "format_map" ]
https://github.com/mtgjson/mtgjson/blob/961efcd716a0dbcf67e3072afd34bd20010af2b0/mtgjson5/output_generator.py#L277-L317
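Miniature of the legality-intersection step in construct_format_map, with hypothetical formats and cards: a set stays mapped to a format only if every card in it is legal there.

supported = {"standard", "modern", "legacy"}   # stand-in for SUPPORTED_FORMAT_OUTPUTS
cards = [
    {"legalities": {"modern": "Legal", "legacy": "Legal"}},
    {"legalities": {"standard": "Legal", "modern": "Legal"}},
]
legal_in = supported
for card in cards:
    legal_in = legal_in.intersection(card["legalities"].keys())
print(sorted(legal_in))   # -> ['modern'], the only format shared by every card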
securesystemslab/zippy
ff0e84ac99442c2c55fe1d285332cfd4e185e089
zippy/lib-python/3/ast.py
python
NodeTransformer.generic_visit
(self, node)
return node
[]
def generic_visit(self, node): for field, old_value in iter_fields(node): old_value = getattr(node, field, None) if isinstance(old_value, list): new_values = [] for value in old_value: if isinstance(value, AST): value = self.visit(value) if value is None: continue elif not isinstance(value, AST): new_values.extend(value) continue new_values.append(value) old_value[:] = new_values elif isinstance(old_value, AST): new_node = self.visit(old_value) if new_node is None: delattr(node, field) else: setattr(node, field, new_node) return node
[ "def", "generic_visit", "(", "self", ",", "node", ")", ":", "for", "field", ",", "old_value", "in", "iter_fields", "(", "node", ")", ":", "old_value", "=", "getattr", "(", "node", ",", "field", ",", "None", ")", "if", "isinstance", "(", "old_value", ",", "list", ")", ":", "new_values", "=", "[", "]", "for", "value", "in", "old_value", ":", "if", "isinstance", "(", "value", ",", "AST", ")", ":", "value", "=", "self", ".", "visit", "(", "value", ")", "if", "value", "is", "None", ":", "continue", "elif", "not", "isinstance", "(", "value", ",", "AST", ")", ":", "new_values", ".", "extend", "(", "value", ")", "continue", "new_values", ".", "append", "(", "value", ")", "old_value", "[", ":", "]", "=", "new_values", "elif", "isinstance", "(", "old_value", ",", "AST", ")", ":", "new_node", "=", "self", ".", "visit", "(", "old_value", ")", "if", "new_node", "is", "None", ":", "delattr", "(", "node", ",", "field", ")", "else", ":", "setattr", "(", "node", ",", "field", ",", "new_node", ")", "return", "node" ]
https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/ast.py#L297-L318
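Self-contained demo of the deletion path in NodeTransformer.generic_visit: a visit_* method returning None has its node spliced out of the parent's list field.

import ast

class DropPrints(ast.NodeTransformer):
    def visit_Expr(self, node):
        # Returning None tells generic_visit to drop this statement from Module.body.
        if isinstance(node.value, ast.Call) and getattr(node.value.func, 'id', '') == 'print':
            return None
        return node

tree = DropPrints().visit(ast.parse("x = 1\nprint(x)\ny = x + 1"))
print(len(tree.body))   # -> 2: the print statement was removed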
dhtech/swboot
b741e8f90f3941a7619e12addf337bed1d299204
tftpy/TftpContexts.py
python
TftpContextClientUpload.end
(self)
Finish up the context.
Finish up the context.
[ "Finish", "up", "the", "context", "." ]
def end(self): """Finish up the context.""" TftpContext.end(self) self.metrics.end_time = time.time() log.debug("Set metrics.end_time to %s" % self.metrics.end_time) self.metrics.compute()
[ "def", "end", "(", "self", ")", ":", "TftpContext", ".", "end", "(", "self", ")", "self", ".", "metrics", ".", "end_time", "=", "time", ".", "time", "(", ")", "log", ".", "debug", "(", "\"Set metrics.end_time to %s\"", "%", "self", ".", "metrics", ".", "end_time", ")", "self", ".", "metrics", ".", "compute", "(", ")" ]
https://github.com/dhtech/swboot/blob/b741e8f90f3941a7619e12addf337bed1d299204/tftpy/TftpContexts.py#L349-L354
ChineseGLUE/ChineseGLUE
1591b85cf5427c2ff60f718d359ecb71d2b44879
baselines/models/xlnet/tpu_estimator.py
python
TPUInfeedOutfeedSessionHookForPrediction.__init__
(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op, rendezvous=None, master=None, session_config=None)
[]
def __init__(self, ctx, enqueue_ops, dequeue_ops, tpu_compile_op, rendezvous=None, master=None, session_config=None): super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__( ctx, enqueue_ops, dequeue_ops, tpu_compile_op=tpu_compile_op, run_infeed_loop_on_coordinator=False, rendezvous=rendezvous, master=master, session_config=session_config)
[ "def", "__init__", "(", "self", ",", "ctx", ",", "enqueue_ops", ",", "dequeue_ops", ",", "tpu_compile_op", ",", "rendezvous", "=", "None", ",", "master", "=", "None", ",", "session_config", "=", "None", ")", ":", "super", "(", "TPUInfeedOutfeedSessionHookForPrediction", ",", "self", ")", ".", "__init__", "(", "ctx", ",", "enqueue_ops", ",", "dequeue_ops", ",", "tpu_compile_op", "=", "tpu_compile_op", ",", "run_infeed_loop_on_coordinator", "=", "False", ",", "rendezvous", "=", "rendezvous", ",", "master", "=", "master", ",", "session_config", "=", "session_config", ")" ]
https://github.com/ChineseGLUE/ChineseGLUE/blob/1591b85cf5427c2ff60f718d359ecb71d2b44879/baselines/models/xlnet/tpu_estimator.py#L559-L569
jython/frozen-mirror
b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99
lib-python/2.7/lib-tk/ttk.py
python
Radiobutton.__init__
(self, master=None, **kw)
Construct a Ttk Radiobutton with parent master. STANDARD OPTIONS class, compound, cursor, image, state, style, takefocus, text, textvariable, underline, width WIDGET-SPECIFIC OPTIONS command, value, variable
Construct a Ttk Radiobutton with parent master.
[ "Construct", "a", "Ttk", "Radiobutton", "with", "parent", "master", "." ]
def __init__(self, master=None, **kw): """Construct a Ttk Radiobutton with parent master. STANDARD OPTIONS class, compound, cursor, image, state, style, takefocus, text, textvariable, underline, width WIDGET-SPECIFIC OPTIONS command, value, variable """ Widget.__init__(self, master, "ttk::radiobutton", kw)
[ "def", "__init__", "(", "self", ",", "master", "=", "None", ",", "*", "*", "kw", ")", ":", "Widget", ".", "__init__", "(", "self", ",", "master", ",", "\"ttk::radiobutton\"", ",", "kw", ")" ]
https://github.com/jython/frozen-mirror/blob/b8d7aa4cee50c0c0fe2f4b235dd62922dd0f3f99/lib-python/2.7/lib-tk/ttk.py#L1035-L1047
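Hedged usage sketch for the Radiobutton wrapper: two buttons sharing one variable, exercising the widget-specific value/variable options. Shown with Python 3 module names (tkinter/ttk) rather than the record's Python 2 lib-tk; needs a display to run.

import tkinter as tk
from tkinter import ttk

root = tk.Tk()
choice = tk.StringVar(value="a")
ttk.Radiobutton(root, text="Option A", value="a", variable=choice).pack()
ttk.Radiobutton(root, text="Option B", value="b", variable=choice).pack()
# root.mainloop()   # uncomment for an interactive session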
fake-name/ReadableWebProxy
ed5c7abe38706acc2684a1e6cd80242a03c5f010
WebMirror/management/rss_parser_funcs/feed_parse_extractOtakutlsBlogspotCom.py
python
extractOtakutlsBlogspotCom
(item)
return False
Parser for 'otakutls.blogspot.com'
Parser for 'otakutls.blogspot.com'
[ "Parser", "for", "otakutls", ".", "blogspot", ".", "com" ]
def extractOtakutlsBlogspotCom(item): ''' Parser for 'otakutls.blogspot.com' ''' vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title']) if not (chp or vol) or "preview" in item['title'].lower(): return None tagmap = [ ('Blade And Sword God Soverigen', 'Blade And Sword God Soverigen', 'translated'), ] for tagname, name, tl_type in tagmap: if tagname in item['tags']: return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
[ "def", "extractOtakutlsBlogspotCom", "(", "item", ")", ":", "vol", ",", "chp", ",", "frag", ",", "postfix", "=", "extractVolChapterFragmentPostfix", "(", "item", "[", "'title'", "]", ")", "if", "not", "(", "chp", "or", "vol", ")", "or", "\"preview\"", "in", "item", "[", "'title'", "]", ".", "lower", "(", ")", ":", "return", "None", "tagmap", "=", "[", "(", "'Blade And Sword God Soverigen'", ",", "'Blade And Sword God Soverigen'", ",", "'translated'", ")", ",", "]", "for", "tagname", ",", "name", ",", "tl_type", "in", "tagmap", ":", "if", "tagname", "in", "item", "[", "'tags'", "]", ":", "return", "buildReleaseMessageWithType", "(", "item", ",", "name", ",", "vol", ",", "chp", ",", "frag", "=", "frag", ",", "postfix", "=", "postfix", ",", "tl_type", "=", "tl_type", ")", "return", "False" ]
https://github.com/fake-name/ReadableWebProxy/blob/ed5c7abe38706acc2684a1e6cd80242a03c5f010/WebMirror/management/rss_parser_funcs/feed_parse_extractOtakutlsBlogspotCom.py#L1-L19
facebookresearch/CrypTen
90bf38b4f80726c808f322efb0ce430dcdf5e5ec
crypten/mpc/primitives/binary.py
python
BinarySharedTensor.where
(self, condition, y)
return (self & condition_expanded) ^ y_masked
Selects elements from self or y based on condition Args: condition (torch.bool or BinarySharedTensor): when True yield self, otherwise yield y. Note condition is not bitwise. y (torch.tensor or BinarySharedTensor): selected when condition is False. Returns: BinarySharedTensor or torch.tensor.
Selects elements from self or y based on condition
[ "Selects", "elements", "from", "self", "or", "y", "based", "on", "condition" ]
def where(self, condition, y): """Selects elements from self or y based on condition Args: condition (torch.bool or BinarySharedTensor): when True yield self, otherwise yield y. Note condition is not bitwise. y (torch.tensor or BinarySharedTensor): selected when condition is False. Returns: BinarySharedTensor or torch.tensor. """ if is_tensor(condition): condition = condition.long() is_binary = ((condition == 1) | (condition == 0)).all() assert is_binary, "condition values must be 0 or 1" # -1 mult expands 0 into binary 00...00 and 1 into 11...11 condition_expanded = -condition y_masked = y & (~condition_expanded) elif isinstance(condition, BinarySharedTensor): condition_expanded = condition.clone() # -1 mult expands binary while & 1 isolates first bit condition_expanded.share = -(condition_expanded.share & 1) # encrypted tensor must be first operand y_masked = (~condition_expanded) & y else: msg = f"condition {condition} must be torch.bool, or BinarySharedTensor" raise ValueError(msg) return (self & condition_expanded) ^ y_masked
[ "def", "where", "(", "self", ",", "condition", ",", "y", ")", ":", "if", "is_tensor", "(", "condition", ")", ":", "condition", "=", "condition", ".", "long", "(", ")", "is_binary", "=", "(", "(", "condition", "==", "1", ")", "|", "(", "condition", "==", "0", ")", ")", ".", "all", "(", ")", "assert", "is_binary", ",", "\"condition values must be 0 or 1\"", "# -1 mult expands 0 into binary 00...00 and 1 into 11...11", "condition_expanded", "=", "-", "condition", "y_masked", "=", "y", "&", "(", "~", "condition_expanded", ")", "elif", "isinstance", "(", "condition", ",", "BinarySharedTensor", ")", ":", "condition_expanded", "=", "condition", ".", "clone", "(", ")", "# -1 mult expands binary while & 1 isolates first bit", "condition_expanded", ".", "share", "=", "-", "(", "condition_expanded", ".", "share", "&", "1", ")", "# encrypted tensor must be first operand", "y_masked", "=", "(", "~", "condition_expanded", ")", "&", "y", "else", ":", "msg", "=", "f\"condition {condition} must be torch.bool, or BinarySharedTensor\"", "raise", "ValueError", "(", "msg", ")", "return", "(", "self", "&", "condition_expanded", ")", "^", "y_masked" ]
https://github.com/facebookresearch/CrypTen/blob/90bf38b4f80726c808f322efb0ce430dcdf5e5ec/crypten/mpc/primitives/binary.py#L401-L429
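Plain-torch illustration of the bit trick in BinarySharedTensor.where: negating a 0/1 condition expands it into all-zeros/all-ones masks in two's complement, so & and ^ emulate a select. No CrypTen setup assumed.

import torch

cond = torch.tensor([1, 0, 1])
mask = -cond                        # 1 -> ...1111 (-1), 0 -> ...0000
x = torch.tensor([10, 20, 30])
y = torch.tensor([70, 80, 90])
print((x & mask) ^ (y & ~mask))     # -> tensor([10, 80, 30])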
OpenCobolIDE/OpenCobolIDE
c78d0d335378e5fe0a5e74f53c19b68b55e85388
open_cobol_ide/extlibs/future/backports/urllib/request.py
python
AbstractHTTPHandler.do_open
(self, http_class, req, **http_conn_args)
return r
Return an HTTPResponse object for the request, using http_class. http_class must implement the HTTPConnection API from http.client.
Return an HTTPResponse object for the request, using http_class.
[ "Return", "an", "HTTPResponse", "object", "for", "the", "request", "using", "http_class", "." ]
def do_open(self, http_class, req, **http_conn_args): """Return an HTTPResponse object for the request, using http_class. http_class must implement the HTTPConnection API from http.client. """ host = req.host if not host: raise URLError('no host given') # will parse host:port h = http_class(host, timeout=req.timeout, **http_conn_args) headers = dict(req.unredirected_hdrs) headers.update(dict((k, v) for k, v in req.headers.items() if k not in headers)) # TODO(jhylton): Should this be redesigned to handle # persistent connections? # We want to make an HTTP/1.1 request, but the addinfourl # class isn't prepared to deal with a persistent connection. # It will try to read all remaining data from the socket, # which will block while the server waits for the next request. # So make sure the connection gets closed after the (only) # request. headers["Connection"] = "close" headers = dict((name.title(), val) for name, val in headers.items()) if req._tunnel_host: tunnel_headers = {} proxy_auth_hdr = "Proxy-Authorization" if proxy_auth_hdr in headers: tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr] # Proxy-Authorization should not be sent to origin # server. del headers[proxy_auth_hdr] h.set_tunnel(req._tunnel_host, headers=tunnel_headers) try: h.request(req.get_method(), req.selector, req.data, headers) except socket.error as err: # timeout error h.close() raise URLError(err) else: r = h.getresponse() # If the server does not send us a 'Connection: close' header, # HTTPConnection assumes the socket should be left open. Manually # mark the socket to be closed when this response object goes away. if h.sock: h.sock.close() h.sock = None r.url = req.get_full_url() # This line replaces the .msg attribute of the HTTPResponse # with .headers, because urllib clients expect the response to # have the reason in .msg. It would be good to mark this # attribute is deprecated and get then to use info() or # .headers. r.msg = r.reason return r
[ "def", "do_open", "(", "self", ",", "http_class", ",", "req", ",", "*", "*", "http_conn_args", ")", ":", "host", "=", "req", ".", "host", "if", "not", "host", ":", "raise", "URLError", "(", "'no host given'", ")", "# will parse host:port", "h", "=", "http_class", "(", "host", ",", "timeout", "=", "req", ".", "timeout", ",", "*", "*", "http_conn_args", ")", "headers", "=", "dict", "(", "req", ".", "unredirected_hdrs", ")", "headers", ".", "update", "(", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "req", ".", "headers", ".", "items", "(", ")", "if", "k", "not", "in", "headers", ")", ")", "# TODO(jhylton): Should this be redesigned to handle", "# persistent connections?", "# We want to make an HTTP/1.1 request, but the addinfourl", "# class isn't prepared to deal with a persistent connection.", "# It will try to read all remaining data from the socket,", "# which will block while the server waits for the next request.", "# So make sure the connection gets closed after the (only)", "# request.", "headers", "[", "\"Connection\"", "]", "=", "\"close\"", "headers", "=", "dict", "(", "(", "name", ".", "title", "(", ")", ",", "val", ")", "for", "name", ",", "val", "in", "headers", ".", "items", "(", ")", ")", "if", "req", ".", "_tunnel_host", ":", "tunnel_headers", "=", "{", "}", "proxy_auth_hdr", "=", "\"Proxy-Authorization\"", "if", "proxy_auth_hdr", "in", "headers", ":", "tunnel_headers", "[", "proxy_auth_hdr", "]", "=", "headers", "[", "proxy_auth_hdr", "]", "# Proxy-Authorization should not be sent to origin", "# server.", "del", "headers", "[", "proxy_auth_hdr", "]", "h", ".", "set_tunnel", "(", "req", ".", "_tunnel_host", ",", "headers", "=", "tunnel_headers", ")", "try", ":", "h", ".", "request", "(", "req", ".", "get_method", "(", ")", ",", "req", ".", "selector", ",", "req", ".", "data", ",", "headers", ")", "except", "socket", ".", "error", "as", "err", ":", "# timeout error", "h", ".", "close", "(", ")", "raise", "URLError", "(", "err", ")", "else", ":", "r", "=", "h", ".", "getresponse", "(", ")", "# If the server does not send us a 'Connection: close' header,", "# HTTPConnection assumes the socket should be left open. Manually", "# mark the socket to be closed when this response object goes away.", "if", "h", ".", "sock", ":", "h", ".", "sock", ".", "close", "(", ")", "h", ".", "sock", "=", "None", "r", ".", "url", "=", "req", ".", "get_full_url", "(", ")", "# This line replaces the .msg attribute of the HTTPResponse", "# with .headers, because urllib clients expect the response to", "# have the reason in .msg. It would be good to mark this", "# attribute is deprecated and get then to use info() or", "# .headers.", "r", ".", "msg", "=", "r", ".", "reason", "return", "r" ]
https://github.com/OpenCobolIDE/OpenCobolIDE/blob/c78d0d335378e5fe0a5e74f53c19b68b55e85388/open_cobol_ide/extlibs/future/backports/urllib/request.py#L1245-L1305
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_utils/filter_plugins/openshift_master.py
python
IdentityProviderBase.get_default
(key)
get a default value for a given key
get a default value for a given key
[ "get", "a", "default", "value", "for", "a", "given", "key" ]
def get_default(key): ''' get a default value for a given key ''' if key == 'mappingMethod': return 'claim' else: return None
[ "def", "get_default", "(", "key", ")", ":", "if", "key", "==", "'mappingMethod'", ":", "return", "'claim'", "else", ":", "return", "None" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_utils/filter_plugins/openshift_master.py#L92-L97
junsukchoe/ADL
dab2e78163bd96970ec9ae41de62835332dbf4fe
tensorpack/callbacks/monitor.py
python
MonitorBase.process
(self, name, val)
Process a key-value pair.
Process a key-value pair.
[ "Process", "a", "key", "-", "value", "pair", "." ]
def process(self, name, val): """ Process a key-value pair. """ pass
[ "def", "process", "(", "self", ",", "name", ",", "val", ")", ":", "pass" ]
https://github.com/junsukchoe/ADL/blob/dab2e78163bd96970ec9ae41de62835332dbf4fe/tensorpack/callbacks/monitor.py#L72-L76
mudpi/mudpi-core
fb206b1136f529c7197f1e6b29629ed05630d377
mudpi/extensions/bme280/sensor.py
python
BME280Sensor.state
(self)
return self._state
Return the state of the component (from memory, no IO!)
Return the state of the component (from memory, no IO!)
[ "Return", "the", "state", "of", "the", "component", "(", "from", "memory", "no", "IO!", ")" ]
def state(self): """ Return the state of the component (from memory, no IO!) """ return self._state
[ "def", "state", "(", "self", ")", ":", "return", "self", ".", "_state" ]
https://github.com/mudpi/mudpi-core/blob/fb206b1136f529c7197f1e6b29629ed05630d377/mudpi/extensions/bme280/sensor.py#L67-L69
cortex-lab/phy
9a330b9437a3d0b40a37a201d147224e6e7fb462
phy/plot/base.py
python
BaseVisual.toggle
(self)
Toggle the visual visibility.
Toggle the visual visibility.
[ "Toggle", "the", "visual", "visibility", "." ]
def toggle(self): """Toggle the visual visibility.""" self._hidden = not self._hidden
[ "def", "toggle", "(", "self", ")", ":", "self", ".", "_hidden", "=", "not", "self", ".", "_hidden" ]
https://github.com/cortex-lab/phy/blob/9a330b9437a3d0b40a37a201d147224e6e7fb462/phy/plot/base.py#L138-L140
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
src/transformers/models/perceiver/modeling_perceiver.py
python
Conv2dSamePadding.__init__
(self, *args, **kwargs)
[]
def __init__(self, *args, **kwargs): super(Conv2dSamePadding, self).__init__(*args, **kwargs) self.zero_pad_2d = nn.ZeroPad2d( reduce(__add__, [(k // 2 + (k - 2 * (k // 2)) - 1, k // 2) for k in self.kernel_size[::-1]]) )
[ "def", "__init__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", "Conv2dSamePadding", ",", "self", ")", ".", "__init__", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "zero_pad_2d", "=", "nn", ".", "ZeroPad2d", "(", "reduce", "(", "__add__", ",", "[", "(", "k", "//", "2", "+", "(", "k", "-", "2", "*", "(", "k", "//", "2", ")", ")", "-", "1", ",", "k", "//", "2", ")", "for", "k", "in", "self", ".", "kernel_size", "[", ":", ":", "-", "1", "]", "]", ")", ")" ]
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/src/transformers/models/perceiver/modeling_perceiver.py#L2528-L2532
awslabs/mxboard
432d4df2489ecf6dbb251d7f96f1ccadb368997a
python/mxboard/event_file_writer.py
python
_get_sentinel_event
()
return event_pb2.Event()
Generate a sentinel event for terminating worker.
Generate a sentinel event for terminating worker.
[ "Generate", "a", "sentinel", "event", "for", "terminating", "worker", "." ]
def _get_sentinel_event(): """Generate a sentinel event for terminating worker.""" return event_pb2.Event()
[ "def", "_get_sentinel_event", "(", ")", ":", "return", "event_pb2", ".", "Event", "(", ")" ]
https://github.com/awslabs/mxboard/blob/432d4df2489ecf6dbb251d7f96f1ccadb368997a/python/mxboard/event_file_writer.py#L110-L112
quodlibet/quodlibet
e3099c89f7aa6524380795d325cc14630031886c
quodlibet/packages/raven/utils/stacks.py
python
iter_stack_frames
(frames=None)
Given an optional list of frames (defaults to current stack), iterates over all frames that do not contain the ``__traceback_hide__`` local variable.
Given an optional list of frames (defaults to current stack), iterates over all frames that do not contain the ``__traceback_hide__`` local variable.
[ "Given", "an", "optional", "list", "of", "frames", "(", "defaults", "to", "current", "stack", ")", "iterates", "over", "all", "frames", "that", "do", "not", "contain", "the", "__traceback_hide__", "local", "variable", "." ]
def iter_stack_frames(frames=None): """ Given an optional list of frames (defaults to current stack), iterates over all frames that do not contain the ``__traceback_hide__`` local variable. """ if not frames: frames = inspect.stack()[1:] for frame, lineno in ((f[0], f[2]) for f in frames): f_locals = getattr(frame, 'f_locals', {}) if not _getitem_from_frame(f_locals, '__traceback_hide__'): yield frame, lineno
[ "def", "iter_stack_frames", "(", "frames", "=", "None", ")", ":", "if", "not", "frames", ":", "frames", "=", "inspect", ".", "stack", "(", ")", "[", "1", ":", "]", "for", "frame", ",", "lineno", "in", "(", "(", "f", "[", "0", "]", ",", "f", "[", "2", "]", ")", "for", "f", "in", "frames", ")", ":", "f_locals", "=", "getattr", "(", "frame", ",", "'f_locals'", ",", "{", "}", ")", "if", "not", "_getitem_from_frame", "(", "f_locals", ",", "'__traceback_hide__'", ")", ":", "yield", "frame", ",", "lineno" ]
https://github.com/quodlibet/quodlibet/blob/e3099c89f7aa6524380795d325cc14630031886c/quodlibet/packages/raven/utils/stacks.py#L128-L140
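Runnable sketch of the __traceback_hide__ convention used by iter_stack_frames, with a plain dict lookup standing in for the _getitem_from_frame helper.

import inspect

def iter_visible(frames):
    # Plain-dict stand-in for _getitem_from_frame.
    return [f for f in frames if not f.f_locals.get('__traceback_hide__')]

def helper():
    __traceback_hide__ = True                     # sentinel local the iterator skips
    return [fi.frame for fi in inspect.stack()]   # includes helper's own frame

frames = helper()
print(len(frames) - len(iter_visible(frames)))    # -> 1: only helper's frame is hidden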
pyqt/examples
843bb982917cecb2350b5f6d7f42c9b7fb142ec1
src/pyqt-official/draganddrop/puzzle/puzzle.py
python
PuzzleWidget.findPiece
(self, pieceRect)
[]
def findPiece(self, pieceRect): try: return self.pieceRects.index(pieceRect) except ValueError: return -1
[ "def", "findPiece", "(", "self", ",", "pieceRect", ")", ":", "try", ":", "return", "self", ".", "pieceRects", ".", "index", "(", "pieceRect", ")", "except", "ValueError", ":", "return", "-", "1" ]
https://github.com/pyqt/examples/blob/843bb982917cecb2350b5f6d7f42c9b7fb142ec1/src/pyqt-official/draganddrop/puzzle/puzzle.py#L134-L138
sqall01/alertR
e1d1a83e54f876cc4cd7bd87387e05cb75d4dc13
managerClientKeypad/lib/globalData/sensorObjects.py
python
_SensorData.deepcopy
(obj)
This function copies all attributes of the given object to a new data object. :param obj: :return: object of this class
This function copies all attributes of the given object to a new data object. :param obj: :return: object of this class
[ "This", "function", "copies", "all", "attributes", "of", "the", "given", "object", "to", "a", "new", "data", "object", ".", ":", "param", "obj", ":", ":", "return", ":", "object", "of", "this", "class" ]
def deepcopy(obj): """ This function copies all attributes of the given object to a new data object. :param obj: :return: object of this class """ raise NotImplementedError("Abstract class.")
[ "def", "deepcopy", "(", "obj", ")", ":", "raise", "NotImplementedError", "(", "\"Abstract class.\"", ")" ]
https://github.com/sqall01/alertR/blob/e1d1a83e54f876cc4cd7bd87387e05cb75d4dc13/managerClientKeypad/lib/globalData/sensorObjects.py#L37-L43
HypothesisWorks/hypothesis
d1bfc4acc86899caa7a40f892322e1a69fbf36f4
hypothesis-python/src/hypothesis/internal/conjecture/engine.py
python
ConjectureRunner.cached_test_function
(self, buffer, error_on_discard=False, extend=0)
return result
Checks the tree to see if we've tested this buffer, and returns the previous result if we have. Otherwise we call through to ``test_function``, and return a fresh result. If ``error_on_discard`` is set to True this will raise ``ContainsDiscard`` in preference to running the actual test function. This is to allow us to skip test cases we expect to be redundant in some cases. Note that it may be the case that we don't raise ``ContainsDiscard`` even if the result has discards if we cannot determine from previous runs whether it will have a discard.
Checks the tree to see if we've tested this buffer, and returns the previous result if we have.
[ "Checks", "the", "tree", "to", "see", "if", "we", "ve", "tested", "this", "buffer", "and", "returns", "the", "previous", "result", "if", "we", "have", "." ]
def cached_test_function(self, buffer, error_on_discard=False, extend=0): """Checks the tree to see if we've tested this buffer, and returns the previous result if we have. Otherwise we call through to ``test_function``, and return a fresh result. If ``error_on_discard`` is set to True this will raise ``ContainsDiscard`` in preference to running the actual test function. This is to allow us to skip test cases we expect to be redundant in some cases. Note that it may be the case that we don't raise ``ContainsDiscard`` even if the result has discards if we cannot determine from previous runs whether it will have a discard. """ buffer = bytes(buffer)[:BUFFER_SIZE] max_length = min(BUFFER_SIZE, len(buffer) + extend) def check_result(result): assert result is Overrun or ( isinstance(result, ConjectureResult) and result.status != Status.OVERRUN ) return result try: cached = check_result(self.__data_cache[buffer]) if cached.status > Status.OVERRUN or extend == 0: return cached except KeyError: pass if error_on_discard: class DiscardObserver(DataObserver): def kill_branch(self): raise ContainsDiscard() observer = DiscardObserver() else: observer = DataObserver() dummy_data = self.new_conjecture_data( prefix=buffer, max_length=max_length, observer=observer ) try: self.tree.simulate_test_function(dummy_data) except PreviouslyUnseenBehaviour: pass else: if dummy_data.status > Status.OVERRUN: dummy_data.freeze() try: return self.__data_cache[dummy_data.buffer] except KeyError: pass else: self.__data_cache[buffer] = Overrun return Overrun # We didn't find a match in the tree, so we need to run the test # function normally. Note that test_function will automatically # add this to the tree so we don't need to update the cache. result = None data = self.new_conjecture_data( prefix=max((buffer, dummy_data.buffer), key=len), max_length=max_length ) self.test_function(data) result = check_result(data.as_result()) if extend == 0 or (result is not Overrun and len(result.buffer) <= len(buffer)): self.__data_cache[buffer] = result return result
[ "def", "cached_test_function", "(", "self", ",", "buffer", ",", "error_on_discard", "=", "False", ",", "extend", "=", "0", ")", ":", "buffer", "=", "bytes", "(", "buffer", ")", "[", ":", "BUFFER_SIZE", "]", "max_length", "=", "min", "(", "BUFFER_SIZE", ",", "len", "(", "buffer", ")", "+", "extend", ")", "def", "check_result", "(", "result", ")", ":", "assert", "result", "is", "Overrun", "or", "(", "isinstance", "(", "result", ",", "ConjectureResult", ")", "and", "result", ".", "status", "!=", "Status", ".", "OVERRUN", ")", "return", "result", "try", ":", "cached", "=", "check_result", "(", "self", ".", "__data_cache", "[", "buffer", "]", ")", "if", "cached", ".", "status", ">", "Status", ".", "OVERRUN", "or", "extend", "==", "0", ":", "return", "cached", "except", "KeyError", ":", "pass", "if", "error_on_discard", ":", "class", "DiscardObserver", "(", "DataObserver", ")", ":", "def", "kill_branch", "(", "self", ")", ":", "raise", "ContainsDiscard", "(", ")", "observer", "=", "DiscardObserver", "(", ")", "else", ":", "observer", "=", "DataObserver", "(", ")", "dummy_data", "=", "self", ".", "new_conjecture_data", "(", "prefix", "=", "buffer", ",", "max_length", "=", "max_length", ",", "observer", "=", "observer", ")", "try", ":", "self", ".", "tree", ".", "simulate_test_function", "(", "dummy_data", ")", "except", "PreviouslyUnseenBehaviour", ":", "pass", "else", ":", "if", "dummy_data", ".", "status", ">", "Status", ".", "OVERRUN", ":", "dummy_data", ".", "freeze", "(", ")", "try", ":", "return", "self", ".", "__data_cache", "[", "dummy_data", ".", "buffer", "]", "except", "KeyError", ":", "pass", "else", ":", "self", ".", "__data_cache", "[", "buffer", "]", "=", "Overrun", "return", "Overrun", "# We didn't find a match in the tree, so we need to run the test", "# function normally. Note that test_function will automatically", "# add this to the tree so we don't need to update the cache.", "result", "=", "None", "data", "=", "self", ".", "new_conjecture_data", "(", "prefix", "=", "max", "(", "(", "buffer", ",", "dummy_data", ".", "buffer", ")", ",", "key", "=", "len", ")", ",", "max_length", "=", "max_length", ")", "self", ".", "test_function", "(", "data", ")", "result", "=", "check_result", "(", "data", ".", "as_result", "(", ")", ")", "if", "extend", "==", "0", "or", "(", "result", "is", "not", "Overrun", "and", "len", "(", "result", ".", "buffer", ")", "<=", "len", "(", "buffer", ")", ")", ":", "self", ".", "__data_cache", "[", "buffer", "]", "=", "result", "return", "result" ]
https://github.com/HypothesisWorks/hypothesis/blob/d1bfc4acc86899caa7a40f892322e1a69fbf36f4/hypothesis-python/src/hypothesis/internal/conjecture/engine.py#L982-L1055
home-assistant/core
265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1
homeassistant/helpers/config_validation.py
python
match_all
(value: T)
return value
Validate that matches all values.
Validate that matches all values.
[ "Validate", "that", "matches", "all", "values", "." ]
def match_all(value: T) -> T: """Validate that matches all values.""" return value
[ "def", "match_all", "(", "value", ":", "T", ")", "->", "T", ":", "return", "value" ]
https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/helpers/config_validation.py#L468-L470
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_vendored_deps/library/oc_configmap.py
python
OpenShiftCLI._create
(self, fname)
return self.openshift_cmd(['create', '-f', fname])
call oc create on a filename
call oc create on a filename
[ "call", "oc", "create", "on", "a", "filename" ]
def _create(self, fname): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname])
[ "def", "_create", "(", "self", ",", "fname", ")", ":", "return", "self", ".", "openshift_cmd", "(", "[", "'create'", ",", "'-f'", ",", "fname", "]", ")" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_vendored_deps/library/oc_configmap.py#L945-L947
google/grr
8ad8a4d2c5a93c92729206b7771af19d92d4f915
grr/server/grr_response_server/flows/general/filesystem.py
python
GlobLogic._MatchPath
(self, pathspec, response)
Check if the responses matches the pathspec (considering options).
Check if the responses matches the pathspec (considering options).
[ "Check", "if", "the", "responses", "matches", "the", "pathspec", "(", "considering", "options", ")", "." ]
def _MatchPath(self, pathspec, response): """Check if the responses matches the pathspec (considering options).""" to_match = response.pathspec.Basename() if pathspec.path_options == rdf_paths.PathSpec.Options.CASE_INSENSITIVE: return to_match.lower() == pathspec.path.lower() elif pathspec.path_options == rdf_paths.PathSpec.Options.CASE_LITERAL: return to_match == pathspec.path elif pathspec.path_options == rdf_paths.PathSpec.Options.REGEX: return bool(re.match(pathspec.path, to_match, flags=re.IGNORECASE)) elif pathspec.path_options == rdf_paths.PathSpec.Options.RECURSIVE: return True raise ValueError("Unknown Pathspec type.")
[ "def", "_MatchPath", "(", "self", ",", "pathspec", ",", "response", ")", ":", "to_match", "=", "response", ".", "pathspec", ".", "Basename", "(", ")", "if", "pathspec", ".", "path_options", "==", "rdf_paths", ".", "PathSpec", ".", "Options", ".", "CASE_INSENSITIVE", ":", "return", "to_match", ".", "lower", "(", ")", "==", "pathspec", ".", "path", ".", "lower", "(", ")", "elif", "pathspec", ".", "path_options", "==", "rdf_paths", ".", "PathSpec", ".", "Options", ".", "CASE_LITERAL", ":", "return", "to_match", "==", "pathspec", ".", "path", "elif", "pathspec", ".", "path_options", "==", "rdf_paths", ".", "PathSpec", ".", "Options", ".", "REGEX", ":", "return", "bool", "(", "re", ".", "match", "(", "pathspec", ".", "path", ",", "to_match", ",", "flags", "=", "re", ".", "IGNORECASE", ")", ")", "elif", "pathspec", ".", "path_options", "==", "rdf_paths", ".", "PathSpec", ".", "Options", ".", "RECURSIVE", ":", "return", "True", "raise", "ValueError", "(", "\"Unknown Pathspec type.\"", ")" ]
https://github.com/google/grr/blob/8ad8a4d2c5a93c92729206b7771af19d92d4f915/grr/server/grr_response_server/flows/general/filesystem.py#L489-L500
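Stand-alone illustration of the CASE_INSENSITIVE and REGEX options checked by _MatchPath, using only the standard library; the basename call stands in for pathspec.Basename().

import posixpath
import re

to_match = posixpath.basename("/etc/Passwd")                     # -> 'Passwd'
print(to_match.lower() == "passwd".lower())                      # CASE_INSENSITIVE -> True
print(bool(re.match(r"pass.*", to_match, flags=re.IGNORECASE)))  # REGEX -> True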
Yelp/paasta
6c08c04a577359509575c794b973ea84d72accf9
paasta_tools/utils.py
python
InstanceConfig.get_deploy_blacklist
(self)
return safe_deploy_blacklist(self.config_dict.get("deploy_blacklist", []))
The deploy blacklist is a list of lists, where the lists indicate which locations the service should not be deployed
The deploy blacklist is a list of lists, where the lists indicate which locations the service should not be deployed
[ "The", "deploy", "blacklist", "is", "a", "list", "of", "lists", "where", "the", "lists", "indicate", "which", "locations", "the", "service", "should", "not", "be", "deployed" ]
def get_deploy_blacklist(self) -> DeployBlacklist: """The deploy blacklist is a list of lists, where the lists indicate which locations the service should not be deployed""" return safe_deploy_blacklist(self.config_dict.get("deploy_blacklist", []))
[ "def", "get_deploy_blacklist", "(", "self", ")", "->", "DeployBlacklist", ":", "return", "safe_deploy_blacklist", "(", "self", ".", "config_dict", ".", "get", "(", "\"deploy_blacklist\"", ",", "[", "]", ")", ")" ]
https://github.com/Yelp/paasta/blob/6c08c04a577359509575c794b973ea84d72accf9/paasta_tools/utils.py#L674-L677
mrlesmithjr/Ansible
d44f0dc0d942bdf3bf7334b307e6048f0ee16e36
roles/ansible-vsphere-management/scripts/pdns/lib/python2.7/site-packages/urllib3/util/retry.py
python
Retry.is_exhausted
(self)
return min(retry_counts) < 0
Are we out of retries?
Are we out of retries?
[ "Are", "we", "out", "of", "retries?" ]
def is_exhausted(self): """ Are we out of retries? """ retry_counts = (self.total, self.connect, self.read, self.redirect, self.status) retry_counts = list(filter(None, retry_counts)) if not retry_counts: return False return min(retry_counts) < 0
[ "def", "is_exhausted", "(", "self", ")", ":", "retry_counts", "=", "(", "self", ".", "total", ",", "self", ".", "connect", ",", "self", ".", "read", ",", "self", ".", "redirect", ",", "self", ".", "status", ")", "retry_counts", "=", "list", "(", "filter", "(", "None", ",", "retry_counts", ")", ")", "if", "not", "retry_counts", ":", "return", "False", "return", "min", "(", "retry_counts", ")", "<", "0" ]
https://github.com/mrlesmithjr/Ansible/blob/d44f0dc0d942bdf3bf7334b307e6048f0ee16e36/roles/ansible-vsphere-management/scripts/pdns/lib/python2.7/site-packages/urllib3/util/retry.py#L310-L317
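Small check of the exhaustion rule in Retry.is_exhausted: exhaustion means a live counter went below zero; note that filter(None, ...) also drops counters sitting at exactly 0. Assumes urllib3 is installed and behaves as the record shows.

from urllib3.util.retry import Retry

print(Retry(total=1).is_exhausted())    # -> False: a retry is still available
print(Retry(total=0).is_exhausted())    # -> False: a 0 counter is filtered out, not negative
print(Retry(total=-1).is_exhausted())   # -> True: the counter went below zero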
google/apis-client-generator
f09f0ba855c3845d315b811c6234fd3996f33172
src/googleapis/codegen/php_generator.py
python
PhpLanguageModel.__init__
(self)
[]
def __init__(self): super(PhpLanguageModel, self).__init__(class_name_delimiter='.')
[ "def", "__init__", "(", "self", ")", ":", "super", "(", "PhpLanguageModel", ",", "self", ")", ".", "__init__", "(", "class_name_delimiter", "=", "'.'", ")" ]
https://github.com/google/apis-client-generator/blob/f09f0ba855c3845d315b811c6234fd3996f33172/src/googleapis/codegen/php_generator.py#L244-L245
EddyGao/make_VOC2007
2a009f6d2df201fb44de09b60a737238da402f6a
labelImg-master/libs/canvas.py
python
Canvas.intersectingEdges
(self, x1y1, x2y2, points)
For each edge formed by `points', yield the intersection with the line segment `(x1,y1) - (x2,y2)`, if it exists. Also return the distance of `(x2,y2)' to the middle of the edge along with its index, so that the one closest can be chosen.
For each edge formed by `points', yield the intersection with the line segment `(x1,y1) - (x2,y2)`, if it exists. Also return the distance of `(x2,y2)' to the middle of the edge along with its index, so that the one closest can be chosen.
[ "For", "each", "edge", "formed", "by", "points", "yield", "the", "intersection", "with", "the", "line", "segment", "(", "x1", "y1", ")", "-", "(", "x2", "y2", ")", "if", "it", "exists", ".", "Also", "return", "the", "distance", "of", "(", "x2", "y2", ")", "to", "the", "middle", "of", "the", "edge", "along", "with", "its", "index", "so", "that", "the", "one", "closest", "can", "be", "chosen", "." ]
def intersectingEdges(self, x1y1, x2y2, points): """For each edge formed by `points', yield the intersection with the line segment `(x1,y1) - (x2,y2)`, if it exists. Also return the distance of `(x2,y2)' to the middle of the edge along with its index, so that the one closest can be chosen.""" x1, y1 = x1y1 x2, y2 = x2y2 for i in range(4): x3, y3 = points[i] x4, y4 = points[(i + 1) % 4] denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1) nua = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3) nub = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3) if denom == 0: # This covers two cases: # nua == nub == 0: Coincident # otherwise: Parallel continue ua, ub = nua / denom, nub / denom if 0 <= ua <= 1 and 0 <= ub <= 1: x = x1 + ua * (x2 - x1) y = y1 + ua * (y2 - y1) m = QPointF((x3 + x4) / 2, (y3 + y4) / 2) d = distance(m - QPointF(x2, y2)) yield d, i, (x, y)
[ "def", "intersectingEdges", "(", "self", ",", "x1y1", ",", "x2y2", ",", "points", ")", ":", "x1", ",", "y1", "=", "x1y1", "x2", ",", "y2", "=", "x2y2", "for", "i", "in", "range", "(", "4", ")", ":", "x3", ",", "y3", "=", "points", "[", "i", "]", "x4", ",", "y4", "=", "points", "[", "(", "i", "+", "1", ")", "%", "4", "]", "denom", "=", "(", "y4", "-", "y3", ")", "*", "(", "x2", "-", "x1", ")", "-", "(", "x4", "-", "x3", ")", "*", "(", "y2", "-", "y1", ")", "nua", "=", "(", "x4", "-", "x3", ")", "*", "(", "y1", "-", "y3", ")", "-", "(", "y4", "-", "y3", ")", "*", "(", "x1", "-", "x3", ")", "nub", "=", "(", "x2", "-", "x1", ")", "*", "(", "y1", "-", "y3", ")", "-", "(", "y2", "-", "y1", ")", "*", "(", "x1", "-", "x3", ")", "if", "denom", "==", "0", ":", "# This covers two cases:", "# nua == nub == 0: Coincident", "# otherwise: Parallel", "continue", "ua", ",", "ub", "=", "nua", "/", "denom", ",", "nub", "/", "denom", "if", "0", "<=", "ua", "<=", "1", "and", "0", "<=", "ub", "<=", "1", ":", "x", "=", "x1", "+", "ua", "*", "(", "x2", "-", "x1", ")", "y", "=", "y1", "+", "ua", "*", "(", "y2", "-", "y1", ")", "m", "=", "QPointF", "(", "(", "x3", "+", "x4", ")", "/", "2", ",", "(", "y3", "+", "y4", ")", "/", "2", ")", "d", "=", "distance", "(", "m", "-", "QPointF", "(", "x2", ",", "y2", ")", ")", "yield", "d", ",", "i", ",", "(", "x", ",", "y", ")" ]
https://github.com/EddyGao/make_VOC2007/blob/2a009f6d2df201fb44de09b60a737238da402f6a/labelImg-master/libs/canvas.py#L478-L502
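Pure-Python check of the parametric intersection math in intersectingEdges: the two diagonals of a 2x2 square cross at its center.

x1, y1, x2, y2 = 0.0, 0.0, 2.0, 2.0   # query segment
x3, y3, x4, y4 = 0.0, 2.0, 2.0, 0.0   # one polygon edge
denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
nua = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)
nub = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)
ua, ub = nua / denom, nub / denom     # denom == 0 would mean parallel or coincident
if 0 <= ua <= 1 and 0 <= ub <= 1:
    print(x1 + ua * (x2 - x1), y1 + ua * (y2 - y1))   # -> 1.0 1.0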
rcorcs/NatI
fdf014f4292afdc95250add7b6658468043228e1
en/parser/nltk_lite/probability.py
python
ConditionalFreqDist.__init__
(self)
Construct a new empty conditional frequency distribution. In particular, the count for every sample, under every condition, is zero.
Construct a new empty conditional frequency distribution. In particular, the count for every sample, under every condition, is zero.
[ "Construct", "a", "new", "empty", "conditional", "frequency", "distribution", ".", "In", "particular", "the", "count", "for", "every", "sample", "under", "every", "condition", "is", "zero", "." ]
def __init__(self): """ Construct a new empty conditional frequency distribution. In particular, the count for every sample, under every condition, is zero. """ self._fdists = {}
[ "def", "__init__", "(", "self", ")", ":", "self", ".", "_fdists", "=", "{", "}" ]
https://github.com/rcorcs/NatI/blob/fdf014f4292afdc95250add7b6658468043228e1/en/parser/nltk_lite/probability.py#L1114-L1120
NUAA-AL/ALiPy
bc69062c7129d597a9e54b9eb409c6fcb1f36a3c
alipy/query_strategy/base.py
python
BaseFeatureQuery.__init__
(self, X=None, y=None, **kwargs)
[]
def __init__(self, X=None, y=None, **kwargs): if X is not None and y is not None: if isinstance(X, np.ndarray) and isinstance(y, np.ndarray): # will not use additional memory check_X_y(X, y, accept_sparse='csc', multi_output=True) self.X = X self.y = y else: self.X, self.y = check_X_y(X, y, accept_sparse='csc', multi_output=True) else: self.X = X self.y = y
[ "def", "__init__", "(", "self", ",", "X", "=", "None", ",", "y", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "X", "is", "not", "None", "and", "y", "is", "not", "None", ":", "if", "isinstance", "(", "X", ",", "np", ".", "ndarray", ")", "and", "isinstance", "(", "y", ",", "np", ".", "ndarray", ")", ":", "# will not use additional memory", "check_X_y", "(", "X", ",", "y", ",", "accept_sparse", "=", "'csc'", ",", "multi_output", "=", "True", ")", "self", ".", "X", "=", "X", "self", ".", "y", "=", "y", "else", ":", "self", ".", "X", ",", "self", ".", "y", "=", "check_X_y", "(", "X", ",", "y", ",", "accept_sparse", "=", "'csc'", ",", "multi_output", "=", "True", ")", "else", ":", "self", ".", "X", "=", "X", "self", ".", "y", "=", "y" ]
https://github.com/NUAA-AL/ALiPy/blob/bc69062c7129d597a9e54b9eb409c6fcb1f36a3c/alipy/query_strategy/base.py#L156-L167
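What the check_X_y call in BaseFeatureQuery.__init__ does, in isolation: validate matching shapes and return arrays. Assumes scikit-learn is installed; the toy data is hypothetical.

from sklearn.utils import check_X_y

X = [[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]]
y = [0, 1, 0]
X_arr, y_arr = check_X_y(X, y, accept_sparse='csc', multi_output=True)
print(X_arr.shape, y_arr.shape)   # -> (3, 2) (3,)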
fortharris/Pcode
147962d160a834c219e12cb456abc130826468e4
rope/base/oi/transform.py
python
PyObjectToTextual.resource_to_path
(self, resource)
[]
def resource_to_path(self, resource): if resource.project == self.project: return resource.path else: return resource.real_path
[ "def", "resource_to_path", "(", "self", ",", "resource", ")", ":", "if", "resource", ".", "project", "==", "self", ".", "project", ":", "return", "resource", ".", "path", "else", ":", "return", "resource", ".", "real_path" ]
https://github.com/fortharris/Pcode/blob/147962d160a834c219e12cb456abc130826468e4/rope/base/oi/transform.py#L95-L99
cunjian/pytorch_face_landmark
f575be168a24af6f4807c852173fdfedf6d2c67d
Retinaface/models/retinaface.py
python
RetinaFace._make_bbox_head
(self,fpn_num=3,inchannels=64,anchor_num=2)
return bboxhead
[]
def _make_bbox_head(self,fpn_num=3,inchannels=64,anchor_num=2): bboxhead = nn.ModuleList() for i in range(fpn_num): bboxhead.append(BboxHead(inchannels,anchor_num)) return bboxhead
[ "def", "_make_bbox_head", "(", "self", ",", "fpn_num", "=", "3", ",", "inchannels", "=", "64", ",", "anchor_num", "=", "2", ")", ":", "bboxhead", "=", "nn", ".", "ModuleList", "(", ")", "for", "i", "in", "range", "(", "fpn_num", ")", ":", "bboxhead", ".", "append", "(", "BboxHead", "(", "inchannels", ",", "anchor_num", ")", ")", "return", "bboxhead" ]
https://github.com/cunjian/pytorch_face_landmark/blob/f575be168a24af6f4807c852173fdfedf6d2c67d/Retinaface/models/retinaface.py#L95-L99
lovelylain/pyctp
fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d
example/ctp/futures/ApiStruct.py
python
SecAgentACIDMap.__init__
(self, BrokerID='', UserID='', AccountID='', CurrencyID='', BrokerSecAgentID='')
[]
def __init__(self, BrokerID='', UserID='', AccountID='', CurrencyID='', BrokerSecAgentID=''): self.BrokerID = '' #broker company code, char[11] self.UserID = '' #user code, char[16] self.AccountID = '' #funds account, char[13] self.CurrencyID = '' #currency, char[4] self.BrokerSecAgentID = 'AccountID'
[ "def", "__init__", "(", "self", ",", "BrokerID", "=", "''", ",", "UserID", "=", "''", ",", "AccountID", "=", "''", ",", "CurrencyID", "=", "''", ",", "BrokerSecAgentID", "=", "''", ")", ":", "self", ".", "BrokerID", "=", "''", "#broker company code, char[11]", "self", ".", "UserID", "=", "''", "#user code, char[16]", "self", ".", "AccountID", "=", "''", "#funds account, char[13]", "self", ".", "CurrencyID", "=", "''", "#currency, char[4]", "self", ".", "BrokerSecAgentID", "=", "'AccountID'" ]
https://github.com/lovelylain/pyctp/blob/fd304de4b50c4ddc31a4190b1caaeb5dec66bc5d/example/ctp/futures/ApiStruct.py#L5866-L5871
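With the constructor corrected to store its arguments (the original body discarded every parameter and assigned the literal string 'AccountID' to the last field), the struct can be populated at construction time. A small illustrative sketch; the field values here are made up:

m = SecAgentACIDMap(BrokerID='9999', UserID='u001', AccountID='12345678',
                    CurrencyID='CNY', BrokerSecAgentID='agent01')
print(m.BrokerID, m.AccountID)  # 9999 12345678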
twisted/twisted
dee676b040dd38b847ea6fb112a712cb5e119490
src/twisted/mail/smtp.py
python
SMTP._cbAnonymousAuthentication
(self, result)
Save the state resulting from a successful anonymous cred login.
Save the state resulting from a successful anonymous cred login.
[ "Save", "the", "state", "resulting", "from", "a", "successful", "anonymous", "cred", "login", "." ]
def _cbAnonymousAuthentication(self, result):
    """
    Save the state resulting from a successful anonymous cred login.
    """
    (iface, avatar, logout) = result
    if issubclass(iface, IMessageDeliveryFactory):
        self.deliveryFactory = avatar
        self.delivery = None
    elif issubclass(iface, IMessageDelivery):
        self.deliveryFactory = None
        self.delivery = avatar
    else:
        raise RuntimeError(f"{iface.__name__} is not a supported interface")
    self._onLogout = logout
    self.challenger = None
[ "def", "_cbAnonymousAuthentication", "(", "self", ",", "result", ")", ":", "(", "iface", ",", "avatar", ",", "logout", ")", "=", "result", "if", "issubclass", "(", "iface", ",", "IMessageDeliveryFactory", ")", ":", "self", ".", "deliveryFactory", "=", "avatar", "self", ".", "delivery", "=", "None", "elif", "issubclass", "(", "iface", ",", "IMessageDelivery", ")", ":", "self", ".", "deliveryFactory", "=", "None", "self", ".", "delivery", "=", "avatar", "else", ":", "raise", "RuntimeError", "(", "f\"{iface.__name__} is not a supported interface\"", ")", "self", ".", "_onLogout", "=", "logout", "self", ".", "challenger", "=", "None" ]
https://github.com/twisted/twisted/blob/dee676b040dd38b847ea6fb112a712cb5e119490/src/twisted/mail/smtp.py#L784-L798
obspy/obspy
0ee5a0d2db293c8d5d4c3b1f148a6c5a85fea55f
obspy/core/utcdatetime.py
python
UTCDateTime.__hash__
(self)
return None
An object is hashable if it has a hash value which never changes during its lifetime. As an UTCDateTime object may change over time, it's not hashable. Use the :meth:`~UTCDateTime.datetime()` method to generate a :class:`datetime.datetime` object for hashing. But be aware: once the UTCDateTime object changes, the hash is not valid anymore.
An object is hashable if it has a hash value which never changes during its lifetime. As an UTCDateTime object may change over time, it's not hashable. Use the :meth:`~UTCDateTime.datetime()` method to generate a :class:`datetime.datetime` object for hashing. But be aware: once the UTCDateTime object changes, the hash is not valid anymore.
[ "An", "object", "is", "hashable", "if", "it", "has", "a", "hash", "value", "which", "never", "changes", "during", "its", "lifetime", ".", "As", "an", "UTCDateTime", "object", "may", "change", "over", "time", "it", "s", "not", "hashable", ".", "Use", "the", ":", "meth", ":", "~UTCDateTime", ".", "datetime", "()", "method", "to", "generate", "a", ":", "class", ":", "datetime", ".", "datetime", "object", "for", "hashing", ".", "But", "be", "aware", ":", "once", "the", "UTCDateTime", "object", "changes", "the", "hash", "is", "not", "valid", "anymore", "." ]
def __hash__(self):
    """
    An object is hashable if it has a hash value which never changes
    during its lifetime. As an UTCDateTime object may change over time,
    it's not hashable. Use the :meth:`~UTCDateTime.datetime()` method to
    generate a :class:`datetime.datetime` object for hashing. But be
    aware: once the UTCDateTime object changes, the hash is not valid
    anymore.
    """
    # explicitly flag it as unhashable
    return None
[ "def", "__hash__", "(", "self", ")", ":", "# explicitly flag it as unhashable", "return", "None" ]
https://github.com/obspy/obspy/blob/0ee5a0d2db293c8d5d4c3b1f148a6c5a85fea55f/obspy/core/utcdatetime.py#L1251-L1260
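Because __hash__ returns None here, hashing a UTCDateTime raises a TypeError, so it cannot go into a set or serve as a dict key; the docstring's workaround is to hash the equivalent datetime.datetime instead. A short sketch, assuming ObsPy is installed (recent versions expose datetime as a property rather than a method):

from obspy import UTCDateTime

t = UTCDateTime(2020, 1, 1, 12, 0, 0)
# t itself is unhashable; take an immutable datetime snapshot for hashing
snapshot = t.datetime
seen = {snapshot}
print(snapshot in seen)  # True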
shiweibsw/Translation-Tools
2fbbf902364e557fa7017f9a74a8797b7440c077
venv/Lib/site-packages/pip-9.0.3-py3.6.egg/pip/_vendor/html5lib/_tokenizer.py
python
HTMLTokenizer.selfClosingStartTagState
(self)
return True
[]
def selfClosingStartTagState(self):
    data = self.stream.char()
    if data == ">":
        self.currentToken["selfClosing"] = True
        self.emitCurrentToken()
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "unexpected-EOF-after-solidus-in-tag"})
        self.stream.unget(data)
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "unexpected-character-after-solidus-in-tag"})
        self.stream.unget(data)
        self.state = self.beforeAttributeNameState
    return True
[ "def", "selfClosingStartTagState", "(", "self", ")", ":", "data", "=", "self", ".", "stream", ".", "char", "(", ")", "if", "data", "==", "\">\"", ":", "self", ".", "currentToken", "[", "\"selfClosing\"", "]", "=", "True", "self", ".", "emitCurrentToken", "(", ")", "elif", "data", "is", "EOF", ":", "self", ".", "tokenQueue", ".", "append", "(", "{", "\"type\"", ":", "tokenTypes", "[", "\"ParseError\"", "]", ",", "\"data\"", ":", "\"unexpected-EOF-after-solidus-in-tag\"", "}", ")", "self", ".", "stream", ".", "unget", "(", "data", ")", "self", ".", "state", "=", "self", ".", "dataState", "else", ":", "self", ".", "tokenQueue", ".", "append", "(", "{", "\"type\"", ":", "tokenTypes", "[", "\"ParseError\"", "]", ",", "\"data\"", ":", "\"unexpected-character-after-solidus-in-tag\"", "}", ")", "self", ".", "stream", ".", "unget", "(", "data", ")", "self", ".", "state", "=", "self", ".", "beforeAttributeNameState", "return", "True" ]
https://github.com/shiweibsw/Translation-Tools/blob/2fbbf902364e557fa7017f9a74a8797b7440c077/venv/Lib/site-packages/pip-9.0.3-py3.6.egg/pip/_vendor/html5lib/_tokenizer.py#L1076-L1092
poodarchu/Det3D
01258d8cb26656c5b950f8d41f9dcc1dd62a391e
det3d/models/backbones/resnet.py
python
make_res_layer
( block, inplanes, planes, blocks, stride=1, dilation=1, style="pytorch", with_cp=False, conv_cfg=None, norm_cfg=dict(type="BN"), dcn=None, gcb=None, gen_attention=None, gen_attention_blocks=[], )
return nn.Sequential(*layers)
[]
def make_res_layer(
    block,
    inplanes,
    planes,
    blocks,
    stride=1,
    dilation=1,
    style="pytorch",
    with_cp=False,
    conv_cfg=None,
    norm_cfg=dict(type="BN"),
    dcn=None,
    gcb=None,
    gen_attention=None,
    gen_attention_blocks=[],
):
    downsample = None
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            build_conv_layer(
                conv_cfg,
                inplanes,
                planes * block.expansion,
                kernel_size=1,
                stride=stride,
                bias=False,
            ),
            build_norm_layer(norm_cfg, planes * block.expansion)[1],
        )

    layers = []
    layers.append(
        block(
            inplanes=inplanes,
            planes=planes,
            stride=stride,
            dilation=dilation,
            downsample=downsample,
            style=style,
            with_cp=with_cp,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            dcn=dcn,
            gcb=gcb,
            gen_attention=gen_attention if (0 in gen_attention_blocks) else None,
        )
    )
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=1,
                dilation=dilation,
                style=style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                gcb=gcb,
                gen_attention=gen_attention if (i in gen_attention_blocks) else None,
            )
        )

    return nn.Sequential(*layers)
[ "def", "make_res_layer", "(", "block", ",", "inplanes", ",", "planes", ",", "blocks", ",", "stride", "=", "1", ",", "dilation", "=", "1", ",", "style", "=", "\"pytorch\"", ",", "with_cp", "=", "False", ",", "conv_cfg", "=", "None", ",", "norm_cfg", "=", "dict", "(", "type", "=", "\"BN\"", ")", ",", "dcn", "=", "None", ",", "gcb", "=", "None", ",", "gen_attention", "=", "None", ",", "gen_attention_blocks", "=", "[", "]", ",", ")", ":", "downsample", "=", "None", "if", "stride", "!=", "1", "or", "inplanes", "!=", "planes", "*", "block", ".", "expansion", ":", "downsample", "=", "nn", ".", "Sequential", "(", "build_conv_layer", "(", "conv_cfg", ",", "inplanes", ",", "planes", "*", "block", ".", "expansion", ",", "kernel_size", "=", "1", ",", "stride", "=", "stride", ",", "bias", "=", "False", ",", ")", ",", "build_norm_layer", "(", "norm_cfg", ",", "planes", "*", "block", ".", "expansion", ")", "[", "1", "]", ",", ")", "layers", "=", "[", "]", "layers", ".", "append", "(", "block", "(", "inplanes", "=", "inplanes", ",", "planes", "=", "planes", ",", "stride", "=", "stride", ",", "dilation", "=", "dilation", ",", "downsample", "=", "downsample", ",", "style", "=", "style", ",", "with_cp", "=", "with_cp", ",", "conv_cfg", "=", "conv_cfg", ",", "norm_cfg", "=", "norm_cfg", ",", "dcn", "=", "dcn", ",", "gcb", "=", "gcb", ",", "gen_attention", "=", "gen_attention", "if", "(", "0", "in", "gen_attention_blocks", ")", "else", "None", ",", ")", ")", "inplanes", "=", "planes", "*", "block", ".", "expansion", "for", "i", "in", "range", "(", "1", ",", "blocks", ")", ":", "layers", ".", "append", "(", "block", "(", "inplanes", "=", "inplanes", ",", "planes", "=", "planes", ",", "stride", "=", "1", ",", "dilation", "=", "dilation", ",", "style", "=", "style", ",", "with_cp", "=", "with_cp", ",", "conv_cfg", "=", "conv_cfg", ",", "norm_cfg", "=", "norm_cfg", ",", "dcn", "=", "dcn", ",", "gcb", "=", "gcb", ",", "gen_attention", "=", "gen_attention", "if", "(", "i", "in", "gen_attention_blocks", ")", "else", "None", ",", ")", ")", "return", "nn", ".", "Sequential", "(", "*", "layers", ")" ]
https://github.com/poodarchu/Det3D/blob/01258d8cb26656c5b950f8d41f9dcc1dd62a391e/det3d/models/backbones/resnet.py#L265-L330
nubank/fklearn
aa558fbce8aa10a20f1043c6b9954dec85800ddd
src/fklearn/causal/effects.py
python
pearson_effect
(df: pd.DataFrame, treatment_column: str, outcome_column: str)
return _apply_effect(correlation_evaluator, df, treatment_column, outcome_column)
Computes the Pearson correlation between the treatment and the outcome

Parameters
----------
df : Pandas' DataFrame
    A Pandas' DataFrame with target and prediction scores.

treatment_column : str
    The name of the treatment column in `df`.

outcome_column : str
    The name of the outcome column in `df`.

Returns
----------
effect: float
    The Pearson correlation between the treatment and the outcome
Computes the Pearson correlation between the treatment and the outcome
[ "Computes", "the", "Pearson", "correlation", "between", "the", "treatment", "and", "the", "outcome" ]
def pearson_effect(df: pd.DataFrame, treatment_column: str, outcome_column: str) -> float:
    """
    Computes the Pearson correlation between the treatment and the outcome

    Parameters
    ----------
    df : Pandas' DataFrame
        A Pandas' DataFrame with target and prediction scores.

    treatment_column : str
        The name of the treatment column in `df`.

    outcome_column : str
        The name of the outcome column in `df`.

    Returns
    ----------
    effect: float
        The Pearson correlation between the treatment and the outcome
    """

    return _apply_effect(correlation_evaluator, df, treatment_column, outcome_column)
[ "def", "pearson_effect", "(", "df", ":", "pd", ".", "DataFrame", ",", "treatment_column", ":", "str", ",", "outcome_column", ":", "str", ")", "->", "float", ":", "return", "_apply_effect", "(", "correlation_evaluator", ",", "df", ",", "treatment_column", ",", "outcome_column", ")" ]
https://github.com/nubank/fklearn/blob/aa558fbce8aa10a20f1043c6b9954dec85800ddd/src/fklearn/causal/effects.py#L68-L89
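A minimal usage sketch of pearson_effect, importing it from the module path recorded above; the toy data and column names are made up:

import pandas as pd
from fklearn.causal.effects import pearson_effect

df = pd.DataFrame({
    "treatment": [0, 0, 1, 1, 1],
    "outcome": [1.0, 2.0, 2.5, 4.0, 5.0],
})

# Pearson correlation between the treatment indicator and the outcome
effect = pearson_effect(df, treatment_column="treatment", outcome_column="outcome")
print(effect)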
pypa/pip
7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4
src/pip/_vendor/pygments/filters/__init__.py
python
find_filter_class
(filtername)
return None
Lookup a filter by name. Return None if not found.
Lookup a filter by name. Return None if not found.
[ "Lookup", "a", "filter", "by", "name", ".", "Return", "None", "if", "not", "found", "." ]
def find_filter_class(filtername):
    """Lookup a filter by name. Return None if not found."""
    if filtername in FILTERS:
        return FILTERS[filtername]
    for name, cls in find_plugin_filters():
        if name == filtername:
            return cls
    return None
[ "def", "find_filter_class", "(", "filtername", ")", ":", "if", "filtername", "in", "FILTERS", ":", "return", "FILTERS", "[", "filtername", "]", "for", "name", ",", "cls", "in", "find_plugin_filters", "(", ")", ":", "if", "name", "==", "filtername", ":", "return", "cls", "return", "None" ]
https://github.com/pypa/pip/blob/7f8a6844037fb7255cfd0d34ff8e8cf44f2598d4/src/pip/_vendor/pygments/filters/__init__.py#L22-L29
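The lookup checks the built-in FILTERS registry first and then falls back to plugin-provided filters. A quick sketch against the non-vendored Pygments package; 'keywordcase' is one of the built-in filter names:

from pygments.filters import find_filter_class

cls = find_filter_class("keywordcase")
if cls is not None:
    print(cls.__name__)  # KeywordCaseFilter
else:
    print("no such filter")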
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_vendored_deps/library/oc_volume.py
python
Yedit.get_curr_value
(invalue, val_type)
return curr_value
return the current value
return the current value
[ "return", "the", "current", "value" ]
def get_curr_value(invalue, val_type):
    '''return the current value'''
    if invalue is None:
        return None

    curr_value = invalue
    if val_type == 'yaml':
        curr_value = yaml.safe_load(str(invalue))
    elif val_type == 'json':
        curr_value = json.loads(invalue)

    return curr_value
[ "def", "get_curr_value", "(", "invalue", ",", "val_type", ")", ":", "if", "invalue", "is", "None", ":", "return", "None", "curr_value", "=", "invalue", "if", "val_type", "==", "'yaml'", ":", "curr_value", "=", "yaml", ".", "safe_load", "(", "str", "(", "invalue", ")", ")", "elif", "val_type", "==", "'json'", ":", "curr_value", "=", "json", ".", "loads", "(", "invalue", ")", "return", "curr_value" ]
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_vendored_deps/library/oc_volume.py#L706-L717
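get_curr_value is a plain dispatch on the requested type; copied standalone with the imports it needs (PyYAML for the yaml branch), it behaves like this:

import json

import yaml  # PyYAML

def get_curr_value(invalue, val_type):
    '''return the current value'''
    if invalue is None:
        return None

    curr_value = invalue
    if val_type == 'yaml':
        curr_value = yaml.safe_load(str(invalue))
    elif val_type == 'json':
        curr_value = json.loads(invalue)

    return curr_value

print(get_curr_value('{"a": 1}', 'json'))  # {'a': 1}
print(get_curr_value('a: 1', 'yaml'))      # {'a': 1}
print(get_curr_value('plain', None))       # 'plain' is passed through unchanged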