function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def get_all_deployed_l7_policys(self):
"""Retrieve a dict of all l7policies deployed
The dict returned will have the following format:
{policy_bigip_id_0: {'id': policy_id_0,
'tenant_id': tenant_id,
'hostnames': [hostnames_0]... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def purge_orphaned_l7_policy(self, tenant_id=None, l7_policy_id=None,
hostnames=list(), listener_id=None):
"""Purge all l7_policys that exist on the BIG-IP but not in Neutron"""
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def purge_orphaned_loadbalancer(self, tenant_id=None,
loadbalancer_id=None, hostnames=list()):
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
va_name = self.service_adapter.prefix + loadbalancer_id
... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def purge_orphaned_listener(
self, tenant_id=None, listener_id=None, hostnames=[]):
for bigip in self.get_all_bigips():
if bigip.hostname in hostnames:
try:
listener_name = self.service_adapter.prefix + listener_id
partition = self.... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def create_loadbalancer(self, loadbalancer, service):
    """Provision the virtual server described by ``service``.

    ``loadbalancer`` is accepted for interface compatibility; the full
    ``service`` payload is what drives the device configuration.
    """
    # Apply the whole service definition, then report status back.
    self._common_service_handler(service)
    return self._update_target(service)
14,
37,
14,
83,
1444753730
] |
def update_loadbalancer(self, old_loadbalancer, loadbalancer, service):
    """Reconfigure the virtual server from the ``service`` payload.

    NOTE: ``old_loadbalancer`` and ``loadbalancer`` are unused; only the
    service definition drives the update.
    """
    self._common_service_handler(service)
    return self._update_target(service)
14,
37,
14,
83,
1444753730
] |
def delete_loadbalancer(self, loadbalancer, service):
    """Remove the load balancer and its tenant partition from the device."""
    LOG.debug("Deleting loadbalancer")
    # Tear down the whole service; the tenant partition is deleted and a
    # delete event is emitted as part of the same pass.
    self._common_service_handler(
        service, delete_partition=True, delete_event=True)
    return self._update_target(service)
14,
37,
14,
83,
1444753730
] |
def create_listener(self, listener, service):
    """Create a virtual server for the listener in ``service``."""
    LOG.debug("Creating listener")
    self._common_service_handler(service)
    # Status callback receives the service so listener state is synced back.
    return self._update_target(
        service, self._update_listener_status, service)
14,
37,
14,
83,
1444753730
] |
def update_listener(self, old_listener, listener, service):
"""Update virtual server."""
LOG.debug("Updating listener")
self._common_service_handler(service)
return self._update_target(service,
self._update_listener_status,
... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def delete_listener(self, listener, service):
    """Delete the virtual server backing the listener in ``service``."""
    LOG.debug("Deleting listener")
    self._common_service_handler(service)
    # Status callback receives the service so listener state is synced back.
    return self._update_target(
        service, self._update_listener_status, service)
14,
37,
14,
83,
1444753730
] |
def create_pool(self, pool, service):
"""Create lb pool."""
LOG.debug("Creating pool")
# pzhang(NOTE): pool may not bound with a listener
if service.get("listeners"):
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDAT... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def update_pool(self, old_pool, pool, service):
"""Update lb pool."""
LOG.debug("Updating pool")
if service.get("listeners"):
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def delete_pool(self, pool, service):
"""Delete lb pool."""
LOG.debug("Deleting pool")
if service.get("listeners"):
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
re... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def create_l7policy(self, l7policy, service):
"""Create lb l7policy."""
LOG.debug("Creating l7policy")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(serv... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def update_l7policy(self, old_l7policy, l7policy, service):
"""Update lb l7policy."""
LOG.debug("Updating l7policy")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._upda... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def delete_l7policy(self, l7policy, service):
"""Delete lb l7policy."""
LOG.debug("Deleting l7policy")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(serv... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def create_l7rule(self, l7rule, service):
"""Create lb l7rule."""
LOG.debug("Creating l7rule")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def update_l7rule(self, old_l7rule, l7rule, service):
"""Update lb l7rule."""
LOG.debug("Updating l7rule")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def delete_l7rule(self, l7rule, service):
"""Delete lb l7rule."""
LOG.debug("Deleting l7rule")
target_listener = service["listeners"][0]
target_listener["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def create_member(self, member, service):
"""Create pool member."""
LOG.debug("Creating member")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def update_member(self, old_member, member, service):
"""Update pool member."""
LOG.debug("Updating member")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def delete_member(self, member, service):
"""Delete pool member."""
LOG.debug("Deleting member")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._update_target(service,
... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def create_health_monitor(self, health_monitor, service):
"""Create pool health monitor."""
LOG.debug("Creating health monitor")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._upda... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def update_health_monitor(self, old_health_monitor,
health_monitor, service):
"""Update pool health monitor."""
LOG.debug("Updating health monitor")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def delete_health_monitor(self, health_monitor, service):
"""Delete pool health monitor."""
LOG.debug("Deleting health monitor")
target_pool = service["pools"][0]
target_pool["provisioning_status"] = "PENDING_UPDATE"
self._common_service_handler(service)
return self._upda... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def get_stats(self, service):
lb_stats = {}
stats = ['clientside.bitsIn',
'clientside.bitsOut',
'clientside.curConns',
'clientside.totConns']
loadbalancer = service['loadbalancer']
try:
# sum virtual server stats for all BIG... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def fdb_remove(self, fdb):
    """Remove (L2toL3) forwarding database entries from every BIG-IP."""
    for device in self.get_all_bigips():
        self.network_builder.remove_bigip_fdb(device, fdb)
14,
37,
14,
83,
1444753730
] |
def tunnel_update(self, **kwargs):
    """Tunnel update notification from Neutron core RPC; intentionally a no-op."""
    return None
14,
37,
14,
83,
1444753730
] |
def sync(self, service):
"""Sync service defintion to device."""
# loadbalancer and plugin_rpc may not be set
lb_id = service.get('loadbalancer', dict()).get('id', '')
if hasattr(self, 'plugin_rpc') and self.plugin_rpc and lb_id:
# Get the latest service. It may have changed.... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def backup_configuration(self):
    """Persist the running configuration on every managed BIG-IP device."""
    for device in self.get_all_bigips():
        LOG.debug('_backup_configuration: saving device %s.'
                  % device.hostname)
        self.cluster_manager.save_config(device)
14,
37,
14,
83,
1444753730
] |
def service_rename_required(self, service):
rename_required = False
# Returns whether the bigip has a pool for the service
if not service['loadbalancer']:
return False
bigips = self.get_config_bigips()
loadbalancer = service['loadbalancer']
# Does the corre... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def _service_exists(self, service):
# Returns whether the bigip has the service defined
if not service['loadbalancer']:
return False
loadbalancer = service['loadbalancer']
folder_name = self.service_adapter.get_folder_name(
loadbalancer['tenant_id']
)
... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def _common_service_handler(self, service,
delete_partition=False,
delete_event=False):
# Assure that the service is configured on bigip(s)
start_time = time()
lb_pending = True
self.do_service_update = True
if se... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def _update_member_status(self, members, timed_out=False):
"""Update member status in OpenStack."""
for member in members:
if 'provisioning_status' in member:
provisioning_status = member['provisioning_status']
if provisioning_status in self.positive_plugin_c... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def _update_pool_status(self, pools):
"""Update pool status in OpenStack."""
for pool in pools:
if 'provisioning_status' in pool:
provisioning_status = pool['provisioning_status']
if provisioning_status in self.positive_plugin_const_state:
... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def _update_listener_status(self, service):
"""Update listener status in OpenStack."""
listeners = service['listeners']
for listener in listeners:
if 'provisioning_status' in listener:
provisioning_status = listener['provisioning_status']
if provisioni... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def _update_l7rule_status(self, l7rules):
"""Update l7rule status in OpenStack."""
for l7rule in l7rules:
if 'provisioning_status' in l7rule:
provisioning_status = l7rule['provisioning_status']
if provisioning_status in self.positive_plugin_const_state:
... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def _update_l7policy_status(self, l7policies):
LOG.debug("_update_l7policy_status")
"""Update l7policy status in OpenStack."""
for l7policy in l7policies:
if 'provisioning_status' in l7policy:
provisioning_status = l7policy['provisioning_status']
if pr... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def _update_loadbalancer_status(self, service, timed_out=False):
"""Update loadbalancer status in OpenStack."""
loadbalancer = service.get('loadbalancer', {})
provisioning_status = loadbalancer.get('provisioning_status',
f5const.F5_ERROR)
#... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def update_operating_status(self, service):
if 'members' in service:
if self.network_builder:
# append route domain to member address
try:
self.network_builder._annotate_service_route_domains(
service)
except... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def service_to_traffic_group(self, service):
    """Map a service to a traffic group by hashing its tenant id."""
    tenant_id = service['loadbalancer']['tenant_id']
    return self.tenant_to_traffic_group(tenant_id)
14,
37,
14,
83,
1444753730
] |
def get_bigip(self):
    """Return the first active BIG-IP by sorted hostname, else None."""
    for hostname in sorted(self.__bigips):
        candidate = self.__bigips[hostname]
        # Devices without a status attribute are treated as not active.
        if getattr(candidate, 'status', None) == 'active':
            return candidate
14,
37,
14,
83,
1444753730
] |
def get_all_bigips(self):
    """Return every managed BIG-IP whose status is 'active'."""
    # Devices lacking a status attribute are excluded.
    return [
        self.__bigips[host]
        for host in self.__bigips
        if getattr(self.__bigips[host], 'status', None) == 'active'
    ]
14,
37,
14,
83,
1444753730
] |
def get_active_bigips(self):
    """Alias for get_all_bigips(); that method already filters to active."""
    return self.get_all_bigips()
14,
37,
14,
83,
1444753730
] |
def get_inbound_throughput(self, bigip, global_statistics=None):
    """Inbound throughput for ``bigip``, delegated to the stats helper."""
    helper = self.stat_helper
    return helper.get_inbound_throughput(bigip,
                                         global_stats=global_statistics)
14,
37,
14,
83,
1444753730
] |
def get_throughput(self, bigip=None, global_statistics=None):
    """Total throughput for ``bigip``, delegated to the stats helper."""
    helper = self.stat_helper
    return helper.get_throughput(bigip, global_stats=global_statistics)
14,
37,
14,
83,
1444753730
] |
def get_ssltps(self, bigip=None, global_statistics=None):
    """Active SSL transactions/sec for ``bigip``, via the stats helper."""
    helper = self.stat_helper
    return helper.get_active_SSL_TPS(bigip, global_stats=global_statistics)
14,
37,
14,
83,
1444753730
] |
def get_clientssl_profile_count(self, bigip=None, global_statistics=None):
    """Count client-SSL profiles on ``bigip`` (``global_statistics`` unused)."""
    helper = ssl_profile.SSLProfileHelper
    return helper.get_client_ssl_profile_count(bigip)
14,
37,
14,
83,
1444753730
] |
def get_tunnel_count(self, bigip=None, global_statistics=None):
    """Number of tunnels on ``bigip`` (``global_statistics`` unused)."""
    helper = self.network_helper
    return helper.get_tunnel_count(bigip)
14,
37,
14,
83,
1444753730
] |
def get_route_domain_count(self, bigip=None, global_statistics=None):
    """Number of route domains on ``bigip`` (``global_statistics`` unused)."""
    helper = self.network_helper
    return helper.get_route_domain_count(bigip)
14,
37,
14,
83,
1444753730
] |
def _validate_bigip_version(self, bigip, hostname):
# Ensure the BIG-IP has sufficient version
major_version = self.system_helper.get_major_version(bigip)
if major_version < f5const.MIN_TMOS_MAJOR_VERSION:
raise f5ex.MajorVersionValidateFailed(
'Device %s must be at l... | F5Networks/f5-openstack-agent | [
14,
37,
14,
83,
1444753730
] |
def dense_prediction_cell_hparams():
"""DensePredictionCell HParams.
Returns:
A dictionary of hyper-parameters used for dense prediction cell with keys:
- reduction_size: Integer, the number of output filters for each operation
inside the cell.
- dropout_on_concat_features: Boolean, apply... | googleinterns/wss | [
142,
21,
142,
9,
1597440534
] |
def __init__(self, config, hparams=None):
"""Initializes the dense prediction cell.
Args:
config: A dictionary storing the architecture of a dense prediction cell.
hparams: A dictionary of hyper-parameters, provided by users. This
dictionary will be used to update the default dictionary ret... | googleinterns/wss | [
142,
21,
142,
9,
1597440534
] |
def _parse_operation(self, config, crop_size, output_stride,
image_pooling_crop_size=None):
"""Parses one operation.
When 'operation' is 'pyramid_pooling', we compute the required
hyper-parameters and save in config.
Args:
config: A dictionary storing required hyper-parame... | googleinterns/wss | [
142,
21,
142,
9,
1597440534
] |
def _tryGevent():
   """Import gevent and gevent.event into this module's globals.

   Returns False if both are already imported, True after a fresh import.
   Raises ValueError (not ImportError) when gevent is not installed.
   """
   global gevent, geventEvent
   # Already imported earlier — nothing to do.
   if gevent and geventEvent: return False
   try:
      import gevent
      from gevent import event as geventEvent
      return True
   except ImportError:
      raise ValueError('gevent not found')
2,
1,
2,
39,
1429860383
] |
def _child(target, args, kwargs):
"""Wrapper function that runs in child process. Resets gevent/libev state
and executes user-given function.
"""
_tryGevent()
_reset_signal_handlers()
gevent.reinit()
hub = gevent.get_hub()
del hub.threadpool
hub._threadpool = None
hub.destroy(destroy_loop=... | byaka/flaskJSONRPCServer | [
2,
1,
2,
39,
1429860383
] |
def start(self):
_tryGevent()
# Start grabbing SIGCHLD within libev event loop.
gevent.get_hub().loop.install_sigchld()
# Run new process (based on `fork()` on POSIX-compliant systems).
super(_GProcess, self).start()
# The occurrence of SIGCHLD is recorded asynchronously in libev.
... | byaka/flaskJSONRPCServer | [
2,
1,
2,
39,
1429860383
] |
def is_alive(self):
    """Return True while the child process has not yet terminated."""
    assert self._popen is not None, "Process not yet started."
    # A returncode of None means the child is still running.
    return self._popen.returncode is None
2,
1,
2,
39,
1429860383
] |
def exitcode(self):
    """Exit code of the child, or None when not started or still running."""
    return None if self._popen is None else self._popen.returncode
2,
1,
2,
39,
1429860383
] |
def join(self, timeout=None):
"""
Wait cooperatively until child process terminates or timeout occurs.
:arg timeout: ``None`` (default) or a a time in seconds. The method
simply returns upon timeout expiration. The state of the process
has to be identified via ``is_alive()``.
... | byaka/flaskJSONRPCServer | [
2,
1,
2,
39,
1429860383
] |
def _reset_signal_handlers():
    """Restore default handlers for every signal in _signals_to_reset."""
    for signum in _signals_to_reset:
        # Skip numbers at/above NSIG: invalid on this platform.
        if signum < signal.NSIG:
            signal.signal(signum, signal.SIG_DFL)
2,
1,
2,
39,
1429860383
] |
def _reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value | byaka/flaskJSONRPCServer | [
2,
1,
2,
39,
1429860383
] |
def __exec(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
... | byaka/flaskJSONRPCServer | [
2,
1,
2,
39,
1429860383
] |
def test_feedback_001(self):
"""
Test the feedback algorithm.
"""
mi = 1,
ma = 256
base = 3
obj = ExponentialTimeFeedback(min_time=mi,
max_time=ma,
base=base
... | ComputerNetworks-UFRGS/OpERA | [
3,
2,
3,
1,
1374231621
] |
def __save_page(self, data, url, outputdir):
'''
save the page content with the specific url to the local path.
'''
if(not os.path.exists(outputdir)):
os.makedirs(outputdir)
filename = self.__validate_name(url)
f = open(outputdir + os.sep + filename, 'w... | onehao/opensource | [
1,
1,
1,
1,
1414656394
] |
def runoff_pitt(precip, evaptrans, soil_type, land_use):
"""
The Pitt Small Storm Hydrology method. The output is a runoff
value in inches.
This uses numpy to make a linear interpolation between tabular values to
calculate the exact runoff for a given value
`precip` is the amount of precipitat... | WikiWatershed/tr-55 | [
9,
8,
9,
9,
1427290590
] |
def runoff_nrcs(precip, evaptrans, soil_type, land_use):
"""
The runoff equation from the TR-55 document. The output is a
runoff value in inches.
`precip` is the amount of precipitation in inches.
"""
curve_number = lookup_cn(soil_type, land_use)
if nrcs_cutoff(precip, curve_number):
... | WikiWatershed/tr-55 | [
9,
8,
9,
9,
1427290590
] |
def clamp(runoff, et, inf, precip):
"""
This function ensures that runoff + et + inf <= precip.
NOTE: Infiltration is normally independent of the
precipitation level, but this function introduces a slight
dependency (that is, at very low levels of precipitation, this
fun... | WikiWatershed/tr-55 | [
9,
8,
9,
9,
1427290590
] |
def create_unmodified_census(census):
    """Return a deep copy of ``census`` with any 'modifications' key removed.

    The result is suitable for use as input to `simulate_water_quality`.
    """
    pristine = copy.deepcopy(census)
    if 'modifications' in pristine:
        del pristine['modifications']
    return pristine
9,
8,
9,
9,
1427290590
] |
def simulate_water_quality(tree, cell_res, fn,
pct=1.0, current_cell=None, precolumbian=False):
"""
Perform a water quality simulation by doing simulations on each of
the cell types (leaves), then adding them together by summing the
values of a node's subtrees and storing them... | WikiWatershed/tr-55 | [
9,
8,
9,
9,
1427290590
] |
def compute_bmp_effect(census, m2_per_pixel, precip):
"""
Compute the overall amount of water retained by infiltration/retention
type BMP's.
Result is a percent of runoff remaining after water is trapped in
infiltration/retention BMP's
"""
meters_per_inch = 0.0254
cubic_meters = census[... | WikiWatershed/tr-55 | [
9,
8,
9,
9,
1427290590
] |
def simulate_day(census, precip, cell_res=10, precolumbian=False):
"""
Simulate a day, including water quality effects of modifications.
`census` contains a distribution of cell-types in the area of interest.
`cell_res` is as described in `simulate_water_quality`.
`precolumbian` indicates that ar... | WikiWatershed/tr-55 | [
9,
8,
9,
9,
1427290590
] |
def _verify_buckets_status(self, revision_id, comparison_revision_id,
expected):
# Verify that actual and expected results match, despite the order of
# `comparison_revision_id` and `revision_id` args.
revision_ids = [revision_id, comparison_revision_id]
fo... | att-comdev/deckhand | [
9,
5,
9,
5,
1497626943
] |
def test_revision_diff_created(self):
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
bucket_name = test_utils.rand_name('bucket')
documents = self.create_documents(bucket_name, payload)
revision_id = documents[0]['revision_id']
self._verify_buckets_status(
... | att-comdev/deckhand | [
9,
5,
9,
5,
1497626943
] |
def test_revision_diff_self(self):
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
bucket_name = test_utils.rand_name('bucket')
documents = self.create_documents(bucket_name, payload)
revision_id = documents[0]['revision_id']
self._verify_buckets_status(
... | att-comdev/deckhand | [
9,
5,
9,
5,
1497626943
] |
def test_revision_diff_modified(self):
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
bucket_name = test_utils.rand_name('bucket')
documents = self.create_documents(bucket_name, payload)
revision_id = documents[0]['revision_id']
payload[0]['data'] = {'modified... | att-comdev/deckhand | [
9,
5,
9,
5,
1497626943
] |
def test_revision_diff_multi_revision_multi_bucket_modified(self):
revision_ids = []
bucket_name = test_utils.rand_name('bucket')
alt_bucket_name = test_utils.rand_name('bucket')
bucket_names = [bucket_name, alt_bucket_name] * 2
# Create revisions by modifying documents in `buc... | att-comdev/deckhand | [
9,
5,
9,
5,
1497626943
] |
def test_revision_diff_ignore_bucket_with_all_unrelated_documents(self):
payload = base.DocumentFixture.get_minimal_multi_fixture(count=3)
alt_payload = copy.deepcopy(payload)
bucket_name = test_utils.rand_name('bucket')
alt_bucket_name = test_utils.rand_name('bucket')
# Create ... | att-comdev/deckhand | [
9,
5,
9,
5,
1497626943
] |
def test_revision_diff_delete_then_recreate(self):
payload = base.DocumentFixture.get_minimal_fixture()
bucket_name = test_utils.rand_name('bucket')
created_documents = self.create_documents(bucket_name, payload)
revision_id_1 = created_documents[0]['revision_id']
# Delete the p... | att-comdev/deckhand | [
9,
5,
9,
5,
1497626943
] |
def forwards(self, orm):
# Adding field 'BadgeByCourse.title_en'
db.add_column('badges_badgebycourse', 'title_en',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
# Adding field 'BadgeByCourse.t... | GeographicaGS/moocng | [
2,
1,
2,
12,
1409568699
] |
def main():
    """Return a fixed marker string identifying the main method."""
    return 'main method'
5,
15,
5,
28,
1562866376
] |
def not_main():
    """Return a fixed marker string for the not-main method."""
    return 'not main'
5,
15,
5,
28,
1562866376
] |
def also_not_main():
    """Return a fixed marker string for the also-not-main method."""
    return 'also_not main'
5,
15,
5,
28,
1562866376
] |
def untested_method():
    """Return a fixed marker string for the untested method."""
    return 'untested!'
5,
15,
5,
28,
1562866376
] |
def __init__(self, representation_names, sample_freq, update_freq):
"""Constructs a SelectionStrategy object.
Args:
representation_names: A list of representations names for tf.summary.
sample_freq: Frequency to draw a new selection (in steps).
update_freq: Frequency to update the selector's ... | tensorflow/neural-structured-learning | [
963,
192,
963,
1,
1566942496
] |
def should_update(self, epoch):
    """Return True when >= ``update_freq`` epochs passed since the last update."""
    elapsed = epoch - self.last_update_epoch
    return elapsed >= self.update_freq
963,
192,
963,
1,
1566942496
] |
def _select(self):
raise NotImplementedError | tensorflow/neural-structured-learning | [
963,
192,
963,
1,
1566942496
] |
def _select(self):
# No needs to reselect since this strategy is deterministic.
return self.current_selection.numpy() | tensorflow/neural-structured-learning | [
963,
192,
963,
1,
1566942496
] |
def _select(self):
return (self.current_selection + 1) % self.num_representations | tensorflow/neural-structured-learning | [
963,
192,
963,
1,
1566942496
] |
def eta_scheduler(epoch, values=(0.1,), breakpoints=()):
    """Piecewise-constant schedule for eta (selector weight learning rate).

    Each breakpoint strictly exceeded by ``epoch`` advances one slot in
    ``values``; with no breakpoints, ``values[0]`` is always returned.
    """
    segment = sum(epoch > b for b in breakpoints)
    return values[segment]
963,
192,
963,
1,
1566942496
] |
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Weights of each representation. Each selection is a sample drawn
# proportionally to the weights.
# TODO(csferng): Store the weights in logit space.
self.weights = tf.Variable(tf.ones(self.num_representations))
self.current... | tensorflow/neural-structured-learning | [
963,
192,
963,
1,
1566942496
] |
def _update(self, epoch, validation_losses):
validation_losses = tf.convert_to_tensor(validation_losses)
eta = eta_scheduler(epoch)
self.weights.assign(self.weights * tf.math.exp(eta * validation_losses))
for i in range(self.num_representations):
tf.summary.scalar(
f"representations/weig... | tensorflow/neural-structured-learning | [
963,
192,
963,
1,
1566942496
] |
def create_or_update_chain(pid, sid, obsoletes_pid, obsoleted_by_pid):
    """Create a revision chain for ``pid`` or update the existing chain's SID."""
    existing_chain = _get_chain_by_pid(pid)
    if existing_chain:
        _set_chain_sid(existing_chain, sid)
    else:
        _add_sciobj(pid, sid, obsoletes_pid, obsoleted_by_pid)
    # Keep the SID -> newest-existing-PID map in sync with this change.
    _update_sid_to_last_existing_pid_map(pid)
13,
6,
13,
17,
1464710460
] |
def cut_from_chain(sciobj_model):
"""Remove an object from a revision chain.
The object can be at any location in the chain, including the head or tail.
Preconditions:
- The object with the pid is verified to exist and to be a member of an
revision chain. E.g., with:
d1_gmn.app.views.asserts.... | DataONEorg/d1_python | [
13,
6,
13,
17,
1464710460
] |
def resolve_sid(sid):
    """Return the PID to which the ``sid`` currently maps (the chain head).

    Preconditions:
    - ``sid`` is verified to exist. E.g., with d1_gmn.app.views.asserts.is_sid().
    """
    chain = d1_gmn.app.models.Chain.objects.get(sid__did=sid)
    return chain.head_pid.did
13,
6,
13,
17,
1464710460
] |
def set_revision_links(sciobj_model, obsoletes_pid=None, obsoleted_by_pid=None):
if obsoletes_pid:
sciobj_model.obsoletes = d1_gmn.app.did.get_or_create_did(obsoletes_pid)
_set_revision_reverse(sciobj_model.pid.did, obsoletes_pid, is_obsoletes=False)
if obsoleted_by_pid:
sciobj_model.obs... | DataONEorg/d1_python | [
13,
6,
13,
17,
1464710460
] |
def is_obsoleted_by_pid(pid):
"""Return True if ``pid`` is referenced in the obsoletedBy field of any object.
This will return True even if the PID is in the obsoletes field of an object that
does not exist on the local MN, such as replica that is in an incomplete chain.
"""
return d1_gmn.app.mode... | DataONEorg/d1_python | [
13,
6,
13,
17,
1464710460
] |
def _add_sciobj(pid, sid, obsoletes_pid, obsoleted_by_pid):
    """Attach ``pid`` to an existing chain, or start a standalone chain."""
    added_to_chain = _add_to_chain(pid, sid, obsoletes_pid, obsoleted_by_pid)
    if not added_to_chain:
        # No adjacent chain found; the object becomes its own chain.
        _add_standalone(pid, sid)
13,
6,
13,
17,
1464710460
] |
def _add_to_chain(pid, sid, obsoletes_pid, obsoleted_by_pid):
_assert_sid_is_in_chain(sid, obsoletes_pid)
_assert_sid_is_in_chain(sid, obsoleted_by_pid)
obsoletes_chain_model = _get_chain_by_pid(obsoletes_pid)
obsoleted_by_chain_model = _get_chain_by_pid(obsoleted_by_pid)
sid_chain_model = _get_chai... | DataONEorg/d1_python | [
13,
6,
13,
17,
1464710460
] |
def _add_pid_to_chain(chain_model, pid):
    """Create a ChainMember row linking ``pid`` into ``chain_model``."""
    member = d1_gmn.app.models.ChainMember(
        chain=chain_model,
        pid=d1_gmn.app.did.get_or_create_did(pid),
    )
    member.save()
13,
6,
13,
17,
1464710460
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.