def in_ellipsoid(x, A, c):
    r"""
    Determines which of the points ``x`` are in the
    closed ellipsoid with shape matrix ``A`` centered at ``c``.
    For a single point ``x``, this is computed as

    .. math::

        (c - x)^T \cdot A^{-1} \cdot (c - x) \leq 1

    :param np.ndarray x: Shape ``(n_points, dim)`` or ``(dim,)``.
    :param np.ndarray A: Shape ``(dim, dim)``, positive definite.
    :param np.ndarray c: Shape ``(dim,)``.
    :return: ``bool`` or array of bools of length ``n_points``.
    """
    if x.ndim == 1:
        y = c - x
        return np.einsum('j,jl,l', y, np.linalg.inv(A), y) <= 1
    else:
        y = c[np.newaxis, :] - x
        return np.einsum('ij,jl,il->i', y, np.linalg.inv(A), y) <= 1

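A minimal usage sketch (assuming ``import numpy as np``): with the identity shape matrix the ellipsoid is the unit ball, so membership is easy to verify by hand.

import numpy as np

A = np.eye(2)          # unit ball in 2-D
c = np.zeros(2)
points = np.array([[0.5, 0.5],    # squared norm 0.5 -> inside
                   [1.0, 1.0]])   # squared norm 2.0 -> outside
print(in_ellipsoid(points, A, c))       # [ True False]
print(in_ellipsoid(np.zeros(2), A, c))  # True (the center)
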
"def",
"in_ellipsoid",
"(",
"x",
",",
"A",
",",
"c",
")",
":",
"if",
"x",
".",
"ndim",
"==",
"1",
":",
"y",
"=",
"c",
"-",
"x",
"return",
"np",
".",
"einsum",
"(",
"'j,jl,l'",
",",
"y",
",",
"np",
".",
"linalg",
".",
"inv",
"(",
"A",
")",
",",
"y",
")",
"<=",
"1",
"else",
":",
"y",
"=",
"c",
"[",
"np",
".",
"newaxis",
",",
":",
"]",
"-",
"x",
"return",
"np",
".",
"einsum",
"(",
"'ij,jl,il->i'",
",",
"y",
",",
"np",
".",
"linalg",
".",
"inv",
"(",
"A",
")",
",",
"y",
")",
"<=",
"1"
] | 35.35 | 18.75 |
def HandleRequest(self, request):
  """Handles given HTTP request."""
  impersonated_username = config.CONFIG["AdminUI.debug_impersonate_user"]
  if impersonated_username:
    logging.info("Overriding user as %s", impersonated_username)
    request.user = config.CONFIG["AdminUI.debug_impersonate_user"]

  if not aff4_users.GRRUser.IsValidUsername(request.user):
    return self._BuildResponse(
        403, dict(message="Invalid username: %s" % request.user))

  try:
    router, method_metadata, args = self._router_matcher.MatchRouter(request)
  except access_control.UnauthorizedAccess as e:
    error_message = str(e)
    logging.exception("Access denied to %s (%s): %s", request.path,
                      request.method, e)

    additional_headers = {
        "X-GRR-Unauthorized-Access-Reason": error_message.replace("\n", ""),
        "X-GRR-Unauthorized-Access-Subject": utils.SmartStr(e.subject)
    }
    return self._BuildResponse(
        403,
        dict(
            message="Access denied by ACL: %s" % error_message,
            subject=utils.SmartStr(e.subject)),
        headers=additional_headers)
  except ApiCallRouterNotFoundError as e:
    error_message = str(e)
    return self._BuildResponse(404, dict(message=error_message))
  except werkzeug_exceptions.MethodNotAllowed as e:
    error_message = str(e)
    return self._BuildResponse(405, dict(message=error_message))
  except Error as e:
    logging.exception("Can't match URL to router/method: %s", e)
    return self._BuildResponse(
        500, dict(message=str(e), traceBack=traceback.format_exc()))

  request.method_metadata = method_metadata
  request.parsed_args = args

  # SetUID() is called here so that ACL checks done by the router do not
  # clash with datastore ACL checks.
  # TODO(user): increase token expiry time.
  token = self.BuildToken(request, 60).SetUID()

  if data_store.AFF4Enabled():
    # TODO:
    # AFF4 edge case: if a user is issuing a request, before they are created
    # using CreateGRRUser (e.g. in E2E tests or with single sign-on),
    # AFF4's ReadGRRUsers will NEVER contain the user, because the creation
    # done in the following lines does not add the user to the /users/
    # collection. Furthermore, subsequent CreateGrrUserHandler calls fail,
    # because the user technically already exists.
    # We send a blind-write request to ensure that the user object is created
    # for a user specified by the username.
    user_urn = rdfvalue.RDFURN("aff4:/users/").Add(request.user)
    # We can't use conventional AFF4 interface, since aff4.FACTORY.Create will
    # create a new version of the object for every call.
    with data_store.DB.GetMutationPool() as pool:
      pool.MultiSet(
          user_urn, {
              aff4_users.GRRUser.SchemaCls.TYPE:
                  [aff4_users.GRRUser.__name__],
              aff4_users.GRRUser.SchemaCls.LAST:
                  [rdfvalue.RDFDatetime.Now().SerializeToDataStore()]
          },
          replace=True)

  if data_store.RelationalDBEnabled():
    data_store.REL_DB.WriteGRRUser(request.user)

  handler = None
  try:
    # ACL checks are done here by the router. If this method succeeds (i.e.
    # does not raise), then handlers run without further ACL checks (they're
    # free to do some in their own implementations, though).
    handler = getattr(router, method_metadata.name)(args, token=token)

    if handler.args_type != method_metadata.args_type:
      raise RuntimeError("Handler args type doesn't match "
                         "method args type: %s vs %s" %
                         (handler.args_type, method_metadata.args_type))

    binary_result_type = (
        api_call_router.RouterMethodMetadata.BINARY_STREAM_RESULT_TYPE)
    if (handler.result_type != method_metadata.result_type and
        not (handler.result_type is None and
             method_metadata.result_type == binary_result_type)):
      raise RuntimeError("Handler result type doesn't match "
                         "method result type: %s vs %s" %
                         (handler.result_type, method_metadata.result_type))

    # HEAD method is only used for checking the ACLs for particular API
    # methods.
    if request.method == "HEAD":
      # If the request would return a stream, we add the Content-Length
      # header to the response.
      if (method_metadata.result_type ==
          method_metadata.BINARY_STREAM_RESULT_TYPE):
        binary_stream = handler.Handle(args, token=token)
        return self._BuildResponse(
            200, {"status": "OK"},
            method_name=method_metadata.name,
            no_audit_log=method_metadata.no_audit_log_required,
            content_length=binary_stream.content_length,
            token=token)
      else:
        return self._BuildResponse(
            200, {"status": "OK"},
            method_name=method_metadata.name,
            no_audit_log=method_metadata.no_audit_log_required,
            token=token)

    if (method_metadata.result_type ==
        method_metadata.BINARY_STREAM_RESULT_TYPE):
      binary_stream = handler.Handle(args, token=token)
      return self._BuildStreamingResponse(
          binary_stream, method_name=method_metadata.name)
    else:
      format_mode = GetRequestFormatMode(request, method_metadata)
      result = self.CallApiHandler(handler, args, token=token)
      rendered_data = self._FormatResultAsJson(
          result, format_mode=format_mode)
      return self._BuildResponse(
          200,
          rendered_data,
          method_name=method_metadata.name,
          no_audit_log=method_metadata.no_audit_log_required,
          token=token)
  except access_control.UnauthorizedAccess as e:
    error_message = str(e)
    logging.warning("Access denied for %s (HTTP %s %s): %s",
                    method_metadata.name, request.method, request.path,
                    error_message)

    additional_headers = {
        "X-GRR-Unauthorized-Access-Reason": error_message.replace("\n", ""),
        "X-GRR-Unauthorized-Access-Subject": utils.SmartStr(e.subject)
    }
    return self._BuildResponse(
        403,
        dict(
            message="Access denied by ACL: %s" % error_message,
            subject=utils.SmartStr(e.subject)),
        headers=additional_headers,
        method_name=method_metadata.name,
        no_audit_log=method_metadata.no_audit_log_required,
        token=token)
  except api_call_handler_base.ResourceNotFoundError as e:
    error_message = str(e)
    return self._BuildResponse(
        404,
        dict(message=error_message),
        method_name=method_metadata.name,
        no_audit_log=method_metadata.no_audit_log_required,
        token=token)
  except NotImplementedError as e:
    error_message = str(e)
    return self._BuildResponse(
        501,
        dict(message=error_message),
        method_name=method_metadata.name,
        no_audit_log=method_metadata.no_audit_log_required,
        token=token)
  except Exception as e:  # pylint: disable=broad-except
    error_message = str(e)
    logging.exception("Error while processing %s (%s) with %s: %s",
                      request.path, request.method,
                      handler.__class__.__name__, e)
    return self._BuildResponse(
        500,
        dict(message=error_message, traceBack=traceback.format_exc()),
        method_name=method_metadata.name,
        no_audit_log=method_metadata.no_audit_log_required,
        token=token)

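The routing-and-error-translation pattern above can be sketched without GRR's internals; everything below (the function names, the status table) is an illustrative stand-in, not GRR API:

# Hypothetical, framework-free sketch of the same dispatch pattern:
# route the request, run the handler, translate known exceptions to
# HTTP statuses, and fall back to 500 for anything unexpected.
STATUS_BY_EXCEPTION = [
    (PermissionError, 403),       # stands in for UnauthorizedAccess
    (FileNotFoundError, 404),     # stands in for ResourceNotFoundError
    (NotImplementedError, 501),
]

def handle(request, route, run):
    try:
        handler = route(request)
        return 200, run(handler, request)
    except Exception as e:  # broad on purpose: last-resort 500 below
        for exc_type, status in STATUS_BY_EXCEPTION:
            if isinstance(e, exc_type):
                return status, {"message": str(e)}
        return 500, {"message": str(e)}
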
"def",
"HandleRequest",
"(",
"self",
",",
"request",
")",
":",
"impersonated_username",
"=",
"config",
".",
"CONFIG",
"[",
"\"AdminUI.debug_impersonate_user\"",
"]",
"if",
"impersonated_username",
":",
"logging",
".",
"info",
"(",
"\"Overriding user as %s\"",
",",
"impersonated_username",
")",
"request",
".",
"user",
"=",
"config",
".",
"CONFIG",
"[",
"\"AdminUI.debug_impersonate_user\"",
"]",
"if",
"not",
"aff4_users",
".",
"GRRUser",
".",
"IsValidUsername",
"(",
"request",
".",
"user",
")",
":",
"return",
"self",
".",
"_BuildResponse",
"(",
"403",
",",
"dict",
"(",
"message",
"=",
"\"Invalid username: %s\"",
"%",
"request",
".",
"user",
")",
")",
"try",
":",
"router",
",",
"method_metadata",
",",
"args",
"=",
"self",
".",
"_router_matcher",
".",
"MatchRouter",
"(",
"request",
")",
"except",
"access_control",
".",
"UnauthorizedAccess",
"as",
"e",
":",
"error_message",
"=",
"str",
"(",
"e",
")",
"logging",
".",
"exception",
"(",
"\"Access denied to %s (%s): %s\"",
",",
"request",
".",
"path",
",",
"request",
".",
"method",
",",
"e",
")",
"additional_headers",
"=",
"{",
"\"X-GRR-Unauthorized-Access-Reason\"",
":",
"error_message",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
",",
"\"X-GRR-Unauthorized-Access-Subject\"",
":",
"utils",
".",
"SmartStr",
"(",
"e",
".",
"subject",
")",
"}",
"return",
"self",
".",
"_BuildResponse",
"(",
"403",
",",
"dict",
"(",
"message",
"=",
"\"Access denied by ACL: %s\"",
"%",
"error_message",
",",
"subject",
"=",
"utils",
".",
"SmartStr",
"(",
"e",
".",
"subject",
")",
")",
",",
"headers",
"=",
"additional_headers",
")",
"except",
"ApiCallRouterNotFoundError",
"as",
"e",
":",
"error_message",
"=",
"str",
"(",
"e",
")",
"return",
"self",
".",
"_BuildResponse",
"(",
"404",
",",
"dict",
"(",
"message",
"=",
"error_message",
")",
")",
"except",
"werkzeug_exceptions",
".",
"MethodNotAllowed",
"as",
"e",
":",
"error_message",
"=",
"str",
"(",
"e",
")",
"return",
"self",
".",
"_BuildResponse",
"(",
"405",
",",
"dict",
"(",
"message",
"=",
"error_message",
")",
")",
"except",
"Error",
"as",
"e",
":",
"logging",
".",
"exception",
"(",
"\"Can't match URL to router/method: %s\"",
",",
"e",
")",
"return",
"self",
".",
"_BuildResponse",
"(",
"500",
",",
"dict",
"(",
"message",
"=",
"str",
"(",
"e",
")",
",",
"traceBack",
"=",
"traceback",
".",
"format_exc",
"(",
")",
")",
")",
"request",
".",
"method_metadata",
"=",
"method_metadata",
"request",
".",
"parsed_args",
"=",
"args",
"# SetUID() is called here so that ACL checks done by the router do not",
"# clash with datastore ACL checks.",
"# TODO(user): increase token expiry time.",
"token",
"=",
"self",
".",
"BuildToken",
"(",
"request",
",",
"60",
")",
".",
"SetUID",
"(",
")",
"if",
"data_store",
".",
"AFF4Enabled",
"(",
")",
":",
"# TODO:",
"# AFF4 edge case: if a user is issuing a request, before they are created",
"# using CreateGRRUser (e.g. in E2E tests or with single sign-on),",
"# AFF4's ReadGRRUsers will NEVER contain the user, because the creation",
"# done in the following lines does not add the user to the /users/",
"# collection. Furthermore, subsequent CreateGrrUserHandler calls fail,",
"# because the user technically already exists.",
"# We send a blind-write request to ensure that the user object is created",
"# for a user specified by the username.",
"user_urn",
"=",
"rdfvalue",
".",
"RDFURN",
"(",
"\"aff4:/users/\"",
")",
".",
"Add",
"(",
"request",
".",
"user",
")",
"# We can't use conventional AFF4 interface, since aff4.FACTORY.Create will",
"# create a new version of the object for every call.",
"with",
"data_store",
".",
"DB",
".",
"GetMutationPool",
"(",
")",
"as",
"pool",
":",
"pool",
".",
"MultiSet",
"(",
"user_urn",
",",
"{",
"aff4_users",
".",
"GRRUser",
".",
"SchemaCls",
".",
"TYPE",
":",
"[",
"aff4_users",
".",
"GRRUser",
".",
"__name__",
"]",
",",
"aff4_users",
".",
"GRRUser",
".",
"SchemaCls",
".",
"LAST",
":",
"[",
"rdfvalue",
".",
"RDFDatetime",
".",
"Now",
"(",
")",
".",
"SerializeToDataStore",
"(",
")",
"]",
"}",
",",
"replace",
"=",
"True",
")",
"if",
"data_store",
".",
"RelationalDBEnabled",
"(",
")",
":",
"data_store",
".",
"REL_DB",
".",
"WriteGRRUser",
"(",
"request",
".",
"user",
")",
"handler",
"=",
"None",
"try",
":",
"# ACL checks are done here by the router. If this method succeeds (i.e.",
"# does not raise), then handlers run without further ACL checks (they're",
"# free to do some in their own implementations, though).",
"handler",
"=",
"getattr",
"(",
"router",
",",
"method_metadata",
".",
"name",
")",
"(",
"args",
",",
"token",
"=",
"token",
")",
"if",
"handler",
".",
"args_type",
"!=",
"method_metadata",
".",
"args_type",
":",
"raise",
"RuntimeError",
"(",
"\"Handler args type doesn't match \"",
"\"method args type: %s vs %s\"",
"%",
"(",
"handler",
".",
"args_type",
",",
"method_metadata",
".",
"args_type",
")",
")",
"binary_result_type",
"=",
"(",
"api_call_router",
".",
"RouterMethodMetadata",
".",
"BINARY_STREAM_RESULT_TYPE",
")",
"if",
"(",
"handler",
".",
"result_type",
"!=",
"method_metadata",
".",
"result_type",
"and",
"not",
"(",
"handler",
".",
"result_type",
"is",
"None",
"and",
"method_metadata",
".",
"result_type",
"==",
"binary_result_type",
")",
")",
":",
"raise",
"RuntimeError",
"(",
"\"Handler result type doesn't match \"",
"\"method result type: %s vs %s\"",
"%",
"(",
"handler",
".",
"result_type",
",",
"method_metadata",
".",
"result_type",
")",
")",
"# HEAD method is only used for checking the ACLs for particular API",
"# methods.",
"if",
"request",
".",
"method",
"==",
"\"HEAD\"",
":",
"# If the request would return a stream, we add the Content-Length",
"# header to the response.",
"if",
"(",
"method_metadata",
".",
"result_type",
"==",
"method_metadata",
".",
"BINARY_STREAM_RESULT_TYPE",
")",
":",
"binary_stream",
"=",
"handler",
".",
"Handle",
"(",
"args",
",",
"token",
"=",
"token",
")",
"return",
"self",
".",
"_BuildResponse",
"(",
"200",
",",
"{",
"\"status\"",
":",
"\"OK\"",
"}",
",",
"method_name",
"=",
"method_metadata",
".",
"name",
",",
"no_audit_log",
"=",
"method_metadata",
".",
"no_audit_log_required",
",",
"content_length",
"=",
"binary_stream",
".",
"content_length",
",",
"token",
"=",
"token",
")",
"else",
":",
"return",
"self",
".",
"_BuildResponse",
"(",
"200",
",",
"{",
"\"status\"",
":",
"\"OK\"",
"}",
",",
"method_name",
"=",
"method_metadata",
".",
"name",
",",
"no_audit_log",
"=",
"method_metadata",
".",
"no_audit_log_required",
",",
"token",
"=",
"token",
")",
"if",
"(",
"method_metadata",
".",
"result_type",
"==",
"method_metadata",
".",
"BINARY_STREAM_RESULT_TYPE",
")",
":",
"binary_stream",
"=",
"handler",
".",
"Handle",
"(",
"args",
",",
"token",
"=",
"token",
")",
"return",
"self",
".",
"_BuildStreamingResponse",
"(",
"binary_stream",
",",
"method_name",
"=",
"method_metadata",
".",
"name",
")",
"else",
":",
"format_mode",
"=",
"GetRequestFormatMode",
"(",
"request",
",",
"method_metadata",
")",
"result",
"=",
"self",
".",
"CallApiHandler",
"(",
"handler",
",",
"args",
",",
"token",
"=",
"token",
")",
"rendered_data",
"=",
"self",
".",
"_FormatResultAsJson",
"(",
"result",
",",
"format_mode",
"=",
"format_mode",
")",
"return",
"self",
".",
"_BuildResponse",
"(",
"200",
",",
"rendered_data",
",",
"method_name",
"=",
"method_metadata",
".",
"name",
",",
"no_audit_log",
"=",
"method_metadata",
".",
"no_audit_log_required",
",",
"token",
"=",
"token",
")",
"except",
"access_control",
".",
"UnauthorizedAccess",
"as",
"e",
":",
"error_message",
"=",
"str",
"(",
"e",
")",
"logging",
".",
"warning",
"(",
"\"Access denied for %s (HTTP %s %s): %s\"",
",",
"method_metadata",
".",
"name",
",",
"request",
".",
"method",
",",
"request",
".",
"path",
",",
"error_message",
")",
"additional_headers",
"=",
"{",
"\"X-GRR-Unauthorized-Access-Reason\"",
":",
"error_message",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
",",
"\"X-GRR-Unauthorized-Access-Subject\"",
":",
"utils",
".",
"SmartStr",
"(",
"e",
".",
"subject",
")",
"}",
"return",
"self",
".",
"_BuildResponse",
"(",
"403",
",",
"dict",
"(",
"message",
"=",
"\"Access denied by ACL: %s\"",
"%",
"error_message",
",",
"subject",
"=",
"utils",
".",
"SmartStr",
"(",
"e",
".",
"subject",
")",
")",
",",
"headers",
"=",
"additional_headers",
",",
"method_name",
"=",
"method_metadata",
".",
"name",
",",
"no_audit_log",
"=",
"method_metadata",
".",
"no_audit_log_required",
",",
"token",
"=",
"token",
")",
"except",
"api_call_handler_base",
".",
"ResourceNotFoundError",
"as",
"e",
":",
"error_message",
"=",
"str",
"(",
"e",
")",
"return",
"self",
".",
"_BuildResponse",
"(",
"404",
",",
"dict",
"(",
"message",
"=",
"error_message",
")",
",",
"method_name",
"=",
"method_metadata",
".",
"name",
",",
"no_audit_log",
"=",
"method_metadata",
".",
"no_audit_log_required",
",",
"token",
"=",
"token",
")",
"except",
"NotImplementedError",
"as",
"e",
":",
"error_message",
"=",
"str",
"(",
"e",
")",
"return",
"self",
".",
"_BuildResponse",
"(",
"501",
",",
"dict",
"(",
"message",
"=",
"error_message",
")",
",",
"method_name",
"=",
"method_metadata",
".",
"name",
",",
"no_audit_log",
"=",
"method_metadata",
".",
"no_audit_log_required",
",",
"token",
"=",
"token",
")",
"except",
"Exception",
"as",
"e",
":",
"# pylint: disable=broad-except",
"error_message",
"=",
"str",
"(",
"e",
")",
"logging",
".",
"exception",
"(",
"\"Error while processing %s (%s) with %s: %s\"",
",",
"request",
".",
"path",
",",
"request",
".",
"method",
",",
"handler",
".",
"__class__",
".",
"__name__",
",",
"e",
")",
"return",
"self",
".",
"_BuildResponse",
"(",
"500",
",",
"dict",
"(",
"message",
"=",
"error_message",
",",
"traceBack",
"=",
"traceback",
".",
"format_exc",
"(",
")",
")",
",",
"method_name",
"=",
"method_metadata",
".",
"name",
",",
"no_audit_log",
"=",
"method_metadata",
".",
"no_audit_log_required",
",",
"token",
"=",
"token",
")"
] | 41.846154 | 20.148352 |
def parse_args():
    """
    Parse arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu-id', type=int, default=0,
                        help='GPU id (-1 means CPU)')
    parser.add_argument('--train-file', default='snli_1.0/snli_1.0_train.txt',
                        help='training set file')
    parser.add_argument('--test-file', default='snli_1.0/snli_1.0_dev.txt',
                        help='validation set file')
    parser.add_argument('--max-num-examples', type=int, default=-1,
                        help='maximum number of examples to load (for debugging)')
    parser.add_argument('--batch-size', type=int, default=32,
                        help='batch size')
    parser.add_argument('--print-interval', type=int, default=20,
                        help='the interval between two prints')
    parser.add_argument('--mode', choices=['train', 'test'], default='train',
                        help='train or test')
    parser.add_argument('--lr', type=float, default=0.025,
                        help='learning rate')
    parser.add_argument('--epochs', type=int, default=300,
                        help='maximum number of epochs to train')
    parser.add_argument('--embedding', default='glove',
                        help='word embedding type')
    parser.add_argument('--embedding-source', default='glove.840B.300d',
                        help='embedding file source')
    parser.add_argument('--embedding-size', type=int, default=300,
                        help='size of pretrained word embedding')
    parser.add_argument('--hidden-size', type=int, default=200,
                        help='hidden layer size')
    parser.add_argument('--output-dir', default='./output',
                        help='directory for all experiment output')
    parser.add_argument('--model-dir', default='./output',
                        help='directory to load the model from')
    parser.add_argument('--seed', type=int, default=0,
                        help='random seed')
    parser.add_argument('--dropout', type=float, default=0.,
                        help='dropout rate')
    parser.add_argument('--weight-decay', type=float, default=0.,
                        help='l2 regularization weight')
    parser.add_argument('--intra-attention', action='store_true',
                        help='use intra-sentence attention')
    return parser.parse_args()

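A usage sketch (flag names come from the parser above; argparse turns dashes into underscores on the returned namespace):

# e.g. invoked as: python main.py --mode train --batch-size 64 --lr 0.05
args = parse_args()
print(args.mode, args.batch_size, args.lr, args.intra_attention)
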
"def",
"parse_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'--gpu-id'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"'GPU id (-1 means CPU)'",
")",
"parser",
".",
"add_argument",
"(",
"'--train-file'",
",",
"default",
"=",
"'snli_1.0/snli_1.0_train.txt'",
",",
"help",
"=",
"'training set file'",
")",
"parser",
".",
"add_argument",
"(",
"'--test-file'",
",",
"default",
"=",
"'snli_1.0/snli_1.0_dev.txt'",
",",
"help",
"=",
"'validation set file'",
")",
"parser",
".",
"add_argument",
"(",
"'--max-num-examples'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"-",
"1",
",",
"help",
"=",
"'maximum number of examples to load (for debugging)'",
")",
"parser",
".",
"add_argument",
"(",
"'--batch-size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"32",
",",
"help",
"=",
"'batch size'",
")",
"parser",
".",
"add_argument",
"(",
"'--print-interval'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"20",
",",
"help",
"=",
"'the interval of two print'",
")",
"parser",
".",
"add_argument",
"(",
"'--mode'",
",",
"choices",
"=",
"[",
"'train'",
",",
"'test'",
"]",
",",
"default",
"=",
"'train'",
",",
"help",
"=",
"'train or test'",
")",
"parser",
".",
"add_argument",
"(",
"'--lr'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.025",
",",
"help",
"=",
"'learning rate'",
")",
"parser",
".",
"add_argument",
"(",
"'--epochs'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"300",
",",
"help",
"=",
"'maximum number of epochs to train'",
")",
"parser",
".",
"add_argument",
"(",
"'--embedding'",
",",
"default",
"=",
"'glove'",
",",
"help",
"=",
"'word embedding type'",
")",
"parser",
".",
"add_argument",
"(",
"'--embedding-source'",
",",
"default",
"=",
"'glove.840B.300d'",
",",
"help",
"=",
"'embedding file source'",
")",
"parser",
".",
"add_argument",
"(",
"'--embedding-size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"300",
",",
"help",
"=",
"'size of pretrained word embedding'",
")",
"parser",
".",
"add_argument",
"(",
"'--hidden-size'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"200",
",",
"help",
"=",
"'hidden layer size'",
")",
"parser",
".",
"add_argument",
"(",
"'--output-dir'",
",",
"default",
"=",
"'./output'",
",",
"help",
"=",
"'directory for all experiment output'",
")",
"parser",
".",
"add_argument",
"(",
"'--model-dir'",
",",
"default",
"=",
"'./output'",
",",
"help",
"=",
"'directory to load model'",
")",
"parser",
".",
"add_argument",
"(",
"'--seed'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"0",
",",
"help",
"=",
"'random seed'",
")",
"parser",
".",
"add_argument",
"(",
"'--dropout'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.",
",",
"help",
"=",
"'dropout rate'",
")",
"parser",
".",
"add_argument",
"(",
"'--weight-decay'",
",",
"type",
"=",
"float",
",",
"default",
"=",
"0.",
",",
"help",
"=",
"'l2 regularization weight'",
")",
"parser",
".",
"add_argument",
"(",
"'--intra-attention'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'use intra-sentence attention'",
")",
"return",
"parser",
".",
"parse_args",
"(",
")"
] | 52.511111 | 18.2 |
def get(key, default=''):
    '''
    defaults.get is used much like pillar.get except that it will read
    a default value for a pillar from defaults.json or defaults.yaml
    files that are stored in the root of a salt formula.

    CLI Example:

    .. code-block:: bash

        salt '*' defaults.get core:users:root

    The defaults are computed from the pillar key. The first entry is
    considered the formula namespace.
    For example, querying ``core:users:root`` will try to load
    ``salt://core/defaults.yaml`` and ``salt://core/defaults.json``.
    '''
    # Determine formula namespace from query
    if ':' in key:
        namespace, key = key.split(':', 1)
    else:
        namespace, key = key, None

    # Fetch and load defaults formula files from states.
    defaults = _load(namespace)

    # Fetch value
    if key:
        return salt.utils.data.traverse_dict_and_list(defaults, key, default)
    else:
        return defaults

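To make the namespace handling concrete, the split keeps everything after the first colon as the remaining lookup path:

# 'core:users:root' -> namespace 'core', remaining key 'users:root';
# defaults then come from salt://core/defaults.yaml or defaults.json.
namespace, rest = 'core:users:root'.split(':', 1)
print(namespace, rest)  # core users:root
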
"def",
"get",
"(",
"key",
",",
"default",
"=",
"''",
")",
":",
"# Determine formula namespace from query",
"if",
"':'",
"in",
"key",
":",
"namespace",
",",
"key",
"=",
"key",
".",
"split",
"(",
"':'",
",",
"1",
")",
"else",
":",
"namespace",
",",
"key",
"=",
"key",
",",
"None",
"# Fetch and load defaults formula files from states.",
"defaults",
"=",
"_load",
"(",
"namespace",
")",
"# Fetch value",
"if",
"key",
":",
"return",
"salt",
".",
"utils",
".",
"data",
".",
"traverse_dict_and_list",
"(",
"defaults",
",",
"key",
",",
"default",
")",
"else",
":",
"return",
"defaults"
] | 27.969697 | 25.727273 |
def send(self, data):
    """Send a formatted message to the ADB server"""
    self._send_data(int_to_hex(len(data)))
    self._send_data(data)

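``int_to_hex`` is not shown in this snippet. In the ADB server's smart-socket framing the payload is preceded by its length as a 4-character ASCII hex string, so a plausible stand-in (an assumption, not the original helper) is:

def int_to_hex(n):
    # ADB framing assumption: 4-character ASCII hex length prefix,
    # e.g. 12 -> b'000c'.
    return b'%04x' % n

# send(b'host:version') would then transmit b'000c' + b'host:version'.
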
"def",
"send",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"_send_data",
"(",
"int_to_hex",
"(",
"len",
"(",
"data",
")",
")",
")",
"self",
".",
"_send_data",
"(",
"data",
")"
] | 30.4 | 15.2 |
def link_label(link):
    '''return a link label as a string'''
    if hasattr(link, 'label'):
        label = link.label
    else:
        label = str(link.linknum + 1)
    return label

"def",
"link_label",
"(",
"link",
")",
":",
"if",
"hasattr",
"(",
"link",
",",
"'label'",
")",
":",
"label",
"=",
"link",
".",
"label",
"else",
":",
"label",
"=",
"str",
"(",
"link",
".",
"linknum",
"+",
"1",
")",
"return",
"label"
] | 28.857143 | 12.571429 |
def combine_intersections(
    intersections, nodes1, degree1, nodes2, degree2, all_types
):
    r"""Combine curve-curve intersections into curved polygon(s).

    .. note::

        This is a helper used only by :meth:`.Surface.intersect`.

    Does so assuming each intersection lies on an edge of one of
    two :class:`.Surface`-s.

    .. note::

        This assumes that each ``intersection`` has been classified via
        :func:`classify_intersection` and only the intersections classified
        as ``FIRST`` and ``SECOND`` were kept.

    Args:
        intersections (List[.Intersection]): Intersections from each of the
            9 edge-edge pairs from a surface-surface pairing.
        nodes1 (numpy.ndarray): The nodes defining the first surface in
            the intersection (assumed in :math:`\mathbf{R}^2`).
        degree1 (int): The degree of the surface given by ``nodes1``.
        nodes2 (numpy.ndarray): The nodes defining the second surface in
            the intersection (assumed in :math:`\mathbf{R}^2`).
        degree2 (int): The degree of the surface given by ``nodes2``.
        all_types (Set[.IntersectionClassification]): The set of all
            intersection classifications encountered among the intersections
            for the given surface-surface pair.

    Returns:
        Tuple[Optional[list], Optional[bool]]: Pair (2-tuple) of

        * List of "edge info" lists. Each list represents a curved polygon
          and contains 3-tuples of edge index, start and end (see the
          output of :func:`ends_to_curve`).
        * "Contained" boolean. If not :data:`None`, indicates
          that one of the surfaces is contained in the other.
    """
    if intersections:
        return basic_interior_combine(intersections)
    elif all_types:
        return tangent_only_intersections(all_types)
    else:
        return no_intersections(nodes1, degree1, nodes2, degree2)

"def",
"combine_intersections",
"(",
"intersections",
",",
"nodes1",
",",
"degree1",
",",
"nodes2",
",",
"degree2",
",",
"all_types",
")",
":",
"if",
"intersections",
":",
"return",
"basic_interior_combine",
"(",
"intersections",
")",
"elif",
"all_types",
":",
"return",
"tangent_only_intersections",
"(",
"all_types",
")",
"else",
":",
"return",
"no_intersections",
"(",
"nodes1",
",",
"degree1",
",",
"nodes2",
",",
"degree2",
")"
] | 39.104167 | 26.229167 |
def open_macros(self, filepath):
    """Loads macros from file and marks grid as changed

    Parameters
    ----------
    filepath: String
    \tPath to macro file

    """
    try:
        wx.BeginBusyCursor()
        self.main_window.grid.Disable()
        with open(filepath) as macro_infile:
            # Enter safe mode
            self.main_window.grid.actions.enter_safe_mode()
            post_command_event(self.main_window, self.SafeModeEntryMsg)

            # Mark content as changed
            post_command_event(self.main_window, self.ContentChangedMsg)

            macrocode = macro_infile.read()
            self.grid.code_array.macros += "\n" + macrocode.strip("\n")
            self.grid.main_window.macro_panel.codetext_ctrl.SetText(
                self.grid.code_array.macros)
    except IOError:
        msg = _("Error opening file {filepath}.").format(filepath=filepath)
        post_command_event(self.main_window, self.StatusBarMsg, text=msg)
        return False
    finally:
        self.main_window.grid.Enable()
        wx.EndBusyCursor()

    # Mark content as changed
    try:
        post_command_event(self.main_window, self.ContentChangedMsg)
    except TypeError:
        # The main window does not exist any more
        pass

"def",
"open_macros",
"(",
"self",
",",
"filepath",
")",
":",
"try",
":",
"wx",
".",
"BeginBusyCursor",
"(",
")",
"self",
".",
"main_window",
".",
"grid",
".",
"Disable",
"(",
")",
"with",
"open",
"(",
"filepath",
")",
"as",
"macro_infile",
":",
"# Enter safe mode",
"self",
".",
"main_window",
".",
"grid",
".",
"actions",
".",
"enter_safe_mode",
"(",
")",
"post_command_event",
"(",
"self",
".",
"main_window",
",",
"self",
".",
"SafeModeEntryMsg",
")",
"# Mark content as changed",
"post_command_event",
"(",
"self",
".",
"main_window",
",",
"self",
".",
"ContentChangedMsg",
")",
"macrocode",
"=",
"macro_infile",
".",
"read",
"(",
")",
"self",
".",
"grid",
".",
"code_array",
".",
"macros",
"+=",
"\"\\n\"",
"+",
"macrocode",
".",
"strip",
"(",
"\"\\n\"",
")",
"self",
".",
"grid",
".",
"main_window",
".",
"macro_panel",
".",
"codetext_ctrl",
".",
"SetText",
"(",
"self",
".",
"grid",
".",
"code_array",
".",
"macros",
")",
"except",
"IOError",
":",
"msg",
"=",
"_",
"(",
"\"Error opening file {filepath}.\"",
")",
".",
"format",
"(",
"filepath",
"=",
"filepath",
")",
"post_command_event",
"(",
"self",
".",
"main_window",
",",
"self",
".",
"StatusBarMsg",
",",
"text",
"=",
"msg",
")",
"return",
"False",
"finally",
":",
"self",
".",
"main_window",
".",
"grid",
".",
"Enable",
"(",
")",
"wx",
".",
"EndBusyCursor",
"(",
")",
"# Mark content as changed",
"try",
":",
"post_command_event",
"(",
"self",
".",
"main_window",
",",
"self",
".",
"ContentChangedMsg",
")",
"except",
"TypeError",
":",
"# The main window does not exist any more",
"pass"
] | 30.155556 | 23.444444 |
def replace_customer_by_id(cls, customer_id, customer, **kwargs):
    """Replace Customer

    Replace all attributes of Customer.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.replace_customer_by_id(customer_id, customer, async=True)
    >>> result = thread.get()

    :param async bool
    :param str customer_id: ID of customer to replace (required)
    :param Customer customer: Attributes of customer to replace (required)
    :return: Customer
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._replace_customer_by_id_with_http_info(customer_id, customer, **kwargs)
    else:
        (data) = cls._replace_customer_by_id_with_http_info(customer_id, customer, **kwargs)
        return data

"def",
"replace_customer_by_id",
"(",
"cls",
",",
"customer_id",
",",
"customer",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"cls",
".",
"_replace_customer_by_id_with_http_info",
"(",
"customer_id",
",",
"customer",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"cls",
".",
"_replace_customer_by_id_with_http_info",
"(",
"customer_id",
",",
"customer",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | 45.090909 | 22.454545 |
def _build_appid_info(self, master, dict):
    '''
    Builds the appid info object

    @param master: the master dict
    @param dict: the dict object received
    @return: the appid info object
    '''
    master['total_crawlids'] = 0
    master['total_pending'] = 0
    master['total_domains'] = 0
    master['crawlids'] = {}
    master['appid'] = dict['appid']
    master['spiderid'] = dict['spiderid']

    # used for finding total count of domains
    domain_dict = {}

    # get all domain queues
    match_string = '{sid}:*:queue'.format(sid=dict['spiderid'])
    for key in self.redis_conn.scan_iter(match=match_string):
        domain = key.split(":")[1]
        sortedDict = self._get_bin(key)

        # now iterate through binned dict
        for score in sortedDict:
            for item in sortedDict[score]:
                if 'meta' in item:
                    item = item['meta']
                if item['appid'] == dict['appid']:
                    crawlid = item['crawlid']

                    # add new crawlid to master dict
                    if crawlid not in master['crawlids']:
                        master['crawlids'][crawlid] = {
                            'total': 0,
                            'domains': {},
                            'distinct_domains': 0
                        }
                        if 'expires' in item and item['expires'] != 0:
                            master['crawlids'][crawlid]['expires'] = item['expires']
                        master['total_crawlids'] += 1

                    master['crawlids'][crawlid]['total'] = master['crawlids'][crawlid]['total'] + 1

                    if domain not in master['crawlids'][crawlid]['domains']:
                        master['crawlids'][crawlid]['domains'][domain] = {
                            'total': 0,
                            'high_priority': -9999,
                            'low_priority': 9999,
                        }
                        master['crawlids'][crawlid]['distinct_domains'] += 1
                        domain_dict[domain] = True

                    master['crawlids'][crawlid]['domains'][domain]['total'] = master['crawlids'][crawlid]['domains'][domain]['total'] + 1

                    if item['priority'] > master['crawlids'][crawlid]['domains'][domain]['high_priority']:
                        master['crawlids'][crawlid]['domains'][domain]['high_priority'] = item['priority']

                    if item['priority'] < master['crawlids'][crawlid]['domains'][domain]['low_priority']:
                        master['crawlids'][crawlid]['domains'][domain]['low_priority'] = item['priority']

                    master['total_pending'] += 1

    master['total_domains'] = len(domain_dict)

    return master

"def",
"_build_appid_info",
"(",
"self",
",",
"master",
",",
"dict",
")",
":",
"master",
"[",
"'total_crawlids'",
"]",
"=",
"0",
"master",
"[",
"'total_pending'",
"]",
"=",
"0",
"master",
"[",
"'total_domains'",
"]",
"=",
"0",
"master",
"[",
"'crawlids'",
"]",
"=",
"{",
"}",
"master",
"[",
"'appid'",
"]",
"=",
"dict",
"[",
"'appid'",
"]",
"master",
"[",
"'spiderid'",
"]",
"=",
"dict",
"[",
"'spiderid'",
"]",
"# used for finding total count of domains",
"domain_dict",
"=",
"{",
"}",
"# get all domain queues",
"match_string",
"=",
"'{sid}:*:queue'",
".",
"format",
"(",
"sid",
"=",
"dict",
"[",
"'spiderid'",
"]",
")",
"for",
"key",
"in",
"self",
".",
"redis_conn",
".",
"scan_iter",
"(",
"match",
"=",
"match_string",
")",
":",
"domain",
"=",
"key",
".",
"split",
"(",
"\":\"",
")",
"[",
"1",
"]",
"sortedDict",
"=",
"self",
".",
"_get_bin",
"(",
"key",
")",
"# now iterate through binned dict",
"for",
"score",
"in",
"sortedDict",
":",
"for",
"item",
"in",
"sortedDict",
"[",
"score",
"]",
":",
"if",
"'meta'",
"in",
"item",
":",
"item",
"=",
"item",
"[",
"'meta'",
"]",
"if",
"item",
"[",
"'appid'",
"]",
"==",
"dict",
"[",
"'appid'",
"]",
":",
"crawlid",
"=",
"item",
"[",
"'crawlid'",
"]",
"# add new crawlid to master dict",
"if",
"crawlid",
"not",
"in",
"master",
"[",
"'crawlids'",
"]",
":",
"master",
"[",
"'crawlids'",
"]",
"[",
"crawlid",
"]",
"=",
"{",
"'total'",
":",
"0",
",",
"'domains'",
":",
"{",
"}",
",",
"'distinct_domains'",
":",
"0",
"}",
"if",
"'expires'",
"in",
"item",
"and",
"item",
"[",
"'expires'",
"]",
"!=",
"0",
":",
"master",
"[",
"'crawlids'",
"]",
"[",
"crawlid",
"]",
"[",
"'expires'",
"]",
"=",
"item",
"[",
"'expires'",
"]",
"master",
"[",
"'total_crawlids'",
"]",
"+=",
"1",
"master",
"[",
"'crawlids'",
"]",
"[",
"crawlid",
"]",
"[",
"'total'",
"]",
"=",
"master",
"[",
"'crawlids'",
"]",
"[",
"crawlid",
"]",
"[",
"'total'",
"]",
"+",
"1",
"if",
"domain",
"not",
"in",
"master",
"[",
"'crawlids'",
"]",
"[",
"crawlid",
"]",
"[",
"'domains'",
"]",
":",
"master",
"[",
"'crawlids'",
"]",
"[",
"crawlid",
"]",
"[",
"'domains'",
"]",
"[",
"domain",
"]",
"=",
"{",
"'total'",
":",
"0",
",",
"'high_priority'",
":",
"-",
"9999",
",",
"'low_priority'",
":",
"9999",
",",
"}",
"master",
"[",
"'crawlids'",
"]",
"[",
"crawlid",
"]",
"[",
"'distinct_domains'",
"]",
"+=",
"1",
"domain_dict",
"[",
"domain",
"]",
"=",
"True",
"master",
"[",
"'crawlids'",
"]",
"[",
"crawlid",
"]",
"[",
"'domains'",
"]",
"[",
"domain",
"]",
"[",
"'total'",
"]",
"=",
"master",
"[",
"'crawlids'",
"]",
"[",
"crawlid",
"]",
"[",
"'domains'",
"]",
"[",
"domain",
"]",
"[",
"'total'",
"]",
"+",
"1",
"if",
"item",
"[",
"'priority'",
"]",
">",
"master",
"[",
"'crawlids'",
"]",
"[",
"crawlid",
"]",
"[",
"'domains'",
"]",
"[",
"domain",
"]",
"[",
"'high_priority'",
"]",
":",
"master",
"[",
"'crawlids'",
"]",
"[",
"crawlid",
"]",
"[",
"'domains'",
"]",
"[",
"domain",
"]",
"[",
"'high_priority'",
"]",
"=",
"item",
"[",
"'priority'",
"]",
"if",
"item",
"[",
"'priority'",
"]",
"<",
"master",
"[",
"'crawlids'",
"]",
"[",
"crawlid",
"]",
"[",
"'domains'",
"]",
"[",
"domain",
"]",
"[",
"'low_priority'",
"]",
":",
"master",
"[",
"'crawlids'",
"]",
"[",
"crawlid",
"]",
"[",
"'domains'",
"]",
"[",
"domain",
"]",
"[",
"'low_priority'",
"]",
"=",
"item",
"[",
"'priority'",
"]",
"master",
"[",
"'total_pending'",
"]",
"+=",
"1",
"master",
"[",
"'total_domains'",
"]",
"=",
"len",
"(",
"domain_dict",
")",
"return",
"master"
] | 41.614286 | 24.585714 |
def get_medium(self, agent_type, index=0):
    '''Returns the medium class for the
    given agent_type. Optional index tells which one to give.'''
    mediums = list(x for x in self.agency._agents
                   if x.get_descriptor().type_name == agent_type)
    try:
        return mediums[index]
    except IndexError:  # list indexing raises IndexError, not KeyError
        # Python 2 three-expression raise: re-raise with the original
        # traceback attached.
        raise RuntimeError(
            'Requested medium class of %s with index %d, but found only '
            '%d mediums of this type'
            % (agent_type, index, len(mediums))), None, sys.exc_info()[2]

"def",
"get_medium",
"(",
"self",
",",
"agent_type",
",",
"index",
"=",
"0",
")",
":",
"mediums",
"=",
"list",
"(",
"x",
"for",
"x",
"in",
"self",
".",
"agency",
".",
"_agents",
"if",
"x",
".",
"get_descriptor",
"(",
")",
".",
"type_name",
"==",
"agent_type",
")",
"try",
":",
"return",
"mediums",
"[",
"index",
"]",
"except",
"KeyError",
":",
"raise",
"RuntimeError",
"(",
"'Requested medium class of %s with index %d, but found only '",
"'%d medium of this type'",
"%",
"(",
"agent_type",
",",
"index",
",",
"len",
"(",
"mediums",
")",
")",
")",
",",
"None",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]"
] | 47.416667 | 17.416667 |
def netconf_state_schemas_schema_version(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    netconf_state = ET.SubElement(config, "netconf-state", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring")
    schemas = ET.SubElement(netconf_state, "schemas")
    schema = ET.SubElement(schemas, "schema")
    identifier_key = ET.SubElement(schema, "identifier")
    identifier_key.text = kwargs.pop('identifier')
    format_key = ET.SubElement(schema, "format")
    format_key.text = kwargs.pop('format')
    version = ET.SubElement(schema, "version")
    version.text = kwargs.pop('version')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)

"def",
"netconf_state_schemas_schema_version",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"netconf_state",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"netconf-state\"",
",",
"xmlns",
"=",
"\"urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring\"",
")",
"schemas",
"=",
"ET",
".",
"SubElement",
"(",
"netconf_state",
",",
"\"schemas\"",
")",
"schema",
"=",
"ET",
".",
"SubElement",
"(",
"schemas",
",",
"\"schema\"",
")",
"identifier_key",
"=",
"ET",
".",
"SubElement",
"(",
"schema",
",",
"\"identifier\"",
")",
"identifier_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'identifier'",
")",
"format_key",
"=",
"ET",
".",
"SubElement",
"(",
"schema",
",",
"\"format\"",
")",
"format_key",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'format'",
")",
"version",
"=",
"ET",
".",
"SubElement",
"(",
"schema",
",",
"\"version\"",
")",
"version",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'version'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | 47.375 | 16.3125 |
def remove_finished_work_units(self, work_spec_name, work_unit_names):
    '''Remove some finished work units.

    If `work_unit_names` is :const:`None` (which must be passed
    explicitly), all finished work units in `work_spec_name` are
    removed; otherwise only the specific named work units will be.

    :param str work_spec_name: name of the work spec
    :param list work_unit_names: names of the work units, or
      :const:`None` for all in `work_spec_name`
    :return: number of work units removed

    '''
    return self._remove_some_work_units(
        work_spec_name, work_unit_names, suffix=_FINISHED)

"def",
"remove_finished_work_units",
"(",
"self",
",",
"work_spec_name",
",",
"work_unit_names",
")",
":",
"return",
"self",
".",
"_remove_some_work_units",
"(",
"work_spec_name",
",",
"work_unit_names",
",",
"suffix",
"=",
"_FINISHED",
")"
] | 43.4 | 23.266667 |
def rsync_upload():
    """
    Uploads the project with rsync excluding some files and folders.
    """
    excludes = ["*.pyc", "*.pyo", "*.db", ".DS_Store", ".coverage",
                "local_settings.py", "/static", "/.git", "/.hg"]
    local_dir = os.getcwd() + os.sep
    return rsync_project(remote_dir=env.proj_path, local_dir=local_dir,
                         exclude=excludes)

"def",
"rsync_upload",
"(",
")",
":",
"excludes",
"=",
"[",
"\"*.pyc\"",
",",
"\"*.pyo\"",
",",
"\"*.db\"",
",",
"\".DS_Store\"",
",",
"\".coverage\"",
",",
"\"local_settings.py\"",
",",
"\"/static\"",
",",
"\"/.git\"",
",",
"\"/.hg\"",
"]",
"local_dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"+",
"os",
".",
"sep",
"return",
"rsync_project",
"(",
"remote_dir",
"=",
"env",
".",
"proj_path",
",",
"local_dir",
"=",
"local_dir",
",",
"exclude",
"=",
"excludes",
")"
] | 42.333333 | 15.222222 |
def get_checks_status_counts(self, checks=None):
    """ Compute the counts of the different check statuses and
    return them as a defaultdict(int) with the keys being the different
    statuses and the values being the count of the checks in that status.

    :param checks: None or the checks you want the status counts for.
                   If None then self.checks is used.
    :type checks: None | dict
    :return: "total" count plus a count per encountered status
    :rtype: defaultdict(int)
    """
    if checks is None:
        checks = self.checks

    res = defaultdict(int)
    res["total"] = len(checks)
    for chk in checks.values():
        res[chk.status] += 1

    return res

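A self-contained trace of the counting loop (the `Check` stub stands in for the real check objects, which only need a ``status`` attribute here):

from collections import defaultdict, namedtuple

Check = namedtuple('Check', 'status')
checks = {1: Check('OK'), 2: Check('OK'), 3: Check('CRITICAL')}

res = defaultdict(int)
res['total'] = len(checks)
for chk in checks.values():
    res[chk.status] += 1
print(dict(res))  # {'total': 3, 'OK': 2, 'CRITICAL': 1}
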
"def",
"get_checks_status_counts",
"(",
"self",
",",
"checks",
"=",
"None",
")",
":",
"if",
"checks",
"is",
"None",
":",
"checks",
"=",
"self",
".",
"checks",
"res",
"=",
"defaultdict",
"(",
"int",
")",
"res",
"[",
"\"total\"",
"]",
"=",
"len",
"(",
"checks",
")",
"for",
"chk",
"in",
"checks",
".",
"values",
"(",
")",
":",
"res",
"[",
"chk",
".",
"status",
"]",
"+=",
"1",
"return",
"res"
] | 34.142857 | 16.619048 |
def neg_loglikelihood(y, mean, scale, shape, skewness):
    """ Negative loglikelihood function

    Parameters
    ----------
    y : np.ndarray
        univariate time series
    mean : np.ndarray
        array of location parameters for the Poisson distribution
    scale : float
        scale parameter for the Poisson distribution
    shape : float
        tail thickness parameter for the Poisson distribution
    skewness : float
        skewness parameter for the Poisson distribution

    Returns
    ----------
    - Negative loglikelihood of the Poisson family
    """
    return -np.sum(-mean + np.log(mean)*y - sp.gammaln(y + 1))

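Spelled out, the return value is the negative Poisson log-likelihood; ``sp.gammaln(y + 1)`` supplies the log(y!) term, while ``scale``, ``shape`` and ``skewness`` are unused and exist only for the family's common interface. In LaTeX:

    -\ell(\mu; y) = -\sum_i \left( -\mu_i + y_i \log \mu_i - \log y_i! \right)
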
"def",
"neg_loglikelihood",
"(",
"y",
",",
"mean",
",",
"scale",
",",
"shape",
",",
"skewness",
")",
":",
"return",
"-",
"np",
".",
"sum",
"(",
"-",
"mean",
"+",
"np",
".",
"log",
"(",
"mean",
")",
"*",
"y",
"-",
"sp",
".",
"gammaln",
"(",
"y",
"+",
"1",
")",
")"
] | 27.76 | 22.72 |
def CreatedField(name='created', tz_aware=False, **kwargs):
    ''' A shortcut field for creation time. It sets the current date and time
        when it enters the database and then doesn't update on further saves.

        If you've used the Django ORM, this is the equivalent of auto_now_add

        :param tz_aware: If this is True, the value will be returned in the
            local time of the session. It is always saved in UTC
    '''
    @computed_field(DateTimeField(), one_time=True, **kwargs)
    def created(obj):
        if tz_aware:
            import pytz
            return pytz.utc.localize(datetime.utcnow())
        return datetime.utcnow()
    created.__name__ = name
    return created

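A hypothetical usage sketch in a mongoalchemy-style document class (the class and its other fields are illustrative, not from the source):

class BlogPost(Document):
    title = StringField()
    created = CreatedField()   # stamped once, on first save, stored in UTC
    # tz-aware variant, returned in the session's local time:
    created_tz = CreatedField('created_tz', tz_aware=True)
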
"def",
"CreatedField",
"(",
"name",
"=",
"'created'",
",",
"tz_aware",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"@",
"computed_field",
"(",
"DateTimeField",
"(",
")",
",",
"one_time",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
"def",
"created",
"(",
"obj",
")",
":",
"if",
"tz_aware",
":",
"import",
"pytz",
"return",
"pytz",
".",
"utc",
".",
"localize",
"(",
"datetime",
".",
"utcnow",
"(",
")",
")",
"return",
"datetime",
".",
"utcnow",
"(",
")",
"created",
".",
"__name__",
"=",
"name",
"return",
"created"
] | 41.647059 | 26.588235 |
def get_conn():
    '''
    Return a conn object for the passed VM data
    '''
    vm_ = get_configured_provider()
    kwargs = vm_.copy()  # pylint: disable=E1103

    kwargs['username'] = vm_['user']
    kwargs['project_id'] = vm_['tenant']
    kwargs['auth_url'] = vm_['identity_url']
    kwargs['region_name'] = vm_['compute_region']
    kwargs['use_keystoneauth'] = vm_.get('use_keystoneauth', False)

    if 'password' in vm_:
        kwargs['password'] = vm_['password']

    # Read via kwargs so a missing use_keystoneauth key cannot raise KeyError.
    if 'verify' in vm_ and kwargs['use_keystoneauth'] is True:
        kwargs['verify'] = vm_['verify']
    elif 'verify' in vm_ and kwargs['use_keystoneauth'] is False:
        log.warning('SSL Certificate verification option is specified but use_keystoneauth is False or not present')

    conn = nova.SaltNova(**kwargs)
    return conn

"def",
"get_conn",
"(",
")",
":",
"vm_",
"=",
"get_configured_provider",
"(",
")",
"kwargs",
"=",
"vm_",
".",
"copy",
"(",
")",
"# pylint: disable=E1103",
"kwargs",
"[",
"'username'",
"]",
"=",
"vm_",
"[",
"'user'",
"]",
"kwargs",
"[",
"'project_id'",
"]",
"=",
"vm_",
"[",
"'tenant'",
"]",
"kwargs",
"[",
"'auth_url'",
"]",
"=",
"vm_",
"[",
"'identity_url'",
"]",
"kwargs",
"[",
"'region_name'",
"]",
"=",
"vm_",
"[",
"'compute_region'",
"]",
"kwargs",
"[",
"'use_keystoneauth'",
"]",
"=",
"vm_",
".",
"get",
"(",
"'use_keystoneauth'",
",",
"False",
")",
"if",
"'password'",
"in",
"vm_",
":",
"kwargs",
"[",
"'password'",
"]",
"=",
"vm_",
"[",
"'password'",
"]",
"if",
"'verify'",
"in",
"vm_",
"and",
"vm_",
"[",
"'use_keystoneauth'",
"]",
"is",
"True",
":",
"kwargs",
"[",
"'verify'",
"]",
"=",
"vm_",
"[",
"'verify'",
"]",
"elif",
"'verify'",
"in",
"vm_",
"and",
"vm_",
"[",
"'use_keystoneauth'",
"]",
"is",
"False",
":",
"log",
".",
"warning",
"(",
"'SSL Certificate verification option is specified but use_keystoneauth is False or not present'",
")",
"conn",
"=",
"nova",
".",
"SaltNova",
"(",
"*",
"*",
"kwargs",
")",
"return",
"conn"
] | 32.916667 | 21.75 |
def read_colortable(fobj):
    r"""Read colortable information from a file.

    Reads a colortable, which consists of one color per line of the file, where
    a color can be one of: a tuple of 3 floats, a string with a HTML color name,
    or a string with a HTML hex color.

    Parameters
    ----------
    fobj : a file-like object
        A file-like object to read the colors from

    Returns
    -------
    List of tuples
        A list of the RGB color values, where each RGB color is a tuple of 3 floats in the
        range of [0, 1].

    """
    ret = []
    try:
        for line in fobj:
            literal = _parse(line)
            if literal:
                ret.append(mcolors.colorConverter.to_rgb(literal))
        return ret
    except (SyntaxError, ValueError):
        raise RuntimeError('Malformed colortable.')

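A usage sketch showing the three accepted line formats; this assumes the module's ``_parse`` helper and matplotlib's color converter behave as the docstring describes:

import io

palette = io.StringIO('(1.0, 0.0, 0.0)\nblue\n#00ff00\n')
print(read_colortable(palette))
# -> [(1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (0.0, 1.0, 0.0)]
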
"def",
"read_colortable",
"(",
"fobj",
")",
":",
"ret",
"=",
"[",
"]",
"try",
":",
"for",
"line",
"in",
"fobj",
":",
"literal",
"=",
"_parse",
"(",
"line",
")",
"if",
"literal",
":",
"ret",
".",
"append",
"(",
"mcolors",
".",
"colorConverter",
".",
"to_rgb",
"(",
"literal",
")",
")",
"return",
"ret",
"except",
"(",
"SyntaxError",
",",
"ValueError",
")",
":",
"raise",
"RuntimeError",
"(",
"'Malformed colortable.'",
")"
] | 29.035714 | 22.642857 |
def check_encryption_password(self, password):
    """Checks whether the supplied password is correct for the medium.

    in password of type str
        The password to check.

    raises :class:`VBoxErrorNotSupported`
        Encryption is not configured for this medium.

    raises :class:`VBoxErrorPasswordIncorrect`
        The given password is incorrect.

    """
    if not isinstance(password, basestring):
        raise TypeError("password can only be an instance of type basestring")
    self._call("checkEncryptionPassword",
               in_p=[password])

"def",
"check_encryption_password",
"(",
"self",
",",
"password",
")",
":",
"if",
"not",
"isinstance",
"(",
"password",
",",
"basestring",
")",
":",
"raise",
"TypeError",
"(",
"\"password can only be an instance of type basestring\"",
")",
"self",
".",
"_call",
"(",
"\"checkEncryptionPassword\"",
",",
"in_p",
"=",
"[",
"password",
"]",
")"
] | 36.470588 | 15.235294 |
def set_password(self, service, username, password):
    """Set password for the username of the service
    """
    segments = range(0, len(password), self._max_password_size)
    password_parts = [
        password[i:i + self._max_password_size] for i in segments]
    for i, password_part in enumerate(password_parts):
        curr_username = username
        if i > 0:
            curr_username += '{{part_%d}}' % i
        self._keyring.set_password(service, curr_username, password_part)

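A quick trace of the chunking with a hypothetical size limit of 4 characters (the real ``_max_password_size`` comes from the backing keyring):

password, max_size = 'abcdefghij', 4
parts = [password[i:i + max_size] for i in range(0, len(password), max_size)]
print(parts)  # ['abcd', 'efgh', 'ij']
# stored under: 'alice', 'alice{{part_1}}', 'alice{{part_2}}'
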
"def",
"set_password",
"(",
"self",
",",
"service",
",",
"username",
",",
"password",
")",
":",
"segments",
"=",
"range",
"(",
"0",
",",
"len",
"(",
"password",
")",
",",
"self",
".",
"_max_password_size",
")",
"password_parts",
"=",
"[",
"password",
"[",
"i",
":",
"i",
"+",
"self",
".",
"_max_password_size",
"]",
"for",
"i",
"in",
"segments",
"]",
"for",
"i",
",",
"password_part",
"in",
"enumerate",
"(",
"password_parts",
")",
":",
"curr_username",
"=",
"username",
"if",
"i",
">",
"0",
":",
"curr_username",
"+=",
"'{{part_%d}}'",
"%",
"i",
"self",
".",
"_keyring",
".",
"set_password",
"(",
"service",
",",
"curr_username",
",",
"password_part",
")"
] | 47.545455 | 15.545455 |
def _set_value(self, slot_record):
  """Sets the value of this slot based on its corresponding _SlotRecord.

  Does nothing if the slot has not yet been filled.

  Args:
    slot_record: The _SlotRecord containing this Slot's value.
  """
  if slot_record.status == _SlotRecord.FILLED:
    self.filled = True
    self._filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore(
        slot_record)
    self._fill_datetime = slot_record.fill_time
    self._value = slot_record.value

"def",
"_set_value",
"(",
"self",
",",
"slot_record",
")",
":",
"if",
"slot_record",
".",
"status",
"==",
"_SlotRecord",
".",
"FILLED",
":",
"self",
".",
"filled",
"=",
"True",
"self",
".",
"_filler_pipeline_key",
"=",
"_SlotRecord",
".",
"filler",
".",
"get_value_for_datastore",
"(",
"slot_record",
")",
"self",
".",
"_fill_datetime",
"=",
"slot_record",
".",
"fill_time",
"self",
".",
"_value",
"=",
"slot_record",
".",
"value"
] | 35.571429 | 17.5 |
def on_new(self):
    """
    Add a new empty code editor to the tab widget
    """
    editor = self.tabWidget.create_new_document()
    editor.cursorPositionChanged.connect(self.on_cursor_pos_changed)
    self.refresh_color_scheme()

"def",
"on_new",
"(",
"self",
")",
":",
"editor",
"=",
"self",
".",
"tabWidget",
".",
"create_new_document",
"(",
")",
"editor",
".",
"cursorPositionChanged",
".",
"connect",
"(",
"self",
".",
"on_cursor_pos_changed",
")",
"self",
".",
"refresh_color_scheme",
"(",
")"
] | 36 | 12.285714 |
def pause():
    """
    Pause the timer, preventing subsequent time from accumulating in the
    total. Renders the timer inactive, disabling other timing commands.

    Returns:
        float: The current time.

    Raises:
        PausedError: If timer already paused.
        StoppedError: If timer already stopped.
    """
    t = timer()
    if f.t.stopped:
        raise StoppedError("Cannot pause stopped timer.")
    if f.t.paused:
        raise PausedError("Timer already paused.")
    f.t.paused = True
    f.t.tmp_total += t - f.t.start_t
    f.t.start_t = None
    f.t.last_t = None
    return t

"def",
"pause",
"(",
")",
":",
"t",
"=",
"timer",
"(",
")",
"if",
"f",
".",
"t",
".",
"stopped",
":",
"raise",
"StoppedError",
"(",
"\"Cannot pause stopped timer.\"",
")",
"if",
"f",
".",
"t",
".",
"paused",
":",
"raise",
"PausedError",
"(",
"\"Timer already paused.\"",
")",
"f",
".",
"t",
".",
"paused",
"=",
"True",
"f",
".",
"t",
".",
"tmp_total",
"+=",
"t",
"-",
"f",
".",
"t",
".",
"start_t",
"f",
".",
"t",
".",
"start_t",
"=",
"None",
"f",
".",
"t",
".",
"last_t",
"=",
"None",
"return",
"t"
] | 26.727273 | 19.636364 |
def profile_fields(user):
    """
    Returns profile fields as a dict for the given user. Used in the
    profile view template when the ``ACCOUNTS_PROFILE_VIEWS_ENABLED``
    setting is set to ``True``, and also in the account approval emails
    sent to administrators when the ``ACCOUNTS_APPROVAL_REQUIRED``
    setting is set to ``True``.
    """
    fields = OrderedDict()
    try:
        profile = get_profile_for_user(user)
        user_fieldname = get_profile_user_fieldname()
        exclude = tuple(settings.ACCOUNTS_PROFILE_FORM_EXCLUDE_FIELDS)
        for field in profile._meta.fields:
            if field.name not in ("id", user_fieldname) + exclude:
                value = getattr(profile, field.name)
                fields[field.verbose_name.title()] = value
    except ProfileNotConfigured:
        pass
    return list(fields.items())

"def",
"profile_fields",
"(",
"user",
")",
":",
"fields",
"=",
"OrderedDict",
"(",
")",
"try",
":",
"profile",
"=",
"get_profile_for_user",
"(",
"user",
")",
"user_fieldname",
"=",
"get_profile_user_fieldname",
"(",
")",
"exclude",
"=",
"tuple",
"(",
"settings",
".",
"ACCOUNTS_PROFILE_FORM_EXCLUDE_FIELDS",
")",
"for",
"field",
"in",
"profile",
".",
"_meta",
".",
"fields",
":",
"if",
"field",
".",
"name",
"not",
"in",
"(",
"\"id\"",
",",
"user_fieldname",
")",
"+",
"exclude",
":",
"value",
"=",
"getattr",
"(",
"profile",
",",
"field",
".",
"name",
")",
"fields",
"[",
"field",
".",
"verbose_name",
".",
"title",
"(",
")",
"]",
"=",
"value",
"except",
"ProfileNotConfigured",
":",
"pass",
"return",
"list",
"(",
"fields",
".",
"items",
"(",
")",
")"
] | 41.9 | 16.7 |
def getNumberOfJobsIssued(self, preemptable=None):
    """
    Gets number of jobs that have been added by issueJob(s) and not
    removed by removeJob

    :param None or boolean preemptable: If None, return all types of jobs.
        If True, return just the number of preemptable jobs. If False, return
        just the number of non-preemptable jobs.
    """
    if preemptable is None:
        return len(self.jobBatchSystemIDToIssuedJob)
    elif preemptable:
        return self.preemptableJobsIssued
    else:
        assert len(self.jobBatchSystemIDToIssuedJob) >= self.preemptableJobsIssued
        return len(self.jobBatchSystemIDToIssuedJob) - self.preemptableJobsIssued

"def",
"getNumberOfJobsIssued",
"(",
"self",
",",
"preemptable",
"=",
"None",
")",
":",
"if",
"preemptable",
"is",
"None",
":",
"return",
"len",
"(",
"self",
".",
"jobBatchSystemIDToIssuedJob",
")",
"elif",
"preemptable",
":",
"return",
"self",
".",
"preemptableJobsIssued",
"else",
":",
"assert",
"len",
"(",
"self",
".",
"jobBatchSystemIDToIssuedJob",
")",
">=",
"self",
".",
"preemptableJobsIssued",
"return",
"len",
"(",
"self",
".",
"jobBatchSystemIDToIssuedJob",
")",
"-",
"self",
".",
"preemptableJobsIssued"
] | 44.9375 | 21.4375 |
async def file(location, mime_type=None, headers=None, _range=None):
    '''Return a response object with file data.

    :param location: Location of file on system.
    :param mime_type: Specific mime_type.
    :param headers: Custom Headers.
    :param _range: Optional range object with ``start``, ``end``, ``size``
        and ``total`` attributes, used to serve a partial response.
    '''
    filename = path.split(location)[-1]
    headers = headers or {}  # Content-Range below would fail on None

    async with open_async(location, mode='rb') as _file:
        if _range:
            await _file.seek(_range.start)
            out_stream = await _file.read(_range.size)
            headers['Content-Range'] = 'bytes %s-%s/%s' % (
                _range.start, _range.end, _range.total)
        else:
            out_stream = await _file.read()

    mime_type = mime_type or guess_type(filename)[0] or 'text/plain'
    return HTTPResponse(status=200,
                        headers=headers,
                        content_type=mime_type,
                        body_bytes=out_stream)

"async",
"def",
"file",
"(",
"location",
",",
"mime_type",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"_range",
"=",
"None",
")",
":",
"filename",
"=",
"path",
".",
"split",
"(",
"location",
")",
"[",
"-",
"1",
"]",
"async",
"with",
"open_async",
"(",
"location",
",",
"mode",
"=",
"'rb'",
")",
"as",
"_file",
":",
"if",
"_range",
":",
"await",
"_file",
".",
"seek",
"(",
"_range",
".",
"start",
")",
"out_stream",
"=",
"await",
"_file",
".",
"read",
"(",
"_range",
".",
"size",
")",
"headers",
"[",
"'Content-Range'",
"]",
"=",
"'bytes %s-%s/%s'",
"%",
"(",
"_range",
".",
"start",
",",
"_range",
".",
"end",
",",
"_range",
".",
"total",
")",
"else",
":",
"out_stream",
"=",
"await",
"_file",
".",
"read",
"(",
")",
"mime_type",
"=",
"mime_type",
"or",
"guess_type",
"(",
"filename",
")",
"[",
"0",
"]",
"or",
"'text/plain'",
"return",
"HTTPResponse",
"(",
"status",
"=",
"200",
",",
"headers",
"=",
"headers",
",",
"content_type",
"=",
"mime_type",
",",
"body_bytes",
"=",
"out_stream",
")"
] | 35.16 | 17.16 |
def compiled_model(self):
    """Returns compiled ORM class for the user supplied model"""
    return ALCHEMY_TEMPLATES.model.safe_substitute(
        class_name=self.class_name,
        table_name=self.table_name,
        column_definitions=self.compiled_columns,
        init_function=self.compiled_init_func,
        update_function=self.compiled_update_func,
        hash_function=self.compiled_hash_func,
        eq_function=self.compiled_eq_func,
        neq_function=self.compiled_neq_func,
        str_function=self.compiled_str_func,
        unicode_function=self.compiled_unicode_func,
        repr_function=self.compiled_repr_func,
        types=", ".join(self.basic_types),
        username=self.username,
        foreign_keys=self.compiled_foreign_keys,
        relationships=self.compiled_relationships,
        named_imports=self.compiled_named_imports,
        orm_imports=self.compiled_orm_imports,
        get_proxy_cls_function=self.compiled_proxy_cls_func,
        add_function=ALCHEMY_TEMPLATES.add_function.template,
        delete_function=ALCHEMY_TEMPLATES.delete_function.template,
        to_dict_function=ALCHEMY_TEMPLATES.to_dict_function.template,
        to_proxy_function=ALCHEMY_TEMPLATES.to_proxy_function.template,
        from_proxy_function=ALCHEMY_TEMPLATES.from_proxy_function.template)

"def",
"compiled_model",
"(",
"self",
")",
":",
"return",
"ALCHEMY_TEMPLATES",
".",
"model",
".",
"safe_substitute",
"(",
"class_name",
"=",
"self",
".",
"class_name",
",",
"table_name",
"=",
"self",
".",
"table_name",
",",
"column_definitions",
"=",
"self",
".",
"compiled_columns",
",",
"init_function",
"=",
"self",
".",
"compiled_init_func",
",",
"update_function",
"=",
"self",
".",
"compiled_update_func",
",",
"hash_function",
"=",
"self",
".",
"compiled_hash_func",
",",
"eq_function",
"=",
"self",
".",
"compiled_eq_func",
",",
"neq_function",
"=",
"self",
".",
"compiled_neq_func",
",",
"str_function",
"=",
"self",
".",
"compiled_str_func",
",",
"unicode_function",
"=",
"self",
".",
"compiled_unicode_func",
",",
"repr_function",
"=",
"self",
".",
"compiled_repr_func",
",",
"types",
"=",
"\", \"",
".",
"join",
"(",
"self",
".",
"basic_types",
")",
",",
"username",
"=",
"self",
".",
"username",
",",
"foreign_keys",
"=",
"self",
".",
"compiled_foreign_keys",
",",
"relationships",
"=",
"self",
".",
"compiled_relationships",
",",
"named_imports",
"=",
"self",
".",
"compiled_named_imports",
",",
"orm_imports",
"=",
"self",
".",
"compiled_orm_imports",
",",
"get_proxy_cls_function",
"=",
"self",
".",
"compiled_proxy_cls_func",
",",
"add_function",
"=",
"ALCHEMY_TEMPLATES",
".",
"add_function",
".",
"template",
",",
"delete_function",
"=",
"ALCHEMY_TEMPLATES",
".",
"delete_function",
".",
"template",
",",
"to_dict_function",
"=",
"ALCHEMY_TEMPLATES",
".",
"to_dict_function",
".",
"template",
",",
"to_proxy_function",
"=",
"ALCHEMY_TEMPLATES",
".",
"to_proxy_function",
".",
"template",
",",
"from_proxy_function",
"=",
"ALCHEMY_TEMPLATES",
".",
"from_proxy_function",
".",
"template",
")"
] | 93.28 | 53.4 |
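The generator above leans on string.Template.safe_substitute; a tiny standalone demonstration of that pattern (the template text below is made up):

from string import Template

model = Template('class $class_name(Base):\n    __tablename__ = "$table_name"')
print(model.safe_substitute(class_name='User', table_name='users'))
# safe_substitute leaves unknown placeholders intact instead of raising KeyError:
print(Template('$known $unknown').safe_substitute(known='ok'))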
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
    -- mu1: The mean of the activations of the penultimate layer of the
            CHEMNET (as returned by the function 'get_predictions')
            for generated samples.
    -- mu2: The mean of the activations of the penultimate layer of the
            CHEMNET (as returned by the function 'get_predictions')
            for real samples.
    -- sigma1: The covariance matrix of the activations of the penultimate layer of the
            CHEMNET (as returned by the function 'get_predictions')
            for generated samples.
    -- sigma2: The covariance matrix of the activations of the penultimate layer of the
            CHEMNET (as returned by the function 'get_predictions')
            for real samples.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths"
assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions"
diff = mu1 - mu2
# product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean | [
"def",
"calculate_frechet_distance",
"(",
"mu1",
",",
"sigma1",
",",
"mu2",
",",
"sigma2",
",",
"eps",
"=",
"1e-6",
")",
":",
"mu1",
"=",
"np",
".",
"atleast_1d",
"(",
"mu1",
")",
"mu2",
"=",
"np",
".",
"atleast_1d",
"(",
"mu2",
")",
"sigma1",
"=",
"np",
".",
"atleast_2d",
"(",
"sigma1",
")",
"sigma2",
"=",
"np",
".",
"atleast_2d",
"(",
"sigma2",
")",
"assert",
"mu1",
".",
"shape",
"==",
"mu2",
".",
"shape",
",",
"\"Training and test mean vectors have different lengths\"",
"assert",
"sigma1",
".",
"shape",
"==",
"sigma2",
".",
"shape",
",",
"\"Training and test covariances have different dimensions\"",
"diff",
"=",
"mu1",
"-",
"mu2",
"# product might be almost singular",
"covmean",
",",
"_",
"=",
"linalg",
".",
"sqrtm",
"(",
"sigma1",
".",
"dot",
"(",
"sigma2",
")",
",",
"disp",
"=",
"False",
")",
"if",
"not",
"np",
".",
"isfinite",
"(",
"covmean",
")",
".",
"all",
"(",
")",
":",
"offset",
"=",
"np",
".",
"eye",
"(",
"sigma1",
".",
"shape",
"[",
"0",
"]",
")",
"*",
"eps",
"covmean",
"=",
"linalg",
".",
"sqrtm",
"(",
"(",
"sigma1",
"+",
"offset",
")",
".",
"dot",
"(",
"sigma2",
"+",
"offset",
")",
")",
"# numerical error might give slight imaginary component",
"if",
"np",
".",
"iscomplexobj",
"(",
"covmean",
")",
":",
"if",
"not",
"np",
".",
"allclose",
"(",
"np",
".",
"diagonal",
"(",
"covmean",
")",
".",
"imag",
",",
"0",
",",
"atol",
"=",
"1e-3",
")",
":",
"m",
"=",
"np",
".",
"max",
"(",
"np",
".",
"abs",
"(",
"covmean",
".",
"imag",
")",
")",
"raise",
"ValueError",
"(",
"\"Imaginary component {}\"",
".",
"format",
"(",
"m",
")",
")",
"covmean",
"=",
"covmean",
".",
"real",
"tr_covmean",
"=",
"np",
".",
"trace",
"(",
"covmean",
")",
"return",
"diff",
".",
"dot",
"(",
"diff",
")",
"+",
"np",
".",
"trace",
"(",
"sigma1",
")",
"+",
"np",
".",
"trace",
"(",
"sigma2",
")",
"-",
"2",
"*",
"tr_covmean"
] | 40.471698 | 23.886792 |
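A numeric sanity check of the distance formula above, assuming simple 2-D Gaussians with diagonal covariances (values chosen by hand, not taken from any model):

import numpy as np
from scipy import linalg

mu1, mu2 = np.array([0.0, 0.0]), np.array([1.0, 1.0])
s1, s2 = np.eye(2), 4.0 * np.eye(2)
covmean, _ = linalg.sqrtm(s1.dot(s2), disp=False)  # sqrt(C1*C2) = 2*I here
d2 = np.sum((mu1 - mu2) ** 2) + np.trace(s1 + s2 - 2 * covmean)
print(d2)  # ~4.0, since ||mu1-mu2||^2 + Tr(...) = 2 + (2 + 8 - 8)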
def solve(self, neigs=4, tol=0, guess=None, mode_profiles=True, initial_mode_guess=None):
"""
This function finds the eigenmodes.
Parameters
----------
neigs : int
number of eigenmodes to find
tol : float
Relative accuracy for eigenvalues. The default value of 0 implies machine precision.
guess : float
a guess for the refractive index. Only finds eigenvectors with an effective refractive index
higher than this value.
Returns
-------
self : an instance of the VFDModeSolver class
obtain the fields of interest for specific modes using, for example:
solver = EMpy.modesolvers.FD.VFDModeSolver(wavelength, x, y, epsf, boundary).solve()
Ex = solver.modes[0].Ex
Ey = solver.modes[0].Ey
Ez = solver.modes[0].Ez
"""
from scipy.sparse.linalg import eigen
self.nmodes = neigs
self.tol = tol
A = self.build_matrix()
if guess is not None:
# calculate shift for eigs function
k = 2 * numpy.pi / self.wl
shift = (guess * k) ** 2
else:
shift = None
[eigvals, eigvecs] = eigen.eigs(A,
k=neigs,
which='LR',
tol=0.001,
ncv=None,
                                    v0=initial_mode_guess,
return_eigenvectors=mode_profiles,
sigma=shift)
neffs = self.wl * scipy.sqrt(eigvals) / (2 * numpy.pi)
if mode_profiles:
Hxs = []
Hys = []
nx = self.nx
ny = self.ny
for ieig in range(neigs):
Hxs.append(eigvecs[:nx * ny, ieig].reshape(nx, ny))
Hys.append(eigvecs[nx * ny:, ieig].reshape(nx, ny))
# sort the modes
idx = numpy.flipud(numpy.argsort(neffs))
neffs = neffs[idx]
self.neff = neffs
if mode_profiles:
tmpx = []
tmpy = []
for i in idx:
tmpx.append(Hxs[i])
tmpy.append(Hys[i])
Hxs = tmpx
Hys = tmpy
[Hzs, Exs, Eys, Ezs] = self.compute_other_fields(neffs, Hxs, Hys)
self.modes = []
for (neff, Hx, Hy, Hz, Ex, Ey, Ez) in zip(neffs, Hxs, Hys, Hzs, Exs, Eys, Ezs):
self.modes.append(
FDMode(self.wl, self.x, self.y, neff, Ey, Ex, Ez, Hy, Hx, Hz).normalize())
return self | [
"def",
"solve",
"(",
"self",
",",
"neigs",
"=",
"4",
",",
"tol",
"=",
"0",
",",
"guess",
"=",
"None",
",",
"mode_profiles",
"=",
"True",
",",
"initial_mode_guess",
"=",
"None",
")",
":",
"from",
"scipy",
".",
"sparse",
".",
"linalg",
"import",
"eigen",
"self",
".",
"nmodes",
"=",
"neigs",
"self",
".",
"tol",
"=",
"tol",
"A",
"=",
"self",
".",
"build_matrix",
"(",
")",
"if",
"guess",
"is",
"not",
"None",
":",
"# calculate shift for eigs function",
"k",
"=",
"2",
"*",
"numpy",
".",
"pi",
"/",
"self",
".",
"wl",
"shift",
"=",
"(",
"guess",
"*",
"k",
")",
"**",
"2",
"else",
":",
"shift",
"=",
"None",
"[",
"eigvals",
",",
"eigvecs",
"]",
"=",
"eigen",
".",
"eigs",
"(",
"A",
",",
"k",
"=",
"neigs",
",",
"which",
"=",
"'LR'",
",",
"tol",
"=",
"0.001",
",",
"ncv",
"=",
"None",
",",
"v0",
"=",
"initial_mode_guess",
",",
"return_eigenvectors",
"=",
"mode_profiles",
",",
"sigma",
"=",
"shift",
")",
"neffs",
"=",
"self",
".",
"wl",
"*",
"scipy",
".",
"sqrt",
"(",
"eigvals",
")",
"/",
"(",
"2",
"*",
"numpy",
".",
"pi",
")",
"if",
"mode_profiles",
":",
"Hxs",
"=",
"[",
"]",
"Hys",
"=",
"[",
"]",
"nx",
"=",
"self",
".",
"nx",
"ny",
"=",
"self",
".",
"ny",
"for",
"ieig",
"in",
"range",
"(",
"neigs",
")",
":",
"Hxs",
".",
"append",
"(",
"eigvecs",
"[",
":",
"nx",
"*",
"ny",
",",
"ieig",
"]",
".",
"reshape",
"(",
"nx",
",",
"ny",
")",
")",
"Hys",
".",
"append",
"(",
"eigvecs",
"[",
"nx",
"*",
"ny",
":",
",",
"ieig",
"]",
".",
"reshape",
"(",
"nx",
",",
"ny",
")",
")",
"# sort the modes",
"idx",
"=",
"numpy",
".",
"flipud",
"(",
"numpy",
".",
"argsort",
"(",
"neffs",
")",
")",
"neffs",
"=",
"neffs",
"[",
"idx",
"]",
"self",
".",
"neff",
"=",
"neffs",
"if",
"mode_profiles",
":",
"tmpx",
"=",
"[",
"]",
"tmpy",
"=",
"[",
"]",
"for",
"i",
"in",
"idx",
":",
"tmpx",
".",
"append",
"(",
"Hxs",
"[",
"i",
"]",
")",
"tmpy",
".",
"append",
"(",
"Hys",
"[",
"i",
"]",
")",
"Hxs",
"=",
"tmpx",
"Hys",
"=",
"tmpy",
"[",
"Hzs",
",",
"Exs",
",",
"Eys",
",",
"Ezs",
"]",
"=",
"self",
".",
"compute_other_fields",
"(",
"neffs",
",",
"Hxs",
",",
"Hys",
")",
"self",
".",
"modes",
"=",
"[",
"]",
"for",
"(",
"neff",
",",
"Hx",
",",
"Hy",
",",
"Hz",
",",
"Ex",
",",
"Ey",
",",
"Ez",
")",
"in",
"zip",
"(",
"neffs",
",",
"Hxs",
",",
"Hys",
",",
"Hzs",
",",
"Exs",
",",
"Eys",
",",
"Ezs",
")",
":",
"self",
".",
"modes",
".",
"append",
"(",
"FDMode",
"(",
"self",
".",
"wl",
",",
"self",
".",
"x",
",",
"self",
".",
"y",
",",
"neff",
",",
"Ey",
",",
"Ex",
",",
"Ez",
",",
"Hy",
",",
"Hx",
",",
"Hz",
")",
".",
"normalize",
"(",
")",
")",
"return",
"self"
] | 33.935897 | 21.448718 |
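A small standalone illustration of the shift-invert call used above: with sigma set, scipy returns the eigenpairs closest to the shift rather than the largest ones (the matrix below is a toy diagonal, and sigma is kept off any exact eigenvalue so the factorization stays non-singular):

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import eigs

A = sp.diags(np.arange(1.0, 101.0))    # diagonal matrix with eigenvalues 1..100
vals, vecs = eigs(A, k=3, sigma=50.2)  # the three eigenvalues closest to 50.2
print(np.sort(vals.real))              # [49. 50. 51.]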
def rolling_returns_cumulative(returns, window, min_periods=1, geometric=True):
""" return the rolling cumulative returns
Parameters
----------
returns : DataFrame or Series
window : number of observations
min_periods : minimum number of observations in a window
geometric : link the returns geometrically
"""
if geometric:
rc = lambda x: (1. + x[np.isfinite(x)]).prod() - 1.
else:
rc = lambda x: (x[np.isfinite(x)]).sum()
return pd.rolling_apply(returns, window, rc, min_periods=min_periods) | [
"def",
"rolling_returns_cumulative",
"(",
"returns",
",",
"window",
",",
"min_periods",
"=",
"1",
",",
"geometric",
"=",
"True",
")",
":",
"if",
"geometric",
":",
"rc",
"=",
"lambda",
"x",
":",
"(",
"1.",
"+",
"x",
"[",
"np",
".",
"isfinite",
"(",
"x",
")",
"]",
")",
".",
"prod",
"(",
")",
"-",
"1.",
"else",
":",
"rc",
"=",
"lambda",
"x",
":",
"(",
"x",
"[",
"np",
".",
"isfinite",
"(",
"x",
")",
"]",
")",
".",
"sum",
"(",
")",
"return",
"pd",
".",
"rolling_apply",
"(",
"returns",
",",
"window",
",",
"rc",
",",
"min_periods",
"=",
"min_periods",
")"
] | 33.6875 | 20.1875 |
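pd.rolling_apply was removed in modern pandas; assuming a current pandas version, the geometric branch above translates to the Series.rolling API:

import numpy as np
import pandas as pd

returns = pd.Series([0.01, 0.02, -0.01, 0.03])
rc = lambda x: (1.0 + x[np.isfinite(x)]).prod() - 1.0
print(returns.rolling(window=2, min_periods=1).apply(rc, raw=True))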
def generate_signature(method, version, endpoint,
date, rel_url, content_type, content,
access_key, secret_key, hash_type):
'''
Generates the API request signature from the given parameters.
'''
hostname = endpoint._val.netloc # FIXME: migrate to public API
if version >= 'v4.20181215':
content = b''
else:
if content_type.startswith('multipart/'):
content = b''
body_hash = hashlib.new(hash_type, content).hexdigest()
sign_str = '{}\n{}\n{}\nhost:{}\ncontent-type:{}\nx-backendai-version:{}\n{}'.format( # noqa
method.upper(),
rel_url,
date.isoformat(),
hostname,
content_type.lower(),
version,
body_hash
)
sign_bytes = sign_str.encode()
sign_key = hmac.new(secret_key.encode(),
date.strftime('%Y%m%d').encode(), hash_type).digest()
sign_key = hmac.new(sign_key, hostname.encode(), hash_type).digest()
signature = hmac.new(sign_key, sign_bytes, hash_type).hexdigest()
headers = {
'Authorization': 'BackendAI signMethod=HMAC-{}, credential={}:{}'.format(
hash_type.upper(),
access_key,
signature
),
}
return headers, signature | [
"def",
"generate_signature",
"(",
"method",
",",
"version",
",",
"endpoint",
",",
"date",
",",
"rel_url",
",",
"content_type",
",",
"content",
",",
"access_key",
",",
"secret_key",
",",
"hash_type",
")",
":",
"hash_type",
"=",
"hash_type",
"hostname",
"=",
"endpoint",
".",
"_val",
".",
"netloc",
"# FIXME: migrate to public API",
"if",
"version",
">=",
"'v4.20181215'",
":",
"content",
"=",
"b''",
"else",
":",
"if",
"content_type",
".",
"startswith",
"(",
"'multipart/'",
")",
":",
"content",
"=",
"b''",
"body_hash",
"=",
"hashlib",
".",
"new",
"(",
"hash_type",
",",
"content",
")",
".",
"hexdigest",
"(",
")",
"sign_str",
"=",
"'{}\\n{}\\n{}\\nhost:{}\\ncontent-type:{}\\nx-backendai-version:{}\\n{}'",
".",
"format",
"(",
"# noqa",
"method",
".",
"upper",
"(",
")",
",",
"rel_url",
",",
"date",
".",
"isoformat",
"(",
")",
",",
"hostname",
",",
"content_type",
".",
"lower",
"(",
")",
",",
"version",
",",
"body_hash",
")",
"sign_bytes",
"=",
"sign_str",
".",
"encode",
"(",
")",
"sign_key",
"=",
"hmac",
".",
"new",
"(",
"secret_key",
".",
"encode",
"(",
")",
",",
"date",
".",
"strftime",
"(",
"'%Y%m%d'",
")",
".",
"encode",
"(",
")",
",",
"hash_type",
")",
".",
"digest",
"(",
")",
"sign_key",
"=",
"hmac",
".",
"new",
"(",
"sign_key",
",",
"hostname",
".",
"encode",
"(",
")",
",",
"hash_type",
")",
".",
"digest",
"(",
")",
"signature",
"=",
"hmac",
".",
"new",
"(",
"sign_key",
",",
"sign_bytes",
",",
"hash_type",
")",
".",
"hexdigest",
"(",
")",
"headers",
"=",
"{",
"'Authorization'",
":",
"'BackendAI signMethod=HMAC-{}, credential={}:{}'",
".",
"format",
"(",
"hash_type",
".",
"upper",
"(",
")",
",",
"access_key",
",",
"signature",
")",
",",
"}",
"return",
"headers",
",",
"signature"
] | 33.051282 | 23.769231 |
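A standalone illustration of the two-step HMAC key derivation above, with made-up credentials, hostname and date:

import hashlib
import hmac
from datetime import datetime

secret, hostname = b'top-secret', b'api.example.com'
date = datetime(2024, 1, 2)
sign_key = hmac.new(secret, date.strftime('%Y%m%d').encode(), hashlib.sha256).digest()
sign_key = hmac.new(sign_key, hostname, hashlib.sha256).digest()  # key is chained
print(hmac.new(sign_key, b'GET\n/v2/hello', hashlib.sha256).hexdigest())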
def get_itoken(self, env):
"""Returns the current internal token to use for the auth system's own
actions with other services. Each process will create its own
itoken and the token will be deleted and recreated based on the
token_life configuration value. The itoken information is stored in
memcache because the auth process that is asked by Swift to validate
the token may not be the same as the auth process that created the
token.
"""
if not self.itoken or self.itoken_expires < time() or \
env.get('HTTP_X_AUTH_NEW_TOKEN', 'false').lower() in \
TRUE_VALUES:
self.itoken = '%sitk%s' % (self.reseller_prefix, uuid4().hex)
memcache_key = '%s/auth/%s' % (self.reseller_prefix, self.itoken)
self.itoken_expires = time() + self.token_life
memcache_client = cache_from_env(env)
if not memcache_client:
raise Exception(
'No memcache set up; required for Swauth middleware')
memcache_client.set(
memcache_key,
(self.itoken_expires,
'.auth,.reseller_admin,%s.auth' % self.reseller_prefix),
time=self.token_life)
return self.itoken | [
"def",
"get_itoken",
"(",
"self",
",",
"env",
")",
":",
"if",
"not",
"self",
".",
"itoken",
"or",
"self",
".",
"itoken_expires",
"<",
"time",
"(",
")",
"or",
"env",
".",
"get",
"(",
"'HTTP_X_AUTH_NEW_TOKEN'",
",",
"'false'",
")",
".",
"lower",
"(",
")",
"in",
"TRUE_VALUES",
":",
"self",
".",
"itoken",
"=",
"'%sitk%s'",
"%",
"(",
"self",
".",
"reseller_prefix",
",",
"uuid4",
"(",
")",
".",
"hex",
")",
"memcache_key",
"=",
"'%s/auth/%s'",
"%",
"(",
"self",
".",
"reseller_prefix",
",",
"self",
".",
"itoken",
")",
"self",
".",
"itoken_expires",
"=",
"time",
"(",
")",
"+",
"self",
".",
"token_life",
"memcache_client",
"=",
"cache_from_env",
"(",
"env",
")",
"if",
"not",
"memcache_client",
":",
"raise",
"Exception",
"(",
"'No memcache set up; required for Swauth middleware'",
")",
"memcache_client",
".",
"set",
"(",
"memcache_key",
",",
"(",
"self",
".",
"itoken_expires",
",",
"'.auth,.reseller_admin,%s.auth'",
"%",
"self",
".",
"reseller_prefix",
")",
",",
"time",
"=",
"self",
".",
"token_life",
")",
"return",
"self",
".",
"itoken"
] | 51.44 | 19.4 |
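The regenerate-when-expired logic above, reduced to its core as a dependency-free sketch (no memcache and no request headers):

import time
import uuid

class TokenCache:
    def __init__(self, life):
        self.life, self.token, self.expires = life, None, 0.0

    def get(self):
        if not self.token or self.expires < time.time():
            self.token = 'itk%s' % uuid.uuid4().hex  # mint a fresh token
            self.expires = time.time() + self.life
        return self.token

cache = TokenCache(life=3600)
assert cache.get() == cache.get()  # cached until expiry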
def broadcast(self, body, html_body=None, exclude=()):
"""Broadcast a message to users in the chatroom"""
logger.info('broadcast on %s: %s' % (self.name, body,))
for member in filter(lambda m: m.get('STATUS') == 'ACTIVE' and m not in exclude, self.params['MEMBERS']):
logger.debug(member['JID'])
self.send_message(body, member, html_body=html_body, quiet=True) | [
"def",
"broadcast",
"(",
"self",
",",
"body",
",",
"html_body",
"=",
"None",
",",
"exclude",
"=",
"(",
")",
")",
":",
"logger",
".",
"info",
"(",
"'broadcast on %s: %s'",
"%",
"(",
"self",
".",
"name",
",",
"body",
",",
")",
")",
"for",
"member",
"in",
"filter",
"(",
"lambda",
"m",
":",
"m",
".",
"get",
"(",
"'STATUS'",
")",
"==",
"'ACTIVE'",
"and",
"m",
"not",
"in",
"exclude",
",",
"self",
".",
"params",
"[",
"'MEMBERS'",
"]",
")",
":",
"logger",
".",
"debug",
"(",
"member",
"[",
"'JID'",
"]",
")",
"self",
".",
"send_message",
"(",
"body",
",",
"member",
",",
"html_body",
"=",
"html_body",
",",
"quiet",
"=",
"True",
")"
] | 67.166667 | 24.5 |
def fs_decode(path):
"""
Decode a filesystem path using the proper filesystem encoding
:param path: The filesystem path to decode from bytes or string
:return: The filesystem path, decoded with the determined encoding
:rtype: Text
"""
path = _get_path(path)
if path is None:
raise TypeError("expected a valid path to decode")
if isinstance(path, six.binary_type):
if six.PY2:
from array import array
indexes = _invalid_utf8_indexes(array(str("B"), path))
return "".join(
chunk.decode(_fs_encoding, _fs_decode_errors)
for chunk in _chunks(path, indexes)
)
return path.decode(_fs_encoding, _fs_decode_errors)
return path | [
"def",
"fs_decode",
"(",
"path",
")",
":",
"path",
"=",
"_get_path",
"(",
"path",
")",
"if",
"path",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"expected a valid path to decode\"",
")",
"if",
"isinstance",
"(",
"path",
",",
"six",
".",
"binary_type",
")",
":",
"if",
"six",
".",
"PY2",
":",
"from",
"array",
"import",
"array",
"indexes",
"=",
"_invalid_utf8_indexes",
"(",
"array",
"(",
"str",
"(",
"\"B\"",
")",
",",
"path",
")",
")",
"return",
"\"\"",
".",
"join",
"(",
"chunk",
".",
"decode",
"(",
"_fs_encoding",
",",
"_fs_decode_errors",
")",
"for",
"chunk",
"in",
"_chunks",
"(",
"path",
",",
"indexes",
")",
")",
"return",
"path",
".",
"decode",
"(",
"_fs_encoding",
",",
"_fs_decode_errors",
")",
"return",
"path"
] | 32.304348 | 20.304348 |
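On Python 3 the common case of the function above is covered by os.fsdecode; a one-line check (the result depends on the filesystem encoding, assumed UTF-8 here):

import os
print(os.fsdecode(b'caf\xc3\xa9'))  # 'café' on a UTF-8 filesystem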
def to_description_dict(self):
"""
    You might need the keys below in some situations:
- caCertificateId
- previousOwnedBy
"""
return {
'certificateArn': self.arn,
'certificateId': self.certificate_id,
'status': self.status,
'certificatePem': self.certificate_pem,
'ownedBy': self.owner,
'creationDate': self.creation_date,
'lastModifiedDate': self.last_modified_date,
'transferData': self.transfer_data
} | [
"def",
"to_description_dict",
"(",
"self",
")",
":",
"return",
"{",
"'certificateArn'",
":",
"self",
".",
"arn",
",",
"'certificateId'",
":",
"self",
".",
"certificate_id",
",",
"'status'",
":",
"self",
".",
"status",
",",
"'certificatePem'",
":",
"self",
".",
"certificate_pem",
",",
"'ownedBy'",
":",
"self",
".",
"owner",
",",
"'creationDate'",
":",
"self",
".",
"creation_date",
",",
"'lastModifiedDate'",
":",
"self",
".",
"last_modified_date",
",",
"'transferData'",
":",
"self",
".",
"transfer_data",
"}"
] | 33.625 | 10.25 |
def find(cls, device=None):
"""
Factory method that returns the requested :py:class:`USBDevice` device, or the
first device.
:param device: Tuple describing the USB device to open, as returned
by find_all().
:type device: tuple
:returns: :py:class:`USBDevice` object utilizing the specified device
:raises: :py:class:`~alarmdecoder.util.NoDeviceError`
"""
if not have_pyftdi:
raise ImportError('The USBDevice class has been disabled due to missing requirement: pyftdi or pyusb.')
cls.find_all()
if len(cls.__devices) == 0:
raise NoDeviceError('No AD2USB devices present.')
if device is None:
device = cls.__devices[0]
vendor, product, sernum, ifcount, description = device
return USBDevice(interface=sernum, vid=vendor, pid=product) | [
"def",
"find",
"(",
"cls",
",",
"device",
"=",
"None",
")",
":",
"if",
"not",
"have_pyftdi",
":",
"raise",
"ImportError",
"(",
"'The USBDevice class has been disabled due to missing requirement: pyftdi or pyusb.'",
")",
"cls",
".",
"find_all",
"(",
")",
"if",
"len",
"(",
"cls",
".",
"__devices",
")",
"==",
"0",
":",
"raise",
"NoDeviceError",
"(",
"'No AD2USB devices present.'",
")",
"if",
"device",
"is",
"None",
":",
"device",
"=",
"cls",
".",
"__devices",
"[",
"0",
"]",
"vendor",
",",
"product",
",",
"sernum",
",",
"ifcount",
",",
"description",
"=",
"device",
"return",
"USBDevice",
"(",
"interface",
"=",
"sernum",
",",
"vid",
"=",
"vendor",
",",
"pid",
"=",
"product",
")"
] | 34.038462 | 25.576923 |
def step_use_curdir_as_working_directory(context):
"""
Uses the current directory as working directory
"""
context.workdir = os.path.abspath(".")
command_util.ensure_workdir_exists(context) | [
"def",
"step_use_curdir_as_working_directory",
"(",
"context",
")",
":",
"context",
".",
"workdir",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"\".\"",
")",
"command_util",
".",
"ensure_workdir_exists",
"(",
"context",
")"
] | 34 | 5 |
def relaxedEMDS(X0, N, d, C, b, KE, print_out=False, lamda=10):
""" Find the set of points from an edge kernel with geometric constraints, using convex rank relaxation.
"""
E = C.shape[1]
X = Variable((E, E), PSD=True)
constraints = [C[i, :] * X == b[i] for i in range(C.shape[0])]
obj = Minimize(trace(X) + lamda * norm(KE - X))
prob = Problem(obj, constraints)
try:
# CVXOPT is more accurate than SCS, even though slower.
total_cost = prob.solve(solver='CVXOPT', verbose=print_out)
except:
try:
print('CVXOPT with default cholesky failed. Trying kktsolver...')
# kktsolver is more robust than default (cholesky), even though slower.
total_cost = prob.solve(
solver='CVXOPT', verbose=print_out, kktsolver="robust")
except:
try:
                print('CVXOPT with robust kktsolver failed. Trying SCS...')
# SCS is fast and robust, but inaccurate (last choice).
total_cost = prob.solve(solver='SCS', verbose=print_out)
except:
                print('SCS and CVXOPT solvers with default and kktsolver failed.')
if print_out:
print('status:', prob.status)
Xhat_KE, Vhat_KE = superMDS(X0, N, d, KE=X.value)
return Xhat_KE, Vhat_KE | [
"def",
"relaxedEMDS",
"(",
"X0",
",",
"N",
",",
"d",
",",
"C",
",",
"b",
",",
"KE",
",",
"print_out",
"=",
"False",
",",
"lamda",
"=",
"10",
")",
":",
"E",
"=",
"C",
".",
"shape",
"[",
"1",
"]",
"X",
"=",
"Variable",
"(",
"(",
"E",
",",
"E",
")",
",",
"PSD",
"=",
"True",
")",
"constraints",
"=",
"[",
"C",
"[",
"i",
",",
":",
"]",
"*",
"X",
"==",
"b",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"C",
".",
"shape",
"[",
"0",
"]",
")",
"]",
"obj",
"=",
"Minimize",
"(",
"trace",
"(",
"X",
")",
"+",
"lamda",
"*",
"norm",
"(",
"KE",
"-",
"X",
")",
")",
"prob",
"=",
"Problem",
"(",
"obj",
",",
"constraints",
")",
"try",
":",
"# CVXOPT is more accurate than SCS, even though slower.",
"total_cost",
"=",
"prob",
".",
"solve",
"(",
"solver",
"=",
"'CVXOPT'",
",",
"verbose",
"=",
"print_out",
")",
"except",
":",
"try",
":",
"print",
"(",
"'CVXOPT with default cholesky failed. Trying kktsolver...'",
")",
"# kktsolver is more robust than default (cholesky), even though slower.",
"total_cost",
"=",
"prob",
".",
"solve",
"(",
"solver",
"=",
"'CVXOPT'",
",",
"verbose",
"=",
"print_out",
",",
"kktsolver",
"=",
"\"robust\"",
")",
"except",
":",
"try",
":",
"print",
"(",
"'CVXOPT with robust kktsovler failed. Trying SCS...'",
")",
"# SCS is fast and robust, but inaccurate (last choice).",
"total_cost",
"=",
"prob",
".",
"solve",
"(",
"solver",
"=",
"'SCS'",
",",
"verbose",
"=",
"print_out",
")",
"except",
":",
"print",
"(",
"'SCS and CVXOPT solver with default and kktsolver failed .'",
")",
"if",
"print_out",
":",
"print",
"(",
"'status:'",
",",
"prob",
".",
"status",
")",
"Xhat_KE",
",",
"Vhat_KE",
"=",
"superMDS",
"(",
"X0",
",",
"N",
",",
"d",
",",
"KE",
"=",
"X",
".",
"value",
")",
"return",
"Xhat_KE",
",",
"Vhat_KE"
] | 41.774194 | 23.483871 |
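The cascading try/except above is a solver-fallback chain; the same idea as a generic, dependency-free sketch:

def solve_with_fallbacks(problem, attempts):
    for label, solver in attempts:
        try:
            return solver(problem)
        except Exception as exc:  # deliberate catch-all, mirroring the code above
            print('%s failed (%s), trying next solver...' % (label, exc))
    raise RuntimeError('all solvers failed')

attempts = [('flaky', lambda p: 1 / 0), ('robust', lambda p: p * 2)]
print(solve_with_fallbacks(21, attempts))  # flaky fails, robust returns 42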
def execute(self, source=None, hidden=False, interactive=False):
""" Reimplemented to the store history.
"""
if not hidden:
history = self.input_buffer if source is None else source
executed = super(HistoryConsoleWidget, self).execute(
source, hidden, interactive)
if executed and not hidden:
# Save the command unless it was an empty string or was identical
# to the previous command.
history = history.rstrip()
if history and (not self._history or self._history[-1] != history):
self._history.append(history)
# Emulate readline: reset all history edits.
self._history_edits = {}
# Move the history index to the most recent item.
self._history_index = len(self._history)
return executed | [
"def",
"execute",
"(",
"self",
",",
"source",
"=",
"None",
",",
"hidden",
"=",
"False",
",",
"interactive",
"=",
"False",
")",
":",
"if",
"not",
"hidden",
":",
"history",
"=",
"self",
".",
"input_buffer",
"if",
"source",
"is",
"None",
"else",
"source",
"executed",
"=",
"super",
"(",
"HistoryConsoleWidget",
",",
"self",
")",
".",
"execute",
"(",
"source",
",",
"hidden",
",",
"interactive",
")",
"if",
"executed",
"and",
"not",
"hidden",
":",
"# Save the command unless it was an empty string or was identical",
"# to the previous command.",
"history",
"=",
"history",
".",
"rstrip",
"(",
")",
"if",
"history",
"and",
"(",
"not",
"self",
".",
"_history",
"or",
"self",
".",
"_history",
"[",
"-",
"1",
"]",
"!=",
"history",
")",
":",
"self",
".",
"_history",
".",
"append",
"(",
"history",
")",
"# Emulate readline: reset all history edits.",
"self",
".",
"_history_edits",
"=",
"{",
"}",
"# Move the history index to the most recent item.",
"self",
".",
"_history_index",
"=",
"len",
"(",
"self",
".",
"_history",
")",
"return",
"executed"
] | 37.130435 | 19.652174 |
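The dedup-on-append rule above, isolated: a command is stored only if it is non-empty and differs from the previous history entry.

def record(history, command):
    command = command.rstrip()
    if command and (not history or history[-1] != command):
        history.append(command)
    return history

h = []
for cmd in ['ls', 'ls', '  ', 'pwd']:
    record(h, cmd)
print(h)  # ['ls', 'pwd']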
def delete_bgedge(self, bgedge, key=None):
""" Deletes a supplied :class:`bg.edge.BGEdge` from a perspective of multi-color substitution. If unique identifier ``key`` is not provided, most similar (from perspective of :meth:`bg.multicolor.Multicolor.similarity_score` result) edge between respective vertices is chosen for change.
    Proxies a call to the :meth:`BreakpointGraph._BreakpointGraph__delete_bgedge` method.
:param bgedge: an edge to be deleted from a perspective of multi-color substitution
:type bgedge: :class:`bg.edge.BGEdge`
:param key: unique identifier of existing edges in current :class:`BreakpointGraph` instance to be changed
    :type key: any python object. ``int`` is expected.
:return: ``None``, performed inplace changes.
"""
self.__delete_bgedge(bgedge=bgedge, key=key) | [
"def",
"delete_bgedge",
"(",
"self",
",",
"bgedge",
",",
"key",
"=",
"None",
")",
":",
"self",
".",
"__delete_bgedge",
"(",
"bgedge",
"=",
"bgedge",
",",
"key",
"=",
"key",
")"
] | 70.5 | 25 |
def install_yum(self, fn=None, package_name=None, update=0, list_only=0):
"""
Installs system packages listed in yum-requirements.txt.
"""
assert self.genv[ROLE]
yum_req_fn = fn or self.find_template(self.genv.yum_requirments_fn)
if not yum_req_fn:
return []
assert os.path.isfile(yum_req_fn)
update = int(update)
if list_only:
return [
_.strip() for _ in open(yum_req_fn).readlines()
            if _.strip() and not _.strip().startswith('#')
and (not package_name or _.strip() == package_name)
]
if update:
self.sudo_or_dryrun('yum update --assumeyes')
if package_name:
self.sudo_or_dryrun('yum install --assumeyes %s' % package_name)
else:
if self.genv.is_local:
self.put_or_dryrun(local_path=yum_req_fn)
yum_req_fn = self.genv.put_remote_fn
        self.sudo_or_dryrun('yum install --assumeyes $(cat %s)' % yum_req_fn)
"def",
"install_yum",
"(",
"self",
",",
"fn",
"=",
"None",
",",
"package_name",
"=",
"None",
",",
"update",
"=",
"0",
",",
"list_only",
"=",
"0",
")",
":",
"assert",
"self",
".",
"genv",
"[",
"ROLE",
"]",
"yum_req_fn",
"=",
"fn",
"or",
"self",
".",
"find_template",
"(",
"self",
".",
"genv",
".",
"yum_requirments_fn",
")",
"if",
"not",
"yum_req_fn",
":",
"return",
"[",
"]",
"assert",
"os",
".",
"path",
".",
"isfile",
"(",
"yum_req_fn",
")",
"update",
"=",
"int",
"(",
"update",
")",
"if",
"list_only",
":",
"return",
"[",
"_",
".",
"strip",
"(",
")",
"for",
"_",
"in",
"open",
"(",
"yum_req_fn",
")",
".",
"readlines",
"(",
")",
"if",
"_",
".",
"strip",
"(",
")",
"and",
"not",
"_",
".",
"strip",
".",
"startswith",
"(",
"'#'",
")",
"and",
"(",
"not",
"package_name",
"or",
"_",
".",
"strip",
"(",
")",
"==",
"package_name",
")",
"]",
"if",
"update",
":",
"self",
".",
"sudo_or_dryrun",
"(",
"'yum update --assumeyes'",
")",
"if",
"package_name",
":",
"self",
".",
"sudo_or_dryrun",
"(",
"'yum install --assumeyes %s'",
"%",
"package_name",
")",
"else",
":",
"if",
"self",
".",
"genv",
".",
"is_local",
":",
"self",
".",
"put_or_dryrun",
"(",
"local_path",
"=",
"yum_req_fn",
")",
"yum_req_fn",
"=",
"self",
".",
"genv",
".",
"put_remote_fn",
"self",
".",
"sudo_or_dryrun",
"(",
"'yum install --assumeyes $(cat %(yum_req_fn)s)'",
"%",
"yum_req_fn",
")"
] | 41.92 | 19.6 |
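The list_only filter above as a standalone helper (with the strip() call fixed): skip blank lines and comments, optionally keep only a single package.

def read_requirements(lines, package_name=None):
    return [
        line.strip() for line in lines
        if line.strip() and not line.strip().startswith('#')
        and (not package_name or line.strip() == package_name)
    ]

sample = ['# base tools\n', 'git\n', '\n', 'htop\n']
print(read_requirements(sample))           # ['git', 'htop']
print(read_requirements(sample, 'htop'))   # ['htop']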
def say(self, phrase, **_options):
'''
    Says the phrase, optionally allowing any voice options to be selected or overridden.
'''
language, voice, voiceinfo, options = self._configure(**_options)
self._logger.debug("Saying '%s' with '%s'", phrase, self.SLUG)
self._say(phrase, language, voice, voiceinfo, options) | [
"def",
"say",
"(",
"self",
",",
"phrase",
",",
"*",
"*",
"_options",
")",
":",
"language",
",",
"voice",
",",
"voiceinfo",
",",
"options",
"=",
"self",
".",
"_configure",
"(",
"*",
"*",
"_options",
")",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"Saying '%s' with '%s'\"",
",",
"phrase",
",",
"self",
".",
"SLUG",
")",
"self",
".",
"_say",
"(",
"phrase",
",",
"language",
",",
"voice",
",",
"voiceinfo",
",",
"options",
")"
] | 48.714286 | 27 |
def color2idx(self, red, green, blue):
"""Get an Excel index from"""
xlwt_colors = [
(0, 0, 0), (255, 255, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255),
(255, 255, 0), (255, 0, 255), (0, 255, 255), (0, 0, 0),
(255, 255, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255),
(255, 255, 0), (255, 0, 255), (0, 255, 255), (128, 0, 0),
(0, 128, 0), (0, 0, 128), (128, 128, 0), (128, 0, 128),
(0, 128, 128), (192, 192, 192), (128, 128, 128), (153, 153, 255),
(153, 51, 102), (255, 255, 204), (204, 255, 255), (102, 0, 102),
(255, 128, 128), (0, 102, 204), (204, 204, 255), (0, 0, 128),
(255, 0, 255), (255, 255, 0), (0, 255, 255), (128, 0, 128),
(128, 0, 0), (0, 128, 128), (0, 0, 255), (0, 204, 255),
(204, 255, 255), (204, 255, 204), (255, 255, 153), (153, 204, 255),
(255, 153, 204), (204, 153, 255), (255, 204, 153), (51, 102, 255),
(51, 204, 204), (153, 204, 0), (255, 204, 0), (255, 153, 0),
(255, 102, 0), (102, 102, 153), (150, 150, 150), (0, 51, 102),
(51, 153, 102), (0, 51, 0), (51, 51, 0), (153, 51, 0),
(153, 51, 102), (51, 51, 153), (51, 51, 51)
]
distances = [abs(red - r) + abs(green - g) + abs(blue - b)
for r, g, b in xlwt_colors]
min_dist_idx = distances.index(min(distances))
return min_dist_idx | [
"def",
"color2idx",
"(",
"self",
",",
"red",
",",
"green",
",",
"blue",
")",
":",
"xlwt_colors",
"=",
"[",
"(",
"0",
",",
"0",
",",
"0",
")",
",",
"(",
"255",
",",
"255",
",",
"255",
")",
",",
"(",
"255",
",",
"0",
",",
"0",
")",
",",
"(",
"0",
",",
"255",
",",
"0",
")",
",",
"(",
"0",
",",
"0",
",",
"255",
")",
",",
"(",
"255",
",",
"255",
",",
"0",
")",
",",
"(",
"255",
",",
"0",
",",
"255",
")",
",",
"(",
"0",
",",
"255",
",",
"255",
")",
",",
"(",
"0",
",",
"0",
",",
"0",
")",
",",
"(",
"255",
",",
"255",
",",
"255",
")",
",",
"(",
"255",
",",
"0",
",",
"0",
")",
",",
"(",
"0",
",",
"255",
",",
"0",
")",
",",
"(",
"0",
",",
"0",
",",
"255",
")",
",",
"(",
"255",
",",
"255",
",",
"0",
")",
",",
"(",
"255",
",",
"0",
",",
"255",
")",
",",
"(",
"0",
",",
"255",
",",
"255",
")",
",",
"(",
"128",
",",
"0",
",",
"0",
")",
",",
"(",
"0",
",",
"128",
",",
"0",
")",
",",
"(",
"0",
",",
"0",
",",
"128",
")",
",",
"(",
"128",
",",
"128",
",",
"0",
")",
",",
"(",
"128",
",",
"0",
",",
"128",
")",
",",
"(",
"0",
",",
"128",
",",
"128",
")",
",",
"(",
"192",
",",
"192",
",",
"192",
")",
",",
"(",
"128",
",",
"128",
",",
"128",
")",
",",
"(",
"153",
",",
"153",
",",
"255",
")",
",",
"(",
"153",
",",
"51",
",",
"102",
")",
",",
"(",
"255",
",",
"255",
",",
"204",
")",
",",
"(",
"204",
",",
"255",
",",
"255",
")",
",",
"(",
"102",
",",
"0",
",",
"102",
")",
",",
"(",
"255",
",",
"128",
",",
"128",
")",
",",
"(",
"0",
",",
"102",
",",
"204",
")",
",",
"(",
"204",
",",
"204",
",",
"255",
")",
",",
"(",
"0",
",",
"0",
",",
"128",
")",
",",
"(",
"255",
",",
"0",
",",
"255",
")",
",",
"(",
"255",
",",
"255",
",",
"0",
")",
",",
"(",
"0",
",",
"255",
",",
"255",
")",
",",
"(",
"128",
",",
"0",
",",
"128",
")",
",",
"(",
"128",
",",
"0",
",",
"0",
")",
",",
"(",
"0",
",",
"128",
",",
"128",
")",
",",
"(",
"0",
",",
"0",
",",
"255",
")",
",",
"(",
"0",
",",
"204",
",",
"255",
")",
",",
"(",
"204",
",",
"255",
",",
"255",
")",
",",
"(",
"204",
",",
"255",
",",
"204",
")",
",",
"(",
"255",
",",
"255",
",",
"153",
")",
",",
"(",
"153",
",",
"204",
",",
"255",
")",
",",
"(",
"255",
",",
"153",
",",
"204",
")",
",",
"(",
"204",
",",
"153",
",",
"255",
")",
",",
"(",
"255",
",",
"204",
",",
"153",
")",
",",
"(",
"51",
",",
"102",
",",
"255",
")",
",",
"(",
"51",
",",
"204",
",",
"204",
")",
",",
"(",
"153",
",",
"204",
",",
"0",
")",
",",
"(",
"255",
",",
"204",
",",
"0",
")",
",",
"(",
"255",
",",
"153",
",",
"0",
")",
",",
"(",
"255",
",",
"102",
",",
"0",
")",
",",
"(",
"102",
",",
"102",
",",
"153",
")",
",",
"(",
"150",
",",
"150",
",",
"150",
")",
",",
"(",
"0",
",",
"51",
",",
"102",
")",
",",
"(",
"51",
",",
"153",
",",
"102",
")",
",",
"(",
"0",
",",
"51",
",",
"0",
")",
",",
"(",
"51",
",",
"51",
",",
"0",
")",
",",
"(",
"153",
",",
"51",
",",
"0",
")",
",",
"(",
"153",
",",
"51",
",",
"102",
")",
",",
"(",
"51",
",",
"51",
",",
"153",
")",
",",
"(",
"51",
",",
"51",
",",
"51",
")",
"]",
"distances",
"=",
"[",
"abs",
"(",
"red",
"-",
"r",
")",
"+",
"abs",
"(",
"green",
"-",
"g",
")",
"+",
"abs",
"(",
"blue",
"-",
"b",
")",
"for",
"r",
",",
"g",
",",
"b",
"in",
"xlwt_colors",
"]",
"min_dist_idx",
"=",
"distances",
".",
"index",
"(",
"min",
"(",
"distances",
")",
")",
"return",
"min_dist_idx"
] | 51.357143 | 27.392857 |
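The lookup above is a nearest-neighbour search under a Manhattan (L1) distance over RGB; the same idea with a tiny made-up palette:

palette = [(0, 0, 0), (255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 255)]

def nearest_idx(r, g, b):
    distances = [abs(r - pr) + abs(g - pg) + abs(b - pb) for pr, pg, pb in palette]
    return distances.index(min(distances))

print(nearest_idx(200, 30, 40))  # 1 -> closest to pure red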
def _handle_result_line(self, sline):
"""
Parses the data line and adds to the dictionary.
:param sline: a split data line to parse
    :returns: the number of rows to jump ahead before parsing the next data
              line, or the error code -1
"""
as_kw = sline[3]
a_result = str(sline[5].split('^')[0])
self._cur_values[as_kw] = {
'DefaultResult': 'Result',
'Result': a_result
}
return 0 | [
"def",
"_handle_result_line",
"(",
"self",
",",
"sline",
")",
":",
"as_kw",
"=",
"sline",
"[",
"3",
"]",
"a_result",
"=",
"str",
"(",
"sline",
"[",
"5",
"]",
".",
"split",
"(",
"'^'",
")",
"[",
"0",
"]",
")",
"self",
".",
"_cur_values",
"[",
"as_kw",
"]",
"=",
"{",
"'DefaultResult'",
":",
"'Result'",
",",
"'Result'",
":",
"a_result",
"}",
"return",
"0"
] | 33.5 | 11.785714 |
def do_searchhex(self, arg):
"""
[~process] sh [address-address] <hexadecimal pattern>
[~process] searchhex [address-address] <hexadecimal pattern>
"""
token_list = self.split_tokens(arg, 1, 3)
pid, tid = self.get_process_and_thread_ids_from_prefix()
process = self.get_process(pid)
if len(token_list) == 1:
pattern = token_list[0]
minAddr = None
maxAddr = None
else:
pattern = token_list[-1]
addr, size = self.input_address_range(token_list[:-1], pid, tid)
minAddr = addr
maxAddr = addr + size
    iter = process.search_hexa(pattern, minAddr, maxAddr)
if process.get_bits() == 32:
addr_width = 8
else:
addr_width = 16
for addr, bytes in iter:
        print(HexDump.hexblock(bytes, addr, addr_width))
"def",
"do_searchhex",
"(",
"self",
",",
"arg",
")",
":",
"token_list",
"=",
"self",
".",
"split_tokens",
"(",
"arg",
",",
"1",
",",
"3",
")",
"pid",
",",
"tid",
"=",
"self",
".",
"get_process_and_thread_ids_from_prefix",
"(",
")",
"process",
"=",
"self",
".",
"get_process",
"(",
"pid",
")",
"if",
"len",
"(",
"token_list",
")",
"==",
"1",
":",
"pattern",
"=",
"token_list",
"[",
"0",
"]",
"minAddr",
"=",
"None",
"maxAddr",
"=",
"None",
"else",
":",
"pattern",
"=",
"token_list",
"[",
"-",
"1",
"]",
"addr",
",",
"size",
"=",
"self",
".",
"input_address_range",
"(",
"token_list",
"[",
":",
"-",
"1",
"]",
",",
"pid",
",",
"tid",
")",
"minAddr",
"=",
"addr",
"maxAddr",
"=",
"addr",
"+",
"size",
"iter",
"=",
"process",
".",
"search_hexa",
"(",
"pattern",
")",
"if",
"process",
".",
"get_bits",
"(",
")",
"==",
"32",
":",
"addr_width",
"=",
"8",
"else",
":",
"addr_width",
"=",
"16",
"for",
"addr",
",",
"bytes",
"in",
"iter",
":",
"print",
"(",
"HexDump",
".",
"hexblock",
"(",
"bytes",
",",
"addr",
",",
"addr_width",
")",
",",
")"
] | 36.541667 | 13.208333 |
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default | [
"def",
"get",
"(",
"self",
",",
"key",
",",
"default",
"=",
"None",
")",
":",
"try",
":",
"return",
"self",
"[",
"key",
"]",
"except",
"(",
"KeyError",
",",
"ValueError",
",",
"IndexError",
")",
":",
"return",
"default"
] | 25.588235 | 19 |
def _read_opt_pad(self, code, *, desc):
"""Read HOPOPT padding options.
Structure of HOPOPT padding options [RFC 8200]:
* Pad1 Option:
+-+-+-+-+-+-+-+-+
| 0 |
+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 hopopt.pad.type Option Type
0 0 hopopt.pad.type.value Option Number
0 0 hopopt.pad.type.action Action (00)
0 2 hopopt.pad.type.change Change Flag (0)
* PadN Option:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - -
| 1 | Opt Data Len | Option Data
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - -
Octets Bits Name Description
0 0 hopopt.pad.type Option Type
0 0 hopopt.pad.type.value Option Number
0 0 hopopt.pad.type.action Action (00)
0 2 hopopt.pad.type.change Change Flag (0)
1 8 hopopt.opt.length Length of Option Data
2 16 hopopt.pad.padding Padding
"""
_type = self._read_opt_type(code)
if code == 0:
opt = dict(
desc=desc,
type=_type,
length=1,
)
elif code == 1:
_size = self._read_unpack(1)
_padn = self._read_fileng(_size)
opt = dict(
desc=desc,
type=_type,
length=_size + 2,
padding=_padn,
)
else:
raise ProtocolError(f'{self.alias}: [Optno {code}] invalid format')
return opt | [
"def",
"_read_opt_pad",
"(",
"self",
",",
"code",
",",
"*",
",",
"desc",
")",
":",
"_type",
"=",
"self",
".",
"_read_opt_type",
"(",
"code",
")",
"if",
"code",
"==",
"0",
":",
"opt",
"=",
"dict",
"(",
"desc",
"=",
"desc",
",",
"type",
"=",
"_type",
",",
"length",
"=",
"1",
",",
")",
"elif",
"code",
"==",
"1",
":",
"_size",
"=",
"self",
".",
"_read_unpack",
"(",
"1",
")",
"_padn",
"=",
"self",
".",
"_read_fileng",
"(",
"_size",
")",
"opt",
"=",
"dict",
"(",
"desc",
"=",
"desc",
",",
"type",
"=",
"_type",
",",
"length",
"=",
"_size",
"+",
"2",
",",
"padding",
"=",
"_padn",
",",
")",
"else",
":",
"raise",
"ProtocolError",
"(",
"f'{self.alias}: [Optno {code}] invalid format'",
")",
"return",
"opt"
] | 37.490196 | 22.745098 |
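A standalone sketch of the Pad1/PadN wire format described in the docstring above, reading from a bytes buffer instead of the parser's file object:

def read_pad(buf, offset):
    code = buf[offset]
    if code == 0:   # Pad1: a single zero octet, no length or data
        return {'type': 'Pad1', 'length': 1}, offset + 1
    if code == 1:   # PadN: type octet, data-length octet, then padding bytes
        size = buf[offset + 1]
        padding = buf[offset + 2:offset + 2 + size]
        return {'type': 'PadN', 'length': size + 2, 'padding': padding}, offset + 2 + size
    raise ValueError('not a padding option: %d' % code)

opt, nxt = read_pad(b'\x01\x04\x00\x00\x00\x00', 0)
print(opt, nxt)  # PadN of total length 6, next option starts at offset 6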
def create_radius_stops(breaks, min_radius, max_radius):
"""Convert a data breaks into a radius ramp
"""
num_breaks = len(breaks)
radius_breaks = scale_between(min_radius, max_radius, num_breaks)
stops = []
for i, b in enumerate(breaks):
stops.append([b, radius_breaks[i]])
return stops | [
"def",
"create_radius_stops",
"(",
"breaks",
",",
"min_radius",
",",
"max_radius",
")",
":",
"num_breaks",
"=",
"len",
"(",
"breaks",
")",
"radius_breaks",
"=",
"scale_between",
"(",
"min_radius",
",",
"max_radius",
",",
"num_breaks",
")",
"stops",
"=",
"[",
"]",
"for",
"i",
",",
"b",
"in",
"enumerate",
"(",
"breaks",
")",
":",
"stops",
".",
"append",
"(",
"[",
"b",
",",
"radius_breaks",
"[",
"i",
"]",
"]",
")",
"return",
"stops"
] | 31.4 | 15.6 |
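scale_between is not shown in this record; assuming it produces evenly spaced values between the two radii, the stop construction boils down to:

def scale_between(lo, hi, n):  # assumption: simple linear spacing
    step = (hi - lo) / (n - 1) if n > 1 else 0
    return [lo + i * step for i in range(n)]

breaks = [10, 100, 1000]
radii = scale_between(2.0, 10.0, len(breaks))
print([[b, r] for b, r in zip(breaks, radii)])  # [[10, 2.0], [100, 6.0], [1000, 10.0]]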
def alias_catalog(self, catalog_id, alias_id):
"""Adds an ``Id`` to a ``Catalog`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Catalog`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another catalog, it is
reassigned to the given catalog ``Id``.
arg: catalog_id (osid.id.Id): the ``Id`` of a ``Catalog``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``catalog_id`` not found
raise: NullArgument - ``catalog_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinLookupSession.alias_bin_template
if self._catalog_session is not None:
return self._catalog_session.alias_catalog(catalog_id=catalog_id, alias_id=alias_id)
self._alias_id(primary_id=catalog_id, equivalent_id=alias_id) | [
"def",
"alias_catalog",
"(",
"self",
",",
"catalog_id",
",",
"alias_id",
")",
":",
"# Implemented from template for",
"# osid.resource.BinLookupSession.alias_bin_template",
"if",
"self",
".",
"_catalog_session",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_catalog_session",
".",
"alias_catalog",
"(",
"catalog_id",
"=",
"catalog_id",
",",
"alias_id",
"=",
"alias_id",
")",
"self",
".",
"_alias_id",
"(",
"primary_id",
"=",
"catalog_id",
",",
"equivalent_id",
"=",
"alias_id",
")"
] | 50.291667 | 21.041667 |
def height(self):
"""Returns the player's height (in inches).
:returns: An int representing a player's height in inches.
"""
doc = self.get_main_doc()
raw = doc('span[itemprop="height"]').text()
try:
feet, inches = map(int, raw.split('-'))
return feet * 12 + inches
except ValueError:
return None | [
"def",
"height",
"(",
"self",
")",
":",
"doc",
"=",
"self",
".",
"get_main_doc",
"(",
")",
"raw",
"=",
"doc",
"(",
"'span[itemprop=\"height\"]'",
")",
".",
"text",
"(",
")",
"try",
":",
"feet",
",",
"inches",
"=",
"map",
"(",
"int",
",",
"raw",
".",
"split",
"(",
"'-'",
")",
")",
"return",
"feet",
"*",
"12",
"+",
"inches",
"except",
"ValueError",
":",
"return",
"None"
] | 34.363636 | 12.727273 |
def to_string(cls, error_code):
"""Returns the string message for the given ``error_code``.
Args:
cls (JLinkEraseErrors): the ``JLinkEraseErrors`` class
error_code (int): error code to convert
Returns:
An error string corresponding to the error code.
Raises:
ValueError: if the error code is invalid.
"""
if error_code == cls.ILLEGAL_COMMAND:
return 'Failed to erase sector.'
return super(JLinkEraseErrors, cls).to_string(error_code) | [
"def",
"to_string",
"(",
"cls",
",",
"error_code",
")",
":",
"if",
"error_code",
"==",
"cls",
".",
"ILLEGAL_COMMAND",
":",
"return",
"'Failed to erase sector.'",
"return",
"super",
"(",
"JLinkEraseErrors",
",",
"cls",
")",
".",
"to_string",
"(",
"error_code",
")"
] | 33.0625 | 18.8125 |
def register_default_plugins(root_parser: ParserRegistryWithConverters):
"""
Utility method to register all default plugins on the given parser+converter registry
    :param root_parser: the parser and converter registry on which to register the plugins
    :return: nothing; the registry is modified in place
"""
# -------------------- CORE ---------------------------
try:
# -- primitive types
from parsyfiles.plugins_base.support_for_primitive_types import get_default_primitive_parsers, \
get_default_primitive_converters
root_parser.register_parsers(get_default_primitive_parsers())
root_parser.register_converters(get_default_primitive_converters())
except ImportError as e:
warn_import_error('primitive types', e)
try:
# -- collections
from parsyfiles.plugins_base.support_for_collections import get_default_collection_parsers, \
get_default_collection_converters
root_parser.register_parsers(get_default_collection_parsers(root_parser, root_parser))
root_parser.register_converters(get_default_collection_converters(root_parser))
except ImportError as e:
warn_import_error('dict', e)
try:
# -- objects
from parsyfiles.plugins_base.support_for_objects import get_default_object_parsers, \
get_default_object_converters
root_parser.register_parsers(get_default_object_parsers(root_parser, root_parser))
root_parser.register_converters(get_default_object_converters(root_parser))
except ImportError as e:
warn_import_error('objects', e)
try:
# -- config
from parsyfiles.plugins_base.support_for_configparser import get_default_config_parsers, \
get_default_config_converters
root_parser.register_parsers(get_default_config_parsers())
root_parser.register_converters(get_default_config_converters(root_parser))
except ImportError as e:
warn_import_error('config', e)
# ------------------------- OPTIONAL -----------------
try:
# -- jprops
from parsyfiles.plugins_optional.support_for_jprops import get_default_jprops_parsers
root_parser.register_parsers(get_default_jprops_parsers(root_parser, root_parser))
# root_parser.register_converters()
except ImportError as e:
warn_import_error('jprops', e)
try:
# -- yaml
from parsyfiles.plugins_optional.support_for_yaml import get_default_yaml_parsers
root_parser.register_parsers(get_default_yaml_parsers(root_parser, root_parser))
# root_parser.register_converters()
except ImportError as e:
warn_import_error('yaml', e)
try:
# -- numpy
from parsyfiles.plugins_optional.support_for_numpy import get_default_np_parsers, get_default_np_converters
root_parser.register_parsers(get_default_np_parsers())
root_parser.register_converters(get_default_np_converters())
except ImportError as e:
warn_import_error('numpy', e)
try:
# -- pandas
from parsyfiles.plugins_optional.support_for_pandas import get_default_pandas_parsers, \
get_default_pandas_converters
root_parser.register_parsers(get_default_pandas_parsers())
root_parser.register_converters(get_default_pandas_converters())
except ImportError as e:
warn_import_error('pandas', e) | [
"def",
"register_default_plugins",
"(",
"root_parser",
":",
"ParserRegistryWithConverters",
")",
":",
"# -------------------- CORE ---------------------------",
"try",
":",
"# -- primitive types",
"from",
"parsyfiles",
".",
"plugins_base",
".",
"support_for_primitive_types",
"import",
"get_default_primitive_parsers",
",",
"get_default_primitive_converters",
"root_parser",
".",
"register_parsers",
"(",
"get_default_primitive_parsers",
"(",
")",
")",
"root_parser",
".",
"register_converters",
"(",
"get_default_primitive_converters",
"(",
")",
")",
"except",
"ImportError",
"as",
"e",
":",
"warn_import_error",
"(",
"'primitive types'",
",",
"e",
")",
"try",
":",
"# -- collections",
"from",
"parsyfiles",
".",
"plugins_base",
".",
"support_for_collections",
"import",
"get_default_collection_parsers",
",",
"get_default_collection_converters",
"root_parser",
".",
"register_parsers",
"(",
"get_default_collection_parsers",
"(",
"root_parser",
",",
"root_parser",
")",
")",
"root_parser",
".",
"register_converters",
"(",
"get_default_collection_converters",
"(",
"root_parser",
")",
")",
"except",
"ImportError",
"as",
"e",
":",
"warn_import_error",
"(",
"'dict'",
",",
"e",
")",
"try",
":",
"# -- objects",
"from",
"parsyfiles",
".",
"plugins_base",
".",
"support_for_objects",
"import",
"get_default_object_parsers",
",",
"get_default_object_converters",
"root_parser",
".",
"register_parsers",
"(",
"get_default_object_parsers",
"(",
"root_parser",
",",
"root_parser",
")",
")",
"root_parser",
".",
"register_converters",
"(",
"get_default_object_converters",
"(",
"root_parser",
")",
")",
"except",
"ImportError",
"as",
"e",
":",
"warn_import_error",
"(",
"'objects'",
",",
"e",
")",
"try",
":",
"# -- config",
"from",
"parsyfiles",
".",
"plugins_base",
".",
"support_for_configparser",
"import",
"get_default_config_parsers",
",",
"get_default_config_converters",
"root_parser",
".",
"register_parsers",
"(",
"get_default_config_parsers",
"(",
")",
")",
"root_parser",
".",
"register_converters",
"(",
"get_default_config_converters",
"(",
"root_parser",
")",
")",
"except",
"ImportError",
"as",
"e",
":",
"warn_import_error",
"(",
"'config'",
",",
"e",
")",
"# ------------------------- OPTIONAL -----------------",
"try",
":",
"# -- jprops",
"from",
"parsyfiles",
".",
"plugins_optional",
".",
"support_for_jprops",
"import",
"get_default_jprops_parsers",
"root_parser",
".",
"register_parsers",
"(",
"get_default_jprops_parsers",
"(",
"root_parser",
",",
"root_parser",
")",
")",
"# root_parser.register_converters()",
"except",
"ImportError",
"as",
"e",
":",
"warn_import_error",
"(",
"'jprops'",
",",
"e",
")",
"try",
":",
"# -- yaml",
"from",
"parsyfiles",
".",
"plugins_optional",
".",
"support_for_yaml",
"import",
"get_default_yaml_parsers",
"root_parser",
".",
"register_parsers",
"(",
"get_default_yaml_parsers",
"(",
"root_parser",
",",
"root_parser",
")",
")",
"# root_parser.register_converters()",
"except",
"ImportError",
"as",
"e",
":",
"warn_import_error",
"(",
"'yaml'",
",",
"e",
")",
"try",
":",
"# -- numpy",
"from",
"parsyfiles",
".",
"plugins_optional",
".",
"support_for_numpy",
"import",
"get_default_np_parsers",
",",
"get_default_np_converters",
"root_parser",
".",
"register_parsers",
"(",
"get_default_np_parsers",
"(",
")",
")",
"root_parser",
".",
"register_converters",
"(",
"get_default_np_converters",
"(",
")",
")",
"except",
"ImportError",
"as",
"e",
":",
"warn_import_error",
"(",
"'numpy'",
",",
"e",
")",
"try",
":",
"# -- pandas",
"from",
"parsyfiles",
".",
"plugins_optional",
".",
"support_for_pandas",
"import",
"get_default_pandas_parsers",
",",
"get_default_pandas_converters",
"root_parser",
".",
"register_parsers",
"(",
"get_default_pandas_parsers",
"(",
")",
")",
"root_parser",
".",
"register_converters",
"(",
"get_default_pandas_converters",
"(",
")",
")",
"except",
"ImportError",
"as",
"e",
":",
"warn_import_error",
"(",
"'pandas'",
",",
"e",
")"
] | 46.028169 | 25.295775 |
def default_cell_formatter(table, column, row, value, **_):
"""
:type column: tri.table.Column
"""
formatter = _cell_formatters.get(type(value))
if formatter:
value = formatter(table=table, column=column, row=row, value=value)
if value is None:
return ''
return conditional_escape(value) | [
"def",
"default_cell_formatter",
"(",
"table",
",",
"column",
",",
"row",
",",
"value",
",",
"*",
"*",
"_",
")",
":",
"formatter",
"=",
"_cell_formatters",
".",
"get",
"(",
"type",
"(",
"value",
")",
")",
"if",
"formatter",
":",
"value",
"=",
"formatter",
"(",
"table",
"=",
"table",
",",
"column",
"=",
"column",
",",
"row",
"=",
"row",
",",
"value",
"=",
"value",
")",
"if",
"value",
"is",
"None",
":",
"return",
"''",
"return",
"conditional_escape",
"(",
"value",
")"
] | 26.833333 | 18.166667 |
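The type-based dispatch above, isolated: a dict such as _cell_formatters maps a value's type to a rendering function, and unknown types pass through unchanged (the formatters below are made up):

_cell_formatters = {
    bool: lambda value, **_: 'yes' if value else 'no',
    float: lambda value, **_: '%.2f' % value,
}

def format_cell(value, **kwargs):
    formatter = _cell_formatters.get(type(value))
    if formatter:
        value = formatter(value=value, **kwargs)
    return '' if value is None else value

print(format_cell(True), format_cell(3.14159), format_cell(None), format_cell('x'))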
def submit_btn(self, value, success=None):
"""This presses an input button with type=submit.
    Success must be given as a (coordinate, timeout) tuple.
Use (coordinate,) if you want to use the default timeout."""
self.press("css=input[value='{}']".format(value))
if success is not None:
assert self.is_available(*success) | [
"def",
"submit_btn",
"(",
"self",
",",
"value",
",",
"success",
"=",
"None",
")",
":",
"self",
".",
"press",
"(",
"\"css=input[value='{}']\"",
".",
"format",
"(",
"value",
")",
")",
"if",
"success",
"is",
"not",
"None",
":",
"assert",
"self",
".",
"is_available",
"(",
"*",
"success",
")"
] | 52.714286 | 8.857143 |
def find_ip(family=AF_INET, flavour="opendns"):
"""Find the publicly visible IP address of the current system.
This uses public DNS infrastructure that implement a special DNS "hack" to
return the IP address of the requester rather than some other address.
:param family: address family, optional, default AF_INET (ipv4)
:param flavour: selector for public infrastructure provider, optional
"""
flavours = {
"opendns": {
AF_INET: {
"@": ("resolver1.opendns.com", "resolver2.opendns.com"),
"qname": "myip.opendns.com",
"rdtype": "A",
},
AF_INET6: {
"@": ("resolver1.ipv6-sandbox.opendns.com", "resolver2.ipv6-sandbox.opendns.com"),
"qname": "myip.opendns.com",
"rdtype": "AAAA",
},
},
}
flavour = flavours["opendns"]
resolver = dns.resolver.Resolver()
# specify the custom nameservers to be used (as IPs):
resolver.nameservers = [next(iter(resolve(h, family=family))) for h in flavour[family]["@"]]
answers = resolver.query(qname=flavour[family]["qname"], rdtype=flavour[family]["rdtype"])
for rdata in answers:
return rdata.address
return None | [
"def",
"find_ip",
"(",
"family",
"=",
"AF_INET",
",",
"flavour",
"=",
"\"opendns\"",
")",
":",
"flavours",
"=",
"{",
"\"opendns\"",
":",
"{",
"AF_INET",
":",
"{",
"\"@\"",
":",
"(",
"\"resolver1.opendns.com\"",
",",
"\"resolver2.opendns.com\"",
")",
",",
"\"qname\"",
":",
"\"myip.opendns.com\"",
",",
"\"rdtype\"",
":",
"\"A\"",
",",
"}",
",",
"AF_INET6",
":",
"{",
"\"@\"",
":",
"(",
"\"resolver1.ipv6-sandbox.opendns.com\"",
",",
"\"resolver2.ipv6-sandbox.opendns.com\"",
")",
",",
"\"qname\"",
":",
"\"myip.opendns.com\"",
",",
"\"rdtype\"",
":",
"\"AAAA\"",
",",
"}",
",",
"}",
",",
"}",
"flavour",
"=",
"flavours",
"[",
"\"opendns\"",
"]",
"resolver",
"=",
"dns",
".",
"resolver",
".",
"Resolver",
"(",
")",
"# specify the custom nameservers to be used (as IPs):",
"resolver",
".",
"nameservers",
"=",
"[",
"next",
"(",
"iter",
"(",
"resolve",
"(",
"h",
",",
"family",
"=",
"family",
")",
")",
")",
"for",
"h",
"in",
"flavour",
"[",
"family",
"]",
"[",
"\"@\"",
"]",
"]",
"answers",
"=",
"resolver",
".",
"query",
"(",
"qname",
"=",
"flavour",
"[",
"family",
"]",
"[",
"\"qname\"",
"]",
",",
"rdtype",
"=",
"flavour",
"[",
"family",
"]",
"[",
"\"rdtype\"",
"]",
")",
"for",
"rdata",
"in",
"answers",
":",
"return",
"rdata",
".",
"address",
"return",
"None"
] | 37.666667 | 24.181818 |
def get_resource(self, resource_name, name):
# pylint: disable=too-many-locals, too-many-nested-blocks
"""Get a specific resource by name"""
try:
logger.info("Trying to get %s: '%s'", resource_name, name)
services_list = False
if resource_name == 'host' and '/' in name:
splitted_name = name.split('/')
services_list = True
name = splitted_name[0]
params = {'where': json.dumps({'name': name})}
if resource_name in ['host', 'service', 'user']:
params = {'where': json.dumps({'name': name, '_is_template': self.model})}
if resource_name == 'service' and '/' in name:
splitted_name = name.split('/')
# new_name = splitted_name[0] + '_' + splitted_name[1]
# name = splitted_name[1]
# Get host from name
response2 = self.backend.get(
'host', params={'where': json.dumps({'name': splitted_name[0]})})
if response2['_items']:
host = response2['_items'][0]
logger.info("Got host '%s' for the service '%s'",
splitted_name[0], splitted_name[1])
else:
logger.warning("Not found host '%s'!", splitted_name[0])
return False
params = {'where': json.dumps({'name': splitted_name[1],
'host': host['_id'],
'_is_template': self.model})}
if self.embedded and resource_name in self.embedded_resources:
params.update({'embedded': json.dumps(self.embedded_resources[resource_name])})
response = self.backend.get(resource_name, params=params)
if response['_items']:
response = response['_items'][0]
logger.info("-> found %s '%s': %s", resource_name, name, response['_id'])
if services_list:
# Get services for the host
params = {'where': json.dumps({'host': response['_id']})}
if self.embedded and 'service' in self.embedded_resources:
params.update(
{'embedded': json.dumps(self.embedded_resources['service'])})
response2 = self.backend.get('service', params=params)
if response2['_items']:
response['_services'] = response2['_items']
logger.info("Got %d services for host '%s'",
len(response2['_items']), splitted_name[0])
else:
logger.warning("Not found host '%s'!", splitted_name[0])
return False
# Exists in the backend, we got the element
if not self.dry_run:
logger.info("-> dumping %s: %s", resource_name, name)
# Filter fields prefixed with an _ (internal backend fields)
for field in list(response):
if field in ['_created', '_updated', '_etag', '_links', '_status']:
response.pop(field)
continue
# Filter fields prefixed with an _ in embedded items
if self.embedded and resource_name in self.embedded_resources and \
field in self.embedded_resources[resource_name]:
logger.info("-> embedded %s", field)
# Embedded items may be a list or a simple dictionary,
# always make it a list
embedded_items = response[field]
if not isinstance(response[field], list):
embedded_items = [response[field]]
# Filter fields in each embedded item
for embedded_item in embedded_items:
if not embedded_item:
continue
for embedded_field in list(embedded_item):
if embedded_field.startswith('_'):
embedded_item.pop(embedded_field)
dump = json.dumps(response, indent=4,
separators=(',', ': '), sort_keys=True)
if not self.quiet:
print(dump)
if resource_name == 'service' and '/' in name:
name = splitted_name[0] + '_' + splitted_name[1]
filename = self.file_dump(response,
'alignak-object-dump-%s-%s.json'
% (resource_name, name))
if filename:
logger.info("-> dumped %s '%s' to %s", resource_name, name, filename)
logger.info("-> dumped %s: %s", resource_name, name)
else:
if resource_name == 'service' and '/' in name:
name = splitted_name[0] + '_' + splitted_name[1]
logger.info("Dry-run mode: should have dumped an %s '%s'",
resource_name, name)
return True
else:
logger.warning("-> %s '%s' not found", resource_name, name)
return False
except BackendException as exp: # pragma: no cover, should never happen
logger.exception("Exception: %s", exp)
logger.error("Response: %s", exp.response)
print("Get error for '%s' : %s" % (resource_name, name))
print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("Exiting with error code: 5")
return False | [
"def",
"get_resource",
"(",
"self",
",",
"resource_name",
",",
"name",
")",
":",
"# pylint: disable=too-many-locals, too-many-nested-blocks",
"try",
":",
"logger",
".",
"info",
"(",
"\"Trying to get %s: '%s'\"",
",",
"resource_name",
",",
"name",
")",
"services_list",
"=",
"False",
"if",
"resource_name",
"==",
"'host'",
"and",
"'/'",
"in",
"name",
":",
"splitted_name",
"=",
"name",
".",
"split",
"(",
"'/'",
")",
"services_list",
"=",
"True",
"name",
"=",
"splitted_name",
"[",
"0",
"]",
"params",
"=",
"{",
"'where'",
":",
"json",
".",
"dumps",
"(",
"{",
"'name'",
":",
"name",
"}",
")",
"}",
"if",
"resource_name",
"in",
"[",
"'host'",
",",
"'service'",
",",
"'user'",
"]",
":",
"params",
"=",
"{",
"'where'",
":",
"json",
".",
"dumps",
"(",
"{",
"'name'",
":",
"name",
",",
"'_is_template'",
":",
"self",
".",
"model",
"}",
")",
"}",
"if",
"resource_name",
"==",
"'service'",
"and",
"'/'",
"in",
"name",
":",
"splitted_name",
"=",
"name",
".",
"split",
"(",
"'/'",
")",
"# new_name = splitted_name[0] + '_' + splitted_name[1]",
"# name = splitted_name[1]",
"# Get host from name",
"response2",
"=",
"self",
".",
"backend",
".",
"get",
"(",
"'host'",
",",
"params",
"=",
"{",
"'where'",
":",
"json",
".",
"dumps",
"(",
"{",
"'name'",
":",
"splitted_name",
"[",
"0",
"]",
"}",
")",
"}",
")",
"if",
"response2",
"[",
"'_items'",
"]",
":",
"host",
"=",
"response2",
"[",
"'_items'",
"]",
"[",
"0",
"]",
"logger",
".",
"info",
"(",
"\"Got host '%s' for the service '%s'\"",
",",
"splitted_name",
"[",
"0",
"]",
",",
"splitted_name",
"[",
"1",
"]",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Not found host '%s'!\"",
",",
"splitted_name",
"[",
"0",
"]",
")",
"return",
"False",
"params",
"=",
"{",
"'where'",
":",
"json",
".",
"dumps",
"(",
"{",
"'name'",
":",
"splitted_name",
"[",
"1",
"]",
",",
"'host'",
":",
"host",
"[",
"'_id'",
"]",
",",
"'_is_template'",
":",
"self",
".",
"model",
"}",
")",
"}",
"if",
"self",
".",
"embedded",
"and",
"resource_name",
"in",
"self",
".",
"embedded_resources",
":",
"params",
".",
"update",
"(",
"{",
"'embedded'",
":",
"json",
".",
"dumps",
"(",
"self",
".",
"embedded_resources",
"[",
"resource_name",
"]",
")",
"}",
")",
"response",
"=",
"self",
".",
"backend",
".",
"get",
"(",
"resource_name",
",",
"params",
"=",
"params",
")",
"if",
"response",
"[",
"'_items'",
"]",
":",
"response",
"=",
"response",
"[",
"'_items'",
"]",
"[",
"0",
"]",
"logger",
".",
"info",
"(",
"\"-> found %s '%s': %s\"",
",",
"resource_name",
",",
"name",
",",
"response",
"[",
"'_id'",
"]",
")",
"if",
"services_list",
":",
"# Get services for the host",
"params",
"=",
"{",
"'where'",
":",
"json",
".",
"dumps",
"(",
"{",
"'host'",
":",
"response",
"[",
"'_id'",
"]",
"}",
")",
"}",
"if",
"self",
".",
"embedded",
"and",
"'service'",
"in",
"self",
".",
"embedded_resources",
":",
"params",
".",
"update",
"(",
"{",
"'embedded'",
":",
"json",
".",
"dumps",
"(",
"self",
".",
"embedded_resources",
"[",
"'service'",
"]",
")",
"}",
")",
"response2",
"=",
"self",
".",
"backend",
".",
"get",
"(",
"'service'",
",",
"params",
"=",
"params",
")",
"if",
"response2",
"[",
"'_items'",
"]",
":",
"response",
"[",
"'_services'",
"]",
"=",
"response2",
"[",
"'_items'",
"]",
"logger",
".",
"info",
"(",
"\"Got %d services for host '%s'\"",
",",
"len",
"(",
"response2",
"[",
"'_items'",
"]",
")",
",",
"splitted_name",
"[",
"0",
"]",
")",
"else",
":",
"logger",
".",
"warning",
"(",
"\"Not found host '%s'!\"",
",",
"splitted_name",
"[",
"0",
"]",
")",
"return",
"False",
"# Exists in the backend, we got the element",
"if",
"not",
"self",
".",
"dry_run",
":",
"logger",
".",
"info",
"(",
"\"-> dumping %s: %s\"",
",",
"resource_name",
",",
"name",
")",
"# Filter fields prefixed with an _ (internal backend fields)",
"for",
"field",
"in",
"list",
"(",
"response",
")",
":",
"if",
"field",
"in",
"[",
"'_created'",
",",
"'_updated'",
",",
"'_etag'",
",",
"'_links'",
",",
"'_status'",
"]",
":",
"response",
".",
"pop",
"(",
"field",
")",
"continue",
"# Filter fields prefixed with an _ in embedded items",
"if",
"self",
".",
"embedded",
"and",
"resource_name",
"in",
"self",
".",
"embedded_resources",
"and",
"field",
"in",
"self",
".",
"embedded_resources",
"[",
"resource_name",
"]",
":",
"logger",
".",
"info",
"(",
"\"-> embedded %s\"",
",",
"field",
")",
"# Embedded items may be a list or a simple dictionary,",
"# always make it a list",
"embedded_items",
"=",
"response",
"[",
"field",
"]",
"if",
"not",
"isinstance",
"(",
"response",
"[",
"field",
"]",
",",
"list",
")",
":",
"embedded_items",
"=",
"[",
"response",
"[",
"field",
"]",
"]",
"# Filter fields in each embedded item",
"for",
"embedded_item",
"in",
"embedded_items",
":",
"if",
"not",
"embedded_item",
":",
"continue",
"for",
"embedded_field",
"in",
"list",
"(",
"embedded_item",
")",
":",
"if",
"embedded_field",
".",
"startswith",
"(",
"'_'",
")",
":",
"embedded_item",
".",
"pop",
"(",
"embedded_field",
")",
"dump",
"=",
"json",
".",
"dumps",
"(",
"response",
",",
"indent",
"=",
"4",
",",
"separators",
"=",
"(",
"','",
",",
"': '",
")",
",",
"sort_keys",
"=",
"True",
")",
"if",
"not",
"self",
".",
"quiet",
":",
"print",
"(",
"dump",
")",
"if",
"resource_name",
"==",
"'service'",
"and",
"'/'",
"in",
"name",
":",
"name",
"=",
"splitted_name",
"[",
"0",
"]",
"+",
"'_'",
"+",
"splitted_name",
"[",
"1",
"]",
"filename",
"=",
"self",
".",
"file_dump",
"(",
"response",
",",
"'alignak-object-dump-%s-%s.json'",
"%",
"(",
"resource_name",
",",
"name",
")",
")",
"if",
"filename",
":",
"logger",
".",
"info",
"(",
"\"-> dumped %s '%s' to %s\"",
",",
"resource_name",
",",
"name",
",",
"filename",
")",
"logger",
".",
"info",
"(",
"\"-> dumped %s: %s\"",
",",
"resource_name",
",",
"name",
")",
"else",
":",
"if",
"resource_name",
"==",
"'service'",
"and",
"'/'",
"in",
"name",
":",
"name",
"=",
"splitted_name",
"[",
"0",
"]",
"+",
"'_'",
"+",
"splitted_name",
"[",
"1",
"]",
"logger",
".",
"info",
"(",
"\"Dry-run mode: should have dumped an %s '%s'\"",
",",
"resource_name",
",",
"name",
")",
"return",
"True",
"else",
":",
"logger",
".",
"warning",
"(",
"\"-> %s '%s' not found\"",
",",
"resource_name",
",",
"name",
")",
"return",
"False",
"except",
"BackendException",
"as",
"exp",
":",
"# pragma: no cover, should never happen",
"logger",
".",
"exception",
"(",
"\"Exception: %s\"",
",",
"exp",
")",
"logger",
".",
"error",
"(",
"\"Response: %s\"",
",",
"exp",
".",
"response",
")",
"print",
"(",
"\"Get error for '%s' : %s\"",
"%",
"(",
"resource_name",
",",
"name",
")",
")",
"print",
"(",
"\"~~~~~~~~~~~~~~~~~~~~~~~~~~\"",
")",
"print",
"(",
"\"Exiting with error code: 5\"",
")",
"return",
"False"
] | 49.591667 | 25.083333 |
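A quick sketch of the "host/service" naming convention handled above; the values are hypothetical:

    name = "webserver/http_check"                 # "<host>/<service>" form
    host_part, service_part = name.split('/')
    dump_name = host_part + '_' + service_part    # filename-friendly: "webserver_http_check"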
def should_see_link_text(self, link_text, link_url):
"""Assert a link with the provided text points to the provided URL."""
elements = ElementSelector(
world.browser,
str('//a[@href="%s"][./text()="%s"]' % (link_url, link_text)),
filter_displayed=True,
)
if not elements:
raise AssertionError("Expected link not found.") | [
"def",
"should_see_link_text",
"(",
"self",
",",
"link_text",
",",
"link_url",
")",
":",
"elements",
"=",
"ElementSelector",
"(",
"world",
".",
"browser",
",",
"str",
"(",
"'//a[@href=\"%s\"][./text()=\"%s\"]'",
"%",
"(",
"link_url",
",",
"link_text",
")",
")",
",",
"filter_displayed",
"=",
"True",
",",
")",
"if",
"not",
"elements",
":",
"raise",
"AssertionError",
"(",
"\"Expected link not found.\"",
")"
] | 36 | 19 |
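The XPath above demands an exact href and an exact text node; a standalone check of that expression with lxml (the real step drives it through Selenium via ElementSelector):

    from lxml import etree

    doc = etree.fromstring('<root><a href="/docs">Docs</a></root>')
    xpath = '//a[@href="%s"][./text()="%s"]' % ("/docs", "Docs")
    assert doc.xpath(xpath)                                       # matches
    assert not doc.xpath('//a[@href="/docs"][./text()="docs"]')   # text match is case-sensitive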
def fit(self,
doc, # type: str
predict_proba, # type: Callable[[Any], Any]
):
# type: (...) -> TextExplainer
"""
Explain ``predict_proba`` probabilistic classification function
for the ``doc`` example. This method fits a local classification
pipeline following LIME approach.
To get the explanation use :meth:`show_prediction`,
:meth:`show_weights`, :meth:`explain_prediction` or
:meth:`explain_weights`.
Parameters
----------
doc : str
Text to explain
predict_proba : callable
Black-box classification pipeline. ``predict_proba``
should be a function which takes a list of strings (documents)
and returns a matrix of shape ``(n_samples, n_classes)`` with
probability values: a row per document and a column per output
label.
"""
self.doc_ = doc
if self.position_dependent:
samples, sims, mask, text = self.sampler.sample_near_with_mask(
doc=doc,
n_samples=self.n_samples
)
self.vec_ = SingleDocumentVectorizer(
token_pattern=self.token_pattern
).fit([doc])
X = ~mask
else:
self.vec_ = clone(self.vec).fit([doc])
samples, sims = self.sampler.sample_near(
doc=doc,
n_samples=self.n_samples
)
X = self.vec_.transform(samples)
if self.rbf_sigma is not None:
sims = rbf(1-sims, sigma=self.rbf_sigma)
self.samples_ = samples
self.similarity_ = sims
self.X_ = X
self.y_proba_ = predict_proba(samples)
self.clf_ = clone(self.clf)
self.metrics_ = _train_local_classifier(
estimator=self.clf_,
samples=X,
similarity=sims,
y_proba=self.y_proba_,
expand_factor=self.expand_factor,
random_state=self.rng_
)
return self | [
"def",
"fit",
"(",
"self",
",",
"doc",
",",
"# type: str",
"predict_proba",
",",
"# type: Callable[[Any], Any]",
")",
":",
"# type: (...) -> TextExplainer",
"self",
".",
"doc_",
"=",
"doc",
"if",
"self",
".",
"position_dependent",
":",
"samples",
",",
"sims",
",",
"mask",
",",
"text",
"=",
"self",
".",
"sampler",
".",
"sample_near_with_mask",
"(",
"doc",
"=",
"doc",
",",
"n_samples",
"=",
"self",
".",
"n_samples",
")",
"self",
".",
"vec_",
"=",
"SingleDocumentVectorizer",
"(",
"token_pattern",
"=",
"self",
".",
"token_pattern",
")",
".",
"fit",
"(",
"[",
"doc",
"]",
")",
"X",
"=",
"~",
"mask",
"else",
":",
"self",
".",
"vec_",
"=",
"clone",
"(",
"self",
".",
"vec",
")",
".",
"fit",
"(",
"[",
"doc",
"]",
")",
"samples",
",",
"sims",
"=",
"self",
".",
"sampler",
".",
"sample_near",
"(",
"doc",
"=",
"doc",
",",
"n_samples",
"=",
"self",
".",
"n_samples",
")",
"X",
"=",
"self",
".",
"vec_",
".",
"transform",
"(",
"samples",
")",
"if",
"self",
".",
"rbf_sigma",
"is",
"not",
"None",
":",
"sims",
"=",
"rbf",
"(",
"1",
"-",
"sims",
",",
"sigma",
"=",
"self",
".",
"rbf_sigma",
")",
"self",
".",
"samples_",
"=",
"samples",
"self",
".",
"similarity_",
"=",
"sims",
"self",
".",
"X_",
"=",
"X",
"self",
".",
"y_proba_",
"=",
"predict_proba",
"(",
"samples",
")",
"self",
".",
"clf_",
"=",
"clone",
"(",
"self",
".",
"clf",
")",
"self",
".",
"metrics_",
"=",
"_train_local_classifier",
"(",
"estimator",
"=",
"self",
".",
"clf_",
",",
"samples",
"=",
"X",
",",
"similarity",
"=",
"sims",
",",
"y_proba",
"=",
"self",
".",
"y_proba_",
",",
"expand_factor",
"=",
"self",
".",
"expand_factor",
",",
"random_state",
"=",
"self",
".",
"rng_",
")",
"return",
"self"
] | 32.983871 | 17.5 |
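A minimal end-to-end sketch of the fit flow above; the import path assumes this is eli5's TextExplainer and may differ:

    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import make_pipeline
    from eli5.lime import TextExplainer   # assumed location of the class above

    docs = ["good movie", "bad movie", "great film", "awful film"]
    pipe = make_pipeline(TfidfVectorizer(), LogisticRegression()).fit(docs, [1, 0, 1, 0])
    te = TextExplainer(random_state=42).fit("a truly great movie", pipe.predict_proba)
    print(te.metrics_)   # fidelity of the local white-box model to the black box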
def mv_connect_stations(mv_grid_district, graph, debug=False):
""" Connect LV stations to MV grid
Args
----
mv_grid_district: MVGridDistrictDing0
MVGridDistrictDing0 object for which the connection process has to be done
graph: :networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes
debug: bool, defaults to False
If True, information is printed during process
Returns
-------
:networkx:`NetworkX Graph Obj< >`
NetworkX graph object with nodes and newly created branches
"""
# WGS84 (conformal) to ETRS (equidistant) projection
proj1 = partial(
pyproj.transform,
pyproj.Proj(init='epsg:4326'), # source coordinate system
pyproj.Proj(init='epsg:3035')) # destination coordinate system
# ETRS (equidistant) to WGS84 (conformal) projection
proj2 = partial(
pyproj.transform,
pyproj.Proj(init='epsg:3035'), # source coordinate system
pyproj.Proj(init='epsg:4326')) # destination coordinate system
conn_dist_weight = cfg_ding0.get('mv_connect', 'load_area_sat_conn_dist_weight')
conn_dist_ring_mod = cfg_ding0.get('mv_connect', 'load_area_stat_conn_dist_ring_mod')
for lv_load_area in mv_grid_district.lv_load_areas():
# exclude aggregated Load Areas and choose only load areas that were connected to grid before
if not lv_load_area.is_aggregated and \
lv_load_area.lv_load_area_centre not in mv_grid_district.mv_grid.graph_isolated_nodes():
lv_load_area_centre = lv_load_area.lv_load_area_centre
# there's only one station: Replace Load Area centre by station in graph
if lv_load_area.lv_grid_districts_count() == 1:
# get station
lv_station = list(lv_load_area.lv_grid_districts())[0].lv_grid.station()
# get branches that are connected to Load Area centre
branches = mv_grid_district.mv_grid.graph_branches_from_node(lv_load_area_centre)
# connect LV station, delete Load Area centre
for node, branch in branches:
# backup kind and type of branch
branch_kind = branch['branch'].kind
branch_type = branch['branch'].type
branch_ring = branch['branch'].ring
# respect circuit breaker if existent
circ_breaker = branch['branch'].circuit_breaker
if circ_breaker is not None:
branch['branch'].circuit_breaker.geo_data = calc_geo_centre_point(lv_station, node)
# delete old branch to Load Area centre and create a new one to LV station
graph.remove_edge(lv_load_area_centre, node)
branch_length = calc_geo_dist_vincenty(lv_station, node)
branch = BranchDing0(length=branch_length,
circuit_breaker=circ_breaker,
kind=branch_kind,
type=branch_type,
ring=branch_ring)
if circ_breaker is not None:
circ_breaker.branch = branch
graph.add_edge(lv_station, node, branch=branch)
# delete Load Area centre from graph
graph.remove_node(lv_load_area_centre)
# there's more than one station: do the normal connection process (as in satellites)
else:
# connect LV stations of all grid districts
# =========================================
for lv_grid_district in lv_load_area.lv_grid_districts():
# get branches that are partly or fully located in load area
branches = calc_geo_branches_in_polygon(mv_grid_district.mv_grid,
lv_load_area.geo_area,
mode='intersects',
proj=proj1)
# filter branches that belong to satellites (load area groups) if Load Area is not a satellite
# itself
if not lv_load_area.is_satellite:
branches_valid = []
for branch in branches:
node1 = branch['adj_nodes'][0]
node2 = branch['adj_nodes'][1]
lv_load_area_group = get_lv_load_area_group_from_node_pair(node1, node2)
# delete branch as possible conn. target if it belongs to a group (=satellite) or
# if it belongs to a ring different from the ring of the current LVLA
if (lv_load_area_group is None) and\
(branch['branch'].ring is lv_load_area.ring):
branches_valid.append(branch)
branches = branches_valid
# find possible connection objects
lv_station = lv_grid_district.lv_grid.station()
lv_station_shp = transform(proj1, lv_station.geo_data)
conn_objects_min_stack = find_nearest_conn_objects(lv_station_shp, branches, proj1,
conn_dist_weight, debug,
branches_only=False)
# connect!
connect_node(lv_station,
lv_station_shp,
mv_grid_district.mv_grid,
conn_objects_min_stack[0],
proj2,
graph,
conn_dist_ring_mod,
debug)
# Replace Load Area centre by cable distributor
# ================================================
# create cable distributor and add it to grid
cable_dist = MVCableDistributorDing0(geo_data=lv_load_area_centre.geo_data,
grid=mv_grid_district.mv_grid)
mv_grid_district.mv_grid.add_cable_distributor(cable_dist)
# get branches that are connected to Load Area centre
branches = mv_grid_district.mv_grid.graph_branches_from_node(lv_load_area_centre)
# connect LV station, delete Load Area centre
for node, branch in branches:
# backup kind and type of branch
branch_kind = branch['branch'].kind
branch_type = branch['branch'].type
branch_ring = branch['branch'].ring
# respect circuit breaker if existent
circ_breaker = branch['branch'].circuit_breaker
if circ_breaker is not None:
branch['branch'].circuit_breaker.geo_data = calc_geo_centre_point(cable_dist, node)
# delete old branch to Load Area centre and create a new one to LV station
graph.remove_edge(lv_load_area_centre, node)
branch_length = calc_geo_dist_vincenty(cable_dist, node)
branch = BranchDing0(length=branch_length,
circuit_breaker=circ_breaker,
kind=branch_kind,
type=branch_type,
ring=branch_ring)
if circ_breaker is not None:
circ_breaker.branch = branch
graph.add_edge(cable_dist, node, branch=branch)
# delete Load Area centre from graph
graph.remove_node(lv_load_area_centre)
# Replace all overhead lines by cables
# ====================================
# if grid's default type is overhead line
if mv_grid_district.mv_grid.default_branch_kind == 'line':
# get all branches in load area
branches = calc_geo_branches_in_polygon(mv_grid_district.mv_grid,
lv_load_area.geo_area,
mode='contains',
proj=proj1)
# set type
for branch in branches:
branch['branch'].kind = mv_grid_district.mv_grid.default_branch_kind_settle
branch['branch'].type = mv_grid_district.mv_grid.default_branch_type_settle
return graph | [
"def",
"mv_connect_stations",
"(",
"mv_grid_district",
",",
"graph",
",",
"debug",
"=",
"False",
")",
":",
"# WGS84 (conformal) to ETRS (equidistant) projection",
"proj1",
"=",
"partial",
"(",
"pyproj",
".",
"transform",
",",
"pyproj",
".",
"Proj",
"(",
"init",
"=",
"'epsg:4326'",
")",
",",
"# source coordinate system",
"pyproj",
".",
"Proj",
"(",
"init",
"=",
"'epsg:3035'",
")",
")",
"# destination coordinate system",
"# ETRS (equidistant) to WGS84 (conformal) projection",
"proj2",
"=",
"partial",
"(",
"pyproj",
".",
"transform",
",",
"pyproj",
".",
"Proj",
"(",
"init",
"=",
"'epsg:3035'",
")",
",",
"# source coordinate system",
"pyproj",
".",
"Proj",
"(",
"init",
"=",
"'epsg:4326'",
")",
")",
"# destination coordinate system",
"conn_dist_weight",
"=",
"cfg_ding0",
".",
"get",
"(",
"'mv_connect'",
",",
"'load_area_sat_conn_dist_weight'",
")",
"conn_dist_ring_mod",
"=",
"cfg_ding0",
".",
"get",
"(",
"'mv_connect'",
",",
"'load_area_stat_conn_dist_ring_mod'",
")",
"for",
"lv_load_area",
"in",
"mv_grid_district",
".",
"lv_load_areas",
"(",
")",
":",
"# exclude aggregated Load Areas and choose only load areas that were connected to grid before",
"if",
"not",
"lv_load_area",
".",
"is_aggregated",
"and",
"lv_load_area",
".",
"lv_load_area_centre",
"not",
"in",
"mv_grid_district",
".",
"mv_grid",
".",
"graph_isolated_nodes",
"(",
")",
":",
"lv_load_area_centre",
"=",
"lv_load_area",
".",
"lv_load_area_centre",
"# there's only one station: Replace Load Area centre by station in graph",
"if",
"lv_load_area",
".",
"lv_grid_districts_count",
"(",
")",
"==",
"1",
":",
"# get station",
"lv_station",
"=",
"list",
"(",
"lv_load_area",
".",
"lv_grid_districts",
"(",
")",
")",
"[",
"0",
"]",
".",
"lv_grid",
".",
"station",
"(",
")",
"# get branches that are connected to Load Area centre",
"branches",
"=",
"mv_grid_district",
".",
"mv_grid",
".",
"graph_branches_from_node",
"(",
"lv_load_area_centre",
")",
"# connect LV station, delete Load Area centre",
"for",
"node",
",",
"branch",
"in",
"branches",
":",
"# backup kind and type of branch",
"branch_kind",
"=",
"branch",
"[",
"'branch'",
"]",
".",
"kind",
"branch_type",
"=",
"branch",
"[",
"'branch'",
"]",
".",
"type",
"branch_ring",
"=",
"branch",
"[",
"'branch'",
"]",
".",
"ring",
"# respect circuit breaker if existent",
"circ_breaker",
"=",
"branch",
"[",
"'branch'",
"]",
".",
"circuit_breaker",
"if",
"circ_breaker",
"is",
"not",
"None",
":",
"branch",
"[",
"'branch'",
"]",
".",
"circuit_breaker",
".",
"geo_data",
"=",
"calc_geo_centre_point",
"(",
"lv_station",
",",
"node",
")",
"# delete old branch to Load Area centre and create a new one to LV station",
"graph",
".",
"remove_edge",
"(",
"lv_load_area_centre",
",",
"node",
")",
"branch_length",
"=",
"calc_geo_dist_vincenty",
"(",
"lv_station",
",",
"node",
")",
"branch",
"=",
"BranchDing0",
"(",
"length",
"=",
"branch_length",
",",
"circuit_breaker",
"=",
"circ_breaker",
",",
"kind",
"=",
"branch_kind",
",",
"type",
"=",
"branch_type",
",",
"ring",
"=",
"branch_ring",
")",
"if",
"circ_breaker",
"is",
"not",
"None",
":",
"circ_breaker",
".",
"branch",
"=",
"branch",
"graph",
".",
"add_edge",
"(",
"lv_station",
",",
"node",
",",
"branch",
"=",
"branch",
")",
"# delete Load Area centre from graph",
"graph",
".",
"remove_node",
"(",
"lv_load_area_centre",
")",
"# there're more than one station: Do normal connection process (as in satellites)",
"else",
":",
"# connect LV stations of all grid districts",
"# =========================================",
"for",
"lv_grid_district",
"in",
"lv_load_area",
".",
"lv_grid_districts",
"(",
")",
":",
"# get branches that are partly or fully located in load area",
"branches",
"=",
"calc_geo_branches_in_polygon",
"(",
"mv_grid_district",
".",
"mv_grid",
",",
"lv_load_area",
".",
"geo_area",
",",
"mode",
"=",
"'intersects'",
",",
"proj",
"=",
"proj1",
")",
"# filter branches that belong to satellites (load area groups) if Load Area is not a satellite",
"# itself",
"if",
"not",
"lv_load_area",
".",
"is_satellite",
":",
"branches_valid",
"=",
"[",
"]",
"for",
"branch",
"in",
"branches",
":",
"node1",
"=",
"branch",
"[",
"'adj_nodes'",
"]",
"[",
"0",
"]",
"node2",
"=",
"branch",
"[",
"'adj_nodes'",
"]",
"[",
"1",
"]",
"lv_load_area_group",
"=",
"get_lv_load_area_group_from_node_pair",
"(",
"node1",
",",
"node2",
")",
"# delete branch as possible conn. target if it belongs to a group (=satellite) or",
"# if it belongs to a ring different from the ring of the current LVLA",
"if",
"(",
"lv_load_area_group",
"is",
"None",
")",
"and",
"(",
"branch",
"[",
"'branch'",
"]",
".",
"ring",
"is",
"lv_load_area",
".",
"ring",
")",
":",
"branches_valid",
".",
"append",
"(",
"branch",
")",
"branches",
"=",
"branches_valid",
"# find possible connection objects",
"lv_station",
"=",
"lv_grid_district",
".",
"lv_grid",
".",
"station",
"(",
")",
"lv_station_shp",
"=",
"transform",
"(",
"proj1",
",",
"lv_station",
".",
"geo_data",
")",
"conn_objects_min_stack",
"=",
"find_nearest_conn_objects",
"(",
"lv_station_shp",
",",
"branches",
",",
"proj1",
",",
"conn_dist_weight",
",",
"debug",
",",
"branches_only",
"=",
"False",
")",
"# connect!",
"connect_node",
"(",
"lv_station",
",",
"lv_station_shp",
",",
"mv_grid_district",
".",
"mv_grid",
",",
"conn_objects_min_stack",
"[",
"0",
"]",
",",
"proj2",
",",
"graph",
",",
"conn_dist_ring_mod",
",",
"debug",
")",
"# Replace Load Area centre by cable distributor",
"# ================================================",
"# create cable distributor and add it to grid",
"cable_dist",
"=",
"MVCableDistributorDing0",
"(",
"geo_data",
"=",
"lv_load_area_centre",
".",
"geo_data",
",",
"grid",
"=",
"mv_grid_district",
".",
"mv_grid",
")",
"mv_grid_district",
".",
"mv_grid",
".",
"add_cable_distributor",
"(",
"cable_dist",
")",
"# get branches that are connected to Load Area centre",
"branches",
"=",
"mv_grid_district",
".",
"mv_grid",
".",
"graph_branches_from_node",
"(",
"lv_load_area_centre",
")",
"# connect LV station, delete Load Area centre",
"for",
"node",
",",
"branch",
"in",
"branches",
":",
"# backup kind and type of branch",
"branch_kind",
"=",
"branch",
"[",
"'branch'",
"]",
".",
"kind",
"branch_type",
"=",
"branch",
"[",
"'branch'",
"]",
".",
"type",
"branch_ring",
"=",
"branch",
"[",
"'branch'",
"]",
".",
"ring",
"# respect circuit breaker if existent",
"circ_breaker",
"=",
"branch",
"[",
"'branch'",
"]",
".",
"circuit_breaker",
"if",
"circ_breaker",
"is",
"not",
"None",
":",
"branch",
"[",
"'branch'",
"]",
".",
"circuit_breaker",
".",
"geo_data",
"=",
"calc_geo_centre_point",
"(",
"cable_dist",
",",
"node",
")",
"# delete old branch to Load Area centre and create a new one to LV station",
"graph",
".",
"remove_edge",
"(",
"lv_load_area_centre",
",",
"node",
")",
"branch_length",
"=",
"calc_geo_dist_vincenty",
"(",
"cable_dist",
",",
"node",
")",
"branch",
"=",
"BranchDing0",
"(",
"length",
"=",
"branch_length",
",",
"circuit_breaker",
"=",
"circ_breaker",
",",
"kind",
"=",
"branch_kind",
",",
"type",
"=",
"branch_type",
",",
"ring",
"=",
"branch_ring",
")",
"if",
"circ_breaker",
"is",
"not",
"None",
":",
"circ_breaker",
".",
"branch",
"=",
"branch",
"graph",
".",
"add_edge",
"(",
"cable_dist",
",",
"node",
",",
"branch",
"=",
"branch",
")",
"# delete Load Area centre from graph",
"graph",
".",
"remove_node",
"(",
"lv_load_area_centre",
")",
"# Replace all overhead lines by cables",
"# ====================================",
"# if grid's default type is overhead line",
"if",
"mv_grid_district",
".",
"mv_grid",
".",
"default_branch_kind",
"==",
"'line'",
":",
"# get all branches in load area",
"branches",
"=",
"calc_geo_branches_in_polygon",
"(",
"mv_grid_district",
".",
"mv_grid",
",",
"lv_load_area",
".",
"geo_area",
",",
"mode",
"=",
"'contains'",
",",
"proj",
"=",
"proj1",
")",
"# set type",
"for",
"branch",
"in",
"branches",
":",
"branch",
"[",
"'branch'",
"]",
".",
"kind",
"=",
"mv_grid_district",
".",
"mv_grid",
".",
"default_branch_kind_settle",
"branch",
"[",
"'branch'",
"]",
".",
"type",
"=",
"mv_grid_district",
".",
"mv_grid",
".",
"default_branch_type_settle",
"return",
"graph"
] | 50.758621 | 27.637931 |
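The two partial() projections above follow the classic shapely/pyproj 1.x recipe; a self-contained sketch (the init= syntax is deprecated in newer pyproj releases):

    from functools import partial

    import pyproj
    from shapely.geometry import Point
    from shapely.ops import transform

    proj = partial(pyproj.transform,
                   pyproj.Proj(init='epsg:4326'),   # WGS84, degrees
                   pyproj.Proj(init='epsg:3035'))   # ETRS89-LAEA, metres
    pt_m = transform(proj, Point(13.4, 52.5))       # Berlin, now in metres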
def set_value(self, var_name, value):
"""Set the value of a given variable to a given value.
Parameters
----------
var_name : str
The name of the variable in the model whose value should be set.
value : float
The value the variable should be set to
"""
if var_name in self.outside_name_map:
var_name = self.outside_name_map[var_name]
print('%s=%.5f' % (var_name, 1e9*value))
if var_name == 'Precipitation':
value = 1e9*value
species_idx = self.species_name_map[var_name]
self.state[species_idx] = value | [
"def",
"set_value",
"(",
"self",
",",
"var_name",
",",
"value",
")",
":",
"if",
"var_name",
"in",
"self",
".",
"outside_name_map",
":",
"var_name",
"=",
"self",
".",
"outside_name_map",
"[",
"var_name",
"]",
"print",
"(",
"'%s=%.5f'",
"%",
"(",
"var_name",
",",
"1e9",
"*",
"value",
")",
")",
"if",
"var_name",
"==",
"'Precipitation'",
":",
"value",
"=",
"1e9",
"*",
"value",
"species_idx",
"=",
"self",
".",
"species_name_map",
"[",
"var_name",
"]",
"self",
".",
"state",
"[",
"species_idx",
"]",
"=",
"value"
] | 35.277778 | 14.777778 |
def _table_attrs(table):
'''
Helper function to find valid table attributes
'''
cmd = ['osqueryi'] + ['--json'] + ['pragma table_info({0})'.format(table)]
res = __salt__['cmd.run_all'](cmd)
if res['retcode'] == 0:
attrs = []
text = salt.utils.json.loads(res['stdout'])
for item in text:
attrs.append(item['name'])
return attrs
return False | [
"def",
"_table_attrs",
"(",
"table",
")",
":",
"cmd",
"=",
"[",
"'osqueryi'",
"]",
"+",
"[",
"'--json'",
"]",
"+",
"[",
"'pragma table_info({0})'",
".",
"format",
"(",
"table",
")",
"]",
"res",
"=",
"__salt__",
"[",
"'cmd.run_all'",
"]",
"(",
"cmd",
")",
"if",
"res",
"[",
"'retcode'",
"]",
"==",
"0",
":",
"attrs",
"=",
"[",
"]",
"text",
"=",
"salt",
".",
"utils",
".",
"json",
".",
"loads",
"(",
"res",
"[",
"'stdout'",
"]",
")",
"for",
"item",
"in",
"text",
":",
"attrs",
".",
"append",
"(",
"item",
"[",
"'name'",
"]",
")",
"return",
"attrs",
"return",
"False"
] | 30.692308 | 18.384615 |
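The JSON parsed above comes from osqueryi's table_info pragma; a sketch of the extraction step with a plausible (not verbatim) payload:

    import json

    stdout = '[{"cid": "0", "name": "pid"}, {"cid": "1", "name": "name"}]'
    attrs = [item['name'] for item in json.loads(stdout)]
    assert attrs == ['pid', 'name']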
def iglob(pathname, patterns):
"""
Allow multiple file formats. This is also recursive. For example:
>>> iglob("apps", "*.py,*.pyc")
"""
matches = []
patterns = patterns.split(",") if "," in patterns else listify(patterns)
for root, dirnames, filenames in os.walk(pathname):
matching = []
for pattern in patterns:
matching.extend(fnmatch.filter(filenames, pattern))
for filename in matching:
matches.append(op.join(root, filename))
return natsorted(matches) | [
"def",
"iglob",
"(",
"pathname",
",",
"patterns",
")",
":",
"matches",
"=",
"[",
"]",
"patterns",
"=",
"patterns",
".",
"split",
"(",
"\",\"",
")",
"if",
"\",\"",
"in",
"patterns",
"else",
"listify",
"(",
"patterns",
")",
"for",
"root",
",",
"dirnames",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"pathname",
")",
":",
"matching",
"=",
"[",
"]",
"for",
"pattern",
"in",
"patterns",
":",
"matching",
".",
"extend",
"(",
"fnmatch",
".",
"filter",
"(",
"filenames",
",",
"pattern",
")",
")",
"for",
"filename",
"in",
"matching",
":",
"matches",
".",
"append",
"(",
"op",
".",
"join",
"(",
"root",
",",
"filename",
")",
")",
"return",
"natsorted",
"(",
"matches",
")"
] | 34.933333 | 15.866667 |
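Usage sketch for iglob above ("apps" is a hypothetical directory); it relies on natsort's natsorted and a listify helper from the same codebase:

    pyfiles = iglob("apps", "*.py,*.pyc")   # comma-separated patterns, searched recursively
    configs = iglob("apps", "*.json")       # a single pattern also works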
def _unpack_gvars(g):
""" Unpack collection of GVars to BufferDict or numpy array. """
if g is not None:
g = _gvar.gvar(g)
if not hasattr(g, 'flat'):
# must be a scalar (ie, not an array and not a dictionary)
g = numpy.asarray(g)
return g | [
"def",
"_unpack_gvars",
"(",
"g",
")",
":",
"if",
"g",
"is",
"not",
"None",
":",
"g",
"=",
"_gvar",
".",
"gvar",
"(",
"g",
")",
"if",
"not",
"hasattr",
"(",
"g",
",",
"'flat'",
")",
":",
"# must be a scalar (ie, not an array and not a dictionary)",
"g",
"=",
"numpy",
".",
"asarray",
"(",
"g",
")",
"return",
"g"
] | 35.375 | 15.625 |
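A sketch of the three return shapes, assuming the function above is in scope with `_gvar` bound to the lsqfit/gvar package:

    s = _unpack_gvars('1.00(5)')           # scalar GVar -> 0-d numpy array (so it has .flat)
    a = _unpack_gvars(['1(1)', '2(1)'])    # sequence -> numpy array of GVars
    d = _unpack_gvars({'x': '1(1)'})       # dict -> gvar.BufferDict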
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draw the path
"""
nmax = rcParams['agg.path.chunksize'] # here at least for testing
npts = path.vertices.shape[0]
if (nmax > 100 and npts > nmax and path.should_simplify and
rgbFace is None and gc.get_hatch() is None):
nch = np.ceil(npts/float(nmax))
chsize = int(np.ceil(npts/nch))
i0 = np.arange(0, npts, chsize)
i1 = np.zeros_like(i0)
i1[:-1] = i0[1:] - 1
i1[-1] = npts
for ii0, ii1 in zip(i0, i1):
v = path.vertices[ii0:ii1,:]
c = path.codes
if c is not None:
c = c[ii0:ii1]
c[0] = Path.MOVETO # move to end of last chunk
p = Path(v, c)
self._renderer.draw_path(gc, p, transform, rgbFace)
else:
self._renderer.draw_path(gc, path, transform, rgbFace) | [
"def",
"draw_path",
"(",
"self",
",",
"gc",
",",
"path",
",",
"transform",
",",
"rgbFace",
"=",
"None",
")",
":",
"nmax",
"=",
"rcParams",
"[",
"'agg.path.chunksize'",
"]",
"# here at least for testing",
"npts",
"=",
"path",
".",
"vertices",
".",
"shape",
"[",
"0",
"]",
"if",
"(",
"nmax",
">",
"100",
"and",
"npts",
">",
"nmax",
"and",
"path",
".",
"should_simplify",
"and",
"rgbFace",
"is",
"None",
"and",
"gc",
".",
"get_hatch",
"(",
")",
"is",
"None",
")",
":",
"nch",
"=",
"np",
".",
"ceil",
"(",
"npts",
"/",
"float",
"(",
"nmax",
")",
")",
"chsize",
"=",
"int",
"(",
"np",
".",
"ceil",
"(",
"npts",
"/",
"nch",
")",
")",
"i0",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"npts",
",",
"chsize",
")",
"i1",
"=",
"np",
".",
"zeros_like",
"(",
"i0",
")",
"i1",
"[",
":",
"-",
"1",
"]",
"=",
"i0",
"[",
"1",
":",
"]",
"-",
"1",
"i1",
"[",
"-",
"1",
"]",
"=",
"npts",
"for",
"ii0",
",",
"ii1",
"in",
"zip",
"(",
"i0",
",",
"i1",
")",
":",
"v",
"=",
"path",
".",
"vertices",
"[",
"ii0",
":",
"ii1",
",",
":",
"]",
"c",
"=",
"path",
".",
"codes",
"if",
"c",
"is",
"not",
"None",
":",
"c",
"=",
"c",
"[",
"ii0",
":",
"ii1",
"]",
"c",
"[",
"0",
"]",
"=",
"Path",
".",
"MOVETO",
"# move to end of last chunk",
"p",
"=",
"Path",
"(",
"v",
",",
"c",
")",
"self",
".",
"_renderer",
".",
"draw_path",
"(",
"gc",
",",
"p",
",",
"transform",
",",
"rgbFace",
")",
"else",
":",
"self",
".",
"_renderer",
".",
"draw_path",
"(",
"gc",
",",
"path",
",",
"transform",
",",
"rgbFace",
")"
] | 40.583333 | 12.25 |
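The chunk-boundary arithmetic above, in isolation; note the half-open slices mean consecutive chunks do not share a vertex:

    import numpy as np

    npts, nmax = 25000, 10000                # nmax plays rcParams['agg.path.chunksize']
    nch = np.ceil(npts / float(nmax))        # 3 chunks
    chsize = int(np.ceil(npts / nch))        # 8334 points per chunk
    i0 = np.arange(0, npts, chsize)
    i1 = np.zeros_like(i0)
    i1[:-1] = i0[1:] - 1
    i1[-1] = npts
    print([(int(a), int(b)) for a, b in zip(i0, i1)])
    # [(0, 8333), (8334, 16667), (16668, 25000)]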
def _update_version(connection, version):
""" Updates version in the db to the given version.
Args:
connection (sqlalchemy connection): sqlalchemy session where to update version.
version (int): version of the migration.
"""
if connection.engine.name == 'sqlite':
connection.execute('PRAGMA user_version = {}'.format(version))
elif connection.engine.name == 'postgresql':
connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_SCHEMA_NAME)))
connection.execute(DDL('CREATE SCHEMA IF NOT EXISTS {};'.format(POSTGRES_PARTITION_SCHEMA_NAME)))
connection.execute('CREATE TABLE IF NOT EXISTS {}.user_version(version INTEGER NOT NULL);'
.format(POSTGRES_SCHEMA_NAME))
# upsert.
if connection.execute('SELECT * FROM {}.user_version;'.format(POSTGRES_SCHEMA_NAME)).fetchone():
# update
connection.execute('UPDATE {}.user_version SET version = {};'
.format(POSTGRES_SCHEMA_NAME, version))
else:
# insert
connection.execute('INSERT INTO {}.user_version (version) VALUES ({})'
.format(POSTGRES_SCHEMA_NAME, version))
else:
raise DatabaseMissingError('Do not know how to migrate {} engine.'
.format(connection.engine.driver)) | [
"def",
"_update_version",
"(",
"connection",
",",
"version",
")",
":",
"if",
"connection",
".",
"engine",
".",
"name",
"==",
"'sqlite'",
":",
"connection",
".",
"execute",
"(",
"'PRAGMA user_version = {}'",
".",
"format",
"(",
"version",
")",
")",
"elif",
"connection",
".",
"engine",
".",
"name",
"==",
"'postgresql'",
":",
"connection",
".",
"execute",
"(",
"DDL",
"(",
"'CREATE SCHEMA IF NOT EXISTS {};'",
".",
"format",
"(",
"POSTGRES_SCHEMA_NAME",
")",
")",
")",
"connection",
".",
"execute",
"(",
"DDL",
"(",
"'CREATE SCHEMA IF NOT EXISTS {};'",
".",
"format",
"(",
"POSTGRES_PARTITION_SCHEMA_NAME",
")",
")",
")",
"connection",
".",
"execute",
"(",
"'CREATE TABLE IF NOT EXISTS {}.user_version(version INTEGER NOT NULL);'",
".",
"format",
"(",
"POSTGRES_SCHEMA_NAME",
")",
")",
"# upsert.",
"if",
"connection",
".",
"execute",
"(",
"'SELECT * FROM {}.user_version;'",
".",
"format",
"(",
"POSTGRES_SCHEMA_NAME",
")",
")",
".",
"fetchone",
"(",
")",
":",
"# update",
"connection",
".",
"execute",
"(",
"'UPDATE {}.user_version SET version = {};'",
".",
"format",
"(",
"POSTGRES_SCHEMA_NAME",
",",
"version",
")",
")",
"else",
":",
"# insert",
"connection",
".",
"execute",
"(",
"'INSERT INTO {}.user_version (version) VALUES ({})'",
".",
"format",
"(",
"POSTGRES_SCHEMA_NAME",
",",
"version",
")",
")",
"else",
":",
"raise",
"DatabaseMissingError",
"(",
"'Do not know how to migrate {} engine.'",
".",
"format",
"(",
"connection",
".",
"engine",
".",
"driver",
")",
")"
] | 44.612903 | 30.483871 |
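A sketch of the SQLite branch, assuming SQLAlchemy 1.x, where Connection.execute() accepts raw SQL strings:

    import sqlalchemy as sa

    engine = sa.create_engine('sqlite://')   # in-memory database
    with engine.connect() as conn:
        _update_version(conn, 3)
        print(conn.execute('PRAGMA user_version').fetchone())   # (3,)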
def allow_attribute_comments(chain, node):
"""
This augmentation is to allow comments on class attributes, for example:
class SomeClass(object):
some_attribute = 5
''' This is a docstring for the above attribute '''
"""
# TODO: find the relevant citation for why this is the correct way to comment attributes
if isinstance(node.previous_sibling(), astroid.Assign) and \
isinstance(node.parent, (astroid.Class, astroid.Module)) and \
isinstance(node.value, astroid.Const) and \
isinstance(node.value.value, BASESTRING):
return
chain() | [
"def",
"allow_attribute_comments",
"(",
"chain",
",",
"node",
")",
":",
"# TODO: find the relevant citation for why this is the correct way to comment attributes",
"if",
"isinstance",
"(",
"node",
".",
"previous_sibling",
"(",
")",
",",
"astroid",
".",
"Assign",
")",
"and",
"isinstance",
"(",
"node",
".",
"parent",
",",
"(",
"astroid",
".",
"Class",
",",
"astroid",
".",
"Module",
")",
")",
"and",
"isinstance",
"(",
"node",
".",
"value",
",",
"astroid",
".",
"Const",
")",
"and",
"isinstance",
"(",
"node",
".",
"value",
".",
"value",
",",
"BASESTRING",
")",
":",
"return",
"chain",
"(",
")"
] | 35.764706 | 23.294118 |
def init_config(policy_config):
"""Get policy lambda execution configuration.
cli parameters are serialized into the policy lambda config,
we merge those with any policy specific execution options.
--assume role and -s output directory get special handling, as
to disambiguate any cli context.
account id is sourced from the config options or from api call
and cached as a global
"""
global account_id
exec_options = policy_config.get('execution-options', {})
# Remove some configuration options that don't make sense to translate from
# cli to lambda automatically.
# - assume role on cli doesn't translate, it is the default lambda role and
# used to provision the lambda.
# - profile doesn't translate to lambda; it depends on `home` dir setup
# - dryrun doesn't translate (and shouldn't be present)
# - region doesn't translate from cli (the lambda is bound to a region), and
# on the cli represents the region the lambda is provisioned in.
for k in ('assume_role', 'profile', 'region', 'dryrun', 'cache'):
exec_options.pop(k, None)
# a cli local directory doesn't translate to lambda
if not exec_options.get('output_dir', '').startswith('s3'):
exec_options['output_dir'] = get_local_output_dir()
# we can source account id from the cli parameters to avoid the sts call
if exec_options.get('account_id'):
account_id = exec_options['account_id']
# merge with policy specific configuration
exec_options.update(
policy_config['policies'][0].get('mode', {}).get('execution-options', {}))
# if using assume role in lambda ensure that the correct
# execution account is captured in options.
if 'assume_role' in exec_options:
account_id = exec_options['assume_role'].split(':')[4]
elif account_id is None:
session = boto3.Session()
account_id = get_account_id_from_sts(session)
exec_options['account_id'] = account_id
# Historical compatibility with manually set execution options
# previously this was a boolean, its now a string value with the
# boolean flag triggering a string value of 'aws'
if 'metrics_enabled' in exec_options \
and isinstance(exec_options['metrics_enabled'], bool) \
and exec_options['metrics_enabled']:
exec_options['metrics_enabled'] = 'aws'
return Config.empty(**exec_options) | [
"def",
"init_config",
"(",
"policy_config",
")",
":",
"global",
"account_id",
"exec_options",
"=",
"policy_config",
".",
"get",
"(",
"'execution-options'",
",",
"{",
"}",
")",
"# Remove some configuration options that don't make sense to translate from",
"# cli to lambda automatically.",
"# - assume role on cli doesn't translate, it is the default lambda role and",
"# used to provision the lambda.",
"# - profile doesnt translate to lambda its `home` dir setup dependent",
"# - dryrun doesn't translate (and shouldn't be present)",
"# - region doesn't translate from cli (the lambda is bound to a region), and",
"# on the cli represents the region the lambda is provisioned in.",
"for",
"k",
"in",
"(",
"'assume_role'",
",",
"'profile'",
",",
"'region'",
",",
"'dryrun'",
",",
"'cache'",
")",
":",
"exec_options",
".",
"pop",
"(",
"k",
",",
"None",
")",
"# a cli local directory doesn't translate to lambda",
"if",
"not",
"exec_options",
".",
"get",
"(",
"'output_dir'",
",",
"''",
")",
".",
"startswith",
"(",
"'s3'",
")",
":",
"exec_options",
"[",
"'output_dir'",
"]",
"=",
"get_local_output_dir",
"(",
")",
"# we can source account id from the cli parameters to avoid the sts call",
"if",
"exec_options",
".",
"get",
"(",
"'account_id'",
")",
":",
"account_id",
"=",
"exec_options",
"[",
"'account_id'",
"]",
"# merge with policy specific configuration",
"exec_options",
".",
"update",
"(",
"policy_config",
"[",
"'policies'",
"]",
"[",
"0",
"]",
".",
"get",
"(",
"'mode'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'execution-options'",
",",
"{",
"}",
")",
")",
"# if using assume role in lambda ensure that the correct",
"# execution account is captured in options.",
"if",
"'assume_role'",
"in",
"exec_options",
":",
"account_id",
"=",
"exec_options",
"[",
"'assume_role'",
"]",
".",
"split",
"(",
"':'",
")",
"[",
"4",
"]",
"elif",
"account_id",
"is",
"None",
":",
"session",
"=",
"boto3",
".",
"Session",
"(",
")",
"account_id",
"=",
"get_account_id_from_sts",
"(",
"session",
")",
"exec_options",
"[",
"'account_id'",
"]",
"=",
"account_id",
"# Historical compatibility with manually set execution options",
"# previously this was a boolean, its now a string value with the",
"# boolean flag triggering a string value of 'aws'",
"if",
"'metrics_enabled'",
"in",
"exec_options",
"and",
"isinstance",
"(",
"exec_options",
"[",
"'metrics_enabled'",
"]",
",",
"bool",
")",
"and",
"exec_options",
"[",
"'metrics_enabled'",
"]",
":",
"exec_options",
"[",
"'metrics_enabled'",
"]",
"=",
"'aws'",
"return",
"Config",
".",
"empty",
"(",
"*",
"*",
"exec_options",
")"
] | 41.77193 | 21.210526 |
def setrank(self,v):
"""set rank value for vertex v and add it to the corresponding layer.
The Layer is created if it is the first vertex with this rank.
"""
assert self.dag
r=max([self.grx[x].rank for x in v.N(-1)]+[-1])+1
self.grx[v].rank=r
# add it to its layer:
try:
self.layers[r].append(v)
except IndexError:
assert r==len(self.layers)
self.layers.append(Layer([v])) | [
"def",
"setrank",
"(",
"self",
",",
"v",
")",
":",
"assert",
"self",
".",
"dag",
"r",
"=",
"max",
"(",
"[",
"self",
".",
"grx",
"[",
"x",
"]",
".",
"rank",
"for",
"x",
"in",
"v",
".",
"N",
"(",
"-",
"1",
")",
"]",
"+",
"[",
"-",
"1",
"]",
")",
"+",
"1",
"self",
".",
"grx",
"[",
"v",
"]",
".",
"rank",
"=",
"r",
"# add it to its layer:",
"try",
":",
"self",
".",
"layers",
"[",
"r",
"]",
".",
"append",
"(",
"v",
")",
"except",
"IndexError",
":",
"assert",
"r",
"==",
"len",
"(",
"self",
".",
"layers",
")",
"self",
".",
"layers",
".",
"append",
"(",
"Layer",
"(",
"[",
"v",
"]",
")",
")"
] | 36.230769 | 12.384615 |
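The recurrence above is longest-path layering: rank(v) = max(rank of predecessors) + 1, with sources landing at rank 0. The same rule on a plain predecessor dict:

    preds = {'a': [], 'b': ['a'], 'c': ['a', 'b'], 'd': ['c']}
    rank = {}
    for v in ('a', 'b', 'c', 'd'):   # any topological order
        rank[v] = max([rank[p] for p in preds[v]] + [-1]) + 1
    print(rank)                      # {'a': 0, 'b': 1, 'c': 2, 'd': 3}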
def get_headers(self):
""" Return a HTTPStatus compliant headers attribute
FIX: duplicate headers will collide terribly!
"""
headers = {'Content-Type': goldman.JSON_MIMETYPE}
for error in self.errors:
if 'headers' in error:
headers.update(error['headers'])
return headers | [
"def",
"get_headers",
"(",
"self",
")",
":",
"headers",
"=",
"{",
"'Content-Type'",
":",
"goldman",
".",
"JSON_MIMETYPE",
"}",
"for",
"error",
"in",
"self",
".",
"errors",
":",
"if",
"'headers'",
"in",
"error",
":",
"headers",
".",
"update",
"(",
"error",
"[",
"'headers'",
"]",
")",
"return",
"headers"
] | 28.25 | 17.25 |
def reply_contact(
self,
phone_number: str,
first_name: str,
quote: bool = None,
last_name: str = "",
vcard: str = "",
disable_notification: bool = None,
reply_to_message_id: int = None,
reply_markup: Union[
"pyrogram.InlineKeyboardMarkup",
"pyrogram.ReplyKeyboardMarkup",
"pyrogram.ReplyKeyboardRemove",
"pyrogram.ForceReply"
] = None
) -> "Message":
"""Bound method *reply_contact* of :obj:`Message <pyrogram.Message>`.
Use as a shortcut for:
.. code-block:: python
client.send_contact(
chat_id=message.chat.id,
phone_number=phone_number,
first_name=first_name
)
Example:
.. code-block:: python
message.reply_contact(phone_number, "Dan")
Args:
phone_number (``str``):
Contact's phone number.
first_name (``str``):
Contact's first name.
quote (``bool``, *optional*):
If ``True``, the message will be sent as a reply to this message.
If *reply_to_message_id* is passed, this parameter will be ignored.
Defaults to ``True`` in group chats and ``False`` in private chats.
last_name (``str``, *optional*):
Contact's last name.
vcard (``str``, *optional*):
Additional data about the contact in the form of a vCard, 0-2048 bytes
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
reply_to_message_id (``int``, *optional*):
If the message is a reply, ID of the original message.
reply_markup (:obj:`InlineKeyboardMarkup` | :obj:`ReplyKeyboardMarkup` | :obj:`ReplyKeyboardRemove` | :obj:`ForceReply`, *optional*):
Additional interface options. An object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
Returns:
On success, the sent :obj:`Message <pyrogram.Message>` is returned.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
if quote is None:
quote = self.chat.type != "private"
if reply_to_message_id is None and quote:
reply_to_message_id = self.message_id
return self._client.send_contact(
chat_id=self.chat.id,
phone_number=phone_number,
first_name=first_name,
last_name=last_name,
vcard=vcard,
disable_notification=disable_notification,
reply_to_message_id=reply_to_message_id,
reply_markup=reply_markup
) | [
"def",
"reply_contact",
"(",
"self",
",",
"phone_number",
":",
"str",
",",
"first_name",
":",
"str",
",",
"quote",
":",
"bool",
"=",
"None",
",",
"last_name",
":",
"str",
"=",
"\"\"",
",",
"vcard",
":",
"str",
"=",
"\"\"",
",",
"disable_notification",
":",
"bool",
"=",
"None",
",",
"reply_to_message_id",
":",
"int",
"=",
"None",
",",
"reply_markup",
":",
"Union",
"[",
"\"pyrogram.InlineKeyboardMarkup\"",
",",
"\"pyrogram.ReplyKeyboardMarkup\"",
",",
"\"pyrogram.ReplyKeyboardRemove\"",
",",
"\"pyrogram.ForceReply\"",
"]",
"=",
"None",
")",
"->",
"\"Message\"",
":",
"if",
"quote",
"is",
"None",
":",
"quote",
"=",
"self",
".",
"chat",
".",
"type",
"!=",
"\"private\"",
"if",
"reply_to_message_id",
"is",
"None",
"and",
"quote",
":",
"reply_to_message_id",
"=",
"self",
".",
"message_id",
"return",
"self",
".",
"_client",
".",
"send_contact",
"(",
"chat_id",
"=",
"self",
".",
"chat",
".",
"id",
",",
"phone_number",
"=",
"phone_number",
",",
"first_name",
"=",
"first_name",
",",
"last_name",
"=",
"last_name",
",",
"vcard",
"=",
"vcard",
",",
"disable_notification",
"=",
"disable_notification",
",",
"reply_to_message_id",
"=",
"reply_to_message_id",
",",
"reply_markup",
"=",
"reply_markup",
")"
] | 34.25 | 21.202381 |
def confirm(action, default=None, skip=False):
"""
A shortcut for typical confirmation prompt.
:param action:
a string describing the action, e.g. "Apply changes". A question mark
will be appended.
:param default:
`bool` or `None`. Determines what happens when user hits :kbd:`Enter`
without typing in a choice. If `True`, default choice is "yes". If
`False`, it is "no". If `None` the prompt keeps reappearing until user
types in a choice (not necessarily acceptable) or until the number of
iterations reaches the limit. Default is `None`.
:param skip:
`bool`; if `True`, no interactive prompt is used and default choice is
returned (useful for batch mode). Default is `False`.
Usage::
def delete(key, silent=False):
item = db.get(Item, args.key)
if confirm('Delete '+item.title, default=True, skip=silent):
item.delete()
print('Item deleted.')
else:
print('Operation cancelled.')
Returns `None` on `KeyboardInterrupt` event.
"""
MAX_ITERATIONS = 3
if skip:
return default
else:
defaults = {
None: ('y','n'),
True: ('Y','n'),
False: ('y','N'),
}
y, n = defaults[default]
prompt = text_type('{action}? ({y}/{n})').format(**locals())
choice = None
try:
if default is None:
cnt = 1
while not choice and cnt < MAX_ITERATIONS:
choice = safe_input(prompt)
cnt += 1
else:
choice = safe_input(prompt)
except KeyboardInterrupt:
return None
if choice in ('yes', 'y', 'Y'):
return True
if choice in ('no', 'n', 'N'):
return False
if default is not None:
return default
return None | [
"def",
"confirm",
"(",
"action",
",",
"default",
"=",
"None",
",",
"skip",
"=",
"False",
")",
":",
"MAX_ITERATIONS",
"=",
"3",
"if",
"skip",
":",
"return",
"default",
"else",
":",
"defaults",
"=",
"{",
"None",
":",
"(",
"'y'",
",",
"'n'",
")",
",",
"True",
":",
"(",
"'Y'",
",",
"'n'",
")",
",",
"False",
":",
"(",
"'y'",
",",
"'N'",
")",
",",
"}",
"y",
",",
"n",
"=",
"defaults",
"[",
"default",
"]",
"prompt",
"=",
"text_type",
"(",
"'{action}? ({y}/{n})'",
")",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
"choice",
"=",
"None",
"try",
":",
"if",
"default",
"is",
"None",
":",
"cnt",
"=",
"1",
"while",
"not",
"choice",
"and",
"cnt",
"<",
"MAX_ITERATIONS",
":",
"choice",
"=",
"safe_input",
"(",
"prompt",
")",
"cnt",
"+=",
"1",
"else",
":",
"choice",
"=",
"safe_input",
"(",
"prompt",
")",
"except",
"KeyboardInterrupt",
":",
"return",
"None",
"if",
"choice",
"in",
"(",
"'yes'",
",",
"'y'",
",",
"'Y'",
")",
":",
"return",
"True",
"if",
"choice",
"in",
"(",
"'no'",
",",
"'n'",
",",
"'N'",
")",
":",
"return",
"False",
"if",
"default",
"is",
"not",
"None",
":",
"return",
"default",
"return",
"None"
] | 29.968254 | 20.793651 |
def remove_existing_links(root_dir):
"""Delete any symlinks present at the root of a directory.
Parameters
----------
root_dir : `str`
Directory that might contain symlinks.
Notes
-----
This function is used to remove any symlinks created by `link_directories`.
Running ``remove_existing_links`` at the beginning of a build ensures that
builds are isolated. For example, if a package is un-setup it won't
re-appear in the documentation because its symlink still exists.
"""
logger = logging.getLogger(__name__)
for name in os.listdir(root_dir):
full_name = os.path.join(root_dir, name)
if os.path.islink(full_name):
logger.debug('Deleting existing symlink {0}'.format(full_name))
os.remove(full_name) | [
"def",
"remove_existing_links",
"(",
"root_dir",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"for",
"name",
"in",
"os",
".",
"listdir",
"(",
"root_dir",
")",
":",
"full_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"name",
")",
"if",
"os",
".",
"path",
".",
"islink",
"(",
"full_name",
")",
":",
"logger",
".",
"debug",
"(",
"'Deleting existing symlink {0}'",
".",
"format",
"(",
"full_name",
")",
")",
"os",
".",
"remove",
"(",
"full_name",
")"
] | 35.545455 | 20.772727 |
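A POSIX-only sketch of the cleanup behaviour, using the function above:

    import os
    import tempfile

    root = tempfile.mkdtemp()
    os.symlink('/tmp', os.path.join(root, 'stale_pkg'))   # a leftover package link
    remove_existing_links(root)
    print(os.listdir(root))                               # []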
def create_paste(self, content):
"""Create a raw paste of the given content.
Returns a URL to the paste, or raises a ``pasteraw.Error`` if something
tragic happens instead.
"""
r = requests.post(
self.endpoint + '/pastes',
data={'content': content},
allow_redirects=False)
if r.status_code == 302:
return r.headers['Location']
if r.status_code == 413:
raise MaxLengthExceeded('%d bytes' % len(content))
try:
error_message = r.json()['error']
except Exception:
error_message = r.text
raise UnexpectedError(error_message) | [
"def",
"create_paste",
"(",
"self",
",",
"content",
")",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"self",
".",
"endpoint",
"+",
"'/pastes'",
",",
"data",
"=",
"{",
"'content'",
":",
"content",
"}",
",",
"allow_redirects",
"=",
"False",
")",
"if",
"r",
".",
"status_code",
"==",
"302",
":",
"return",
"r",
".",
"headers",
"[",
"'Location'",
"]",
"if",
"r",
".",
"status_code",
"==",
"413",
":",
"raise",
"MaxLengthExceeded",
"(",
"'%d bytes'",
"%",
"len",
"(",
"content",
")",
")",
"try",
":",
"error_message",
"=",
"r",
".",
"json",
"(",
")",
"[",
"'error'",
"]",
"except",
"Exception",
":",
"error_message",
"=",
"r",
".",
"text",
"raise",
"UnexpectedError",
"(",
"error_message",
")"
] | 27.75 | 17.333333 |
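Usage sketch, assuming this method belongs to the pasteraw package's Client class (class name and default endpoint are assumptions):

    import pasteraw

    c = pasteraw.Client()
    url = c.create_paste('hello world')   # URL taken from the 302 Location header
    print(url)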
def create_admin(user_config_path: str = 'CONFIG.superuser') -> bool:
"""
Creates a superuser from a specified dict/object bundle located at ``user_config_path``.
Skips if the specified object contains no email or no username.
If a user with the specified username already exists and has no usable password, it updates the user's password
with the specified one.
``user_config_path`` can accept any path to a deeply nested object, like dict of dicts,
object of dicts of objects, and so on. Let's assume you have this weird config in your ``settings.py``:
::
class MyConfigObject:
my_var = {
'user': {
'username': 'user',
'password': 'qwe',
'email': 'no@example.com',
}
}
local_config = MyConfigObject()
To access the ``'user'`` bundle you have to specify: ``local_config.my_var.user``.
:param user_config_path: dot-separated path to object or dict, default is ``'CONFIG.superuser'``
:return: ``True`` if user has been created, ``False`` otherwise
"""
from django.conf import settings
wf('Creating superuser... ', False)
username, email, password = [
dot_path(settings, '{0}.{1}'.format(user_config_path, 'username')),
dot_path(settings, '{0}.{1}'.format(user_config_path, 'email')),
dot_path(settings, '{0}.{1}'.format(user_config_path, 'password')),
]
if not all([username, email]):
wf('[SKIP: username and email should not be empty]\n')
return False
from django.db import IntegrityError
try:
execute_from_command_line([
'./manage.py', 'createsuperuser',
'--username', username,
'--email', email,
'--noinput'
])
except IntegrityError:
pass
if password:
# after `execute_from_command_line` models are loaded
from django.contrib.auth.models import User
user = User.objects.get(username=username)
# do not change password if it was set before
if not user.has_usable_password():
user.set_password(password)
user.save()
else:
wf('[SKIP update password: password is empty]\n')
wf('[+]\n')
return True | [
"def",
"create_admin",
"(",
"user_config_path",
":",
"str",
"=",
"'CONFIG.superuser'",
")",
"->",
"bool",
":",
"from",
"django",
".",
"conf",
"import",
"settings",
"wf",
"(",
"'Creating superuser... '",
",",
"False",
")",
"username",
",",
"email",
",",
"password",
"=",
"[",
"dot_path",
"(",
"settings",
",",
"'{0}.{1}'",
".",
"format",
"(",
"user_config_path",
",",
"'username'",
")",
")",
",",
"dot_path",
"(",
"settings",
",",
"'{0}.{1}'",
".",
"format",
"(",
"user_config_path",
",",
"'email'",
")",
")",
",",
"dot_path",
"(",
"settings",
",",
"'{0}.{1}'",
".",
"format",
"(",
"user_config_path",
",",
"'password'",
")",
")",
",",
"]",
"if",
"not",
"all",
"(",
"[",
"username",
",",
"email",
"]",
")",
":",
"wf",
"(",
"'[SKIP: username and email should not be empty]\\n'",
")",
"return",
"False",
"from",
"django",
".",
"db",
"import",
"IntegrityError",
"try",
":",
"execute_from_command_line",
"(",
"[",
"'./manage.py'",
",",
"'createsuperuser'",
",",
"'--username'",
",",
"username",
",",
"'--email'",
",",
"email",
",",
"'--noinput'",
"]",
")",
"except",
"IntegrityError",
":",
"pass",
"if",
"password",
":",
"# after `execute_from_command_line` models are loaded",
"from",
"django",
".",
"contrib",
".",
"auth",
".",
"models",
"import",
"User",
"user",
"=",
"User",
".",
"objects",
".",
"get",
"(",
"username",
"=",
"username",
")",
"# do not change password if it was set before",
"if",
"not",
"user",
".",
"has_usable_password",
"(",
")",
":",
"user",
".",
"set_password",
"(",
"password",
")",
"user",
".",
"save",
"(",
")",
"else",
":",
"wf",
"(",
"'[SKIP update password: password is empty]\\n'",
")",
"wf",
"(",
"'[+]\\n'",
")",
"return",
"True"
] | 36.209677 | 23.403226 |
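The docstring leans on a dot_path helper to walk mixed dict/object trees; a minimal sketch of such a helper (the real one may differ):

    def dot_path(obj, path):
        """Resolve 'a.b.c' through dicts and attributes; None if any hop is missing."""
        for part in path.split('.'):
            obj = obj.get(part) if isinstance(obj, dict) else getattr(obj, part, None)
            if obj is None:
                return None
        return obj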
def setBendField(self, x):
""" set bend magnetic field
:param x: new bend field to be assigned, [T]
:return: None
"""
if x != self.bend_field:
self.bend_field = x
self.refresh = True | [
"def",
"setBendField",
"(",
"self",
",",
"x",
")",
":",
"if",
"x",
"!=",
"self",
".",
"bend_field",
":",
"self",
".",
"bend_field",
"=",
"x",
"self",
".",
"refresh",
"=",
"True"
] | 26.555556 | 12.333333 |
def ReadCronJobRun(self, job_id, run_id, cursor=None):
"""Reads a single cron job run from the db."""
query = ("SELECT run, UNIX_TIMESTAMP(write_time) FROM cron_job_runs "
"WHERE job_id = %s AND run_id = %s")
num_runs = cursor.execute(
query, [job_id, db_utils.CronJobRunIDToInt(run_id)])
if num_runs == 0:
raise db.UnknownCronJobRunError(
"Run with job id %s and run id %s not found." % (job_id, run_id))
return self._CronJobRunFromRow(cursor.fetchall()[0]) | [
"def",
"ReadCronJobRun",
"(",
"self",
",",
"job_id",
",",
"run_id",
",",
"cursor",
"=",
"None",
")",
":",
"query",
"=",
"(",
"\"SELECT run, UNIX_TIMESTAMP(write_time) FROM cron_job_runs \"",
"\"WHERE job_id = %s AND run_id = %s\"",
")",
"num_runs",
"=",
"cursor",
".",
"execute",
"(",
"query",
",",
"[",
"job_id",
",",
"db_utils",
".",
"CronJobRunIDToInt",
"(",
"run_id",
")",
"]",
")",
"if",
"num_runs",
"==",
"0",
":",
"raise",
"db",
".",
"UnknownCronJobRunError",
"(",
"\"Run with job id %s and run id %s not found.\"",
"%",
"(",
"job_id",
",",
"run_id",
")",
")",
"return",
"self",
".",
"_CronJobRunFromRow",
"(",
"cursor",
".",
"fetchall",
"(",
")",
"[",
"0",
"]",
")"
] | 46 | 18 |
def trans(self, id, parameters=None, domain=None, locale=None):
"""
Throws RuntimeError whenever a message is missing
"""
if parameters is None:
parameters = {}
if locale is None:
locale = self.locale
else:
self._assert_valid_locale(locale)
if domain is None:
domain = 'messages'
catalogue = self.get_catalogue(locale)
if not catalogue.has(id, domain):
raise RuntimeError(
"There is no translation for {0} in domain {1}".format(
id,
domain
)
)
msg = self.get_catalogue(locale).get(id, domain)
return self.format(msg, parameters) | [
"def",
"trans",
"(",
"self",
",",
"id",
",",
"parameters",
"=",
"None",
",",
"domain",
"=",
"None",
",",
"locale",
"=",
"None",
")",
":",
"if",
"parameters",
"is",
"None",
":",
"parameters",
"=",
"{",
"}",
"if",
"locale",
"is",
"None",
":",
"locale",
"=",
"self",
".",
"locale",
"else",
":",
"self",
".",
"_assert_valid_locale",
"(",
"locale",
")",
"if",
"domain",
"is",
"None",
":",
"domain",
"=",
"'messages'",
"catalogue",
"=",
"self",
".",
"get_catalogue",
"(",
"locale",
")",
"if",
"not",
"catalogue",
".",
"has",
"(",
"id",
",",
"domain",
")",
":",
"raise",
"RuntimeError",
"(",
"\"There is no translation for {0} in domain {1}\"",
".",
"format",
"(",
"id",
",",
"domain",
")",
")",
"msg",
"=",
"self",
".",
"get_catalogue",
"(",
"locale",
")",
".",
"get",
"(",
"id",
",",
"domain",
")",
"return",
"self",
".",
"format",
"(",
"msg",
",",
"parameters",
")"
] | 28.423077 | 17.192308 |
def sort_by(self, *fields, **kwargs):
"""
Indicate how the results should be sorted. This can also be used for
*top-N* style queries
### Parameters
- **fields**: The fields by which to sort. This can be either a single
field or a list of fields. If you wish to specify order, you can
use the `Asc` or `Desc` wrapper classes.
- **max**: Maximum number of results to return. This can be used instead
of `LIMIT` and is also faster.
Example of sorting by `foo` ascending and `bar` descending:
```
sort_by(Asc('@foo'), Desc('@bar'))
```
Return the top 10 customers:
```
AggregateRequest()\
.group_by('@customer', r.sum('@paid').alias(FIELDNAME))\
.sort_by(Desc('@paid'), max=10)
```
"""
self._max = kwargs.get('max', 0)
if isinstance(fields, (string_types, SortDirection)):
fields = [fields]
for f in fields:
if isinstance(f, SortDirection):
self._sortby += [f.field, f.DIRSTRING]
else:
self._sortby.append(f)
return self | [
"def",
"sort_by",
"(",
"self",
",",
"*",
"fields",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_max",
"=",
"kwargs",
".",
"get",
"(",
"'max'",
",",
"0",
")",
"if",
"isinstance",
"(",
"fields",
",",
"(",
"string_types",
",",
"SortDirection",
")",
")",
":",
"fields",
"=",
"[",
"fields",
"]",
"for",
"f",
"in",
"fields",
":",
"if",
"isinstance",
"(",
"f",
",",
"SortDirection",
")",
":",
"self",
".",
"_sortby",
"+=",
"[",
"f",
".",
"field",
",",
"f",
".",
"DIRSTRING",
"]",
"else",
":",
"self",
".",
"_sortby",
".",
"append",
"(",
"f",
")",
"return",
"self"
] | 31.540541 | 21.108108 |
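A self-contained sketch of the builder pattern behind `sort_by`, with minimal `Asc`/`Desc` stand-ins. The sketch omits the original's `isinstance(fields, ...)` guard, which can never fire because `*fields` always binds a tuple:
```
class SortDirection:
    """Hypothetical stand-in for the wrapper base class sort_by expects."""
    DIRSTRING = None
    def __init__(self, field):
        self.field = field

class Asc(SortDirection):
    DIRSTRING = 'ASC'

class Desc(SortDirection):
    DIRSTRING = 'DESC'

class AggregateRequest:
    def __init__(self):
        self._sortby = []
        self._max = 0

    def sort_by(self, *fields, **kwargs):
        # Flatten fields into the flat SORTBY argument list Redis expects.
        self._max = kwargs.get('max', 0)
        for f in fields:
            if isinstance(f, SortDirection):
                self._sortby += [f.field, f.DIRSTRING]
            else:
                self._sortby.append(f)
        return self  # returning self is what makes chaining work

req = AggregateRequest().sort_by(Asc('@foo'), Desc('@bar'), max=10)
print(req._sortby, req._max)  # ['@foo', 'ASC', '@bar', 'DESC'] 10
```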
def delete(self, bulk_id):
"""Update many objects with a single toa
:param str bulk_id: The bulk id for the job you want to delete
"""
collection_name = self.request.headers.get("collection")
if not collection_name:
self.raise_error(400, "Missing a collection name header")
self.revisions = BaseAsyncMotorDocument("%s_revisions" % collection_name)
self.logger.info("Deleting revisions with bulk_id %s" % (bulk_id))
result = yield self.revisions.collection.remove({"meta.bulk_id": bulk_id})
self.write(result) | [
"def",
"delete",
"(",
"self",
",",
"bulk_id",
")",
":",
"collection_name",
"=",
"self",
".",
"request",
".",
"headers",
".",
"get",
"(",
"\"collection\"",
")",
"if",
"not",
"collection_name",
":",
"self",
".",
"raise_error",
"(",
"400",
",",
"\"Missing a collection name header\"",
")",
"self",
".",
"revisions",
"=",
"BaseAsyncMotorDocument",
"(",
"\"%s_revisions\"",
"%",
"collection_name",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Deleting revisions with bulk_id %s\"",
"%",
"(",
"bulk_id",
")",
")",
"result",
"=",
"yield",
"self",
".",
"revisions",
".",
"collection",
".",
"remove",
"(",
"{",
"\"meta.bulk_id\"",
":",
"bulk_id",
"}",
")",
"self",
".",
"write",
"(",
"result",
")"
] | 32.333333 | 28.722222 |
def tablestructure(tablename, dataman=True, column=True, subtable=False,
sort=False):
"""Print the structure of a table.
It is the same as :func:`table.showstructure`, but without the need to open
the table first.
"""
t = table(tablename, ack=False)
six.print_(t.showstructure(dataman, column, subtable, sort)) | [
"def",
"tablestructure",
"(",
"tablename",
",",
"dataman",
"=",
"True",
",",
"column",
"=",
"True",
",",
"subtable",
"=",
"False",
",",
"sort",
"=",
"False",
")",
":",
"t",
"=",
"table",
"(",
"tablename",
",",
"ack",
"=",
"False",
")",
"six",
".",
"print_",
"(",
"t",
".",
"showstructure",
"(",
"dataman",
",",
"column",
",",
"subtable",
",",
"sort",
")",
")"
] | 34.6 | 20.9 |
def _compute_magnitude_terms(self, rup, coeffs):
"""
First three terms of equation (8) on p. 203:
``c1 + c2*(M - 6) + c3*(M - 6)**2``
"""
adj_mag = rup.mag - self.CONSTS['ref_mag']
return coeffs['c1'] + coeffs['c2']*adj_mag + coeffs['c3']*adj_mag**2 | [
"def",
"_compute_magnitude_terms",
"(",
"self",
",",
"rup",
",",
"coeffs",
")",
":",
"adj_mag",
"=",
"rup",
".",
"mag",
"-",
"self",
".",
"CONSTS",
"[",
"'ref_mag'",
"]",
"return",
"coeffs",
"[",
"'c1'",
"]",
"+",
"coeffs",
"[",
"'c2'",
"]",
"*",
"adj_mag",
"+",
"coeffs",
"[",
"'c3'",
"]",
"*",
"adj_mag",
"**",
"2"
] | 32.333333 | 16.555556 |
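A worked numeric example of the polynomial term; the coefficients are illustrative, not values from any published GMPE table:
```
# Illustrative coefficients only -- not values from any published table.
coeffs = {'c1': 1.14, 'c2': 0.623, 'c3': -0.0483}
REF_MAG = 6.0

def magnitude_terms(mag):
    adj_mag = mag - REF_MAG
    return coeffs['c1'] + coeffs['c2'] * adj_mag + coeffs['c3'] * adj_mag ** 2

print(magnitude_terms(6.0))  # 1.14: at the reference magnitude only c1 survives
print(magnitude_terms(7.5))  # 1.14 + 0.623*1.5 - 0.0483*1.5**2
```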
def counter(self, ch, part=None):
"""Return a counter on the channel ch.
ch: string or integer.
The channel index number or channel name.
part: int or None
The 0-based enumeration of a True part to return. This
has an effect whether or not the mask or filter is turned
on. Raise IndexError if the part does not exist.
See `Counter
<https://docs.python.org/2.7/library/collections.html#counter-objects>`_
for the counter object returned.
"""
return Counter(self(self._key(ch), part=part)) | [
"def",
"counter",
"(",
"self",
",",
"ch",
",",
"part",
"=",
"None",
")",
":",
"return",
"Counter",
"(",
"self",
"(",
"self",
".",
"_key",
"(",
"ch",
")",
",",
"part",
"=",
"part",
")",
")"
] | 34.529412 | 20.823529 |
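What the returned object behaves like, sketched with hand-made boolean samples standing in for real channel data:
```
from collections import Counter

# Hand-made samples stand in for self(self._key(ch), part=part).
samples = [True, False, True, True, False]
counts = Counter(samples)
print(counts)                 # Counter({True: 3, False: 2})
print(counts[True])           # 3
print(counts.most_common(1))  # [(True, 3)]
```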
def parse_atom_bytes(data: bytes) -> AtomFeed:
"""Parse an Atom feed from a byte-string containing XML data."""
root = parse_xml(BytesIO(data)).getroot()
return _parse_atom(root) | [
"def",
"parse_atom_bytes",
"(",
"data",
":",
"bytes",
")",
"->",
"AtomFeed",
":",
"root",
"=",
"parse_xml",
"(",
"BytesIO",
"(",
"data",
")",
")",
".",
"getroot",
"(",
")",
"return",
"_parse_atom",
"(",
"root",
")"
] | 46.75 | 5.75 |
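A sketch of the bytes-to-root step using the stdlib parser; the real module's `parse_xml` and `_parse_atom` may differ:
```
from io import BytesIO
from xml.etree.ElementTree import parse as parse_xml

data = b"""<?xml version="1.0"?>
<feed xmlns="http://www.w3.org/2005/Atom">
  <title>Example Feed</title>
</feed>"""

# Same bytes-to-root step; a real _parse_atom would walk this tree.
root = parse_xml(BytesIO(data)).getroot()
print(root.tag)  # {http://www.w3.org/2005/Atom}feed
```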
def _set_position(self, value):
"""
Subclasses may override this method.
"""
pX, pY = self.position
x, y = value
dX = x - pX
dY = y - pY
self.moveBy((dX, dY)) | [
"def",
"_set_position",
"(",
"self",
",",
"value",
")",
":",
"pX",
",",
"pY",
"=",
"self",
".",
"position",
"x",
",",
"y",
"=",
"value",
"dX",
"=",
"x",
"-",
"pX",
"dY",
"=",
"y",
"-",
"pY",
"self",
".",
"moveBy",
"(",
"(",
"dX",
",",
"dY",
")",
")"
] | 23.777778 | 10.666667 |
def save_file(self,
path: str,
file_id: int = None,
file_part: int = 0,
progress: callable = None,
progress_args: tuple = ()):
"""Use this method to upload a file onto Telegram servers, without actually sending the message to anyone.
This is a utility method intended to be used **only** when working with Raw Functions (i.e., a Telegram API
method you wish to use which is not available yet in the Client class as an easy-to-use method), whenever an
InputFile type is required.
Args:
path (``str``):
The path of the file you want to upload that exists on your local machine.
file_id (``int``, *optional*):
In case a file part expired, pass the file_id and the file_part to retry uploading that specific chunk.
file_part (``int``, *optional*):
In case a file part expired, pass the file_id and the file_part to retry uploading that specific chunk.
progress (``callable``, *optional*):
Pass a callback function to view the upload progress.
The function must take *(client, current, total, \*args)* as positional arguments (look at the section
below for a detailed description).
progress_args (``tuple``, *optional*):
Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
a chat_id and a message_id in order to edit a message with the updated progress.
Other Parameters:
client (:obj:`Client <pyrogram.Client>`):
The Client itself, useful when you want to call other API methods inside the callback function.
current (``int``):
The amount of bytes uploaded so far.
total (``int``):
The size of the file.
*args (``tuple``, *optional*):
Extra custom arguments as defined in the *progress_args* parameter.
You can either keep *\*args* or add every single extra argument in your function signature.
Returns:
On success, the uploaded file is returned in form of an InputFile object.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
part_size = 512 * 1024
file_size = os.path.getsize(path)
if file_size == 0:
raise ValueError("File size equals to 0 B")
if file_size > 1500 * 1024 * 1024:
raise ValueError("Telegram doesn't support uploading files bigger than 1500 MiB")
file_total_parts = int(math.ceil(file_size / part_size))
is_big = True if file_size > 10 * 1024 * 1024 else False
is_missing_part = True if file_id is not None else False
file_id = file_id or self.rnd_id()
md5_sum = md5() if not is_big and not is_missing_part else None
session = Session(self, self.dc_id, self.auth_key, is_media=True)
session.start()
try:
with open(path, "rb") as f:
f.seek(part_size * file_part)
while True:
chunk = f.read(part_size)
if not chunk:
if not is_big:
md5_sum = "".join([hex(i)[2:].zfill(2) for i in md5_sum.digest()])
break
for _ in range(3):
if is_big:
rpc = functions.upload.SaveBigFilePart(
file_id=file_id,
file_part=file_part,
file_total_parts=file_total_parts,
bytes=chunk
)
else:
rpc = functions.upload.SaveFilePart(
file_id=file_id,
file_part=file_part,
bytes=chunk
)
if session.send(rpc):
break
else:
raise AssertionError("Telegram didn't accept chunk #{} of {}".format(file_part, path))
if is_missing_part:
return
if not is_big:
md5_sum.update(chunk)
file_part += 1
if progress:
progress(self, min(file_part * part_size, file_size), file_size, *progress_args)
except Client.StopTransmission:
raise
except Exception as e:
log.error(e, exc_info=True)
else:
if is_big:
return types.InputFileBig(
id=file_id,
parts=file_total_parts,
name=os.path.basename(path),
)
else:
return types.InputFile(
id=file_id,
parts=file_total_parts,
name=os.path.basename(path),
md5_checksum=md5_sum
)
finally:
session.stop() | [
"def",
"save_file",
"(",
"self",
",",
"path",
":",
"str",
",",
"file_id",
":",
"int",
"=",
"None",
",",
"file_part",
":",
"int",
"=",
"0",
",",
"progress",
":",
"callable",
"=",
"None",
",",
"progress_args",
":",
"tuple",
"=",
"(",
")",
")",
":",
"part_size",
"=",
"512",
"*",
"1024",
"file_size",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"path",
")",
"if",
"file_size",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"File size equals to 0 B\"",
")",
"if",
"file_size",
">",
"1500",
"*",
"1024",
"*",
"1024",
":",
"raise",
"ValueError",
"(",
"\"Telegram doesn't support uploading files bigger than 1500 MiB\"",
")",
"file_total_parts",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"file_size",
"/",
"part_size",
")",
")",
"is_big",
"=",
"True",
"if",
"file_size",
">",
"10",
"*",
"1024",
"*",
"1024",
"else",
"False",
"is_missing_part",
"=",
"True",
"if",
"file_id",
"is",
"not",
"None",
"else",
"False",
"file_id",
"=",
"file_id",
"or",
"self",
".",
"rnd_id",
"(",
")",
"md5_sum",
"=",
"md5",
"(",
")",
"if",
"not",
"is_big",
"and",
"not",
"is_missing_part",
"else",
"None",
"session",
"=",
"Session",
"(",
"self",
",",
"self",
".",
"dc_id",
",",
"self",
".",
"auth_key",
",",
"is_media",
"=",
"True",
")",
"session",
".",
"start",
"(",
")",
"try",
":",
"with",
"open",
"(",
"path",
",",
"\"rb\"",
")",
"as",
"f",
":",
"f",
".",
"seek",
"(",
"part_size",
"*",
"file_part",
")",
"while",
"True",
":",
"chunk",
"=",
"f",
".",
"read",
"(",
"part_size",
")",
"if",
"not",
"chunk",
":",
"if",
"not",
"is_big",
":",
"md5_sum",
"=",
"\"\"",
".",
"join",
"(",
"[",
"hex",
"(",
"i",
")",
"[",
"2",
":",
"]",
".",
"zfill",
"(",
"2",
")",
"for",
"i",
"in",
"md5_sum",
".",
"digest",
"(",
")",
"]",
")",
"break",
"for",
"_",
"in",
"range",
"(",
"3",
")",
":",
"if",
"is_big",
":",
"rpc",
"=",
"functions",
".",
"upload",
".",
"SaveBigFilePart",
"(",
"file_id",
"=",
"file_id",
",",
"file_part",
"=",
"file_part",
",",
"file_total_parts",
"=",
"file_total_parts",
",",
"bytes",
"=",
"chunk",
")",
"else",
":",
"rpc",
"=",
"functions",
".",
"upload",
".",
"SaveFilePart",
"(",
"file_id",
"=",
"file_id",
",",
"file_part",
"=",
"file_part",
",",
"bytes",
"=",
"chunk",
")",
"if",
"session",
".",
"send",
"(",
"rpc",
")",
":",
"break",
"else",
":",
"raise",
"AssertionError",
"(",
"\"Telegram didn't accept chunk #{} of {}\"",
".",
"format",
"(",
"file_part",
",",
"path",
")",
")",
"if",
"is_missing_part",
":",
"return",
"if",
"not",
"is_big",
":",
"md5_sum",
".",
"update",
"(",
"chunk",
")",
"file_part",
"+=",
"1",
"if",
"progress",
":",
"progress",
"(",
"self",
",",
"min",
"(",
"file_part",
"*",
"part_size",
",",
"file_size",
")",
",",
"file_size",
",",
"*",
"progress_args",
")",
"except",
"Client",
".",
"StopTransmission",
":",
"raise",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"error",
"(",
"e",
",",
"exc_info",
"=",
"True",
")",
"else",
":",
"if",
"is_big",
":",
"return",
"types",
".",
"InputFileBig",
"(",
"id",
"=",
"file_id",
",",
"parts",
"=",
"file_total_parts",
",",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
",",
")",
"else",
":",
"return",
"types",
".",
"InputFile",
"(",
"id",
"=",
"file_id",
",",
"parts",
"=",
"file_total_parts",
",",
"name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"path",
")",
",",
"md5_checksum",
"=",
"md5_sum",
")",
"finally",
":",
"session",
".",
"stop",
"(",
")"
] | 39.454545 | 23.583333 |
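A sketch of just the chunking and checksum logic (no networking); `PART_SIZE` mirrors the original's 512 KiB parts, and the file path is illustrative:
```
import math
import os
from hashlib import md5

PART_SIZE = 512 * 1024  # same 512 KiB chunk size as save_file

def split_and_checksum(path):
    """Collect (part_number, total_parts, chunk_size) and the md5 hex digest."""
    file_size = os.path.getsize(path)
    total_parts = int(math.ceil(file_size / PART_SIZE))
    digest = md5()
    parts = []
    with open(path, "rb") as f:
        for part in range(total_parts):
            chunk = f.read(PART_SIZE)
            digest.update(chunk)
            parts.append((part, total_parts, len(chunk)))
    # hexdigest() matches the original's
    # "".join(hex(i)[2:].zfill(2) for i in digest.digest())
    return parts, digest.hexdigest()

# parts, checksum = split_and_checksum("some_file.bin")  # path is illustrative
```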
async def get_delta_json(
self,
rr_delta_builder: Callable[['HolderProver', str, int, int, dict], Awaitable[Tuple[str, int]]],
fro: int,
to: int) -> (str, int):
"""
Get rev reg delta json, and its timestamp on the distributed ledger,
from cached rev reg delta frames list or distributed ledger,
updating cache as necessary.
Raise BadRevStateTime if caller asks for a delta to the future.
On return of any previously existing rev reg delta frame, always update its query time beforehand.
:param rr_delta_builder: callback to build rev reg delta if need be (specify anchor instance's
_build_rr_delta())
:param fro: least time (epoch seconds) of interest; lower-bounds 'to' on frame housing return data
:param to: greatest time (epoch seconds) of interest; upper-bounds returned revocation delta timestamp
:return: rev reg delta json and ledger timestamp (epoch seconds)
"""
LOGGER.debug(
'RevoCacheEntry.get_delta_json >>> rr_delta_builder: %s, fro: %s, to: %s',
rr_delta_builder.__name__,
fro,
to)
rv = await self._get_update(rr_delta_builder, fro, to, True)
LOGGER.debug('RevoCacheEntry.get_delta_json <<< %s', rv)
return rv | [
"async",
"def",
"get_delta_json",
"(",
"self",
",",
"rr_delta_builder",
":",
"Callable",
"[",
"[",
"'HolderProver'",
",",
"str",
",",
"int",
",",
"int",
",",
"dict",
"]",
",",
"Awaitable",
"[",
"Tuple",
"[",
"str",
",",
"int",
"]",
"]",
"]",
",",
"fro",
":",
"int",
",",
"to",
":",
"int",
")",
"->",
"(",
"str",
",",
"int",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'RevoCacheEntry.get_delta_json >>> rr_delta_builder: %s, fro: %s, to: %s'",
",",
"rr_delta_builder",
".",
"__name__",
",",
"fro",
",",
"to",
")",
"rv",
"=",
"await",
"self",
".",
"_get_update",
"(",
"rr_delta_builder",
",",
"fro",
",",
"to",
",",
"True",
")",
"LOGGER",
".",
"debug",
"(",
"'RevoCacheEntry.get_delta_json <<< %s'",
",",
"rv",
")",
"return",
"rv"
] | 44.266667 | 30.8 |
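A pared-down, runnable sketch of the injected-builder pattern: the cache entry does not know how to fetch a delta, so the caller passes an async builder. The builder here is a hypothetical stand-in for an anchor's `_build_rr_delta()`, and the cache-frame lookup is omitted:
```
import asyncio
from typing import Awaitable, Callable, Tuple

async def fake_rr_delta_builder(fro: int, to: int) -> Tuple[str, int]:
    # Hypothetical builder: pretend the ledger returned a delta at time `to`.
    return ('{"ver": "1.0", "value": {}}', to)

async def get_delta_json(
        builder: Callable[[int, int], Awaitable[Tuple[str, int]]],
        fro: int,
        to: int) -> Tuple[str, int]:
    # The real method consults cached delta frames first; this sketch
    # always delegates to the injected builder.
    return await builder(fro, to)

print(asyncio.run(get_delta_json(fake_rr_delta_builder, 1000, 2000)))
```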
def set_info(self, info):
""" set my state from the passed info """
idx = info.get(self.name)
if idx is not None:
self.__dict__.update(idx) | [
"def",
"set_info",
"(",
"self",
",",
"info",
")",
":",
"idx",
"=",
"info",
".",
"get",
"(",
"self",
".",
"name",
")",
"if",
"idx",
"is",
"not",
"None",
":",
"self",
".",
"__dict__",
".",
"update",
"(",
"idx",
")"
] | 34.2 | 7.6 |
def wrapFn(fn, *args, **kwargs):
"""
Makes a Job out of a function. \
Convenience function for constructor of :class:`toil.job.FunctionWrappingJob`.
:param fn: Function to be run with ``*args`` and ``**kwargs`` as arguments. \
See toil.job.JobFunctionWrappingJob for reserved keyword arguments used \
to specify resource requirements.
:return: The new job that wraps fn.
:rtype: toil.job.FunctionWrappingJob
"""
if PromisedRequirement.convertPromises(kwargs):
return PromisedRequirementFunctionWrappingJob.create(fn, *args, **kwargs)
else:
return FunctionWrappingJob(fn, *args, **kwargs) | [
"def",
"wrapFn",
"(",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"PromisedRequirement",
".",
"convertPromises",
"(",
"kwargs",
")",
":",
"return",
"PromisedRequirementFunctionWrappingJob",
".",
"create",
"(",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"else",
":",
"return",
"FunctionWrappingJob",
"(",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 46.066667 | 19.933333 |
def mkBinPacking(w,q):
"""mkBinPacking: convert a cutting stock instance into bin packing format"""
s = []
for j in range(len(w)):
for i in range(q[j]):
s.append(w[j])
return s | [
"def",
"mkBinPacking",
"(",
"w",
",",
"q",
")",
":",
"s",
"=",
"[",
"]",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"w",
")",
")",
":",
"for",
"i",
"in",
"range",
"(",
"q",
"[",
"j",
"]",
")",
":",
"s",
".",
"append",
"(",
"w",
"[",
"j",
"]",
")",
"return",
"s"
] | 29.428571 | 16.285714 |
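A worked example of the expansion: widths `w` with demand quantities `q` flatten into one bin-packing item per demanded piece:
```
# Widths w with demand quantities q expand into one item per demanded piece.
w = [5, 3, 7]   # stock widths
q = [2, 1, 3]   # demanded quantity of each width

s = []
for j in range(len(w)):
    for i in range(q[j]):
        s.append(w[j])

print(s)  # [5, 5, 3, 7, 7, 7]
```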
def validate(
feed: "Feed", *, as_df: bool = True, include_warnings: bool = True
) -> Union[List, DataFrame]:
"""
Check whether the given feed satisfies the GTFS.
Parameters
----------
feed : Feed
as_df : boolean
If ``True``, then return the resulting report as a DataFrame;
otherwise return the result as a list
include_warnings : boolean
If ``True``, then include problems of types ``'error'`` and
``'warning'``; otherwise, only return problems of type
``'error'``
Returns
-------
list or DataFrame
Run all the table-checking functions: :func:`check_agency`,
:func:`check_calendar`, etc.
This yields a possibly empty list of items
[problem type, message, table, rows].
If ``as_df``, then format the error list as a DataFrame with the
columns
- ``'type'``: 'error' or 'warning'; 'error' means the GTFS is
violated; 'warning' means there is a problem but it's not a
GTFS violation
- ``'message'``: description of the problem
- ``'table'``: table in which problem occurs, e.g. 'routes'
- ``'rows'``: rows of the table's DataFrame where problem occurs
Return early if the feed is missing required tables or required
columns.
Notes
-----
- This function interprets the GTFS liberally, classifying problems
as warnings rather than errors where the GTFS is unclear.
For example if a trip_id listed in the trips table is not listed
in the stop times table (a trip with no stop times),
then that's a warning and not an error.
- Timing benchmark: on a 2.80 GHz processor machine with 16 GB of
memory, this function checks `this 31 MB Southeast Queensland feed
<http://transitfeeds.com/p/translink/21/20170310>`_
in 22 seconds, including warnings.
"""
problems = []
# Check for invalid columns and check the required tables
checkers = [
"check_agency",
"check_calendar",
"check_calendar_dates",
"check_fare_attributes",
"check_fare_rules",
"check_feed_info",
"check_frequencies",
"check_routes",
"check_shapes",
"check_stops",
"check_stop_times",
"check_transfers",
"check_trips",
]
for checker in checkers:
problems.extend(
globals()[checker](feed, include_warnings=include_warnings)
)
# Check calendar/calendar_dates combo
if feed.calendar is None and feed.calendar_dates is None:
problems.append(
["error", "Missing both tables", "calendar & calendar_dates", []]
)
return format_problems(problems, as_df=as_df) | [
"def",
"validate",
"(",
"feed",
":",
"\"Feed\"",
",",
"*",
",",
"as_df",
":",
"bool",
"=",
"True",
",",
"include_warnings",
":",
"bool",
"=",
"True",
")",
"->",
"Union",
"[",
"List",
",",
"DataFrame",
"]",
":",
"problems",
"=",
"[",
"]",
"# Check for invalid columns and check the required tables",
"checkers",
"=",
"[",
"\"check_agency\"",
",",
"\"check_calendar\"",
",",
"\"check_calendar_dates\"",
",",
"\"check_fare_attributes\"",
",",
"\"check_fare_rules\"",
",",
"\"check_feed_info\"",
",",
"\"check_frequencies\"",
",",
"\"check_routes\"",
",",
"\"check_shapes\"",
",",
"\"check_stops\"",
",",
"\"check_stop_times\"",
",",
"\"check_transfers\"",
",",
"\"check_trips\"",
",",
"]",
"for",
"checker",
"in",
"checkers",
":",
"problems",
".",
"extend",
"(",
"globals",
"(",
")",
"[",
"checker",
"]",
"(",
"feed",
",",
"include_warnings",
"=",
"include_warnings",
")",
")",
"# Check calendar/calendar_dates combo",
"if",
"feed",
".",
"calendar",
"is",
"None",
"and",
"feed",
".",
"calendar_dates",
"is",
"None",
":",
"problems",
".",
"append",
"(",
"[",
"\"error\"",
",",
"\"Missing both tables\"",
",",
"\"calendar & calendar_dates\"",
",",
"[",
"]",
"]",
")",
"return",
"format_problems",
"(",
"problems",
",",
"as_df",
"=",
"as_df",
")"
] | 33.6 | 22.15 |
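A minimal sketch of the dispatch-by-name pattern `validate` relies on: checker names are resolved through `globals()`, so adding a check is just defining a function and listing its name. The `Feed` class and checkers here are toy stand-ins:
```
def check_agency(feed, include_warnings=True):
    # Toy checker: flag a missing agency table as an error.
    if getattr(feed, "agency", None) is None:
        return [["error", "Missing table", "agency", []]]
    return []

def check_routes(feed, include_warnings=True):
    return []  # toy checker that never complains

class Feed:
    agency = None  # simulate a feed with no agency table

problems = []
for checker in ["check_agency", "check_routes"]:
    problems.extend(globals()[checker](Feed(), include_warnings=True))
print(problems)  # [['error', 'Missing table', 'agency', []]]
```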
def returns_fake(self, *args, **kwargs):
"""Set the last call to return a new :class:`fudge.Fake`.
Any given arguments are passed to the :class:`fudge.Fake` constructor
Take note that this is different from the cascading nature of
other methods. This will return an instance of the *new* Fake,
not self, so you should be careful to store its return value in a new
variable.
I.E.::
>>> session = Fake('session')
>>> query = session.provides('query').returns_fake(name="Query")
>>> assert query is not session
>>> query = query.provides('one').returns(['object'])
>>> session.query().one()
['object']
"""
exp = self._get_current_call()
endpoint = kwargs.get('name', exp.call_name)
name = self._endpoint_name(endpoint)
kwargs['name'] = '%s()' % name
fake = self.__class__(*args, **kwargs)
exp.return_val = fake
return fake | [
"def",
"returns_fake",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"exp",
"=",
"self",
".",
"_get_current_call",
"(",
")",
"endpoint",
"=",
"kwargs",
".",
"get",
"(",
"'name'",
",",
"exp",
".",
"call_name",
")",
"name",
"=",
"self",
".",
"_endpoint_name",
"(",
"endpoint",
")",
"kwargs",
"[",
"'name'",
"]",
"=",
"'%s()'",
"%",
"name",
"fake",
"=",
"self",
".",
"__class__",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"exp",
".",
"return_val",
"=",
"fake",
"return",
"fake"
] | 35.392857 | 20.25 |
def get_fw_rule(self, rule_id):
"""Return the firewall rule, given its ID. """
rule = None
try:
rule = self.neutronclient.show_firewall_rule(rule_id)
except Exception as exc:
LOG.error("Failed to get firewall rule for id %(id)s "
"Exc %(exc)s", {'id': rule_id, 'exc': str(exc)})
return rule | [
"def",
"get_fw_rule",
"(",
"self",
",",
"rule_id",
")",
":",
"rule",
"=",
"None",
"try",
":",
"rule",
"=",
"self",
".",
"neutronclient",
".",
"show_firewall_rule",
"(",
"rule_id",
")",
"except",
"Exception",
"as",
"exc",
":",
"LOG",
".",
"error",
"(",
"\"Failed to get firewall rule for id %(id)s \"",
"\"Exc %(exc)s\"",
",",
"{",
"'id'",
":",
"rule_id",
",",
"'exc'",
":",
"str",
"(",
"exc",
")",
"}",
")",
"return",
"rule"
] | 40.888889 | 18.666667 |
def _installV2Powerups(anonymousSite):
"""
Install the given L{AnonymousSite} for the powerup interfaces it was given
in version 2.
"""
anonymousSite.store.powerUp(anonymousSite, IWebViewer)
anonymousSite.store.powerUp(anonymousSite, IMantissaSite) | [
"def",
"_installV2Powerups",
"(",
"anonymousSite",
")",
":",
"anonymousSite",
".",
"store",
".",
"powerUp",
"(",
"anonymousSite",
",",
"IWebViewer",
")",
"anonymousSite",
".",
"store",
".",
"powerUp",
"(",
"anonymousSite",
",",
"IMantissaSite",
")"
] | 38 | 14.571429 |
def get_source_by_name(self, name):
"""Return a single source in the ROI with the given name. The
input name string can match any of the strings in the names
property of the source object. Case and whitespace are
ignored when matching name strings. If no sources are found,
or if multiple sources match, an exception is thrown.
Parameters
----------
name : str
Name string.
Returns
-------
srcs : `~fermipy.roi_model.Model`
A source object.
"""
srcs = self.get_sources_by_name(name)
if len(srcs) == 1:
return srcs[0]
elif len(srcs) == 0:
raise Exception('No source matching name: ' + name)
elif len(srcs) > 1:
raise Exception('Multiple sources matching name: ' + name) | [
"def",
"get_source_by_name",
"(",
"self",
",",
"name",
")",
":",
"srcs",
"=",
"self",
".",
"get_sources_by_name",
"(",
"name",
")",
"if",
"len",
"(",
"srcs",
")",
"==",
"1",
":",
"return",
"srcs",
"[",
"0",
"]",
"elif",
"len",
"(",
"srcs",
")",
"==",
"0",
":",
"raise",
"Exception",
"(",
"'No source matching name: '",
"+",
"name",
")",
"elif",
"len",
"(",
"srcs",
")",
">",
"1",
":",
"raise",
"Exception",
"(",
"'Multiple sources matching name: '",
"+",
"name",
")"
] | 31.961538 | 19.846154 |
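The same exactly-one-match contract, sketched over a plain list with a hypothetical `Source` class whose `names` are normalized for case and whitespace:
```
class Source:
    """Hypothetical source with a list of matchable name strings."""
    def __init__(self, *names):
        self.names = [n.lower().replace(' ', '') for n in names]

def get_source_by_name(sources, name):
    key = name.lower().replace(' ', '')  # ignore case and whitespace
    matches = [s for s in sources if key in s.names]
    if len(matches) == 1:
        return matches[0]
    elif len(matches) == 0:
        raise Exception('No source matching name: ' + name)
    else:
        raise Exception('Multiple sources matching name: ' + name)

srcs = [Source('3FGL J0534.5+2201', 'Crab'), Source('Vela')]
print(get_source_by_name(srcs, 'CRAB') is srcs[0])  # True
```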
def deep_type(obj, depth = None, max_sample = None, get_type = None):
"""Tries to construct a type for a given value. In contrast to type(...),
deep_type does its best to fit structured types from typing as close as
possible to the given value.
E.g. deep_type((1, 2, 'a')) will return Tuple[int, int, str] rather than
just tuple.
Supports various types from typing, but not yet all.
Also detects nesting up to given depth (uses pytypes.default_typecheck_depth
if no value is given).
If a value for max_sample is given, this number of elements is probed
from lists, sets and dictionaries to determine the element type. By default,
all elements are probed. If there are fewer elements than max_sample, all
existing elements are probed.
Optionally, a custom get_type function can be provided to further
customize how types are resolved. By default it uses type function.
"""
return _deep_type(obj, [], 0, depth, max_sample, get_type) | [
"def",
"deep_type",
"(",
"obj",
",",
"depth",
"=",
"None",
",",
"max_sample",
"=",
"None",
",",
"get_type",
"=",
"None",
")",
":",
"return",
"_deep_type",
"(",
"obj",
",",
"[",
"]",
",",
"0",
",",
"depth",
",",
"max_sample",
",",
"get_type",
")"
] | 57.529412 | 23.647059 |
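A toy re-implementation of the idea (not pytypes itself, and far less complete): infer structured `typing` types from a value instead of returning the bare builtin type:
```
from typing import Dict, List, Tuple

def toy_deep_type(obj):
    # Recurse into containers; fall back to the plain type otherwise.
    if isinstance(obj, tuple):
        return Tuple[tuple(toy_deep_type(e) for e in obj)]
    if isinstance(obj, list):
        inner = {toy_deep_type(e) for e in obj}
        return List[inner.pop()] if len(inner) == 1 else list
    if isinstance(obj, dict):
        keys = {toy_deep_type(k) for k in obj}
        vals = {toy_deep_type(v) for v in obj.values()}
        if len(keys) == 1 and len(vals) == 1:
            return Dict[keys.pop(), vals.pop()]
        return dict
    return type(obj)

print(toy_deep_type((1, 2, 'a')))       # typing.Tuple[int, int, str]
print(toy_deep_type([1.0, 2.5]))        # typing.List[float]
print(toy_deep_type({'x': 1, 'y': 2}))  # typing.Dict[str, int]
```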
def ReadContainer(self, collection_link, options=None):
"""Reads a collection.
:param str collection_link:
The link to the document collection.
:param dict options:
The request options for the request.
:return:
The read Collection.
:rtype:
dict
"""
if options is None:
options = {}
path = base.GetPathFromLink(collection_link)
collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)
return self.Read(path,
'colls',
collection_id,
None,
options) | [
"def",
"ReadContainer",
"(",
"self",
",",
"collection_link",
",",
"options",
"=",
"None",
")",
":",
"if",
"options",
"is",
"None",
":",
"options",
"=",
"{",
"}",
"path",
"=",
"base",
".",
"GetPathFromLink",
"(",
"collection_link",
")",
"collection_id",
"=",
"base",
".",
"GetResourceIdOrFullNameFromLink",
"(",
"collection_link",
")",
"return",
"self",
".",
"Read",
"(",
"path",
",",
"'colls'",
",",
"collection_id",
",",
"None",
",",
"options",
")"
] | 28.291667 | 16.75 |
def compose(self, *args, **kwargs):
"""
Compose layer and masks (mask, vector mask, and clipping layers).
:return: :py:class:`PIL.Image`, or `None` if the layer has no pixel.
"""
from psd_tools.api.composer import compose_layer
if self.bbox == (0, 0, 0, 0):
return None
return compose_layer(self, *args, **kwargs) | [
"def",
"compose",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"from",
"psd_tools",
".",
"api",
".",
"composer",
"import",
"compose_layer",
"if",
"self",
".",
"bbox",
"==",
"(",
"0",
",",
"0",
",",
"0",
",",
"0",
")",
":",
"return",
"None",
"return",
"compose_layer",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | 37.3 | 16.1 |
def match_key(self) -> Tuple[bool, bool, int, List[WeightedPart]]:
"""A Key to sort the rules by weight for matching.
The key leads to ordering:
- By first order by defaults as they are simple rules without
conversions.
- Then on the complexity of the rule, i.e. does it have any
converted parts. This is as simple rules are quick to match
or reject.
- Then by the number of parts, with more complex (more parts)
first.
- Finally by the weights themselves. Note that weights are also
sub keyed by converter first then weight second.
"""
if self.map is None:
raise RuntimeError(f"{self!r} is not bound to a Map")
complex_rule = any(weight.converter for weight in self._weights)
return (not bool(self.defaults), complex_rule, -len(self._weights), self._weights) | [
"def",
"match_key",
"(",
"self",
")",
"->",
"Tuple",
"[",
"bool",
",",
"bool",
",",
"int",
",",
"List",
"[",
"WeightedPart",
"]",
"]",
":",
"if",
"self",
".",
"map",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"f\"{self!r} is not bound to a Map\"",
")",
"complex_rule",
"=",
"any",
"(",
"weight",
".",
"converter",
"for",
"weight",
"in",
"self",
".",
"_weights",
")",
"return",
"(",
"not",
"bool",
"(",
"self",
".",
"defaults",
")",
",",
"complex_rule",
",",
"-",
"len",
"(",
"self",
".",
"_weights",
")",
",",
"self",
".",
"_weights",
")"
] | 47.052632 | 24.157895 |
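A small demonstration of why the composite tuple works as a sort key: tuples compare element by element, so defaulted rules sort first, then simple rules with more parts before shorter ones, then complex rules. The rule names and weights here are made up:
```
# Tuples compare element by element, so the composite key orders:
# defaulted rules first, then simple rules (longer before shorter via
# the negated part count), then complex rules.
rules = [
    ("with_defaults", (False, False, -1, [1])),
    ("simple_short",  (True,  False, -1, [1])),
    ("simple_long",   (True,  False, -3, [1, 1, 1])),
    ("complex",       (True,  True,  -2, [2, 1])),
]
for name, key in sorted(rules, key=lambda r: r[1]):
    print(name)  # with_defaults, simple_long, simple_short, complex
```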
def download(self):
''' Downloads HTML from url. '''
self.page = requests.get(self.url)
self.tree = html.fromstring(self.page.text) | [
"def",
"download",
"(",
"self",
")",
":",
"self",
".",
"page",
"=",
"requests",
".",
"get",
"(",
"self",
".",
"url",
")",
"self",
".",
"tree",
"=",
"html",
".",
"fromstring",
"(",
"self",
".",
"page",
".",
"text",
")"
] | 26.8 | 15.2 |
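A standalone version of the fetch-and-parse step; it needs network access plus the requests and lxml packages, and the URL is illustrative:
```
import requests
from lxml import html

url = "https://example.com/"       # illustrative target
page = requests.get(url)           # fetch the HTML
tree = html.fromstring(page.text)  # parse into an element tree
print(tree.findtext('.//title'))   # -> Example Domain
```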