hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses sequencelengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses sequencelengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses sequencelengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
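A minimal sketch of loading and inspecting rows with this schema, assuming the rows are available locally as a Parquet file (the file name below is a placeholder, not part of the dataset):

import pandas as pd

# Load the dump and browse the per-file metadata columns.
rows = pd.read_parquet("code_rows.parquet")
print(rows[["max_stars_repo_name", "max_stars_repo_path", "size", "max_stars_count"]].head())
# The raw source of each file is stored in the 'content' column.
print(rows.iloc[0]["content"][:200])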
f5ddf0466f903b62327ed962a3e97704a2eedcd4 | 6,201 | py | Python | sdk/python/pulumi_azure_nextgen/synapse/v20190601preview/workspace_aad_admin.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/synapse/v20190601preview/workspace_aad_admin.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/synapse/v20190601preview/workspace_aad_admin.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | ["Apache-2.0"] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['WorkspaceAadAdmin']
class WorkspaceAadAdmin(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
administrator_type: Optional[pulumi.Input[str]] = None,
login: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sid: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Workspace active directory administrator
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] administrator_type: Workspace active directory administrator type
:param pulumi.Input[str] login: Login of the workspace active directory administrator
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] sid: Object ID of the workspace active directory administrator
:param pulumi.Input[str] tenant_id: Tenant ID of the workspace active directory administrator
:param pulumi.Input[str] workspace_name: The name of the workspace
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['administrator_type'] = administrator_type
__props__['login'] = login
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['sid'] = sid
__props__['tenant_id'] = tenant_id
if workspace_name is None and not opts.urn:
raise TypeError("Missing required property 'workspace_name'")
__props__['workspace_name'] = workspace_name
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:synapse:WorkspaceAadAdmin"), pulumi.Alias(type_="azure-nextgen:synapse/latest:WorkspaceAadAdmin"), pulumi.Alias(type_="azure-nextgen:synapse/v20201201:WorkspaceAadAdmin")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WorkspaceAadAdmin, __self__).__init__(
'azure-nextgen:synapse/v20190601preview:WorkspaceAadAdmin',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WorkspaceAadAdmin':
"""
Get an existing WorkspaceAadAdmin resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return WorkspaceAadAdmin(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="administratorType")
def administrator_type(self) -> pulumi.Output[Optional[str]]:
"""
Workspace active directory administrator type
"""
return pulumi.get(self, "administrator_type")
@property
@pulumi.getter
def login(self) -> pulumi.Output[Optional[str]]:
"""
Login of the workspace active directory administrator
"""
return pulumi.get(self, "login")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def sid(self) -> pulumi.Output[Optional[str]]:
"""
Object ID of the workspace active directory administrator
"""
return pulumi.get(self, "sid")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[Optional[str]]:
"""
Tenant ID of the workspace active directory administrator
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
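# Illustrative usage sketch (not part of the generated SDK; the resource group,
# workspace and GUIDs below are placeholder values):
#
#     aad_admin = WorkspaceAadAdmin(
#         "exampleWorkspaceAadAdmin",
#         resource_group_name="example-rg",
#         workspace_name="example-workspace",
#         administrator_type="ActiveDirectory",
#         login="admin@example.com",
#         sid="00000000-0000-0000-0000-000000000000",
#         tenant_id="00000000-0000-0000-0000-000000000000")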
| 41.898649 | 259 | 0.648928 |
e40c945b21bef2dd14aaeb2e5f006b669d6ced87 | 824 | py | Python | amadeus/travel/analytics/air_traffic/_traveled.py | tsolakoua/amadeus-python | 56c0e5cb0510aab5a80646d07593d94c9cba2c69 | ["MIT"] | 125 | 2018-04-09T07:27:24.000Z | 2022-02-22T11:45:20.000Z | amadeus/travel/analytics/air_traffic/_traveled.py | tsolakoua/amadeus-python | 56c0e5cb0510aab5a80646d07593d94c9cba2c69 | ["MIT"] | 58 | 2018-03-29T14:58:01.000Z | 2022-03-17T10:18:07.000Z | amadeus/travel/analytics/air_traffic/_traveled.py | tsolakoua/amadeus-python | 56c0e5cb0510aab5a80646d07593d94c9cba2c69 | ["MIT"] | 58 | 2018-04-06T10:56:20.000Z | 2022-03-04T01:23:24.000Z |
from amadeus.client.decorator import Decorator
class Traveled(Decorator, object):
def get(self, **params):
'''
Returns a list of air traffic reports, based on number of travelers.
.. code-block:: python
amadeus.travel.analytics.air_traffic.traveled.get(
originCityCode='LHR',
period='2017-01'
)
:param originCityCode: IATA code of the origin city, for
example ``"BOS"`` for Boston.
:param period: period when consumers are traveling
in ``YYYY-MM`` format
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed
'''
return self.client.get('/v1/travel/analytics/air-traffic/traveled',
**params)
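# Illustrative end-to-end sketch (assumes the SDK's top-level Client is used to
# build the `amadeus` object referenced in the docstring; credentials are placeholders):
#
#     from amadeus import Client
#     amadeus = Client(client_id='YOUR_API_KEY', client_secret='YOUR_API_SECRET')
#     response = amadeus.travel.analytics.air_traffic.traveled.get(
#         originCityCode='LHR', period='2017-01')
#     print(response.data)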
| 31.692308 | 76 | 0.588592 |
634848c475e6f5ce7fc277576abe9b2e777d9c9c | 3,528 | py | Python | poweredge_exporter.py | cermegno/prometheus-grafana-dell | 079a5369c44c2ab5e19358221687198b2c076b27 | ["MIT"] | null | null | null | poweredge_exporter.py | cermegno/prometheus-grafana-dell | 079a5369c44c2ab5e19358221687198b2c076b27 | ["MIT"] | null | null | null | poweredge_exporter.py | cermegno/prometheus-grafana-dell | 079a5369c44c2ab5e19358221687198b2c076b27 | ["MIT"] | null | null | null |
from prometheus_client import start_http_server, Gauge, Counter, Enum
import requests
import time
import json
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
### Adjust this section to your environment
baseurl = "https://10.1.1.1/redfish/v1" # Use here the IP address of your iDRAC
interval = 300  # Amount of secs between polls (must be numeric for time.sleep)
username = "redfish"
password = ""
headers = {"Content-type": "application/json"}
# Create Prometheus metrics
FAN_SPEED = Gauge('fan_speed','Fan speeds in RPM',['name'])
TEMPERATURE = Gauge('temperature','Temperature of various components in Celsius',['name'])
POWER = Gauge('power','Power Output in Watts',['name'])
VOLTAGE = Gauge('voltage','Voltages',['name'])
HEALTH = Gauge('server_health', 'Health of the system')
def thermal_metrics():
health = "OK"
url = baseurl + "/Chassis/System.Embedded.1/Thermal#"
# The same Redfish API endpoint provides fan and temperature information
resp = requests.get(url, auth=(username, password), headers=headers, verify=False)
if resp.status_code == 200:
json_resp = json.loads(resp.content)
for each_item in json_resp["Fans"]:
FAN_SPEED.labels(name=each_item["Name"]).set(each_item["Reading"])
if each_item["Status"]["Health"] != "OK": # Redfish API provides health info for every component
health = "ERROR"
for each_item in json_resp["Temperatures"]:
TEMPERATURE.labels(name=each_item["Name"]).set(each_item["ReadingCelsius"])
if each_item["Status"]["Health"] != "OK":
health = "ERROR"
else:
print("Failed to get Thermal metrics")
return health
def power_metrics():
health = "OK"
url = baseurl + "/Chassis/System.Embedded.1/Power#"
# The same Redfish API endpoint provides volts and watts information
resp = requests.get(url, auth=(username, password), headers=headers, verify=False)
if resp.status_code == 200:
json_resp = json.loads(resp.content)
for each_item in json_resp["PowerSupplies"]:
POWER.labels(name=each_item["Name"]).set(each_item["PowerOutputWatts"])
if each_item["Status"]["Health"] != "OK":
health = "ERROR"
for each_item in json_resp["Voltages"]:
if each_item["PhysicalContext"] == "PowerSupply":
VOLTAGE.labels(name=each_item["Name"]).set(each_item["ReadingVolts"])
if each_item["Status"]["Health"] != "OK":
health = "ERROR"
else:
print("Failed to get Power metrics")
return health
def calculate_health(health_items):
if "ERROR" in health_items:
HEALTH.set(0) # Convert the health label to a number so that we can use "gauge" metric type
else:
HEALTH.set(100)
return
if __name__ == '__main__':
# Start up the server to expose the metrics.
start_http_server(8000)
print("Point your browser to 'http://<your_ip>:8000/metrics' to see the metrics ...")
while True:
print("Collecting now ... ", end="", flush=True)
health_items = [] # List to store health impact from various components
start = time.time()
health_items.append(thermal_metrics())
health_items.append(power_metrics())
calculate_health(health_items)
end = time.time()
print("collection completed in ", "%.2f" % (end - start), " seconds")
time.sleep(interval)
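# Example of the text exposition served on http://<your_ip>:8000/metrics
# (illustrative values; the actual fan/PSU/sensor names depend on the iDRAC model):
#
#     fan_speed{name="System Board Fan1"} 8400.0
#     temperature{name="CPU1 Temp"} 54.0
#     power{name="PS1 Status"} 186.0
#     server_health 100.0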
| 38.769231 | 108 | 0.659014 |
c51f695f01dde1b6319f3730f536fa10591e7be7 | 3,282 | py | Python | simulation/simulation_exp4p.py | fengzhiyugithub/striatum-trival01 | 8368e4d8ebc4b54cb30f4b500b1758f4c57f4657 | ["BSD-2-Clause"] | 1 | 2020-10-12T09:39:24.000Z | 2020-10-12T09:39:24.000Z | simulation/simulation_exp4p.py | fengzhiyugithub/striatum-trival01 | 8368e4d8ebc4b54cb30f4b500b1758f4c57f4657 | ["BSD-2-Clause"] | null | null | null | simulation/simulation_exp4p.py | fengzhiyugithub/striatum-trival01 | 8368e4d8ebc4b54cb30f4b500b1758f4c57f4657 | ["BSD-2-Clause"] | null | null | null |
import six
from six.moves import range, zip
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
import numpy as np
import matplotlib.pyplot as plt
from striatum.storage import MemoryHistoryStorage, MemoryModelStorage
from striatum.bandit import Exp4P
from striatum.storage import Action
#from striatum.bandit.bandit import Action
from striatum import simulation
def train_expert(history_context, history_action):
n_round = len(history_context)
history_context = np.array([history_context[t] for t in range(n_round)])
history_action = np.array([history_action[t] for t in range(n_round)])
logreg = OneVsRestClassifier(LogisticRegression())
mnb = OneVsRestClassifier(MultinomialNB())
logreg.fit(history_context, history_action)
mnb.fit(history_context, history_action)
return [logreg, mnb]
def get_advice(context, action_ids, experts):
advice = {}
for t, context_t in six.viewitems(context):
advice[t] = {}
for exp_i, expert in enumerate(experts):
prob = expert.predict_proba(context_t[np.newaxis, :])[0]
advice[t][exp_i] = {}
for action_id, action_prob in zip(action_ids, prob):
advice[t][exp_i][action_id] = action_prob
return advice
def main(): # pylint: disable=too-many-locals
n_rounds = 1000
context_dimension = 5
actions = [Action(i) for i in range(5)]
action_ids = [0, 1, 2, 3, 4]
context1, desired_actions1 = simulation.simulate_data(
3000, context_dimension, actions, "Exp4P", random_state=0)
experts = train_expert(context1, desired_actions1)
# Parameter tuning
tuning_region = np.arange(0.01, 1, 0.05)
ctr_tuning = np.empty(len(tuning_region))
advice1 = get_advice(context1, action_ids, experts)
for delta_i, delta in enumerate(tuning_region):
historystorage = MemoryHistoryStorage()
modelstorage = MemoryModelStorage()
policy = Exp4P(
actions, historystorage, modelstorage, delta=delta, p_min=None)
cum_regret = simulation.evaluate_policy(policy, advice1,
desired_actions1)
ctr_tuning[delta_i] = n_rounds - cum_regret[-1]
ctr_tuning /= n_rounds
delta_opt = tuning_region[np.argmax(ctr_tuning)]
simulation.plot_tuning_curve(
tuning_region, ctr_tuning, label="delta changes")
# Regret Analysis
n_rounds = 10000
context2, desired_actions2 = simulation.simulate_data(
n_rounds, context_dimension, actions, "Exp4P", random_state=1)
advice2 = get_advice(context2, action_ids, experts)
historystorage = MemoryHistoryStorage()
modelstorage = MemoryModelStorage()
policy = Exp4P(
actions, historystorage, modelstorage, delta=delta_opt, p_min=None)
for t in range(n_rounds):
history_id, action = policy.get_action(advice2[t], 1)
action_id = action[0]['action'].action_id
if desired_actions2[t] != action_id:
policy.reward(history_id, {action_id: 0})
else:
policy.reward(history_id, {action_id: 1})
policy.plot_avg_regret()
plt.show()
if __name__ == '__main__':
main()
| 36.065934 | 76 | 0.695308 |
354bdfeed64563d9d273b7fdb6aea7d40c4530a5 | 241 | py | Python | products/urls.py | RawFlash/BabyShoesShop | 624ccbc921b847c6f7257a240c02a6c295fdf74e | ["bzip2-1.0.6"] | null | null | null | products/urls.py | RawFlash/BabyShoesShop | 624ccbc921b847c6f7257a240c02a6c295fdf74e | ["bzip2-1.0.6"] | null | null | null | products/urls.py | RawFlash/BabyShoesShop | 624ccbc921b847c6f7257a240c02a6c295fdf74e | ["bzip2-1.0.6"] | null | null | null |
from django.urls import path, include
from django.contrib import admin
from products import views
urlpatterns = [
# url(r'^landing123/', views.landing, name='landing'),
path('product/<product_id>/', views.product, name='product'),
]
| 30.125 | 65 | 0.721992 |
48ce48d2af7427a90c9c79d8407c9a618b9e4144 | 1,195 | py | Python | summarize_from_feedback/datasets/tldr.py | badrinath-newzera/openai-summary-inference | 8ad630942ee198b21cd98c9ba980eddc0e917f7f | ["CC-BY-4.0"] | null | null | null | summarize_from_feedback/datasets/tldr.py | badrinath-newzera/openai-summary-inference | 8ad630942ee198b21cd98c9ba980eddc0e917f7f | ["CC-BY-4.0"] | null | null | null | summarize_from_feedback/datasets/tldr.py | badrinath-newzera/openai-summary-inference | 8ad630942ee198b21cd98c9ba980eddc0e917f7f | ["CC-BY-4.0"] | null | null | null |
import json
from summarize_from_feedback.utils import blobs
# ['id', 'subreddit', 'title', 'post', 'summary']
# t3_1ov8e0 -> id
# and k != "subreddit"
def tldr_filtered_generator(split):
assert split in ["test", "train", "valid"]
f = open('results_new.json', 'r')
datas = json.load(f)
datas = json.loads(datas)
datas = datas['results']
for data in datas:
for artcle in data['articles']:
yield dict(reference=data["event_synopis"], article=artcle['headline'] + ' ' + " ".join(artcle['article_body'].split()[:100]))
def tldr_filtered_queries_generator(split):
assert split in ["test", "train", "valid"]
gcs_path = f"https://openaipublic.blob.core.windows.net/summarize-from-feedback/datasets/tldr_3_filtered_queries/{split}.jsonl"
with blobs.open_file_cached(gcs_path, "rb") as f:
datas = [json.loads(l) for l in f.readlines()]
for data in datas:
# NOTE: don't use ref summary, not filtered
yield dict(reference=data["summary"], **{k: v for (k, v) in data.items() if k != "summary"})
if __name__ == "__main__":
for x in tldr_filtered_generator("train"):
print(list(x.keys()))
break
| 31.447368 | 138 | 0.645188 |
830523cfac5f1cfad3c51c7cc14f73f68ad6e7a6 | 386 | py | Python | misclientes/migrations/0048_auto_20190301_1753.py | mrbrazzi/django-misclientes | 8017cc67e243e4384c3f52ae73d06e16f8fb8d5b | ["Apache-2.0"] | null | null | null | misclientes/migrations/0048_auto_20190301_1753.py | mrbrazzi/django-misclientes | 8017cc67e243e4384c3f52ae73d06e16f8fb8d5b | ["Apache-2.0"] | null | null | null | misclientes/migrations/0048_auto_20190301_1753.py | mrbrazzi/django-misclientes | 8017cc67e243e4384c3f52ae73d06e16f8fb8d5b | ["Apache-2.0"] | null | null | null |
# Generated by Django 2.0.6 on 2019-03-01 16:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('misclientes', '0047_auto_20190228_1859'),
]
operations = [
migrations.AlterModelOptions(
name='enterprise',
options={'permissions': (('view_enterprise', 'Hide content'),)},
),
]
| 21.444444 | 76 | 0.611399 |
eefd1f2ff608b580f034f9b54b21c0301da64523 | 2,427 | py | Python | doc/conf.py | pietrobarbiero/dbgen | 5af33a815597049407ed9e8707d335b9a4379d64 | ["Apache-2.0"] | 1 | 2020-03-05T16:11:39.000Z | 2020-03-05T16:11:39.000Z | doc/conf.py | pietrobarbiero/dbgen | 5af33a815597049407ed9e8707d335b9a4379d64 | ["Apache-2.0"] | null | null | null | doc/conf.py | pietrobarbiero/dbgen | 5af33a815597049407ed9e8707d335b9a4379d64 | ["Apache-2.0"] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
project = 'DBGen'
copyright = '2020, Pietro Barbiero'
author = 'Pietro Barbiero'
# -- General configuration ---------------------------------------------------
master_doc = 'index'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx_rtd_theme']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_options = {
'canonical_url': 'https://dbgen.readthedocs.io/en/latest/',
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': False,
# Toc options
'collapse_navigation': False,
'sticky_navigation': True,
'navigation_depth': 4,
'includehidden': True,
'titles_only': False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 33.708333 | 79 | 0.668315 |
bd2a1406b3fd7c92c3ec9decf0fa49ffa2351b8d | 69 | py | Python | demo_api/gunicorn.conf.py | benedictleedm/sgnlp | 03f0fda8c517d9ca4baf737ce4c46b2495bbd3ba | ["MIT"] | 14 | 2021-08-02T01:52:18.000Z | 2022-01-14T10:16:02.000Z | demo_api/gunicorn.conf.py | benedictleedm/sgnlp | 03f0fda8c517d9ca4baf737ce4c46b2495bbd3ba | ["MIT"] | 29 | 2021-08-02T01:53:46.000Z | 2022-03-30T05:40:46.000Z | demo_api/gunicorn.conf.py | benedictleedm/sgnlp | 03f0fda8c517d9ca4baf737ce4c46b2495bbd3ba | ["MIT"] | 7 | 2021-08-02T01:54:19.000Z | 2022-01-07T06:37:45.000Z |
bind = "0.0.0.0:8000"
wsgi_app = "api:app"
timeout = 180
workers = 1
| 13.8 | 21 | 0.637681 |
35d4df31319ed54a1745ff49fede5aa80ec61d49 | 975 | py | Python | packages/fetchai/skills/fetch_block/__init__.py | bryanchriswhite/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | ["Apache-2.0"] | 1 | 2022-01-23T22:28:43.000Z | 2022-01-23T22:28:43.000Z | packages/fetchai/skills/fetch_block/__init__.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | ["Apache-2.0"] | null | null | null | packages/fetchai/skills/fetch_block/__init__.py | salman6049/agents-aea | d3f177a963eb855d9528555167255bf2b478f4ba | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the implementation of the FetchBlock skill."""
from aea.configurations.base import PublicId
PUBLIC_ID = PublicId.from_str("fetchai/fetch_block:0.12.0")
| 37.5 | 80 | 0.613333 |
e487d51153c3283f7384e57dc38304c295eaf8a7 | 651 | py | Python | tracardi/process_engine/action/v1/connectors/mautic/add_points/model/config.py | bytepl/tracardi | e8fa4684fa6bd3d05165fe48aa925fc6c1e74923 | ["MIT"] | null | null | null | tracardi/process_engine/action/v1/connectors/mautic/add_points/model/config.py | bytepl/tracardi | e8fa4684fa6bd3d05165fe48aa925fc6c1e74923 | ["MIT"] | null | null | null | tracardi/process_engine/action/v1/connectors/mautic/add_points/model/config.py | bytepl/tracardi | e8fa4684fa6bd3d05165fe48aa925fc6c1e74923 | ["MIT"] | null | null | null |
from pydantic import BaseModel, validator
from tracardi.domain.named_entity import NamedEntity
class Config(BaseModel):
source: NamedEntity
contact_id: str
points: str
@validator("contact_id")
def validate_contact_id(cls, value):
if value is None or len(value) == 0:
print(value)
raise ValueError("This field cannot be empty.")
return value
@validator("points")
def validate_points(cls, value):
if value is None or len(value) == 0 or not value.isnumeric():
print(value)
raise ValueError("This field must contain an integer.")
return value
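# Illustrative sketch of constructing a valid plugin config (assumes NamedEntity
# exposes `id` and `name` fields; all values below are placeholders):
#
#     config = Config(
#         source=NamedEntity(id="mautic-resource-id", name="mautic"),
#         contact_id="42",
#         points="10")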
| 28.304348 | 69 | 0.648233 |
91d4926a81751f9a3aa0bd5e2259b862da383f2c | 64,132 | py | Python | gym_env/env.py | lujiayou123/poker | da044d4a341933a2e77a316997e67c8404c2b130 | ["MIT"] | null | null | null | gym_env/env.py | lujiayou123/poker | da044d4a341933a2e77a316997e67c8404c2b130 | ["MIT"] | null | null | null | gym_env/env.py | lujiayou123/poker | da044d4a341933a2e77a316997e67c8404c2b130 | ["MIT"] | null | null | null |
"""Groupier functions"""
import logging
from enum import Enum
import time
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from gym import Env
from gym.spaces import Discrete
from gym_env.rendering import PygletWindow, WHITE, RED, GREEN, BLUE
from tools.hand_evaluator import get_winner
from tools.helper import flatten
__author__ = 'Nicolas Dickreuter'
log = logging.getLogger(__name__)
winner_in_episodes = []
class CommunityData:
"""Data available to everybody"""
def __init__(self, num_players):
"""data"""
self.current_player_position = [False] * num_players # ix[0] = dealer
self.stage = [False] * 4 # one hot: preflop, flop, turn, river
self.community_pot = None
self.current_round_pot = None
self.active_players = [False] * num_players # one hot encoded, 0 = dealer
self.big_blind = 0
self.small_blind = 0
self.legal_moves = [0 for action in Action]
class StageData:
"""Preflop, flop, turn and river"""
def __init__(self, num_players):
"""data"""
self.calls = [False] * num_players # ix[0] = dealer
self.raises = [False] * num_players # ix[0] = dealer
self.min_call_at_action = [0] * num_players # ix[0] = dealer
self.contribution = [0] * num_players # ix[0] = dealer
self.stack_at_action = [0] * num_players # ix[0] = dealer
self.community_pot_at_action = [0] * num_players # ix[0] = dealer
class PlayerData:
"Player specific information"
def __init__(self):
"""data"""
self.position = None
self.equity_to_river_alive = 0
self.equity_to_river_2plr = 0
self.equity_to_river_3plr = 0
self.stack = None
class Action(Enum):
"""Allowed actions"""
FOLD = 0
CHECK = 1
CALL = 2
ALL_IN = 3
RAISE_200 = 4#mini raise
RAISE_250 = 5
    RAISE_300 = 6  # raise to 3x the previous bet
RAISE_350 = 7
RAISE_400 = 8
RAISE_450 = 9
RAISE_500 = 10
RAISE_550 = 11
RAISE_600 = 12
RAISE_10_POT = 13
RAISE_20_POT = 14
RAISE_30_POT = 15
RAISE_40_POT = 16
RAISE_50_POT = 17
RAISE_60_POT = 18
RAISE_70_POT = 19
RAISE_80_POT = 20
RAISE_90_POT = 21
    RAISE_100_POT = 22  # raise by 100% of the pot
RAISE_125_POT = 23
RAISE_150_POT = 24
RAISE_175_POT = 25
RAISE_200_POT = 26
SMALL_BLIND = 27
BIG_BLIND = 28
class Stage(Enum):
"""Allowed actions"""
PREFLOP = 0
FLOP = 1
TURN = 2
RIVER = 3
END_HIDDEN = 4
SHOWDOWN = 5
class HoldemTable(Env):
"""Pokergame environment"""
def __init__(self, initial_stacks=2000, small_blind=1, big_blind=2, render=False, funds_plot=True,
max_raising_rounds=2, use_cpp_montecarlo=False):
"""
The table needs to be initialized once at the beginning
Args:
num_of_players (int): number of players that need to be added
            initial_stacks (real): initial stack per player
small_blind (real)
big_blind (real)
render (bool): render table after each move in graphical format
funds_plot (bool): show plot of funds history at end of each episode
max_raising_rounds (int): max raises per round per player
"""
if use_cpp_montecarlo:
import cppimport
calculator = cppimport.imp("tools.montecarlo_cpp.pymontecarlo")
get_equity = calculator.montecarlo
else:
from tools.montecarlo_python import get_equity
from tools.montecarlo_python import get_short_notation
from tools.montecarlo_python import get_open_range
self.get_equity = get_equity
self.get_short_notation = get_short_notation
self.get_open_range = get_open_range
self.use_cpp_montecarlo = use_cpp_montecarlo
self.num_of_players = 0
self.small_blind = small_blind
self.big_blind = big_blind
self.render_switch = render
self.players = []
self.table_cards = None
self.dealer_pos = None
self.player_status = [] # one hot encoded
self.current_player = None
self.player_cycle = None # cycle iterator
self.stage = None
self.last_player_pot = None
self.viewer = None
self.player_max_win = None # used for side pots
self.second_round = False
self.last_caller = None
self.last_raiser = None
self.raisers = []
self.callers = []
self.played_in_round = None
self.min_call = None
self.community_data = None
self.player_data = None
self.stage_data = None
self.deck = None
self.action = None
self.winner_ix = None
self.initial_stacks = initial_stacks
self.acting_agent = None
self.funds_plot = funds_plot
self.max_round_raising = max_raising_rounds
# pots
self.community_pot = 0
self.current_round_pot = 9
self.player_pots = None # individual player pots
self.observation = None
self.reward = 0
self.info = None
self.done = False
self.funds_history = None
self.array_everything = None
self.legal_moves = None
# self.illegal_move_reward = -1
self.action_space = Discrete(len(Action) - 2)
self.first_action_for_hand = None
self.EV = 0
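    # Illustrative setup sketch (not part of this module; the agent classes are
    # placeholders for any object exposing .name, .range and, for autoplay
    # agents, an .action(...) method):
    #
    #     env = HoldemTable(initial_stacks=2000, small_blind=1, big_blind=2)
    #     env.add_player(RandomAgent(name='random', range=100))
    #     env.add_player(KerasRLShell(name='DQN-8', range=100))
    #     obs = env.reset()
    #     obs, reward, done, info = env.step(Action.CALL.value)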
def reset(self):
"""Reset after game over."""
self.observation = None
self.reward = 0
self.info = None
self.done = False
self.funds_history = pd.DataFrame()
self.first_action_for_hand = [True] * len(self.players)
self.EV = 0
for player in self.players:
player.stack = self.initial_stacks
player.reward = 0
self.dealer_pos = 0
self.player_cycle = PlayerCycle(self.players, dealer_idx=-1, max_steps_after_raiser=len(self.players))
self._start_new_hand()
self._get_environment()
# auto play for agents where autoplay is set
if self._agent_is_autoplay() and not self.done:
self.step('initial_player_autoplay') # kick off the first action after bb by an autoplay agent
return self.array_everything
def step(self, action): # pylint: disable=arguments-differ
"""
Next player makes a move and a new environment is observed.
Args:
action: Used for testing only. Needs to be of Action type
"""
# loop over step function, calling the agent's action method
# until either the env id sone, or an agent is just a shell and
# and will get a call from to the step function externally (e.g. via
# keras-rl
self.reward = 0
self.acting_agent = self.player_cycle.idx
if self._agent_is_autoplay():
while self._agent_is_autoplay() and not self.done:
log.debug("Autoplay agent. Call action method of agent.")
self._get_environment()
# call agent's action method
action = self.current_player.agent_obj.action(self.legal_moves, self.observation, self.info)
if Action(action) not in self.legal_moves:
self._illegal_move(action)
else:
self._execute_step(Action(action))
# time.sleep(5)
if self.first_action_for_hand[self.acting_agent] or self.done:
self.first_action_for_hand[self.acting_agent] = False
# self._calculate_reward()
else: # action received from player shell (e.g. keras rl, not autoplay)
self._get_environment() # get legal moves
if Action(action) not in self.legal_moves:
self._illegal_move(action)
else:
self._execute_step(Action(action))
# time.sleep(5)
if self.first_action_for_hand[self.acting_agent] or self.done:
self.first_action_for_hand[self.acting_agent] = False
# self._calculate_reward()
# print("!!!!sr{}".format(self.reward))
log.info(f"Previous action reward for seat {self.acting_agent}: {self.reward}")
# for i in range(len(self.players)):
# if not hasattr(self.players[i].agent_obj, 'autoplay'):
# self.reward = self.players[i].reward
# print("self.reward {}".format(self.reward))
# break
# for i in range(len(self.players)):
# if hasattr(self.players[i].agent_obj, 'autoplay'):
# self.reward = self.players[i].reward
print("self.reward {}".format(self.reward))
return self.array_everything, self.reward, self.done, self.info
def _execute_step(self, action):
# if self.stage==Stage.PREFLOP:
# print("PREFLOP")
# print(action)
# print(self.current_player.cards)
# print(self.current_player.range)
# crd1 , crd2 = self.get_short_notation(self.current_player.cards)
# print(crd1 , crd2)
# allowed_range = self.get_open_range(self.current_player.range)
# print(allowed_range)
# time.sleep(10)
if self.stage==Stage.PREFLOP:
crd1 , crd2 = self.get_short_notation(self.current_player.cards)
allowed_range = self.get_open_range(self.current_player.range)
# print(crd1)
# print(allowed_range)
if crd1 in allowed_range or crd2 in allowed_range:
if self.current_player.name=='DQN-8':
print("Suggest Play")
print(Action(action))
else:
if self.current_player.name=='DQN-8':
print("Suggest Fold")
action = Action.FOLD
# print(action)
self._process_decision(action)
# print("EV:{}".format(self.reward))
self._next_player()
if self.stage in [Stage.END_HIDDEN, Stage.SHOWDOWN]:
self._end_hand()
self._start_new_hand()
self._get_environment()
def _illegal_move(self, action):
log.warning(f"{action} is an Illegal move, try again. Currently allowed: {self.legal_moves}")
# print(action)
# time.sleep(30)
# self.reward = self.illegal_move_reward
def _agent_is_autoplay(self, idx=None):
if not idx:
return hasattr(self.current_player.agent_obj, 'autoplay')
        return hasattr(self.players[idx].agent_obj, 'autoplay')  # does self.players[idx].agent_obj have an 'autoplay' attribute?
def _get_environment(self):
"""Observe the environment"""
if not self.done:
self._get_legal_moves()
self.observation = None
# self.reward = 0
self.info = None
self.community_data = CommunityData(len(self.players))
self.community_data.community_pot = self.community_pot / (self.big_blind * 100)
self.community_data.current_round_pot = self.current_round_pot / (self.big_blind * 100)
self.community_data.small_blind = self.small_blind
self.community_data.big_blind = self.big_blind
self.community_data.stage[np.minimum(self.stage.value, 3)] = 1 # pylint: disable= invalid-sequence-index
self.community_data.legal_moves = [action in self.legal_moves for action in Action]
# self.cummunity_data.active_players
self.player_data = PlayerData()
self.player_data.stack = [player.stack / (self.big_blind * 100) for player in self.players]
if not self.current_player: # game over
self.current_player = self.players[self.winner_ix]
self.player_data.position = self.current_player.seat
# print("players:",len(self.players))
# for i in range(len(self.players)):
# time.sleep(30)
max_range = self.players[0].range
for i in range(len(self.players)):
if self.players[i].range >= max_range:
max_range = self.players[i].range
# print("max range:{}".format(max_range))
# time.sleep(10)
self.current_player.equity_alive = self.get_equity(set(self.current_player.cards), set(self.table_cards),
sum(self.player_cycle.alive), 100 , max_range)
self.player_data.equity_to_river_alive = self.current_player.equity_alive
arr1 = np.array(list(flatten(self.player_data.__dict__.values())))
arr2 = np.array(list(flatten(self.community_data.__dict__.values())))
arr3 = np.array([list(flatten(sd.__dict__.values())) for sd in self.stage_data]).flatten()
# arr_legal_only = np.array(self.community_data.legal_moves).flatten()
self.array_everything = np.concatenate([arr1, arr2, arr3]).flatten()
self.observation = self.array_everything
self._get_legal_moves()
self.info = {'player_data': self.player_data.__dict__,
'community_data': self.community_data.__dict__,
'stage_data': [stage.__dict__ for stage in self.stage_data],
'legal_moves': self.legal_moves}
self.observation_space = self.array_everything.shape
if self.render_switch:
self.render()
def _calculate_reward(self):#EV作为奖赏
"""
        Preliminary implementation of the reward function
- Currently missing potential additional winnings from future contributions
"""
# if last_action == Action.FOLD:
# self.reward = -(
# self.community_pot + self.current_round_pot)
# else:
# self.reward = self.player_data.equity_to_river_alive * (self.community_pot + self.current_round_pot) - \
# (1 - self.player_data.equity_to_river_alive) * self.player_pots[self.current_player.seat]
# _ = last_action
if not hasattr(self.current_player.agent_obj, 'autoplay'):
self.current_player.reward += self.EV
self.reward += self.current_player.reward
print("!!!!!self.reward{}".format(self.reward))
# print("{} reward {}".format(self.current_player.name,self.current_player.reward))
# if self.done:
# won = 1 if not self._agent_is_autoplay(idx=self.winner_ix) else -1
# self.reward = self.initial_stacks * len(self.players) * won
# log.debug(f"Keras-rl agent has reward {self.reward}")
#
# elif len(self.funds_history) > 1:
# self.reward = self.funds_history.iloc[-1, self.acting_agent] - self.funds_history.iloc[
# -2, self.acting_agent]
#
# else:
# pass
def compute_EV(self,fold_rate,contribution):
a = fold_rate * (self.community_pot + self.current_round_pot)
        b = (1-fold_rate) * (self.current_player.equity_alive * (self.community_pot + self.current_round_pot + contribution - self.player_pots[self.current_player.seat]))  # whole current pot plus our own contribution
c = (1-fold_rate) * (1 - self.current_player.equity_alive) * contribution
EV = a +b -c
return EV
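    # Worked example of the EV formula above (illustrative numbers only):
    # with fold_rate = 0.5, community_pot + current_round_pot = 100,
    # equity_alive = 0.4, contribution = 50 and nothing yet invested this round:
    #   a = 0.5 * 100              = 50   (opponent folds, we take the pot)
    #   b = 0.5 * 0.4 * (100 + 50) = 30   (we get called and win)
    #   c = 0.5 * 0.6 * 50         = 15   (we get called and lose the bet)
    #   EV = a + b - c             = 65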
def _process_decision(self, action): # pylint: disable=too-many-statements
"""Process the decisions that have been made by an agent."""
# print("!!!!!!MIMI CALL:{}".format(self.min_call))
if action not in [Action.SMALL_BLIND, Action.BIG_BLIND]:
assert action in set(self.legal_moves), "Illegal decision"
if action == Action.FOLD:#0 self.community_pot + self.current_round_pot
self.player_cycle.deactivate_current()
self.player_cycle.mark_folder()
self.EV = 0 # W*(POT)-(1-W)*Mini_Call
print("{} FOLD EV:{}".format(self.current_player.name,self.EV))
else:
if action == Action.CALL:#2
contribution = min(self.min_call - self.player_pots[self.current_player.seat],
self.current_player.stack)
self.callers.append(self.current_player.seat)
self.last_caller = self.current_player.seat
fold_rate = 0
# if self.current_player.name=="LJY":
# print("community pot:{}".format(self.community_pot))
# print("current_round_pot:{}".format(self.current_round_pot))
# print("player_pot:{}".format(self.player_pots[self.current_player.seat]))
# print("reward:{}".format(self.reward))
self.EV= self.compute_EV(fold_rate,contribution)
print("{} CALL EV:{}".format(self.current_player.name, self.EV))
# verify the player has enough in his stack
elif action == Action.CHECK:#1
contribution = 0
self.player_cycle.mark_checker()
fold_rate = 0
self.EV= self.compute_EV(fold_rate,contribution)
print("{} CHECK EV:{}".format(self.current_player.name,self.EV))
elif action == Action.ALL_IN:#3
# print("###########min call:{}".format(self.min_call))
contribution = self.current_player.stack
self.raisers.append(self.current_player.seat)
ptr = self.current_player.stack / (self.community_pot + self.current_round_pot)
if ptr > 0.5 and ptr <= 1:
fold_rate = 0.66
elif ptr > 1 and ptr <=1.5:
fold_rate = 0.85
elif ptr > 1.5 and ptr <2:
fold_rate = 0.90
else:
fold_rate = 0.95
self.EV= self.compute_EV(fold_rate,contribution)
print("{} ALLIN EV:{}".format(self.current_player.name,self.EV))
            elif action == Action.RAISE_200:  # raise to 2x the previous bet
# contribution = self.min_call + 3 * self.big_blind
contribution = self.min_call * 2 - self.player_pots[self.current_player.seat]
# print("###########min call:{}".format(self.min_call))
self.raisers.append(self.current_player.seat)
fold_rate = 0.2
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
            elif action == Action.RAISE_250:  # raise to 2.5x the previous bet
# contribution = self.min_call + 3 * self.big_blind
contribution = self.min_call * 2.5 - self.player_pots[self.current_player.seat]
# print("###########min call:{}".format(self.min_call))
self.raisers.append(self.current_player.seat)
fold_rate = 0.25
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
            elif action == Action.RAISE_300:  # raise to 3x the previous bet
# contribution = self.min_call + 3 * self.big_blind
contribution = self.min_call * 3 - self.player_pots[self.current_player.seat]
# print("###########min call:{}".format(self.min_call))
self.raisers.append(self.current_player.seat)
fold_rate = 0.3
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
            elif action == Action.RAISE_350:  # raise to 3.5x the previous bet
# contribution = self.min_call + 3 * self.big_blind
contribution = self.min_call * 3.5 - self.player_pots[self.current_player.seat]
# print("###########min call:{}".format(self.min_call))
self.raisers.append(self.current_player.seat)
fold_rate = 0.35
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
            elif action == Action.RAISE_400:  # raise to 4x the previous bet
# contribution = self.min_call + 3 * self.big_blind
contribution = self.min_call * 4 - self.player_pots[self.current_player.seat]
# print("###########min call:{}".format(self.min_call))
self.raisers.append(self.current_player.seat)
fold_rate = 0.4
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
            elif action == Action.RAISE_450:  # raise to 4.5x the previous bet
# contribution = self.min_call + 3 * self.big_blind
contribution = self.min_call * 4.5 - self.player_pots[self.current_player.seat]
# print("###########min call:{}".format(self.min_call))
self.raisers.append(self.current_player.seat)
fold_rate = 0.45
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
            elif action == Action.RAISE_500:  # raise to 5x the previous bet
# contribution = self.min_call + 3 * self.big_blind
contribution = self.min_call * 5 - self.player_pots[self.current_player.seat]
# print("###########min call:{}".format(self.min_call))
self.raisers.append(self.current_player.seat)
fold_rate = 0.5
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
            elif action == Action.RAISE_550:  # raise to 5.5x the previous bet
# contribution = self.min_call + 3 * self.big_blind
contribution = self.min_call * 5.5 - self.player_pots[self.current_player.seat]
# print("###########min call:{}".format(self.min_call))
self.raisers.append(self.current_player.seat)
fold_rate = 0.55
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
            elif action == Action.RAISE_600:  # raise to 6x the previous bet
# contribution = self.min_call + 3 * self.big_blind
contribution = self.min_call * 6 - self.player_pots[self.current_player.seat]
# print("###########min call:{}".format(self.min_call))
self.raisers.append(self.current_player.seat)
fold_rate = 0.6
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
elif action == Action.RAISE_10_POT:
# print("###########min call:{}".format(self.min_call))
# print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
contribution = (self.community_pot + self.current_round_pot) * 0.1
self.raisers.append(self.current_player.seat)
fold_rate = 0.1
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
elif action == Action.RAISE_20_POT:
# print("###########min call:{}".format(self.min_call))
# print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
contribution = (self.community_pot + self.current_round_pot) * 0.2
self.raisers.append(self.current_player.seat)
fold_rate = 0.2
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
elif action == Action.RAISE_30_POT:
# print("###########min call:{}".format(self.min_call))
# print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
contribution = (self.community_pot + self.current_round_pot) * 0.3
self.raisers.append(self.current_player.seat)
fold_rate = 0.25
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
elif action == Action.RAISE_40_POT:
# print("###########min call:{}".format(self.min_call))
# print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
contribution = (self.community_pot + self.current_round_pot) * 0.4
self.raisers.append(self.current_player.seat)
fold_rate = 0.35
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
elif action == Action.RAISE_50_POT:
# print("###########min call:{}".format(self.min_call))
# print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
contribution = (self.community_pot + self.current_round_pot) * 0.5
self.raisers.append(self.current_player.seat)
fold_rate = 0.5
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
elif action == Action.RAISE_60_POT:
# print("###########min call:{}".format(self.min_call))
# print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
contribution = (self.community_pot + self.current_round_pot) * 0.6
self.raisers.append(self.current_player.seat)
fold_rate = 0.55
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
elif action == Action.RAISE_70_POT:
# print("###########min call:{}".format(self.min_call))
# print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
contribution = (self.community_pot + self.current_round_pot) * 0.7
self.raisers.append(self.current_player.seat)
fold_rate = 0.6
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
elif action == Action.RAISE_80_POT:
# print("###########min call:{}".format(self.min_call))
# print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
contribution = (self.community_pot + self.current_round_pot) * 0.8
self.raisers.append(self.current_player.seat)
fold_rate = 0.65
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
elif action == Action.RAISE_90_POT:
# print("###########min call:{}".format(self.min_call))
# print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
contribution = (self.community_pot + self.current_round_pot) * 0.9
self.raisers.append(self.current_player.seat)
fold_rate = 0.7
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
elif action == Action.RAISE_100_POT:
# print("###########min call:{}".format(self.min_call))
# print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
contribution = (self.community_pot + self.current_round_pot)
self.raisers.append(self.current_player.seat)
fold_rate = 0.8
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
elif action == Action.RAISE_125_POT:
# print("###########min call:{}".format(self.min_call))
# print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
contribution = (self.community_pot + self.current_round_pot) * 1.25
self.raisers.append(self.current_player.seat)
fold_rate = 0.82
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
elif action == Action.RAISE_150_POT:
# print("###########min call:{}".format(self.min_call))
# print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
contribution = (self.community_pot + self.current_round_pot) * 1.5
self.raisers.append(self.current_player.seat)
fold_rate = 0.85
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
elif action == Action.RAISE_175_POT:
# print("###########min call:{}".format(self.min_call))
# print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
contribution = (self.community_pot + self.current_round_pot) * 1.75
self.raisers.append(self.current_player.seat)
fold_rate = 0.88
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
elif action == Action.RAISE_200_POT:
# print("###########min call:{}".format(self.min_call))
# print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
contribution = (self.community_pot + self.current_round_pot) * 2
self.raisers.append(self.current_player.seat)
fold_rate = 0.9
self.EV= self.compute_EV(fold_rate,contribution)
print("{} RAISE TO {} EV:{}".format(self.current_player.name,contribution,self.EV))
            # elif action == Action.RAISE_3BB:  # min_call = 2, i.e. the largest bet made so far; e.g. from the small blind with a big blind of 2: 1 + 2 + 3*2 = 9
# # contribution = self.min_call + 3 * self.big_blind
# contribution = self.min_call * 3 - self.player_pots[self.current_player.seat]
# # print("###########min call:{}".format(self.min_call))
# self.raisers.append(self.current_player.seat)
#
# elif action == Action.RAISE_HALF_POT:
# # print("###########min call:{}".format(self.min_call))
# # print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
# contribution = (self.community_pot + self.current_round_pot) / 2
# if(contribution <= 2 * self.min_call):
# contribution = 2 * self.min_call
# self.raisers.append(self.current_player.seat)
#
# elif action == Action.RAISE_POT:
# # print("###########min call:{}".format(self.min_call))
# # print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
# contribution = (self.community_pot + self.current_round_pot)
# if(contribution <= 2 * self.min_call):
# contribution = 2 * self.min_call
# self.raisers.append(self.current_player.seat)
#
# elif action == Action.RAISE_2POT:
# # print("###########min call:{}".format(self.min_call))
# # print("community_pot:{},current_round_pot:{}".format(self.community_pot,self.current_round_pot))
# contribution = (self.community_pot + self.current_round_pot) * 2
# if(contribution <= 2 * self.min_call):
# contribution = 2 * self.min_call
# self.raisers.append(self.current_player.seat)
elif action == Action.SMALL_BLIND:
contribution = np.minimum(self.small_blind, self.current_player.stack)
self.last_raiser = self.current_player.seat
self.EV = 0
print("{} SMALL BLIND EV:{}".format(self.current_player.name,self.EV))
elif action == Action.BIG_BLIND:
contribution = np.minimum(self.big_blind, self.current_player.stack)
self.last_raiser = self.current_player.seat
self.player_cycle.mark_bb()
self.EV = 0
print("{} BIG BLIND EV:{}".format(self.current_player.name,self.EV))
else:
raise RuntimeError("Illegal action.")
            if contribution > self.min_call:  # bet amount exceeds the current minimum call
self.player_cycle.mark_raiser()
self.last_raiser = self.current_player.seat
            self.current_player.stack -= contribution  # update the stack after betting
print("{} action {} EV{}".format(self.current_player.name,action,self.EV))
if not hasattr(self.current_player.agent_obj, 'autoplay'):
self.reward += self.EV
self.current_player.reward += self.EV
# self.reward = self.current_player.reward
print("{} reward {}".format(self.current_player.name,self.current_player.reward))
print("sr{}".format(self.reward))
            self.player_pots[self.current_player.seat] += contribution  # update the player's contributed chips after betting
            self.current_round_pot += contribution  # update the round pot after betting
            self.last_player_pot = self.player_pots[self.current_player.seat]  # track the last player's pot
if self.current_player.stack == 0 and contribution > 0:
self.player_cycle.mark_out_of_cash_but_contributed()
self.min_call = max(self.min_call, contribution)
self.current_player.actions.append(action)
self.current_player.last_action_in_stage = action.name
self.current_player.temp_stack.append(self.current_player.stack)
self.player_max_win[self.current_player.seat] += contribution # side pot
pos = self.player_cycle.idx
rnd = self.stage.value + self.second_round
self.stage_data[rnd].calls[pos] = action == Action.CALL
self.stage_data[rnd].raises[pos] = action in [Action.RAISE_10_POT, Action.RAISE_20_POT,Action.RAISE_30_POT,Action.RAISE_40_POT,Action.RAISE_50_POT,Action.RAISE_60_POT,Action.RAISE_70_POT,
Action.RAISE_80_POT,Action.RAISE_90_POT,Action.RAISE_100_POT,Action.RAISE_125_POT,Action.RAISE_150_POT,Action.RAISE_175_POT,Action.RAISE_200_POT,
Action.RAISE_200,Action.RAISE_250,Action.RAISE_300,Action.RAISE_350,Action.RAISE_400,Action.RAISE_450,Action.RAISE_500,Action.RAISE_550,Action.RAISE_600]
self.stage_data[rnd].min_call_at_action[pos] = self.min_call / (self.big_blind * 100)
self.stage_data[rnd].community_pot_at_action[pos] = self.community_pot / (self.big_blind * 100)
self.stage_data[rnd].contribution[pos] += contribution / (self.big_blind * 100)
self.stage_data[rnd].stack_at_action[pos] = self.current_player.stack / (self.big_blind * 100)
self.player_cycle.update_alive()
log.info(
f"Seat {self.current_player.seat} ({self.current_player.name}): {action} - Remaining stack: {self.current_player.stack}, "
f"Round pot: {self.current_round_pot}, Community pot: {self.community_pot}, "
f"player pot: {self.player_pots[self.current_player.seat]}")
def _start_new_hand(self):
"""Deal new cards to players and reset table states."""
self._save_funds_history()
if self._check_game_over():
return
log.info("")
log.info("++++++++++++++++++")
log.info("Starting new hand.")
log.info("++++++++++++++++++")
self.table_cards = []
self._create_card_deck()
self.stage = Stage.PREFLOP
# preflop round1,2, flop>: round 1,2, turn etc...
self.stage_data = [StageData(len(self.players)) for _ in range(8)]
# pots
self.community_pot = 0
self.current_round_pot = 0
self.player_pots = [0] * len(self.players)
self.player_max_win = [0] * len(self.players)
self.last_player_pot = 0
self.played_in_round = 0
self.first_action_for_hand = [True] * len(self.players)
for player in self.players:
player.cards = []
self._next_dealer()
self._distribute_cards()
self._initiate_round()
def _save_funds_history(self):
"""Keep track of player funds history"""
funds_dict = {i: player.stack for i, player in enumerate(self.players)}
self.funds_history = pd.concat([self.funds_history, pd.DataFrame(funds_dict, index=[0])])
def _check_game_over(self):
"""Check if only one player has money left"""
player_alive = []
self.player_cycle.new_hand_reset()
for idx, player in enumerate(self.players):
if player.stack > 0:
player_alive.append(True)
else:
self.player_status.append(False)
self.player_cycle.deactivate_player(idx)
remaining_players = sum(player_alive)
        if remaining_players < 2:  # MTT-style: an episode only ends once a player has lost their whole stack
# if self.stage == Stage.SHOWDOWN:
self._game_over()
return True
return False
def _game_over(self):
"""End of an episode."""
log.info("Game over.")
self.done = True
player_names = [f"{i} - {player.name}" for i, player in enumerate(self.players)]
self.funds_history.columns = player_names
if self.funds_plot:
self.funds_history.reset_index(drop=True).plot()
log.info(self.funds_history)
plt.show()
winner_in_episodes.append(self.winner_ix)
league_table = pd.Series(winner_in_episodes).value_counts()
best_player = league_table.index[0]
log.info(league_table)
print("Best Player : {}".format(self.players[best_player].name))
log.info(f"Best Player: {best_player}")
def _initiate_round(self):
"""A new round (flop, turn, river) is initiated"""
self.last_caller = None
self.last_raiser = None
self.raisers = []
self.callers = []
self.min_call = 0
for player in self.players:
player.last_action_in_stage = ''
self.player_cycle.new_round_reset()
if self.stage == Stage.PREFLOP:
log.info("")
log.info("===Round: Stage: PREFLOP")
# max steps total will be adjusted again at bb
self.player_cycle.max_steps_total = len(self.players) * self.max_round_raising + 2
self._next_player()
self._process_decision(Action.SMALL_BLIND)
self._next_player()
self._process_decision(Action.BIG_BLIND)
self._next_player()
elif self.stage in [Stage.FLOP, Stage.TURN, Stage.RIVER]:
self.player_cycle.max_steps_total = len(self.players) * self.max_round_raising
self._next_player()
elif self.stage == Stage.SHOWDOWN:
log.info("Showdown")
else:
raise RuntimeError()
def add_player(self, agent):
"""Add a player to the table. Has to happen at the very beginning"""
self.num_of_players += 1
player = PlayerShell(stack_size=self.initial_stacks, name=agent.name,range=agent.range)
player.agent_obj = agent
# player.range = agent.range
player.seat = len(self.players) # assign next seat number to player
player.stack = self.initial_stacks
player.reward = 0
self.players.append(player)
self.player_status = [True] * len(self.players)
self.player_pots = [0] * len(self.players)
# print(self.players)
def _end_round(self):
"""End of preflop, flop, turn or river"""
self._close_round()
if self.stage == Stage.PREFLOP:
self.stage = Stage.FLOP
self._distribute_cards_to_table(3)
elif self.stage == Stage.FLOP:
self.stage = Stage.TURN
self._distribute_cards_to_table(1)
elif self.stage == Stage.TURN:
self.stage = Stage.RIVER
self._distribute_cards_to_table(1)
elif self.stage == Stage.RIVER:
self.stage = Stage.SHOWDOWN
log.info("--------------------------------")
log.info(f"===ROUND: {self.stage} ===")
self._clean_up_pots()
def _clean_up_pots(self):
# self.community_pot += self.current_round_pot
self.current_round_pot = 0
self.player_pots = [0] * len(self.players)
# print("player",self.player_pots)
def _close_round(self):
"""put player_pots into community pots"""
# self.community_pot += sum(self.player_pots)
self.community_pot += self.current_round_pot
# print("close round - community pot{}".format(self.community_pot))
self.player_pots = [0] * len(self.players)
# print("player",self.player_pots)
self.played_in_round = 0
def _end_hand(self):
# self._clean_up_pots()
self.winner_ix = self._get_winner()
self._award_winner(self.winner_ix)
self._clean_up_pots()
def _get_winner(self):
"""Determine which player has won the hand"""
potential_winners = self.player_cycle.get_potential_winners()
potential_winner_idx = [i for i, potential_winner in enumerate(potential_winners) if potential_winner]
if sum(potential_winners) == 1:
winner_ix = [i for i, active in enumerate(potential_winners) if active][0]
winning_card_type = 'Only remaining player in round'
else:
assert self.stage == Stage.SHOWDOWN
remaining_player_winner_ix, winning_card_type = get_winner([player.cards
for ix, player in enumerate(self.players) if
potential_winners[ix]],
self.table_cards)
winner_ix = potential_winner_idx[remaining_player_winner_ix]
log.info(f"{self.players[winner_ix].name} won: {winning_card_type}")
print(f"{self.players[winner_ix].name} won: {winning_card_type}")
return winner_ix
def _award_winner(self, winner_ix):
"""Hand the pot to the winner and handle side pots"""
max_win_per_player_for_winner = self.player_max_win[winner_ix]
total_winnings = sum(np.minimum(max_win_per_player_for_winner, self.player_max_win))
remains = np.maximum(0, np.array(self.player_max_win) - max_win_per_player_for_winner) # to be returned
exact_winnings = (self.initial_stacks - self.players[winner_ix].stack) / self.big_blind
self.players[winner_ix].stack += total_winnings
self.winner_ix = winner_ix
# print("{} win {}$".format(self.players[winner_ix].name,total_winnings))
print("{} win {} bbs".format(self.players[winner_ix].name,exact_winnings))
if total_winnings < sum(self.player_max_win):
log.info("Returning side pots")
for i, player in enumerate(self.players):
player.stack += remains[i]
def _next_dealer(self):
self.dealer_pos = self.player_cycle.next_dealer().seat
def _next_player(self):
"""Move to the next player"""
self.current_player = self.player_cycle.next_player()
if not self.current_player:
if sum(self.player_cycle.alive) < 2:
log.info("Only one player remaining in round")
self.stage = Stage.END_HIDDEN
else:
log.info("End round - no current player returned")
self._end_round()
                # TODO: in some cases no new round should be initialized because it seems only one player is still playing
self._initiate_round()
elif self.current_player == 'max_steps_total' or self.current_player == 'max_steps_after_raiser':
log.debug(self.current_player)
log.info("End of round ")
self._end_round()
return
def _get_legal_moves(self):
"""Determine what moves are allowed in the current state"""
self.legal_moves = []
        if self.current_round_pot == 0:  # nobody has bet yet in the current round
self.legal_moves.append(Action.CHECK)
self.legal_moves.append(Action.FOLD)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.1) >= (self.big_blind):
self.legal_moves.append(Action.RAISE_10_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.2) >= (self.big_blind):
self.legal_moves.append(Action.RAISE_20_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.3) >= (self.big_blind):
self.legal_moves.append(Action.RAISE_30_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.4) >= (self.big_blind):
self.legal_moves.append(Action.RAISE_40_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.5) >= (self.big_blind):
self.legal_moves.append(Action.RAISE_50_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.6) >= (self.big_blind):
self.legal_moves.append(Action.RAISE_60_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.7) >= (self.big_blind):
self.legal_moves.append(Action.RAISE_70_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.8) >= (self.big_blind):
self.legal_moves.append(Action.RAISE_80_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.9) >= (self.big_blind):
self.legal_moves.append(Action.RAISE_90_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 1.0) >= (self.big_blind):
self.legal_moves.append(Action.RAISE_100_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 1.25) >= (self.big_blind):
self.legal_moves.append(Action.RAISE_125_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 1.5) >= (self.big_blind):
self.legal_moves.append(Action.RAISE_150_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 1.75) >= (self.big_blind):
self.legal_moves.append(Action.RAISE_175_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 2) >= (self.big_blind):
self.legal_moves.append(Action.RAISE_200_POT)
if self.current_player.stack > 0:
self.legal_moves.append(Action.ALL_IN)
###########################################################################
        else:  # somebody has already bet in this round
self.legal_moves.append(Action.CALL)
self.legal_moves.append(Action.FOLD)
if self.current_player.stack >= (self.min_call * 2 - self.player_pots[self.current_player.seat]) >= self.min_call:
self.legal_moves.append(Action.RAISE_200)
if self.current_player.stack >= (self.min_call * 2.5 - self.player_pots[self.current_player.seat]) >= self.min_call:
self.legal_moves.append(Action.RAISE_250)
if self.current_player.stack >= (self.min_call * 3 - self.player_pots[self.current_player.seat]) >= self.min_call:
self.legal_moves.append(Action.RAISE_300)
if self.current_player.stack >= (self.min_call * 3.5 - self.player_pots[self.current_player.seat]) >= self.min_call:
self.legal_moves.append(Action.RAISE_350)
if self.current_player.stack >= (self.min_call * 4 - self.player_pots[self.current_player.seat]) >= self.min_call:
self.legal_moves.append(Action.RAISE_400)
if self.current_player.stack >= (self.min_call * 4.5 - self.player_pots[self.current_player.seat]) >= self.min_call:
self.legal_moves.append(Action.RAISE_450)
if self.current_player.stack >= (self.min_call * 5 - self.player_pots[self.current_player.seat]) >= self.min_call:
self.legal_moves.append(Action.RAISE_500)
if self.current_player.stack >= (self.min_call * 5.5 - self.player_pots[self.current_player.seat]) >= self.min_call:
self.legal_moves.append(Action.RAISE_550)
if self.current_player.stack >= (self.min_call * 6 - self.player_pots[self.current_player.seat]) >= self.min_call:
self.legal_moves.append(Action.RAISE_600)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.1) >= (self.min_call * 2):
self.legal_moves.append(Action.RAISE_10_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.2) >= (self.min_call * 2):
self.legal_moves.append(Action.RAISE_20_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.3) >= (self.min_call * 2):
self.legal_moves.append(Action.RAISE_30_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.4) >= (self.min_call * 2):
self.legal_moves.append(Action.RAISE_40_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.5) >= (self.min_call * 2):
self.legal_moves.append(Action.RAISE_50_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.6) >= (self.min_call * 2):
self.legal_moves.append(Action.RAISE_60_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.7) >= (self.min_call * 2):
self.legal_moves.append(Action.RAISE_70_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.8) >= (self.min_call * 2):
self.legal_moves.append(Action.RAISE_80_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 0.9) >= (self.min_call * 2):
self.legal_moves.append(Action.RAISE_90_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 1.0) >= (self.min_call * 2):
self.legal_moves.append(Action.RAISE_100_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 1.25) >= (self.min_call * 2):
self.legal_moves.append(Action.RAISE_125_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 1.5) >= (self.min_call * 2):
self.legal_moves.append(Action.RAISE_150_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 1.75) >= (self.min_call * 2):
self.legal_moves.append(Action.RAISE_175_POT)
if self.current_player.stack >= ((self.community_pot + self.current_round_pot) * 2) >= (self.min_call * 2):
self.legal_moves.append(Action.RAISE_200_POT)
if self.current_player.stack > 0:
self.legal_moves.append(Action.ALL_IN)
log.debug(f"Community+current round pot pot: {self.community_pot + self.current_round_pot}")
def _create_card_deck(self):
values = "23456789TJQKA"
suites = "CDHS"
self.deck = [] # contains cards in the deck
_ = [self.deck.append(x + y) for x in values for y in suites]
def _distribute_cards(self):
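        # Deal two hole cards to every player that still has chips; busted players are skipped.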
log.info(f"Dealer is at position {self.dealer_pos}")
for player in self.players:
player.cards = []
if player.stack <= 0:
continue
for _ in range(2):
card = np.random.randint(0, len(self.deck))
player.cards.append(self.deck.pop(card))
log.info(f"Player {player.seat} got {player.cards} and ${player.stack}")
def _distribute_cards_to_table(self, amount_of_cards):
for _ in range(amount_of_cards):
card = np.random.randint(0, len(self.deck))
self.table_cards.append(self.deck.pop(card))
log.info(f"Cards on table: {self.table_cards}")
def render(self, mode='human'):
"""Render the current state"""
screen_width = 600
screen_height = 400
table_radius = 200
face_radius = 10
if self.viewer is None:
self.viewer = PygletWindow(screen_width + 50, screen_height + 50)
self.viewer.reset()
self.viewer.circle(screen_width / 2, screen_height / 2, table_radius, color=BLUE,
thickness=0)
for i in range(len(self.players)):
degrees = i * (360 / len(self.players))
radian = (degrees * (np.pi / 180))
x = (face_radius + table_radius) * np.cos(radian) + screen_width / 2
y = (face_radius + table_radius) * np.sin(radian) + screen_height / 2
if self.player_cycle.alive[i]:
color = GREEN
else:
color = RED
self.viewer.circle(x, y, face_radius, color=color, thickness=2)
try:
if i == self.current_player.seat:
self.viewer.rectangle(x - 60, y, 150, -50, (255, 0, 0, 10))
except AttributeError:
pass
self.viewer.text(f"{self.players[i].name}", x - 60, y - 15,
font_size=10,
color=WHITE)
self.viewer.text(f"Player {self.players[i].seat}: {self.players[i].cards}", x - 60, y,
font_size=10,
color=WHITE)
# print(self.player_status[i])
if self.player_status[i]==True:
equity_alive = int(round(float(self.players[i].equity_alive) * 100))
self.viewer.text(f"${self.players[i].stack} (EQ: {equity_alive}%)", x - 60, y + 15, font_size=10,
color=WHITE)
else:
equity_alive = 0
self.viewer.text(f"${self.players[i].stack} (EQ: {equity_alive}%)", x - 60, y + 15, font_size=10,
color=WHITE)
try:
self.viewer.text(self.players[i].last_action_in_stage, x - 60, y + 30, font_size=10, color=WHITE)
except IndexError:
pass
x_inner = (-face_radius + table_radius - 60) * np.cos(radian) + screen_width / 2
y_inner = (-face_radius + table_radius - 60) * np.sin(radian) + screen_height / 2
self.viewer.text(f"${self.player_pots[i]}", x_inner, y_inner, font_size=10, color=WHITE)
self.viewer.text(f"{self.table_cards}", screen_width / 2 - 40, screen_height / 2, font_size=10,
color=WHITE)
self.viewer.text(f"${self.community_pot}", screen_width / 2 - 15, screen_height / 2 + 30, font_size=10,
color=WHITE)
self.viewer.text(f"${self.current_round_pot}", screen_width / 2 - 15, screen_height / 2 + 50,
font_size=10,
color=WHITE)
x_button = (-face_radius + table_radius - 20) * np.cos(radian) + screen_width / 2
y_button = (-face_radius + table_radius - 20) * np.sin(radian) + screen_height / 2
try:
if i == self.player_cycle.dealer_idx:
self.viewer.circle(x_button, y_button, 5, color=BLUE, thickness=2)
except AttributeError:
pass
self.viewer.update()
class PlayerCycle:
"""Handle the circularity of the Table."""
def __init__(self, lst, start_idx=0, dealer_idx=0, max_steps_total=None,
last_raiser_step=None, max_steps_after_raiser=None):
"""Cycle over a list"""
self.lst = lst
self.start_idx = start_idx
self.size = len(lst)
self.max_steps_total = max_steps_total
self.last_raiser_step = last_raiser_step
self.max_steps_after_raiser = max_steps_after_raiser
self.last_raiser = None
self.counter = 0
self.second_round = False
self.idx = 0
self.dealer_idx = dealer_idx
self.can_still_make_moves_in_this_hand = [] # if the player can still play in this round
self.alive = [True] * len(self.lst) # if the player can still play in the following rounds
self.out_of_cash_but_contributed = [False] * len(self.lst)
self.new_hand_reset()
self.checkers = 0
self.folder = None
def new_hand_reset(self):
"""Reset state if a new hand is dealt"""
self.idx = self.start_idx
self.can_still_make_moves_in_this_hand = [True] * len(self.lst)
self.out_of_cash_but_contributed = [False] * len(self.lst)
self.folder = [False] * len(self.lst)
self.counter = 0
def new_round_reset(self):
"""Reset the state for the next stage: flop, turn or river"""
self.counter = 0
self.second_round = False
self.idx = self.dealer_idx
self.last_raiser_step = len(self.lst)
self.checkers = 0
def next_player(self, step=1):
"""Switch to the next player in the round."""
if sum(np.array(self.can_still_make_moves_in_this_hand) + np.array(self.out_of_cash_but_contributed)) < 2:
log.debug("Only one player remaining")
return False # only one player remains
self.idx += step
self.counter += step
self.idx %= len(self.lst)
if self.counter > len(self.lst):
self.second_round = True
if self.max_steps_total and (self.counter >= self.max_steps_total):
log.debug("Max steps total has been reached")
return False
raiser_reference = self.last_raiser if self.last_raiser else 0
if self.max_steps_after_raiser and (self.counter >= self.max_steps_after_raiser + raiser_reference):
log.debug("max steps after raiser has been reached")
return False
if self.checkers == sum(self.alive):
log.debug("All players checked")
return False
while True:
if self.can_still_make_moves_in_this_hand[self.idx]:
break
self.idx += 1
self.counter += 1
self.idx %= len(self.lst)
if self.max_steps_total and self.counter >= self.max_steps_total:
log.debug("Max steps total has been reached after jumping some folders")
return False
self.update_alive()
return self.lst[self.idx]
def next_dealer(self):
"""Move the dealer to the next player that's still in the round."""
self.dealer_idx += 1
self.dealer_idx %= len(self.lst)
while True:
if self.can_still_make_moves_in_this_hand[self.dealer_idx]:
break
self.dealer_idx += 1
self.dealer_idx %= len(self.lst)
return self.lst[self.dealer_idx]
def set_idx(self, idx):
"""Set the index to a specific player"""
self.idx = idx
def deactivate_player(self, idx):
"""Deactivate a pleyr if he has folded or is out of cash."""
assert self.can_still_make_moves_in_this_hand[idx], "Already deactivated"
self.can_still_make_moves_in_this_hand[idx] = False
def deactivate_current(self):
"""Deactivate the current player if he has folded or is out of cash."""
assert self.can_still_make_moves_in_this_hand[self.idx], "Already deactivated"
self.can_still_make_moves_in_this_hand[self.idx] = False
def mark_folder(self):
"""Mark a player as no longer eligible to win cash from the current hand"""
self.folder[self.idx] = True
def mark_raiser(self):
"""Mark a raise for the current player."""
self.last_raiser = self.counter
def mark_checker(self):
"""Counter the number of checks in the round"""
self.checkers += 1
def mark_out_of_cash_but_contributed(self):
"""Mark current player as a raiser or caller, but is out of cash."""
self.out_of_cash_but_contributed[self.idx] = True
self.deactivate_current()
def mark_bb(self):
"""Ensure bb can raise"""
self.last_raiser_step = self.counter + len(self.lst)
self.max_steps_total = self.counter + len(self.lst) * 2
    def is_raising_allowed(self):  # note: raising is always allowed in this variant
"""Check if raising is still allowed at this position"""
return True
# return self.counter <= self.last_raiser_step
def update_alive(self):
"""Update the alive property"""
self.alive = np.array(self.can_still_make_moves_in_this_hand) + \
np.array(self.out_of_cash_but_contributed)
def get_potential_winners(self):
"""Players eligible to win the pot"""
potential_winners = np.logical_and(np.logical_or(np.array(self.can_still_make_moves_in_this_hand),
np.array(self.out_of_cash_but_contributed)),
np.logical_not(np.array(self.folder)))
return potential_winners
class PlayerShell:
"""Player shell"""
def __init__(self, stack_size, name, range):
"""Initiaization of an agent"""
self.stack = stack_size
self.seat = None
self.equity_alive = 0
self.actions = []
self.last_action_in_stage = ''
self.temp_stack = []
self.name = name
self.agent_obj = None
self.range = range
self.reward = 0
| 46.880117 | 211 | 0.596036 |
3dccc572cf1b75750c4d8e70dd0e506c6937f81e | 28,910 | py | Python | tests/providers/test_person.py | valestel/faker | 48e0b6b8da004c753809fbd4fe8ccfeb4fad757e | [
"MIT"
] | null | null | null | tests/providers/test_person.py | valestel/faker | 48e0b6b8da004c753809fbd4fe8ccfeb4fad757e | [
"MIT"
] | null | null | null | tests/providers/test_person.py | valestel/faker | 48e0b6b8da004c753809fbd4fe8ccfeb4fad757e | [
"MIT"
] | null | null | null | import datetime
import re
import unittest
from unittest import mock
from faker import Faker
from faker.providers.person.ar_AA import Provider as ArProvider
from faker.providers.person.cs_CZ import Provider as CsCZProvider
from faker.providers.person.en import Provider as EnProvider
from faker.providers.person.en_IN import Provider as EnINProvider
from faker.providers.person.en_US import Provider as EnUSProvider
from faker.providers.person.es_ES import Provider as EsESProvider
from faker.providers.person.fi_FI import Provider as FiProvider
from faker.providers.person.hy_AM import Provider as HyAmProvider
from faker.providers.person.ne_NP import Provider as NeProvider
from faker.providers.person.or_IN import Provider as OrINProvider
from faker.providers.person.pl_PL import Provider as PlPLProvider
from faker.providers.person.pl_PL import checksum_identity_card_number as pl_checksum_identity_card_number
from faker.providers.person.pt_PT import Provider as PtPtProvider
from faker.providers.person.ru_RU import Provider as RuProvider
from faker.providers.person.ru_RU import translit
from faker.providers.person.sv_SE import Provider as SvSEProvider
from faker.providers.person.ta_IN import Provider as TaINProvider
from faker.providers.person.th_TH import Provider as ThThProvider
from faker.providers.person.zh_CN import Provider as ZhCNProvider
from faker.providers.person.zh_TW import Provider as ZhTWProvider
class TestAr(unittest.TestCase):
""" Tests person in the ar locale """
def setUp(self):
self.fake = Faker('ar')
Faker.seed(0)
def test_first_name(self):
# General first name
name = self.fake.first_name()
assert name
self.assertIsInstance(name, str)
assert name in ArProvider.first_names
# Females first name
name = self.fake.first_name_female()
assert name
self.assertIsInstance(name, str)
assert name in ArProvider.first_names
assert name in ArProvider.first_names_female
# Male first name
name = self.fake.first_name_male()
assert name
self.assertIsInstance(name, str)
assert name in ArProvider.first_names
assert name in ArProvider.first_names_male
def test_last_name(self):
# There's no gender-specific last name in Arabic.
assert not hasattr(ArProvider, 'last_names_male')
assert not hasattr(ArProvider, 'last_names_female')
# All last names apply for all genders.
assert hasattr(ArProvider, 'last_names')
# General last name.
name = self.fake.last_name()
assert name
self.assertIsInstance(name, str)
assert name in ArProvider.last_names
# Females last name.
name = self.fake.last_name_female()
assert name
self.assertIsInstance(name, str)
assert name in ArProvider.last_names
assert name in ArProvider.last_names
# Male last name.
name = self.fake.last_name_male()
assert name
self.assertIsInstance(name, str)
assert name in ArProvider.last_names
assert name in ArProvider.last_names
class TestJaJP(unittest.TestCase):
""" Tests person in the ja_JP locale """
def setUp(self):
self.fake = Faker('ja')
Faker.seed(0)
def test_person(self):
name = self.fake.name()
assert name
assert isinstance(name, str)
first_name = self.fake.first_name()
assert first_name
assert isinstance(first_name, str)
last_name = self.fake.last_name()
assert last_name
assert isinstance(last_name, str)
kana_name = self.fake.kana_name()
assert kana_name
assert isinstance(kana_name, str)
first_kana_name = self.fake.first_kana_name()
assert first_kana_name
assert isinstance(first_kana_name, str)
first_kana_name_male = self.fake.first_kana_name_male()
assert first_kana_name_male
assert isinstance(first_kana_name_male, str)
first_kana_name_female = self.fake.first_kana_name_female()
assert first_kana_name_female
assert isinstance(first_kana_name_female, str)
last_kana_name = self.fake.last_kana_name()
assert last_kana_name
assert isinstance(last_kana_name, str)
romanized_name = self.fake.romanized_name()
assert romanized_name
assert isinstance(romanized_name, str)
first_romanized_name = self.fake.first_romanized_name()
assert first_romanized_name
assert isinstance(first_romanized_name, str)
first_romanized_name_male = self.fake.first_romanized_name_male()
assert first_romanized_name_male
assert isinstance(first_romanized_name_male, str)
first_romanized_name_female = self.fake.first_romanized_name_female()
assert first_romanized_name_female
assert isinstance(first_romanized_name_female, str)
last_romanized_name = self.fake.last_romanized_name()
assert last_romanized_name
assert isinstance(last_romanized_name, str)
first_name_pair = self.fake.first_name_pair()
assert first_name_pair
assert len(first_name_pair) == 3
assert all(map(lambda s: isinstance(s, str), first_name_pair))
first_name_male_pair = self.fake.first_name_male_pair()
assert first_name_male_pair
assert len(first_name_male_pair) == 3
assert all(map(lambda s: isinstance(s, str), first_name_male_pair))
first_name_female_pair = self.fake.first_name_female_pair()
assert first_name_female_pair
assert len(first_name_female_pair) == 3
assert all(map(lambda s: isinstance(s, str), first_name_female_pair))
last_name_pair = self.fake.last_name_pair()
assert last_name_pair
assert len(last_name_pair) == 3
assert all(map(lambda s: isinstance(s, str), last_name_pair))
class TestNeNP(unittest.TestCase):
def setUp(self):
self.fake = Faker('ne_NP')
Faker.seed(0)
def test_names(self):
name = self.fake.name().split()
assert all(isinstance(n, str) for n in name)
# name should always be 2-3 words. If 3, first word
# should be a prefix.
assert name[-2] in NeProvider.first_names
assert name[-1] in NeProvider.last_names
prefixes = NeProvider.prefixes_male + NeProvider.prefixes_female
if len(name) == 3:
assert name[0] in prefixes
class TestFiFI(unittest.TestCase):
def setUp(self):
self.fake = Faker('fi_FI')
Faker.seed(0)
def test_gender_first_names(self):
female_name = self.fake.first_name_female()
self.assertIsInstance(female_name, str)
assert female_name in FiProvider.first_names_female
male_name = self.fake.first_name_male()
self.assertIsInstance(male_name, str)
assert male_name in FiProvider.first_names_male
first_name = self.fake.first_name()
self.assertIsInstance(first_name, str)
assert first_name in FiProvider.first_names
def test_last_names(self):
last_name = self.fake.last_name()
self.assertIsInstance(last_name, str)
assert last_name in FiProvider.last_names
class TestSvSE(unittest.TestCase):
def setUp(self):
self.fake = Faker('sv_SE')
Faker.seed(0)
def test_gender_first_names(self):
"""simple test to verify that we are pulling gender specific names"""
name = self.fake.first_name_female()
assert name in SvSEProvider.first_names_female
name = self.fake.first_name_male()
assert name in SvSEProvider.first_names_male
name = self.fake.first_name()
assert name in SvSEProvider.first_names
class TestPlPL(unittest.TestCase):
def setUp(self):
self.fake = Faker('pl_PL')
Faker.seed(0)
def test_identity_card_number_checksum(self):
assert pl_checksum_identity_card_number(['A', 'I', 'S', 8, 5, 0, 2, 1, 4]) == 8
assert pl_checksum_identity_card_number(['A', 'U', 'L', 9, 2, 7, 2, 8, 5]) == 9
assert pl_checksum_identity_card_number(['A', 'E', 'I', 2, 5, 1, 8, 2, 4]) == 2
assert pl_checksum_identity_card_number(['A', 'H', 'F', 2, 2, 0, 6, 8, 0]) == 2
assert pl_checksum_identity_card_number(['A', 'X', 'E', 8, 2, 0, 3, 4, 0]) == 8
def test_identity_card_number(self):
for _ in range(100):
assert re.search(r'^[A-Z]{3}\d{6}$', self.fake.identity_card_number())
@mock.patch.object(PlPLProvider, 'random_digit')
def test_pesel_birth_date(self, mock_random_digit):
mock_random_digit.side_effect = [3, 5, 8, 8, 7, 9, 9, 3]
assert self.fake.pesel(datetime.date(1999, 12, 31)) == '99123135885'
assert self.fake.pesel(datetime.date(2000, 1, 1)) == '00210179936'
@mock.patch.object(PlPLProvider, 'random_digit')
def test_pesel_sex_male(self, mock_random_digit):
mock_random_digit.side_effect = [1, 3, 4, 5, 6, 1, 7, 0]
assert self.fake.pesel(datetime.date(1909, 3, 3), 'M') == '09030313454'
assert self.fake.pesel(datetime.date(1913, 8, 16), 'M') == '13081661718'
@mock.patch.object(PlPLProvider, 'random_digit')
def test_pesel_sex_female(self, mock_random_digit):
mock_random_digit.side_effect = [4, 9, 1, 6, 6, 1, 7, 3]
assert self.fake.pesel(datetime.date(2007, 4, 13), 'F') == '07241349161'
assert self.fake.pesel(datetime.date(1933, 12, 16), 'F') == '33121661744'
@mock.patch.object(PlPLProvider, 'random_digit')
def test_pwz_doctor(self, mock_random_digit):
mock_random_digit.side_effect = [6, 9, 1, 9, 6, 5, 2, 7, 9, 9, 1, 5]
assert self.fake.pwz_doctor() == '2691965'
assert self.fake.pwz_doctor() == '4279915'
@mock.patch.object(PlPLProvider, 'random_digit')
def test_pwz_doctor_check_digit_zero(self, mock_random_digit):
mock_random_digit.side_effect = [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 9, 9]
assert self.fake.pwz_doctor() == '6000012'
assert self.fake.pwz_doctor() == '1000090'
@mock.patch.object(PlPLProvider, 'random_int')
@mock.patch.object(PlPLProvider, 'random_digit')
def test_pwz_nurse(self, mock_random_digit, mock_random_int):
mock_random_digit.side_effect = [3, 4, 5, 6, 7, 1, 7, 5, 1, 2]
mock_random_int.side_effect = [45, 3]
assert self.fake.pwz_nurse(kind='nurse') == '4534567P'
assert self.fake.pwz_nurse(kind='midwife') == '0317512A'
@staticmethod
def validate_nip(nip_str):
"""
Validates NIP using recommended code
https://pl.wikibooks.org/wiki/Kody_%C5%BAr%C3%B3d%C5%82owe/Implementacja_NIP
"""
nip_str = nip_str.replace('-', '')
if len(nip_str) != 10 or not nip_str.isdigit():
return False
digits = [int(i) for i in nip_str]
weights = (6, 5, 7, 2, 3, 4, 5, 6, 7)
check_sum = sum(d * w for d, w in zip(digits, weights)) % 11
return check_sum == digits[9]
def test_nip(self):
for _ in range(100):
assert self.validate_nip(self.fake.nip())
class TestCsCZ(unittest.TestCase):
def setUp(self):
self.fake = Faker('cs_CZ')
Faker.seed(0)
def test_name_male(self):
male_name = self.fake.name_male()
name_parts = male_name.split(" ")
first_name, last_name = "", ""
if len(name_parts) == 2:
first_name = name_parts[0]
last_name = name_parts[1]
elif len(name_parts) == 4:
first_name = name_parts[1]
last_name = name_parts[2]
elif len(name_parts) == 3:
if name_parts[-1] in CsCZProvider.suffixes:
first_name = name_parts[0]
last_name = name_parts[1]
else:
first_name = name_parts[1]
last_name = name_parts[2]
assert first_name in CsCZProvider.first_names_male
assert last_name in CsCZProvider.last_names_male
def test_name_female(self):
female_name = self.fake.name_female()
name_parts = female_name.split(" ")
first_name, last_name = "", ""
if len(name_parts) == 2:
first_name = name_parts[0]
last_name = name_parts[1]
elif len(name_parts) == 4:
first_name = name_parts[1]
last_name = name_parts[2]
elif len(name_parts) == 3:
if name_parts[-1] in CsCZProvider.suffixes:
first_name = name_parts[0]
last_name = name_parts[1]
else:
first_name = name_parts[1]
last_name = name_parts[2]
assert first_name in CsCZProvider.first_names_female
assert last_name in CsCZProvider.last_names_female
class TestThTh(unittest.TestCase):
""" Tests person in the th_TH locale """
def setUp(self):
self.fake = Faker('th_TH')
Faker.seed(0)
def test_first_name(self):
# General first name
name = self.fake.first_name()
assert name
self.assertIsInstance(name, str)
assert name in ThThProvider.first_names
def test_last_name(self):
# There's no gender-specific last name in Thai.
assert not hasattr(ThThProvider, 'last_names_male')
assert not hasattr(ThThProvider, 'last_names_female')
# All last names apply for all genders.
assert hasattr(ThThProvider, 'last_names')
# General last name.
name = self.fake.last_name()
assert name
self.assertIsInstance(name, str)
assert name in ThThProvider.last_names
def test_name(self):
# Full name
name = self.fake.name()
assert name
self.assertIsInstance(name, str)
class TestZhCN(unittest.TestCase):
def setUp(self):
self.fake = Faker('zh_CN')
Faker.seed(0)
def test_last_name(self):
# There's no gender-specific last name in Chinese.
assert not hasattr(ZhCNProvider, 'last_names_male')
assert not hasattr(ZhCNProvider, 'last_names_female')
assert not hasattr(ZhCNProvider, 'last_romanized_names_male')
assert not hasattr(ZhCNProvider, 'last_romanized_names_female')
# All last names apply for all genders.
assert hasattr(ZhCNProvider, 'last_names')
# General last name.
name = self.fake.last_name()
assert name
self.assertIsInstance(name, str)
assert name in ZhCNProvider.last_names
# Females last name.
name = self.fake.last_name_female()
assert name
self.assertIsInstance(name, str)
assert name in ZhCNProvider.last_names
# Male last name.
name = self.fake.last_name_male()
assert name
self.assertIsInstance(name, str)
assert name in ZhCNProvider.last_names
# General last romanized name
name = self.fake.last_romanized_name()
assert name
self.assertIsInstance(name, str)
assert name in ZhCNProvider.last_romanized_names
def test_first_name(self):
# General first name
name = self.fake.first_name()
assert name
self.assertIsInstance(name, str)
assert name in ZhCNProvider.first_names
# Females first name
name = self.fake.first_name_female()
assert name
self.assertIsInstance(name, str)
assert name in ZhCNProvider.first_names
assert name in ZhCNProvider.first_names_female
# Male first name
name = self.fake.first_name_male()
assert name
self.assertIsInstance(name, str)
assert name in ZhCNProvider.first_names
assert name in ZhCNProvider.first_names_male
# General first romanized name
name = self.fake.first_romanized_name()
assert name
self.assertIsInstance(name, str)
assert name in ZhCNProvider.first_romanized_names
def test_name(self):
# Full name
name = self.fake.name()
assert name
self.assertIsInstance(name, str)
assert name[0] in ZhCNProvider.last_names or name[:2] in ZhCNProvider.last_names
assert name[1:] in ZhCNProvider.first_names or name[2:] in ZhCNProvider.first_names
# Full romanized name
name = self.fake.romanized_name()
assert name
self.assertIsInstance(name, str)
first_romanized_name, last_romanized_name = name.split(' ')
assert first_romanized_name in ZhCNProvider.first_romanized_names
assert last_romanized_name in ZhCNProvider.last_romanized_names
class TestZhTW(unittest.TestCase):
def setUp(self):
self.fake = Faker('zh_TW')
Faker.seed(0)
def test_last_name(self):
# There's no gender-specific last name in Chinese.
assert not hasattr(ZhTWProvider, 'last_names_male')
assert not hasattr(ZhTWProvider, 'last_names_female')
assert not hasattr(ZhTWProvider, 'last_romanized_names_male')
assert not hasattr(ZhTWProvider, 'last_romanized_names_female')
# All last names apply for all genders.
assert hasattr(ZhTWProvider, 'last_names')
# General last name.
name = self.fake.last_name()
assert name
self.assertIsInstance(name, str)
assert name in ZhTWProvider.last_names
# Females last name.
name = self.fake.last_name_female()
assert name
self.assertIsInstance(name, str)
assert name in ZhTWProvider.last_names
# Male last name.
name = self.fake.last_name_male()
assert name
self.assertIsInstance(name, str)
assert name in ZhTWProvider.last_names
# General last romanized name
name = self.fake.last_romanized_name()
assert name
self.assertIsInstance(name, str)
assert name in ZhTWProvider.last_romanized_names
def test_first_name(self):
# General first name
name = self.fake.first_name()
assert name
self.assertIsInstance(name, str)
assert name in ZhTWProvider.first_names
# Females first name
name = self.fake.first_name_female()
assert name
self.assertIsInstance(name, str)
assert name in ZhTWProvider.first_names
assert name in ZhTWProvider.first_names_female
# Male first name
name = self.fake.first_name_male()
assert name
self.assertIsInstance(name, str)
assert name in ZhTWProvider.first_names
assert name in ZhTWProvider.first_names_male
# General first romanized name
name = self.fake.first_romanized_name()
assert name
self.assertIsInstance(name, str)
assert name in ZhTWProvider.first_romanized_names
def test_name(self):
# Full name
name = self.fake.name()
assert name
self.assertIsInstance(name, str)
assert name[0] in ZhTWProvider.last_names or name[:2] in ZhTWProvider.last_names
assert name[1:] in ZhTWProvider.first_names or name[2:] in ZhTWProvider.first_names
# Full romanized name
name = self.fake.romanized_name()
assert name
self.assertIsInstance(name, str)
first_romanized_name, last_romanized_name = name.split(' ')
assert first_romanized_name in ZhTWProvider.first_romanized_names
assert last_romanized_name in ZhTWProvider.last_romanized_names
class TestHyAM(unittest.TestCase):
""" Tests person in the hy_AM locale """
def setUp(self):
self.fake = Faker('hy_AM')
Faker.seed(0)
def test_name(self):
# General name
name = self.fake.name()
self.assertIsInstance(name, str)
# Female name
name = self.fake.name_female()
self.assertIsInstance(name, str)
# Male name
name = self.fake.name_male()
self.assertIsInstance(name, str)
def test_first_name(self):
# General first name
name = self.fake.first_name()
self.assertIsInstance(name, str)
assert name in HyAmProvider.first_names
# Female first name
name = self.fake.first_name_female()
self.assertIsInstance(name, str)
assert name in HyAmProvider.first_names
assert name in HyAmProvider.first_names_female
# Male first name
name = self.fake.first_name_male()
self.assertIsInstance(name, str)
assert name in HyAmProvider.first_names
assert name in HyAmProvider.first_names_male
def test_last_name(self):
# There's no gender-specific last name in Armenian.
assert not hasattr(HyAmProvider, 'last_names_male')
assert not hasattr(HyAmProvider, 'last_names_female')
# All last names apply for all genders.
assert hasattr(HyAmProvider, 'last_names')
# General last name.
name = self.fake.last_name()
self.assertIsInstance(name, str)
assert name in HyAmProvider.last_names
# Females last name.
name = self.fake.last_name_female()
self.assertIsInstance(name, str)
assert name in HyAmProvider.last_names
# Male last name.
name = self.fake.last_name_male()
self.assertIsInstance(name, str)
assert name in HyAmProvider.last_names
class TestTaIN(unittest.TestCase):
def setUp(self):
self.fake = Faker('ta_IN')
Faker.seed(0)
def test_gender_first_names(self):
"""simple test to verify that we are pulling gender specific names"""
name = self.fake.first_name_female()
assert name in TaINProvider.first_names_female
name = self.fake.first_name_male()
assert name in TaINProvider.first_names_male
name = self.fake.first_name()
assert name in TaINProvider.first_names
class TestRuRU(unittest.TestCase):
""" Tests person in the ru_RU locale """
def setUp(self):
self.fake = Faker('ru_RU')
Faker.seed(0)
def test_translit(self):
assert translit('Александр Сергеевич Пушкин') == 'Aleksandr Sergeevich Pushkin'
assert translit('Анна Андреевна Ахматова') == 'Anna Andreevna Akhmatova'
assert translit('Михаил') == 'Mikhail'
assert translit('Фёдор') == 'Fedor'
assert translit('Екатерина') == 'Yekaterina'
assert translit('Анастасия') == 'Anastasiya'
assert translit('Юрьевич') == 'Yurevich'
assert translit('Никитична') == 'Nikitichna'
assert translit('Щербакова') == 'Shcherbakova'
assert translit('Маяковский') == 'Mayakovskiy'
assert translit('Петров-Водкин') == 'Petrov-Vodkin'
assert translit('Воронцова-Дашкова') == 'Vorontsova-Dashkova'
assert translit('А.С.Пушкин') == 'A.S.Pushkin'
assert translit('А. С. Пушкин') == 'A. S. Pushkin'
assert translit('тов. И.И.Сидоров') == 'tov. I.I.Sidorov'
assert translit('г-н А.Б.Петров') == 'g-n A.B.Petrov'
assert translit('г-жа Ю.М.Петрова') == 'g-zha Yu.M.Petrova'
def test_name_female(self):
first_name = self.fake.first_name_female()
assert first_name in RuProvider.first_names_female
middle_name = self.fake.middle_name_female()
assert middle_name in RuProvider.middle_names_female
last_name = self.fake.last_name_female()
assert last_name in RuProvider.last_names_female
def test_name_male(self):
first_name = self.fake.first_name_male()
assert first_name in RuProvider.first_names_male
middle_name = self.fake.middle_name_male()
assert middle_name in RuProvider.middle_names_male
last_name = self.fake.last_name_male()
assert last_name in RuProvider.last_names_male
def test_language_name(self):
language_name = self.fake.language_name()
assert language_name in RuProvider.language_names
class TestEsES(unittest.TestCase):
"""Tests person in the es_ES locale."""
def setUp(self):
self.fake = Faker('es_ES')
Faker.seed(0)
def test_language_name(self):
language_name = self.fake.language_name()
assert language_name in EsESProvider.language_names
class TestPtPt(unittest.TestCase):
"""Tests person in the pt_PT locale."""
def setUp(self):
self.fake = Faker('pt_PT')
Faker.seed(0)
def test_male_first_name(self):
first_name_male = self.fake.first_name_male()
assert first_name_male in PtPtProvider.first_names_male
def test_female_first_name(self):
first_name_female = self.fake.first_name_female()
assert first_name_female in PtPtProvider.first_names_female
def test_last_name(self):
last_name = self.fake.last_name()
assert last_name in PtPtProvider.last_names
class TestUs(unittest.TestCase):
""" Tests person in the en_US locale """
def setUp(self):
self.fake = Faker('en_US')
Faker.seed(0)
def test_first_names(self):
# General first name
name = self.fake.first_name()
self.assertIsInstance(name, str)
assert name in EnUSProvider.first_names
# Female first name
name = self.fake.first_name_female()
self.assertIsInstance(name, str)
assert name in EnUSProvider.first_names
assert name in EnUSProvider.first_names_female
# Male first name
name = self.fake.first_name_male()
self.assertIsInstance(name, str)
assert name in EnUSProvider.first_names
assert name in EnUSProvider.first_names_male
# Nonbinary first name
name = self.fake.first_name_nonbinary()
self.assertIsInstance(name, str)
assert name in EnUSProvider.first_names
assert name in EnUSProvider.first_names_nonbinary
def test_last_names(self):
# General last name
name = self.fake.last_name()
self.assertIsInstance(name, str)
assert name in EnUSProvider.last_names
# Female last name
name = self.fake.last_name_female()
self.assertIsInstance(name, str)
assert name in EnUSProvider.last_names
# Male last name
name = self.fake.last_name_male()
self.assertIsInstance(name, str)
assert name in EnUSProvider.last_names
# Nonbinary last name
name = self.fake.last_name_nonbinary()
self.assertIsInstance(name, str)
assert name in EnUSProvider.last_names
def test_prefix(self):
# Nonbinary prefix
prefix = self.fake.prefix_nonbinary()
self.assertIsInstance(prefix, str)
assert prefix in EnUSProvider.prefixes_nonbinary
def test_suffix(self):
# Nonbinary suffix
suffix = self.fake.suffix_nonbinary()
self.assertIsInstance(suffix, str)
assert suffix in EnUSProvider.suffixes_nonbinary
class TestEn(unittest.TestCase):
""" Tests person in the en locale """
def setUp(self):
self.fake = Faker('en')
Faker.seed(0)
def test_suffix(self):
# Traditional suffix -- provider does not offer a nonbinary suffix at this time
suffix = self.fake.suffix()
self.assertIsInstance(suffix, str)
assert suffix in EnProvider.suffixes_male or suffix in EnProvider.suffixes_female
class TestOrIN(unittest.TestCase):
def setUp(self):
self.fake = Faker('or_IN')
Faker.seed(0)
def test_first_names(self):
"""simple test to verify that we are pulling gender specific names"""
name = self.fake.first_name_female()
assert name in OrINProvider.first_names_female
name = self.fake.first_name_male()
assert name in OrINProvider.first_names_male
name = self.fake.first_name_unisex()
assert name in OrINProvider.first_names_unisex
name = self.fake.first_name()
assert name in OrINProvider.first_names
def test_middle_names(self):
""" test the middle name """
name = self.fake.middle_name()
assert name in OrINProvider.middle_names
def test_last_names(self):
""" test the last name is generating from the provided tuple """
last_name = self.fake.last_name()
assert last_name in OrINProvider.last_names
class TestEnIN(unittest.TestCase):
""" Tests person in the en_IN locale """
def setUp(self):
self.fake = Faker('en_IN')
Faker.seed(0)
def test_first_name(self):
first_name = self.fake.first_name()
assert first_name in EnINProvider.first_names
def test_last_name(self):
last_name = self.fake.last_name()
assert last_name in EnINProvider.last_names
| 34.873341 | 106 | 0.661882 |
950b1d539c0a52d35d7d715b262f182ea5ac04ca | 528 | py | Python | command.py | epichide/ExonDataSimulation | 813d3103953f93e7ee6b0bbef1749e8f06ac65f4 | [
"MIT"
] | null | null | null | command.py | epichide/ExonDataSimulation | 813d3103953f93e7ee6b0bbef1749e8f06ac65f4 | [
"MIT"
] | null | null | null | command.py | epichide/ExonDataSimulation | 813d3103953f93e7ee6b0bbef1749e8f06ac65f4 | [
"MIT"
] | null | null | null | import os
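# Pipeline: run the simu.py steps (-dep, -reg, -mut, -read) to produce simulated reads,
# align them against the reference with bwa mem, then convert, sort and compute
# per-base coverage of the alignment with samtools.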
os.system("python ./simu.py -dep reference.depth 350")
os.system("python ./simu.py -reg")
os.system("python ./simu.py -mut")
os.system("python ./simu.py -read -R 111")
os.system("bwa mem ../fastq/GRCh38_latest_genomic.fna ../fastq/R1_111.fasrq ../fastq/R2_111.fastq > ../fastq/sam/444.sam")
os.system('samtool view -bS ../fastq/sam/444.sam > ../fastq/sam/444.bam')
os.system('samtool sort ../fastq/sam/444.bam > ../fastq/sam/444.sort.bam')
os.system('samtool depth ../fastq/sam/444.sort.bam > ../fastq/sam/444.depth')
| 52.8 | 122 | 0.691288 |
c01327a21783b27578d7c1b7ec0907cdf60abcc1 | 4,790 | py | Python | test/test_pack.py | pgcudahy/sos | ee902841003c7630db501101038f370650955ef9 | [
"BSD-3-Clause"
] | 90 | 2019-06-25T15:33:22.000Z | 2022-03-31T03:50:58.000Z | test/test_pack.py | pgcudahy/sos | ee902841003c7630db501101038f370650955ef9 | [
"BSD-3-Clause"
] | 223 | 2019-06-24T13:47:38.000Z | 2022-03-29T21:37:10.000Z | test/test_pack.py | pgcudahy/sos | ee902841003c7630db501101038f370650955ef9 | [
"BSD-3-Clause"
] | 26 | 2019-06-27T19:24:23.000Z | 2022-03-24T03:41:37.000Z | #!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
#
# import os
# import shutil
# import subprocess
# import unittest
#
#
# class TestPack(unittest.TestCase):
# def setUp(self):
# if os.path.isdir('temp'):
# shutil.rmtree('temp')
# os.mkdir('temp')
# os.chdir('temp')
# with open('test.sos', 'w') as script:
# script.write('''
# %from included include *
# parameter: name='t_f1'
# [0]
# output: name
# import os
# with open(_output, 'wb') as out:
# out.write(os.urandom(10000))
#
# [1]
# output: os.path.join('t_d1', 't_f2')
# import os
# with open(_output, 'wb') as out:
# out.write(os.urandom(50000))
# with open(os.path.join('t_d1', 'ut_f4'), 'wb') as out:
# out.write(os.urandom(10000))
#
# [2]
# output: os.path.join('t_d2', 't_d3', 't_f3')
# import os
# with open(_output, 'wb') as out:
# out.write(os.urandom(5000))
#
# ''')
# with open('included.sos', 'w') as script:
# script.write('''
# # does nothing
# a = 1
# ''')
# subprocess.call('sos run test -s force -w', shell=True)
# # create some other files and directory
# for d in ('ut_d1', 'ut_d2', 'ut_d2/ut_d3'):
# os.mkdir(d)
# for f in ('ut_f1', 'ut_d1/ut_f2', 'ut_d2/ut_d3/ut_f3'):
# with open(f, 'w') as tf:
# tf.write(f)
#
# def assertExists(self, fdlist):
# for fd in fdlist:
# self.assertTrue(os.path.exists(fd), '{} does not exist'.format(fd))
#
# def assertNonExists(self, fdlist):
# for fd in fdlist:
# self.assertFalse(os.path.exists(fd), '{} still exists'.format(fd))
#
# def testSetup(self):
# self.assertExists(['ut_d1', 'ut_d2', 'ut_d2/ut_d3', 'ut_f1',
# 'ut_d1/ut_f2', 'ut_d2/ut_d3/ut_f3'])
# self.assertExists(['t_f1', 't_d1/t_f2', 't_d2/t_d3/t_f3', 't_d2/t_d3', 't_d2'])
# # this is the tricky part, directory containing untracked file should remain
# self.assertExists(['t_d1', 't_d1/ut_f4'])
#
# def testDryrun(self):
# '''Test dryrun mode'''
# self.assertEqual(subprocess.call(
# 'sos pack -o b.sar -i t_d1/ut_f4 --dryrun', shell=True), 0)
# self.assertFalse(os.path.isfile('b.sar'))
#
# def testPackZapped(self):
# '''Test archiving of zapped files'''
# self.assertEqual(subprocess.call('sos remove t_d1/t_f2 --zap -y', shell=True), 0)
# self.assertEqual(subprocess.call('sos pack -o a.sar', shell=True), 0)
# self.assertEqual(subprocess.call('sos unpack a.sar -y', shell=True), 0)
#
# def testPackUnpack(self):
# '''Test pack command'''
# self.assertEqual(subprocess.call('sos pack -o a.sar', shell=True), 0)
# # extra file
# self.assertEqual(subprocess.call('sos pack -o b.sar -i t_d1/ut_f4', shell=True), 0)
# # extra directory
# self.assertEqual(subprocess.call('sos pack -o b.sar -i t_d1 -y', shell=True), 0)
# # unpack
# self.assertEqual(subprocess.call('sos unpack a.sar', shell=True), 0)
# # unpack to a different directory
# self.assertEqual(subprocess.call('sos unpack a.sar -d tmp', shell=True), 0)
# # list content
# self.assertEqual(subprocess.call('sos unpack a.sar -l', shell=True), 0)
#
# def testUnpackScript(self):
# '''Test -s option of unpack'''
# self.assertEqual(subprocess.call('sos pack -o a.sar', shell=True), 0)
# os.remove('test.sos')
# os.remove('included.sos')
# # unpack
# self.assertEqual(subprocess.call('sos unpack a.sar', shell=True), 0)
# self.assertFalse(os.path.isfile('test.sos'))
# self.assertFalse(os.path.isfile('included.sos'))
# # unpack to a different directory
# self.assertEqual(subprocess.call('sos unpack a.sar -s -y', shell=True), 0)
# self.assertTrue(os.path.isfile('test.sos'))
# self.assertTrue(os.path.isfile('included.sos'))
#
# def testUnpackSelected(self):
# # unpack selected file
# self.assertEqual(subprocess.call('sos pack -o a.sar -i t_d1/ut_f4', shell=True), 0)
# shutil.rmtree('.sos')
# shutil.rmtree('t_d1')
# shutil.rmtree('t_d2')
# self.assertEqual(subprocess.call('sos unpack a.sar ut_f4', shell=True), 0)
# self.assertTrue(os.path.isfile('t_d1/ut_f4'))
# self.assertFalse(os.path.exists('t_d2'))
#
# def tearDown(self):
# os.chdir('..')
# try:
# shutil.rmtree('temp')
# except Exception:
# pass
#
#
# if __name__ == '__main__':
# unittest.main()
| 36.564885 | 93 | 0.579958 |
1e3692bc86d11ad2df069f8ec5367b76f54a4cd4 | 1,760 | py | Python | server-code/data-logger.py | SuperChamp234/bridge-watcher | a86c2561c35dafa04673223beff5eecd2d60af62 | [
"MIT"
] | null | null | null | server-code/data-logger.py | SuperChamp234/bridge-watcher | a86c2561c35dafa04673223beff5eecd2d60af62 | [
"MIT"
] | null | null | null | server-code/data-logger.py | SuperChamp234/bridge-watcher | a86c2561c35dafa04673223beff5eecd2d60af62 | [
"MIT"
] | null | null | null | import paho.mqtt.client as mqttClient
import time
import json
import os
def write_json(new_data, filename='~/bridge-watcher/my-app/build/data/bridgedata.json'):
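    # Append a new reading to the "data" array of the JSON file consumed by the web app.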
    with open(os.path.expanduser(filename), 'r+') as file:
# First we load existing data into a dict.
file_data = json.load(file)
# Join new_data with file_data inside emp_details
file_data["data"].append(new_data)
# Sets file's current position at offset.
file.seek(0)
# convert back to json.
json.dump(file_data, file, indent = 4)
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Connected to broker")
global Connected #Use global variable
Connected = True #Signal connection
else:
print("Connection failed")
def on_message(client, userdata, message):
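    # The payload is expected to be a dict-like literal; it is parsed and appended to the JSON log.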
print("Message received: " + message.payload.decode("utf-8"))
write_json(eval(message.payload.decode("utf-8")))
Connected = False #global variable for the state of the connection
broker_address= "bridgewatcher.centralindia.cloudapp.azure.com" #Broker address
port = 1883 #Broker port
user = "BridgeWatcherAdmin" #Connection username
password = "BridgeWatcherAdmin" #Connection password
topic = "test"
client = mqttClient.Client("PythonCLI") #create new instance
client.username_pw_set(user, password=password) #set username and password
client.on_connect= on_connect #attach function to callback
client.on_message= on_message #attach function to callback
client.connect(broker_address,port,60) #connect
client.subscribe(topic) #subscribe
client.loop_forever() #then keep listening forever | 36.666667 | 88 | 0.665341 |
9e5152556b85b4ba6d13e639cc1ffa88c73283ac | 30,673 | py | Python | tools/frontend-aurelia.py | Dev00355/fundamental-tools-copy-from-sap | 1857db103789dde84e9eb40105ecaf029a4cf360 | [
"Apache-2.0"
] | null | null | null | tools/frontend-aurelia.py | Dev00355/fundamental-tools-copy-from-sap | 1857db103789dde84e9eb40105ecaf029a4cf360 | [
"Apache-2.0"
] | null | null | null | tools/frontend-aurelia.py | Dev00355/fundamental-tools-copy-from-sap | 1857db103789dde84e9eb40105ecaf029a4cf360 | [
"Apache-2.0"
] | null | null | null | #!/home/pos/.virtualenvs/picoUI/bin/python
# SPDX-FileCopyrightText: 2014 SAP SE Srdjan Boskovic <srdjan.boskovic@sap.com>
#
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
""" Parse 1 RFM metadata into UI7 elements
:param rfm name, Params, Fields
:return: RFM Parameters/Field View and Parameters Model
"""
import os
import json
import codecs
import re
import shutil
import sys
from collections import OrderedDict
from datetime import datetime
from generator import VERSION, catalog, rfm_sets
from backend import INPUT_TYPE_KEY, INPUT_TYPE_BINARY, INPUT_TYPE_LIST
REMOVE_DDIC = False
for arg in sys.argv:
if arg == "-d":
REMOVE_DDIC = True
break
REMOVE_TYPE = False
for arg in sys.argv:
if arg == "-t":
REMOVE_TYPE = True
break
"""
There are ca. 30 predefined ABAP Dictionary Data Types, maintained as domain values of DATATYPE_D data-element:
https://help.sap.com/viewer/ec1c9c8191b74de98feb94001a95dd76/7.4.16/en-US/cf21f2e5446011d189700000e8322d00.html
These DDIC types are mapped to internal ABAP Processor Data Types:
https://help.sap.com/viewer/ec1c9c8191b74de98feb94001a95dd76/7.4.16/en-US/cf21f2f2446011d189700000e8322d00.html
Int DDIC Description
--- ---- ---------------------------------------------------------
N ACCP Posting period YYYYMM
C CHAR Character String
C CLNT Client
C CUKY Currency key, referenced by CURR fields
P CURR Currency field, stored as DEC
D DATS Date field (YYYYMMDD) stored as char(8)
P DEC Counter or amount field with comma and sign
F FLTP Floating point number, accurate to 8 bytes
b INT1 1-byte integer, integer number <= 255
s INT2 2-byte integer, only for length field before LCHR or LRAW
I INT4 4-byte integer, integer number with sign
C LANG Language key
C LCHR Long character string, requires preceding INT2 field
X LRAW Long byte string, requires preceding INT2 field
N NUMC Character string with only digits
s PREC Precision of a QUAN field
P QUAN Quantity field, points to a unit field with format UNIT
X RAW Uninterpreted sequence of bytes
y RSTR Byte String of Variable Length
g SSTR Short Character String of Variable Length
g STRG Character String of Variable Length
T TIMS Time field (hhmmss), stored as char(6)
- VARC Long character string, no longer supported from Rel. 3.0
C UNIT Unit key for QUAN fields
Int DDIC Description
--- ---- ---------------------------------------------------------
# Character
C CHAR Character String
C CLNT Client
C CUKY Currency key, referenced by CURR fields
C LANG Language key
C LCHR Long character string, requires preceding INT2 field
C UNIT Unit key for QUAN fields
# Date, Time
D DATS Date field (YYYYMMDD) stored as char(8)
T TIMS Time field (hhmmss), stored as char(6)
# Integer
b INT1 1-byte integer, integer number <= 255
s INT2 2-byte integer, only for length field before LCHR or LRAW
I INT4 4-byte integer, integer number with sign
# Numeric
N ACCP Posting period YYYYMM
N NUMC Character string with only digits
# Float
F FLTP Floating point number, accurate to 8 bytes
# Decimal
P DEC Counter or amount field with comma and sign
# Currency, Quantity
P CURR Currency field, stored as DEC, points to currency field with format CUKY
P QUAN Quantity field, points to a unit field with format UNIT
s PREC Precision of a QUAN field
# String
g SSTR Short Character String of Variable Length
g STRG Character String of Variable Length
# Bytes
X RAW Uninterpreted sequence of bytes
X LRAW Long byte string, requires preceding INT2 field
y RSTR Byte String of Variable Length
? VARC Long character string, no longer supported from Rel. 3.0
This script parses RFM metadata and generates html5 fragments (ui elements) and Javascript fragments accordingly,
such as model initialization and table column headers.
Character Strings in ABAP
https://help.sap.com/doc/abapdocu_750_index_htm/7.50/en-US/index.htm?file=abenddic_builtin_types_intro.htm
Character-like data objects contain character strings. A character-like data object either has a character-like
data type (c, n, or string) or it is a date/time type (d or t), or it is a flat structure with exclusively
character-like components.
ABAP supports the character format UCS-2 and a character always occupies two bytes. This ensures that all
characters from the system code page UTF-16 are handled correctly (except for those in the surrogate area.
These characters occupy four bytes and hence are handled as two characters by ABAP. This can produce unexpected
results when cutting character strings or comparing individual characters in character sets.
"""
FORMATTER_ON = "<!-- @formatter:on -->"
FORMATTER_OFF = "<!-- @formatter:off -->"
ELEMENT_PREFIX = "ui-"
HTML_TAG = "ui-tag"
INPUT_TYPE_BINARY_TAG = "checkbox"
INPUT_TYPE_LIST_TAG = "combo"
COLUMN_TAGNAME = "dg-column"
DATE_TAGNAME = "date"
TIME_TAGNAME = "time"
MODEL_PREFIX = "model/aurelia"
JS_FORMAT = "format"
JS_TYPE = "type"
ABAP_TYPE = "abap-ddic"
TIMESTAMP = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
PARAMCLASS = OrderedDict([("I", "INPUT"), ("E", "OUTPUT"), ("C", "CHANGING"), ("T", "TABLE")])
HEADER = """<!-- %s %s -->"""
HEADER_JS = """//
// %s %s
//
"""
HEADER_JS_PARAM = """// %s %s %s %s
"""
HEADER_PARAMCLASS = """
<!-- %s PARAMETERS -->"""
HEADER_JS_PARAMCLASS = """// %s PARAMETERS"""
FIELD_ATTRIBUTES = [ABAP_TYPE, JS_TYPE, JS_FORMAT, "abap-length", "abap-mid", "abap-shlp"]
DDIC_JS = {
# Posting period YYYYMM
"ACCP": {JS_TYPE: "string", JS_FORMAT: "accp", HTML_TAG: "input"},
# Client 000-999
"CLNT": {JS_TYPE: "string", JS_FORMAT: "numeric", HTML_TAG: "input"},
# Language 1 char key
"LANG": {JS_TYPE: "string", JS_FORMAT: "lang", HTML_TAG: "lang"},
# Character String
"CHAR": {JS_TYPE: "string", HTML_TAG: "input"},
# Date field (YYYYMMDD) stored as char(8)
"DATS": {JS_TYPE: "string", JS_FORMAT: "date", HTML_TAG: DATE_TAGNAME},
# Time field (hhmmss), stored as char(6)
"TIMS": {JS_TYPE: "string", JS_FORMAT: "time", HTML_TAG: TIME_TAGNAME},
# Boolean 1 char
"BOOLEAN": {JS_TYPE: "boolean", HTML_TAG: INPUT_TYPE_BINARY_TAG},
# Character string with only digits
"NUMC": {JS_TYPE: "string", JS_FORMAT: "numeric", HTML_TAG: "input"},
# Floating point number, accurate to 8 bytes
"FLTP": {JS_TYPE: "number", JS_FORMAT: "float", HTML_TAG: "input"},
# 1-byte integer, integer number <= 255
"INT1": {JS_TYPE: "number", JS_FORMAT: "integer", HTML_TAG: "input"},
# 2-byte integer, only for length field before LCHR or LRAW
"INT2": {JS_TYPE: "number", JS_FORMAT: "integer", HTML_TAG: "input"},
# 4-byte integer, integer number with sign
"INT4": {JS_TYPE: "number", JS_FORMAT: "integer", HTML_TAG: "input"},
# Counter or amount field with comma and sign
"DEC": {JS_TYPE: "number", JS_FORMAT: "decimal", HTML_TAG: "input"},
"D16R": {JS_TYPE: "number", JS_FORMAT: "decimal", HTML_TAG: "input"},
"D34R": {JS_TYPE: "number", JS_FORMAT: "decimal", HTML_TAG: "input"},
# Currency field, stored as DEC, points to currency field with format CUKY
"CURR": {JS_TYPE: "number", JS_FORMAT: "currency", HTML_TAG: "input"},
# Quantity field, points to a unit field with format UNIT
"QUAN": {JS_TYPE: "number", JS_FORMAT: "quantity", HTML_TAG: "input"},
# Currency key, referenced by CURR fields
"CUKY": {JS_TYPE: "string", HTML_TAG: "input"},
# Unit key for QUAN fields
"UNIT": {JS_TYPE: "string", HTML_TAG: "input"},
# Precision of a QUAN field
"PREC": {JS_TYPE: "number", JS_FORMAT: "integer", HTML_TAG: "number"},
# Long character string, requires preceding INT2 field
"LCHR": {JS_TYPE: "string", HTML_TAG: "text"},
# Byte String of Variable Length
"RSTR": {JS_TYPE: "string", HTML_TAG: "text"},
# Short Character String of Variable Length
"SSTR": {JS_TYPE: "string", HTML_TAG: "text"},
# Character String of Variable Length
"STRG": {JS_TYPE: "string", HTML_TAG: "text"},
# Uninterpreted sequence of bytes
"RAW": {JS_TYPE: "string", HTML_TAG: "text"},
# Long byte string, requires preceding INT2 field
"LRAW": {JS_TYPE: "string", HTML_TAG: "text"},
# native
"STRING": {JS_TYPE: "string", HTML_TAG: "text"},
}
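# Added illustrative sketch (not part of the original script): how DDIC_JS drives
# the generated markup. Looking up an ABAP DDIC type yields the JS type, an optional
# format and the ui element tag, which html_markup() later prefixes with ELEMENT_PREFIX.
def _example_ddic_markup(datatype="CURR"):
    markup = dict(DDIC_JS[datatype])
    markup[HTML_TAG] = ELEMENT_PREFIX + markup[HTML_TAG]
    # e.g. {"type": "number", "format": "currency", "ui-tag": "ui-input"}
    return markup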
# field initial value
def get_field_inital(rfm_field):
init = {"number": "0", "string": "''"}
try:
initial = init[DDIC_JS[rfm_field["format"]["DATATYPE"]][JS_TYPE]]
except Exception:
print("Datatype [%s] not supported" % rfm_field["format"]["DATATYPE"])
print(rfm_field)
return "?"
return initial
class ModelParser:
def __init__(self, rfmset):
self.rfmset = rfmset
# clear the frontend model
if os.path.exists("data/%s/%s" % (rfmset, MODEL_PREFIX)):
shutil.rmtree("data/%s/%s" % (rfmset, MODEL_PREFIX))
os.makedirs("data/%s/%s" % (rfmset, MODEL_PREFIX))
# read the backend model
with codecs.open("data/%s/Params.json" % rfmset, encoding="utf-8", mode="r") as fin:
self.Parameters = json.load(fin, encoding="utf-8")
with codecs.open("data/%s/Fields.json" % rfmset, encoding="utf-8", mode="r") as fin:
self.Fields = OrderedDict(json.load(fin, encoding="utf-8"))
with codecs.open("data/%s/Helps.json" % rfmset, encoding="utf-8", mode="r") as fin:
self.Helps = OrderedDict(json.load(fin, encoding="utf-8"))
def escape_quotes(self, ucstr):
return ucstr.replace('"', '&quot;')
def parse(self):
self.headers()
self.rfm_init()
self.parameter_init()
self.helps()
def helps(self):
help_js = Writer("valueInput", "js")
for shlp_key in sorted(self.Helps):
[shlp_type, shlp_id] = shlp_key.split()
if shlp_type in "CT,CH":
shlp = self.Helps[shlp_key]
if shlp is None:
continue
if "valueProperty" not in shlp:
continue
if len(shlp["valueProperty"]) == 1:
value_property = "'%s'" % shlp["valueProperty"][0]
else:
shlp["valueProperty"] = [
"'%s'" % name.encode("ascii") for name in shlp["valueProperty"]
]
value_property = "[%s]" % ",".join(shlp["valueProperty"])
requested_fields = value_property
help_js.write("// %s" % shlp["text"])
help_js.write(
"%s: {type: '%s', id: '%s', valueProperty: %s,"
% (shlp_key.replace(" ", "_"), shlp_type, shlp_id, value_property)
)
help_js.write(
" displayProperty: [], selection: [], requestedFields: %s },"
% requested_fields
)
help_js.write("")
help_js.save()
def headers(self):
for rfm_name in sorted(self.Parameters):
model = Writer(rfm_name)
model_js = Writer(rfm_name, "js")
model.write(HEADER % (rfm_name, VERSION)) # , TIMESTAMP
model_js.write(HEADER_JS % (rfm_name, VERSION)) # , TIMESTAMP
model.save()
model_js.save()
def rfm_init(self):
for rfm_name in sorted(self.Parameters):
rfm_params = self.Parameters[rfm_name]
comma = ","
index_last = len(rfm_params) - 1
index = 0
model_js = Writer(rfm_name, "js")
model_js.write("%s = {" % rfm_name.replace("/", "_"))
model_js.addindent()
# RFM parameters init
for param_class in PARAMCLASS:
paramclass_header = False
for parameter_name in sorted(rfm_params):
rfm_parameter = rfm_params[parameter_name]
if rfm_parameter["PARAMCLASS"] != param_class:
continue
if not paramclass_header:
paramclass_header = True
model_js.newline()
model_js.write(HEADER_JS_PARAMCLASS % PARAMCLASS[param_class])
model_js.newline()
if index == index_last:
comma = ""
index += 1
if rfm_parameter["PARAMTYPE"] == "VARIABLE":
field_ddic = self.Fields[rfm_parameter["FIELDKEY"]]
if "LENG" not in field_ddic["format"]:
if field_ddic["format"]["DATATYPE"] in ["STRG", "RSTR"]:
field_ddic["format"]["LENG"] = 0
else:
field_ddic["format"]["LENG"] = -1
ttype = (
field_ddic["format"]["DATATYPE"] + "(%u)" % field_ddic["format"]["LENG"]
)
model_js.write(
u"{0: <40} {1: <30}".format(
"%s: %s%s" % (parameter_name, get_field_inital(field_ddic), comma),
"// %-10s %-30s %s"
% (ttype, rfm_parameter["FIELDKEY"], rfm_parameter["PARAMTEXT"]),
)
)
elif rfm_parameter["PARAMTYPE"] == "STRUCTURE":
model_js.write(
u"{0: <40} {1: <30}".format(
"%s: {}%s" % (parameter_name, comma),
"// %s : %s"
% (rfm_parameter["FIELDKEY"], rfm_parameter["PARAMTEXT"]),
)
)
elif rfm_parameter["PARAMTYPE"] == "TABLE":
model_js.write(
u"{0: <40} {1: <30}".format(
"%s: []%s" % (parameter_name, comma),
"// %s : %s"
% (rfm_parameter["FIELDKEY"], rfm_parameter["PARAMTEXT"]),
)
)
else:
raise ValueError("Invalid parameter type [%s]" % rfm_parameter["PARAMTYPE"])
model_js.deindent()
model_js.write("};")
model_js.save()
def parameter_init(self):
def structure_init(model_js, rfm_parameter):
param_ddic = self.Fields[rfm_parameter["FIELDKEY"]]
model_js.newline()
model_js.write(
"// %s %s %s"
% (
rfm_parameter["PARAMETER"],
rfm_parameter["FIELDKEY"],
rfm_parameter["PARAMTEXT"],
)
)
model_js.write()
model_js.write("/* eslint-disable key-spacing */")
model_js.write("// prettier-ignore")
model_js.write("%s = {" % rfm_parameter["PARAMETER"])
index_last = len(param_ddic) - 1
model_js.addindent()
for index, field_name in enumerate(sorted(param_ddic)):
field_ddic = param_ddic[field_name]
line = "%-30s: %s" % (field_name, get_field_inital(field_ddic))
line += "," if index < index_last else " "
if "FIELDTEXT" not in field_ddic["text"]:
print(rfm_parameter["PARAMETER"], field_name)
if "FIELDTEXT" in field_ddic["text"]:
line += " // %s" % field_ddic["text"]["FIELDTEXT"]
else:
print(field_ddic)
raise ValueError("%s: %s" % (rfm_parameter["PARAMETER"], field_name))
model_js.write(line)
model_js.deindent()
model_js.write("};")
model_js.write("/* eslint-enable key-spacing */")
def get_abap_attrs(markup):
element = ""
if REMOVE_DDIC:
del markup[ABAP_TYPE]
if REMOVE_TYPE:
del markup[JS_TYPE]
abap = " data-abap.bind='{"
lena = len(abap)
for attr in FIELD_ATTRIBUTES:
if attr in markup:
if len(abap) > lena:
abap += ", "
if attr == "abap-shlp":
abap += '"%s":%s' % (attr.replace("abap-", ""), markup[attr])
else:
abap += '"%s":"%s"' % (attr.replace("abap-", ""), markup[attr])
del markup[attr]
abap += "}'"
tagname = markup[HTML_TAG].replace(ELEMENT_PREFIX, "")
# no attributes required for ui-checkbox, date, time
if tagname in [INPUT_TYPE_BINARY_TAG, DATE_TAGNAME, TIME_TAGNAME]:
abap = ""
if "alpha-exit" in markup:
abap += ' alpha-exit="%s" ' % markup["alpha-exit"]
del markup["alpha-exit"]
element += abap
if tagname not in [COLUMN_TAGNAME, INPUT_TYPE_BINARY_TAG, DATE_TAGNAME, TIME_TAGNAME]:
element += "\n " + " " * len(markup[HTML_TAG])
element += ' label="%s"' % markup["abap-text"]
del markup["abap-text"]
if len(markup) > 1: # only HTML_TAG left
# remove 'ui:tag:', '<tagname>'
markup_text = str(markup)
m = re.search("(.+?), 'ui-tag(.+?)}", markup_text)
if m:
markup_text = m.group(1) + "}"
element += ' markup="%s"' % str(markup_text)
return element
def html_table(model, rfm_parameter):
param_ddic = self.Fields[rfm_parameter["FIELDKEY"]]
model.newline()
model.write(
"<!-- %s %s %s -->"
% (
rfm_parameter["PARAMETER"],
rfm_parameter["FIELDKEY"],
rfm_parameter["PARAMTEXT"],
)
)
model.write(
"""<ui-datagrid data.bind="%s" title="%s"
data-summary="false" default-sort=""
selectable rowselect.trigger="object.selectObject($event.detail)">"""
% (rfm_parameter["PARAMKEY"], rfm_parameter["PARAMTEXT"])
)
model.addindent()
model.write(FORMATTER_OFF)
for rfm_field in sorted(param_ddic):
markup = html_markup(param_ddic[rfm_field], rfm_field, COLUMN_TAGNAME)
if markup[ABAP_TYPE] in ["CUKY", "UNIT"]:
continue
element = '<%s sortable field="%s"' % (markup[HTML_TAG], rfm_field)
if JS_FORMAT in markup:
if markup[JS_FORMAT] == "boolean":
# markup['type'] = 'boolean'
# del markup['format']
if "shlp" in markup:
del markup["shlp"]
del markup["bind"]
if "abap-unit" in markup:
element += ' unit="%s"' % markup["abap-unit"]
del markup["abap-unit"]
element += get_abap_attrs(markup)
element += "></%s>" % markup[HTML_TAG]
model.write(element)
model.write(FORMATTER_ON)
model.deindent()
model.write("</ui-datagrid>")
html_structure(model, rfm_parameter)
def html_structure(model, rfm_parameter):
param_ddic = self.Fields[rfm_parameter["FIELDKEY"]]
if rfm_parameter["PARAMTYPE"] not in ["TABLE", "STRUCTURE"]:
raise ValueError(rfm_parameter["PARAMTYPE"])
model.newline()
model.write(
"<!-- %s %s %s -->"
% (
rfm_parameter["PARAMETER"],
rfm_parameter["FIELDKEY"],
rfm_parameter["PARAMTEXT"],
)
)
for rfm_field in sorted(param_ddic):
html_field(model, rfm_parameter, rfm_field)
def html_field(model, rfm_parameter, rfm_field):
if type(rfm_field) is dict:
param_ddic = rfm_field
bind = rfm_parameter["PARAMETER"]
else:
param_ddic = self.Fields[rfm_parameter["FIELDKEY"]][rfm_field]
bind = "%s.%s" % (rfm_parameter["PARAMETER"], rfm_field)
markup = html_markup(param_ddic, bind)
# currency and uom only within respective inputs
if markup[ABAP_TYPE] in ["CUKY", "UNIT"]:
return
if markup[ABAP_TYPE] == "DATS":
bind_attr = "date"
elif markup[ABAP_TYPE] == "TIMS":
bind_attr = "time"
else:
bind_attr = "value"
element = '<%s %s.bind="%s"' % (markup[HTML_TAG], bind_attr, markup["bind"])
del markup["bind"]
if "abap-shlp" in markup:
# custom attribute in form elements
element += " shlp.bind='%s'" % markup["abap-shlp"]
del markup["abap-shlp"]
if "abap-unit" in markup:
element += ' unit.bind="%s.%s"' % (rfm_parameter["PARAMETER"], markup["abap-unit"])
del markup["abap-unit"]
element += get_abap_attrs(markup)
element += "></%s>" % markup[HTML_TAG]
model.write(element)
model.newline()
def html_markup(ddic, bind, tagname=""):
if "format" not in ddic:
print(ddic.keys())
markup = {ABAP_TYPE: ddic["format"]["DATATYPE"], "bind": bind}
# JS_TYPE, HTML_TAG, JS_FORMAT -> markup
markup.update(DDIC_JS[ddic["format"]["DATATYPE"]])
# use checkbox and combo tags for binary and list inputs
if INPUT_TYPE_KEY in ddic["format"]:
if ddic["format"][INPUT_TYPE_KEY] == INPUT_TYPE_BINARY:
markup[JS_FORMAT] = "boolean"
markup[HTML_TAG] = INPUT_TYPE_BINARY_TAG
elif ddic["format"][INPUT_TYPE_KEY] == INPUT_TYPE_LIST:
markup[HTML_TAG] = INPUT_TYPE_LIST_TAG
# replace the ui field tagname with the column tagname
if tagname:
markup[HTML_TAG] = tagname
# add ui element prefix
markup[HTML_TAG] = ELEMENT_PREFIX + markup[HTML_TAG]
# todo: not set in backend.py
if "REQUIRED" in ddic["format"]:
markup["required"] = True
if ddic["format"]["DATATYPE"] in ["CURR", "QUAN"]:
# currency or quantity should reference the UoM or currency key
if "REFFIELD" in ddic["format"]:
markup["abap-unit"] = ddic["format"]["REFFIELD"]
else:
markup["abap-unit"] = "todo: unit not found"
print("! Unit not found for:", bind)
# raise Exception('rfm %s parameter %s CURR or QUAN field %s: no REFFIELD found'
# % (rfm_name, rfm_param, fieldname))
if "input" in ddic:
if "CONVEXIT" in ddic["input"]:
markup["alpha-exit"] = ddic["input"]["CONVEXIT"]
if "SHLP" in ddic["input"]:
# empty SHLP happens sometimes
if ddic["input"]["SHLP"].strip():
try:
markup["abap-shlp"] = '{"type":"%s", "id":"%s"}' % tuple(
ddic["input"]["SHLP"].encode("ascii").split()
)
except Exception:
raise ValueError("Invalid SHLP format: [%s]" % ddic["input"]["SHLP"])
if "MEMORYID" in ddic["input"]:
markup["abap-mid"] = ddic["input"]["MEMORYID"]
if (
ddic["format"]["DATATYPE"] not in ["DATS", "TIMS", "ACCP"]
and "BOOLEAN" not in ddic["format"]
):
markup["abap-length"] = field_length(ddic)
if "abap-shlp" in markup:
# shlp not needed for boolean, date, time
if JS_FORMAT in markup:
if markup[JS_FORMAT] == "boolean":
del markup["abap-shlp"]
if markup[ABAP_TYPE] in ["DATS", "TIMS"]:
# no value input needed for date and time
del markup["abap-shlp"]
else:
# add search help if not maintained for quantity, currency, language
if markup[ABAP_TYPE] == "CUKY":
markup["abap-shlp"] = '{"type":"CT", "id":"TCURC"}'
elif markup[ABAP_TYPE] == "UNIT":
markup["abap-shlp"] = '{"type":"SH", "id":"H_T006"}'
elif markup[ABAP_TYPE] == "LANG":
markup["abap-shlp"] = '{"type":"SH", "id":"H_T002"}'
markup["abap-text"] = ddic["text"]["FIELDTEXT"]
return markup
def field_length(ddic):
length = 0
if "DECIMALS" in ddic["format"]:
if ddic["format"]["DATATYPE"] == "FLTP":
# abap len 16.16, is actually 1.15
length = "1.15"
else:
if ddic["format"]["DATATYPE"] in ["DEC", "CURR"]:
# abap int part = field length - decimal point
decrement = 1
elif ddic["format"]["DATATYPE"] in ["QUAN"]:
# abap int part = field length - decimal places
decrement = ddic["format"]["DECIMALS"]
else:
decrement = 0
length = "%s.%s" % (
ddic["format"]["LENG"] - decrement,
ddic["format"]["DECIMALS"],
)
else:
# no decimals
if "LENG" in ddic["format"]:
length = "%s" % ddic["format"]["LENG"]
else:
length = -1 # indicate string element, has no fixed length
# sign
if "SIGN" in ddic["format"]:
length = "+" + length
return length
for rfm_name in sorted(self.Parameters):
model = Writer(rfm_name)
model_js = Writer(rfm_name, "js")
rfm_params = self.Parameters[rfm_name]
for param_class in PARAMCLASS:
paramclass_header = False
# paramclass_header_js = False
for parameter_name in sorted(rfm_params):
rfm_parameter = self.Parameters[rfm_name][parameter_name]
if rfm_parameter["PARAMCLASS"] != param_class:
continue
if not paramclass_header:
paramclass_header = True
model.write(HEADER_PARAMCLASS % PARAMCLASS[param_class])
model_js.newline()
model_js.write("//")
model_js.write(HEADER_JS_PARAMCLASS % PARAMCLASS[param_class])
model_js.write("//")
if rfm_parameter["PARAMTYPE"] == "TABLE":
structure_init(model_js, rfm_parameter)
html_table(model, rfm_parameter)
elif rfm_parameter["PARAMTYPE"] == "STRUCTURE":
structure_init(model_js, rfm_parameter)
html_structure(model, rfm_parameter)
elif rfm_parameter["PARAMTYPE"] == "VARIABLE":
html_field(model, rfm_parameter, self.Fields[rfm_parameter["FIELDKEY"]])
else:
raise ValueError("Invalid parameter type [%s]" % rfm_parameter["PARAMTYPE"])
model.save()
model_js.save()
def __del__(self):
# self.writer_close()
pass
class Writer:
def __init__(self, rfm_name, write_to="HTML"):
self.rfm_name = rfm_name
self.rfmset = rfmset
if write_to.upper() == "HTML":
self.write_to_html = True
self.indent_step = 4
else:
self.write_to_html = False
self.indent_step = 2
self.reset()
def reset(self):
self.indent_count = 0
self.Indent = self.indent_count * " "
self.output = []
def addindent(self):
self.indent_count += self.indent_step
self.Indent = self.indent_count * " "
def deindent(self):
self.indent_count -= self.indent_step
self.Indent = self.indent_count * " "
def write(self, output=""):
self.output.append(self.Indent + output)
def newline(self):
self.output.append("")
def save(self):
rfm_name = self.rfm_name.replace("/", "_")
if rfm_name[0] == "_":
rfm_name = rfm_name[1:]
if self.write_to_html:
output_filename = "data/%s/%s/%s.html" % (rfmset, MODEL_PREFIX, rfm_name)
else:
output_filename = "data/%s/%s/%s.js" % (rfmset, MODEL_PREFIX, rfm_name)
with codecs.open(output_filename, encoding="utf-8", mode="a") as output_file:
for line in self.output:
output_file.write("%s\n" % line)
self.reset()
if __name__ == "__main__":
if len(rfm_sets) == 0:
rfm_sets = catalog
for rfmset in sorted(rfm_sets):
print("Processed %u ABAP API of %s model" % (len(catalog[rfmset]), rfmset))
model_parser = ModelParser(rfmset)
model_parser.parse()
del model_parser
| 37.91471 | 119 | 0.52525 |
147cef560b5373deab3785dcce7db1b8472515a2 | 6,219 | py | Python | hybrid/ubuntu/hyb/bid/type2/len5/hybrid_len5_t2_train.py | 1130310223/Static-Dynamic-Attention | 1da223b06ae41f14575960e247fb13506ed8a124 | [
"MIT"
] | 1 | 2020-04-16T08:42:38.000Z | 2020-04-16T08:42:38.000Z | hybrid/ubuntu/hyb/bid/type2/len5/hybrid_len5_t2_train.py | 1130310223/Static-Dynamic-Attention | 1da223b06ae41f14575960e247fb13506ed8a124 | [
"MIT"
] | null | null | null | hybrid/ubuntu/hyb/bid/type2/len5/hybrid_len5_t2_train.py | 1130310223/Static-Dynamic-Attention | 1da223b06ae41f14575960e247fb13506ed8a124 | [
"MIT"
] | 1 | 2020-04-16T08:41:53.000Z | 2020-04-16T08:41:53.000Z | # -*- coding: utf-8 -*-
import sys
import os
import random
import re
import time
import torch
from torch.autograd import Variable
from torch import optim
import torch.nn as nn
#sys.path.append('../')
from hybrid_t2_model import Seq2Seq
from hybrid_data_utils import *
import psutil
proc = psutil.Process(os.getpid())
def init_command_line(argv):
from argparse import ArgumentParser
usage = "seq2seq"
description = ArgumentParser(usage)
description.add_argument("--w2v_path", type=str, default="/users3/yfwang/data/w2v/ubuntu/")
description.add_argument("--corpus_path", type=str, default="/users3/yfwang/data/corpus/ubuntu/")
description.add_argument("--w2v", type=str, default="ubuntu_train_all_200e.w2v")
description.add_argument("--train_file", type=str, default="ubuntu_train_sessions.txt")
description.add_argument("--max_context_size", type=int, default=5)
description.add_argument("--batch_size", type=int, default=64)
description.add_argument("--enc_hidden_size", type=int, default=512)
description.add_argument("--max_senten_len", type=int, default=15)
description.add_argument("--lr", type=float, default=0.001)
description.add_argument("--weight_decay", type=float, default=1e-5)
description.add_argument("--dropout", type=float, default=0.5)
description.add_argument("--epochs", type=int, default=10)
description.add_argument("--teach_forcing", type=int, default=1)
description.add_argument("--shuffle", type=int, default=1)
description.add_argument("--print_every", type=int, default=200, help="print every batches when training")
description.add_argument("--save_model", type=int, default=1)
description.add_argument("--weights", type=str, default=None)
return description.parse_args(argv)
opts = init_command_line(sys.argv[1:])
print ("Configure:")
print (" w2v:",os.path.join(opts.w2v_path,opts.w2v))
print (" train_file:",os.path.join(opts.corpus_path,opts.train_file))
print (" max_context_size:",opts.max_context_size)
print (" batch_size:",opts.batch_size)
print (" enc_hidden_size:",opts.enc_hidden_size)
print (" max_senten_len:",opts.max_senten_len)
print (" learning rate:",opts.lr)
print (" weight_decay:",opts.weight_decay)
print (" dropout:",opts.dropout)
print (" epochs:",opts.epochs)
print (" teach_forcing:",opts.teach_forcing)
print (" shuffle:",opts.shuffle)
print (" print_every:",opts.print_every)
print (" save_model:",opts.save_model)
print (" weights:",opts.weights)
print ("")
'''Training function for a single batch'''
def train_batch(reply_tensor_batch,contexts_tensor_batch,pad_matrix_batch,model,model_optimizer,criterion,ini_idx):
loss = 0
model_optimizer.zero_grad()
list_pred = model(reply_tensor_batch,contexts_tensor_batch,pad_matrix_batch,ini_idx)
# Sum the loss of each predicted token to get the loss of the whole sentence
for idx,reply_tensor in enumerate(reply_tensor_batch):
loss_s = criterion(list_pred[idx],Variable(reply_tensor).cuda())
loss += loss_s
loss.backward()
model_optimizer.step()
return loss.data[0]
# Training function over multiple epochs
def train_model(word2index,ini_idx,corpus_pairs,model,model_optimizer,criterion,epochs,
batch_size,max_senten_len,max_context_size,print_every,save_model,shuffle):
print ("start training...")
model.train()
state_loss = 10000.0
for ei in range(epochs):
print ("Iteration {}: ".format(ei+1))
epoch_loss = 0
every_loss = 0
t0 = time.time()
pairs_batches,num_batches = buildingPairsBatch(corpus_pairs,batch_size,shuffle=shuffle)
print ("num_batches:",num_batches)
idx_batch = 0
for reply_tensor_batch, contexts_tensor_batch, pad_matrix_batch in getTensorsPairsBatch(word2index,pairs_batches,max_context_size):
loss = train_batch(reply_tensor_batch,contexts_tensor_batch,pad_matrix_batch,model,model_optimizer,criterion,ini_idx)
epoch_loss += loss
every_loss += loss
if (idx_batch+1)%print_every == 0:
every_avg_loss = every_loss/(max_senten_len*(idx_batch+1))
#every_loss = 0
t = round((time.time()-t0),2)
print ("{} batches finished, avg_loss:{},{}".format(idx_batch+1, every_avg_loss,str(t)))
idx_batch += 1
print ("memory percent: %.2f%%" % (proc.memory_percent()))
mem_info = proc.memory_info()
res_mem_use = mem_info[0]
print ("res_mem_use: {:.2f}MB".format(float(res_mem_use)/1024/1024))
epoch_avg_loss = epoch_loss/(max_senten_len*num_batches)
print ("epoch_avg_loss:",epoch_avg_loss)
if save_model and epoch_avg_loss < state_loss:
print ("save model...")
torch.save(model.state_dict(), "./seq2seq_parameters_IterEnd")
state_loss = epoch_avg_loss
print ("Iteration time:",time.time()-t0)
print ("=============================================" )
print ("")
if __name__ == '__main__':
ini_char = '</i>'
unk_char = '<unk>'
t0 = time.time()
print ("loading word2vec...")
ctable = W2vCharacterTable(os.path.join(opts.w2v_path,opts.w2v),ini_char,unk_char)
print(" dict size:",ctable.getDictSize())
print (" emb size:",ctable.getEmbSize())
print ("")
train_file_name = os.path.join(opts.corpus_path,opts.train_file)
ctable,corpus_pairs = readingData(ctable,train_file_name,opts.max_senten_len,opts.max_context_size)
print (time.time()-t0)
print ("")
seq2seq = Seq2Seq(ctable.getDictSize(),ctable.getEmbSize(),opts.enc_hidden_size,opts.batch_size,opts.dropout,
opts.max_senten_len,opts.teach_forcing).cuda()
# Load a saved model to continue training
if opts.weights != None:
print ("load weights...")
seq2seq.load_state_dict(torch.load(opts.weights))
else:
seq2seq.init_parameters(ctable.getEmbMatrix())
model_optimizer = optim.Adam(seq2seq.parameters(), lr=opts.lr, weight_decay=opts.weight_decay)
criterion = nn.NLLLoss()
print ("memory percent: %.2f%%" % (proc.memory_percent()))
mem_info = proc.memory_info()
res_mem_use = mem_info[0]
print ("res_mem_use: {:.2f}MB".format(float(res_mem_use)/1024/1024))
print ("")
word2index = ctable.getWord2Index()
ini_idx = word2index[ini_char]
train_model(word2index,ini_idx,corpus_pairs,seq2seq,model_optimizer,criterion,opts.epochs,opts.batch_size,
opts.max_senten_len,opts.max_context_size,opts.print_every,opts.save_model,opts.shuffle)
print ("")
| 37.920732 | 134 | 0.728413 |
7dc524e525d55fb89153722fb964fbd6893f2498 | 1,569 | py | Python | python/src/base/TextUtil/TextUtil.py | weiwei02/Technical--Documentation | d53d702b17cbeb9e4940764c6e4a4277382ec0cf | [
"Apache-2.0"
] | 2 | 2017-06-25T13:30:40.000Z | 2017-09-18T16:50:40.000Z | python/src/base/TextUtil/TextUtil.py | weiwei02/Technical--Documentation | d53d702b17cbeb9e4940764c6e4a4277382ec0cf | [
"Apache-2.0"
] | null | null | null | python/src/base/TextUtil/TextUtil.py | weiwei02/Technical--Documentation | d53d702b17cbeb9e4940764c6e4a4277382ec0cf | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""
This module provides some simple string manipulation utilities.
:author Wang Weiwei <email>weiwei02@vip.qq.com / weiwei.wang@100credit.com</email>
:sine 2017/8/10
:version 1.0
"""
import string
def simplify(text, whitespace=string.whitespace, delete=""):
"""
Remove redundant whitespace from a string.
:param text: the original string
:param whitespace:
:param delete:
:return:
>>> simplify(" this and\\n that\\t too")
'this and that too'
"""
result = []
word = ""
for char in text:
if char in delete:
continue
elif char in whitespace:
if word:
result.append(word + " ")
word = ""
else:
word += char
if word:
result.append(word)
return "".join(result).rstrip()
def is_balanced(text, brackets="()[]{}<>"):
"""
Check whether the brackets in the string are balanced.
:param text:
:param brackets:
:return:
>>> is_balanced("(python (is (not (lisp))))")
True
"""
counts = {}
left_for_right = {}
for left, right in zip(brackets[::2], brackets[1::2]):
assert left != right, "the bracket characters must differ"
counts[left] = 0
left_for_right[right] = left
for c in text:
if c in counts:
counts[c] += 1
elif c in left_for_right:
left = left_for_right[c]
if counts[left] == 0:
return False
counts[left] -= 1
return not any(counts.values())
if __name__ == "__main__":
import doctest
doctest.testmod() | 21.202703 | 85 | 0.527087 |
33763811e3e4e8ff1c268b3f8d8d6a7f3ec04c81 | 24,325 | py | Python | nova/api/openstack/compute/volumes.py | cloudbase/nova | ddbbf5782759c5b437c8a7bbb9291d038bcf915b | [
"Apache-2.0"
] | null | null | null | nova/api/openstack/compute/volumes.py | cloudbase/nova | ddbbf5782759c5b437c8a7bbb9291d038bcf915b | [
"Apache-2.0"
] | 1 | 2016-04-04T18:41:59.000Z | 2016-04-04T18:41:59.000Z | nova/api/openstack/compute/volumes.py | cloudbase/nova | ddbbf5782759c5b437c8a7bbb9291d038bcf915b | [
"Apache-2.0"
] | 2 | 2015-12-04T23:51:46.000Z | 2016-06-07T20:01:59.000Z | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The volumes extension."""
from oslo_utils import strutils
from webob import exc
from nova.api.openstack import api_version_request
from nova.api.openstack.api_version_request \
import MAX_PROXY_API_SUPPORT_VERSION
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import volumes as volumes_schema
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova import objects
from nova.policies import volumes as vol_policies
from nova.policies import volumes_attachments as va_policies
from nova.volume import cinder
ALIAS = "os-volumes"
def _translate_volume_detail_view(context, vol):
"""Maps keys for volumes details view."""
d = _translate_volume_summary_view(context, vol)
# No additional data / lookups at the moment
return d
def _translate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol['id']
d['status'] = vol['status']
d['size'] = vol['size']
d['availabilityZone'] = vol['availability_zone']
d['createdAt'] = vol['created_at']
if vol['attach_status'] == 'attached':
# NOTE(ildikov): The attachments field in the volume info that
# Cinder sends is converted to an OrderedDict with the
# instance_uuid as key to make it easier for the multiattach
# feature to check the required information. Multiattach will
# be enable in the Nova API in Newton.
# The format looks like the following:
# attachments = {'instance_uuid': {
# 'attachment_id': 'attachment_uuid',
# 'mountpoint': '/dev/sda/
# }
# }
attachment = vol['attachments'].items()[0]
d['attachments'] = [_translate_attachment_detail_view(vol['id'],
attachment[0],
attachment[1].get('mountpoint'))]
else:
d['attachments'] = [{}]
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
if vol['volume_type_id'] and vol.get('volume_type'):
d['volumeType'] = vol['volume_type']['name']
else:
d['volumeType'] = vol['volume_type_id']
d['snapshotId'] = vol['snapshot_id']
if vol.get('volume_metadata'):
d['metadata'] = vol.get('volume_metadata')
else:
d['metadata'] = {}
return d
class VolumeController(wsgi.Controller):
"""The Volumes API controller for the OpenStack API."""
def __init__(self):
self.volume_api = cinder.API()
super(VolumeController, self).__init__()
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors(404)
def show(self, req, id):
"""Return data about the given volume."""
context = req.environ['nova.context']
context.can(vol_policies.BASE_POLICY_NAME)
try:
vol = self.volume_api.get(context, id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return {'volume': _translate_volume_detail_view(context, vol)}
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@wsgi.response(202)
@extensions.expected_errors((400, 404))
def delete(self, req, id):
"""Delete a volume."""
context = req.environ['nova.context']
context.can(vol_policies.BASE_POLICY_NAME)
try:
self.volume_api.delete(context, id)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors(())
def index(self, req):
"""Returns a summary list of volumes."""
return self._items(req, entity_maker=_translate_volume_summary_view)
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors(())
def detail(self, req):
"""Returns a detailed list of volumes."""
return self._items(req, entity_maker=_translate_volume_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of volumes, transformed through entity_maker."""
context = req.environ['nova.context']
context.can(vol_policies.BASE_POLICY_NAME)
volumes = self.volume_api.get_all(context)
limited_list = common.limited(volumes, req)
res = [entity_maker(context, vol) for vol in limited_list]
return {'volumes': res}
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors((400, 403, 404))
@validation.schema(volumes_schema.create)
def create(self, req, body):
"""Creates a new volume."""
context = req.environ['nova.context']
context.can(vol_policies.BASE_POLICY_NAME)
vol = body['volume']
vol_type = vol.get('volume_type')
metadata = vol.get('metadata')
snapshot_id = vol.get('snapshot_id', None)
if snapshot_id is not None:
try:
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
except exception.SnapshotNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
else:
snapshot = None
size = vol.get('size', None)
if size is None and snapshot is not None:
size = snapshot['volume_size']
availability_zone = vol.get('availability_zone')
try:
new_volume = self.volume_api.create(
context,
size,
vol.get('display_name'),
vol.get('display_description'),
snapshot=snapshot,
volume_type=vol_type,
metadata=metadata,
availability_zone=availability_zone
)
except exception.InvalidInput as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
except exception.OverQuota as err:
raise exc.HTTPForbidden(explanation=err.format_message())
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
retval = _translate_volume_detail_view(context, dict(new_volume))
result = {'volume': retval}
location = '%s/%s' % (req.url, new_volume['id'])
return wsgi.ResponseObject(result, headers=dict(location=location))
def _translate_attachment_detail_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment details view."""
d = _translate_attachment_summary_view(volume_id,
instance_uuid,
mountpoint)
# No additional data / lookups at the moment
return d
def _translate_attachment_summary_view(volume_id, instance_uuid, mountpoint):
"""Maps keys for attachment summary view."""
d = {}
# NOTE(justinsb): We use the volume id as the id of the attachment object
d['id'] = volume_id
d['volumeId'] = volume_id
d['serverId'] = instance_uuid
if mountpoint:
d['device'] = mountpoint
return d
def _check_request_version(req, min_version, method, server_id, server_state):
if not api_version_request.is_supported(req, min_version=min_version):
exc_inv = exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=server_id,
state=server_state,
method=method)
common.raise_http_conflict_for_instance_invalid_state(
exc_inv,
method,
server_id)
class VolumeAttachmentController(wsgi.Controller):
"""The volume attachment API controller for the OpenStack API.
A child resource of the server. Note that we use the volume id
as the ID of the attachment (though this is not guaranteed externally)
"""
def __init__(self):
self.compute_api = compute.API()
self.volume_api = cinder.API()
super(VolumeAttachmentController, self).__init__()
@extensions.expected_errors(404)
def index(self, req, server_id):
"""Returns the list of volume attachments for a given instance."""
context = req.environ['nova.context']
context.can(va_policies.POLICY_ROOT % 'index')
return self._items(req, server_id,
entity_maker=_translate_attachment_summary_view)
@extensions.expected_errors(404)
def show(self, req, server_id, id):
"""Return data about the given volume attachment."""
context = req.environ['nova.context']
context.can(vol_policies.BASE_POLICY_NAME)
context.can(va_policies.POLICY_ROOT % 'show')
volume_id = id
instance = common.get_instance(self.compute_api, context, server_id)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
if not bdms:
msg = _("Instance %s is not attached.") % server_id
raise exc.HTTPNotFound(explanation=msg)
assigned_mountpoint = None
for bdm in bdms:
if bdm.volume_id == volume_id:
assigned_mountpoint = bdm.device_name
break
if assigned_mountpoint is None:
msg = _("volume_id not found: %s") % volume_id
raise exc.HTTPNotFound(explanation=msg)
return {'volumeAttachment': _translate_attachment_detail_view(
volume_id,
instance.uuid,
assigned_mountpoint)}
@extensions.expected_errors((400, 404, 409))
@validation.schema(volumes_schema.create_volume_attachment)
def create(self, req, server_id, body):
"""Attach a volume to an instance."""
context = req.environ['nova.context']
context.can(vol_policies.BASE_POLICY_NAME)
context.can(va_policies.POLICY_ROOT % 'create')
volume_id = body['volumeAttachment']['volumeId']
device = body['volumeAttachment'].get('device')
instance = common.get_instance(self.compute_api, context, server_id)
if instance.vm_state in (vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED):
_check_request_version(req, '2.20', 'attach_volume',
server_id, instance.vm_state)
try:
device = self.compute_api.attach_volume(context, instance,
volume_id, device)
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.DevicePathInUse as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'attach_volume', server_id)
except (exception.InvalidVolume,
exception.InvalidDevicePath) as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
# The attach is async
attachment = {}
attachment['id'] = volume_id
attachment['serverId'] = server_id
attachment['volumeId'] = volume_id
attachment['device'] = device
# NOTE(justinsb): And now, we have a problem...
# The attach is async, so there's a window in which we don't see
# the attachment (until the attachment completes). We could also
# get problems with concurrent requests. I think we need an
# attachment state, and to write to the DB here, but that's a bigger
# change.
# For now, we'll probably have to rely on libraries being smart
# TODO(justinsb): How do I return "accepted" here?
return {'volumeAttachment': attachment}
@wsgi.response(202)
@extensions.expected_errors((400, 404, 409))
@validation.schema(volumes_schema.update_volume_attachment)
def update(self, req, server_id, id, body):
context = req.environ['nova.context']
context.can(vol_policies.BASE_POLICY_NAME)
context.can(va_policies.POLICY_ROOT % 'update')
old_volume_id = id
try:
old_volume = self.volume_api.get(context, old_volume_id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
new_volume_id = body['volumeAttachment']['volumeId']
try:
new_volume = self.volume_api.get(context, new_volume_id)
except exception.VolumeNotFound as e:
# NOTE: This BadRequest is different from the above NotFound even
# though the same VolumeNotFound exception. This is intentional
# because new_volume_id is specified in a request body and if a
# nonexistent resource in the body (not URI) the code should be
# 400 Bad Request as API-WG guideline. On the other hand,
# old_volume_id is specified with URI. So it is valid to return
# NotFound response if that is not existent.
raise exc.HTTPBadRequest(explanation=e.format_message())
instance = common.get_instance(self.compute_api, context, server_id)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
found = False
try:
for bdm in bdms:
if bdm.volume_id != old_volume_id:
continue
try:
self.compute_api.swap_volume(context, instance, old_volume,
new_volume)
found = True
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InvalidVolume as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'swap_volume', server_id)
if not found:
msg = _("The volume was either invalid or not attached to the "
"instance.")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.response(202)
@extensions.expected_errors((400, 403, 404, 409))
def delete(self, req, server_id, id):
"""Detach a volume from an instance."""
context = req.environ['nova.context']
context.can(vol_policies.BASE_POLICY_NAME)
context.can(va_policies.POLICY_ROOT % 'delete')
volume_id = id
instance = common.get_instance(self.compute_api, context, server_id)
if instance.vm_state in (vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED):
_check_request_version(req, '2.20', 'detach_volume',
server_id, instance.vm_state)
try:
volume = self.volume_api.get(context, volume_id)
except exception.VolumeNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
if not bdms:
msg = _("Instance %s is not attached.") % server_id
raise exc.HTTPNotFound(explanation=msg)
found = False
try:
for bdm in bdms:
if bdm.volume_id != volume_id:
continue
if bdm.is_root:
msg = _("Can't detach root device volume")
raise exc.HTTPForbidden(explanation=msg)
try:
self.compute_api.detach_volume(context, instance, volume)
found = True
break
except exception.VolumeUnattached:
# The volume is not attached. Treat it as NotFound
# by falling through.
pass
except exception.InvalidVolume as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceUnknownCell as e:
raise exc.HTTPNotFound(explanation=e.format_message())
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'detach_volume', server_id)
if not found:
msg = _("volume_id not found: %s") % volume_id
raise exc.HTTPNotFound(explanation=msg)
def _items(self, req, server_id, entity_maker):
"""Returns a list of attachments, transformed through entity_maker."""
context = req.environ['nova.context']
context.can(vol_policies.BASE_POLICY_NAME)
instance = common.get_instance(self.compute_api, context, server_id)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
limited_list = common.limited(bdms, req)
results = []
for bdm in limited_list:
if bdm.volume_id:
results.append(entity_maker(bdm.volume_id,
bdm.instance_uuid,
bdm.device_name))
return {'volumeAttachments': results}
def _translate_snapshot_detail_view(context, vol):
"""Maps keys for snapshots details view."""
d = _translate_snapshot_summary_view(context, vol)
# NOTE(gagupta): No additional data / lookups at the moment
return d
def _translate_snapshot_summary_view(context, vol):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = vol['id']
d['volumeId'] = vol['volume_id']
d['status'] = vol['status']
# NOTE(gagupta): We map volume_size as the snapshot size
d['size'] = vol['volume_size']
d['createdAt'] = vol['created_at']
d['displayName'] = vol['display_name']
d['displayDescription'] = vol['display_description']
return d
class SnapshotController(wsgi.Controller):
"""The Snapshots API controller for the OpenStack API."""
def __init__(self):
self.volume_api = cinder.API()
super(SnapshotController, self).__init__()
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors(404)
def show(self, req, id):
"""Return data about the given snapshot."""
context = req.environ['nova.context']
context.can(vol_policies.BASE_POLICY_NAME)
try:
vol = self.volume_api.get_snapshot(context, id)
except exception.SnapshotNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
return {'snapshot': _translate_snapshot_detail_view(context, vol)}
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@wsgi.response(202)
@extensions.expected_errors(404)
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['nova.context']
context.can(vol_policies.BASE_POLICY_NAME)
try:
self.volume_api.delete_snapshot(context, id)
except exception.SnapshotNotFound as e:
raise exc.HTTPNotFound(explanation=e.format_message())
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors(())
def index(self, req):
"""Returns a summary list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_summary_view)
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors(())
def detail(self, req):
"""Returns a detailed list of snapshots."""
return self._items(req, entity_maker=_translate_snapshot_detail_view)
def _items(self, req, entity_maker):
"""Returns a list of snapshots, transformed through entity_maker."""
context = req.environ['nova.context']
context.can(vol_policies.BASE_POLICY_NAME)
snapshots = self.volume_api.get_all_snapshots(context)
limited_list = common.limited(snapshots, req)
res = [entity_maker(context, snapshot) for snapshot in limited_list]
return {'snapshots': res}
@wsgi.Controller.api_version("2.1", MAX_PROXY_API_SUPPORT_VERSION)
@extensions.expected_errors((400, 403))
@validation.schema(volumes_schema.snapshot_create)
def create(self, req, body):
"""Creates a new snapshot."""
context = req.environ['nova.context']
context.can(vol_policies.BASE_POLICY_NAME)
snapshot = body['snapshot']
volume_id = snapshot['volume_id']
force = snapshot.get('force', False)
force = strutils.bool_from_string(force, strict=True)
if force:
create_func = self.volume_api.create_snapshot_force
else:
create_func = self.volume_api.create_snapshot
try:
new_snapshot = create_func(context, volume_id,
snapshot.get('display_name'),
snapshot.get('display_description'))
except exception.OverQuota as e:
raise exc.HTTPForbidden(explanation=e.format_message())
retval = _translate_snapshot_detail_view(context, new_snapshot)
return {'snapshot': retval}
class Volumes(extensions.V21APIExtensionBase):
"""Volumes support."""
name = "Volumes"
alias = ALIAS
version = 1
def get_resources(self):
resources = []
res = extensions.ResourceExtension(
ALIAS, VolumeController(), collection_actions={'detail': 'GET'})
resources.append(res)
res = extensions.ResourceExtension('os-volumes_boot',
inherits='servers')
resources.append(res)
res = extensions.ResourceExtension('os-volume_attachments',
VolumeAttachmentController(),
parent=dict(
member_name='server',
collection_name='servers'))
resources.append(res)
res = extensions.ResourceExtension(
'os-snapshots', SnapshotController(),
collection_actions={'detail': 'GET'})
resources.append(res)
return resources
def get_controller_extensions(self):
return []
| 38.007813 | 79 | 0.630216 |
1554895f5b844fc6bab5aec316d6b0654541fc86 | 6,747 | py | Python | google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/appengine/admin/tools/conversion/yaml_schema_v1.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2017-11-29T18:52:27.000Z | 2017-11-29T18:52:27.000Z | google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/appengine/admin/tools/conversion/yaml_schema_v1.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/appengine/admin/tools/conversion/yaml_schema_v1.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2020-07-25T12:09:01.000Z | 2020-07-25T12:09:01.000Z | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Definition for conversion between legacy YAML and the API JSON formats."""
from googlecloudsdk.third_party.appengine.admin.tools.conversion import converters as c
from googlecloudsdk.third_party.appengine.admin.tools.conversion import schema as s
SCHEMA = s.Message(
api_config=s.Message(
url=s.Value(converter=c.ToJsonString),
login=s.Value(converter=c.EnumConverter('LOGIN')),
secure=s.Value('security_level', converter=c.EnumConverter('SECURE')),
auth_fail_action=s.Value(converter=c.EnumConverter('AUTH_FAIL_ACTION')),
script=s.Value(converter=c.ToJsonString)),
auto_id_policy=s.Value('beta_settings',
lambda val: {'auto_id_policy': val}),
automatic_scaling=s.Message(
converter=c.ConvertAutomaticScaling,
cool_down_period_sec=s.Value('cool_down_period',
converter=c.SecondsToDuration),
cpu_utilization=s.Message(
target_utilization=s.Value(),
aggregation_window_length_sec=s.Value('aggregation_window_length',
converter=c.SecondsToDuration)
),
max_num_instances=s.Value('max_total_instances'),
min_pending_latency=s.Value(converter=c.LatencyToDuration),
min_idle_instances=s.Value(converter=
c.StringToInt(handle_automatic=True)),
max_idle_instances=s.Value(converter=
c.StringToInt(handle_automatic=True)),
max_pending_latency=s.Value(converter=c.LatencyToDuration),
max_concurrent_requests=s.Value(converter=c.StringToInt()),
min_num_instances=s.Value('min_total_instances'),
target_network_sent_bytes_per_sec=s.Value(
'target_sent_bytes_per_second'),
target_network_sent_packets_per_sec=s.Value(
'target_sent_packets_per_second'),
target_network_received_bytes_per_sec=s.Value(
'target_received_bytes_per_second'),
target_network_received_packets_per_sec=s.Value(
'target_received_packets_per_second'),
target_disk_write_bytes_per_sec=s.Value(
'target_write_bytes_per_second'),
target_disk_write_ops_per_sec=s.Value(
'target_write_ops_per_second'),
target_disk_read_bytes_per_sec=s.Value(
'target_read_bytes_per_second'),
target_disk_read_ops_per_sec=s.Value(
'target_read_ops_per_second'),
target_request_count_per_sec=s.Value(
'target_request_count_per_second'),
target_concurrent_requests=s.Value()),
basic_scaling=s.Message(
idle_timeout=s.Value(converter=c.IdleTimeoutToDuration),
max_instances=s.Value(converter=c.StringToInt())),
beta_settings=s.Map(),
default_expiration=s.Value(converter=c.ExpirationToDuration),
endpoints_api_service=s.Message(
name=s.Value(),
config_id=s.Value(),
),
env=s.Value(),
env_variables=s.Map(),
error_handlers=s.RepeatedField(element=s.Message(
error_code=s.Value(converter=c.EnumConverter('ERROR_CODE')),
file=s.Value('static_file', converter=c.ToJsonString),
mime_type=s.Value(converter=c.ToJsonString))),
# Restructure the handler after it's complete, since this is more
# complicated than a simple rename.
handlers=s.RepeatedField(element=s.Message(
converter=c.ConvertUrlHandler,
auth_fail_action=s.Value(converter=c.EnumConverter('AUTH_FAIL_ACTION')),
static_dir=s.Value(converter=c.ToJsonString),
secure=s.Value('security_level', converter=c.EnumConverter('SECURE')),
redirect_http_response_code=s.Value(
converter=c.EnumConverter('REDIRECT_HTTP_RESPONSE_CODE')),
http_headers=s.Map(),
url=s.Value('url_regex'),
expiration=s.Value(converter=c.ExpirationToDuration),
static_files=s.Value('path', converter=c.ToJsonString),
script=s.Value('script_path', converter=c.ToJsonString),
upload=s.Value('upload_path_regex', converter=c.ToJsonString),
api_endpoint=s.Value(),
application_readable=s.Value(),
position=s.Value(),
login=s.Value(converter=c.EnumConverter('LOGIN')),
mime_type=s.Value(converter=c.ToJsonString),
require_matching_file=s.Value())),
health_check=s.Message(
check_interval_sec=s.Value('check_interval',
converter=c.SecondsToDuration),
timeout_sec=s.Value('timeout', converter=c.SecondsToDuration),
healthy_threshold=s.Value(),
enable_health_check=s.Value('disable_health_check', converter=c.Not),
unhealthy_threshold=s.Value(),
host=s.Value(converter=c.ToJsonString),
restart_threshold=s.Value()),
inbound_services=s.RepeatedField(element=s.Value(
converter=c.EnumConverter('INBOUND_SERVICE'))),
instance_class=s.Value(converter=c.ToJsonString),
libraries=s.RepeatedField(element=s.Message(
version=s.Value(converter=c.ToJsonString),
name=s.Value(converter=c.ToJsonString))),
manual_scaling=s.Message(
instances=s.Value(converter=c.StringToInt())),
network=s.Message(
instance_tag=s.Value(converter=c.ToJsonString),
name=s.Value(converter=c.ToJsonString),
subnetwork_name=s.Value(converter=c.ToJsonString),
forwarded_ports=s.RepeatedField(element=s.Value(converter=
c.ToJsonString))),
nobuild_files=s.Value('nobuild_files_regex', converter=c.ToJsonString),
resources=s.Message(
memory_gb=s.Value(),
disk_size_gb=s.Value('disk_gb'),
cpu=s.Value(),
volumes=s.RepeatedField(element=s.Message(
name=s.Value(converter=c.ToJsonString),
volume_type=s.Value(converter=c.ToJsonString),
size_gb=s.Value()))),
runtime=s.Value(converter=c.ToJsonString),
threadsafe=s.Value(),
version=s.Value('id', converter=c.ToJsonString),
vm=s.Value(),
vm_settings=s.Map('beta_settings'))
| 48.539568 | 87 | 0.680006 |
83e29d4bef87b47e796227dcefb85fed59ecc32c | 21,009 | py | Python | src/scripts/slurm_status.py | yongleyuan/BLAH | bf1defb3447c8e3535a04feaa1171a4ee9c3592f | [
"Apache-2.0"
] | null | null | null | src/scripts/slurm_status.py | yongleyuan/BLAH | bf1defb3447c8e3535a04feaa1171a4ee9c3592f | [
"Apache-2.0"
] | null | null | null | src/scripts/slurm_status.py | yongleyuan/BLAH | bf1defb3447c8e3535a04feaa1171a4ee9c3592f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# File: slurm_status.py
#
# Author: Brian Bockelman (bbockelm@cse.unl.edu)
# Jaime Frey (jfrey@cs.wisc.edu)
#
# Copyright (c) University of Nebraska-Lincoln. 2012
# University of Wisconsin-Madison. 2016
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Query SLURM for the status of a given job
Internally, it creates a cache of the SLURM response for all jobs and
will reuse this for subsequent queries.
"""
import os
import re
import pwd
import sys
import time
import errno
import fcntl
import random
import struct
import subprocess
import signal
import tempfile
import traceback
import pickle
import csv
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
import blah
cache_timeout = 60
launchtime = time.time()
def log(msg):
"""
A very lightweight log - not meant to be used in production, but helps
when debugging scale tests
"""
print(time.strftime("%x %X"), os.getpid(), msg, file=sys.stderr)
def createCacheDir():
uid = os.geteuid()
username = pwd.getpwuid(uid).pw_name
cache_dir = os.path.join("/var/tmp", "slurm_cache_%s" % username)
try:
os.mkdir(cache_dir, 0o755)
except OSError as oe:
if oe.errno != errno.EEXIST:
raise
s = os.stat(cache_dir)
if s.st_uid != uid:
raise Exception("Unable to check cache because it is owned by UID %d" % s.st_uid)
return cache_dir
def initLog():
"""
Determine whether to create a logfile based on the presence of a file
in the user's slurm cache directory. If so, make the logfile there.
"""
cache_dir = createCacheDir()
if os.path.exists(os.path.join(cache_dir, "slurm_status.debug")):
filename = os.path.join(cache_dir, "slurm_status.log")
else:
filename = "/dev/null"
fd = open(filename, "a")
# Do NOT close the file descriptor blahp originally hands us for stderr.
# This causes blahp to lose all status updates.
os.dup(2)
os.dup2(fd.fileno(), 2)
# Something else from a prior life - see gratia-probe-common's GratiaWrapper.py
def ExclusiveLock(fd, timeout=120):
"""
Grabs an exclusive lock on fd
If the lock is owned by another process, and that process is older than the
timeout, then the other process will be signaled. If the timeout is
negative, then the other process is never signaled.
If we are unable to hold the lock, this call will not block on the lock;
rather, it will throw an exception.
By default, the timeout is 120 seconds.
"""
# POSIX file locking is cruelly crude. There's nothing to do besides
# try / sleep to grab the lock, no equivalent of polling.
# Why hello, thundering herd.
# An alternative would be to block on the lock, and use signals to interrupt.
# This would mess up Gratia's flawed use of signals already, and not be
# able to report on who has the lock. I don't like indefinite waits!
max_time = 30
starttime = time.time()
tries = 1
while time.time() - starttime < max_time:
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
return
except IOError as ie:
if not ((ie.errno == errno.EACCES) or (ie.errno == errno.EAGAIN)):
raise
if check_lock(fd, timeout):
time.sleep(.2) # Fast case; however, we have *no clue* how
# long it takes to clean/release the old lock.
# Nor do we know if we'd get it if we did
# fcntl.lockf w/ blocking immediately. Blech.
# Check again immediately, especially if this was the last
# iteration in the for loop.
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
return
except IOError as ie:
if not ((ie.errno == errno.EACCES) or (ie.errno == errno.EAGAIN)):
raise
sleeptime = random.random()
log("Unable to acquire lock, try %i; will sleep for %.2f " \
"seconds and try for %.2f more seconds." % (tries, sleeptime, max_time - (time.time()-starttime)))
tries += 1
time.sleep(sleeptime)
log("Fatal exception - Unable to acquire lock")
raise Exception("Unable to acquire lock")
def check_lock(fd, timeout):
"""
For internal use only.
Given a fd that is locked, determine which process has the lock.
Kill said process if it is older than "timeout" seconds.
This will log the PID of the "other process".
"""
pid = get_lock_pid(fd)
if pid == os.getpid():
return True
if timeout < 0:
log("Another process, %d, holds the cache lock." % pid)
return False
try:
age = get_pid_age(pid)
except:
log("Another process, %d, holds the cache lock." % pid)
log("Unable to get the other process's age; will not time it out.")
return False
log("Another process, %d (age %d seconds), holds the cache lock." % (pid, age))
if age > timeout:
os.kill(pid, signal.SIGKILL)
else:
return False
return True
linux_struct_flock = "hhxxxxqqixxxx"
try:
os.O_LARGEFILE
except AttributeError:
    # Without large-file support, off_t is a plain C long; use that layout.
    linux_struct_flock = "hhlli"
def get_lock_pid(fd):
# For reference, here's the definition of struct flock on Linux
# (/usr/include/bits/fcntl.h).
#
# struct flock
# {
# short int l_type; /* Type of lock: F_RDLCK, F_WRLCK, or F_UNLCK. */
# short int l_whence; /* Where `l_start' is relative to (like `lseek'). */
# __off_t l_start; /* Offset where the lock begins. */
# __off_t l_len; /* Size of the locked area; zero means until EOF. */
# __pid_t l_pid; /* Process holding the lock. */
# };
#
# Note that things are different on Darwin
# Assuming off_t is unsigned long long, pid_t is int
try:
if sys.platform == "darwin":
arg = struct.pack("QQihh", 0, 0, 0, fcntl.F_WRLCK, 0)
else:
arg = struct.pack(linux_struct_flock, fcntl.F_WRLCK, 0, 0, 0, 0)
result = fcntl.fcntl(fd, fcntl.F_GETLK, arg)
except IOError as ie:
if ie.errno != errno.EINVAL:
raise
log("Unable to determine which PID has the lock due to a " \
"python portability failure. Contact the developers with your" \
" platform information for support.")
return False
if sys.platform == "darwin":
_, _, pid, _, _ = struct.unpack("QQihh", result)
else:
_, _, _, _, pid = struct.unpack(linux_struct_flock, result)
return pid
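# Illustrative sanity check (an assumption about a typical x86_64 Linux/glibc
# build, not part of the original script): the pack format above matches the
# 32-byte struct flock layout:
#     >>> struct.calcsize(linux_struct_flock)
#     32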
def get_pid_age(pid):
now = time.time()
st = os.stat("/proc/%d" % pid)
return now - st.st_ctime
def call_squeue(jobid="", cluster=""):
"""
Call squeue directly for a jobid.
    If none is specified, query all of the current user's jobs.
Returns a python dictionary with the job info.
"""
squeue = get_slurm_location('squeue')
starttime = time.time()
log("Starting squeue.")
command = (squeue, '-o', '%i %T')
if cluster:
command += ('-M', cluster)
if jobid:
command += ('-j', jobid)
else:
uid = os.geteuid()
username = pwd.getpwuid(uid).pw_name
command += ('-u', username)
squeue_proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
squeue_out, _ = squeue_proc.communicate()
# In Python 3 subprocess.Popen opens streams as bytes so we need to decode them into str
    if not isinstance(squeue_out, str):
squeue_out = squeue_out.decode('latin-1')
log("Finished squeue (time=%f)." % (time.time()-starttime))
if squeue_proc.returncode == 0:
result = parse_squeue(squeue_out)
if jobid and (squeue_proc.returncode == 1 or squeue_proc.returncode == 0 and jobid not in result): # Completed
result = {jobid: {'BatchJobId': '"%s"' % jobid, "JobStatus": "4", "ExitCode": ' 0'}}
elif squeue_proc.returncode != 0:
raise Exception("squeue failed with exit code %s" % str(squeue_proc.returncode))
# If the job has completed...
    if jobid != "" and jobid in result and "JobStatus" in result[jobid] and (result[jobid]["JobStatus"] == '4' or result[jobid]["JobStatus"] == '3'):
# Get the finished job stats and update the result
finished_job_stats = get_finished_job_stats(jobid, cluster)
result[jobid].update(finished_job_stats)
return result
def which(program):
"""
Determine if the program is in the path.
arg program: name of the program to search
returns: full path to executable, or None if executable is not found
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def convert_cpu_to_seconds(cpu_string):
# The time fields in sacct's output have this format:
# [DD-[hh:]]mm:ss.sss
# Convert that to just seconds.
elem = re.split('[-:]', cpu_string)
# Convert seconds to a float, truncate to int at end
secs = float(elem[-1]) + int(elem[-2]) * 60
if len(elem) > 2:
secs += int(elem[-3]) * 3600
if len(elem) > 3:
secs += int(elem[-4]) * 86400
return int(secs)
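# Worked examples of the conversion above (illustrative values, not taken from
# any real sacct output):
#     convert_cpu_to_seconds("05:30.25")     -> 5*60 + 30                  = 330
#     convert_cpu_to_seconds("1-02:03:04.5") -> 86400 + 2*3600 + 3*60 + 4  = 93784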
def get_finished_job_stats(jobid, cluster):
"""
Get a completed job's statistics such as used RAM and cpu usage.
"""
# First, list the attributes that we want
return_dict = { "ImageSize": 0, "ExitCode": 0, "RemoteUserCpu": 0, "RemoteSysCpu": 0 }
# Next, query the appropriate interfaces for the completed job information
sacct = get_slurm_location('sacct')
if cluster != "":
sacct += " -M %s" % cluster
log("Querying sacct for completed job for jobid: %s" % (str(jobid)))
# List of attributes required from sacct
attributes = "UserCPU,SystemCPU,MaxRSS,ExitCode"
child_stdout = os.popen("%s -j %s --noconvert -P --format %s" % (sacct, str(jobid), attributes))
sacct_data = child_stdout.readlines()
ret = child_stdout.close()
if ret:
# retry without --noconvert for slurm < 15.8
child_stdout = os.popen("%s -j %s -P --format %s" % (sacct, str(jobid), attributes))
sacct_data = child_stdout.readlines()
child_stdout.close()
try:
reader = csv.DictReader(sacct_data, delimiter="|")
except Exception as e:
log("Unable to read in CSV output from sacct: %s" % str(e))
return return_dict
# Slurm can return more than 1 row, for some odd reason.
# so sum up relevant values
for row in reader:
if row["UserCPU"] is not "":
try:
return_dict['RemoteUserCpu'] += convert_cpu_to_seconds(row["UserCPU"])
except:
log("Failed to parse CPU usage for job id %s: %s" % (jobid, row["UserCPU"]))
raise
if row["SystemCPU"] is not "":
try:
return_dict['RemoteSysCpu'] += convert_cpu_to_seconds(row["SystemCPU"])
except:
log("Failed to parse CPU usage for job id %s: %s" % (jobid, row["SystemCPU"]))
raise
if row["MaxRSS"] is not "":
# Remove the trailing [KMGTP] and scale the value appropriately
# Note: We assume that all values will have a suffix, and we
# want the value in kilos.
# With the --noconvert option, there should be no suffix, and the value is in bytes.
try:
value = row["MaxRSS"]
factor = 1
if value[-1] == 'M':
factor = 1024
elif value[-1] == 'G':
factor = 1024 * 1024
elif value[-1] == 'T':
factor = 1024 * 1024 * 1024
elif value[-1] == 'P':
factor = 1024 * 1024 * 1024 * 1024
elif value[-1] == 'K':
factor = 1
else:
                    # The last character is not a recognized scaling suffix, so the value is in bytes; convert to kilobytes
value = str(int(value) / 1024)
return_dict["ImageSize"] += int(float(value.strip('KMGTP'))) * factor
except:
log("Failed to parse memory usage for job id %s: %s" % (jobid, row["MaxRSS"]))
raise
if row["ExitCode"] is not "":
try:
return_dict["ExitCode"] = int(row["ExitCode"].split(":")[0])
except:
log("Failed to parse memory usage for job id %s: %s" % (jobid, row["MaxRSS"]))
raise
return return_dict
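# For reference, the sacct -P output queried above looks roughly like this
# (illustrative line; real output varies and may contain several rows per job):
#     UserCPU|SystemCPU|MaxRSS|ExitCode
#     00:01:23|00:00:02|204800K|0:0
# which would yield RemoteUserCpu=83, RemoteSysCpu=2, ImageSize=204800, ExitCode=0.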
def env_expand(path):
""" substitute occurences of $VAR or ${VAR} in path """
def envmatch(m):
return os.getenv(m.group(1) or m.group(2)) or ""
return re.sub(r'\$(?:{(\w+)}|(\w+))', envmatch, path)
_slurm_location_cache = None
def get_slurm_location(program):
"""
Locate the copy of the slurm bin the blahp configuration wants to use.
"""
global _slurm_location_cache
if _slurm_location_cache is not None:
return os.path.join(_slurm_location_cache, program)
slurm_bindir = env_expand(config.get('slurm_binpath'))
slurm_bin_location = os.path.join(slurm_bindir, program)
if not os.path.exists(slurm_bin_location):
raise Exception("Could not find %s in slurm_binpath=%s" % (program, slurm_bindir))
_slurm_location_cache = slurm_bindir
return slurm_bin_location
job_id_re = re.compile("JobId=([0-9]+) .*")
exec_host_re = re.compile(r"\s*BatchHost=([\w\-.]+)")
status_re = re.compile(r"\s*JobState=([\w]+) .*")
exit_status_re = re.compile(".* ExitCode=(-?[0-9]+:[0-9]+)")
status_mapping = {"BOOT_FAIL": 4, "CANCELLED": 3, "COMPLETED": 4, "CONFIGURING": 1, "COMPLETING": 2, "FAILED": 4, "NODE_FAIL": 4, "PENDING": 1, "PREEMPTED": 4, "RUNNING": 2, "SPECIAL_EXIT": 4, "STOPPED": 2, "SUSPENDED": 2, "TIMEOUT": 4}
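# Note (assumption, not stated in the original script): the integer codes used
# here appear to follow the usual HTCondor/blahp JobStatus convention, i.e.
# 1 = idle, 2 = running, 3 = removed, 4 = completed.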
def parse_squeue(output):
"""
Parse the stdout of "squeue -o '%i %T'" into a python dictionary
containing the information we need.
"""
job_info = {}
cur_job_id = None
for line in output.split('\n'):
line = line.strip()
fields = line.split(' ')
if len(fields) < 2 or fields[0] == "JOBID":
continue
        cur_job_id = fields[0]
cur_job_info = {}
job_info[cur_job_id] = cur_job_info
cur_job_info["BatchJobId"] = cur_job_id
status = status_mapping.get(fields[1], 0)
if status != 0:
cur_job_info["JobStatus"] = str(status)
if cur_job_id:
job_info[cur_job_id] = cur_job_info
return job_info
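# Sketch of the mapping performed above (illustrative input, not captured from
# a real cluster):
#     "JOBID STATE\n12345 RUNNING\n12346 PENDING\n"
# parses to
#     {'12345': {'BatchJobId': '12345', 'JobStatus': '2'},
#      '12346': {'BatchJobId': '12346', 'JobStatus': '1'}}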
def job_dict_to_string(info):
result = ["%s=%s;" % (i[0], i[1]) for i in info.items()]
return "[" + " ".join(result) + " ]"
def fill_cache(cache_location, cluster):
log("Starting query to fill cache.")
results = call_squeue("", cluster)
log("Finished query to fill cache.")
(fd, filename) = tempfile.mkstemp(dir = "/var/tmp")
# Open the file with a proper python file object
f = os.fdopen(fd, "w")
writer = csv.writer(f, delimiter='\t')
try:
try:
for key, val in results.items():
key = key.split(".")[0]
writer.writerow([key, pickle.dumps(val).hex()])
os.fsync(fd)
except:
os.unlink(filename)
raise
finally:
f.close()
os.rename(filename, cache_location)
global launchtime
launchtime = time.time()
cache_line_re = re.compile(r"([0-9]+[\.\w\-]+):\s+(.+)")
def cache_to_status(jobid, fd):
reader = csv.reader(fd, delimiter='\t')
for row in reader:
if row[0] == jobid:
return pickle.loads(bytes.fromhex(row[1]))
def check_cache(jobid, cluster, recurse=True):
uid = os.geteuid()
username = pwd.getpwuid(uid).pw_name
cache_dir = os.path.join("/var/tmp", "slurm_cache_%s" % username)
if recurse:
try:
s = os.stat(cache_dir)
except OSError as oe:
if oe.errno != 2:
raise
os.mkdir(cache_dir, 0o755)
s = os.stat(cache_dir)
if s.st_uid != uid:
raise Exception("Unable to check cache because it is owned by UID %d" % s.st_uid)
cache_location = os.path.join(cache_dir, "blahp_results_cache")
if cluster != "":
cache_location += "-%s" % cluster
try:
fd = open(cache_location, "r+")
except IOError as ie:
if ie.errno != 2:
raise
# Create an empty file so we can hold the file lock
fd = open(cache_location, "w+")
ExclusiveLock(fd)
# If someone grabbed the lock between when we opened and tried to
# acquire, they may have filled the cache
if os.stat(cache_location).st_size == 0:
fill_cache(cache_location, cluster)
fd.close()
if recurse:
return check_cache(jobid, cluster, recurse=False)
else:
return None
ExclusiveLock(fd)
s = os.fstat(fd.fileno())
if s.st_uid != uid:
raise Exception("Unable to check cache file because it is owned by UID %d" % s.st_uid)
if (s.st_size == 0) or (launchtime - s.st_mtime > cache_timeout):
# If someone filled the cache between when we opened the file and
# grabbed the lock, we may not need to fill the cache.
s2 = os.stat(cache_location)
if (s2.st_size == 0) or (launchtime - s2.st_mtime > cache_timeout):
fill_cache(cache_location, cluster)
if recurse:
return check_cache(jobid, cluster, recurse=False)
else:
return None
return cache_to_status(jobid, fd)
job_status_re = re.compile(r".*JobStatus=(\d+);.*")
def main():
initLog()
# Accept the optional -w argument, but ignore it
if len(sys.argv) == 2:
jobid_arg = sys.argv[1]
elif len(sys.argv) == 3 and sys.argv[1] == "-w":
jobid_arg = sys.argv[2]
else:
print("1Usage: slurm_status.py slurm/<date>/<jobid>")
return 1
jobid = jobid_arg.split("/")[-1]
cluster = ""
jobid_list = jobid.split("@")
    if len(jobid_list) > 1:
jobid = jobid_list[0]
cluster = jobid_list[1]
global config
config = blah.BlahConfigParser(defaults={'slurm_binpath': '/usr/bin'})
log("Checking cache for jobid %s" % jobid)
if cluster != "":
log("Job in remote cluster %s" % cluster)
cache_contents = None
try:
cache_contents = check_cache(jobid, cluster)
except Exception as e:
msg = "1ERROR: Internal exception, %s" % str(e)
log(msg)
#print msg
if not cache_contents:
log("Jobid %s not in cache; querying SLURM" % jobid)
results = call_squeue(jobid, cluster)
log("Finished querying SLURM for jobid %s" % jobid)
if not results or jobid not in results:
log("1ERROR: Unable to find job %s" % jobid)
print("1ERROR: Unable to find job %s" % jobid)
else:
log("0%s" % job_dict_to_string(results[jobid]))
print("0%s" % job_dict_to_string(results[jobid]))
else:
log("Jobid %s in cache." % jobid)
log("0%s" % job_dict_to_string(cache_contents))
if cache_contents["JobStatus"] == '4' or cache_contents["JobStatus"] == '3':
finished_job_stats = get_finished_job_stats(jobid, cluster)
cache_contents.update(finished_job_stats)
print("0%s" % job_dict_to_string(cache_contents))
return 0
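# Typical invocation and output, following the blahp status-script protocol
# (the leading digit is the return status; the date and jobid below are made up):
#     $ slurm_status.py slurm/20190101/12345
#     0[BatchJobId="12345"; JobStatus=2; ]
#     $ slurm_status.py slurm/20190101/12345@cluster2   # job on a named cluster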
if __name__ == "__main__":
try:
sys.exit(main())
except SystemExit:
raise
except Exception as e:
exc_traceback = sys.exc_info()[2]
tb = traceback.extract_tb(exc_traceback)
log(traceback.format_exc())
print("1ERROR: {0}: {1} (file {2}, line {3})".format(e.__class__.__name__, str(e).replace("\n", "\\n"),
tb[-1].filename, tb[-1].lineno))
sys.exit(0)
| 35.608475 | 236 | 0.597839 |
d4902ccc3cc928e23f292f3d700a7a75cec4673d | 258 | py | Python | isimip_data/core/templatetags/core_tags.py | ISI-MIP/isimip-data | a0e4772362cc60db91e7689ec397840dcaaacddb | [
"MIT"
] | 3 | 2020-02-10T10:13:17.000Z | 2021-12-21T09:10:50.000Z | isimip_data/core/templatetags/core_tags.py | ISI-MIP/isimip-data | a0e4772362cc60db91e7689ec397840dcaaacddb | [
"MIT"
] | 17 | 2020-02-10T16:09:12.000Z | 2021-07-02T09:03:37.000Z | isimip_data/core/templatetags/core_tags.py | ISI-MIP/isimip-data | a0e4772362cc60db91e7689ec397840dcaaacddb | [
"MIT"
] | null | null | null | from django import template
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter(is_safe=True)
@stringfilter
def split_br(string):
return [paragraph for paragraph in string.split('<br>') if paragraph]
| 23.454545 | 73 | 0.790698 |
43e93b823f866ba3f9bc569af7cf0fbaf08d0c13 | 70,896 | py | Python | src/sage/rings/polynomial/multi_polynomial_element.py | qedhandle/sage | 8453ffb849b047893b6c61dd09176a84c9133342 | [
"BSL-1.0"
] | null | null | null | src/sage/rings/polynomial/multi_polynomial_element.py | qedhandle/sage | 8453ffb849b047893b6c61dd09176a84c9133342 | [
"BSL-1.0"
] | null | null | null | src/sage/rings/polynomial/multi_polynomial_element.py | qedhandle/sage | 8453ffb849b047893b6c61dd09176a84c9133342 | [
"BSL-1.0"
] | null | null | null | """
Generic Multivariate Polynomials
AUTHORS:
- David Joyner: first version
- William Stein: use dict's instead of lists
- Martin Albrecht malb@informatik.uni-bremen.de: some functions added
- William Stein (2006-02-11): added better __div__ behavior.
- Kiran S. Kedlaya (2006-02-12): added Macaulay2 analogues of some
Singular features
- William Stein (2006-04-19): added e.g.,
``f[1,3]`` to get coeff of `xy^3`; added examples of the new
``R.x,y = PolynomialRing(QQ,2)`` notation.
- Martin Albrecht: improved singular coercions (restructured class
hierarchy) and added ETuples
- Robert Bradshaw (2007-08-14): added support for coercion of
polynomials in a subset of variables (including multi-level
univariate rings)
- Joel B. Mohler (2008-03): Refactored interactions with ETuples.
EXAMPLES:
We verify Lagrange's four squares identity::
sage: R.<a0,a1,a2,a3,b0,b1,b2,b3> = QQbar[]
sage: (a0^2 + a1^2 + a2^2 + a3^2)*(b0^2 + b1^2 + b2^2 + b3^2) == (a0*b0 - a1*b1 - a2*b2 - a3*b3)^2 + (a0*b1 + a1*b0 + a2*b3 - a3*b2)^2 + (a0*b2 - a1*b3 + a2*b0 + a3*b1)^2 + (a0*b3 + a1*b2 - a2*b1 + a3*b0)^2
True
"""
#*****************************************************************************
#
# Sage: Open Source Mathematical Software
#
# Copyright (C) 2005 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
#*****************************************************************************
from sage.structure.element import CommutativeRingElement, coerce_binop
from sage.misc.all import prod
import sage.rings.integer
from sage.rings.qqbar_decorators import handle_AA_and_QQbar
from . import polydict
from sage.structure.factorization import Factorization
from sage.rings.polynomial.polynomial_singular_interface import Polynomial_singular_repr
from sage.structure.sequence import Sequence
from .multi_polynomial import MPolynomial
from sage.categories.morphism import Morphism
from sage.misc.lazy_attribute import lazy_attribute
def is_MPolynomial(x):
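    """
    Return whether ``x`` is an instance of :class:`MPolynomial`.
    """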
return isinstance(x, MPolynomial)
class MPolynomial_element(MPolynomial):
def __init__(self, parent, x):
"""
EXAMPLES::
sage: K.<cuberoot2> = NumberField(x^3 - 2)
sage: L.<cuberoot3> = K.extension(x^3 - 3)
sage: S.<sqrt2> = L.extension(x^2 - 2)
sage: S
Number Field in sqrt2 with defining polynomial x^2 - 2 over its base field
sage: P.<x,y,z> = PolynomialRing(S) # indirect doctest
"""
CommutativeRingElement.__init__(self, parent)
self.__element = x
def _repr_(self):
"""
EXAMPLES::
sage: P.<x,y,z> = PolynomialRing(QQbar)
sage: x + QQbar.random_element() # indirect doctest
x + 0.4142135623730951?
"""
return "%s"%self.__element
####################
def __call__(self, *x, **kwds):
r"""
Evaluate this multi-variate polynomial at `x`, where
`x` is either the tuple of values to substitute in, or one
can use functional notation `f(a_0,a_1,a_2, \ldots)` to
evaluate `f` with the ith variable replaced by
`a_i`.
EXAMPLES::
sage: R.<x,y> = CC[]
sage: f = x^2 + y^2
sage: f(1,2)
5.00000000000000
sage: f((1,2))
5.00000000000000
::
sage: x = PolynomialRing(CC,3,'x').gens()
sage: f = x[0] + x[1] - 2*x[1]*x[2]
sage: f
(-2.00000000000000)*x1*x2 + x0 + x1
sage: f(1,2,0)
3.00000000000000
sage: f(1,2,5)
-17.0000000000000
TESTS:
Check :trac:`27446`::
sage: P = PolynomialRing(QQ, 't', 0)
sage: a = P(1)
sage: a(()).parent()
Rational Field
AUTHORS:
- David Kohel (2005-09-27)
"""
if len(kwds) > 0:
f = self.subs(**kwds)
if len(x) > 0:
return f(*x)
else:
return f
if len(x) == 1 and isinstance(x[0], (list, tuple)):
x = x[0]
n = self.parent().ngens()
if len(x) != n:
raise TypeError("x must be of correct length")
if n == 0:
return self.constant_coefficient()
try:
K = x[0].parent()
except AttributeError:
K = self.parent().base_ring()
y = K(0)
for (m,c) in self.element().dict().items():
y += c*prod([ x[i]**m[i] for i in range(n) if m[i] != 0])
return y
def _richcmp_(self, right, op):
"""
Compare ``self`` to ``right`` with respect to the term order of
self.parent().
EXAMPLES::
sage: R.<x,y,z>=PolynomialRing(QQbar,3,order='lex')
sage: x^1*y^2 > y^3*z^4
True
sage: x^3*y^2*z^4 < x^3*y^2*z^1
False
::
sage: R.<x,y,z>=PolynomialRing(CC,3,order='deglex')
sage: x^1*y^2*z^3 > x^3*y^2*z^0
True
sage: x^1*y^2*z^4 < x^1*y^1*z^5
False
::
sage: R.<x,y,z>=PolynomialRing(QQbar,3,order='degrevlex')
sage: x^1*y^5*z^2 > x^4*y^1*z^3
True
sage: x^4*y^7*z^1 < x^4*y^2*z^3
False
"""
return self.__element.rich_compare(right.__element, op,
self.parent().term_order().sortkey)
def _im_gens_(self, codomain, im_gens, base_map=None):
"""
EXAMPLES::
sage: R.<x,y> = PolynomialRing(QQbar, 2)
sage: f = R.hom([y,x], R)
sage: f(x^2 + 3*y^5) # indirect doctest
3*x^5 + y^2
You can specify a map on the base ring::
sage: F.<x,y> = ZZ[]
sage: F = F.fraction_field(); x,y = F(x),F(y)
sage: cc = F.hom([y,x])
sage: R.<z,w> = F[]
sage: phi = R.hom([w,z], base_map=cc)
sage: phi(w/x)
1/y*z
"""
n = self.parent().ngens()
if n == 0:
return codomain._coerce_(self)
y = codomain(0)
if base_map is None:
# Just use conversion
base_map = codomain
for (m,c) in self.element().dict().items():
y += base_map(c)*prod([ im_gens[i]**m[i] for i in range(n) if m[i] ])
return y
def number_of_terms(self):
"""
Return the number of non-zero coefficients of this polynomial.
This is also called weight, :meth:`hamming_weight` or sparsity.
EXAMPLES::
sage: R.<x, y> = CC[]
sage: f = x^3 - y
sage: f.number_of_terms()
2
sage: R(0).number_of_terms()
0
sage: f = (x+y)^100
sage: f.number_of_terms()
101
The method :meth:`hamming_weight` is an alias::
sage: f.hamming_weight()
101
"""
return len(self.element().dict())
hamming_weight = number_of_terms
def _add_(self, right):
#return self.parent()(self.__element + right.__element)
return self.__class__(self.parent(),self.__element + right.__element)
def _sub_(self, right):
# return self.parent()(self.__element - right.__element)
return self.__class__(self.parent(),self.__element - right.__element)
def _mul_(self, right):
#return self.parent()(self.__element * right.__element)
return self.__class__(self.parent(),self.__element * right.__element)
def _lmul_(self, a):
"""
Left Scalar Multiplication
EXAMPLES:
Note that it is not really possible to do a meaningful
example since sage mpoly rings refuse to have non-commutative
bases.
::
sage: R.<x,y> = QQbar[]
sage: f = (x + y)
sage: 3*f
3*x + 3*y
"""
return self.__class__(self.parent(),self.__element.scalar_lmult(a))
def _rmul_(self, a):
"""
Right Scalar Multiplication
EXAMPLES:
Note that it is not really possible to do a meaningful
example since sage mpoly rings refuse to have non-commutative
bases.
::
sage: R.<x,y> = QQbar[]
sage: f = (x + y)
sage: f*3
3*x + 3*y
"""
return self.__class__(self.parent(),self.__element.scalar_rmult(a))
def _div_(self, right):
r"""
EXAMPLES::
sage: R.<x,y> = CC['x,y']
sage: f = (x + y)/x; f
(x + y)/x
sage: f.parent()
Fraction Field of Multivariate Polynomial Ring in x, y over
Complex Field with 53 bits of precision
If dividing by a scalar, there is no need to go to the fraction
field of the polynomial ring::
sage: f = (x + y)/2; f
0.500000000000000*x + 0.500000000000000*y
sage: f.parent()
Multivariate Polynomial Ring in x, y over Complex Field with
53 bits of precision
TESTS:
Ensure that :trac:`13704` is fixed.::
sage: R.<t>=PolynomialRing(QQ)
sage: S.<x,y>=PolynomialRing(R)
sage: x/S(2)
1/2*x
"""
if right in self.base_ring():
inv = self.base_ring().one()/self.base_ring()(right)
return inv*self
return self.parent().fraction_field()(self, right, coerce=False)
def __rpow__(self, n):
if not isinstance(n, (int, sage.rings.integer.Integer)):
raise TypeError("The exponent must be an integer.")
return self.parent()(self.__element**n)
def element(self):
return self.__element
def change_ring(self, R):
r"""
Change the base ring of this polynomial to ``R``.
INPUT:
- ``R`` -- ring or morphism.
OUTPUT: a new polynomial converted to ``R``.
EXAMPLES::
sage: R.<x,y> = QQ[]
sage: f = x^2 + 5*y
sage: f.change_ring(GF(5))
x^2
::
sage: K.<w> = CyclotomicField(5)
sage: R.<x,y> = K[]
sage: f = x^2 + w*y
sage: f.change_ring(K.embeddings(QQbar)[1])
x^2 + (-0.8090169943749474? + 0.5877852522924731?*I)*y
"""
if isinstance(R, Morphism):
#if we're given a hom of the base ring extend to a poly hom
if R.domain() == self.base_ring():
R = self.parent().hom(R, self.parent().change_ring(R.codomain()))
return R(self)
else:
return self.parent().change_ring(R)(self)
class MPolynomial_polydict(Polynomial_singular_repr, MPolynomial_element):
r"""
Multivariate polynomials implemented in pure python using
polydicts.
"""
def __init__(self, parent, x):
"""
EXAMPLES::
sage: R, x = PolynomialRing(QQbar, 10, 'x').objgens()
sage: x
(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9)
sage: loads(dumps(x)) == x
True
"""
if not isinstance(x, polydict.PolyDict):
x = polydict.PolyDict(x, parent.base_ring().zero(), remove_zero=True)
MPolynomial_element.__init__(self, parent, x)
def _new_constant_poly(self, x, P):
"""
Quickly create a new constant polynomial with value x in parent P.
ASSUMPTION:
x must be an element of the base ring of P. That assumption is
not verified.
EXAMPLES::
sage: R.<x,y> = QQ['t'][]
sage: x._new_constant_poly(R.base_ring()(2),R)
2
"""
return MPolynomial_polydict(P, {P._zero_tuple:x})
def __neg__(self):
"""
EXAMPLES::
sage: R.<x,y>=QQbar[]
sage: -x
-x
sage: -(y-1)
-y + 1
"""
return self*(-1)
def _repr_(self):
"""
EXAMPLES::
sage: R.<x,y>=QQbar[]
sage: repr(-x^2-y+1) # indirect doc-test
'-x^2 - y + 1'
sage: K.<I>=QuadraticField(-1)
sage: R.<x,y>=K[]
sage: repr(-I*y-x^2) # indirect doc-test
'-x^2 + (-I)*y'
"""
try:
key = self.parent().term_order().sortkey
except AttributeError:
key = None
atomic = self.parent().base_ring()._repr_option('element_is_atomic')
return self.element().poly_repr(self.parent().variable_names(),
atomic_coefficients=atomic,
sortkey=key)
def _latex_(self):
r"""
EXAMPLES::
sage: R.<x,y>=QQbar[]
sage: latex(-x^2-y+1)
-x^{2} - y + 1
sage: K.<I>=QuadraticField(-1)
sage: R.<x,y>=K[]
sage: latex(-I*y+I*x^2)
\left(\sqrt{-1}\right) x^{2} + \left(-\sqrt{-1}\right) y
"""
try:
key = self.parent().term_order().sortkey
except AttributeError:
key = None
atomic = self.parent().base_ring()._repr_option('element_is_atomic')
return self.element().latex(self.parent().latex_variable_names(),
atomic_coefficients=atomic, sortkey=key)
def _repr_with_changed_varnames(self, varnames):
"""
EXAMPLES::
sage: R.<x,y>=QQbar[]
sage: f=-x^2-y+1
sage: f._repr_with_changed_varnames(['jack','jill'])
'-jack^2 - jill + 1'
"""
try:
key = self.parent().term_order().sortkey
except AttributeError:
key = None
atomic = self.parent().base_ring()._repr_option('element_is_atomic')
return self.element().poly_repr(varnames,
atomic_coefficients=atomic, sortkey=key)
def _macaulay2_(self, macaulay2=None):
"""
EXAMPLES::
sage: R = GF(13)['a,b']['c,d']
sage: macaulay2(R('a^2 + c')) # optional - macaulay2
2
c + a
TESTS:
Elements of the base ring are coerced to the polynomial ring
correctly::
sage: macaulay2(R('a^2')).ring()._operator('===', R) # optional - macaulay2
true
"""
if macaulay2 is None:
from sage.interfaces.macaulay2 import macaulay2 as m2_default
macaulay2 = m2_default
m2_parent = macaulay2(self.parent())
macaulay2.use(m2_parent)
return macaulay2('substitute(%s,%s)' % (repr(self), m2_parent._name))
def degrees(self):
r"""
Returns a tuple (precisely - an ``ETuple``) with the
degree of each variable in this polynomial. The list of degrees is,
of course, ordered by the order of the generators.
EXAMPLES::
sage: R.<x,y,z>=PolynomialRing(QQbar)
sage: f = 3*x^2 - 2*y + 7*x^2*y^2 + 5
sage: f.degrees()
(2, 2, 0)
sage: f = x^2+z^2
sage: f.degrees()
(2, 0, 2)
sage: f.total_degree() # this simply illustrates that total degree is not the sum of the degrees
2
sage: R.<x,y,z,u>=PolynomialRing(QQbar)
sage: f=(1-x)*(1+y+z+x^3)^5
sage: f.degrees()
(16, 5, 5, 0)
sage: R(0).degrees()
(0, 0, 0, 0)
"""
if self.is_zero():
return polydict.ETuple({},self.parent().ngens())
else:
return self._MPolynomial_element__element.max_exp()
def degree(self, x=None, std_grading=False):
"""
Return the degree of self in x, where x must be one of the
generators for the parent of self.
INPUT:
- ``x`` - multivariate polynomial (a generator of the parent
of self). If ``x`` is not specified (or is None), return
the total degree, which is the maximum degree of any
monomial. Note that a weighted term ordering alters the
grading of the generators of the ring; see the tests below.
To avoid this behavior, set the optional argument ``std_grading=True``.
OUTPUT: integer
EXAMPLES::
sage: R.<x,y> = RR[]
sage: f = y^2 - x^9 - x
sage: f.degree(x)
9
sage: f.degree(y)
2
sage: (y^10*x - 7*x^2*y^5 + 5*x^3).degree(x)
3
sage: (y^10*x - 7*x^2*y^5 + 5*x^3).degree(y)
10
Note that total degree takes into account if we are working in a polynomial
ring with a weighted term order.
::
sage: R = PolynomialRing(QQ,'x,y',order=TermOrder('wdeglex',(2,3)))
sage: x,y = R.gens()
sage: x.degree()
2
sage: y.degree()
3
sage: x.degree(y),x.degree(x),y.degree(x),y.degree(y)
(0, 1, 0, 1)
sage: f = (x^2*y+x*y^2)
sage: f.degree(x)
2
sage: f.degree(y)
2
sage: f.degree()
8
sage: f.degree(std_grading=True)
3
Note that if ``x`` is not a generator of the parent of self,
for example if it is a generator of a polynomial algebra which
maps naturally to this one, then it is converted to an element
of this algebra. (This fixes the problem reported in
:trac:`17366`.)
::
sage: x, y = ZZ['x','y'].gens()
sage: GF(3037000453)['x','y'].gen(0).degree(x)
1
sage: x0, y0 = QQ['x','y'].gens()
sage: GF(3037000453)['x','y'].gen(0).degree(x0)
Traceback (most recent call last):
...
TypeError: x must canonically coerce to parent
sage: GF(3037000453)['x','y'].gen(0).degree(x^2)
Traceback (most recent call last):
...
TypeError: x must be one of the generators of the parent
TESTS::
sage: R = PolynomialRing(GF(2)['t'],'x,y',order=TermOrder('wdeglex',(2,3)))
sage: x,y = R.gens()
sage: x.degree()
2
sage: y.degree()
3
sage: x.degree(y),x.degree(x),y.degree(x),y.degree(y)
(0, 1, 0, 1)
sage: f = (x^2*y+x*y^2)
sage: f.degree(x)
2
sage: f.degree(y)
2
sage: f.degree()
8
sage: f.degree(std_grading=True)
3
sage: R(0).degree()
-1
Degree of zero polynomial for other implementation :trac:`20048` ::
sage: R.<x,y> = GF(3037000453)[]
sage: R.zero().degree(x)
-1
"""
if x is None:
if std_grading or not self.parent().term_order().is_weighted_degree_order():
return self.element().degree(None)
return self.weighted_degree(self.parent().term_order().weights())
if isinstance(x, MPolynomial):
if not x.parent() is self.parent():
try:
x = self.parent().coerce(x)
except TypeError:
raise TypeError("x must canonically coerce to parent")
if not x.is_generator():
raise TypeError("x must be one of the generators of the parent")
else:
raise TypeError("x must be one of the generators of the parent")
return self.element().degree(x.element())
def total_degree(self):
"""
Return the total degree of self, which is the maximum degree of any
monomial in self.
EXAMPLES::
sage: R.<x,y,z> = QQbar[]
sage: f=2*x*y^3*z^2
sage: f.total_degree()
6
sage: f=4*x^2*y^2*z^3
sage: f.total_degree()
7
sage: f=99*x^6*y^3*z^9
sage: f.total_degree()
18
sage: f=x*y^3*z^6+3*x^2
sage: f.total_degree()
10
sage: f=z^3+8*x^4*y^5*z
sage: f.total_degree()
10
sage: f=z^9+10*x^4+y^8*x^2
sage: f.total_degree()
10
"""
return self.degree()
def monomial_coefficient(self, mon):
"""
Return the coefficient in the base ring of the monomial mon in
self, where mon must have the same parent as self.
This function contrasts with the function
``coefficient`` which returns the coefficient of a
monomial viewing this polynomial in a polynomial ring over a base
ring having fewer variables.
INPUT:
- ``mon`` - a monomial
OUTPUT: coefficient in base ring
.. SEEALSO::
For coefficients in a base ring of fewer variables, look
at :meth:`coefficient`.
EXAMPLES:
The parent of the return is a member of the base ring.
::
sage: R.<x,y>=QQbar[]
sage: f = 2 * x * y
sage: c = f.monomial_coefficient(x*y); c
2
sage: c.parent()
Algebraic Field
::
sage: f = y^2 + y^2*x - x^9 - 7*x + 5*x*y
sage: f.monomial_coefficient(y^2)
1
sage: f.monomial_coefficient(x*y)
5
sage: f.monomial_coefficient(x^9)
-1
sage: f.monomial_coefficient(x^10)
0
::
sage: var('a')
a
sage: K.<a> = NumberField(a^2+a+1)
sage: P.<x,y> = K[]
sage: f=(a*x-1)*((a+1)*y-1); f
-x*y + (-a)*x + (-a - 1)*y + 1
sage: f.monomial_coefficient(x)
-a
"""
if not (isinstance(mon, MPolynomial) and mon.parent() is self.parent() and mon.is_monomial()):
raise TypeError("mon must be a monomial in the parent of self.")
R = self.parent().base_ring()
return R(self.element().monomial_coefficient(mon.element().dict()))
def dict(self):
"""
Return underlying dictionary with keys the exponents and values
the coefficients of this polynomial.
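        For instance (illustrative, not a doctest): for ``f = 3*x^2*y + 5``
        in ``QQbar['x','y']``, ``f.dict()`` is ``{(2, 1): 3, (0, 0): 5}``,
        with ``ETuple`` keys and base-ring values.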
"""
return self.element().dict()
def __iter__(self):
"""
Iterate over ``self`` respecting the term order.
EXAMPLES::
sage: R.<x,y,z> = PolynomialRing(QQbar, order='lex')
sage: f = (x^1*y^5*z^2 + x^2*z + x^4*y^1*z^3)
sage: list(f)
[(1, x^4*y*z^3), (1, x^2*z), (1, x*y^5*z^2)]
::
sage: R.<x,y,z> = PolynomialRing(QQbar, order='deglex')
sage: f = (x^1*y^5*z^2 + x^2*z + x^4*y^1*z^3)
sage: list(f)
[(1, x^4*y*z^3), (1, x*y^5*z^2), (1, x^2*z)]
::
sage: R.<x,y,z> = PolynomialRing(QQbar, order='degrevlex')
sage: f = (x^1*y^5*z^2 + x^2*z + x^4*y^1*z^3)
sage: list(f)
[(1, x*y^5*z^2), (1, x^4*y*z^3), (1, x^2*z)]
::
sage: R = ZZ['t']
sage: P.<x,y,z> = PolynomialRing(R,3)
sage: f = 3*x^3*y + 16*x + 7
sage: [(c,m) for c,m in f]
[(3, x^3*y), (16, x), (7, 1)]
sage: f = P.random_element(10,10)
sage: sum(c*m for c,m in f) == f
True
"""
elt = self.element()
ring = self.parent()
one = ring.base_ring().one()
for exp in self._exponents:
yield (elt[exp],
MPolynomial_polydict(ring, polydict.PolyDict({exp:one},
force_int_exponents=False,
force_etuples=False))
)
def __getitem__(self, x):
"""
Return the coefficient corresponding to ``x``.
INPUT:
- ``x`` -- a tuple or, in case of a single-variable
MPolynomial ring x can also be an integer
EXAMPLES::
sage: R.<x, y> = PolynomialRing(QQbar, 2)
sage: f = -10*x^3*y + 17*x*y
sage: f[3,1]
-10
sage: f[1,1]
17
sage: f[0,1]
0
::
sage: R.<x> = PolynomialRing(QQbar,1); R
Multivariate Polynomial Ring in x over Algebraic Field
sage: f = 5*x^2 + 3; f
5*x^2 + 3
sage: f[2]
5
"""
if isinstance(x, MPolynomial):
return self.monomial_coefficient(x)
if not isinstance(x, tuple):
try:
x = tuple(x)
except TypeError:
x = (x, )
try:
return self.element()[x]
except KeyError:
return self.parent().base_ring().zero()
def iterator_exp_coeff(self, as_ETuples=True):
"""
Iterate over ``self`` as pairs of ((E)Tuple, coefficient).
INPUT:
- ``as_ETuples`` -- (default: ``True``) if ``True`` iterate over
pairs whose first element is an ETuple, otherwise as a tuples
EXAMPLES::
sage: R.<x,y,z> = PolynomialRing(QQbar, order='lex')
sage: f = (x^1*y^5*z^2 + x^2*z + x^4*y^1*z^3)
sage: list(f.iterator_exp_coeff())
[((4, 1, 3), 1), ((2, 0, 1), 1), ((1, 5, 2), 1)]
sage: R.<x,y,z> = PolynomialRing(QQbar, order='deglex')
sage: f = (x^1*y^5*z^2 + x^2*z + x^4*y^1*z^3)
sage: list(f.iterator_exp_coeff(as_ETuples=False))
[((4, 1, 3), 1), ((1, 5, 2), 1), ((2, 0, 1), 1)]
"""
elt = self.element()
if as_ETuples:
for exp in self._exponents:
yield (exp, elt[exp])
else:
for exp in self._exponents:
yield (tuple(exp), elt[exp])
def coefficient(self, degrees):
"""
Return the coefficient of the variables with the degrees specified
in the python dictionary ``degrees``. Mathematically,
this is the coefficient in the base ring adjoined by the variables
of this ring not listed in ``degrees``. However, the
result has the same parent as this polynomial.
This function contrasts with the function
``monomial_coefficient`` which returns the coefficient
in the base ring of a monomial.
INPUT:
- ``degrees`` - Can be any of:
- a dictionary of degree restrictions
- a list of degree restrictions (with None in
the unrestricted variables)
- a monomial (very fast, but not as flexible)
OUTPUT: element of the parent of self
.. SEEALSO::
For coefficients of specific monomials, look at
:meth:`monomial_coefficient`.
EXAMPLES::
sage: R.<x, y> = QQbar[]
sage: f = 2 * x * y
sage: c = f.coefficient({x:1,y:1}); c
2
sage: c.parent()
Multivariate Polynomial Ring in x, y over Algebraic Field
sage: c in PolynomialRing(QQbar, 2, names = ['x','y'])
True
sage: f = y^2 - x^9 - 7*x + 5*x*y
sage: f.coefficient({y:1})
5*x
sage: f.coefficient({y:0})
-x^9 + (-7)*x
sage: f.coefficient({x:0,y:0})
0
sage: f=(1+y+y^2)*(1+x+x^2)
sage: f.coefficient({x:0})
y^2 + y + 1
sage: f.coefficient([0,None])
y^2 + y + 1
sage: f.coefficient(x)
y^2 + y + 1
sage: # Be aware that this may not be what you think!
sage: # The physical appearance of the variable x is deceiving -- particularly if the exponent would be a variable.
sage: f.coefficient(x^0) # outputs the full polynomial
x^2*y^2 + x^2*y + x*y^2 + x^2 + x*y + y^2 + x + y + 1
::
sage: R.<x,y> = RR[]
sage: f=x*y+5
sage: c=f.coefficient({x:0,y:0}); c
5.00000000000000
sage: parent(c)
Multivariate Polynomial Ring in x, y over Real Field with 53 bits of precision
AUTHORS:
- Joel B. Mohler (2007-10-31)
"""
looking_for = None
if isinstance(degrees, MPolynomial) and degrees.parent() == self.parent() and degrees.is_monomial():
looking_for = [e if e > 0 else None for e in degrees._exponents[0]]
elif isinstance(degrees, list):
looking_for = degrees
elif isinstance(degrees, dict):
poly_vars = self.parent().gens()
looking_for = [None] * len(poly_vars)
for d, exp in degrees.items():
for i in range(len(poly_vars)):
if d == poly_vars[i]:
looking_for[i] = exp
if not looking_for:
raise ValueError("You must pass a dictionary list or monomial.")
return self.parent()(self.element().polynomial_coefficient(looking_for))
@lazy_attribute
def _exponents(self):
"""
Return the exponents of the monomials appearing in ``self`` for
internal use only.
EXAMPLES::
sage: R.<a,b,c> = PolynomialRing(QQbar, 3)
sage: f = a^3 + b + 2*b^2
sage: f._exponents
[(3, 0, 0), (0, 2, 0), (0, 1, 0)]
"""
return sorted(self.element().dict(), key=self.parent().term_order().sortkey, reverse=True)
def exponents(self, as_ETuples=True):
r"""
Return the exponents of the monomials appearing in ``self``.
INPUT:
- ``as_ETuples`` -- (default: ``True``): return the list of
exponents as a list of ETuples
OUTPUT:
The list of exponents as a list of ETuples or tuples.
EXAMPLES::
sage: R.<a,b,c> = PolynomialRing(QQbar, 3)
sage: f = a^3 + b + 2*b^2
sage: f.exponents()
[(3, 0, 0), (0, 2, 0), (0, 1, 0)]
By default the list of exponents is a list of ETuples::
sage: type(f.exponents()[0])
<type 'sage.rings.polynomial.polydict.ETuple'>
sage: type(f.exponents(as_ETuples=False)[0])
<... 'tuple'>
TESTS:
Check that we can mutate the list and not change the result::
sage: R.<a,b,c> = PolynomialRing(QQbar, 3)
sage: f = a^3 + b + 2*b^2
sage: E = f.exponents(); E
[(3, 0, 0), (0, 2, 0), (0, 1, 0)]
sage: E.pop()
(0, 1, 0)
sage: E != f.exponents()
True
"""
if as_ETuples:
return list(self._exponents) # Make a shallow copy
else:
return [tuple(e) for e in self._exponents]
def inverse_of_unit(self):
"""
Return the inverse of a unit in a ring.
TESTS::
sage: R.<c> = QQ[]
sage: l = R(2)
sage: l.inverse_of_unit().parent()
Univariate Polynomial Ring in c over Rational Field
"""
if self.is_unit():
d = self.element().dict()
if len(d) != 1:
raise NotImplementedError
return list(d.values())[0].inverse_of_unit()
raise ArithmeticError("is not a unit")
def is_homogeneous(self):
"""
Return True if self is a homogeneous polynomial.
EXAMPLES::
sage: R.<x,y> = QQbar[]
sage: (x+y).is_homogeneous()
True
sage: (x.parent()(0)).is_homogeneous()
True
sage: (x+y^2).is_homogeneous()
False
sage: (x^2 + y^2).is_homogeneous()
True
sage: (x^2 + y^2*x).is_homogeneous()
False
sage: (x^2*y + y^2*x).is_homogeneous()
True
"""
return self.element().is_homogeneous()
def _homogenize(self, var):
r"""
Return ``self`` if ``self`` is homogeneous.
Otherwise return a homogenized polynomial constructed by modifying
the degree of the variable with index ``var``.
INPUT:
- ``var`` - an integer indicating which variable to
use to homogenize (0 <= var < parent(self).ngens())
OUTPUT: a multivariate polynomial
EXAMPLES::
sage: P.<x,y> = QQbar[]
sage: f = x^2 + y + 1 + 5*x*y^1
sage: g = f.homogenize('z'); g # indirect doctest
x^2 + 5*x*y + y*z + z^2
sage: g.parent()
Multivariate Polynomial Ring in x, y, z over Algebraic Field
SEE: ``self.homogenize``
"""
if self.is_homogeneous():
return self
X = self.element().homogenize(var)
R = self.parent()
return R(X)
def is_generator(self):
"""
Return ``True`` if ``self`` is a generator of its parent.
EXAMPLES::
sage: R.<x,y>=QQbar[]
sage: x.is_generator()
True
sage: (x+y-y).is_generator()
True
sage: (x*y).is_generator()
False
"""
elt = self.element()
if len(elt) == 1:
(e, c), = elt.dict().items()
return e.nonzero_values() == [1] and c.is_one()
return False
def is_monomial(self):
"""
Return ``True`` if ``self`` is a monomial, which we define to be a
product of generators with coefficient 1.
Use :meth:`is_term` to allow the coefficient to not be 1.
EXAMPLES::
sage: R.<x,y>=QQbar[]
sage: x.is_monomial()
True
sage: (x+2*y).is_monomial()
False
sage: (2*x).is_monomial()
False
sage: (x*y).is_monomial()
True
To allow a non-1 leading coefficient, use is_term()::
sage: (2*x*y).is_term()
True
sage: (2*x*y).is_monomial()
False
"""
return len(self.element()) == 1 and self.element().coefficients()[0] == 1
def is_term(self):
"""
Return ``True`` if ``self`` is a term, which we define to be a
product of generators times some coefficient, which need
not be 1.
Use :meth:`is_monomial` to require that the coefficient be 1.
EXAMPLES::
sage: R.<x,y>=QQbar[]
sage: x.is_term()
True
sage: (x+2*y).is_term()
False
sage: (2*x).is_term()
True
sage: (7*x^5*y).is_term()
True
To require leading coefficient 1, use is_monomial()::
sage: (2*x*y).is_monomial()
False
sage: (2*x*y).is_term()
True
"""
return len(self.element()) == 1
def subs(self, fixed=None, **kw):
"""
        Fixes some given variables in a given multivariate polynomial and
        returns the changed multivariate polynomial. The polynomial itself
        is not affected. The variable/value pairs for fixing are to be
        provided as a dictionary of the form ``{variable: value}``.
This is a special case of evaluating the polynomial with some of
the variables constants and the others the original variables.
INPUT:
- ``fixed`` - (optional) dictionary of inputs
- ``**kw`` - named parameters
OUTPUT: new MPolynomial
EXAMPLES::
sage: R.<x,y> = QQbar[]
sage: f = x^2 + y + x^2*y^2 + 5
sage: f((5,y))
25*y^2 + y + 30
sage: f.subs({x:5})
25*y^2 + y + 30
"""
variables = list(self.parent().gens())
for i in range(0,len(variables)):
if str(variables[i]) in kw:
variables[i]=kw[str(variables[i])]
elif fixed and variables[i] in fixed:
variables[i] = fixed[variables[i]]
return self(tuple(variables))
def monomials(self):
"""
Returns the list of monomials in self. The returned list is
decreasingly ordered by the term ordering of self.parent().
OUTPUT: list of MPolynomials representing Monomials
EXAMPLES::
sage: R.<x,y> = QQbar[]
sage: f = 3*x^2 - 2*y + 7*x^2*y^2 + 5
sage: f.monomials()
[x^2*y^2, x^2, y, 1]
::
sage: R.<fx,fy,gx,gy> = QQbar[]
sage: F = ((fx*gy - fy*gx)^3)
sage: F
-fy^3*gx^3 + 3*fx*fy^2*gx^2*gy + (-3)*fx^2*fy*gx*gy^2 + fx^3*gy^3
sage: F.monomials()
[fy^3*gx^3, fx*fy^2*gx^2*gy, fx^2*fy*gx*gy^2, fx^3*gy^3]
sage: F.coefficients()
[-1, 3, -3, 1]
sage: sum(map(mul,zip(F.coefficients(),F.monomials()))) == F
True
"""
ring = self.parent()
one = ring.base_ring().one()
return [MPolynomial_polydict(ring, polydict.PolyDict({m:one}, force_int_exponents=False, force_etuples=False))
for m in self._exponents]
def constant_coefficient(self):
"""
Return the constant coefficient of this multivariate polynomial.
EXAMPLES::
sage: R.<x,y> = QQbar[]
sage: f = 3*x^2 - 2*y + 7*x^2*y^2 + 5
sage: f.constant_coefficient()
5
sage: f = 3*x^2
sage: f.constant_coefficient()
0
"""
#v = (0,)*int(self.parent().ngens())
d = self.element().dict()
try:
return d[polydict.ETuple({},self.parent().ngens())]
except KeyError:
return self.parent().base_ring().zero()
def is_univariate(self):
"""
Returns True if this multivariate polynomial is univariate and
False otherwise.
EXAMPLES::
sage: R.<x,y> = QQbar[]
sage: f = 3*x^2 - 2*y + 7*x^2*y^2 + 5
sage: f.is_univariate()
False
sage: g = f.subs({x:10}); g
700*y^2 + (-2)*y + 305
sage: g.is_univariate()
True
sage: f = x^0
sage: f.is_univariate()
True
"""
mons = self.element().dict()
found = -1
for mon in mons:
for i in mon.nonzero_positions():
if found != i:
if found != -1:
return False
else:
found = i
return True
def univariate_polynomial(self, R=None):
"""
Returns a univariate polynomial associated to this multivariate
polynomial.
INPUT:
- ``R`` - (default: None) PolynomialRing
If this polynomial is not in at most one variable, then a
ValueError exception is raised. This is checked using the
is_univariate() method. The new Polynomial is over the same base
ring as the given MPolynomial.
EXAMPLES::
sage: R.<x,y> = QQbar[]
sage: f = 3*x^2 - 2*y + 7*x^2*y^2 + 5
sage: f.univariate_polynomial()
Traceback (most recent call last):
...
TypeError: polynomial must involve at most one variable
sage: g = f.subs({x:10}); g
700*y^2 + (-2)*y + 305
sage: g.univariate_polynomial ()
700*y^2 - 2*y + 305
sage: g.univariate_polynomial(PolynomialRing(QQ,'z'))
700*z^2 - 2*z + 305
TESTS::
sage: P = PolynomialRing(QQ, 0, '')
sage: P(5).univariate_polynomial()
5
"""
if self.parent().ngens() == 0:
if R is None:
return self.base_ring()(self)
else:
return R(self)
if not self.is_univariate():
raise TypeError("polynomial must involve at most one variable")
#construct ring if None
if R is None:
# constant, we just pick first variable from parent
if self.is_constant():
R = self.base_ring()[self.parent().variable_names()[0]]
else:
R = self.base_ring()[str(self.variables()[0])]
monomial_coefficients = self._MPolynomial_element__element.dict()
if not self.is_constant():
var_idx = self.degrees().nonzero_positions()[0] #variable
else:
var_idx = 0 #constant
if len(monomial_coefficients) == 0:
return R(0)
#construct list
lookup = [int(0),] * len(next(iter(monomial_coefficients)))
coefficients = []
for degree in range(max(m[var_idx]
for m in monomial_coefficients.keys()) + 1):
lookup[var_idx] = int(degree)
try:
coefficients.append( monomial_coefficients[ polydict.ETuple(lookup) ] ) #if we find something, add the coefficient
except KeyError:
coefficients.append( 0 ) #else add zero
#construct polynomial
return R(coefficients)
def variables(self):
"""
Returns the tuple of variables occurring in this polynomial.
EXAMPLES::
sage: R.<x,y> = QQbar[]
sage: f = 3*x^2 - 2*y + 7*x^2*y^2 + 5
sage: f.variables()
(x, y)
sage: g = f.subs({x:10}); g
700*y^2 + (-2)*y + 305
sage: g.variables()
(y,)
TESTS:
This shows that the issue at :trac:`7077` is fixed::
sage: x,y,z=polygens(QQ,'x,y,z')
sage: (x^2).variables()
(x,)
"""
return tuple([self.parent().gen(index) for index in self.degrees().nonzero_positions()])
def variable(self,i):
"""
Returns `i`-th variable occurring in this polynomial.
EXAMPLES::
sage: R.<x,y> = QQbar[]
sage: f = 3*x^2 - 2*y + 7*x^2*y^2 + 5
sage: f.variable(0)
x
sage: f.variable(1)
y
"""
return self.variables()[int(i)]
def nvariables(self):
"""
Number of variables in this polynomial
EXAMPLES::
sage: R.<x,y> = QQbar[]
sage: f = 3*x^2 - 2*y + 7*x^2*y^2 + 5
sage: f.nvariables ()
2
sage: g = f.subs({x:10}); g
700*y^2 + (-2)*y + 305
sage: g.nvariables ()
1
"""
return len(self.degrees().nonzero_positions())
def is_constant(self):
"""
Return ``True`` if ``self`` is a constant and ``False`` otherwise.
EXAMPLES::
sage: R.<x,y> = QQbar[]
sage: f = 3*x^2 - 2*y + 7*x^2*y^2 + 5
sage: f.is_constant()
False
sage: g = 10*x^0
sage: g.is_constant()
True
"""
return self.element().is_constant()
def lm(self):
"""
Returns the lead monomial of self with respect to the term order of
self.parent().
EXAMPLES::
sage: R.<x,y,z>=PolynomialRing(GF(7),3,order='lex')
sage: (x^1*y^2 + y^3*z^4).lm()
x*y^2
sage: (x^3*y^2*z^4 + x^3*y^2*z^1).lm()
x^3*y^2*z^4
::
sage: R.<x,y,z>=PolynomialRing(CC,3,order='deglex')
sage: (x^1*y^2*z^3 + x^3*y^2*z^0).lm()
x*y^2*z^3
sage: (x^1*y^2*z^4 + x^1*y^1*z^5).lm()
x*y^2*z^4
::
sage: R.<x,y,z>=PolynomialRing(QQbar,3,order='degrevlex')
sage: (x^1*y^5*z^2 + x^4*y^1*z^3).lm()
x*y^5*z^2
sage: (x^4*y^7*z^1 + x^4*y^2*z^3).lm()
x^4*y^7*z
TESTS::
sage: from sage.rings.polynomial.multi_polynomial_ring import MPolynomialRing_polydict
sage: R.<x,y>=MPolynomialRing_polydict(GF(2),2,order='lex')
sage: f=x+y
sage: f.lm()
x
"""
try:
return self.__lm
except AttributeError:
if self.is_zero():
return self
R = self.parent()
f = self._MPolynomial_element__element.lcmt( R.term_order().greater_tuple )
one = R.base_ring().one()
self.__lm = MPolynomial_polydict(R,polydict.PolyDict({f:one},zero=R.base_ring().zero(),force_int_exponents=False, force_etuples=False))
return self.__lm
def lc(self):
"""
        Returns the leading coefficient of self, i.e.,
        self.coefficient(self.lm()).
EXAMPLES::
sage: R.<x,y,z>=QQbar[]
sage: f=3*x^2-y^2-x*y
sage: f.lc()
3
"""
try:
return self.__lc
except AttributeError:
if self.is_zero():
return self.base_ring()._zero_element
R = self.parent()
f = self._MPolynomial_element__element.dict()
self.__lc = f[self._MPolynomial_element__element.lcmt( R.term_order().greater_tuple )]
return self.__lc
def lt(self):
r"""
        Returns the leading term of self, i.e., self.lc()\*self.lm(). The
notion of "leading term" depends on the ordering defined in the
parent ring.
EXAMPLES::
sage: R.<x,y,z>=PolynomialRing(QQbar)
sage: f=3*x^2-y^2-x*y
sage: f.lt()
3*x^2
sage: R.<x,y,z>=PolynomialRing(QQbar,order="invlex")
sage: f=3*x^2-y^2-x*y
sage: f.lt()
-y^2
TESTS::
sage: from sage.rings.polynomial.multi_polynomial_ring import MPolynomialRing_polydict
sage: R.<x,y>=MPolynomialRing_polydict(GF(2),2,order='lex')
sage: f=x+y
sage: f.lt()
x
"""
try:
return self.__lt
except AttributeError:
if self.is_zero():
return self
R = self.parent()
f = self._MPolynomial_element__element.dict()
res = self._MPolynomial_element__element.lcmt( R.term_order().greater_tuple )
self.__lt = MPolynomial_polydict(R,polydict.PolyDict({res:f[res]},zero=R.base_ring().zero(),force_int_exponents=False, force_etuples=False))
return self.__lt
def __eq__(self,right):
if not isinstance(right, MPolynomial_polydict):
# we want comparison with zero to be fast
if not right:
return not self._MPolynomial_element__element.dict()
return CommutativeRingElement.__eq__(self, right)
return self._MPolynomial_element__element == right._MPolynomial_element__element
def __ne__(self,right):
if not isinstance(right, MPolynomial_polydict):
# we want comparison with zero to be fast
if not right:
return not not self._MPolynomial_element__element.dict()
return CommutativeRingElement.__ne__(self, right)
return self._MPolynomial_element__element != right._MPolynomial_element__element
# required by Python 3
__hash__ = MPolynomial_element.__hash__
def __bool__(self):
"""
Return True if self != 0
.. note::
This is much faster than actually writing ``self == 0``.
"""
return self._MPolynomial_element__element.dict()!={}
__nonzero__ = __bool__
def _floordiv_(self, right):
r"""
Quotient of division of self by other. This is denoted //.
.. note::
It's not clear to me that this is well-defined if
``self`` is not exactly divisible by other.
EXAMPLES::
sage: R.<x,y>=QQbar[]
sage: 2*x*y//y
2*x
sage: 2*x//y
0
sage: 2*x//4
1/2*x
sage: type(0//y)
<class 'sage.rings.polynomial.multi_polynomial_element.MPolynomial_polydict'>
"""
# handle division by monomials without using Singular
if len(right.dict()) == 1:
P = self.parent()
ret = P(0)
denC,denM = next(iter(right))
for c,m in self:
t = c*m
if denC.divides(c) and P.monomial_divides(denM, m):
ret += P.monomial_quotient(t, right, coeff=True)
return ret
Q, _ = self.quo_rem(right)
return Q
def _derivative(self, var=None):
r"""
Differentiates ``self`` with respect to variable ``var``.
If ``var`` is not one of the generators of this ring, _derivative(var)
is called recursively on each coefficient of this polynomial.
.. SEEALSO::
:meth:`derivative`
EXAMPLES::
sage: R.<t> = PowerSeriesRing(QQbar)
sage: S.<x, y> = PolynomialRing(R)
sage: f = (t^2 + O(t^3))*x^2*y^3 + (37*t^4 + O(t^5))*x^3
sage: f.parent()
Multivariate Polynomial Ring in x, y over Power Series Ring in t over Algebraic Field
sage: f._derivative(x) # with respect to x
(2*t^2 + O(t^3))*x*y^3 + (111*t^4 + O(t^5))*x^2
sage: f._derivative(x).parent()
Multivariate Polynomial Ring in x, y over Power Series Ring in t over Algebraic Field
sage: f._derivative(y) # with respect to y
(3*t^2 + O(t^3))*x^2*y^2
sage: f._derivative(t) # with respect to t (recurses into base ring)
(2*t + O(t^2))*x^2*y^3 + (148*t^3 + O(t^4))*x^3
sage: f._derivative(x)._derivative(y) # with respect to x and then y
(6*t^2 + O(t^3))*x*y^2
sage: f.derivative(y, 3) # with respect to y three times
(6*t^2 + O(t^3))*x^2
sage: f._derivative() # can't figure out the variable
Traceback (most recent call last):
...
ValueError: must specify which variable to differentiate with respect to
"""
if var is None:
raise ValueError("must specify which variable to differentiate with respect to")
P = self.parent()
gens = list(P.gens())
# check if var is one of the generators
try:
index = gens.index(var)
except ValueError:
# var is not a generator; do term-by-term differentiation recursively
# var may be, for example, a generator of the base ring
d = dict([(e, x._derivative(var)) for (e, x) in self.dict().items()])
d = polydict.PolyDict(d, P.base_ring().zero(), remove_zero=True)
return MPolynomial_polydict(P, d)
# differentiate w.r.t. indicated variable
d = {}
v = polydict.ETuple({index:1}, len(gens))
for (exp, coeff) in self.dict().items():
if exp[index] > 0:
d[exp.esub(v)] = coeff * exp[index]
d = polydict.PolyDict(d, P.base_ring().zero(), remove_zero=True)
return MPolynomial_polydict(P, d)
def integral(self, var=None):
r"""
Integrates ``self`` with respect to variable ``var``.
.. NOTE::
The integral is always chosen so the constant term is 0.
If ``var`` is not one of the generators of this ring, integral(var)
is called recursively on each coefficient of this polynomial.
EXAMPLES:
On polynomials with rational coefficients::
sage: x, y = PolynomialRing(QQ, 'x, y').gens()
sage: ex = x*y + x - y
sage: it = ex.integral(x); it
1/2*x^2*y + 1/2*x^2 - x*y
sage: it.parent() == x.parent()
True
On polynomials with coefficients in power series::
sage: R.<t> = PowerSeriesRing(QQbar)
sage: S.<x, y> = PolynomialRing(R)
sage: f = (t^2 + O(t^3))*x^2*y^3 + (37*t^4 + O(t^5))*x^3
sage: f.parent()
Multivariate Polynomial Ring in x, y over Power Series Ring in t over Algebraic Field
sage: f.integral(x) # with respect to x
(1/3*t^2 + O(t^3))*x^3*y^3 + (37/4*t^4 + O(t^5))*x^4
sage: f.integral(x).parent()
Multivariate Polynomial Ring in x, y over Power Series Ring in t over Algebraic Field
sage: f.integral(y) # with respect to y
(1/4*t^2 + O(t^3))*x^2*y^4 + (37*t^4 + O(t^5))*x^3*y
sage: f.integral(t) # with respect to t (recurses into base ring)
(1/3*t^3 + O(t^4))*x^2*y^3 + (37/5*t^5 + O(t^6))*x^3
TESTS::
sage: f.integral() # can't figure out the variable
Traceback (most recent call last):
...
ValueError: must specify which variable to integrate with respect to
"""
if var is None:
raise ValueError("must specify which variable to integrate "
"with respect to")
P = self.parent()
gens = list(P.gens())
# check if var is one of the generators
try:
index = gens.index(var)
except ValueError:
# var is not a generator; do term-by-term integration recursively
# var may be, for example, a generator of the base ring
d = dict([(e, x.integral(var))
for (e, x) in self.dict().items()])
d = polydict.PolyDict(d, P.base_ring().zero(),
remove_zero=True)
return MPolynomial_polydict(P, d)
# integrate w.r.t. indicated variable
d = {}
v = polydict.ETuple({index:1}, len(gens))
for (exp, coeff) in self.dict().items():
d[exp.eadd(v)] = coeff / (1+exp[index])
d = polydict.PolyDict(d, P.base_ring().zero(), remove_zero=True)
return MPolynomial_polydict(P, d)
def factor(self, proof=None):
r"""
Compute the irreducible factorization of this polynomial.
INPUT:
        - ``proof`` - insist on provably correct results (default: ``True``
unless explicitly disabled for the ``"polynomial"`` subsystem with
:class:`sage.structure.proof.proof.WithProof`.)
TESTS:
Check if we can handle polynomials with no variables, see :trac:`7950`::
sage: P = PolynomialRing(ZZ,0,'')
sage: res = P(10).factor(); res
2 * 5
sage: res[0][0].parent()
Multivariate Polynomial Ring in no variables over Integer Ring
sage: R = PolynomialRing(QQ,0,'')
sage: res = R(10).factor(); res
10
sage: res.unit().parent()
Rational Field
sage: P(0).factor()
Traceback (most recent call last):
...
ArithmeticError: factorization of 0 is not defined
Check if we can factor a constant polynomial, see :trac:`8207`::
sage: R.<x,y> = CC[]
sage: R(1).factor()
1.00000000000000
Check that we prohibit too large moduli, :trac:`11829`::
sage: R.<x,y> = GF(previous_prime(2^31))[]
sage: factor(x+y+1)
Traceback (most recent call last):
...
NotImplementedError: Factorization of multivariate polynomials over prime fields with characteristic > 2^29 is not implemented.
Check that we can factor over the algebraic field (:trac:`25390`)::
sage: R.<x,y> = PolynomialRing(QQbar)
sage: factor(x^2 + y^2)
(x + (-1*I)*y) * (x + 1*I*y)
Check that the global proof flag for polynomials is honored::
sage: R.<x,y> = PolynomialRing(QQ['z'])
sage: f = x^2 + y^2
sage: with proof.WithProof('polynomial', True):
....: f.factor()
Traceback (most recent call last):
...
NotImplementedError: Provably correct factorization not implemented. Disable this error by wrapping your code in a `with proof.WithProof('polynomial', False):` block.
sage: with proof.WithProof('polynomial', False):
....: f.factor()
Traceback (most recent call last):
...
TypeError: no conversion of this ring to a Singular ring defined
We check that the original issue in :trac:`7554` is fixed::
sage: K.<a> = PolynomialRing(QQ)
sage: R.<x,y> = PolynomialRing(FractionField(K))
sage: factor(x)
x
In the example below, we set the special method
``_factor_multivariate_polynomial()`` in the base ring which is called to
factor multivariate polynomials. This facility can be used to easily
extend polynomial factorization to work over new rings you introduce::
sage: R.<x, y> = PolynomialRing(QQ['z'])
sage: (x*y).factor()
Traceback (most recent call last):
...
NotImplementedError: ...
sage: R.base_ring()._factor_multivariate_polynomial = lambda f, **kwargs: f.change_ring(QQ).factor()
sage: (x*y).factor()
y * x
sage: del R.base_ring()._factor_multivariate_polynomial # clean up
Check that a "multivariate" polynomial in one variable is factored
correctly::
sage: R.<z> = PolynomialRing(CC,1)
sage: f = z^4 - 6*z + 3
sage: f.factor()
(z - 1.60443920904349) * (z - 0.511399619393097) * (z + 1.05791941421830 - 1.59281852704435*I) * (z + 1.05791941421830 + 1.59281852704435*I)
We check a case that failed with an exception at some point::
sage: k.<u> = GF(4)
sage: R.<v> = k[]
sage: l.<v> = R.quo(v^3 + v + 1)
sage: R.<x,y> = l[]
sage: f = y^3 + x^3 + (u + 1)*x
sage: f.factor()
x^3 + y^3 + (u + 1)*x
"""
R = self.parent()
# raise error if trying to factor zero
if not self:
raise ArithmeticError("factorization of {!r} is not defined".format(self))
# if number of variables is zero ...
if R.ngens() == 0:
base_ring = self.base_ring()
if base_ring.is_field():
return Factorization([],unit=self.base_ring()(self))
else:
F = base_ring(self).factor()
return Factorization([(R(f),m) for f,m in F], unit=F.unit())
base_ring = self.base_ring()
if hasattr(base_ring, '_factor_multivariate_polynomial'):
return base_ring._factor_multivariate_polynomial(self, proof=proof)
# try to use univariate factoring
try:
F = self.univariate_polynomial().factor()
return Factorization([(R(f),m) for f,m in F], unit=F.unit())
except TypeError:
pass
base_ring = self.base_ring()
if base_ring.is_finite():
if base_ring.characteristic() > 1<<29:
raise NotImplementedError("Factorization of multivariate polynomials over prime fields with characteristic > 2^29 is not implemented.")
if proof is None:
from sage.structure.proof.proof import get_flag
proof = get_flag(subsystem="polynomial")
if proof:
raise NotImplementedError("Provably correct factorization not implemented. Disable this error by wrapping your code in a `with proof.WithProof('polynomial', False):` block.")
R._singular_().set_ring()
S = self._singular_().factorize()
factors = S[1]
exponents = S[2]
v = sorted([(R(factors[i+1]), sage.rings.integer.Integer(exponents[i+1])) \
for i in range(len(factors))])
unit = R(1)
for i in range(len(v)):
if v[i][0].is_unit():
unit = unit * v[i][0]
del v[i]
break
F = sorted(Factorization(v, unit=unit))
return F
@handle_AA_and_QQbar
def lift(self,I):
"""
        Given an ideal I = (f_1,...,f_r) and some g (== self) in I, find
s_1,...,s_r such that g = s_1 f_1 + ... + s_r f_r
ALGORITHM: Use Singular.
EXAMPLES::
sage: A.<x,y> = PolynomialRing(CC,2,order='degrevlex')
sage: I = A.ideal([x^10 + x^9*y^2, y^8 - x^2*y^7 ])
sage: f = x*y^13 + y^12
sage: M = f.lift(I)
sage: M
[y^7, x^7*y^2 + x^8 + x^5*y^3 + x^6*y + x^3*y^4 + x^4*y^2 + x*y^5 + x^2*y^3 + y^4]
sage: sum( map( mul , zip( M, I.gens() ) ) ) == f
True
TESTS:
Check that this method works over QQbar (:trac:`25351`)::
sage: A.<x,y> = QQbar[]
sage: I = A.ideal([x^2 + y^2 - 1, x^2 - y^2])
sage: f = 2*x^2 - 1
sage: M = f.lift(I)
sage: sum( map( mul , zip( M, I.gens() ) ) ) == f
True
"""
fs = self._singular_()
Is = I._singular_()
P = I.ring()
try:
M = Is.lift(fs)._sage_(P)
except TypeError:
raise ArithmeticError("f is not in I")
return Sequence(M.list(), P, check=False, immutable=True)
@coerce_binop
@handle_AA_and_QQbar
def quo_rem(self, right):
"""
        Return the quotient and remainder of ``self`` and ``right``.
EXAMPLES::
sage: R.<x,y> = CC[]
sage: f = y*x^2 + x + 1
sage: f.quo_rem(x)
(x*y + 1.00000000000000, 1.00000000000000)
sage: R = QQ['a','b']['x','y','z']
sage: p1 = R('a + (1+2*b)*x*y + (3-a^2)*z')
sage: p2 = R('x-1')
sage: p1.quo_rem(p2)
((2*b + 1)*y, (2*b + 1)*y + (-a^2 + 3)*z + a)
sage: R.<x,y> = Qp(5)[]
sage: x.quo_rem(y)
Traceback (most recent call last):
...
TypeError: no conversion of this ring to a Singular ring defined
ALGORITHM: Use Singular.
TESTS:
Check that this method works over QQbar (:trac:`25351`)::
sage: R.<x,y> = QQbar[]
sage: f = y*x^2 + x + 1
sage: f.quo_rem(x)
(x*y + 1, 1)
"""
R = self.parent()
try:
R._singular_().set_ring()
except TypeError:
f = self.parent().flattening_morphism()
if f.domain() != f.codomain():
g = f.section()
q,r = f(self).quo_rem(f(right))
return g(q), g(r)
else:
raise
else:
X = self._singular_().division(right._singular_())
return R(X[1][1,1]), R(X[2][1])
@handle_AA_and_QQbar
def resultant(self, other, variable=None):
"""
Compute the resultant of ``self`` and ``other`` with respect
to ``variable``.
If a second argument is not provided, the first variable of
``self.parent()`` is chosen.
For inexact rings or rings not available in Singular,
this computes the determinant of the Sylvester matrix.
INPUT:
- ``other`` -- polynomial in ``self.parent()``
- ``variable`` -- (optional) variable (of type polynomial) in
``self.parent()``
EXAMPLES::
sage: P.<x,y> = PolynomialRing(QQ, 2)
sage: a = x + y
sage: b = x^3 - y^3
sage: a.resultant(b)
-2*y^3
sage: a.resultant(b, y)
2*x^3
TESTS::
sage: from sage.rings.polynomial.multi_polynomial_ring import MPolynomialRing_polydict_domain
sage: P.<x,y> = MPolynomialRing_polydict_domain(QQ, 2, order='degrevlex')
sage: a = x + y
sage: b = x^3 - y^3
sage: a.resultant(b)
-2*y^3
sage: a.resultant(b, y)
2*x^3
Check that :trac:`15061` is fixed::
sage: R.<x, y> = AA[]
sage: (x^2 + 1).resultant(x^2 - y)
y^2 + 2*y + 1
Test for :trac:`2693`::
sage: R.<x,y> = RR[]
sage: p = x + y
sage: q = x*y
sage: p.resultant(q)
-y^2
Check that this method works over QQbar (:trac:`25351`)::
sage: P.<x,y> = QQbar[]
sage: a = x + y
sage: b = x^3 - y^3
sage: a.resultant(b)
(-2)*y^3
sage: a.resultant(b, y)
2*x^3
"""
R = self.parent()
if variable is None:
variable = R.gen(0)
if R._has_singular and R.is_exact():
rt = self._singular_().resultant(other._singular_(), variable._singular_())
r = rt.sage_poly(R)
else:
r = self.sylvester_matrix(other, variable).det()
if R.ngens() <= 1 and r.degree() <= 0:
return R.base_ring()(r[0])
else:
return r
@coerce_binop
@handle_AA_and_QQbar
def subresultants(self, other, variable=None):
r"""
Return the nonzero subresultant polynomials of ``self`` and ``other``.
INPUT:
- ``other`` -- a polynomial
OUTPUT: a list of polynomials in the same ring as ``self``
EXAMPLES::
sage: R.<x,y> = QQbar[]
sage: p = (y^2 + 6)*(x - 1) - y*(x^2 + 1)
sage: q = (x^2 + 6)*(y - 1) - x*(y^2 + 1)
sage: p.subresultants(q, y)
[2*x^6 + (-22)*x^5 + 102*x^4 + (-274)*x^3 + 488*x^2 + (-552)*x + 288,
-x^3 - x^2*y + 6*x^2 + 5*x*y + (-11)*x + (-6)*y + 6]
sage: p.subresultants(q, x)
[2*y^6 + (-22)*y^5 + 102*y^4 + (-274)*y^3 + 488*y^2 + (-552)*y + 288,
x*y^2 + y^3 + (-5)*x*y + (-6)*y^2 + 6*x + 11*y - 6]
"""
R = self.parent()
if variable is None:
x = R.gen(0)
else:
x = variable
p = self.polynomial(x)
q = other.polynomial(x)
return [R(f) for f in p.subresultants(q)]
def reduce(self, I):
"""
Reduce this polynomial by the polynomials in `I`.
INPUT:
- ``I`` - a list of polynomials or an ideal
EXAMPLES::
sage: P.<x,y,z> = QQbar[]
sage: f1 = -2 * x^2 + x^3
sage: f2 = -2 * y + x* y
sage: f3 = -x^2 + y^2
sage: F = Ideal([f1,f2,f3])
sage: g = x*y - 3*x*y^2
sage: g.reduce(F)
(-6)*y^2 + 2*y
sage: g.reduce(F.gens())
(-6)*y^2 + 2*y
::
sage: f = 3*x
sage: f.reduce([2*x,y])
0
::
sage: k.<w> = CyclotomicField(3)
sage: A.<y9,y12,y13,y15> = PolynomialRing(k)
sage: J = [ y9 + y12]
sage: f = y9 - y12; f.reduce(J)
-2*y12
sage: f = y13*y15; f.reduce(J)
y13*y15
sage: f = y13*y15 + y9 - y12; f.reduce(J)
y13*y15 - 2*y12
Make sure the remainder returns the correct type, fixing :trac:`13903`::
sage: R.<y1,y2>=PolynomialRing(Qp(5),2, order='lex')
sage: G=[y1^2 + y2^2, y1*y2 + y2^2, y2^3]
sage: type((y2^3).reduce(G))
<class 'sage.rings.polynomial.multi_polynomial_element.MPolynomial_polydict'>
"""
from sage.rings.polynomial.multi_polynomial_ideal import MPolynomialIdeal
k = self.base_ring()
P = self.parent()
if isinstance(I, MPolynomialIdeal):
I = I.gens()
if not k.is_field():
raise TypeError("Can only reduce polynomials over fields.")
try:
fs = self._singular_()
Is = fs.parent().ideal(I)
return P(fs.reduce(Is))
except (NotImplementedError, TypeError):
pass
lI = len(I)
I = list(I)
r = P.zero()
p = self
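        # Plain multivariate division: each pass either cancels the leading
        # term of p against some g_i whose leading monomial divides lm(p), or
        # moves the leading term of p into the remainder r.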
while p != 0:
for i in range(lI):
gi = I[i]
plm = p.lm()
gilm = gi.lm()
if P.monomial_divides(gilm, plm):
quot = p.lc()/gi.lc() * P.monomial_quotient(plm, gilm)
p -= quot*I[i]
break
else:
plt = p.lt()
r += plt
p -= plt
return r
###############################################################
# Useful for some geometry code.
###############################################################
def degree_lowest_rational_function(r, x):
r"""
Return the difference of valuations of r with respect to variable x.
INPUT:
- ``r`` -- a multivariate rational function
- ``x`` -- a multivariate polynomial ring generator x
OUTPUT:
- ``integer`` -- the difference val_x(p) - val_x(q) where r = p/q
.. NOTE::
This function should be made a method of the
FractionFieldElement class.
EXAMPLES::
sage: R1 = PolynomialRing(FiniteField(5), 3, names = ["a","b","c"])
sage: F = FractionField(R1)
sage: a,b,c = R1.gens()
sage: f = 3*a*b^2*c^3+4*a*b*c
sage: g = a^2*b*c^2+2*a^2*b^4*c^7
Consider the quotient
`f/g = \frac{4 + 3 bc^{2}}{ac + 2 ab^{3}c^{6}}` (note the
cancellation).
::
sage: r = f/g; r
(-b*c^2 + 2)/(a*b^3*c^6 - 2*a*c)
sage: degree_lowest_rational_function(r,a)
-1
sage: degree_lowest_rational_function(r,b)
0
sage: degree_lowest_rational_function(r,c)
-1
"""
from sage.rings.fraction_field import FractionField
F = FractionField(r.parent())
r = F(r)
f = r.numerator().polynomial(x)
g = r.denominator().polynomial(x)
return f.valuation() - g.valuation()
| 31.551402 | 210 | 0.503329 |
4dff5924aeb4fc0a0db852c13369fb3f7aec59ce | 13,767 | py | Python | oneArch.py | orashi/DeeperDeblur | 3707ae147753bb9c5a8be2215eccee3ea5113cee | ["MIT"] | null | null | null | oneArch.py | orashi/DeeperDeblur | 3707ae147753bb9c5a8be2215eccee3ea5113cee | ["MIT"] | null | null | null | oneArch.py | orashi/DeeperDeblur | 3707ae147753bb9c5a8be2215eccee3ea5113cee | ["MIT"] | null | null | null |
import argparse
import os
import random
from math import log10
import numpy as np
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torchvision.utils as vutils
from visdom import Visdom
from data.oneData import CreateDataLoader
from models.oneModel import *
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', required=True, help='path to dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batchSize', type=int, default=4, help='input batch size')
parser.add_argument('--test', type=bool, default=False, help='test option')
parser.add_argument('--testBatch', type=int, default=4, help='input test batch size')
parser.add_argument('--cut', type=int, default=2, help='cut backup frequency')
parser.add_argument('--niter', type=int, default=700, help='number of epochs to train for')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--lrG', type=float, default=0.0001, help='learning rate, default=0.0001')
parser.add_argument('--lrD', type=float, default=0.0001, help='learning rate, default=0.0001')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--Diters', type=int, default=1, help='number of D iters per each G iter')
parser.add_argument('--manualSeed', type=int, default=2345, help='random seed to use. Default=2345')
parser.add_argument('--baseGeni', type=int, default=0, help='start base of pure pair L1 loss')
parser.add_argument('--adv', type=bool, default=False, help='adversarial training option')
parser.add_argument('--geni', type=int, default=0, help='continue gen image num')
parser.add_argument('--epoi', type=int, default=0, help='continue epoch num')
parser.add_argument('--env', type=str, default='main', help='visdom env')
# parser.add_argument('--gpW', type=float, default=10, help='gradient penalty weight')
opt = parser.parse_args()
print(opt)
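# Illustrative invocation (assumed, not taken from the original repository):
#   python oneArch.py --dataroot /path/to/deblur_dataset --cuda --batchSize 4 --niter 700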
####### regular set up
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
gen_iterations = opt.geni
try:
os.makedirs(opt.outf)
except OSError:
pass
# random seed setup
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
####### regular set up end
viz = Visdom(env=opt.env)
dataloader_train, dataloader_test = CreateDataLoader(opt)
netG = Pyramid()
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
print(netG)
netD = PatchD()
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
criterion_GAN = GANLoss()
if opt.cuda:
criterion_GAN = GANLoss(tensor=torch.cuda.FloatTensor)
criterion_L1 = nn.L1Loss()
criterion_L2 = nn.MSELoss()
if opt.cuda:
netD.cuda()
netG.cuda()
criterion_L1.cuda()
criterion_L2.cuda()
criterion_GAN.cuda()
# setup optimizer
optimizerG = optim.Adam(netG.parameters(), lr=opt.lrG, betas=(opt.beta1, 0.9))
optimizerD = optim.Adam(netD.parameters(), lr=opt.lrD, betas=(opt.beta1, 0.9))
schedulerG = lr_scheduler.ReduceLROnPlateau(optimizerG, mode='max', verbose=True, min_lr=0.000005,
patience=10) # 1.5*10^5 iter
schedulerD = lr_scheduler.ReduceLROnPlateau(optimizerD, mode='max', verbose=True, min_lr=0.000005,
patience=10) # 1.5*10^5 iter
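# Both schedulers are driven by the epoch-average PSNR passed to .step() below;
# with mode='max' the learning rate is only reduced after `patience` epochs
# without a PSNR improvement.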
flag = 1
flag2 = 1
flag3 = 1
flag4 = 1
flag5 = 1
flag6 = 1
for epoch in range(opt.epoi, opt.niter):
epoch_loss = 0
epoch_iter_count = 0
for extra in range(4):
data_iter = iter(dataloader_train)
iter_count = 0
while iter_count < len(dataloader_train):
############################
# (1) Update D network
###########################
for p in netD.parameters(): # reset requires_grad
p.requires_grad = True # they are set to False below in netG update
for p in netG.parameters():
p.requires_grad = False # to avoid computation
# train the discriminator Diters times
Diters = opt.Diters
if gen_iterations < opt.baseGeni or not opt.adv: # L1 stage
Diters = 0
j = 0
while j < Diters and iter_count < len(dataloader_train):
j += 1
netD.zero_grad()
real_bim, real_sim = data_iter.next()
iter_count += 1
if opt.cuda:
real_bim, real_sim = real_bim.cuda(), real_sim.cuda()
# train with fake
fake_Vsim = netG(Variable(real_bim, volatile=True))
errD_fake = criterion_GAN(netD(Variable(torch.cat([fake_Vsim.data, real_bim], 1))), False)
                errD_fake.backward(retain_graph=True)  # backward on score on fake
errD_real = criterion_GAN(netD(Variable(torch.cat([real_sim, real_bim], 1))), True)
errD_real.backward() # backward on score on real
errD = errD_real + errD_fake
optimizerD.step()
############################
# (2) Update G network
############################
if iter_count < len(dataloader_train):
for p in netD.parameters():
p.requires_grad = False # to avoid computation
for p in netG.parameters():
p.requires_grad = True # to avoid computation
netG.zero_grad()
real_bim, real_sim = data_iter.next()
iter_count += 1
if opt.cuda:
real_bim, real_sim = real_bim.cuda(), real_sim.cuda()
if flag: # fix samples
viz.images(
real_bim.mul(0.5).add(0.5).cpu().numpy(),
opts=dict(title='blur img', caption='level 0')
)
viz.images(
real_sim.mul(0.5).add(0.5).cpu().numpy(),
opts=dict(title='sharp img', caption='level 0')
)
vutils.save_image(real_bim.mul(0.5).add(0.5),
'%s/blur_samples' % opt.outf + '.png')
vutils.save_image(real_sim.mul(0.5).add(0.5),
'%s/sharp_samples' % opt.outf + '.png')
fixed_blur = real_bim
flag -= 1
fake = netG(Variable(real_bim))
if gen_iterations < opt.baseGeni or not opt.adv:
contentLoss = criterion_L1(fake, Variable(real_sim))
contentLoss.backward()
errG = contentLoss
MSE = criterion_L2(fake.mul(0.5).add(0.5), Variable(real_sim.mul(0.5).add(0.5)))
epoch_loss += 10 * log10(1 / MSE.data[0])
epoch_iter_count += 1
else:
errG = criterion_GAN(netD(torch.cat([fake, Variable(real_bim)], 1)), True) * 0.0001
errG.backward(retain_graph=True)
contentLoss = criterion_L1(fake, Variable(real_sim))
contentLoss.backward()
MSE = criterion_L2(fake.mul(0.5).add(0.5), Variable(real_sim.mul(0.5).add(0.5)))
epoch_loss += 10 * log10(1 / MSE.data[0])
epoch_iter_count += 1
optimizerG.step()
############################
# (3) Report & 100 Batch checkpoint
############################
if gen_iterations < opt.baseGeni or not opt.adv:
if flag2:
L1window = viz.line(
np.array([contentLoss.data[0]]), np.array([gen_iterations]),
opts=dict(title='L1 loss toward real', caption='Gnet content loss')
)
flag2 -= 1
else:
viz.line(np.array([contentLoss.data[0]]), np.array([gen_iterations]), update='append', win=L1window)
print('[%d/%d][%d/%d][%d] err_G: %f'
% (epoch, opt.niter, iter_count + extra * len(dataloader_train), len(dataloader_train) * 4,
gen_iterations, contentLoss.data[0]))
else:
if flag4:
D1 = viz.line(
np.array([errD.data[0]]), np.array([gen_iterations]),
opts=dict(title='errD(distinguishability)', caption='total Dloss')
)
D2 = viz.line(
np.array([errD_real.data[0]]), np.array([gen_iterations]),
opts=dict(title='errD_real', caption='real\'s mistake')
)
D3 = viz.line(
np.array([errD_fake.data[0]]), np.array([gen_iterations]),
opts=dict(title='errD_fake', caption='fake\'s mistake')
)
G1 = viz.line(
np.array([errG.data[0]]), np.array([gen_iterations]),
opts=dict(title='Gnet loss toward real', caption='Gnet loss')
)
flag4 -= 1
if flag2:
L1window = viz.line(
np.array([contentLoss.data[0]]), np.array([gen_iterations]),
opts=dict(title='MSE loss toward real', caption='Gnet content loss')
)
flag2 -= 1
viz.line(np.array([errD.data[0]]), np.array([gen_iterations]), update='append', win=D1)
viz.line(np.array([errD_real.data[0]]), np.array([gen_iterations]), update='append', win=D2)
viz.line(np.array([errD_fake.data[0]]), np.array([gen_iterations]), update='append', win=D3)
viz.line(np.array([errG.data[0]]), np.array([gen_iterations]), update='append', win=G1)
viz.line(np.array([contentLoss.data[0]]), np.array([gen_iterations]), update='append', win=L1window)
print('[%d/%d][%d/%d][%d] errD: %f err_G: %f err_D_real: %f err_D_fake %f content loss %f'
% (epoch, opt.niter, iter_count + extra * len(dataloader_train), len(dataloader_train) * 4,
gen_iterations,
errD.data[0], errG.data[0], errD_real.data[0], errD_fake.data[0], contentLoss.data[0]))
if gen_iterations % 100 == 0:
fake = netG(Variable(fixed_blur, volatile=True))
if flag3:
imageW = viz.images(
fake.data.mul(0.5).add(0.5).clamp(0, 1).cpu().numpy(),
opts=dict(title='deblur img', caption='level 0')
)
flag3 -= 1
else:
viz.images(
fake.data.mul(0.5).add(0.5).clamp(0, 1).cpu().numpy(),
win=imageW,
opts=dict(title='deblur img', caption='level 0')
)
if gen_iterations % 1000 == 0:
vutils.save_image(fake.data.mul(0.5).add(0.5).clamp(0, 1),
'%s/fake_samples_gen_iter_%08d.png' % (opt.outf, gen_iterations))
gen_iterations += 1
if opt.test:
if epoch % 5 == 0:
avg_psnr = 0
for batch in dataloader_test:
input, target = [x.cuda() for x in batch]
prediction = netG(Variable(input, volatile=True))
mse = criterion_L2(prediction.mul(0.5).add(0.5), Variable(target.mul(0.5).add(0.5)))
psnr = 10 * log10(1 / mse.data[0])
avg_psnr += psnr
avg_psnr = avg_psnr / len(dataloader_test)
if flag6:
Test = viz.line(
np.array([avg_psnr]), np.array([epoch]),
opts=dict(title='Test PSNR', caption='PSNR')
)
flag6 -= 1
else:
viz.line(np.array([avg_psnr]), np.array([epoch]), update='append', win=Test)
print("===> Avg. PSNR: {:.4f} dB".format(avg_psnr))
avg_psnr = epoch_loss / epoch_iter_count
if flag5:
epoL = viz.line(
np.array([avg_psnr]), np.array([epoch]),
opts=dict(title='Train epoch PSNR', caption='Epoch PSNR')
)
flag5 -= 1
schedulerG.step(avg_psnr)
schedulerD.step(avg_psnr)
else:
viz.line(np.array([avg_psnr]), np.array([epoch]), update='append', win=epoL)
schedulerG.step(avg_psnr)
schedulerD.step(avg_psnr)
# do checkpointing
if opt.cut == 0:
torch.save(netG.state_dict(), '%s/netG_epoch_only.pth' % opt.outf)
torch.save(netD.state_dict(), '%s/netD_epoch_only.pth' % opt.outf)
elif epoch % opt.cut == 0:
torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))
| 41.592145 | 120 | 0.54696 |
611a02228f5b0068bb071834d7d230b587cd4328 | 38,354 | py | Python | src/replay_analyzerServer.py | m-lab/wehe_desktop | d10b17e8d2d5618a1b3b7d41166dac0f186215e5 | [
"Apache-2.0"
] | 6 | 2020-03-07T04:38:26.000Z | 2021-09-17T06:43:06.000Z | src/replay_analyzerServer.py | m-lab/wehe_desktop | d10b17e8d2d5618a1b3b7d41166dac0f186215e5 | [
"Apache-2.0"
] | 2 | 2019-03-20T16:23:03.000Z | 2019-09-20T20:41:11.000Z | src/replay_analyzerServer.py | m-lab/wehe_desktop | d10b17e8d2d5618a1b3b7d41166dac0f186215e5 | [
"Apache-2.0"
] | 3 | 2019-04-27T20:43:05.000Z | 2020-08-07T21:36:22.000Z | '''
#######################################################################################################
#######################################################################################################
Copyright 2018 Northeastern University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
USAGE:
sudo python replay_analyzerServer.py --port=56565 --ConfigFile=configs_local.cfg
IMPORTANT NOTES: always run in sudo mode
#######################################################################################################
#######################################################################################################
'''
import json, datetime, logging, pickle, sys, traceback, glob
import tornado.ioloop, tornado.web
import gevent.monkey
import db as DB
gevent.monkey.patch_all(ssl=False)
import ssl
import gevent, gevent.pool, gevent.server, gevent.queue, gevent.select
from gevent.lock import RLock
from python_lib import *
from prometheus_client import start_http_server, Counter
import finalAnalysis as FA
db = None
POSTq = gevent.queue.Queue()
errorlog_q = gevent.queue.Queue()
logger = logging.getLogger('replay_analyzer')
DPIlogger = logging.getLogger('DPI')
'''
DPI test related part
'''
RESULT_REQUEST = Counter("request_total", "Total Number of Requests Received", ['type'])
class singleCurrTest(object):
def __init__(self, userID, replayName, carrierName):
global db
# load the curr test info from database
self.userID = userID
self.replayName = replayName
self.carrierName = carrierName
self.db = db
# if not in currTest
# create one database entry with initialized currTest and BAque_id
currTest = db.getCurrTest(userID, replayName, carrierName)
if not currTest:
self.timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
# currently this packet and this region is being tested
self.currTestPacket, self.currTestLeft, self.currTestRight = getInitTest(replayName)
# How many tests ran so far
self.numTests = 0
# How many packets has been tested
self.numTestedPackets = 0
# The binary analysis ID, uniquely identify the binary analysis entries related to this test
self.BAque_id = int(
int(hashlib.sha1('{}_{}_{}_{}_que'.format(userID, replayName, carrierName, self.timestamp)).hexdigest(),
16) % 10 ** 8)
# The matching region ID, uniquely identify the matching region entries related to this test
self.mr_id = int(
int(hashlib.sha1('{}_{}_{}_{}_mr'.format(userID, replayName, carrierName, self.timestamp)).hexdigest(),
16) % 10 ** 8)
db.insertCurrTest(userID, replayName, carrierName, self.timestamp, self.currTestPacket, self.currTestLeft,
self.currTestRight, self.numTests, self.numTestedPackets, self.BAque_id, self.mr_id)
else:
self.timestamp = currTest[0]['timestamp']
self.currTestPacket = currTest[0]['currTestPacket']
self.currTestLeft = currTest[0]['currTestLeft']
self.currTestRight = currTest[0]['currTestRight']
self.numTests = currTest[0]['numTests']
self.numTestedPackets = currTest[0]['numTestedPackets']
self.BAque_id = currTest[0]['BAque_id']
self.mr_id = currTest[0]['mr_id']
# insert the new test into the BAque table
def insertBAque(self, testPacket, testLeft, testRight):
print('\r\n inserting BAque, ',testLeft, testRight)
insertResult = self.db.insertBAque(self.BAque_id, testPacket, testLeft, testRight)
if not insertResult:
errorlog_q.put(('error inserting into BA queue', self.userID, self.replayName, self.carrierName,
self.BAque_id, testPacket, testLeft, testRight))
# get the next test for this client from BAque table
# if there is test left:
# update currtest accordingly return true
# delete the test entry from BAque table
# return True
# else:
# return False
def getNextTest(self):
response = self.db.getTestBAque(self.BAque_id)
# example ({'testRight': 286L, 'uniqtest_id': 1L, 'testLeft': 10L, 'testPacket': 'C_1', 'testq_id': 2501484L},)
if response:
db.delTestBAque(response[0]['uniqtest_id'])
self.currTestPacket = response[0]['testPacket']
self.currTestLeft = response[0]['testLeft']
self.currTestRight = response[0]['testRight']
return True
else:
print 'NO NEXT TEST'
return False
# insert the byte into the matching region table
# this function is called when the test region is a single byte and it is one of the matching bytes
def insertMatchingRegion(self):
insertResult = self.db.insertRegion(self.mr_id, self.currTestPacket, self.currTestLeft)
if not insertResult:
errorlog_q.put(('error inserting into BA queue', self.userID, self.replayName, self.carrierName, self.mr_id,
self.currTestPacket, self.currTestLeft))
def getAllMatchingRegion(self):
# allRes is [] if no matching region
allRes = self.db.getMatchingRegion(self.mr_id)
allMatching = {}
for res in allRes:
if res['packetNum'] not in allMatching:
allMatching[res['packetNum']] = [res['byteNum']]
else:
allMatching[res['packetNum']].append(res['byteNum'])
return allMatching
# Update the current test info to database
def updateCurr(self):
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
updateResult = self.db.updateCurrTest(self.userID, self.replayName, self.carrierName, timestamp,
self.currTestPacket,
self.currTestLeft, self.currTestRight, self.numTests,
self.numTestedPackets)
if not updateResult:
errorlog_q.put(('error updating into current test', self.userID, self.replayName, self.carrierName))
# Write this test result to database
def backUpRaw(self, historyCount, testID, diffDetected):
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
insertResult = self.db.insertRawTest(self.userID, self.replayName, self.carrierName, timestamp,
self.currTestPacket,
self.currTestLeft, self.currTestRight,
historyCount, testID, diffDetected)
if not insertResult:
errorlog_q.put(('error backing up this test', self.userID, self.replayName, self.carrierName,
self.currTestPacket, self.currTestLeft, self.currTestRight,
historyCount, testID, diffDetected))
# delete the current test in database
def delCurr(self):
delTestQueResult = self.db.delTestQueue(self.BAque_id)
delMatchingRegionResult = self.db.delMatchingRegion(self.mr_id)
delCurrTestResult = self.db.delCurrTest(self.userID, self.replayName, self.carrierName)
return delTestQueResult and delMatchingRegionResult and delCurrTestResult
'''
If DPI rule in prevTestData: return it
Else: return 'No result found', suggest client do DPIanalysis
'''
def getDPIrule(args):
try:
userID = args['userID'][0]
carrierName = args['carrierName'][0]
replayName = args['replayName'][0]
except:
return json.dumps({'success': False, 'error': 'required fields missing'}, cls=myJsonEncoder)
preResult = loadPrevTest(userID, replayName, carrierName)
if not preResult:
# User can choose to test by sending DPIanalysis request
return json.dumps({'success': False, 'error': 'No result found'})
else:
timestamp = preResult['timestamp']
numTests = preResult['numTests']
matchingContent = preResult['matchingContent']
# User can still choose to re-test again by sending DPIanalysis request
return json.dumps({'success': True,
'response':
{'timestamp': timestamp, 'DPIrule': matchingContent,
'numTests': numTests}}, cls=myJsonEncoder)
def loadPrevTest(userID, replayName, carrierName):
global db
response = db.getPreTest(userID, replayName, carrierName)
if not response:
return None
else:
return response[0]
# insert the matching content to previousTest
def insertPrevTest(cTest, matchingRules):
timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
global db
db.insertPreTest(cTest.userID, cTest.replayName, cTest.carrierName, timestamp, cTest.numTests, str(matchingRules),
cTest.mr_id)
def resetDPI(args):
try:
userID = args['userID'][0]
carrierName = args['carrierName'][0]
replayName = args['replayName'][0]
except:
return json.dumps({'success': False, 'error': 'required fields missing'}, cls=myJsonEncoder)
cTest = singleCurrTest(userID, replayName, carrierName)
if cTest.delCurr():
return json.dumps({'success': True})
else:
return json.dumps({'success': False, 'error': 'Failed at removing current test'})
'''
Process the client Request for DPI analysis
Client sends current test status --- userID, carrierName, replayName, testedRegion, diff
Server figures out which Packet, TestRegion should this client be testing next
If there is no currTestData for the requested test:
Initialize one, return the initial test to client
Elif the client did not perform test (either TestedRegion or diff is empty) right before requesting:
Return whatever is in CurrTestData (what this client is supposed to test) for this test
Else:
a. Write TestedRegion and diff into rawTestData (backup database for all tests)
b. Figure out what to send to client (can either be the next test or test result)
'''
def processDPIrequest(args):
try:
userID = args['userID'][0]
carrierName = args['carrierName'][0]
replayName = args['replayName'][0]
historyCount = args['historyCount'][0]
testID = args['testID'][0]
except:
return json.dumps({'success': False, 'error': 'required fields missing'}, cls=myJsonEncoder)
try:
cTest = singleCurrTest(userID, replayName, carrierName)
testedLeft = int(args['testedLeft'][0])
testedRight = int(args['testedRight'][0])
except Exception as e:
return json.dumps({'success': False, 'error': 'error in provided fields'})
# initial test
if testedLeft == testedRight == -1:
return json.dumps({'success': True,
'response': {'testPacket': cTest.currTestPacket, 'testRegionLeft': cTest.currTestLeft,
'testRegionRight': cTest.currTestRight}}, cls=myJsonEncoder)
# last finished test from this client should match the status in the database
elif testedLeft == cTest.currTestLeft and testedRight == cTest.currTestRight:
# store the test result
diff = args['diff'][0]
if diff == 'T':
diff = True
else:
diff = False
cTest.backUpRaw(historyCount, testID, diff)
# nextDPItest, return None if not all tests have finished (result is not available)
# otherwise, return value is matchingContent or strings indicate maximum number of tests reached
matchingContent = nextDPItest(cTest, diff, replayName)
if not matchingContent:
return json.dumps({'success': True,
'response': {'testPacket': cTest.currTestPacket, 'testRegionLeft': cTest.currTestLeft,
'testRegionRight': cTest.currTestRight}}, cls=myJsonEncoder)
else:
# Return a list of matching contents identified, probably needs to add multiple strings in the future
# DPIrule = [{packet number : matching content}]
DPIrule = matchingContent
# remove/reset current test progress
cTest.delCurr()
# store this DPI test result into previousTestResult
insertPrevTest(cTest, DPIrule)
return json.dumps({'success': True,
'response': {'DPIrule': [DPIrule], 'numTests': cTest.numTests}}, cls=myJsonEncoder)
else:
return json.dumps({'success': False, 'error': 'error running reverse engineering'})
'''
Based on test result:
a. Update BAque (append more tests)
b. Move currTestPacket/currTestRegion forward (next packet OR next region in BAque)
'''
def nextDPItest(cTest, diff, replayName):
cTest.numTests += 1
# Break the test if reached either maximum threshold
# Or no more test left
maxPacketCheck = 10
maxNumTest = 200
testPacket = cTest.currTestPacket
leftBar = cTest.currTestLeft
rightBar = cTest.currTestRight
# If diff = True aka Different than original, classification broke == matching contents are in the tested region
# The remaining tests are kept in the BAque for each client
# TODO, keep the HTTP structure while looking for keywords? since the HTTP structure is a big AND condition
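    # Illustrative walk-through (not from the original source): if the region
    # [10, 286) of packet C_1 still breaks classification, it is split at the
    # midpoint into [10, 148) and [148, 286) and both halves are queued;
    # regions of at most 4 bytes are then tested one byte at a time.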
if diff:
if (rightBar - leftBar) > 4:
# Need to check both left and right sub regions
midPoint = leftBar + (rightBar - leftBar) / 2
cTest.insertBAque(testPacket, leftBar, midPoint)
cTest.insertBAque(testPacket, midPoint, rightBar)
# If only one byte tested in the packet
# This byte belongs to matchingRegion (changing it broke classification)
elif rightBar - leftBar == 1:
cTest.insertMatchingRegion()
# Else only 1 ~ 3 bytes need to be tested in this region, check each byte individually
else:
for testByte in xrange(leftBar, rightBar):
cTest.insertBAque(testPacket, testByte, testByte + 1)
# getNextTest updates current test
currTestUpdated = cTest.getNextTest()
# If no more test is needed for this packet
if not currTestUpdated:
matchingRegion = cTest.getAllMatchingRegion()
# DPI rule found for this test
if matchingRegion:
allMatchingContent = {}
for packet in matchingRegion:
matchingBytes = matchingRegion[packet]
matchingBytes.sort()
matchingContent = getMatchingContent(replayName, matchingBytes, packet)
allMatchingContent[packet] = matchingContent
return allMatchingContent
else:
cTest.numTestedPackets += 1
testPacket, testLeft, testRight = getInitTest(replayName, cTest.numTestedPackets)
cTest.currTestPacket = testPacket
cTest.currTestLeft = testLeft
cTest.currTestRight = testRight
# Stop DPI analysis when threshold is reached
if cTest.numTestedPackets >= maxPacketCheck:
return 'NO DPI for first {} pacs'.format(cTest.numTestedPackets)
elif cTest.numTests >= maxNumTest:
return 'NO DPI after {} tests'.format(cTest.numTests)
cTest.updateCurr()
return None
'''
Find the longest consecutive bytes in matchingRegion
Get the string corresponding to those bytes, and they are the matching content
'''
def getMatchingContent(replayName, matchingRegion, matchingPacket):
consecutiveBytes = getLongestConsecutive(matchingRegion)
side = matchingPacket.split('_')[0]
packetNum = matchingPacket.split('_')[1]
matchingContent = getContent(replayName, side, consecutiveBytes, packetNum)
return matchingContent
# return a list of longest consecutive bytes
def getLongestConsecutive(allBytes):
currLen = longestLen = 0
    # Use two separate lists (a chained assignment would alias them) and scan
    # every byte, including the first one.
    currConsecutive = []
    longestConsecutive = []
    for aByte in allBytes:
if not currConsecutive:
currConsecutive.append(aByte)
currLen = 1
# Still in a consecutive list
elif aByte == currConsecutive[-1] + 1:
currLen += 1
currConsecutive.append(aByte)
else:
if currLen > longestLen:
longestLen = currLen
longestConsecutive = currConsecutive
currLen = 1
currConsecutive = [aByte]
if currLen > longestLen:
longestConsecutive = currConsecutive
return longestConsecutive
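# Example (illustrative): getLongestConsecutive([2, 5, 6, 7, 9]) -> [5, 6, 7]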
# Load packet queues and get the contents from the corresponding bytes
def getContent(replayName, side, bytes, packetNum):
# step one, load all packet content from client/server pickle/json
packetNum = int(packetNum)
pcap_folder = Configs().get('pcap_folder')
replayDir = ''
if os.path.isfile(pcap_folder):
with open(pcap_folder, 'r') as f:
for l in f.readlines():
                replayFileName = replayName.replace('-', '_')
                if replayFileName in l:
replayDir = l.strip()
break
pickleServerFile = pickleClientFile = ''
for file in os.listdir(replayDir):
if file.endswith(".pcap_server_all.pickle"):
pickleServerFile = file
elif file.endswith(".pcap_client_all.pickle"):
pickleClientFile = file
if side == 'S' and pickleServerFile:
serverQ, tmpLUT, tmpgetLUT, udpServers, tcpServerPorts, replayName = \
pickle.load(open(replayDir + '/' + pickleServerFile, 'r'))
        # Use whichever protocol actually holds entries in the server queue.
        protocol = 'tcp'
        if not serverQ['tcp']:
            protocol = 'udp'
csp = serverQ[protocol].keys()[0]
response_text = serverQ[protocol][csp][packetNum - 1].response_list[0].payload.decode('hex')
matchingContent = response_text[bytes[0]: bytes[-1] + 1]
elif pickleClientFile:
clientQ, udpClientPorts, tcpCSPs, replayName = \
pickle.load(open(replayDir + '/' + pickleClientFile, 'r'))
request_text = clientQ[packetNum - 1].payload.decode('hex')
matchingContent = request_text[bytes[0]: bytes[-1] + 1]
else:
matchingContent = ''
return matchingContent
'''
Result analysis related
'''
def processResult(results):
    # Only if ks2_ratio > ks2Beta (the confidence level) is the ks2 result trusted; otherwise only the area test is used
# Default suggestion: areaThreshold 0.1, ks2Beta 95%, ks2Threshold 0.05
# KS2:
# ks2Threshold is the threshold for p value in the KS2 test, if p greater than it, then we cannot
# reject the hypothesis that the distributions of the two samples are the same
# If ks2pvalue suggests rejection (i.e., p < ks2Threshold), where accept rate < (1 - ks2Beta), the two distributions are not the same (i.e., differentiation)
# Else, the two distributions are the same, i.e., no differentiation
# Area:
# if area_test > areaThreshold, the two distributions are not the same (i.e., Differentiation)
# Else, the two distributions are the same, i.e., no differentiation
# Return result score, 0 : both suggests no differentiation
# 1 : inconclusive conclusion from two methods (Considered as no differentiation so far)
# 2 : both suggests differentiation
# if target trace has less throughput, return negative value respectively, e.g., -1 means target trace is throttled
# result rate: differentiated rate = (normal - throttled)/throttled
# Should only be one result since unique (userID, historyCount, testID)
result = results[0]
areaT = Configs().get('areaThreshold')
ks2Beta = Configs().get('ks2Beta')
ks2T = Configs().get('ks2Threshold')
outres = {'userID': result['userID'],
'historyCount': result['historyCount'],
'replayName': result['replayName'],
'date': result['date'],
'xput_avg_original': result['xput_avg_original'],
'xput_avg_test': result['xput_avg_test'],
'area_test': result['area_test'],
'ks2pVal': result['ks2pVal']}
outres['against'] = 'test'
Negative = False
# if the controlled flow has less throughput
if result['xput_avg_test'] < result['xput_avg_original']:
Negative = True
# ks2_ratio test is problematic, sometimes does not give the correct result even in the obvious cases, not using it so far
# 1.Area test does not pass and 2.With confidence level ks2Beta that the two distributions are the same
# Then there is no differentiation
if (result['area_test'] < areaT) and (result['ks2pVal'] > ks2T):
outres['diff'] = 0
outres['rate'] = 0
# 1.Area test does pass and 2.With confidence level ks2Beta that the two distributions are not the same
# Then there is differentiation
elif (result['area_test'] > areaT) and (result['ks2pVal'] < ks2T):
outres['diff'] = 2
outres['rate'] = (result['xput_avg_test'] - result['xput_avg_original']) / min(result['xput_avg_original'],
result['xput_avg_test'])
# Else inconclusive
else:
outres['diff'] = 1
outres['rate'] = 0
if Negative:
outres['diff'] = - outres['diff']
outres['rate'] = - outres['rate']
return outres
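# Worked example (illustrative, not from the original source): with
# areaThreshold=0.1 and ks2Threshold=0.05, a result having area_test=0.25,
# ks2pVal=0.001, xput_avg_original=8.0 and xput_avg_test=4.0 is classified as
# differentiation; because the test flow is the slower one, diff is reported
# as -2 and rate as (8.0 - 4.0) / 4.0 = 1.0.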
'''
J'Alerte l'Arcep
Find the ReplayInfo for the user that wants to alert Arcep, add an additional field 'AlertArcep' : True
'''
def setAlert(userID, historyCount, testID):
path = Configs().get('resultsFolder')
replayInfoDir = path + '/' + userID + '/replayInfo/'
regexOriginal = '*_' + str(historyCount) + '_' + str(testID) + '.json'
replayOriginal = glob.glob(replayInfoDir + regexOriginal)
replayInfo = json.load(open(replayOriginal[0], 'r'))
# set the 16th element, alert arcep to true
replayInfo[15] = True
json.dump(replayInfo, open(replayOriginal[0], 'w'))
# Logic:
# 1. Analyze using the throughputs sent by client (server creates a client decision file for the GET handle to answer client request)
# 2. Use the tcpdump trace to perform server side analysis (if tcpdump enabled)
def analyzer(args, resultsFolder, xputBuckets, alpha):
LOG_ACTION(logger, 'analyzer:' + str(args))
args = json.loads(args)
# return value is None if there is no file to analyze
resObjClient = FA.finalAnalyzer(args['userID'][0], args['historyCount'][0], args['testID'][0], resultsFolder,
xputBuckets, alpha, side='Client')
serverAnalysisStarts = time.time()
resObjServer = FA.finalAnalyzer(args['userID'][0], args['historyCount'][0], args['testID'][0], resultsFolder,
xputBuckets, alpha, side='Server')
serverAnalysisEnds = time.time()
cpuPercent, memPercent, diskPercent, upLoad = getSystemStat()
if (serverAnalysisEnds - serverAnalysisStarts) > 1:
LOG_ACTION(logger,
'Took {} seconds for server side analysis and cleaning up for UserID {} and historyCount {} testID {} *** CPU {}% MEM {}% DISK {}% UPLOAD {}Mbps'.format(
serverAnalysisEnds - serverAnalysisStarts, args['userID'][0], args['historyCount'][0],
args['testID'][0], cpuPercent, memPercent, diskPercent, upLoad))
def jobDispatcher(q):
resultsFolder = Configs().get('resultsFolder')
xputBuckets = Configs().get('xputBuckets')
alpha = Configs().get('alpha')
pool = gevent.pool.Pool()
while True:
args = q.get()
pool.apply_async(analyzer, args=(args, resultsFolder, xputBuckets, alpha,))
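# jobDispatcher runs in its own greenlet (spawned in main()); it blocks on the
# POSTq queue and hands each analysis job to the gevent pool, so the POST
# handler can acknowledge the request immediately.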
class myJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
obj = obj.isoformat()
else:
obj = super(myJsonEncoder, self).default(obj)
return obj
def loadAndReturnResult(userID, historyCount, testID, args):
resultFolder = Configs().get('resultsFolder')
resultFile = (resultFolder + userID + '/decisions/' + 'results_{}_{}_{}_{}.json').format(userID, 'Client',
historyCount, testID)
replayInfoFile = (resultFolder + userID + '/replayInfo/' + 'replayInfo_{}_{}_{}.json').format(userID,
historyCount,
testID)
clientXputFile = (resultFolder + userID + '/clientXputs/' + 'Xput_{}_{}_{}.json').format(userID,
historyCount,
testID)
clientOriginalXputFile = (resultFolder + userID + '/clientXputs/' + 'Xput_{}_{}_{}.json').format(userID,
historyCount,
0)
# if result file is here, return result
if os.path.isfile(resultFile) and os.path.isfile(replayInfoFile):
results = json.load(open(resultFile, 'r'))
info = json.load(open(replayInfoFile, 'r'))
realID = info[2]
replayName = info[4]
extraString = info[5]
incomingTime = info[0]
# incomingTime = strftime("%Y-%m-%d %H:%M:%S", gmtime())
areaTest = str(results[0])
ks2ratio = str(results[1])
xputAvg1 = str(results[4][2])
xputAvg2 = str(results[5][2])
ks2dVal = str(results[9])
ks2pVal = str(results[10])
return json.dumps({'success': True,
'response': {'replayName': replayName, 'date': incomingTime, 'userID': userID,
'extraString': extraString, 'historyCount': str(historyCount),
'testID': str(testID), 'area_test': areaTest, 'ks2_ratio_test': ks2ratio,
'xput_avg_original': xputAvg1, 'xput_avg_test': xputAvg2,
'ks2dVal': ks2dVal, 'ks2pVal': ks2pVal}}, cls=myJsonEncoder)
else:
# else if the clientXputs and replayInfo files (but not the result file) exist
# maybe the POST request is missing, try putting the test to the analyzer queue
if os.path.isfile(replayInfoFile) and os.path.isfile(clientXputFile) and os.path.isfile(
clientOriginalXputFile):
POSTq.put(json.dumps(args))
return json.dumps({'success': False, 'error': 'No result found'})
def getHandler(args):
'''
Handles GET requests.
There are three types of requests:
1. Latest default settings (no db operation)
2. Differentiation test result (no db operation)
3. DPI test results (db operations needed)
If something wrong with the job, returns False.
'''
try:
command = args['command'][0]
except:
RESULT_REQUEST.labels('nocommand').inc()
return json.dumps({'success': False, 'error': 'command not provided'})
try:
userID = args['userID'][0]
except KeyError as e:
RESULT_REQUEST.labels('nouserID').inc()
return json.dumps({'success': False, 'missing': str(e)})
# Return the latest threshold for both area test and ks2 test
RESULT_REQUEST.labels(command).inc()
if command == 'defaultSetting':
# Default setting for the client
areaThreshold = 0.1
ks2Threshold = 0.05
ks2Ratio = 0.95
return json.dumps({'success': True, 'areaThreshold': str(areaThreshold), 'ks2Threshold': str(ks2Threshold),
'ks2Ratio': str(ks2Ratio)}, cls=myJsonEncoder)
elif command == 'singleResult':
try:
historyCount = int(args['historyCount'][0])
testID = int(args['testID'][0])
except Exception as e:
return json.dumps({'success': False, 'error': str(e)})
return loadAndReturnResult(userID, historyCount, testID, args)
# Return the DPI rule
elif command == 'DPIrule':
return getDPIrule(args)
# Reverse engineer the DPI rule used for classifying this App
elif command == 'DPIanalysis':
return processDPIrequest(args)
elif command == 'DPIreset':
return resetDPI(args)
else:
return json.dumps({'success': False, 'error': 'unknown command'})
def postHandler(args):
'''
Handles POST requests.
Basically puts the job on the queue and return True.
If something wrong with the job, returns False.
'''
try:
command = args['command'][0]
except:
return json.dumps({'success': False, 'error': 'command not provided'})
try:
userID = args['userID'][0]
historyCount = int(args['historyCount'][0])
testID = int(args['testID'][0])
except KeyError as e:
return json.dumps({'success': False, 'missing': str(e)})
except ValueError as e:
return json.dumps({'success': False, 'value error': str(e)})
if command == 'analyze':
POSTq.put(json.dumps(args))
elif command == 'alertArcep':
try:
setAlert(userID, historyCount, testID)
json.dumps({'success': True})
except:
errorlog_q.put(('Failed to alert', args))
return json.dumps({'success': False, 'error': 'Failed to alert'})
else:
errorlog_q.put(('unknown command', args))
return json.dumps({'success': False, 'error': 'unknown command'})
LOG_ACTION(logger, 'Returning for POST UserID {} and historyCount {} testID {} ***'.format(
userID, historyCount, testID))
return json.dumps({'success': True})
class Results(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self):
pool = self.application.settings.get('GETpool')
args = self.request.arguments
LOG_ACTION(logger, 'GET:' + str(args))
pool.apply_async(getHandler, (args,), callback=self._callback)
@tornado.web.asynchronous
def post(self):
pool = self.application.settings.get('POSTpool')
args = self.request.arguments
LOG_ACTION(logger, 'POST:' + str(args))
pool.apply_async(postHandler, (args,), callback=self._callback)
def _callback(self, response):
LOG_ACTION(logger, '_callback:' + str(response))
self.write(response)
self.finish()
def error_logger(error_log):
'''
Logs all errors and exceptions.
'''
errorLogger = logging.getLogger('errorLogger')
createRotatingLog(errorLogger, error_log)
while True:
toWrite = errorlog_q.get()
id = toWrite[0]
toWrite = str(toWrite)
print '\n***CHECK ERROR LOGS: {}***'.format(toWrite)
errorLogger.info(toWrite)
'''
Get the initial test
a. which packet to change (if packetNum given, return the next packet)
b. what region to change
(*** some ISPs validate TLS length field (e.g., AT&T hangs replay when length is invalid),
thus keep the first 10 bytes untouched for DPI reverse engineering ***)
'''
def getInitTest(replayName, packetNum=0):
packetMetaDic = Configs().get('packetMetaDic')
packetNum = int(packetNum)
# packetS_N is packet side_number, e.g., C_1
packetS_N = packetMetaDic[replayName][packetNum][0]
packetLen = packetMetaDic[replayName][packetNum][1]
return packetS_N, 10, packetLen
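# Example (illustrative): if the first entry of packetMetaDic[replayName] is
# ('C_1', 230), getInitTest(replayName) returns ('C_1', 10, 230), i.e. client
# packet 1 is tested first over bytes 10..230, leaving the first 10 bytes
# untouched.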
def procPacketMetaLine(onePacMeta, clientIP):
l = onePacMeta.replace('\n', '').split('\t')
srcIP = l[5]
if 'ip:tcp' in l[1]:
protocol = 'tcp'
elif 'ip:udp' in l[1]:
protocol = 'udp'
else:
print '\r\n Unknown protocol!! EXITING'
sys.exit()
if protocol == 'tcp':
paclength = int(l[11])
else:
paclength = int(l[12]) - 8 # subtracting UDP header length
if srcIP == clientIP:
srcSide = 'C'
else:
srcSide = 'S'
return srcSide, paclength
'''
load packetMeta info from replay folders
create a dictionary with key : replayName
value : list of packets in the replay and the length of each packet
e.g. [ ('C_1', 230), ('S_1', 751), ('S_2', 182), ...]
'''
def getPacketMetaInfo():
folders = []
packetMetaDic = {}
pcap_folder = Configs().get('pcap_folder')
if os.path.isfile(pcap_folder):
with open(pcap_folder, 'r') as f:
for l in f:
folders.append(l.strip())
else:
folders.append(pcap_folder)
for folder in folders:
packetList = []
# client packet number
packetNumC = 0
# server packet number
packetNumS = 0
if folder == '':
continue
replayName = folder.split('/')[-1]
if 'Random' in replayName:
continue
# For some reason, the filename uses '_', but replayName actually uses '-'
replayName = replayName.replace('_', '-')
packetMeta = folder + '/packetMeta'
client_ip_file = folder + '/client_ip.txt'
f = open(client_ip_file, 'r')
client_ip = (f.readline()).strip()
with open(packetMeta, 'r') as m:
for line in m:
pacSide, packetLen = procPacketMetaLine(line, client_ip)
if packetLen == 0:
continue
elif pacSide == 'C':
packetNumC += 1
packetList.append(('C_' + str(packetNumC), packetLen))
else:
packetNumS += 1
packetList.append(('S_' + str(packetNumS), packetLen))
packetMetaDic[replayName] = packetList
Configs().set('packetMetaDic', packetMetaDic)
def main():
# PRINT_ACTION('Checking tshark version', 0)
# TH.checkTsharkVersion('1.8')
global db
configs = Configs()
configs.set('xputInterval', 0.25)
configs.set('alpha', 0.95)
configs.set('mainPath', '/data/RecordReplay/')
configs.set('resultsFolder', 'ReplayDumps/')
configs.set('logsPath', 'logs/')
configs.set('analyzerLog', 'analyzerLog.log')
configs.read_args(sys.argv)
configs.check_for(['analyzerPort'])
PRINT_ACTION('Configuring paths', 0)
configs.set('resultsFolder', configs.get('mainPath') + configs.get('resultsFolder'))
configs.set('logsPath', configs.get('mainPath') + configs.get('logsPath'))
configs.set('analyzerLog', configs.get('logsPath') + configs.get('analyzerLog'))
PRINT_ACTION('Setting up logging', 0)
if not os.path.isdir(configs.get('logsPath')):
os.makedirs(configs.get('logsPath'))
createRotatingLog(logger, configs.get('analyzerLog'))
# install_mp_handler()
configs.show_all()
getPacketMetaInfo()
# first db connection sometimes fails
# Commented out DB as DPI analysis is currently disabled
# attempts = 1
# maxAttempts = 10
# while attempts < maxAttempts:
# try:
# db = DB.DB()
# except Exception as e:
# LOG_ACTION(logger, 'Failed to connect to the database, retrying...')
# if attempts == maxAttempts - 1:
# raise e
# attempts += 1
# time.sleep(0.5 * attempts)
# else:
# break
LOG_ACTION(logger, 'Starting server. Configs: ' + str(configs), doPrint=False)
gevent.Greenlet.spawn(error_logger, Configs().get('errorsLog'))
g = gevent.Greenlet.spawn(jobDispatcher, POSTq)
g.start()
if configs.is_given('analyzer_tls_port') and configs.is_given('certs_folder'):
certs_folder = configs.get('certs_folder')
cert_location = os.path.join(certs_folder, 'server.crt')
key_location = os.path.join(certs_folder, 'server.key')
if os.path.isfile(cert_location) and os.path.isfile(key_location):
try:
https_application = tornado.web.Application([(r"/Results", Results),])
https_application.settings = {'GETpool': gevent.pool.Pool(),
'POSTpool': gevent.pool.Pool(),
'debug': True,
}
ssl_options = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_options.load_cert_chain(cert_location, key_location)
ssl_options.verify_mode = ssl.CERT_NONE
https_application.listen(configs.get('analyzer_tls_port'), ssl_options=ssl_options)
print("[https] listening on port %d" % configs.get('analyzer_tls_port'))
except:
print("There was an error launching the https server")
else:
print("Https keys not found, skipping https server")
else:
print("Missing https configuration, skipping https server")
application = tornado.web.Application([(r"/Results", Results),
])
application.settings = {'GETpool': gevent.pool.Pool(),
'POSTpool': gevent.pool.Pool(),
'debug': True,
}
application.listen(configs.get('analyzerPort'))
print("[http] listening on port %d" % configs.get('analyzerPort'))
start_http_server(9091)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| 40.077325 | 172 | 0.611201 |
71cfffc9c5a2db3409d002ebd2f5ecab5b28c167 | 1,489 | py | Python | tests/test_shift_intensityd.py | madil90/MONAI | 2f1c7a5d1b47c8dd21681dbe1b67213aa3278cd7 | ["Apache-2.0"] | 1 | 2021-08-20T01:54:26.000Z | 2021-08-20T01:54:26.000Z | tests/test_shift_intensityd.py | madil90/MONAI | 2f1c7a5d1b47c8dd21681dbe1b67213aa3278cd7 | ["Apache-2.0"] | null | null | null | tests/test_shift_intensityd.py | madil90/MONAI | 2f1c7a5d1b47c8dd21681dbe1b67213aa3278cd7 | ["Apache-2.0"] | null | null | null |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from monai.transforms import IntensityStatsd, ShiftIntensityd
from tests.utils import NumpyImageTestCase2D
class TestShiftIntensityd(NumpyImageTestCase2D):
def test_value(self):
key = "img"
shifter = ShiftIntensityd(keys=[key], offset=1.0)
result = shifter({key: self.imt})
expected = self.imt + 1.0
np.testing.assert_allclose(result[key], expected)
def test_factor(self):
key = "img"
stats = IntensityStatsd(keys=key, ops="max", key_prefix="orig")
shifter = ShiftIntensityd(keys=[key], offset=1.0, factor_key=["orig_max"])
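        # With factor_key, the applied shift is offset * the "orig_max"
        # statistic that IntensityStatsd computes and stores with the image
        # meta data, so the expected result below is imt + 1.0 * max(imt).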
data = {key: self.imt, key + "_meta_dict": {"affine": None}}
result = shifter(stats(data))
expected = self.imt + 1.0 * np.nanmax(self.imt)
np.testing.assert_allclose(result[key], expected)
if __name__ == "__main__":
unittest.main()
| 36.317073 | 82 | 0.701813 |
fe0d3912343da8cc3e72cad486a38a82ef3060b4 | 3,321 | py | Python | nmma/pbilby/utils.py | DavidIbarr/nmma | 109fdd57add52cfea3553df8346981d6a117a7e7 | ["MIT"] | 1 | 2022-02-12T18:06:50.000Z | 2022-02-12T18:06:50.000Z | nmma/pbilby/utils.py | DavidIbarr/nmma | 109fdd57add52cfea3553df8346981d6a117a7e7 | ["MIT"] | 10 | 2022-02-08T18:18:22.000Z | 2022-03-10T13:11:03.000Z | nmma/pbilby/utils.py | DavidIbarr/nmma | 109fdd57add52cfea3553df8346981d6a117a7e7 | ["MIT"] | 12 | 2022-02-07T21:15:16.000Z | 2022-03-31T18:26:06.000Z |
import bilby_pipe
import logging
import os
def turn_off_forbidden_option(input, forbidden_option, prog):
# NOTE Only support boolean option
if getattr(input, forbidden_option, False):
logger = logging.getLogger(prog)
logger.info(f"Turning off {forbidden_option}")
setattr(input, forbidden_option, False)
def write_complete_config_file(parser, args, inputs, prog):
args_dict = vars(args).copy()
for key, val in args_dict.items():
if key == "label":
continue
if isinstance(val, str):
if os.path.isfile(val) or os.path.isdir(val):
setattr(args, key, os.path.abspath(val))
if isinstance(val, list):
if isinstance(val[0], str):
setattr(args, key, f"[{', '.join(val)}]")
args.sampler_kwargs = str(inputs.sampler_kwargs)
parser.write_to_file(
filename=inputs.complete_ini_file,
args=args,
overwrite=False,
include_description=False,
)
logger = logging.getLogger(prog)
logger.info(f"Complete ini written: {inputs.complete_ini_file}")
# The following code is modified from bilby_pipe.utils
def setup_logger(prog_name, outdir=None, label=None, log_level="INFO"):
"""Setup logging output: call at the start of the script to use
Parameters
----------
prog_name: str
Name of the program
outdir, label: str
If supplied, write the logging output to outdir/label.log
log_level: str, optional
['debug', 'info', 'warning']
Either a string from the list above, or an integer as specified
in https://docs.python.org/2/library/logging.html#logging-levels
"""
if isinstance(log_level, str):
try:
level = getattr(logging, log_level.upper())
except AttributeError:
raise ValueError(f"log_level {log_level} not understood")
else:
level = int(log_level)
logger = logging.getLogger(prog_name)
logger.propagate = False
logger.setLevel(level)
streams = [isinstance(h, logging.StreamHandler) for h in logger.handlers]
if len(streams) == 0 or not all(streams):
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(
logging.Formatter(
"%(asctime)s %(name)s %(levelname)-8s: %(message)s", datefmt="%H:%M"
)
)
stream_handler.setLevel(level)
logger.addHandler(stream_handler)
if any([isinstance(h, logging.FileHandler) for h in logger.handlers]) is False:
if label:
if outdir:
bilby_pipe.utils.check_directory_exists_and_if_not_mkdir(outdir)
else:
outdir = "."
log_file = f"{outdir}/{label}.log"
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(
logging.Formatter(
"%(asctime)s %(levelname)-8s: %(message)s", datefmt="%H:%M"
)
)
file_handler.setLevel(level)
logger.addHandler(file_handler)
for handler in logger.handlers:
handler.setLevel(level)
# Initialize a logger for nmma_generation
setup_logger("nmma_generation")
# Initialize a logger for nmma_analysis
setup_logger("nmma_analysis")
| 32.881188 | 84 | 0.627823 |
69071cfdc4a036d6576e8a68c92a7e253dc44965 | 1,942 | py | Python | web/api/netcdf.py | wdias/export-netcdf-binary | 848cc28b617055d3dae2eda96da0abbecc283293 | [
"Apache-2.0"
] | null | null | null | web/api/netcdf.py | wdias/export-netcdf-binary | 848cc28b617055d3dae2eda96da0abbecc283293 | [
"Apache-2.0"
] | null | null | null | web/api/netcdf.py | wdias/export-netcdf-binary | 848cc28b617055d3dae2eda96da0abbecc283293 | [
"Apache-2.0"
] | null | null | null | import logging
from flask import Blueprint, request, Response, stream_with_context
import requests
from datetime import datetime, timedelta
bp = Blueprint('ascii_grid', __name__)
logger = logging.getLogger(__name__)
DATE_TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
ADAPTER_GRID = 'http://adapter-grid.default.svc.cluster.local'
@bp.route('/export/netcdf/binary/<string:timeseries_id>/<string:request_name>', methods=['GET'])
def get_export_netcdf_binary(timeseries_id: str, request_name: str):
assert timeseries_id, 'Timeseries ID should be provided'
assert request_name.endswith('.nc'), 'Request file name should ends with .nc'
start = request.args.get('start')
assert start, 'start date time should be provide'
start_time = datetime.strptime(start, DATE_TIME_FORMAT)
end = request.args.get('end')
if end:
end_time = datetime.strptime(end, DATE_TIME_FORMAT)
else:
end_time = start_time + timedelta(hours=24)
query_string = f"start={start}&end={end_time.strftime(DATE_TIME_FORMAT)}"
logger.info(f'>> {timeseries_id}, {request_name}, {query_string}')
# Solution via: 1. https://stackoverflow.com/a/5166423/1461060
# combining 2. https://stackoverflow.com/a/39217788/1461060
return Response(requests.get(f'{ADAPTER_GRID}/timeseries/{timeseries_id}/{request_name}?{query_string}', stream=True), direct_passthrough=True, mimetype='application/x-netcdf4')
    # Solution by: 1. https://stackoverflow.com/a/16696317/1461060
# 2. http://flask.pocoo.org/docs/1.0/patterns/streaming/#streaming-with-context
# def generate():
# with requests.get(f'{ADAPTER_GRID}/timeseries/{timeseries_id}/{request_name}', stream=True) as r:
# for chunk in r.iter_content(chunk_size=1024):
# if chunk: # filter out keep-alive new chunks
# yield chunk
#
# return Response(stream_with_context(generate()), direct_passthrough=True)
| 47.365854 | 181 | 0.714727 |
75dc6b28dd39fba164cd6fc95ba71ba244b2e4f7 | 2,930 | py | Python | examples/ode/case_4_model.py | KennedyPutraKusumo/py-DED | c5742c29cae66542960060f19d65b446d532b477 | [
"MIT"
] | 2 | 2020-05-19T14:06:41.000Z | 2021-09-09T16:11:53.000Z | examples/ode/case_4_model.py | KennedyPutraKusumo/py-DED | c5742c29cae66542960060f19d65b446d532b477 | [
"MIT"
] | 2 | 2020-04-28T02:36:36.000Z | 2021-08-23T09:36:13.000Z | examples/ode/case_4_model.py | KennedyPutraKusumo/py-DED | c5742c29cae66542960060f19d65b446d532b477 | [
"MIT"
] | 2 | 2020-10-10T18:43:27.000Z | 2020-11-22T19:41:05.000Z | from pyomo import environ as po
from pyomo import dae as pod
from matplotlib import pyplot as plt
import numpy as np
import logging
logging.getLogger("pyomo.core").setLevel(logging.ERROR)
def create_model(spt):
norm_spt = spt / np.max(spt)
m = po.ConcreteModel()
""" Sets """
m.i = po.Set(initialize=["A", "B", "C"])
m.j = po.Set(initialize=[1, 2])
""" Time Components """
m.t = pod.ContinuousSet(bounds=(0, 1), initialize=norm_spt)
m.tau = po.Var(bounds=(0, None))
""" Concentrations """
m.c = po.Var(m.t, m.i, bounds=(0, None))
m.dcdt = pod.DerivativeVar(m.c, wrt=m.t)
""" Experimental Variables """
m.f_in = po.Var(bounds=(0, 10))
""" Reaction Parameters """
s = {
("A", 1): -1,
("A", 2): 0,
("B", 1): 1,
("B", 2): -1,
("C", 1): 0,
("C", 2): 1,
}
m.s = po.Param(m.i, m.j, initialize=s)
c_in = {
"A": 1,
"B": 0,
"C": 0,
}
m.c_in = po.Param(m.i, initialize=c_in)
""" Model Parameters """
m.k = po.Var(m.j, bounds=(0, None))
""" Reaction Rates """
m.r = po.Var(m.t, m.j)
""" Model Equations """
def _bal(m, t, i):
return m.dcdt[t, i] / m.tau == m.f_in * m.c_in[i] + sum(m.s[i, j] * m.r[t, j] for j in m.j)
m.bal = po.Constraint(m.t, m.i, rule=_bal)
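    # Note: with the scaled time t in [0, 1], the balance above is equivalent to
    #   dc_i/dt = tau * (f_in * c_in_i + sum_j s_ij * r_j),  i in {A, B, C},
    # for the series reactions A -> B (r_1 = k_1 * c_A) and B -> C (r_2 = k_2 * c_B)
    # defined in _r_def below.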
def _r_def(m, t, j):
if j == 1:
return m.r[t, j] == m.k[j] * m.c[t, "A"]
elif j == 2:
return m.r[t, j] == m.k[j] * m.c[t, "B"]
else:
raise SyntaxError("Unrecognized reaction index, please check the model.")
m.r_def = po.Constraint(m.t, m.j, rule=_r_def)
return m
def simulate(ti_controls, sampling_times, model_parameters):
m = create_model(sampling_times)
m.tau.fix(max(sampling_times))
m.f_in.fix(ti_controls[0])
m.k[1].fix(model_parameters[0])
m.k[2].fix(model_parameters[1])
m.c[0, "A"].fix(1)
m.c[0, "B"].fix(0)
m.c[0, "C"].fix(0)
simulator = pod.Simulator(m, package="casadi")
t, profile = simulator.simulate(integrator="idas")
simulator.initialize_model()
c = np.array([[m.c[t, i].value for t in m.t] for i in m.i])
if False:
plt.plot(t, profile)
return c.T
if __name__ == '__main__':
""" Run Simulation """
tic = [0]
spt = np.linspace(0, 10, 101)
mp = [1, 1]
c = simulate(
ti_controls=tic,
sampling_times=spt,
model_parameters=mp,
)
""" Plot Results """
fig = plt.figure()
axes = fig.add_subplot(111)
axes.plot(
spt,
c[:, 0],
label=r"$c_A$",
)
axes.plot(
spt,
c[:, 1],
label=r"$c_B$",
)
axes.plot(
spt,
c[:, 2],
label=r"$c_C$",
)
axes.legend()
axes.set_xlabel("Time (hour)")
axes.set_ylabel("Concentration (mol/L)")
fig.tight_layout()
plt.show()
| 23.253968 | 99 | 0.516382 |
f5a9bb5517831a981a3d118ec89e3586ea5938c9 | 20,535 | py | Python | ceres/server/ws_connection.py | yjiangnan/ceres-combineharvester | ed32d5e2564cd0082fa5bf5733e707f06abc2045 | [
"Apache-2.0"
] | null | null | null | ceres/server/ws_connection.py | yjiangnan/ceres-combineharvester | ed32d5e2564cd0082fa5bf5733e707f06abc2045 | [
"Apache-2.0"
] | null | null | null | ceres/server/ws_connection.py | yjiangnan/ceres-combineharvester | ed32d5e2564cd0082fa5bf5733e707f06abc2045 | [
"Apache-2.0"
] | null | null | null | import asyncio
import logging
import time
import traceback
from typing import Any, Callable, Dict, List, Optional
from aiohttp import WSCloseCode, WSMessage, WSMsgType
from ceres.cmds.init_funcs import chia_full_version_str
from ceres.protocols.protocol_message_types import ProtocolMessageTypes
from ceres.protocols.shared_protocol import Capability, Handshake
from ceres.server.outbound_message import Message, NodeType, make_msg
from ceres.server.rate_limits import RateLimiter
from ceres.types.blockchain_format.sized_bytes import bytes32
from ceres.types.peer_info import PeerInfo
from ceres.util.errors import Err, ProtocolError
from ceres.util.ints import uint8, uint16
# Each message is prepended with LENGTH_BYTES bytes specifying the length
from ceres.util.network import class_for_type, is_localhost
# Max size 2^(8*4) which is around 4GiB
LENGTH_BYTES: int = 4
class WSChiaConnection:
"""
Represents a connection to another node. Local host and port are ours, while peer host and
port are the host and port of the peer that we are connected to. Node_id and connection_type are
set after the handshake is performed in this connection.
"""
def __init__(
self,
local_type: NodeType,
ws: Any, # Websocket
server_port: int,
log: logging.Logger,
is_outbound: bool,
is_feeler: bool, # Special type of connection, that disconnects after the handshake.
peer_host,
incoming_queue,
close_callback: Callable,
peer_id,
inbound_rate_limit_percent: int,
outbound_rate_limit_percent: int,
close_event=None,
session=None,
):
# Local properties
self.ws: Any = ws
self.local_type = local_type
self.local_port = server_port
# Remote properties
self.peer_host = peer_host
peername = self.ws._writer.transport.get_extra_info("peername")
if peername is None:
raise ValueError(f"Was not able to get peername from {self.peer_host}")
connection_port = peername[1]
self.peer_port = connection_port
self.peer_server_port: Optional[uint16] = None
self.peer_node_id = peer_id
self.log = log
# connection properties
self.is_outbound = is_outbound
self.is_feeler = is_feeler
# ChiaConnection metrics
self.creation_time = time.time()
self.bytes_read = 0
self.bytes_written = 0
self.last_message_time: float = 0
# Messaging
self.incoming_queue: asyncio.Queue = incoming_queue
self.outgoing_queue: asyncio.Queue = asyncio.Queue()
self.inbound_task: Optional[asyncio.Task] = None
self.outbound_task: Optional[asyncio.Task] = None
self.active: bool = False # once handshake is successful this will be changed to True
self.close_event: asyncio.Event = close_event
self.session = session
self.close_callback = close_callback
self.pending_requests: Dict[bytes32, asyncio.Event] = {}
self.pending_timeouts: Dict[bytes32, asyncio.Task] = {}
self.request_results: Dict[bytes32, Message] = {}
self.closed = False
self.connection_type: Optional[NodeType] = None
if is_outbound:
self.request_nonce: uint16 = uint16(0)
else:
# Different nonce to reduce chances of overlap. Each peer will increment the nonce by one for each
            # request. The receiving peer (not is_outbound) will use 2^15 to 2^16 - 1.
self.request_nonce = uint16(2 ** 15)
# This means that even if the other peer's boundaries for each minute are not aligned, we will not
# disconnect. Also it allows a little flexibility.
self.outbound_rate_limiter = RateLimiter(incoming=False, percentage_of_limit=outbound_rate_limit_percent)
self.inbound_rate_limiter = RateLimiter(incoming=True, percentage_of_limit=inbound_rate_limit_percent)
# async def perform_handshake(self, network_id: str, protocol_version: str, server_port: int, local_type: NodeType):
async def perform_handshake(self, network_id: str, protocol_version: str, version: str, server_port: int, local_type: NodeType):
# version = chia_full_version_str()
if version is None:
version = chia_full_version_str()
if self.is_outbound:
outbound_handshake = make_msg(
ProtocolMessageTypes.handshake,
Handshake(
network_id,
protocol_version,
# chia_full_version_str(),
version,
uint16(server_port),
uint8(local_type.value),
[(uint16(Capability.BASE.value), "1")],
),
)
assert outbound_handshake is not None
await self._send_message(outbound_handshake)
inbound_handshake_msg = await self._read_one_message()
if inbound_handshake_msg is None:
self.log.info(f'INVALID_HANDSHAKE: inbound_handshake_msg: {inbound_handshake_msg}')
raise ProtocolError(Err.INVALID_HANDSHAKE)
inbound_handshake = Handshake.from_bytes(inbound_handshake_msg.data)
# Handle case of invalid ProtocolMessageType
try:
message_type: ProtocolMessageTypes = ProtocolMessageTypes(inbound_handshake_msg.type)
except Exception:
self.log.info(f'INVALID_HANDSHAKE 1: inbound_handshake_msg: {inbound_handshake_msg}')
raise ProtocolError(Err.INVALID_HANDSHAKE)
if message_type != ProtocolMessageTypes.handshake:
self.log.info(f'INVALID_HANDSHAKE 2: inbound_handshake_msg: {inbound_handshake_msg}')
raise ProtocolError(Err.INVALID_HANDSHAKE)
if inbound_handshake.network_id != network_id:
self.log.info(f'INVALID_HANDSHAKE 3: inbound_handshake_msg: {inbound_handshake_msg}')
raise ProtocolError(Err.INCOMPATIBLE_NETWORK_ID)
self.peer_server_port = inbound_handshake.server_port
self.connection_type = NodeType(inbound_handshake.node_type)
else:
try:
message = await self._read_one_message()
except Exception:
raise ProtocolError(Err.INVALID_HANDSHAKE)
if message is None:
raise ProtocolError(Err.INVALID_HANDSHAKE)
# Handle case of invalid ProtocolMessageType
try:
message_type = ProtocolMessageTypes(message.type)
except Exception:
raise ProtocolError(Err.INVALID_HANDSHAKE)
if message_type != ProtocolMessageTypes.handshake:
raise ProtocolError(Err.INVALID_HANDSHAKE)
inbound_handshake = Handshake.from_bytes(message.data)
if inbound_handshake.network_id != network_id:
raise ProtocolError(Err.INCOMPATIBLE_NETWORK_ID)
outbound_handshake = make_msg(
ProtocolMessageTypes.handshake,
Handshake(
network_id,
protocol_version,
# chia_full_version_str(),
version,
uint16(server_port),
uint8(local_type.value),
[(uint16(Capability.BASE.value), "1")],
),
)
await self._send_message(outbound_handshake)
self.peer_server_port = inbound_handshake.server_port
self.connection_type = NodeType(inbound_handshake.node_type)
self.outbound_task = asyncio.create_task(self.outbound_handler())
self.inbound_task = asyncio.create_task(self.inbound_handler())
return True
async def close(self, ban_time: int = 0, ws_close_code: WSCloseCode = WSCloseCode.OK, error: Optional[Err] = None):
"""
        Closes the connection, and finally calls the close_callback on the server, so the connection gets removed
from the global list.
"""
if self.closed:
return None
self.closed = True
if error is None:
message = b""
else:
message = str(int(error.value)).encode("utf-8")
try:
if self.inbound_task is not None:
self.inbound_task.cancel()
if self.outbound_task is not None:
self.outbound_task.cancel()
if self.ws is not None and self.ws._closed is False:
await self.ws.close(code=ws_close_code, message=message)
if self.session is not None:
await self.session.close()
if self.close_event is not None:
self.close_event.set()
self.cancel_pending_timeouts()
except Exception:
error_stack = traceback.format_exc()
self.log.warning(f"Exception closing socket: {error_stack}")
self.close_callback(self, ban_time)
raise
self.close_callback(self, ban_time)
def cancel_pending_timeouts(self):
for _, task in self.pending_timeouts.items():
task.cancel()
async def outbound_handler(self):
try:
while not self.closed:
msg = await self.outgoing_queue.get()
if msg is not None:
await self._send_message(msg)
except asyncio.CancelledError:
pass
except BrokenPipeError as e:
self.log.warning(f"{e} {self.peer_host}")
except ConnectionResetError as e:
self.log.warning(f"{e} {self.peer_host}")
except Exception as e:
error_stack = traceback.format_exc()
self.log.error(f"Exception: {e} with {self.peer_host}")
self.log.error(f"Exception Stack: {error_stack}")
async def inbound_handler(self):
try:
while not self.closed:
message: Message = await self._read_one_message()
if message is not None:
if message.id in self.pending_requests:
self.request_results[message.id] = message
event = self.pending_requests[message.id]
event.set()
else:
await self.incoming_queue.put((message, self))
else:
continue
except asyncio.CancelledError:
self.log.debug("Inbound_handler task cancelled")
except Exception as e:
error_stack = traceback.format_exc()
self.log.error(f"Exception: {e}")
self.log.error(f"Exception Stack: {error_stack}")
async def send_message(self, message: Message):
"""Send message sends a message with no tracking / callback."""
if self.closed:
return None
await self.outgoing_queue.put(message)
def __getattr__(self, attr_name: str):
# TODO KWARGS
async def invoke(*args, **kwargs):
timeout = 60
if "timeout" in kwargs:
timeout = kwargs["timeout"]
attribute = getattr(class_for_type(self.connection_type), attr_name, None)
if attribute is None:
raise AttributeError(f"Node type {self.connection_type} does not have method {attr_name}")
msg = Message(uint8(getattr(ProtocolMessageTypes, attr_name).value), None, args[0])
request_start_t = time.time()
result = await self.create_request(msg, timeout)
self.log.debug(
f"Time for request {attr_name}: {self.get_peer_logging()} = {time.time() - request_start_t}, "
f"None? {result is None}"
)
if result is not None:
ret_attr = getattr(class_for_type(self.local_type), ProtocolMessageTypes(result.type).name, None)
req_annotations = ret_attr.__annotations__
req = None
for key in req_annotations:
if key == "return" or key == "peer":
continue
else:
req = req_annotations[key]
assert req is not None
result = req.from_bytes(result.data)
return result
return invoke
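    # Illustrative use of the __getattr__ dispatch above (names are examples, not a
    # guaranteed API): any ProtocolMessageTypes member can be awaited as if it were
    # a method of the connection, e.g.
    #   response = await connection.request_peers(RequestPeers(), timeout=30)
    # The attribute name selects the outgoing message type and the decoded reply
    # object (if any) is returned.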
async def create_request(self, message_no_id: Message, timeout: int) -> Optional[Message]:
"""Sends a message and waits for a response."""
if self.closed:
return None
        # We will wait for this event; it will be set either by the response or by the timeout
event = asyncio.Event()
# The request nonce is an integer between 0 and 2**16 - 1, which is used to match requests to responses
# If is_outbound, 0 <= nonce < 2^15, else 2^15 <= nonce < 2^16
request_id = self.request_nonce
if self.is_outbound:
self.request_nonce = uint16(self.request_nonce + 1) if self.request_nonce != (2 ** 15 - 1) else uint16(0)
else:
self.request_nonce = (
uint16(self.request_nonce + 1) if self.request_nonce != (2 ** 16 - 1) else uint16(2 ** 15)
)
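        # Example of the id allocation above: an outbound connection hands out ids
        # 0, 1, 2, ... and wraps back to 0 after 2**15 - 1, while the inbound side
        # hands out 2**15, 2**15 + 1, ... and wraps back to 2**15 after 2**16 - 1,
        # so the two ranges never collide on a single connection.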
message = Message(message_no_id.type, request_id, message_no_id.data)
self.pending_requests[message.id] = event
await self.outgoing_queue.put(message)
# If the timeout passes, we set the event
async def time_out(req_id, req_timeout):
try:
await asyncio.sleep(req_timeout)
if req_id in self.pending_requests:
self.pending_requests[req_id].set()
except asyncio.CancelledError:
if req_id in self.pending_requests:
self.pending_requests[req_id].set()
raise
timeout_task = asyncio.create_task(time_out(message.id, timeout))
self.pending_timeouts[message.id] = timeout_task
await event.wait()
self.pending_requests.pop(message.id)
result: Optional[Message] = None
if message.id in self.request_results:
result = self.request_results[message.id]
assert result is not None
self.log.debug(f"<- {ProtocolMessageTypes(result.type).name} from: {self.peer_host}:{self.peer_port}")
self.request_results.pop(result.id)
return result
async def reply_to_request(self, response: Message):
if self.closed:
return None
await self.outgoing_queue.put(response)
async def send_messages(self, messages: List[Message]):
if self.closed:
return None
for message in messages:
await self.outgoing_queue.put(message)
async def _wait_and_retry(self, msg: Message, queue: asyncio.Queue):
try:
await asyncio.sleep(1)
await queue.put(msg)
except Exception as e:
self.log.debug(f"Exception {e} while waiting to retry sending rate limited message")
return None
async def _send_message(self, message: Message):
encoded: bytes = bytes(message)
size = len(encoded)
assert len(encoded) < (2 ** (LENGTH_BYTES * 8))
if not self.outbound_rate_limiter.process_msg_and_check(message):
if not is_localhost(self.peer_host):
self.log.debug(
f"Rate limiting ourselves. message type: {ProtocolMessageTypes(message.type).name}, "
f"peer: {self.peer_host}"
)
# TODO: fix this special case. This function has rate limits which are too low.
if ProtocolMessageTypes(message.type) != ProtocolMessageTypes.respond_peers:
asyncio.create_task(self._wait_and_retry(message, self.outgoing_queue))
return None
else:
self.log.debug(
f"Not rate limiting ourselves. message type: {ProtocolMessageTypes(message.type).name}, "
f"peer: {self.peer_host}"
)
await self.ws.send_bytes(encoded)
#self.log.debug(f"-> {ProtocolMessageTypes(message.type).name} to peer {self.peer_host} {self.peer_node_id}")
self.bytes_written += size
async def _read_one_message(self) -> Optional[Message]:
try:
message: WSMessage = await self.ws.receive(30)
except asyncio.TimeoutError:
# self.ws._closed if we didn't receive a ping / pong
if self.ws._closed:
asyncio.create_task(self.close())
await asyncio.sleep(3)
return None
return None
if self.connection_type is not None:
connection_type_str = NodeType(self.connection_type).name.lower()
else:
connection_type_str = ""
if message.type == WSMsgType.CLOSING:
self.log.debug(
f"Closing connection to {connection_type_str} {self.peer_host}:"
f"{self.peer_server_port}/"
f"{self.peer_port}"
)
asyncio.create_task(self.close())
await asyncio.sleep(3)
elif message.type == WSMsgType.CLOSE:
self.log.debug(
f"Peer closed connection {connection_type_str} {self.peer_host}:"
f"{self.peer_server_port}/"
f"{self.peer_port}"
)
asyncio.create_task(self.close())
await asyncio.sleep(3)
elif message.type == WSMsgType.CLOSED:
if not self.closed:
asyncio.create_task(self.close())
await asyncio.sleep(3)
return None
elif message.type == WSMsgType.BINARY:
data = message.data
full_message_loaded: Message = Message.from_bytes(data)
self.bytes_read += len(data)
self.last_message_time = time.time()
try:
message_type = ProtocolMessageTypes(full_message_loaded.type).name
except Exception:
message_type = "Unknown"
if not self.inbound_rate_limiter.process_msg_and_check(full_message_loaded):
if self.local_type == NodeType.FULL_NODE and not is_localhost(self.peer_host):
self.log.error(
f"Peer has been rate limited and will be disconnected: {self.peer_host}, "
f"message: {message_type}"
)
# Only full node disconnects peers, to prevent abuse and crashing timelords, farmers, etc
asyncio.create_task(self.close(300))
await asyncio.sleep(3)
return None
else:
self.log.warning(
f"Peer surpassed rate limit {self.peer_host}, message: {message_type}, "
f"port {self.peer_port} but not disconnecting"
)
return full_message_loaded
return full_message_loaded
elif message.type == WSMsgType.ERROR:
self.log.error(f"WebSocket Error: {message}")
if message.data.code == WSCloseCode.MESSAGE_TOO_BIG:
asyncio.create_task(self.close(300))
else:
asyncio.create_task(self.close())
await asyncio.sleep(3)
else:
self.log.error(f"Unexpected WebSocket message type: {message}")
asyncio.create_task(self.close())
await asyncio.sleep(3)
return None
def get_peer_info(self) -> Optional[PeerInfo]:
result = self.ws._writer.transport.get_extra_info("peername")
if result is None:
return None
connection_host = result[0]
port = self.peer_server_port if self.peer_server_port is not None else self.peer_port
return PeerInfo(connection_host, port)
def get_peer_logging(self) -> PeerInfo:
info: Optional[PeerInfo] = self.get_peer_info()
if info is None:
# in this case, we will use self.peer_host which is friendlier for logging
port = self.peer_server_port if self.peer_server_port is not None else self.peer_port
return PeerInfo(self.peer_host, port)
else:
return info
| 41.737805 | 132 | 0.606623 |
370f9e65b1ed7d8b9651d10c3bd27966e2683fe5 | 584 | py | Python | src/blog/urls.py | pv45412/Try-Django | f266d05849b1924c2616ba03641626356e0e0c82 | [
"MIT"
] | 1,110 | 2018-07-02T21:28:21.000Z | 2022-03-29T11:28:23.000Z | src/blog/urls.py | pv45412/Try-Django | f266d05849b1924c2616ba03641626356e0e0c82 | [
"MIT"
] | 5 | 2018-11-06T20:54:44.000Z | 2021-08-07T08:37:51.000Z | src/blog/urls.py | pv45412/Try-Django | f266d05849b1924c2616ba03641626356e0e0c82 | [
"MIT"
] | 876 | 2018-07-04T08:50:47.000Z | 2022-03-31T03:50:47.000Z | from django.urls import path
from .views import (
ArticleCreateView,
ArticleDeleteView,
ArticleDetailView,
ArticleListView,
ArticleUpdateView,
)
app_name = 'articles'
urlpatterns = [
path('', ArticleListView.as_view(), name='article-list'),
path('create/', ArticleCreateView.as_view(), name='article-create'),
path('<int:id>/', ArticleDetailView.as_view(), name='article-detail'),
path('<int:id>/update/', ArticleUpdateView.as_view(), name='article-update'),
path('<int:id>/delete/', ArticleDeleteView.as_view(), name='article-delete'),
] | 30.736842 | 81 | 0.690068 |
d76cf122ac20c200f1c2dac18f29b19b7ee80a3a | 169 | py | Python | templado/admin.py | nnkps/templado | 660bb430b06d34286ecb9d21ae88279277bf92fc | [
"BSD-2-Clause"
] | null | null | null | templado/admin.py | nnkps/templado | 660bb430b06d34286ecb9d21ae88279277bf92fc | [
"BSD-2-Clause"
] | 3 | 2020-02-11T21:28:48.000Z | 2021-06-10T17:24:02.000Z | templado/admin.py | nnkps/templado | 660bb430b06d34286ecb9d21ae88279277bf92fc | [
"BSD-2-Clause"
] | 2 | 2015-08-14T12:49:31.000Z | 2015-08-19T21:17:56.000Z | from django.contrib import admin
# Register your models here.
from models import ReportTemplate, Report
admin.site.register(ReportTemplate)
admin.site.register(Report) | 24.142857 | 41 | 0.828402 |
9d7a5cb56504fb02890d7a1676e97c4179a0a642 | 445 | py | Python | kratos/apps/log/migrations/0004_log_complete_at.py | cipher-ops/backend-kts | 7ea54d7f56bcb0da54b901ac8f3cfbfbb0b12319 | [
"MIT"
] | 1 | 2020-11-30T09:53:40.000Z | 2020-11-30T09:53:40.000Z | kratos/apps/log/migrations/0004_log_complete_at.py | cipher-ops/backend-kts | 7ea54d7f56bcb0da54b901ac8f3cfbfbb0b12319 | [
"MIT"
] | null | null | null | kratos/apps/log/migrations/0004_log_complete_at.py | cipher-ops/backend-kts | 7ea54d7f56bcb0da54b901ac8f3cfbfbb0b12319 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-10-20 11:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('log', '0003_auto_20201019_1721'),
]
operations = [
migrations.AddField(
model_name='log',
name='complete_at',
field=models.DateTimeField(default='2020-10-16 15:07:03.547974'),
preserve_default=False,
),
]
| 22.25 | 77 | 0.6 |
36f17431a8fe25fc78d7e90856dcdd568bb873d2 | 1,374 | py | Python | geonode/geonode/qgis_server/migrations/0005_auto_20170823_0341.py | ttungbmt/BecaGIS_GeoPortal | 6c05f9fc020ec4ccf600ba2503a06c2231443920 | [
"MIT"
] | null | null | null | geonode/geonode/qgis_server/migrations/0005_auto_20170823_0341.py | ttungbmt/BecaGIS_GeoPortal | 6c05f9fc020ec4ccf600ba2503a06c2231443920 | [
"MIT"
] | null | null | null | geonode/geonode/qgis_server/migrations/0005_auto_20170823_0341.py | ttungbmt/BecaGIS_GeoPortal | 6c05f9fc020ec4ccf600ba2503a06c2231443920 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('qgis_server', '0004_auto_20170805_0223'),
]
operations = [
migrations.AlterField(
model_name='qgisserverlayer',
name='default_style',
field=models.ForeignKey(related_name='layer_default_style', on_delete=models.SET_NULL,
default=None, to='qgis_server.QGISServerStyle', null=True),
),
]
| 33.512195 | 98 | 0.618632 |
44c6f7905b30c7a63a5f28c0228ffd80da80085d | 48,729 | py | Python | floorplan_utils.py | marcotinacci/FloorNet | 9765dfb120ff2f586919823a06817e78ef13474b | [
"MIT"
] | 156 | 2018-04-05T11:57:51.000Z | 2022-03-17T11:04:25.000Z | floorplan_utils.py | liu115/FloorNet | f75dc2a933348b12db8f91a9f7e35cfe2dc080f2 | [
"MIT"
] | 23 | 2018-04-07T22:51:01.000Z | 2022-03-25T19:51:06.000Z | floorplan_utils.py | liu115/FloorNet | f75dc2a933348b12db8f91a9f7e35cfe2dc080f2 | [
"MIT"
] | 48 | 2018-04-07T11:37:53.000Z | 2022-01-03T21:36:16.000Z | import numpy as np
from skimage import measure
import cv2
import copy
from utils import *
NUM_WALL_CORNERS = 13
NUM_CORNERS = 21
CORNER_RANGES = {'wall': (0, 13), 'opening': (13, 17), 'icon': (17, 21)}
MAX_NUM_CORNERS = 300
NUM_FINAL_ICONS = 10
NUM_FINAL_ROOMS = 15
NUM_ICONS = 13
NUM_ROOMS = 16
HEIGHT=256
WIDTH=256
NUM_POINTS = 50000
NUM_INPUT_CHANNELS = 7
NUM_CHANNELS = [7, 64, 64, 64, 128, 256]
SIZES = [WIDTH, WIDTH // 2, WIDTH // 4, WIDTH // 8, WIDTH // 16, WIDTH // 32]
POINT_ORIENTATIONS = [[(2, ), (3, ), (0, ), (1, )], [(0, 3), (0, 1), (1, 2), (2, 3)], [(1, 2, 3), (0, 2, 3), (0, 1, 3), (0, 1, 2)], [(0, 1, 2, 3)]]
def getOrientationRanges(width, height):
orientationRanges = [[width, 0, 0, 0], [width, height, width, 0], [width, height, 0, height], [0, height, 0, 0]]
return orientationRanges
def getIconNames():
iconNames = []
iconLabelMap = getIconLabelMap()
for iconName, _ in iconLabelMap.iteritems():
iconNames.append(iconName)
continue
return iconNames
def getRoomLabelMap():
labelMap = {}
labelMap['living_room'] = 1
labelMap['kitchen'] = 2
labelMap['bedroom'] = 3
labelMap['bathroom'] = 4
labelMap['restroom'] = 4
labelMap['office'] = 3
labelMap['closet'] = 6
labelMap['balcony'] = 7
labelMap['corridor'] = 8
labelMap['dining_room'] = 9
labelMap['laundry_room'] = 10
labelMap['garage'] = 11
labelMap['recreation_room'] = 12
labelMap['stairs'] = 13
labelMap['other'] = 14
labelMap['wall'] = 15
return labelMap
def getLabelRoomMap():
labelMap = {}
labelMap[1] = 'living room'
labelMap[2] = 'kitchen'
labelMap[3] = 'bedroom'
labelMap[4] = 'bathroom'
labelMap[6] = 'closet'
labelMap[7] = 'balcony'
labelMap[8] = 'corridor'
labelMap[9] = 'dining room'
return labelMap
def getIconLabelMap():
labelMap = {}
labelMap['cooking_counter'] = 1
labelMap['bathtub'] = 2
labelMap['toilet'] = 3
labelMap['washing_basin'] = 4
labelMap['sofa'] = 5
labelMap['cabinet'] = 6
labelMap['bed'] = 7
labelMap['table'] = 8
labelMap['desk'] = 8
labelMap['refrigerator'] = 9
labelMap['TV'] = 0
labelMap['entrance'] = 0
labelMap['chair'] = 0
labelMap['door'] = 11
labelMap['window'] = 12
return labelMap
def getLabelIconMap():
labelMap = {}
labelMap[1] = 'cooking_counter'
labelMap[2] = 'bathtub'
labelMap[3] = 'toilet'
labelMap[4] = 'washing_basin'
labelMap[5] = 'sofa'
labelMap[6] = 'cabinet'
labelMap[7] = 'bed'
labelMap[8] = 'table'
labelMap[9] = 'refrigerator'
return labelMap
def getLabelMapNYU40():
labelMap = {}
labelMap[1] = 'wall'
labelMap[2] = 'floor'
labelMap[3] = 'cabinet'
labelMap[4] = 'bed'
labelMap[5] = 'chair'
labelMap[6] = 'sofa'
labelMap[7] = 'table'
labelMap[8] = 'door'
labelMap[9] = 'window'
labelMap[10] = 'bookshelf'
labelMap[11] = 'picture'
labelMap[12] = 'cooking_counter'
labelMap[13] = 'blinds'
labelMap[14] = 'desk'
labelMap[15] = 'shelf'
labelMap[16] = 'curtain'
labelMap[17] = 'dresser'
labelMap[18] = 'pillow'
labelMap[19] = 'mirror'
labelMap[20] = 'entrance' #mat
labelMap[21] = 'clothes'
labelMap[22] = 'ceiling'
labelMap[23] = 'book'
labelMap[24] = 'refrigerator'
labelMap[25] = 'TV'
labelMap[26] = 'paper'
labelMap[27] = 'towel'
labelMap[28] = 'shower_curtain'
labelMap[29] = 'box'
labelMap[30] = 'whiteboard'
labelMap[31] = 'person'
labelMap[32] = 'nightstand'
labelMap[33] = 'toilet'
labelMap[34] = 'washing_basin'
labelMap[35] = 'lamp'
labelMap[36] = 'bathtub'
labelMap[37] = 'bag'
labelMap[38] = 'otherprop'
labelMap[39] = 'otherstructure'
labelMap[40] = 'unannotated'
return labelMap
def getNYUScanNetMap():
labelMap = np.zeros(41, dtype=np.int32)
labelMap[1] = 1
labelMap[2] = 2
labelMap[3] = 19
labelMap[4] = 6
labelMap[5] = 3
labelMap[6] = 8
labelMap[7] = 4
labelMap[8] = 14
labelMap[9] = 15
labelMap[10] = 7
labelMap[11] = 18
labelMap[12] = 13
labelMap[13] = 12 #20 Blinds
labelMap[14] = 5
labelMap[15] = 7
labelMap[16] = 12
labelMap[17] = 19
labelMap[18] = 20
labelMap[19] = 20
labelMap[20] = 20
labelMap[21] = 20
labelMap[22] = 1
labelMap[23] = 20
labelMap[24] = 17
labelMap[25] = 20
labelMap[26] = 20
labelMap[27] = 20
labelMap[28] = 16
labelMap[29] = 20
labelMap[30] = 20
labelMap[31] = 20
labelMap[32] = 20
labelMap[33] = 11
labelMap[34] = 9
labelMap[35] = 20
labelMap[36] = 10
labelMap[37] = 20
labelMap[38] = 20
labelMap[39] = 20
labelMap[40] = 0
return labelMap
def getMatterportClassMap():
classMap = {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
'e': 5,
'f': 6,
'g': 7,
'h': 8,
'i': 9,
'j': 10,
'k': 11,
'l': 12,
'm': 13,
'n': 14,
'o': 15,
'p': 16,
'r': 17,
's': 18,
't': 19,
'u': 20,
'v': 21,
'w': 22,
'x': 23,
'y': 24,
'z': 25,
'B': 26,
'C': 27,
'D': 28,
'S': 29,
'Z': 30
}
return classMap
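# Convention used by calcLineDim / calcLineDirection below: 0 means the line is
# (mostly) horizontal, i.e. its x-extent dominates, and 1 means it is (mostly)
# vertical.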
def calcLineDim(points, line):
point_1 = points[line[0]]
point_2 = points[line[1]]
if abs(point_2[0] - point_1[0]) > abs(point_2[1] - point_1[1]):
lineDim = 0
else:
lineDim = 1
return lineDim
def calcLineDirection(line):
return int(abs(line[0][0] - line[1][0]) < abs(line[0][1] - line[1][1]))
def calcLineDirectionPoints(points, line):
point_1 = points[line[0]]
point_2 = points[line[1]]
if isinstance(point_1[0], tuple):
point_1 = point_1[0]
pass
if isinstance(point_2[0], tuple):
point_2 = point_2[0]
pass
return calcLineDirection((point_1, point_2))
def pointDistance(point_1, point_2):
#return np.sqrt(pow(point_1[0] - point_2[0], 2) + pow(point_1[1] - point_2[1], 2))
return max(abs(point_1[0] - point_2[0]), abs(point_1[1] - point_2[1]))
def sortLines(lines):
newLines = []
for line in lines:
direction = calcLineDirection(line)
if line[0][direction] < line[1][direction]:
newLines.append((line[0], line[1]))
else:
newLines.append((line[1], line[0]))
pass
continue
return newLines
def lineRange(line):
direction = calcLineDirection(line)
fixedValue = (line[0][1 - direction] + line[1][1 - direction]) / 2
minValue = min(line[0][direction], line[1][direction])
maxValue = max(line[0][direction], line[1][direction])
return direction, fixedValue, minValue, maxValue
def findConnections(line_1, line_2, gap):
connection_1 = -1
connection_2 = -1
pointConnected = False
for c_1 in xrange(2):
if pointConnected:
break
for c_2 in xrange(2):
if pointDistance(line_1[c_1], line_2[c_2]) > gap:
continue
connection_1 = c_1
connection_2 = c_2
connectionPoint = ((line_1[c_1][0] + line_2[c_2][0]) / 2, (line_1[c_1][1] + line_2[c_2][1]) / 2)
pointConnected = True
break
continue
if pointConnected:
return [connection_1, connection_2], connectionPoint
direction_1, fixedValue_1, min_1, max_1 = lineRange(line_1)
direction_2, fixedValue_2, min_2, max_2 = lineRange(line_2)
if direction_1 == direction_2:
return [-1, -1], (0, 0)
#print(fixedValue_1, min_1, max_1, fixedValue_2, min_2, max_2)
if min(fixedValue_1, max_2) < max(fixedValue_1, min_2) - gap or min(fixedValue_2, max_1) < max(fixedValue_2, min_1) - gap:
return [-1, -1], (0, 0)
if abs(min_1 - fixedValue_2) <= gap:
return [0, 2], (fixedValue_2, fixedValue_1)
if abs(max_1 - fixedValue_2) <= gap:
return [1, 2], (fixedValue_2, fixedValue_1)
if abs(min_2 - fixedValue_1) <= gap:
return [2, 0], (fixedValue_2, fixedValue_1)
if abs(max_2 - fixedValue_1) <= gap:
return [2, 1], (fixedValue_2, fixedValue_1)
return [2, 2], (fixedValue_2, fixedValue_1)
def lines2Corners(lines, gap, getSingularCorners=False):
corners = []
lineConnections = []
for _ in xrange(len(lines)):
lineConnections.append({})
continue
connectionCornerMap = {}
connectionCornerMap[(1, 1)] = 4
connectionCornerMap[(0, 1)] = 5
connectionCornerMap[(0, 0)] = 6
connectionCornerMap[(1, 0)] = 7
connectionCornerMap[(2, 0)] = 8
connectionCornerMap[(1, 2)] = 9
connectionCornerMap[(2, 1)] = 10
connectionCornerMap[(0, 2)] = 11
connectionCornerMap[(2, 2)] = 12
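    # In the connection codes above, 0 and 1 refer to a wall's two endpoints and 2
    # means the junction lies in the interior of the wall, so endpoint/endpoint
    # pairs give the four L-shaped corner types (4-7), one interior code gives a
    # T-junction (8-11), and (2, 2) gives an X-junction (12).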
corners = []
for lineIndex_1, line_1 in enumerate(lines):
for lineIndex_2, line_2 in enumerate(lines):
if lineIndex_2 == lineIndex_1:
continue
connections, connectionPoint = findConnections(line_1, line_2, gap=gap)
if connections[0] == -1 and connections[1] == -1:
continue
if calcLineDirection(line_1) == calcLineDirection(line_2):
print('overlap', line_1, line_2, connections)
exit(1)
pass
if calcLineDirection(line_1) == 1:
continue
indices = [lineIndex_1, lineIndex_2]
#print(lineIndex_1, lineIndex_2, connections)
for c in xrange(2):
if connections[c] in [0, 1] and connections[c] in lineConnections[indices[c]]:
print('duplicate corner', line_1, line_2, connections)
exit(1)
pass
lineConnections[indices[c]][connections[c]] = True
continue
corners.append((connectionPoint, connectionCornerMap[tuple(connections)]))
continue
continue
if getSingularCorners:
singularCorners = []
for lineIndex, connections in enumerate(lineConnections):
if 0 not in connections:
print('single corner', lines[lineIndex], connections)
singularCorners.append((lineIndex, 0))
pass
if 1 not in connections:
print('single corner', lines[lineIndex], connections)
singularCorners.append((lineIndex, 1))
pass
continue
return corners, singularCorners
return corners
def drawWallMask(walls, width, height, thickness=3, indexed=False):
if indexed:
wallMask = np.full((height, width), -1, dtype=np.int32)
for wallIndex, wall in enumerate(walls):
cv2.line(wallMask, (int(wall[0][0]), int(wall[0][1])), (int(wall[1][0]), int(wall[1][1])), color=wallIndex, thickness=thickness)
continue
else:
wallMask = np.zeros((height, width), dtype=np.int32)
for wall in walls:
cv2.line(wallMask, (int(wall[0][0]), int(wall[0][1])), (int(wall[1][0]), int(wall[1][1])), color=1, thickness=thickness)
continue
wallMask = wallMask.astype(np.bool)
pass
return wallMask
def mergeLines(line_1, line_2):
direction_1, fixedValue_1, min_1, max_1 = lineRange(line_1)
direction_2, fixedValue_2, min_2, max_2 = lineRange(line_2)
fixedValue = (fixedValue_1 + fixedValue_2) / 2
if direction_1 == 0:
return [[min(min_1, min_2), fixedValue], [max(max_1, max_2), fixedValue]]
else:
return [[fixedValue, min(min_1, min_2)], [fixedValue, max(max_1, max_2)]]
return
def findIntersection(line_1, line_2):
direction_1, fixedValue_1, min_1, max_1 = lineRange(line_1)
direction_2, fixedValue_2, min_2, max_2 = lineRange(line_2)
if direction_1 == 0:
return (fixedValue_2, fixedValue_1)
else:
return (fixedValue_1, fixedValue_2)
return
def extendLine(line, point):
direction, fixedValue, min_value, max_value = lineRange(line)
if direction == 0:
return ((min(min_value, point[direction]), fixedValue), (max(max_value, point[direction]), fixedValue))
else:
return ((fixedValue, min(min_value, point[direction])), (fixedValue, max(max_value, point[direction])))
return
def divideWalls(walls):
horizontalWalls = []
verticalWalls = []
for wall in walls:
if calcLineDirection(wall) == 0:
horizontalWalls.append(wall)
else:
verticalWalls.append(wall)
pass
continue
return horizontalWalls, verticalWalls
def connectWalls(walls, roomSegmentation, gap=3):
width = roomSegmentation.shape[1]
height = roomSegmentation.shape[0]
roomBoundary = np.zeros(roomSegmentation.shape, dtype=np.bool)
for direction in xrange(2):
for shift in [-1, 1]:
roomBoundary = np.logical_or(roomBoundary, roomSegmentation != np.roll(roomSegmentation, shift, axis=direction))
continue
continue
roomBoundary = roomBoundary.astype(np.uint8)
roomBoundary[0] = roomBoundary[-1] = roomBoundary[:, 0] = roomBoundary[:, -1] = 0
uncoveredBoundary = roomBoundary.copy()
wallGroups = divideWalls(walls)
wallMasks = [drawWallMask(walls, width, height, indexed=True, thickness=gap * 2) for walls in wallGroups]
uncoveredBoundary[wallMasks[0] >= 0] = 0
uncoveredBoundary[wallMasks[1] >= 0] = 0
uncoveredBoundary = cv2.dilate(uncoveredBoundary, np.ones((3, 3)), iterations=gap)
components = measure.label(uncoveredBoundary, background=0)
connectedWalls = []
for walls, wallMask in zip(wallGroups, wallMasks):
newWalls = copy.deepcopy(walls)
invalidWallIndices = []
for label in xrange(components.min() + 1, components.max() + 1):
mask = components == label
wallIndices = np.unique(wallMask[mask]).tolist()
if -1 in wallIndices:
wallIndices.remove(-1)
pass
if len(wallIndices) != 2:
continue
wall_1 = newWalls[wallIndices[0]]
wall_2 = newWalls[wallIndices[1]]
direction_1, fixedValue_1, min_1, max_1 = lineRange(wall_1)
direction_2, fixedValue_2, min_2, max_2 = lineRange(wall_2)
if direction_1 == direction_2:
if abs(fixedValue_1 - fixedValue_2) < gap:
newWallIndex = len(newWalls)
wallMask[wallMask == wallIndices[0]] = newWallIndex
wallMask[wallMask == wallIndices[1]] = newWallIndex
newWall = mergeLines(wall_1, wall_2)
newWalls.append(newWall)
invalidWallIndices.append(wallIndices[0])
invalidWallIndices.append(wallIndices[1])
pass
pass
# else:
# print(wall_1, wall_2)
# ys, xs = mask.nonzero()
# newWall = [[xs.min(), ys.min()], [xs.max(), ys.max()]]
# newWallDirection = calcLineDirection(newWall)
# if newWallDirection != direction_1 and newWall[1][1 - newWallDirection] - newWall[0][1 - newWallDirection] < gap * 2 + 1:
# fixedValue = (newWall[1][1 - newWallDirection] + newWall[0][1 - newWallDirection]) / 2
# newWall[1][1 - newWallDirection] = newWall[0][1 - newWallDirection] = fixedValue
# newWalls.append(newWall)
# pass
# pass
# else:
# assert(False)
# intersectionPoint = findIntersection(wall_1, wall_2)
# newWalls[wallIndices[0]] = extendLine(wall_1, intersectionPoint)
# newWalls[wallIndices[1]] = extendLine(wall_2, intersectionPoint)
# pass
continue
#print(invalidWallIndices)
invalidWallIndices = sorted(invalidWallIndices, key=lambda x: -x)
for index in invalidWallIndices:
del newWalls[index]
continue
connectedWalls += newWalls
continue
newWalls = connectedWalls
wallMask = drawWallMask(newWalls, width, height, indexed=True, thickness=gap * 2)
uncoveredBoundary = roomBoundary.copy()
uncoveredBoundary[wallMask >= 0] = 0
uncoveredBoundary = cv2.dilate(uncoveredBoundary, np.ones((3, 3)), iterations=gap)
components = measure.label(uncoveredBoundary, background=0)
#cv2.imwrite('test/segmentation.png', drawSegmentationImage(components))
for label in xrange(components.min() + 1, components.max() + 1):
mask = components == label
#cv2.imwrite('test/mask_' + str(label) + '.png', drawMaskImage(mask))
wallIndices = np.unique(wallMask[mask]).tolist()
if -1 in wallIndices:
wallIndices.remove(-1)
pass
lines = [newWalls[index] for index in wallIndices]
#cv2.imwrite('test/mask_' + str(label) + '_segment.png', drawMaskImage(mask))
#cv2.imwrite('test/mask_' + str(label) + '.png', drawMaskImage(drawWallMask(lines, width, height)))
horizontalLines, verticalLines = divideWalls(lines)
if len(horizontalLines) > 0 and len(verticalLines) > 0:
continue
#print(label, wallIndices, len(horizontalLines), len(verticalLines))
for direction, lines in enumerate([horizontalLines, verticalLines]):
if len(lines) < 2:
continue
#wall_1 = lines[0]
#wall_2 = lines[1]
#print(wall_1, wall_2)
#direction_1, fixedValue_1, min_1, max_1 = lineRange(wall_1)
#direction_2, fixedValue_2, min_2, max_2 = lineRange(wall_2)
#values = [line[direction] for line in lines]
#print(wall_1, wall_2)
ys, xs = mask.nonzero()
newWall = [[xs.min(), ys.min()], [xs.max(), ys.max()]]
newWallDirection = calcLineDirection(newWall)
#print(label, wallIndices, newWallDirection, direction, newWall[1][1 - newWallDirection] - newWall[0][1 - newWallDirection])
if newWallDirection != direction and newWall[1][1 - newWallDirection] - newWall[0][1 - newWallDirection] <= (gap * 2 + 2) * 2:
fixedValue = (newWall[1][1 - newWallDirection] + newWall[0][1 - newWallDirection]) / 2
newWall[1][1 - newWallDirection] = newWall[0][1 - newWallDirection] = fixedValue
values = [line[0][newWallDirection] for line in lines] + [line[1][newWallDirection] for line in lines]
min_value = min(values)
max_value = max(values)
newWall[0][newWallDirection] = min_value
newWall[1][newWallDirection] = max_value
newWalls.append(newWall)
#print('new orthogonal wall', newWall)
pass
continue
continue
wallMask = drawWallMask(newWalls, width, height, indexed=True, thickness=gap * 2)
uncoveredBoundary = roomBoundary.copy()
uncoveredBoundary[wallMask >= 0] = 0
uncoveredBoundary = cv2.dilate(uncoveredBoundary, np.ones((3, 3)), iterations=gap)
components = measure.label(uncoveredBoundary, background=0)
for label in xrange(components.min() + 1, components.max() + 1):
mask = components == label
wallIndices = np.unique(wallMask[mask]).tolist()
if -1 in wallIndices:
wallIndices.remove(-1)
pass
if len(wallIndices) != 2:
continue
wall_1 = newWalls[wallIndices[0]]
wall_2 = newWalls[wallIndices[1]]
#print(wall_1, wall_2)
direction_1 = calcLineDirection(wall_1)
direction_2 = calcLineDirection(wall_2)
if direction_1 != direction_2:
intersectionPoint = findIntersection(wall_1, wall_2)
newWalls[wallIndices[0]] = extendLine(wall_1, intersectionPoint)
newWalls[wallIndices[1]] = extendLine(wall_2, intersectionPoint)
pass
continue
# try:
# _, singularCorners = lines2Corners(newWalls, gap=gap, getSingularCorners=True)
# for _, singularCorner_1 in enumerate(singularCorners):
# for singularCorner_2 in singularCorners[_ + 1:]:
# wall_1 = newWalls[singularCorner_1[0]]
# wall_2 = newWalls[singularCorner_2[0]]
# corner_1 = wall_1[singularCorner_1[1]]
# corner_2 = wall_2[singularCorner_2[1]]
# if pointDistance(corner_1, corner_2) < (gap * 2 + 1) * 2:
# intersectionPoint = findIntersection(wall_1, wall_2)
# newWalls[singularCorner_1[0]] = extendLine(wall_1, intersectionPoint)
# newWalls[singularCorner_2[0]] = extendLine(wall_2, intersectionPoint)
# pass
# continue
# continue
# except:
# pass
return newWalls
def extractLines(lineMask, lengthThreshold=11, widthThreshold=5):
lines = []
components = measure.label(lineMask, background=0)
for label in xrange(components.min() + 1, components.max() + 1):
mask = components == label
ys, xs = mask.nonzero()
line = [[xs.min(), ys.min()], [xs.max(), ys.max()]]
direction = calcLineDirection(line)
if abs(line[1][1 - direction] - line[0][1 - direction]) > widthThreshold or abs(line[1][direction] - line[0][direction]) < lengthThreshold:
continue
fixedValue = (line[1][1 - direction] + line[0][1 - direction]) / 2
line[1][1 - direction] = line[0][1 - direction] = fixedValue
lines.append(line)
continue
return lines
def drawPoints(filename, width, height, points, backgroundImage=None, pointSize=5, pointColor=None):
colorMap = ColorPalette(NUM_CORNERS).getColorMap()
if np.all(np.equal(backgroundImage, None)):
image = np.zeros((height, width, 3), np.uint8)
else:
if backgroundImage.ndim == 2:
image = np.tile(np.expand_dims(backgroundImage, -1), [1, 1, 3])
else:
image = backgroundImage
pass
pass
no_point_color = pointColor is None
for point in points:
if no_point_color:
pointColor = colorMap[point[2] * 4 + point[3]]
pass
#print('used', pointColor)
#print('color', point[2] , point[3])
image[max(int(round(point[1])) - pointSize, 0):min(int(round(point[1])) + pointSize, height), max(int(round(point[0])) - pointSize, 0):min(int(round(point[0])) + pointSize, width)] = pointColor
continue
if filename != '':
cv2.imwrite(filename, image)
return
else:
return image
def drawPointsSeparately(path, width, height, points, backgroundImage=None, pointSize=5):
if np.all(np.equal(backgroundImage, None)):
image = np.zeros((height, width, 13), np.uint8)
else:
image = np.tile(np.expand_dims(backgroundImage, -1), [1, 1, 13])
pass
for point in points:
image[max(int(round(point[1])) - pointSize, 0):min(int(round(point[1])) + pointSize, height), max(int(round(point[0])) - pointSize, 0):min(int(round(point[0])) + pointSize, width), int(point[2] * 4 + point[3])] = 255
continue
for channel in xrange(13):
cv2.imwrite(path + '_' + str(channel) + '.png', image[:, :, channel])
continue
return
def drawLineMask(width, height, points, lines, lineWidth = 5, backgroundImage = None):
lineMask = np.zeros((height, width))
for lineIndex, line in enumerate(lines):
point_1 = points[line[0]]
point_2 = points[line[1]]
direction = calcLineDirectionPoints(points, line)
fixedValue = int(round((point_1[1 - direction] + point_2[1 - direction]) / 2))
minValue = int(min(point_1[direction], point_2[direction]))
maxValue = int(max(point_1[direction], point_2[direction]))
if direction == 0:
lineMask[max(fixedValue - lineWidth, 0):min(fixedValue + lineWidth + 1, height), minValue:maxValue + 1] = 1
else:
lineMask[minValue:maxValue + 1, max(fixedValue - lineWidth, 0):min(fixedValue + lineWidth + 1, width)] = 1
pass
continue
return lineMask
def drawLines(filename, width, height, points, lines, lineLabels = [], backgroundImage = None, lineWidth = 5, lineColor = None):
colorMap = ColorPalette(len(lines)).getColorMap()
if backgroundImage is None:
image = np.ones((height, width, 3), np.uint8) * 0
else:
if backgroundImage.ndim == 2:
image = np.stack([backgroundImage, backgroundImage, backgroundImage], axis=2)
else:
image = backgroundImage
pass
pass
for lineIndex, line in enumerate(lines):
point_1 = points[line[0]]
point_2 = points[line[1]]
direction = calcLineDirectionPoints(points, line)
fixedValue = int(round((point_1[1 - direction] + point_2[1 - direction]) / 2))
minValue = int(round(min(point_1[direction], point_2[direction])))
maxValue = int(round(max(point_1[direction], point_2[direction])))
if len(lineLabels) == 0:
if np.any(lineColor == None):
lineColor = np.random.rand(3) * 255
pass
if direction == 0:
image[max(fixedValue - lineWidth, 0):min(fixedValue + lineWidth + 1, height), minValue:maxValue + 1, :] = lineColor
else:
image[minValue:maxValue + 1, max(fixedValue - lineWidth, 0):min(fixedValue + lineWidth + 1, width), :] = lineColor
else:
labels = lineLabels[lineIndex]
isExterior = False
if direction == 0:
for c in xrange(3):
image[max(fixedValue - lineWidth, 0):min(fixedValue, height), minValue:maxValue, c] = colorMap[labels[0]][c]
image[max(fixedValue, 0):min(fixedValue + lineWidth + 1, height), minValue:maxValue, c] = colorMap[labels[1]][c]
continue
else:
for c in xrange(3):
image[minValue:maxValue, max(fixedValue - lineWidth, 0):min(fixedValue, width), c] = colorMap[labels[1]][c]
image[minValue:maxValue, max(fixedValue, 0):min(fixedValue + lineWidth + 1, width), c] = colorMap[labels[0]][c]
continue
pass
pass
continue
if filename == '':
return image
else:
cv2.imwrite(filename, image)
def drawRectangles(filename, width, height, points, rectangles, labels, lineWidth = 2, backgroundImage = None, rectangleColor = None):
colorMap = ColorPalette(NUM_ICONS).getColorMap()
if backgroundImage is None:
image = np.ones((height, width, 3), np.uint8) * 0
else:
image = backgroundImage
pass
for rectangleIndex, rectangle in enumerate(rectangles):
point_1 = points[rectangle[0]]
point_2 = points[rectangle[1]]
point_3 = points[rectangle[2]]
point_4 = points[rectangle[3]]
if len(labels) == 0:
if rectangleColor is None:
color = np.random.rand(3) * 255
else:
color = rectangleColor
else:
color = colorMap[labels[rectangleIndex]]
pass
x_1 = int(round((point_1[0] + point_3[0]) / 2))
x_2 = int(round((point_2[0] + point_4[0]) / 2))
y_1 = int(round((point_1[1] + point_2[1]) / 2))
y_2 = int(round((point_3[1] + point_4[1]) / 2))
cv2.rectangle(image, (x_1, y_1), (x_2, y_2), color=tuple(color.tolist()), thickness = 2)
# point_1 = (int(point_1[0]), int(point_1[1]))
# point_2 = (int(point_2[0]), int(point_2[1]))
# point_3 = (int(point_3[0]), int(point_3[1]))
# point_4 = (int(point_4[0]), int(point_4[1]))
# image[max(point_1[1] - lineWidth, 0):min(point_1[1] + lineWidth, height), point_1[0]:point_2[0] + 1, :] = color
# image[max(point_3[1] - lineWidth, 0):min(point_3[1] + lineWidth, height), point_3[0]:point_4[0] + 1, :] = color
# image[point_1[1]:point_3[1] + 1, max(point_1[0] - lineWidth, 0):min(point_1[0] + lineWidth, width), :] = color
# image[point_2[1]:point_4[1] + 1, max(point_2[0] - lineWidth, 0):min(point_2[0] + lineWidth, width), :] = color
continue
if filename == '':
return image
else:
cv2.imwrite(filename, image)
pass
def drawResultImage(width, height, result):
resultImage = drawLines('', width, height, result['wall'][0], result['wall'][1], result['wall'][2], None, lineWidth=3)
resultImage = drawLines('', width, height, result['door'][0], result['door'][1], [], resultImage, lineWidth=2, lineColor=0)
iconImage = drawRectangles('', width, height, result['icon'][0], result['icon'][1], result['icon'][2], lineWidth=2)
return resultImage, iconImage
def resizeResult(result, width, height, oriWidth=256, oriHeight=256):
result['wall'][0] = [[float(point[0]) / oriWidth * width, float(point[1]) / oriHeight * height, point[2], point[3]] for point in result['wall'][0]]
result['door'][0] = [[float(point[0]) / oriWidth * width, float(point[1]) / oriHeight * height, point[2], point[3]] for point in result['door'][0]]
result['icon'][0] = [[float(point[0]) / oriWidth * width, float(point[1]) / oriHeight * height, point[2], point[3]] for point in result['icon'][0]]
#result['room'][0] = [(cv2.resize(mask, (width, height), interpolation=cv2.INTER_NEAREST) > 0).astype(np.uint8) for mask in result['room'][0]]
return
def drawResultImageFinal(width, height, result):
colorMap = np.array([(224, 255, 192), (255, 160, 96), (255, 224, 128), (192, 255, 255), (192, 255, 255), (192, 255, 255), (192, 192, 224), (224, 255, 192), (255, 224, 224), (224, 224, 224)])
borderColorMap = np.array([(128, 192, 64), (192, 64, 64), (192, 128, 64), (0, 128, 192), (0, 128, 192), (0, 128, 192), (128, 64, 160), (128, 192, 64), (192, 64, 0), (255, 255, 255)])
colorMap = np.concatenate([np.full(shape=(1, 3), fill_value=0), colorMap, borderColorMap], axis=0).astype(np.uint8)
colorMap = colorMap[:, ::-1]
labelRoomMap = getLabelRoomMap()
roomSegmentation = np.zeros((height, width), dtype=np.int32)
roomsInfo = []
wall_dict = result['wall']
wallMask = drawWallMask([(wall_dict[0][line[0]], wall_dict[0][line[1]]) for line in wall_dict[1]], width, height, thickness=3)
roomRegions = measure.label(1 - wallMask, background=0)
#cv2.imwrite('test/' + str(dictIndex) + '_segmentation_regions.png', drawSegmentationImage(roomRegions))
backgroundIndex = roomRegions.min()
wallPoints = wall_dict[0]
roomLabels = {}
sizes = np.array([width, height])
for wallIndex, wallLabels in enumerate(wall_dict[2]):
wallLine = wall_dict[1][wallIndex]
lineDim = calcLineDim(wallPoints, wallLine)
#print('wall', wallIndex, wallPoints[wallLine[0]][:2], wallPoints[wallLine[1]][:2])
center = np.round((np.array(wallPoints[wallLine[0]][:2]) + np.array(wallPoints[wallLine[1]][:2])) / 2).astype(np.int32)
for c in xrange(2):
direction = c * 2 - 1
if lineDim == 1:
direction *= -1
pass
point = center
for offset in xrange(10):
point[1 - lineDim] += direction
if point[lineDim] < 0 or point[lineDim] >= sizes[lineDim]:
break
roomIndex = roomRegions[point[1], point[0]]
if roomIndex != backgroundIndex:
#print(roomIndex, wallLabels[c], wallLabels, point.tolist())
#mask = roomRegions == roomIndex
#mask = cv2.dilate(mask.astype(np.uint8), np.ones((3, 3)), iterations=1)
#roomSegmentation[mask] = wallLabels[c]
#rooms[wallLabels[c]].append(cv2.dilate(mask.astype(np.uint8), np.ones((3, 3)), iterations=wallLineWidth))
#roomRegions[mask] = backgroundIndex
if roomIndex not in roomLabels:
roomLabels[roomIndex] = {}
pass
roomLabels[roomIndex][wallLabels[c]] = True
break
continue
continue
continue
rooms = []
indexMap = {}
for roomIndex, labels in roomLabels.iteritems():
#print(roomIndex, labels)
if roomIndex == roomRegions[0][0]:
continue
indexMap[roomIndex] = len(rooms)
mask = roomRegions == roomIndex
mask = cv2.dilate(mask.astype(np.uint8), np.ones((3, 3)), iterations=3)
# if 7 in labels and 2 not in labels:
# labels[2] = True
# pass
# if 5 in labels and 3 not in labels:
# labels[3] = True
# pass
# if 9 in labels and 1 not in labels:
# labels[1] = True
# pass
rooms.append((mask, labels))
continue
wallLineWidth = 5
# foregroundMask = roomSegmentation > 0
# foregroundMask = cv2.dilate(foregroundMask, np.ones((3, 3)), iterations=wallLineWidth)
# roomSegmentation[foregroundMask] =
for mask, labels in rooms:
label = min([label for label in labels])
if label < 0:
continue
kernel = np.zeros((3, 3))
kernel[1:, 1:] = 1
#mask = cv2.erode(mask.astype(np.uint8), kernel.astype(np.uint8), iterations=1)
erodedMask = cv2.erode(mask, np.ones((3, 3)), iterations=wallLineWidth)
roomSegmentation[mask.astype(np.bool)] = label + 10
roomSegmentation[erodedMask.astype(np.bool)] = label
continue
image = colorMap[roomSegmentation.reshape(-1)].reshape((height, width, 3))
pointColor = tuple((np.array([0.3, 0.3, 0.9]) * 255).astype(np.uint8).tolist())
for wallLine in result['wall'][1]:
for pointIndex in wallLine:
point = result['wall'][0][pointIndex]
cv2.circle(image, (int(point[0]), int(point[1])), color=pointColor, radius=8, thickness=-1)
cv2.circle(image, (int(point[0]), int(point[1])), color=(255, 255, 255), radius=4, thickness=-1)
continue
continue
lineSegmentLength = 20.0
for doorLine in result['door'][1]:
point_1 = np.array(result['door'][0][doorLine[0]][:2]).astype(np.float32)
point_2 = np.array(result['door'][0][doorLine[1]][:2]).astype(np.float32)
lineDim = calcLineDim(result['door'][0], doorLine)
for i in xrange(int(abs(point_1[lineDim] - point_2[lineDim]) / lineSegmentLength + 1)):
ratio = i * lineSegmentLength / abs(point_1[lineDim] - point_2[lineDim])
if ratio >= 1:
break
startPoint = point_1 + ratio * (point_2 - point_1)
ratio = (i + 0.5) * lineSegmentLength / abs(point_1[lineDim] - point_2[lineDim])
ratio = min(ratio, 1)
endPoint = point_1 + ratio * (point_2 - point_1)
cv2.line(image, (startPoint[0], startPoint[1]), (endPoint[0], endPoint[1]), color=(0, 0, 0), thickness=4)
continue
for point in [point_1, point_2]:
startPoint = point.copy()
startPoint[1 - lineDim] += lineSegmentLength / 2
endPoint = point.copy()
endPoint[1 - lineDim] -= lineSegmentLength / 2
cv2.line(image, (startPoint[0], startPoint[1]), (endPoint[0], endPoint[1]), color=(0, 0, 0), thickness=2)
continue
continue
labelIconMap = getLabelIconMap()
iconPos = []
for iconIndex, (icon, label) in enumerate(zip(result['icon'][1], result['icon'][2])):
name = labelIconMap[label + 1]
iconImage = cv2.imread('icons/' + name + '.jpg')
points = [result['icon'][0][pointIndex] for pointIndex in icon]
x_1 = int(round((points[0][0] + points[2][0]) / 2))
x_2 = int(round((points[1][0] + points[3][0]) / 2))
y_1 = int(round((points[0][1] + points[1][1]) / 2))
y_2 = int(round((points[2][1] + points[3][1]) / 2))
iconSize = iconImage.shape #(y, x)
#print('icon_size', iconSize)
icon_is_landscape = iconSize[1] > iconSize[0]
slot_size = (x_2 - x_1 + 1, y_2 - y_1 + 1)
slot_center = np.array((x_1 + slot_size[0]/2, y_1 + slot_size[1] / 2))
slot_is_landscape = slot_size[0] > slot_size[1]
min_dist = float('inf')
line = None
close_line_dim = 0
for wallIndex, wallLabels in enumerate(wall_dict[2]):
wallLine = wall_dict[1][wallIndex]
lineDim = calcLineDim(wallPoints, wallLine)
center = np.round((np.array(wallPoints[wallLine[0]][:2]) + np.array(wallPoints[wallLine[1]][:2])) / 2).astype(np.int32)
point1=np.array(wallPoints[wallLine[0]][:2])
point2 = np.array(wallPoints[wallLine[1]][:2])
            n = point2 - point1
            # perpendicular distance from the icon centre to the wall line
            # (2D cross product = parallelogram area; divide by the wall length)
            dist = np.abs(np.cross(n, slot_center - point1)) / max(np.linalg.norm(n), 1e-6)
            #print('indices', wallIndex, wallLabels, wallLine)
            # print('points', wallPoints[wallLine[0]], wallPoints[wallLine[1]])
            # pass
            if dist < 5 and dist < min_dist:
                min_dist = dist
                line = (point1, point2)
                close_line_dim = lineDim
                pass
            pass
#sys.stderr.write("{}, {}, {}, {}, {}\n".format(y_1, y_2, x_1, x_2, iconImage.shape))
print('has line: ', line, name, close_line_dim)
if name == "toilet":
if line is not None:
if close_line_dim == 0: #x
y_pos = (line[0][1] + line[1][1]) / 2
if y_pos > y_2: #toilet is below
print('first case rot')
iconImage = rotateImage(iconImage, 2)
elif y_pos < y_1: # toilet is above
pass # do nothing
else:
print("bad case", x_1, x_2, y_1, y_2, line)
pass
else: #y
x_pos = (line[0][0] + line[1][0])/2
print('here', x_pos, x_1, x_2)
if x_pos > x_2: #toilet is to the left
pass # do nothing
elif x_pos < x_1: # toilet is to the right
print(slot_is_landscape, icon_is_landscape)
if slot_is_landscape:
iconImage = rotateImage(iconImage, 2)
pass # do nothing
else:
print("bad case", x_1, x_2, y_1, y_2, line)
pass
pass
elif name == "washing_basin":
if line is not None:
if close_line_dim == 0: #x
y_pos = (line[0][1] + line[1][1]) / 2
print(y_pos, y_1, y_2, 'y')
if y_pos > y_2: #toilet is below
iconImage = rotateImage(iconImage, 2)
pass
elif y_pos < y_1: # toilet is above
pass # do nothing
else:
print("bad case", x_1, x_2, y_1, y_2, line)
pass
else: #y
x_pos = (line[0][0] + line[1][0])/2
print(x_pos, x_1, x_2 , 'x')
if x_pos > x_2: #toilet is to the left
pass # do nothing
elif x_pos < x_1: # toilet is to the right
if not slot_is_landscape:
iconImage = rotateImage(iconImage, 2)
pass # do nothing
pass
else:
print("bad case", x_1, x_2, y_1, y_2, line)
pass
pass
pass
pass
pass
if slot_is_landscape != icon_is_landscape:
iconImage = rotateImage(iconImage, 1)
iconImage = cv2.resize(iconImage, slot_size)
image[y_1:y_2 + 1, x_1:x_2 + 1] = iconImage
if name == "washing_basin":
print('basin pose', [x_1, y_1, x_2, y_2])
iconPos.append([x_1,y_1, x_2, y_2])
continue
fontSize = 0.7
for mask, labels in rooms:
label = min([label for label in labels])
if label <= 0:
continue
ys, xs = mask.nonzero()
print(xs.mean(), ys.mean(), label)
#label_half_size_x = int(fontSize * len(labelRoomMap[label]) / 2 * 20)
#label_half_size_y = int(fontSize / 2 * 20)
ret, baseline = cv2.getTextSize(labelRoomMap[label], fontFace=cv2.FONT_HERSHEY_TRIPLEX, fontScale=fontSize, thickness=1)
print(labelRoomMap[label])
#print('ret', ret)
center = findBestTextLabelCenter(iconPos, xs, ys, ret[0]/2, ret[1]/2)
print('comp', [xs.mean(), ys.mean()], center)
#center = np.round([xs.mean(), ys.mean()]).astype(np.int32)
if center is not None:
cv2.putText(image, labelRoomMap[label], (center[0] - ret[0]/2, center[1] + ret[1]/2), fontFace=cv2.FONT_HERSHEY_TRIPLEX, fontScale=fontSize, color=(0, 0, 0), thickness=1)
else:
if label != 4:
import sys
sys.stderr.write("panic! I cannot find valid position to put label in room: {}, {}\n".format(label, labelRoomMap[label]))
continue
print('end draw')
#cv2.imwrite('test/result.png', image)
#exit(1)
#cv2.imwrite('test/region.png', drawSegmentationImage(roomRegions))
# for regionIndex in xrange(roomRegions.max() + 1):
# cv2.imwrite('test/mask_' + str(regionIndex) + '.png', drawMaskImage(roomRegions == regionIndex))
# continue
#resultImage = drawLines('', width, height, result['wall'][0], result['wall'][1], result['wall'][2], None, lineWidth=3)
#resultImage = drawLines('', width, height, result['door'][0], result['door'][1], [], resultImage, lineWidth=2, lineColor=0)
#iconImage = drawRectangles('', width, height, result['icon'][0], result['icon'][1], result['icon'][2], lineWidth=2)
return image
def findBestTextLabelCenter(icon_pos, xs, ys, label_half_size_x, label_half_size_y):
center = np.array([xs.mean(), ys.mean()])
icon_pos = np.array(icon_pos)
room_points = np.array([xs, ys]).transpose()
min_point = room_points.min(axis=0, keepdims=True)
max_point = room_points.max(axis=0, keepdims=True)
size = np.array([label_half_size_x, label_half_size_y])
print('size', size)
avail_min_point = min_point + size
avail_max_point = max_point - size
avail_points = np.logical_and(room_points > avail_min_point, room_points < avail_max_point)
avail_points = np.all(avail_points, axis=1)
room_points_aug = np.tile(room_points[:, :, np.newaxis], [1, 1, icon_pos.shape[0]])
room_points_gt_tl_x = room_points_aug[:, 0, :] > icon_pos[:, 0] - size[0] - 5
room_points_lt_br_x = room_points_aug[:, 0, :] < icon_pos[:, 2] + size[0] + 5
room_points_gt_tl_y = room_points_aug[:, 1, :] > icon_pos[:, 1] - size[1] - 5
room_points_lt_br_y = room_points_aug[:, 1, :] < icon_pos[:, 3] + size[1] + 5
room_points_in_square = np.logical_and(room_points_gt_tl_x, room_points_lt_br_x)
room_points_in_square = np.logical_and(room_points_in_square, room_points_gt_tl_y)
room_points_in_square = np.logical_and(room_points_in_square, room_points_lt_br_y)
#room_points_in_square = np.all(room_points_in_square, axis=1)
room_points_in_square = np.any(room_points_in_square, axis=1)
room_points_not_in_square = np.logical_not(room_points_in_square)
good_points_mask = np.logical_and(avail_points, room_points_not_in_square)
good_points = room_points[good_points_mask]
good_points_center_dist = np.linalg.norm(good_points - center, axis=1)
#good_points_center_dist = np.sum(np.abs(good_points - center), axis=1)
#print('icon_pos')
#print(icon_pos)
#print('goodpoints')
#print(center)
#print(good_points)
#print(good_points_center_dist)
if len(good_points) == 0:
#print('give up')
return None
#return np.round(center).astype(np.int32)
best_point_idx = np.argmin(good_points_center_dist, axis=0)
#print('cost', good_points_center_dist[best_point_idx])
#print('best points')
#print(good_points[best_point_idx])
return good_points[best_point_idx]
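# Rough usage sketch (values are illustrative, not from the original code): given
# the pixel coordinates (xs, ys) of a room mask and the icon boxes already pasted
# into the image, pick the room pixel closest to the room centroid where a text
# label of half-size (label_half_size_x, label_half_size_y) fits without
# overlapping any icon.
#
#   center = findBestTextLabelCenter(iconPos, xs, ys, 40, 10)
#   if center is not None:
#       cv2.putText(image, 'kitchen', (center[0] - 40, center[1] + 10),
#                   fontFace=cv2.FONT_HERSHEY_TRIPLEX, fontScale=0.7,
#                   color=(0, 0, 0), thickness=1)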
def rotateImage(image, orientation):
if orientation == 0:
return image
elif orientation == 1:
return np.flip(image.transpose((1, 0, 2)), axis=0)
elif orientation == 2:
return np.flip(np.flip(image, axis=1), axis=0)
else:
return np.flip(image.transpose(1, 0, 2), axis=1)
return
def projectIndices(pointIndices, pointSegmentation, min_x, max_x, min_y, max_y):
if max_x - min_x == 1 and max_y - min_y == 1:
pointIndices[pointSegmentation[:, 2]] = min_y * WIDTH + min_x
return
elif max_x - min_x >= max_y - min_y:
middle_x = int((max_x + min_x + 1) / 2)
mask_1 = pointSegmentation[:, 0] < middle_x
projectIndices(pointIndices, pointSegmentation[mask_1], min_x, middle_x, min_y, max_y)
mask_2 = pointSegmentation[:, 0] >= middle_x
projectIndices(pointIndices, pointSegmentation[mask_2], middle_x, max_x, min_y, max_y)
else:
middle_y = int((max_y + min_y + 1) / 2)
mask_1 = pointSegmentation[:, 1] < middle_y
projectIndices(pointIndices, pointSegmentation[mask_1], min_x, max_x, min_y, middle_y)
mask_2 = pointSegmentation[:, 1] >= middle_y
projectIndices(pointIndices, pointSegmentation[mask_2], min_x, max_x, middle_y, max_y)
pass
return
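# Illustrative sketch (not part of the original code): projectIndices recursively
# bisects the image rectangle and, for every row of pointSegmentation (assumed to
# hold (x, y, point_index) per point), writes the flattened pixel index
# y * WIDTH + x into pointIndices once the recursion reaches a 1x1 cell.
#
#   pointIndices = np.full(numPoints, -1, dtype=np.int64)
#   projectIndices(pointIndices, pointSegmentation, 0, WIDTH, 0, HEIGHT)
#   # afterwards pointIndices[i] is the pixel index each segmented point maps to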
def drawCornerSegmentation(corners, radius=1, width=WIDTH, height=HEIGHT):
cornerSegmentation = np.zeros((height, width), dtype=np.int64)
for corner in corners:
cornerSegmentation[max(corner[1] - radius + 1, 0):min(corner[1] + radius, height - 1), max(corner[0] - radius + 1, 0):min(corner[0] + radius, width - 1)] = corner[2]
continue
return cornerSegmentation
def getOrientationCorners(corners, cornerSize=3):
orientationCorners = [[] for _ in xrange(NUM_CORNERS)]
for corner in corners:
orientationCorners[corner[2] - 1].append(((corner[0], corner[1]), (corner[0] - cornerSize, corner[1] - cornerSize), (corner[0] + cornerSize, corner[1] + cornerSize)))
continue
return orientationCorners
def getGTPrimitives(gt_dict):
    # NOTE: this helper appears incomplete: the names below (wallPoints,
    # filteredWallLines, ...) are not defined in this scope, so callers would
    # need to extract them from gt_dict before this can run.
    result_dict = {'wall': [wallPoints, filteredWallLines, filteredWallLabels], 'door': [doorPoints, filteredDoorLines, []], 'icon': [iconPoints, filteredIcons, filteredIconTypes]}
    return result_dict
def writeRepresentation(filename, width, height, result_dict):
labelMap = [11, 1, 2, 3, 4, 3, 6, 7, 8, 2,]
labelIconMap = getLabelIconMap()
with open(filename, 'w') as f:
f.write(str(width) + '\t' + str(height) + '\n')
f.write(str(len(result_dict['wall'][1])) + '\n')
for wallLine, wallLabels in zip(result_dict['wall'][1], result_dict['wall'][2]):
point_1 = result_dict['wall'][0][wallLine[0]]
point_2 = result_dict['wall'][0][wallLine[1]]
lineDim = calcLineDim(result_dict['wall'][0], wallLine)
if point_1[lineDim] > point_2[lineDim]:
point_1[lineDim], point_2[lineDim] = point_2[lineDim], point_1[lineDim]
pass
f.write(str(int(point_1[0])) + '\t' + str(int(point_1[1])) + '\t' + str(int(point_2[0])) + '\t' + str(int(point_2[1])) + '\t' + str(labelMap[wallLabels[0]]) + '\t' + str(labelMap[wallLabels[1]]) + '\n')
continue
for doorLine in result_dict['door'][1]:
point_1 = result_dict['door'][0][doorLine[0]]
point_2 = result_dict['door'][0][doorLine[1]]
lineDim = calcLineDim(result_dict['door'][0], doorLine)
if point_1[lineDim] > point_2[lineDim]:
point_1[lineDim], point_2[lineDim] = point_2[lineDim], point_1[lineDim]
pass
f.write(str(int(point_1[0])) + '\t' + str(int(point_1[1])) + '\t' + str(int(point_2[0])) + '\t' + str(int(point_2[1])) + '\tdoor\t1\t1\n')
continue
#print(len(result_dict['icon'][1]))
for icon, iconLabel in zip(result_dict['icon'][1], result_dict['icon'][2]):
#print(iconLabel, labelIconMap[iconLabel + 1])
points = np.array([result_dict['icon'][0][pointIndex][:2] for pointIndex in icon]).astype(np.int32)
mins = points.min(0)
maxs = points.max(0)
f.write(str(int(mins[0])) + '\t' + str(int(mins[1])) + '\t' + str(int(maxs[0])) + '\t' + str(int(maxs[1])) + '\t' + labelIconMap[iconLabel + 1] + '\t1\t1\n')
continue
f.close()
pass
return
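# Illustrative output of writeRepresentation (all numbers are made up): one
# tab-separated line with width/height, the wall count, one line per wall
# (x1 y1 x2 y2 and the two mapped room labels on either side), then door lines
# and icon bounding boxes with their type names.
#
#   256 256
#   2
#   30  40  30  200 11  3
#   30  40  180 40  2   4
#   60  40  90  40  door    1   1
#   70  120 110 160 toilet  1   1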
| 39.974569 | 220 | 0.590429 |
3fc745f5db9e3516f67d93fe77be28482e348d51 | 2,543 | py | Python | app/api/endpoints/transfer.py | justincc/RDSDS-Server | ed59110a9e56d19944c87464f682ce49111ad1e4 | [
"Apache-2.0"
] | null | null | null | app/api/endpoints/transfer.py | justincc/RDSDS-Server | ed59110a9e56d19944c87464f682ce49111ad1e4 | [
"Apache-2.0"
] | 1 | 2020-08-11T10:48:16.000Z | 2020-08-11T10:48:16.000Z | app/api/endpoints/transfer.py | justincc/RDSDS-Server | ed59110a9e56d19944c87464f682ce49111ad1e4 | [
"Apache-2.0"
] | 2 | 2020-10-13T14:22:59.000Z | 2020-11-13T16:50:54.000Z |
from fastapi import APIRouter, Depends
from starlette.requests import Request
from starlette.responses import JSONResponse
from app.models.transfer import TransferBase, TransferType, TransferResponse
from app.business.oauth import auth_request
from app.models.objects import Error
from app.business import transfer
router = APIRouter()
@router.post(
"",
summary="Create a transfer request for RDSDS",
name='create_transfer_globus',
tags=["TransferService"],
response_model=TransferResponse,
response_model_skip_defaults=True,
responses={
403: {'model': Error, 'description': "The requester is not authorized to perform this action, Please login through /globus/login"},
500: {'model': Error, 'description': "An unexpected error occurred."}
}
)
async def create_transfer(transferBase: TransferBase, request: Request):
return await transfer.create_transfer(transferBase, request)
@router.get(
"/",
summary="Get list for transfers for RDSDS",
name='get_transfer_list',
tags=["TransferService"],
responses={
403: {'model': Error, 'description': "The requester is not authorized to perform this action, Please login through /globus/login"},
500: {'model': Error, 'description': "An unexpected error occurred."}
}
)
async def get_transfer_list(request: Request):
return await transfer.get_transfer_list(request)
@router.get(
"/{transfer_id}",
summary="Get status for transfer request for RDSDS",
name='get_transfer',
tags=["TransferService"],
responses={
403: {'model': Error, 'description': "The requester is not authorized to perform this action, Please login through /globus/login"},
500: {'model': Error, 'description': "An unexpected error occurred."}
}
)
async def get_transfer(transfer_id: str, request: Request):
return await transfer.get_transfer(transfer_id, request)
@router.delete(
"/{transfer_id}",
summary="Cancel transfer request for RDSDS",
name='delete_transfer',
tags=["TransferService"],
responses={
403: {'model': Error, 'description': "The requester is not authorized to perform this action, Please login through /globus/login"},
500: {'model': Error, 'description': "An unexpected error occurred."}
}
)
async def delete_transfer(transfer_id: str, request: Request):
return await transfer.delete_transfer(transfer_id, request)
| 37.397059 | 140 | 0.685018 |
e39b2046ea911ce697feef72c54f48d8486e5b52 | 7,652 | py | Python | merfishdecoder/apps/run_extract_barcodes.py | r3fang/MERlin | 2ee81a4e18c3fe406d91ee6ce65ca38b69ee9d68 | [
"MIT"
] | null | null | null | merfishdecoder/apps/run_extract_barcodes.py | r3fang/MERlin | 2ee81a4e18c3fe406d91ee6ce65ca38b69ee9d68 | [
"MIT"
] | null | null | null | merfishdecoder/apps/run_extract_barcodes.py | r3fang/MERlin | 2ee81a4e18c3fe406d91ee6ce65ca38b69ee9d68 | [
"MIT"
] | null | null | null | import os
import sys
import pickle
import pandas as pd
import numpy as np
from merfishdecoder.core import zplane
from merfishdecoder.util import utilities
from merfishdecoder.util import barcoder
from merfishdecoder.util import decoder
def run_job_archieve(dataSetName: str = None,
fov: int = None,
zpos: float = None,
decodedImagesName: str = None,
outputName: str = None,
psmName: str = None,
barcodesPerCore: int = 5,
maxCores: int = 10):
"""
Extract barcodes from decoded images.
Args
----
dataSetName: input dataset name.
    fov: field of view index.
    zpos: z position of the plane.
    decodedImagesName: decoded images (.npz) used for barcode extraction.
    outputName: output file (.h5) that contains the extracted barcodes.
    psmName: pixel scoring model file name.
    barcodesPerCore: number of barcodes processed per core.
    maxCores: number of cores for parallel processing.
"""
# print input variables
print("====== input ======")
print("dataSetName: %s" % dataSetName)
print("fov: %d" % fov)
print("zpos: %f" % zpos)
print("decodedImagesName: %s" % decodedImagesName)
print("outputName: %s" % outputName)
print("barcodesPerCore: %s" % barcodesPerCore)
print("maxCores: %s" % maxCores)
print("==================\n")
utilities.print_checkpoint("Extract Barcodes")
utilities.print_checkpoint("Start")
# generate zplane object
zp = zplane.Zplane(dataSetName,
fov=fov,
zpos=zpos)
# create the folder
os.makedirs(os.path.dirname(outputName),
exist_ok=True)
# load decoding movie
f = np.load(decodedImagesName)
decodes = {
"decodedImage": f["decodedImage"],
"magnitudeImage": f["magnitudeImage"],
"distanceImage": f["distanceImage"]
}
f.close()
# load the score machine
if psmName != None:
psm = pickle.load(open(psmName, "rb"))
# calculate pixel probability
decodes["probabilityImage"] = \
decoder.calc_pixel_probability(
model = psm,
decodedImage = decodes["decodedImage"],
magnitudeImage = decodes["magnitudeImage"],
distanceImage = decodes["distanceImage"],
minProbability = 0.01)
else:
decodes["probabilityImage"] = \
decodes["distanceImage"]
# extract barcodes
barcodes = barcoder.extract_barcodes(
decodedImage = decodes["decodedImage"],
distanceImage = decodes["distanceImage"],
probabilityImage = decodes["probabilityImage"],
magnitudeImage = decodes["magnitudeImage"],
barcodesPerCore = barcodesPerCore,
numCores = maxCores)
# add fov and zpos info
barcodes = barcodes.assign(fov = fov)
barcodes = barcodes.assign(global_z = zpos)
barcodes = barcodes.assign(z = \
zp._dataSet.get_z_positions().index(zpos))
# save barcodes
barcodes.to_hdf(outputName,
key = "barcodes")
utilities.print_checkpoint("Done")
def run_job(dataSetName: str = None,
fov: int = None,
zpos: float = None,
decodingImagesName: str = None,
decodedImagesName: str = None,
outputName: str = None,
psmName: str = None,
barcodesPerCore: int = 5,
maxCores: int = 10):
"""
Extract barcodes from decoded images.
Args
----
    dataSetName: input dataset name.
    fov: field of view index.
    zpos: z position of the plane.
    decodingImagesName: processed (decoding) images (.npz) whose pixel traces are appended to the barcodes.
    decodedImagesName: decoded images (.npz) used for barcode extraction.
    outputName: output file (.h5) that contains the extracted barcodes.
    psmName: pixel scoring model file name.
    barcodesPerCore: number of barcodes processed per core.
    maxCores: number of cores for parallel processing.
"""
# print input variables
print("====== input ======")
print("dataSetName: %s" % dataSetName)
print("fov: %d" % fov)
print("zpos: %f" % zpos)
print("decodedImagesName: %s" % decodedImagesName)
print("decodingImagesName: %s" % decodingImagesName)
print("outputName: %s" % outputName)
print("barcodesPerCore: %s" % barcodesPerCore)
print("maxCores: %s" % maxCores)
print("==================\n")
utilities.print_checkpoint("Extract Barcodes")
utilities.print_checkpoint("Start")
# generate zplane object
zp = zplane.Zplane(dataSetName,
fov=fov,
zpos=zpos)
# create the folder
os.makedirs(os.path.dirname(outputName),
exist_ok=True)
# load decoding movie
f = np.load(decodedImagesName)
decodedImages = {
"decodedImage": f["decodedImage"],
"magnitudeImage": f["magnitudeImage"],
"distanceImage": f["distanceImage"]
}
f.close()
# load decoding images
f = np.load(decodingImagesName)
decodingImages = f["arr_0"]
f.close()
# load the score machine
if psmName != None:
psm = pickle.load(open(psmName, "rb"))
# calculate pixel probability
decodes["probabilityImage"] = \
decoder.calc_pixel_probability(
model = psm,
decodedImage = decodedImages["decodedImage"],
magnitudeImage = decodedImages["magnitudeImage"],
distanceImage = decodedImages["distanceImage"],
minProbability = 0.01)
else:
decodedImages["probabilityImage"] = \
decodedImages["distanceImage"]
# extract barcodes
barcodes = barcoder.extract_barcodes(
decodedImage = decodedImages["decodedImage"],
distanceImage = decodedImages["distanceImage"],
probabilityImage = decodedImages["probabilityImage"],
magnitudeImage = decodedImages["magnitudeImage"],
barcodesPerCore = barcodesPerCore,
numCores = maxCores)
# add fov and zpos info
barcodes = barcodes.assign(fov = fov)
barcodes = barcodes.assign(global_z = zpos)
barcodes = barcodes.assign(z = \
zp._dataSet.get_z_positions().index(zpos))
# add intensity traces
pixelTraces = decodingImages[:,barcodes.y.astype(int), barcodes.x.astype(int)].T
pixelTraces = pd.DataFrame(pixelTraces, columns = zp.get_bit_name())
barcodes = pd.concat([barcodes, pixelTraces], axis=1)
# save barcodes
barcodes.to_hdf(outputName,
key = "barcodes")
utilities.print_checkpoint("Done")
def main():
dataSetName = "191010_LMN7_DIV18_Map2Tau"
fov = 188
zpos = 0.0
decodingImagesName = \
"processedImages/fov_{fov:d}_zpos_{zpos:.1f}.npz".format(
fov = fov, zpos = zpos)
decodedImagesName = \
"decodedImages/fov_{fov:d}_zpos_{zpos:.1f}.npz".format(
fov = fov, zpos = zpos)
outputName = \
"extractedBarcodes/fov_{fov:d}_zpos_{zpos:.1f}.h5".format(
fov = fov, zpos = zpos)
psmName = None
barcodesPerCore = 1
maxCores = 10
dataSetName = sys.argv[1]
fov = int(sys.argv[2])
zpos = float(sys.argv[3])
decodingImagesName = sys.argv[4]
decodedImagesName = sys.argv[5]
outputName = sys.argv[6]
run_job(dataSetName = dataSetName,
fov = fov,
zpos = zpos,
decodingImagesName = decodingImagesName,
decodedImagesName = decodedImagesName,
outputName = outputName,
psmName = psmName,
barcodesPerCore = barcodesPerCore,
maxCores = maxCores)
if __name__ == "__main__":
main() | 30.365079 | 84 | 0.592133 |
b177f522cf0e2dd2aba4d522881c9ed055cc5fa2 | 7,558 | py | Python | mmdet/core/anchor/anchor_target_bi.py | latstars/mmdetectionCV2019 | 79575ad41f79c191be30f1e24feffc39aac3ca1c | [
"Apache-2.0"
] | 3 | 2020-01-25T14:20:24.000Z | 2021-10-17T17:39:53.000Z | mmdet/core/anchor/anchor_target_bi.py | latstars/mmdetectionCV2019 | 79575ad41f79c191be30f1e24feffc39aac3ca1c | [
"Apache-2.0"
] | null | null | null | mmdet/core/anchor/anchor_target_bi.py | latstars/mmdetectionCV2019 | 79575ad41f79c191be30f1e24feffc39aac3ca1c | [
"Apache-2.0"
] | null | null | null | import torch
from ..bbox import PseudoSampler, assign_and_sample, bbox2delta, build_assigner
from ..utils import multi_apply
def anchor_target_bi(anchor_list,
valid_flag_list,
gt_bboxes_list,
gt_bboxes_visible_list,
img_metas,
target_means,
target_stds,
cfg,
gt_bboxes_ignore_list=None,
gt_labels_list=None,
label_channels=1,
sampling=True,
unmap_outputs=True):
"""Compute regression and classification targets for anchors.
Args:
anchor_list (list[list]): Multi level anchors of each image.
valid_flag_list (list[list]): Multi level valid flags of each image.
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
gt_bboxes_visible_list (list[Tensor]): Ground truth visible bboxes of each image.
img_metas (list[dict]): Meta info of each image.
target_means (Iterable): Mean value of regression targets.
target_stds (Iterable): Std value of regression targets.
cfg (dict): RPN train configs.
Returns:
tuple
"""
num_imgs = len(img_metas)
assert len(anchor_list) == len(valid_flag_list) == num_imgs
# anchor number of multi levels
num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
# concat all level anchors and flags to a single tensor
for i in range(num_imgs):
assert len(anchor_list[i]) == len(valid_flag_list[i])
anchor_list[i] = torch.cat(anchor_list[i])
valid_flag_list[i] = torch.cat(valid_flag_list[i])
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
if gt_labels_list is None:
gt_labels_list = [None for _ in range(num_imgs)]
(all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
pos_inds_list, neg_inds_list) = multi_apply(
anchor_target_bi_single,
anchor_list,
valid_flag_list,
gt_bboxes_list,
gt_bboxes_visible_list,
gt_bboxes_ignore_list,
gt_labels_list,
img_metas,
target_means=target_means,
target_stds=target_stds,
cfg=cfg,
label_channels=label_channels,
sampling=sampling,
unmap_outputs=unmap_outputs)
# no valid anchors
if any([labels is None for labels in all_labels]):
return None
# sampled anchors of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
# split targets to a list w.r.t. multiple levels
labels_list = images_to_levels(all_labels, num_level_anchors)
label_weights_list = images_to_levels(all_label_weights, num_level_anchors)
bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors)
bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors)
return (labels_list, label_weights_list, bbox_targets_list,
bbox_weights_list, num_total_pos, num_total_neg)
def images_to_levels(target, num_level_anchors):
"""Convert targets by image to targets by feature level.
[target_img0, target_img1] -> [target_level0, target_level1, ...]
"""
target = torch.stack(target, 0)
level_targets = []
start = 0
for n in num_level_anchors:
end = start + n
level_targets.append(target[:, start:end].squeeze(0))
start = end
return level_targets
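# Minimal sketch of the conversion above (shapes are illustrative): with two
# images and num_level_anchors = [4, 2], each per-image target of shape (6, 4)
# is stacked to (2, 6, 4) and split along the anchor dimension per level.
#
#   targets = [torch.zeros(6, 4), torch.ones(6, 4)]        # one entry per image
#   level_targets = images_to_levels(targets, [4, 2])
#   # level_targets[0].shape == (2, 4, 4), level_targets[1].shape == (2, 2, 4)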
def anchor_target_bi_single(flat_anchors,
valid_flags,
gt_bboxes,
gt_bboxes_visible,
gt_bboxes_ignore,
gt_labels,
img_meta,
target_means,
target_stds,
cfg,
label_channels=1,
sampling=True,
unmap_outputs=True):
inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
img_meta['img_shape'][:2],
cfg.allowed_border)
if not inside_flags.any():
return (None, ) * 6
# assign gt and sample anchors
anchors = flat_anchors[inside_flags, :]
if sampling:
assign_result, sampling_result = assign_and_sample(
anchors, gt_bboxes, gt_bboxes_ignore, None, cfg)
else:
bbox_assigner = build_assigner(cfg.assigner_bi)
assign_result = bbox_assigner.assign(anchors, gt_bboxes, gt_bboxes_visible,
gt_bboxes_ignore, gt_labels)
bbox_sampler = PseudoSampler()
sampling_result = bbox_sampler.sample(assign_result, anchors,
gt_bboxes)
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
labels = anchors.new_zeros(num_valid_anchors, dtype=torch.long)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
pos_bbox_targets = bbox2delta(sampling_result.pos_bboxes,
sampling_result.pos_gt_bboxes,
target_means, target_stds)
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1.0
if gt_labels is None:
labels[pos_inds] = 1
else:
labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
if cfg.pos_weight <= 0:
label_weights[pos_inds] = 1.0
else:
label_weights[pos_inds] = cfg.pos_weight
if len(neg_inds) > 0:
label_weights[neg_inds] = 1.0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_anchors.size(0)
labels = unmap(labels, num_total_anchors, inside_flags)
label_weights = unmap(label_weights, num_total_anchors, inside_flags)
bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
neg_inds)
def anchor_inside_flags(flat_anchors,
valid_flags,
img_shape,
allowed_border=0):
img_h, img_w = img_shape[:2]
if allowed_border >= 0:
inside_flags = valid_flags & \
(flat_anchors[:, 0] >= -allowed_border).type(torch.uint8) & \
(flat_anchors[:, 1] >= -allowed_border).type(torch.uint8) & \
(flat_anchors[:, 2] < img_w + allowed_border).type(torch.uint8) & \
(flat_anchors[:, 3] < img_h + allowed_border).type(torch.uint8)
else:
inside_flags = valid_flags
return inside_flags
def unmap(data, count, inds, fill=0):
""" Unmap a subset of item (data) back to the original set of items (of
size count) """
if data.dim() == 1:
ret = data.new_full((count, ), fill)
ret[inds] = data
else:
new_size = (count, ) + data.size()[1:]
ret = data.new_full(new_size, fill)
ret[inds, :] = data
return ret
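# Illustrative example (not from the original repo): map labels computed for the
# 3 anchors inside the image back onto all 5 anchors, padding the rest with 0.
#
#   data = torch.tensor([7, 8, 9])
#   inds = torch.tensor([1, 0, 1, 1, 0], dtype=torch.uint8)  # inside-image mask
#   unmap(data, 5, inds)  # -> tensor([7, 0, 8, 9, 0])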
| 39.160622 | 89 | 0.616698 |
0b029ecbcb619730ffb0e6631d7b1557d8d9c255 | 19 | py | Python | src/osfa/__init__.py | FreehandBlock51/OSFA | 13b6f8e6a11020e3205e0c89cb6e095edfbe402a | [
"MIT"
] | 1 | 2022-02-02T03:29:44.000Z | 2022-02-02T03:29:44.000Z | src/osfa/__init__.py | FreehandBlock51/OSFA | 13b6f8e6a11020e3205e0c89cb6e095edfbe402a | [
"MIT"
] | 3 | 2022-02-02T00:58:45.000Z | 2022-02-10T20:13:04.000Z | src/osfa/__init__.py | FreehandBlock51/OSFA | 13b6f8e6a11020e3205e0c89cb6e095edfbe402a | [
"MIT"
] | 1 | 2022-02-02T00:55:58.000Z | 2022-02-02T00:55:58.000Z | from .osfa import * | 19 | 19 | 0.736842 |
0b832fc627a13d4c6eaec4e360b433d6ba6a875b | 6,102 | py | Python | clean_data.py | borovik135/VisSatSatelliteStereo | e591e8753c48e231d2c5cce74d37df2252c4ed93 | [
"BSD-3-Clause"
] | 37 | 2019-11-22T14:55:36.000Z | 2022-03-27T07:52:18.000Z | clean_data.py | borovik135/VisSatSatelliteStereo | e591e8753c48e231d2c5cce74d37df2252c4ed93 | [
"BSD-3-Clause"
] | 11 | 2020-02-10T16:23:25.000Z | 2022-03-12T00:47:32.000Z | clean_data.py | borovik135/VisSatSatelliteStereo | e591e8753c48e231d2c5cce74d37df2252c4ed93 | [
"BSD-3-Clause"
] | 14 | 2020-03-19T06:19:06.000Z | 2022-02-16T07:59:38.000Z | # ===============================================================================================================
# Copyright (c) 2019, Cornell University. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
#     * Redistributions of source code must retain the above copyright notice, this list of conditions and
# the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# * Neither the name of Cornell University nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# Author: Kai Zhang (kz298@cornell.edu)
#
# The research is based upon work supported by the Office of the Director of National Intelligence (ODNI),
# Intelligence Advanced Research Projects Activity (IARPA), via DOI/IBC Contract Number D17PC00287.
# The U.S. Government is authorized to reproduce and distribute copies of this work for Governmental purposes.
# ===============================================================================================================
import os
import tarfile
import shutil
import unicodedata
import logging
# first find .NTF file, and extract order_id, prod_id, standard name
# then extract rpc file and preview image from the .tar file
def clean_image_info(file_name):
file_name = os.path.basename(file_name)
# get order_id, prod_id
idx = file_name.find('-P1BS-')
order_id = file_name[idx + 6:idx + 21]
prod_id = file_name[idx + 6:idx + 26]
img_name = file_name[idx - 13:idx + 26]
return img_name, order_id, prod_id
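# Illustrative example (the file name below is hypothetical but follows the
# <7 char date><6 char time>-P1BS-<20 char product id> convention used here):
#
#   clean_image_info('16OCT03160521-P1BS-501504472090_01_P001.NTF')
#   # -> ('16OCT03160521-P1BS-501504472090_01_P001',   # img_name
#   #     '501504472090_01',                           # order_id
#   #     '501504472090_01_P001')                      # prod_id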
def process_clean_data_item(item, dataset_dir, out_dir, tmp_dir):
if item[-4:] == '.NTF' and os.path.exists(os.path.join(dataset_dir, '{}.tar'.format(item[:-4]))):
logging.info('cleaning {}'.format(item))
img_name, order_id, prod_id = clean_image_info(item)
os.symlink(os.path.join(dataset_dir, item), os.path.join(out_dir, '{}.NTF'.format(img_name)))
tar = tarfile.open(os.path.join(dataset_dir, '{}.tar'.format(item[:-4])))
tar.extractall(os.path.join(tmp_dir, img_name))
subfolder = 'DVD_VOL_1'
for x in os.listdir(os.path.join(tmp_dir, img_name, order_id)):
if 'DVD_VOL' in x:
subfolder = x
break
des_folder = os.path.join(tmp_dir, img_name, order_id, subfolder, order_id)
# walk through des_folder
# img_files = []
# for root, dirs, files in os.walk(des_folder):
# img_files.extend([os.path.join(root, x) for x in files
# if img_name in x and (x[-4:] == '.XML' or x[-4:] == '.JPG')])
rpc_file = os.path.join(des_folder, '{}_PAN'.format(prod_id), '{}.XML'.format(img_name))
jpg_file = os.path.join(des_folder, '{}_PAN'.format(prod_id), '{}-BROWSE.JPG'.format(img_name))
img_files = [rpc_file, jpg_file]
for x in img_files:
shutil.copy(x, out_dir)
# remove control characters in the xml file
rpc_file = os.path.join(out_dir, '{}.XML'.format(img_name))
with open(rpc_file, encoding='utf-8', errors='ignore') as fp:
content = fp.read()
content = "".join([ch for ch in content if unicodedata.category(ch)[0] != "C"])
with open(rpc_file, 'w') as fp:
fp.write(content)
return True
return False
def clean_data(dataset_dirs, out_dir, pairing=None):
# out_dir must exist and be empty
if not os.path.exists(out_dir):
os.mkdir(out_dir)
dataset_dirs = [os.path.abspath(dataset_dir) for dataset_dir in dataset_dirs]
logging.info('dataset path: {}'.format(dataset_dirs))
logging.info('will save files to folder: {}'.format(out_dir))
logging.info('the standard format is: <7 char date><6 char time>-P1BS-<20 char product id>.NTF\n\n')
tmp_dir = os.path.join(out_dir, 'tmp')
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
os.mkdir(tmp_dir)
cnt = 0
if pairing is not None:
for p in pairing:
pan_ntf = p[0]
item = os.path.basename(pan_ntf)
dataset_dir = os.path.dirname(pan_ntf)
if process_clean_data_item(item, dataset_dir, out_dir, tmp_dir):
cnt += 1
else:
for dataset_dir in sorted(dataset_dirs):
for item in sorted(os.listdir(dataset_dir)):
# if 'WV03' not in item: # only select 'WV03' satellite images
# continue
if process_clean_data_item(item, dataset_dir, out_dir, tmp_dir):
cnt += 1
logging.info('processed {} items in total'.format(cnt))
# remove tmp_dir
shutil.rmtree(tmp_dir)
if __name__ == '__main__':
def main():
dataset_dir = '/data2/kz298/core3d_pan/jacksonville'
out_dir = os.path.join(dataset_dir, 'cleaned_data')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
clean_data(dataset_dir, out_dir)
main()
| 45.2 | 115 | 0.643559 |
016d9976eb320e81ff9aea647dd3fb92ffbe2043 | 1,395 | py | Python | cssTkinter/__init__.py | rug-gui/cssTk | 8159bf0892aa84fc0e42c1a6dec276ec615e63a5 | [
"MIT"
] | 4 | 2021-11-19T15:14:32.000Z | 2022-01-23T17:56:51.000Z | cssTkinter/__init__.py | rug-gui/cssTk | 8159bf0892aa84fc0e42c1a6dec276ec615e63a5 | [
"MIT"
] | 1 | 2021-11-19T15:16:04.000Z | 2021-11-19T15:20:45.000Z | cssTkinter/__init__.py | rug-gui/cssTk | 8159bf0892aa84fc0e42c1a6dec276ec615e63a5 | [
"MIT"
] | null | null | null |
class Element(object):
    def __init__(self, canvas, bs_element=None, declarations=None, canvas_ids=None):
        self.canvas=canvas
        # use a fresh list per instance to avoid the shared mutable default pitfall
        self.canvas_ids = canvas_ids if canvas_ids is not None else []
self.declarations = declarations
self.bs_element = bs_element
if self.bs_element:
self.bs_element.cssTkinter_element = self
def add_id(self, canvas_id):
self.canvas_ids.append(canvas_id)
def set_canvas(self, canvas):
"""Specifies the canvas the canvas_id belongs to.
"""
self.canvas = canvas
def set_rules(self, declarations):
"""Specifies the custom declarations for this element.
"""
self.declarations = declarations
def set_bs_element(self, bs_element):
"""Specifies the BeautifulSoup element associated with this Element.
"""
if not bs_element and self.bs_element:
del self.bs_element.cssTkinter_element
return
self.bs_element = bs_element
if self.bs_element:
self.bs_element.cssTkinter_element = self
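# Hypothetical usage sketch (the names canvas and tag are illustrative): wrap a
# canvas item in an Element and keep the link back to its BeautifulSoup tag so
# styles can later be reached through tag.cssTkinter_element.
#
#   elem = Element(canvas, bs_element=tag, declarations={'background-color': 'red'})
#   elem.add_id(canvas.create_rectangle(0, 0, 100, 40))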
def create_element(canvas, htmlelement, stylesheet):
import cssTkinter.css_processor as CSS
htmlstyle = ""
if htmlelement.has_attr("style"):
htmlstyle=htmlelement.attrs["style"]
style = CSS.parse_style(htmlstyle)
    e = Element(canvas, htmlelement, style)
    return e
| 30.326087 | 83 | 0.637993 |
3ef0a208cf92c42e597f91b9053d5f6bfdedb656 | 605 | py | Python | utils/config.py | npurson/pytorch-lightning-template | 6a7972242a8b287f861e6a0139fbf38cc954b061 | [
"MIT"
] | 3 | 2022-03-03T02:27:06.000Z | 2022-03-31T08:14:26.000Z | utils/config.py | npurson/pytorch-lightning-template | 6a7972242a8b287f861e6a0139fbf38cc954b061 | [
"MIT"
] | null | null | null | utils/config.py | npurson/pytorch-lightning-template | 6a7972242a8b287f861e6a0139fbf38cc954b061 | [
"MIT"
] | null | null | null | class ConfigDict(dict):
"""
Access-by-attribute, case-insensitive dictionary
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for k, v in list(self.items()):
if not k.islower() and k.isupper():
self.pop(k)
self[k.lower()] = v
if isinstance(v, dict) and not isinstance(v, ConfigDict):
self[k.lower()] = ConfigDict(v)
def __getattr__(self, name):
return self.get(name.lower())
def __setattr__(self, name, value):
self.__setitem__(name.lower(), value)
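# Quick usage sketch (illustrative): upper-case keys are folded to lower case and
# nested dicts are wrapped, so values can be reached as attributes.
#
#   cfg = ConfigDict({'MODEL': {'LR': 1e-3}})
#   assert cfg.model.lr == 1e-3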
| 31.842105 | 69 | 0.557025 |
60e7ae0e594695adafb9d1a1776143bbf5a31e22 | 6,946 | py | Python | nevergrad/functions/rl/agents.py | kentwar/nevergrad | caf2059df7cc0110f1a476b6f0620fb03afd0093 | [
"MIT"
] | null | null | null | nevergrad/functions/rl/agents.py | kentwar/nevergrad | caf2059df7cc0110f1a476b6f0620fb03afd0093 | [
"MIT"
] | null | null | null | nevergrad/functions/rl/agents.py | kentwar/nevergrad | caf2059df7cc0110f1a476b6f0620fb03afd0093 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import operator
import site
import glob
import ctypes
import copy as _copy
from typing import Dict, Any, Optional, Callable, Tuple
# Hackfix needed before pytorch import ("dlopen: cannot load any more object with static TLS")
# See issue #305
try:
for packages in site.getsitepackages():
for lib in glob.glob(f'{packages}/torch/lib/libgomp*.so*'):
ctypes.cdll.LoadLibrary(lib)
except Exception: # pylint: disable=broad-except
pass
# pylint: disable=wrong-import-position
import gym
import numpy as np
import torch as torch
import torch.nn.functional as F
from torch import nn
from torch.utils.data import WeightedRandomSampler
from nevergrad.parametrization import parameter as p
from ..base import ExperimentFunction
from . import base
from . import envs
class RandomAgent(base.Agent):
"""Agent that plays randomly.
"""
def __init__(self, env: gym.Env) -> None:
self.env = env
assert isinstance(env.action_space, gym.spaces.Discrete)
self.num_outputs = env.action_space.n
def act(self, observation: Any, reward: Any, done: bool, info: Optional[Dict[Any, Any]] = None) -> Any:
return np.random.randint(self.num_outputs)
def copy(self) -> "RandomAgent":
return self.__class__(self.env)
class Agent007(base.Agent):
"""Agents that plays slighlty better than random on the 007 game.
"""
def __init__(self, env: gym.Env) -> None:
self.env = env
assert isinstance(env, envs.DoubleOSeven) or (isinstance(env, base.SingleAgentEnv) and isinstance(env.env, envs.DoubleOSeven))
def act(self, observation: Any, reward: Any, done: bool, info: Optional[Dict[Any, Any]] = None) -> Any:
my_amm, my_prot, their_amm, their_prot = observation # pylint: disable=unused-variable
if their_prot == 4 and my_amm:
action = "fire"
elif their_amm == 0:
action = np.random.choice(["fire", "reload"])
else:
action = np.random.choice(["fire", "protect", "reload"])
return envs.JamesBond.actions.index(action)
def copy(self) -> "Agent007":
return self.__class__(self.env)
class TorchAgent(base.Agent):
"""Agents than plays through a torch neural network
"""
def __init__(self, module: nn.Module,
deterministic: bool = True,
instrumentation_std: float = 0.1) -> None:
super().__init__()
self.deterministic = deterministic
self.module = module
kwargs = {
name: p.Array(shape=value.shape).set_mutation(sigma=instrumentation_std).set_bounds(-10, 10, method="arctan")
for name, value in module.state_dict().items() # type: ignore
} # bounded to avoid overflows
self.instrumentation = p.Instrumentation(**kwargs)
@classmethod
def from_module_maker(
cls,
env: gym.Env,
module_maker: Callable[[Tuple[int, ...], int], nn.Module],
deterministic: bool = True
) -> "TorchAgent":
assert isinstance(env.action_space, gym.spaces.Discrete)
assert isinstance(env.observation_space, gym.spaces.Box)
module = module_maker(env.observation_space.shape, env.action_space.n)
return cls(module, deterministic=deterministic)
def act(self, observation: Any, reward: Any, done: bool, info: Optional[Dict[Any, Any]] = None) -> Any:
obs = torch.from_numpy(observation.astype(np.float32))
forward = self.module.forward(obs) # type: ignore
probas = F.softmax(forward, dim=0)
if self.deterministic:
return probas.max(0)[1].view(1, 1).item()
else:
return next(iter(WeightedRandomSampler(probas, 1)))
def copy(self) -> "TorchAgent":
return TorchAgent(_copy.deepcopy(self.module), self.deterministic)
def load_state_dict(self, state_dict: Dict[str, np.ndarray]) -> None:
self.module.load_state_dict({x: torch.tensor(y.astype(np.float32)) for x, y in state_dict.items()}) # type: ignore
class TorchAgentFunction(ExperimentFunction):
"""Instrumented function which plays the agent using an environment runner
"""
_num_test_evaluations = 1000
def __init__(
self, agent: TorchAgent, env_runner: base.EnvironmentRunner, reward_postprocessing: Callable[[float], float] = operator.neg
) -> None:
assert isinstance(env_runner.env, gym.Env)
self.agent = agent.copy()
self.runner = env_runner.copy()
self.reward_postprocessing = reward_postprocessing
super().__init__(self.compute, self.agent.instrumentation.copy().set_name(""))
self.register_initialization(agent=agent, env_runner=env_runner, reward_postprocessing=reward_postprocessing)
self._descriptors.update(num_repetitions=self.runner.num_repetitions, archi=self.agent.module.__class__.__name__)
def compute(self, **kwargs: np.ndarray) -> float:
self.agent.load_state_dict(kwargs)
try: # safeguard against nans
with torch.no_grad():
reward = self.runner.run(self.agent)
except RuntimeError as e:
warnings.warn(f"Returning 0 after error: {e}")
reward = 0.0
assert isinstance(reward, (int, float))
return self.reward_postprocessing(reward)
def evaluation_function(self, *args: Any, **kwargs: Any) -> float:
"""Implements the call of the function.
Under the hood, __call__ delegates to oracle_call + add some noise if noise_level > 0.
"""
num_tests = max(1, int(self._num_test_evaluations / self.runner.num_repetitions))
return sum(self.compute(**kwargs) for _ in range(num_tests)) / num_tests
class Perceptron(nn.Module):
def __init__(self, input_shape: Tuple[int, ...], output_size: int) -> None:
super().__init__() # type: ignore
assert len(input_shape) == 1
self.head = nn.Linear(input_shape[0], output_size) # type: ignore
def forward(self, *args: Any) -> Any:
assert len(args) == 1
return self.head(args[0])
class DenseNet(nn.Module):
def __init__(self, input_shape: Tuple[int, ...], output_size: int) -> None:
super().__init__() # type: ignore
assert len(input_shape) == 1
self.lin1 = nn.Linear(input_shape[0], 16) # type: ignore
self.lin2 = nn.Linear(16, 16) # type: ignore
self.lin3 = nn.Linear(16, 16) # type: ignore
self.head = nn.Linear(16, output_size) # type: ignore
def forward(self, *args: Any) -> Any:
assert len(args) == 1
x = F.relu(self.lin1(args[0]))
x = F.relu(self.lin2(x))
x = F.relu(self.lin3(x))
return self.head(x)
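# Illustrative sketch (environment sizes are made up): build a small policy
# network for a gym environment with a 4-dimensional observation space and two
# discrete actions, then wrap it in a TorchAgent.
#
#   net = DenseNet(input_shape=(4,), output_size=2)
#   agent = TorchAgent(net, deterministic=True)
#   # or, directly from an environment:
#   # agent = TorchAgent.from_module_maker(env, DenseNet, deterministic=True)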
| 37.956284 | 134 | 0.658077 |
cba843daea3f6711edbbce3034e7c53105b7547a | 403 | py | Python | src/idea/migrations/0008_idea_tags.py | micha31r/The-Impossible | 7a79dea3169907eb93107107f4003c5813de58dc | [
"MIT"
] | null | null | null | src/idea/migrations/0008_idea_tags.py | micha31r/The-Impossible | 7a79dea3169907eb93107107f4003c5813de58dc | [
"MIT"
] | 2 | 2020-04-15T03:57:42.000Z | 2020-06-06T01:43:34.000Z | src/the_impossible/live/migrations/idea/migrations/0008_idea_tags.py | micha31r/The-Impossible | 7a79dea3169907eb93107107f4003c5813de58dc | [
"MIT"
] | null | null | null | # Generated by Django 2.2.7 on 2020-04-25 00:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('idea', '0007_tag'),
]
operations = [
migrations.AddField(
model_name='idea',
name='tags',
field=models.ManyToManyField(blank=True, related_name='tagged', to='idea.Tag'),
),
]
| 21.210526 | 91 | 0.588089 |
6f942623aab87a359f177e7618b058c1922ae87c | 301 | py | Python | contrib/spendfrom/setup.py | fujicoin/fujicoin-bitcore | bd4219c284e716c2326ba450cc3288ca691cd8b3 | [
"MIT"
] | null | null | null | contrib/spendfrom/setup.py | fujicoin/fujicoin-bitcore | bd4219c284e716c2326ba450cc3288ca691cd8b3 | [
"MIT"
] | null | null | null | contrib/spendfrom/setup.py | fujicoin/fujicoin-bitcore | bd4219c284e716c2326ba450cc3288ca691cd8b3 | [
"MIT"
] | null | null | null | from distutils.core import setup
setup(name='btcspendfrom',
version='1.0',
description='Command-line utility for fujicoin "coin control"',
author='Gavin Andresen',
author_email='gavin@fujicoinfoundation.org',
requires=['jsonrpc'],
scripts=['spendfrom.py'],
)
| 30.1 | 69 | 0.66113 |
d50318508f3f855c2f3abc206c7eab2e3355b732 | 1,114 | py | Python | samples/broker_start.py | artemisw/mqtt_broker | be50891e762e874319cca7298e1f563e4336761e | [
"MIT"
] | null | null | null | samples/broker_start.py | artemisw/mqtt_broker | be50891e762e874319cca7298e1f563e4336761e | [
"MIT"
] | null | null | null | samples/broker_start.py | artemisw/mqtt_broker | be50891e762e874319cca7298e1f563e4336761e | [
"MIT"
] | null | null | null | import logging
import asyncio
import os
from hbmqtt.broker import Broker
logger = logging.getLogger(__name__)
config = {
'listeners': {
'default': {
'type': 'tcp',
'bind': '0.0.0.0:1883',
},
'ws-mqtt': {
'bind': '127.0.0.1:8080',
'type': 'ws',
'max_connections': 10,
},
},
'sys_interval': 10,
'auth': {
'allow-anonymous': True,
'password-file': os.path.join(os.path.dirname(os.path.realpath(__file__)), "passwd"),
'plugins': [
'auth_file', 'auth_anonymous'
]
}
}
broker = Broker(config)
@asyncio.coroutine
def test_coro():
yield from broker.start()
#yield from asyncio.sleep(5)
#yield from broker.shutdown()
if __name__ == '__main__':
formatter = "[%(asctime)s] :: %(levelname)s :: %(name)s :: %(message)s"
#formatter = "%(asctime)s :: %(levelname)s :: %(message)s"
logging.basicConfig(level=logging.INFO, format=formatter)
asyncio.get_event_loop().run_until_complete(test_coro())
asyncio.get_event_loop().run_forever() | 24.755556 | 93 | 0.572711 |
724dc466ff4e6d3d86e3997834f25c86ad2a5ce7 | 9,946 | py | Python | train_LAS.py | maxsampson/kpconv-pdal | 1b5ccd5308b4500b73f4d1cc2e877a3587e06181 | [
"MIT"
] | null | null | null | train_LAS.py | maxsampson/kpconv-pdal | 1b5ccd5308b4500b73f4d1cc2e877a3587e06181 | [
"MIT"
] | null | null | null | train_LAS.py | maxsampson/kpconv-pdal | 1b5ccd5308b4500b73f4d1cc2e877a3587e06181 | [
"MIT"
] | null | null | null | #
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Callable script to start a training on an LAS dataset
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Derived from train_S3DIS.py by Hugues THOMAS - 06/03/2020
# Brad CHAMBERS - 05/13/2021
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Common libs
import signal
import os
import sys
# Dataset
from datasets.LAS import *
from torch.utils.data import DataLoader
from utils.config import Config
from utils.trainer import ModelTrainer
from models.architectures import KPFCNN
from torch.utils.tensorboard import SummaryWriter
# Start of some automation
from automation import *
# ----------------------------------------------------------------------------------------------------------------------
#
# Config Class
# \******************/
#
class LASConfig(Config):
"""
Override the parameters you want to modify for this dataset
"""
####################
# Dataset parameters
####################
# Dataset name
dataset = 'LAS'
# Number of classes in the dataset (This value is overwritten by dataset class when Initializating dataset).
num_classes = None
# Type of task performed on this dataset (also overwritten)
dataset_task = ''
# Number of CPU threads for the input pipeline
input_threads = 10
#########################
# Architecture definition
#########################
# Define layers
architecture = ['simple',
'resnetb',
'resnetb_strided',
'resnetb',
'resnetb',
'resnetb_strided',
'resnetb_deformable',
'resnetb_deformable',
'resnetb_deformable_strided',
'resnetb_deformable',
'resnetb_deformable',
'resnetb_deformable_strided',
'resnetb_deformable',
'resnetb_deformable',
'nearest_upsample',
'unary',
'nearest_upsample',
'unary',
'nearest_upsample',
'unary',
'nearest_upsample',
'unary']
###################
# KPConv parameters
###################
# Radius of the input sphere
in_radius = 50.0
# Number of kernel points
num_kernel_points = 15
# Size of the first subsampling grid in meter
first_subsampling_dl = 2.0
# Radius of convolution in "number grid cell". (2.5 is the standard value)
conv_radius = 2.5
# Radius of deformable convolution in "number grid cell". Larger so that deformed kernel can spread out
deform_radius = 6.0
# Radius of the area of influence of each kernel point in "number grid cell". (1.0 is the standard value)
KP_extent = 1.2
# Behavior of convolutions in ('constant', 'linear', 'gaussian')
KP_influence = 'linear'
# Aggregation function of KPConv in ('closest', 'sum')
aggregation_mode = 'sum'
# Choice of input features
first_features_dim = 128
in_features_dim = 5
# Can the network learn modulations
modulated = False
# Batch normalization parameters
use_batch_norm = True
batch_norm_momentum = 0.02
# Deformable offset loss
# 'point2point' fitting geometry by penalizing distance from deform point to input points
# 'point2plane' fitting geometry by penalizing distance from deform point to input point triplet (not implemented)
deform_fitting_mode = 'point2point'
deform_fitting_power = 1.0 # Multiplier for the fitting/repulsive loss
deform_lr_factor = 0.1 # Multiplier for learning rate applied to the deformations
repulse_extent = 1.2 # Distance of repulsion for deformed kernel points
#####################
# Training parameters
#####################
# Maximal number of epochs
max_epoch = epochs_num()
# Learning rate management
learning_rate = 1e-2
momentum = 0.98
lr_decays = {i: 0.1 ** (1 / 150) for i in range(1, max_epoch)}
grad_clip_norm = 100.0
# Number of batch
batch_num = 8
# Number of steps per epochs
epoch_steps = 500
# Number of validation examples per epoch
validation_size = 50
# Number of epoch between each checkpoint
checkpoint_gap = 50
# Augmentations
augment_scale_anisotropic = True
augment_symmetries = [True, False, False]
augment_rotation = 'vertical'
augment_scale_min = 0.9
augment_scale_max = 1.1
augment_noise = 0.05
augment_color = 0.8
# The way we balance segmentation loss
# > 'none': Each point in the whole batch has the same contribution.
# > 'class': Each class has the same contribution (points are weighted according to class balance)
# > 'batch': Each cloud in the batch has the same contribution (points are weighted according cloud sizes)
segloss_balance = 'none'
# Do we nee to save convergence
saving = True
saving_path = None
# Dataset folder
path = file_location()
writer = SummaryWriter(tens_dirc())
# ----------------------------------------------------------------------------------------------------------------------
#
# Main Call
# \***************/
#
if __name__ == '__main__':
############################
# Initialize the environment
############################
# Set which gpu is going to be used
GPU_ID = '0'
# Set GPU visible device
os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID
###############
# Previous chkp
###############
# Choose here if you want to start training from a previous snapshot (None for new training)
# previous_training_path = 'Log_2020-03-19_19-53-27'
previous_training_path = ''
# Choose index of checkpoint to start from. If None, uses the latest chkp
chkp_idx = None
if previous_training_path:
# Find all snapshot in the chosen training folder
chkp_path = os.path.join('results', previous_training_path, 'checkpoints')
chkps = [f for f in os.listdir(chkp_path) if f[:4] == 'chkp']
# Find which snapshot to restore
if chkp_idx is None:
chosen_chkp = 'best_miou_chkp.tar'
else:
chosen_chkp = np.sort(chkps)[chkp_idx]
chosen_chkp = os.path.join('results', previous_training_path, 'checkpoints', chosen_chkp)
else:
chosen_chkp = None
##############
# Prepare Data
##############
print()
print('Data Preparation')
print('****************')
# Initialize configuration class
config = LASConfig()
if previous_training_path:
config.load(os.path.join('results', previous_training_path))
config.saving_path = None
# Get path from argument if given
if len(sys.argv) > 1:
config.saving_path = sys.argv[1]
# Initialize datasets
training_dataset = LASDataset(config, set='training', use_potentials=True)
test_dataset = LASDataset(config, set='validation', use_potentials=True)
# Initialize samplers
training_sampler = LASSampler(training_dataset)
test_sampler = LASSampler(test_dataset)
# Initialize the dataloader
training_loader = DataLoader(training_dataset,
batch_size=1,
sampler=training_sampler,
collate_fn=LASCollate,
num_workers=config.input_threads,
pin_memory=True)
test_loader = DataLoader(test_dataset,
batch_size=1,
sampler=test_sampler,
collate_fn=LASCollate,
num_workers=config.input_threads,
pin_memory=True)
# Calibrate samplers
training_sampler.calibration(training_loader, verbose=True)
test_sampler.calibration(test_loader, verbose=True)
# Optional debug functions
# debug_timing(training_dataset, training_loader)
# debug_timing(test_dataset, test_loader)
# debug_upsampling(training_dataset, training_loader)
print('\nModel Preparation')
print('*****************')
# Define network model
t1 = time.time()
label_value_ids = np.array([training_dataset.label_to_idx[l] for l in training_dataset.label_values])
net = KPFCNN(config, label_value_ids, training_dataset.ignored_labels)
debug = False
if debug:
print('\n*************************************\n')
print(net)
print('\n*************************************\n')
for param in net.parameters():
if param.requires_grad:
print(param.shape)
print('\n*************************************\n')
print("Model size %i" % sum(param.numel() for param in net.parameters() if param.requires_grad))
print('\n*************************************\n')
# Define a trainer class
trainer = ModelTrainer(net, config, chkp_path=chosen_chkp)
print('Done in {:.1f}s\n'.format(time.time() - t1))
print('\nStart training')
print('**************')
# Training
trainer.train(net, training_loader, test_loader, config)
print('Forcing exit now')
os.kill(os.getpid(), signal.SIGINT)
| 31.08125 | 120 | 0.544138 |
532b8ca4ab967b4af24a6b69a35409f008bff4bb | 675 | py | Python | tests/test_decorated_func.py | i-trofimtschuk/flask-hintful | f9c09cf48bf1dc930ba6353d3e9146e5cc073fd7 | [
"MIT"
] | 10 | 2019-08-14T01:18:37.000Z | 2022-03-09T17:02:56.000Z | tests/test_decorated_func.py | i-trofimtschuk/flask-hintful | f9c09cf48bf1dc930ba6353d3e9146e5cc073fd7 | [
"MIT"
] | 2 | 2020-05-25T19:51:43.000Z | 2022-03-10T18:54:20.000Z | tests/test_decorated_func.py | i-trofimtschuk/flask-hintful | f9c09cf48bf1dc930ba6353d3e9146e5cc073fd7 | [
"MIT"
] | 2 | 2022-03-08T19:13:15.000Z | 2022-03-17T20:10:53.000Z | from functools import wraps
def test_decorated_func(api):
'''Should be able to able to follow wrapped funcs
'''
def some_decorator(func):
@wraps(func)
def decorator(*args, **kwargs):
return func(*args, **kwargs)
return decorator
@api.route('/test')
@some_decorator
def _(arg: str) -> str:
return arg
with api.flask_app.test_client() as client:
resp = client.get('/test?arg=some_arg')
assert resp.get_data(as_text=True) == 'some_arg'
assert api.openapi_provider.openapi_paths[0].params[0].name == 'arg'
assert api.openapi_provider.openapi_paths[0].responses[0].data_type == str
| 27 | 78 | 0.645926 |
29dec298fc564d6d221db84acb2883dcd9a575e1 | 2,025 | py | Python | custom_components/phoniebox/__init__.py | c0un7-z3r0/hass-phoniebox | 7d9c6f05342f4ac49e3fcdb361f39243b260a2ca | [
"MIT"
] | 2 | 2022-01-25T10:25:45.000Z | 2022-01-27T10:41:43.000Z | custom_components/phoniebox/__init__.py | c0un7-z3r0/hass-phoniebox | 7d9c6f05342f4ac49e3fcdb361f39243b260a2ca | [
"MIT"
] | 19 | 2022-01-26T22:34:18.000Z | 2022-03-15T23:58:50.000Z | custom_components/phoniebox/__init__.py | c0un7-z3r0/hass-phoniebox | 7d9c6f05342f4ac49e3fcdb361f39243b260a2ca | [
"MIT"
] | null | null | null | """
Custom integration to integrate Phoniebox with Home Assistant.
For more details about this integration, please refer to
https://github.com/c0un7-z3r0/hass-phoniebox
"""
import asyncio
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import Config, HomeAssistant
from .const import CONF_MQTT_BASE_TOPIC, DOMAIN, PLATFORMS
from .data_coordinator import DataCoordinator
from .mqtt_client import MqttClient
async def async_setup(hass: HomeAssistant, config: Config):
"""Set up this integration using YAML is not supported."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up this integration using UI."""
if hass.data.get(DOMAIN) is None:
hass.data.setdefault(DOMAIN, {})
base_topic = entry.data.get(CONF_MQTT_BASE_TOPIC)
mqtt = MqttClient(hass, base_topic)
coordinator = DataCoordinator(mqtt)
hass.data[DOMAIN][entry.entry_id] = coordinator
for platform in PLATFORMS:
if entry.options.get(platform, True):
coordinator.platforms.append(platform)
hass.async_add_job(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
entry.async_on_unload(entry.add_update_listener(async_reload_entry))
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Handle removal of an entry."""
coordinator = hass.data[DOMAIN][entry.entry_id]
unloaded = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
if platform in coordinator.platforms
]
)
)
if unloaded:
hass.data[DOMAIN].pop(entry.entry_id)
return unloaded
async def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Reload config entry."""
await async_unload_entry(hass, entry)
await async_setup_entry(hass, entry)
| 31.153846 | 79 | 0.705679 |
77e9b3908332e8e4d6ba55296bc00138efd25457 | 11,526 | py | Python | packages/main/src/RPA/Netsuite.py | amisol/rpaframework | b1ee8a745a8e4d7bd41fa7765b26ab02b90cfb57 | [
"Apache-2.0"
] | 518 | 2020-05-29T11:39:34.000Z | 2022-03-31T22:04:08.000Z | packages/main/src/RPA/Netsuite.py | aikarjal/rpaframework | cd0599b33b7fcca3d43ea45116a43fc7507b73c9 | [
"Apache-2.0"
] | 316 | 2020-05-29T06:09:28.000Z | 2022-03-31T12:00:33.000Z | packages/main/src/RPA/Netsuite.py | aikarjal/rpaframework | cd0599b33b7fcca3d43ea45116a43fc7507b73c9 | [
"Apache-2.0"
] | 99 | 2020-05-27T20:23:54.000Z | 2022-03-26T02:57:35.000Z | from functools import wraps
import itertools
import logging
from netsuitesdk import NetSuiteConnection
from netsuitesdk.internal.client import NetSuiteClient
from netsuitesdk.internal.utils import PaginatedSearch
from robot.libraries.BuiltIn import BuiltIn, RobotNotRunningError
from RPA.core.helpers import required_env
from RPA.RobotLogListener import RobotLogListener
try:
BuiltIn().import_library("RPA.RobotLogListener")
except RobotNotRunningError:
pass
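# Decorator for keywords that require an authenticated NetSuite connection (self.client must be set).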
def ns_instance_required(f):
@wraps(f)
def wrapper(*args, **kwargs):
if args[0].client is None:
raise NetsuiteAuthenticationError("Authentication is not completed")
return f(*args, **kwargs)
return wrapper
class NetsuiteAuthenticationError(Exception):
"Error when authenticated Netsuite instance does not exist."
class Netsuite:
"""`Netsuite` is a library for accessing Netsuite using NetSuite SOAP web service SuiteTalk.
The library extends the `netsuitesdk library`_.
More information available at `NetSuite SOAP webservice SuiteTalk`_.
.. _netsuitesdk library:
https://github.com/fylein/netsuite-sdk-py
.. _NetSuite SOAP webservice SuiteTalk:
http://www.netsuite.com/portal/platform/developer/suitetalk.shtml
**Examples**
**Robot Framework**
.. code-block:: robotframework
*** Settings ***
Library RPA.Netsuite
Library RPA.Excel.Files
Library RPA.Tables
Task Setup Authorize Netsuite
*** Tasks ***
Get data from Netsuite and Store into Excel files
${accounts}= Get Accounts account_type=_expense
${accounts}= Create table ${accounts}
Create Workbook
Append Rows To Worksheet ${accounts}
Save Workbook netsuite_accounts.xlsx
Close Workbook
${bills}= Get Vendor Bills
${bills}= Create table ${bills}
Create Workbook
Append Rows To Worksheet ${bills}
Save Workbook netsuite_bills.xlsx
Close Workbook
*** Keywords ***
Authorize Netsuite
${secrets}= Get Secret netsuite
Connect
... account=${secrets}[ACCOUNT]
... consumer_key=${secrets}[CONSUMER_KEY]
            ...    consumer_secret=${secrets}[CONSUMER_SECRET]
            ...    token_key=${secrets}[TOKEN_KEY]
            ...    token_secret=${secrets}[TOKEN_SECRET]
**Python**
.. code-block:: python
from RPA.Netsuite import Netsuite
ns = Netsuite()
ns.connect()
accounts = ns.get_accounts()
currencies = ns.get_currencies()
""" # noqa: E501
ROBOT_LIBRARY_SCOPE = "GLOBAL"
ROBOT_LIBRARY_DOC_FORMAT = "REST"
def __init__(self) -> None:
self.client = None
self.account = None
self.logger = logging.getLogger(__name__)
listener = RobotLogListener()
listener.register_protected_keywords(
["RPA.Netsuite.connect", "RPA.Netsuite.login"]
)
def connect(
self,
account: str = None,
consumer_key: str = None,
consumer_secret: str = None,
token_key: str = None,
token_secret: str = None,
) -> None:
"""Connect to Netsuite with credentials from environment
variables.
Parameters are not logged into Robot Framework log.
:param account: parameter or environment variable `NS_ACCOUNT`
:param consumer_key: parameter or environment variable `NS_CONSUMER_KEY`
:param consumer_secret: parameter or environment variable `NS_CONSUMER_SECRET`
:param token_key: parameter or environment variable `NS_TOKEN_KEY`
:param token_secret: parameter or environment variable `NS_TOKEN_SECRET`
"""
if account is None:
self.account = required_env("NS_ACCOUNT")
else:
self.account = account
NS_CONSUMER_KEY = required_env("NS_CONSUMER_KEY", consumer_key)
NS_CONSUMER_SECRET = required_env("NS_CONSUMER_SECRET", consumer_secret)
NS_TOKEN_KEY = required_env("NS_TOKEN_KEY", token_key)
NS_TOKEN_SECRET = required_env("NS_TOKEN_SECRET", token_secret)
self.client = NetSuiteConnection(
account=self.account,
consumer_key=NS_CONSUMER_KEY,
consumer_secret=NS_CONSUMER_SECRET,
token_key=NS_TOKEN_KEY,
token_secret=NS_TOKEN_SECRET,
)
def login(
self,
account: str = None,
email: str = None,
password: str = None,
role: str = None,
appid: str = None,
) -> None:
"""Login to Netsuite with credentials from environment variables
Parameters are not logged into Robot Framework log.
:param account: parameter or environment variable `NS_ACCOUNT`
:param email: parameter or environment variable `NS_EMAIL`
:param password: parameter or environment variable `NS_PASSWORD`
:param role: parameter or environment variable `NS_ROLE`
:param appid: parameter or environment variable `NS_APPID`
"""
if account is None:
account = required_env("NS_ACCOUNT", self.account)
if account is None:
raise NetsuiteAuthenticationError("Authentication is not completed")
NS_EMAIL = required_env("NS_EMAIL", email)
NS_PASSWORD = required_env("NS_PASSWORD", password)
NS_ROLE = required_env("NS_ROLE", role)
NS_APPID = required_env("NS_APPID", appid)
if self.client is None:
self.client = NetSuiteClient(account=account)
self.client.login(
email=NS_EMAIL,
password=NS_PASSWORD,
role=NS_ROLE,
application_id=NS_APPID,
)
@ns_instance_required
def netsuite_get(
self, record_type: str = None, internal_id: str = None, external_id: str = None
) -> list:
"""Get all records of given type and internalId and/or externalId.
:param record_type: type of Netsuite record to get
:param internal_id: internalId of the type, default None
:param external_id: external_id of the type, default None
:raises ValueError: if record_type is not given
:return: records as a list or None
"""
if record_type is None:
raise ValueError("Parameter 'record_type' is required for kw: netsuite_get")
if internal_id is None and external_id is None:
raise ValueError(
"Parameter 'internal_id' or 'external_id' "
" is required for kw: netsuite_get"
)
kwargs = {"recordType": record_type}
if internal_id is not None:
kwargs["internalId"] = internal_id
if external_id is not None:
kwargs["externalId"] = external_id
return self.client.get(**kwargs)
@ns_instance_required
def netsuite_get_all(self, record_type: str) -> list:
"""Get all records of given type.
:param record_type: type of Netsuite record to get
:raises ValueError: if record_type is not given
:return: records as a list or None
"""
if record_type is None:
raise ValueError(
"Parameter 'record_type' is required for kw: netsuite_get_all"
)
return self.client.getAll(recordType=record_type)
def netsuite_search(
self,
type_name: str,
search_value: str,
operator: str = "contains",
page_size: int = 5,
) -> PaginatedSearch:
"""Search Netsuite for value from a type. Default operator is
`contains`.
:param type_name: search target type name
:param search_value: what to search for within type
:param operator: name of the operation, defaults to "contains"
:param page_size: result items within one page, defaults to 5
:return: paginated search object
"""
# pylint: disable=E1101
record_type_search_field = self.client.SearchStringField(
searchValue=search_value, operator=operator
)
basic_search = self.client.basic_search_factory(
type_name, recordType=record_type_search_field
)
paginated_search = PaginatedSearch(
client=self.client,
type_name=type_name,
basic_search=basic_search,
pageSize=page_size,
)
return paginated_search
def netsuite_search_all(
self, type_name: str, page_size: int = 20
) -> PaginatedSearch:
"""Search Netsuite for a type results.
:param type_name: search target type name
:param page_size: result items within one page, defaults to 5
:return: paginated search object
"""
paginated_search = PaginatedSearch(
client=self.client, type_name=type_name, pageSize=page_size
)
return paginated_search
@ns_instance_required
def get_accounts(self, count: int = 100, account_type: str = None) -> list:
"""Get Accounts of any type or specified type.
:param count: number of Accounts to return, defaults to 100
        :param account_type: if None, returns all account types, e.g. "_expense",
defaults to None
:return: accounts
"""
all_accounts = list(
itertools.islice(self.client.accounts.get_all_generator(), count)
)
if account_type is None:
return all_accounts
return [a for a in all_accounts if a["acctType"] == account_type]
@ns_instance_required
def get_currency(self, currency_id: str) -> object:
"""Get all a Netsuite Currency by its ID
:param currency_id: ID of the currency to get
:return: currency
"""
return self.client.currencies.get(internalId=currency_id)
@ns_instance_required
def get_currencies(self) -> list:
"""Get all Netsuite Currencies
:return: currencies
"""
return self.client.currencies.get_all()
@ns_instance_required
def get_locations(self) -> list:
"""Get all Netsuite Locations
:return: locations
"""
return self.client.locations.get_all()
@ns_instance_required
def get_departments(self) -> list:
"""Get all Netsuite Departments
:return: departments
"""
return self.client.departments.get_all()
@ns_instance_required
def get_classifications(self) -> list:
"""Get all Netsuite Classifications
:return: classifications
"""
return self.client.classifications.get_all()
@ns_instance_required
def get_vendors(self, count: int = 10) -> list:
"""Get list of vendors
:param count: number of vendors to return, defaults to 10
:return: list of vendors
"""
return list(itertools.islice(self.client.vendors.get_all_generator(), count))
@ns_instance_required
def get_vendor_bills(self, count: int = 10) -> list:
"""Get list of vendor bills
:param count: number of vendor bills to return, defaults to 10
:return: list of vendor bills
"""
return list(
itertools.islice(self.client.vendor_bills.get_all_generator(), count)
)
| 33.603499 | 96 | 0.629186 |
6b0a7901310e56500b1ddee61931f394b159c569 | 357 | py | Python | recursion/0070_climbing_stairs.py | MartinMa28/Algorithms_review | 3f2297038c00f5a560941360ca702e6868530f34 | [
"MIT"
] | null | null | null | recursion/0070_climbing_stairs.py | MartinMa28/Algorithms_review | 3f2297038c00f5a560941360ca702e6868530f34 | [
"MIT"
] | null | null | null | recursion/0070_climbing_stairs.py | MartinMa28/Algorithms_review | 3f2297038c00f5a560941360ca702e6868530f34 | [
"MIT"
] | null | null | null | class Solution:
def __init__(self):
self.cache = {}
def climbStairs(self, n: int) -> int:
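        # Fibonacci-style recurrence: f(n) = f(n-1) + f(n-2) with f(0) = f(1) = 1,
        # memoized in self.cache so the recursion runs in O(n) time.
        # Example: Solution().climbStairs(5) == 8.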
if n in self.cache:
return self.cache[n]
if n < 2:
self.cache[n] = 1
return 1
self.cache[n] = self.climbStairs(n - 1) + self.climbStairs(n - 2)
return self.cache[n] | 25.5 | 73 | 0.487395 |
e53a843f7f3ccc56f176d830db1a14f5e9928a9f | 992 | py | Python | tests/test_middlewares.py | Reyvel/django-shift-work | 834f198221550c8de5428d46cc2d4560b5925a9b | [
"MIT"
] | null | null | null | tests/test_middlewares.py | Reyvel/django-shift-work | 834f198221550c8de5428d46cc2d4560b5925a9b | [
"MIT"
] | null | null | null | tests/test_middlewares.py | Reyvel/django-shift-work | 834f198221550c8de5428d46cc2d4560b5925a9b | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.conf import settings
from django.utils import timezone
from freezegun import freeze_time
from datetime import datetime
from unittest.mock import MagicMock
import pytz
from django_shift_work import middlewares
class TestDjango_shift_work(TestCase):
def setUp(self):
self.request = MagicMock()
tz = settings.TIME_ZONE
now = timezone.localtime()
self.test_datetimes = (
now.replace(hour=0),
now.replace(hour=12),
now.replace(hour=7),
now.replace(hour=19)
)
def test_get_shift_shift_work(self):
mw = middlewares.ShiftWorkMiddleWare(lambda x: x)
dt = self.test_datetimes
for dt, name in zip(self.test_datetimes, ('night', 'morning', 'morning', 'night')):
with freeze_time(lambda: dt):
resp = mw(self.request)
assert resp.shift['name'] == name
def tearDown(self):
pass
| 26.810811 | 91 | 0.640121 |
c0ab0be898622bbe67b0a03fe2ec281f0e450c3c | 11,695 | py | Python | simulator/dpdp_competition/src/utils/checker.py | ZhuYQi/xingtian | 623b15c76aea48b089a451ce4d81980e6c5ac292 | [
"MIT"
] | null | null | null | simulator/dpdp_competition/src/utils/checker.py | ZhuYQi/xingtian | 623b15c76aea48b089a451ce4d81980e6c5ac292 | [
"MIT"
] | null | null | null | simulator/dpdp_competition/src/utils/checker.py | ZhuYQi/xingtian | 623b15c76aea48b089a451ce4d81980e6c5ac292 | [
"MIT"
] | null | null | null | # Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
import copy
from src.utils.logging_engine import logger
from src.utils.tools import get_item_list_of_vehicles
class Checker(object):
@staticmethod
def check_dispatch_result(dispatch_result, id_to_vehicle: dict, id_to_order: dict):
vehicle_id_to_destination = dispatch_result.vehicle_id_to_destination
vehicle_id_to_planned_route = dispatch_result.vehicle_id_to_planned_route
        # Check that the returned result covers every vehicle
if len(vehicle_id_to_destination) != len(id_to_vehicle):
logger.error(f"Num of returned destination {len(vehicle_id_to_destination)} "
f"is not equal to vehicle number {len(id_to_vehicle)}")
return False
if len(vehicle_id_to_planned_route) != len(id_to_vehicle):
logger.error(f"Num of returned planned route {len(vehicle_id_to_planned_route)} "
f"is not equal to vehicle number {len(id_to_vehicle)}")
return False
        # Check each vehicle's route one by one
for vehicle_id, vehicle in id_to_vehicle.items():
if vehicle_id not in vehicle_id_to_destination:
logger.error(f"Destination information of Vehicle {vehicle_id} is not in the returned result")
return False
# check destination
destination_in_result = vehicle_id_to_destination.get(vehicle_id)
origin_destination = vehicle.destination
if origin_destination is not None:
if destination_in_result is None:
logger.error(f"Vehicle {vehicle_id}, returned destination is None, "
f"however the origin destination is not None.")
return False
else:
if origin_destination.id != destination_in_result.id:
logger.error(f"Vehicle {vehicle_id}, returned destination id is {destination_in_result.id}, "
f"however the origin destination id is {origin_destination.id}.")
return False
if origin_destination.arrive_time != destination_in_result.arrive_time:
logger.error(f"Vehicle {vehicle_id}, arrive time of returned destination is "
f"{destination_in_result.arrive_time}, "
f"however the arrive time of origin destination is "
f"{origin_destination.arrive_time}.")
return False
# check routes
if vehicle_id not in vehicle_id_to_planned_route:
logger.error(f"Planned route of Vehicle {vehicle_id} is not in the returned result")
return False
route = []
if destination_in_result is not None:
route.append(destination_in_result)
route.extend(vehicle_id_to_planned_route.get(vehicle_id))
if len(route) > 0:
# check capacity
if not Checker.__meet_capacity_constraint(route, copy.deepcopy(vehicle.carrying_items),
vehicle.board_capacity):
logger.error(f"Vehicle {vehicle_id} violates the capacity constraint")
return False
# check LIFO
if not Checker.__meet_loading_and_unloading_constraint(route, copy.deepcopy(vehicle.carrying_items)):
logger.error(f"Vehicle {vehicle_id} violates the LIFO constraint")
return False
# # check duplicated node id
# if Checker.contain_duplicate_nodes(complete_route):
# return False
# check duplicated item id
if Checker.__contain_duplicate_items(route, copy.deepcopy(vehicle.carrying_items)):
return False
# check order splitting
if not Checker.__meet_order_splitting_constraint(dispatch_result, id_to_vehicle, id_to_order):
return False
return True
    # Capacity constraint
@staticmethod
def __meet_capacity_constraint(route: list, carrying_items, capacity):
left_capacity = capacity
        # Pop the items already on board (a stack) first, since they occupy capacity from the start
while not carrying_items.is_empty():
item = carrying_items.pop()
left_capacity -= item.demand
if left_capacity < 0:
logger.error(f"left capacity {left_capacity} < 0")
return False
for node in route:
delivery_items = node.delivery_items
pickup_items = node.pickup_items
for item in delivery_items:
left_capacity += item.demand
if left_capacity > capacity:
logger.error(f"left capacity {left_capacity} > capacity {capacity}")
return False
for item in pickup_items:
left_capacity -= item.demand
if left_capacity < 0:
logger.error(f"left capacity {left_capacity} < 0")
return False
return True
    # LIFO (last-in, first-out) constraint
@staticmethod
def __meet_loading_and_unloading_constraint(route: list, carrying_items):
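        # LIFO check: each item delivered at a node must be the item currently on top of the
        # onboard stack, pickups are pushed on top, and the stack must be empty after the route.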
for node in route:
delivery_items = node.delivery_items
pickup_items = node.pickup_items
for item in delivery_items:
unload_item = carrying_items.pop()
if unload_item is None or unload_item.id != item.id:
return False
for item in pickup_items:
carrying_items.push(item)
return carrying_items.is_empty()
    # Check whether the route contains duplicate nodes
@staticmethod
def __contain_duplicate_nodes(route):
node_id_list = []
for node in route:
if node.id not in node_id_list:
node_id_list.append(node.id)
else:
logger.info(f"Duplicate node {node.id}")
return True
return False
@staticmethod
def __contain_duplicate_items(route, carrying_items):
item_id_list = []
while not carrying_items.is_empty():
item = carrying_items.pop()
if item.id not in item_id_list:
item_id_list.append(item.id)
else:
logger.error(f"Item {item.id}: duplicate item id")
return True
for node in route:
pickup_items = node.pickup_items
for item in pickup_items:
if item.id not in item_id_list:
item_id_list.append(item.id)
else:
logger.error(f"Item {item.id}: duplicate item id")
return True
return False
@staticmethod
def __meet_order_splitting_constraint(dispatch_result, id_to_vehicle: dict, id_to_order: dict):
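        # An order may only be split across vehicles (or across pickups in one route) if its
        # total demand exceeds a single vehicle's capacity.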
vehicle_id_to_destination = dispatch_result.vehicle_id_to_destination
vehicle_id_to_planned_route = dispatch_result.vehicle_id_to_planned_route
vehicle_id_to_item_list = get_item_list_of_vehicles(dispatch_result, id_to_vehicle)
capacity = 0
split_order_id_list = Checker.__find_split_orders_from_vehicles(vehicle_id_to_item_list)
for vehicle_id, vehicle in id_to_vehicle.items():
logger.debug(f"Find split orders of vehicle {vehicle_id}")
capacity = vehicle.board_capacity
carrying_items = copy.deepcopy(vehicle.carrying_items)
route = []
if vehicle_id in vehicle_id_to_destination and vehicle_id_to_destination.get(vehicle_id) is not None:
route.append(vehicle_id_to_destination.get(vehicle_id))
if vehicle_id in vehicle_id_to_planned_route:
route.extend(vehicle_id_to_planned_route.get(vehicle_id))
split_order_id_list.extend(Checker.__find_split_orders_in_vehicle_routes(route, carrying_items))
for order_id in split_order_id_list:
if order_id in id_to_order:
order = id_to_order.get(order_id)
if order.demand <= capacity:
logger.error(f"order {order.id} demand: {order.demand} <= {capacity}, we can not split this order.")
return False
return True
@staticmethod
def __find_split_orders_from_vehicles(vehicle_id_to_item_list: dict):
order_id_to_vehicle_ids = {}
for vehicle_id, item_list in vehicle_id_to_item_list.items():
order_id_list = []
for item in item_list:
order_id = item.order_id
if order_id not in order_id_list:
order_id_list.append(order_id)
logger.debug(f"Vehicle {vehicle_id} contains {len(order_id_list)} orders, {len(item_list)} order items")
for order_id in order_id_list:
if order_id not in order_id_to_vehicle_ids:
order_id_to_vehicle_ids[order_id] = []
order_id_to_vehicle_ids[order_id].append(vehicle_id)
split_order_ids = []
for order_id, vehicle_ids in order_id_to_vehicle_ids.items():
if len(vehicle_ids) > 1:
split_order_ids.append(order_id)
logger.debug(f"Find {len(split_order_ids)} split orders from vehicles")
return split_order_ids
@staticmethod
def __find_split_orders_in_vehicle_routes(route, carrying_items):
order_id_list = []
split_order_ids = []
while not carrying_items.is_empty():
item = carrying_items.pop()
order_id = item.order_id
if order_id not in order_id_list:
order_id_list.append(order_id)
for node in route:
tmp_order_id_list = []
pickup_items = node.pickup_items
for item in pickup_items:
if item.order_id not in tmp_order_id_list:
tmp_order_id_list.append(item.order_id)
for order_id in tmp_order_id_list:
if order_id not in order_id_list:
order_id_list.append(order_id)
else:
split_order_ids.append(order_id)
logger.debug(f"find {len(split_order_ids)} split orders")
return split_order_ids
| 44.808429 | 121 | 0.609748 |
c1dd4274eb7321fffd083c2e5b6fb1ce608c5f73 | 639 | py | Python | apps/shortener/migrations/0002_auto_20200824_0612.py | ShAlireza/Yektanet | 9e638395b85346b7536cf422c514ae7762faa9b4 | [
"MIT"
] | null | null | null | apps/shortener/migrations/0002_auto_20200824_0612.py | ShAlireza/Yektanet | 9e638395b85346b7536cf422c514ae7762faa9b4 | [
"MIT"
] | null | null | null | apps/shortener/migrations/0002_auto_20200824_0612.py | ShAlireza/Yektanet | 9e638395b85346b7536cf422c514ae7762faa9b4 | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-08-24 06:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shortener', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='shortenedurl',
name='suggested_path',
field=models.CharField(blank=True, max_length=128, null=True),
),
migrations.AlterField(
model_name='shortenedurl',
name='destination',
field=models.CharField(default=None, max_length=512, unique=True),
preserve_default=False,
),
]
| 25.56 | 78 | 0.599374 |
7474b5fa613fb35bf166005d7188f12545be41ba | 1,452 | py | Python | config/settings/local.py | anhlt59/django_refactor | 1b1d86af9f732a000e30feb7644f6ca60b6e516a | [
"MIT"
] | null | null | null | config/settings/local.py | anhlt59/django_refactor | 1b1d86af9f732a000e30feb7644f6ca60b6e516a | [
"MIT"
] | null | null | null | config/settings/local.py | anhlt59/django_refactor | 1b1d86af9f732a000e30feb7644f6ca60b6e516a | [
"MIT"
] | null | null | null | from .base import * # noqa
# GENERAL
# ------------------------------------------------------------------------------
DEBUG = True
ALLOWED_HOSTS = ["*"]
# django-debug-toolbar
# ------------------------------------------------------------------------------
INSTALLED_APPS += [
"debug_toolbar",
"widget_tweaks",
"django_extensions",
"django_user_agents",
] # noqa F405
MIDDLEWARE += [
"debug_toolbar.middleware.DebugToolbarMiddleware",
"django_user_agents.middleware.UserAgentMiddleware",
# "app.core.middlewares.debug_query.QueryCountDebugMiddleware",
] # noqa F405
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# debug toolbar is shown only if your IP address is listed in the INTERNAL_IPS
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
# When running inside Docker, requests arrive from the bridge gateway (x.x.x.1), so add it to INTERNAL_IPS
import socket
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS += [".".join(ip.split(".")[:-1] + ["1"]) for ip in ips]
# minimal logging
import sys
LOGGING = {
"version": 1,
"handlers": {
"console": {
"class": "logging.StreamHandler",
"stream": sys.stdout,
}
},
"root": {
"handlers": ["console"],
"level": "INFO",
},
"loggers": {
"django.db.backends": {
"level": "DEBUG",
},
},
}
del DEFAULT_FILE_STORAGE
del STATICFILES_STORAGE
| 25.034483 | 80 | 0.559917 |
a072b8b0f0197bf1d8626194fbb3c0287394562f | 6,268 | py | Python | tests/unit/test_signals.py | ksamuel/postscriptum | d6eecb944069e26a5e5c98703267d579ee077716 | [
"MIT"
] | null | null | null | tests/unit/test_signals.py | ksamuel/postscriptum | d6eecb944069e26a5e5c98703267d579ee077716 | [
"MIT"
] | null | null | null | tests/unit/test_signals.py | ksamuel/postscriptum | d6eecb944069e26a5e5c98703267d579ee077716 | [
"MIT"
] | null | null | null | import signal
from unittest.mock import Mock
from signal import Signals
import pytest
from postscriptum.utils import IS_UNIX, IS_WINDOWS
from postscriptum.signals import (
signals_from_names,
SIGNAL_HANDLERS_HISTORY,
register_signals_handler,
restore_previous_signals_handlers,
)
@pytest.mark.skipif(not IS_WINDOWS, reason="Windows only test")
def test_signals_from_names_windows():
signals = list(signals_from_names(("SIGABRT", "SIGBREAK", "SIGTERM")))
assert signals == [Signals.SIGABRT, Signals.SIGBREAK]
@pytest.mark.skipif(not IS_UNIX, reason="Unix only test")
def test_signals_from_names_unix():
signals = list(signals_from_names(("SIGABRT", "SIGBREAK", "SIGTERM")))
assert signals == [Signals.SIGABRT, Signals.SIGTERM]
signals = list(signals_from_names((Signals.SIGABRT, "SIGBREAK", "SIGTERM")))
assert signals == [Signals.SIGABRT, Signals.SIGTERM]
def test_register_and_restore_signal_handlers():
assert not SIGNAL_HANDLERS_HISTORY
mock_handler_1 = Mock()
mock_handler_2 = Mock()
sigart_default_handler = signal.getsignal(signal.SIGABRT)
original_python_handlers = register_signals_handler(mock_handler_1, ["SIGABRT"])
(sig, original_handler), *_ = original_python_handlers.items()
assert SIGNAL_HANDLERS_HISTORY == {
sig: [original_handler]
}, "handler history should contain original handler"
assert (
original_handler is sigart_default_handler
), "Original handler should be python builtin one"
assert (
signal.getsignal(signal.SIGABRT) is not original_handler
), "The signal handler should not be the original one anymore"
assert (
signal.getsignal(signal.SIGABRT).__wrapped__ is mock_handler_1
), "The current signal handler should be a wrapper around our callback"
first_handler_wrapper = signal.getsignal(signal.SIGABRT)
previous_handlers = register_signals_handler(mock_handler_2, ["SIGABRT"])
(sig, prev_handler), *_ = previous_handlers.items()
assert (
prev_handler == first_handler_wrapper
), "The previous handler should be the wrapper around our first callback"
assert SIGNAL_HANDLERS_HISTORY == {
sig: [original_handler, first_handler_wrapper]
}, "The history should now contains both the original python handler and the wrapper around our first callback"
assert (
signal.getsignal(signal.SIGABRT) is not first_handler_wrapper
), "The wrapper around our first callback should not be the current signal handler"
assert (
signal.getsignal(signal.SIGABRT).__wrapped__ is mock_handler_2
), "The current signal handler should be the wrapper around our second callback"
restore_previous_signals_handlers(["SIGABRT"])
assert SIGNAL_HANDLERS_HISTORY == {
sig: [original_handler]
}, "History should have been restored to its previous state"
assert (
signal.getsignal(signal.SIGABRT) is first_handler_wrapper
), "Except handler should have been restored to it's previous value"
restore_previous_signals_handlers(["SIGABRT"])
assert not SIGNAL_HANDLERS_HISTORY, "History should be empty"
assert (
signal.getsignal(signal.SIGABRT) is original_handler
), "Current signal handler should be the original one"
@pytest.mark.skipif(not IS_UNIX, reason="Unix only test")
def test_register_and_restore_several_signal_handlers():
assert not SIGNAL_HANDLERS_HISTORY
handler = Mock()
sigart_default_handler = signal.getsignal(signal.SIGABRT)
sigint_default_handler = signal.getsignal(signal.SIGINT)
original_python_handlers = register_signals_handler(
handler, [signal.SIGABRT, "SIGINT"]
)
(
(sigart, original_sigart_handler),
(sigint, original_sigint_handler),
) = original_python_handlers.items()
assert (
SIGNAL_HANDLERS_HISTORY
== {sigart: [original_sigart_handler], sigint: [original_sigint_handler],}
== {sigart: [sigart_default_handler], sigint: [sigint_default_handler],}
), "handler history should contain original handlers"
assert (
signal.getsignal(signal.SIGABRT).__wrapped__ is handler
), "The current signal handler should be a wrapper around our callback"
assert (
signal.getsignal(signal.SIGINT).__wrapped__ is handler
), "The current signal handler should be a wrapper around our callback"
restore_previous_signals_handlers(["SIGABRT", signal.SIGINT])
assert not SIGNAL_HANDLERS_HISTORY, "History should be empty"
with pytest.raises(IndexError):
restore_previous_signals_handlers(["SIGABRT", signal.SIGINT])
assert (
signal.getsignal(signal.SIGABRT) is original_sigart_handler
), "Current signal handler should be the original one"
assert (
signal.getsignal(signal.SIGINT) is original_sigint_handler
), "Current signal handler should be the original one"
original_python_handlers = register_signals_handler(
handler, [signal.SIGABRT, "SIGINT"]
)
restore_previous_signals_handlers([signal.SIGINT])
assert (
signal.getsignal(signal.SIGINT) is original_sigint_handler
), "Current signal handler should be the original one"
assert (
signal.getsignal(signal.SIGABRT).__wrapped__ is handler
), "The current signal handler should be a wrapper around our callback"
assert SIGNAL_HANDLERS_HISTORY == {signal.SIGABRT: [original_sigart_handler]}
def test_register_and_restore_signal_handler_with_call():
mock_handler_1 = Mock()
mock_handler_2 = Mock()
fake_frame = Mock()
original_python_handlers = register_signals_handler(mock_handler_1, ["SIGABRT"])
previous_handlers = register_signals_handler(mock_handler_2, ["SIGABRT"])
signal.getsignal(signal.SIGABRT)(signal.SIGABRT, fake_frame)
restore_previous_signals_handlers(["SIGABRT"])
signal.getsignal(signal.SIGABRT)(signal.SIGABRT, fake_frame)
restore_previous_signals_handlers(["SIGABRT"])
mock_handler_1.assert_called_once_with(
signal.SIGABRT, fake_frame, original_python_handlers[signal.SIGABRT]
)
mock_handler_2.assert_called_once_with(
signal.SIGABRT, fake_frame, previous_handlers[signal.SIGABRT]
)
| 36.022989 | 115 | 0.739151 |
e8163cfdf2d7fa0a32158347c9c5f81ab0a3df6a | 1,759 | py | Python | tests/conftest.py | SolomidHero/speech-regeneration-enhancer | eb43907ff085d68a707ff7bc3af14e93ff66fd65 | [
"MIT"
] | 8 | 2021-03-27T00:17:17.000Z | 2022-01-26T05:26:19.000Z | tests/conftest.py | SolomidHero/speech-regeneration-enhancer | eb43907ff085d68a707ff7bc3af14e93ff66fd65 | [
"MIT"
] | 1 | 2021-09-26T02:29:21.000Z | 2021-09-27T08:56:13.000Z | tests/conftest.py | SolomidHero/speech-regeneration-enhancer | eb43907ff085d68a707ff7bc3af14e93ff66fd65 | [
"MIT"
] | 1 | 2021-03-26T12:30:14.000Z | 2021-03-26T12:30:14.000Z | # here we make fixtures of toy data
# real parameters are stored and accessed from config
import pytest
import librosa
import os
import numpy as np
from hydra.experimental import compose, initialize
@pytest.fixture(scope="session")
def cfg():
with initialize(config_path="../", job_name="test_app"):
config = compose(config_name="config")
config.dataset = compose(config_name="tests/test_dataset_config")
config.train = compose(config_name="tests/test_train_config")
return config
@pytest.fixture(scope="session")
def sample_rate(cfg):
return cfg.data.sample_rate
@pytest.fixture(scope="session")
def example_wav(sample_rate):
wav, sr = librosa.load(
os.path.dirname(__file__) + "/data/example.mp3",
sr=sample_rate, dtype=np.float32,
)
return { 'wav': wav, 'sr': sr }
@pytest.fixture(scope="session")
def n_fft(cfg):
return cfg.data.n_fft
@pytest.fixture(scope="session")
def hop_length(cfg):
return cfg.data.hop_length
@pytest.fixture(scope="session")
def win_length(cfg):
return cfg.data.win_length
@pytest.fixture(scope="session")
def f_min(cfg):
return cfg.data.f_min
@pytest.fixture(scope="session")
def f_max(cfg):
return cfg.data.f_max
@pytest.fixture(scope="session")
def hop_ms(example_wav, hop_length):
return 1e3 * hop_length / example_wav['sr']
@pytest.fixture(scope="session")
def n_frames(example_wav, hop_length):
return (example_wav['wav'].shape[-1] - 1) // hop_length + 1
# It is not clear if we should cleanup the test directories
# or leave them for debugging
# https://github.com/pytest-dev/pytest/issues/3051
@pytest.fixture(autouse=True, scope='session')
def clear_files_teardown():
yield None
os.system("rm -r tests/test_dataset tests/test_experiment tests/test_logs") | 25.867647 | 77 | 0.74133 |
349f43eb239b577b74f308fb70ae2335a55a5370 | 944 | py | Python | app.py | camiloibanez/web-scraping-challenge | 60d18027dc967e856b5d95c9d547367eb764b081 | [
"ADSL"
] | 1 | 2020-08-18T12:03:07.000Z | 2020-08-18T12:03:07.000Z | app.py | camiloibanez/web-scraping-challenge | 60d18027dc967e856b5d95c9d547367eb764b081 | [
"ADSL"
] | null | null | null | app.py | camiloibanez/web-scraping-challenge | 60d18027dc967e856b5d95c9d547367eb764b081 | [
"ADSL"
] | null | null | null | from flask import Flask, render_template, redirect
import pymongo
app = Flask(__name__)
conn = "mongodb+srv://Client1:QEPI88Emag3yxyvU@mars-scraped-info.0hpmq.mongodb.net/mars_db?retryWrites=true&w=majority"
client = pymongo.MongoClient(conn)
db = client.mars_db
@app.route("/")
def index():
results = db.scraped_info.find_one()
pic1 = results['hemisphere_image_urls'][0]
pic2 = results['hemisphere_image_urls'][1]
pic3 = results['hemisphere_image_urls'][2]
pic4 = results['hemisphere_image_urls'][3]
tables = [results['Mars_facts_table']]
return render_template('index.html', dict=results, pic1=pic1, pic2=pic2, pic3=pic3, pic4=pic4, tables=tables)
@app.route("/scrape")
def to_scrape():
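    # Re-run the scraper and replace the single cached document in MongoDB before redirecting home.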
from scrape_mars import scrape
results = scrape()
db.scraped_info.drop()
db.scraped_info.insert_one(
results
)
return redirect("/")
if __name__ == "__main__":
app.run(debug=True) | 24.205128 | 119 | 0.705508 |
f102a02898ade01f081994385e8cb1cefbac37bc | 3,619 | py | Python | app/controllers/accounts.py | Studio-Link-old/webapp | 78a7f75c7a7837efa737f5a6f9a9b70e28d98664 | [
"BSD-2-Clause"
] | 6 | 2015-01-08T00:50:08.000Z | 2017-08-03T12:57:14.000Z | app/controllers/accounts.py | Studio-Link-old/webapp | 78a7f75c7a7837efa737f5a6f9a9b70e28d98664 | [
"BSD-2-Clause"
] | 7 | 2015-01-05T21:26:07.000Z | 2016-01-18T07:21:01.000Z | app/controllers/accounts.py | Studio-Link-old/webapp | 78a7f75c7a7837efa737f5a6f9a9b70e28d98664 | [
"BSD-2-Clause"
] | 2 | 2015-01-24T11:01:42.000Z | 2015-01-28T15:50:13.000Z | # -*- encoding: utf-8; py-indent-offset: 4 -*-
# +--------------------------------------------------------------------------+
# | _____ __ ___ __ _ __ |
# | / ___// /___ ______/ (_)___ / / (_)___ / /__ |
# | \__ \/ __/ / / / __ / / __ \ / / / / __ \/ //_/ |
# | ___/ / /_/ /_/ / /_/ / / /_/ / / /___/ / / / / ,< |
# |/____/\__/\__,_/\__,_/_/\____/ /_____/_/_/ /_/_/|_| |
# |Copyright Sebastian Reimers 2013 - 2015 studio-link.de |
# |License: BSD-2-Clause (see LICENSE File) |
# +--------------------------------------------------------------------------+
from flask import Blueprint, render_template, redirect, request, url_for, flash
from app import db
from app.models.accounts import Accounts
from app.forms.accounts import AddForm, EditForm, EditProvisioningForm
from app import tasks
from sqlalchemy.exc import IntegrityError
from app.libs import baresip
mod = Blueprint('accounts', __name__, url_prefix='/accounts')
@mod.route('/')
def index():
accounts = Accounts.query.all()
status = {}
for account in accounts:
sip = account.username + '@' + account.server
status[account.id] = baresip.get('reg_status', sip)
return render_template("accounts/index.html",
accounts=accounts,
status=status,
ipv4=baresip.get('network', 'IPv4'),
ipv6=baresip.get('network', 'IPv6'))
@mod.route('/add/', methods=('GET', 'POST'))
def add():
form = AddForm(request.form)
if form.validate_on_submit():
account = Accounts(form.data)
db.session.add(account)
try:
db.session.commit()
account_config()
return redirect(url_for('accounts.index'))
except IntegrityError:
flash(u'IntegrityError', 'danger')
return render_template("accounts/form.html", form=form)
@mod.route('/edit/<id>', methods=('GET', 'POST'))
def edit(id):
account = Accounts.query.get(id)
account.codecs = ['opus/48000/2', 'opus/48000/1', 'G722', 'G726-40/8000/1', 'PCMU','PCMA', 'GSM', 'L16']
options = account.options.split(';')
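    # account.options stores ';'-separated key=value pairs; pull out audio_codecs for the form
    # and keep every other option untouched.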
old_options = ""
for option in options:
if option:
(key, value) = option.split('=')
if key == "audio_codecs":
account.codecs = value.split(',')
else:
old_options = old_options + ";" + option
password = account.password
if account.provisioning:
form = EditProvisioningForm(obj=account)
else:
form = EditForm(obj=account)
if form.validate_on_submit():
form.populate_obj(account)
if account.provisioning:
account.password = password
else:
if not request.form['password']:
account.password = password
if account.codecs:
account.options = old_options + ";audio_codecs=" + ','.join(account.codecs)
db.session.add(account)
db.session.commit()
account_config()
return redirect(url_for('accounts.index'))
return render_template("accounts/form.html",
form=form, action='/accounts/edit/'+id)
@mod.route('/delete/<id>')
def delete(id):
db.session.delete(Accounts.query.get(id))
db.session.commit()
account_config()
return redirect(url_for('accounts.index'))
def account_config():
tasks.account_config.delay()
| 36.555556 | 108 | 0.53081 |
07aef6e6612b0a0e793593f19c001fd4d524a632 | 6,634 | py | Python | bin/batch_conversion.py | mondalspandan/DeepJetCore | 1fbbf488eee695f28538c65365693829a4b1400b | [
"Apache-2.0"
] | null | null | null | bin/batch_conversion.py | mondalspandan/DeepJetCore | 1fbbf488eee695f28538c65365693829a4b1400b | [
"Apache-2.0"
] | null | null | null | bin/batch_conversion.py | mondalspandan/DeepJetCore | 1fbbf488eee695f28538c65365693829a4b1400b | [
"Apache-2.0"
] | null | null | null | #!/bin/env python
from argparse import ArgumentParser
from pdb import set_trace
import subprocess
import os
parser = ArgumentParser('program to convert root tuples to traindata format')
parser.add_argument("infile", help="set input sample description (output from the check.py script)", metavar="FILE")
parser.add_argument("nchunks", type=int, help="number of jobs to be submitted")
parser.add_argument("out", help="output path")
parser.add_argument("batch_dir", help="batch directory")
parser.add_argument("-c", help="output class", default="")
parser.add_argument("--testdatafor", default='')
parser.add_argument("--nforweighter", default='500000', help='set number of samples to be used for weight and mean calculation')
parser.add_argument("--meansfrom", default="", help='where to get means/std, in case already computed')
parser.add_argument("--useexistingsplit", default=False, help='use an existing file split (potentially dangerous)')
args = parser.parse_args()
args.infile = os.path.abspath(args.infile)
args.out = os.path.abspath(args.out)
args.batch_dir = os.path.abspath(args.batch_dir)
if len(args.c)<1:
print("please specify and output class")
exit(-1)
cmssw_version='CMSSW_10_0_0'
deep_jet_base = os.environ['DEEPJETCORE_SUBPACKAGE']
if len(deep_jet_base) < 1:
raise RuntimeError('I cannot find the project root directory. DEEPJETCORE_SUBPACKAGE needs to be defined')
deep_jet_base_name = os.path.basename(deep_jet_base)
deep_jet_core = os.path.abspath((os.environ['DEEPJETCORE']))
print('creating CMSSW based installation of DeepJetCore to run on sl6 nodes')
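# Shell snippet executed locally via os.system: it creates a CMSSW area inside the batch
# directory and compiles DeepJetCore plus the subpackage there, so batch jobs can reuse it.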
fullcommand='''
source deactivate ;
cd {batchdir} ;
export SCRAM_ARCH=slc6_amd64_gcc630 ;
scramv1 project CMSSW {cmssw_version} ;
cd {cmssw_version}/src ;
echo setting up cmssw env ;
eval `scram runtime -sh` ;
cp -rL {djc} . ;
mkdir -p {DJ_base_name} ;
cp -rL {DJ}/modules {DJ_base_name}/ ;
cp {DJ}/* {DJ_base_name}/ ;
cd {batchdir}/{cmssw_version}/src/DeepJetCore/compiled ;
pwd ;
echo compiling DeepJetCore ;
make clean;
make -j4;
cd {batchdir}/{cmssw_version}/src/{DJ_base_name} ;
echo sourcing {batchdir}/{cmssw_version}/src/DeepJetCore ;
cd {batchdir}/{cmssw_version}/src/DeepJetCore
export DEEPJETCORE=`pwd`
export PYTHONPATH=`pwd`/../:$PYTHONPATH
export LD_LIBRARY_PATH=`pwd`/compiled:$LD_LIBRARY_PATH
export PATH=`pwd`/bin:$PATH
echo "compiling {DJ_base_name} (if needed)";
echo {batchdir}/{cmssw_version}/src/{DJ_base_name}/modules
cd {batchdir}/{cmssw_version}/src/{DJ_base_name}/modules ;
make clean;
make -j4 ;
'''.format(DJ=deep_jet_base,DJ_base_name=deep_jet_base_name, djc=deep_jet_core, batchdir=args.batch_dir,cmssw_version=cmssw_version)
#print(fullcommand)
#exit()
if os.path.isdir(args.out):
print ("output dir must not exists")
exit(-2)
if os.path.isdir(args.batch_dir):
print ("batch dir must not exists")
os.mkdir(args.batch_dir)
if not os.path.isdir('%s/batch' % args.batch_dir):
os.mkdir('%s/batch' % args.batch_dir)
os.system(fullcommand)
djc_cmssw=args.batch_dir+'/'+cmssw_version +'/src/DeepJetCore'
if not (len(args.meansfrom) or args.testdatafor):
#Run a fisrt round of root conversion to get the means/std and weights
print('creating a dummy datacollection for means/norms and weighter (can take a while)...')
cmd = [
'convertFromRoot.py',
'-i', args.infile,
'-c', args.c,
'-o', args.out,
'--nforweighter', args.nforweighter,
'--means'
]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
code = proc.wait()
if code != 0:
raise RuntimeError('The first round of root conversion failed with message: \n\n%s' % err)
else:
print('means/norms/weighter produced successfully')
elif args.meansfrom:
if not os.path.exists(args.meansfrom):
raise Exception("The file "+args.meansfrom+" does not exist")
print('using means/weighter from '+args.meansfrom)
os.mkdir(args.out)
os.system('cp '+args.meansfrom+' '+args.out+'/batch_template.dc')
inputs = [i for i in open(args.infile)]
def chunkify(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
if not args.infile.endswith('.txt'):
raise ValueError('The code assumes that the input files has .txt extension')
print('splitting input file...')
txt_template = args.infile.replace('.txt', '.%s.txt')
batch_txts = []
nchunks = 0
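# Write one text file per batch job containing that job's share of the input files.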
for idx, chunk in enumerate(chunkify(inputs, len(inputs) // args.nchunks)):
name = txt_template % idx
batch_txts.append(name)
if not args.useexistingsplit:
with open(name, 'w') as cfile:
cfile.write(''.join(chunk))
nchunks = idx
batch_template = '''#!/bin/bash
sleep $(shuf -i1-300 -n1) #sleep a random amount of time between 1s and 10' to avoid bottlenecks in reaching afs
echo "JOBSUB::RUN job running"
trap "echo JOBSUB::FAIL job killed" SIGTERM
BASEDIR=`pwd`
cd {djc_cmssw}
eval `scram runtime -sh` #get glibc libraries
export PATH={djc_cmssw}/bin:$PATH
export LD_LIBRARY_PATH={djc_cmssw}/compiled:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH={djc_cmssw}/../{DJ}/modules:$LD_LIBRARY_PATH
export PYTHONPATH={djc_cmssw}/../:$PYTHONPATH
export PYTHONPATH={djc_cmssw}/../{DJ}/modules:$PYTHONPATH
cd $BASEDIR
convertFromRoot.py "$@"
exitstatus=$?
if [ $exitstatus != 0 ]
then
echo JOBSUB::FAIL job failed with status $exitstatus
else
echo JOBSUB::SUCC job ended sucessfully
fi
'''.format(DJ=deep_jet_base_name,djc_cmssw=djc_cmssw)
batch_script = '%s/batch.sh' % args.batch_dir
with open(batch_script, 'w') as bb:
bb.write(batch_template)
means_file = '%s/batch_template.dc' % os.path.realpath(args.out) if not args.testdatafor else args.testdatafor
option = '--usemeansfrom' if not args.testdatafor else '--testdatafor'
with open('%s/submit.sub' % args.batch_dir, 'w') as bb:
bb.write('''
executable = {EXE}
arguments = -i {INFILE} -c {CLASS} -o {OUT} --nothreads --batch conversion.$(ProcId).dc {OPTION} {MEANS}
output = batch/con_out.$(ProcId).out
error = batch/con_out.$(ProcId).err
log = batch/con_out.$(ProcId).log
+MaxRuntime = 86399
getenv = True
use_x509userproxy = True
queue {NJOBS}
'''.format(
EXE = os.path.realpath(batch_script),
NJOBS = nchunks+1,
INFILE = txt_template % '$(ProcId)',
CLASS = args.c,
OUT = os.path.realpath(args.out),
OPTION = option,
MEANS = means_file,
)
)
print('condor submit file can be found in '+ args.batch_dir+'\nuse check_conversion.py ' + args.batch_dir + ' to to check jobs')
| 33.505051 | 133 | 0.708924 |
1cb12c087b525be3556bbfb04cd751afb499dfac | 756 | py | Python | tests/test_config.py | Stoom/client-python | bf8e2db7b54c6657717b963333e1ca8e8acce3f1 | [
"MIT"
] | null | null | null | tests/test_config.py | Stoom/client-python | bf8e2db7b54c6657717b963333e1ca8e8acce3f1 | [
"MIT"
] | 3 | 2021-04-05T02:27:32.000Z | 2021-04-05T03:43:34.000Z | tests/test_config.py | Stoom/client-python | bf8e2db7b54c6657717b963333e1ca8e8acce3f1 | [
"MIT"
] | null | null | null | # pylint:disable=missing-function-docstring
import pytest
from incognitus_client import IncognitusConfig
def test_config__sets_tenant_id():
config = IncognitusConfig("abc", "def")
assert config.tenant_id == "abc"
def test_config__sets_application_id():
config = IncognitusConfig("abc", "def")
assert config.application_id == "def"
def test_config__raises_when_missing_tenant_id():
with pytest.raises(ValueError) as ex:
IncognitusConfig("", "def")
assert 'Tenant ID is required' in str(ex.value)
def test_config__raises_when_missing_application_id():
with pytest.raises(ValueError) as ex:
IncognitusConfig("abc", "")
assert ex is not None
assert 'Application ID is required' in str(ex.value)
| 23.625 | 56 | 0.731481 |
32119ee56d947cbee394346bf3e52d6d3fb85422 | 1,780 | py | Python | nicos_mlz/kws3/setups/ls340.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_mlz/kws3/setups/ls340.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-08-18T10:55:42.000Z | 2021-08-18T10:55:42.000Z | nicos_mlz/kws3/setups/ls340.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
description = "Setup for the LakeShore 340 temperature controller"
group = "optional"
includes = ["alias_T"]
tango_base = "tango://phys.kws3.frm2:10000/kws3"
tango_ls340 = tango_base + "/ls340"
devices = dict(
T_ls340 = device("nicos.devices.tango.TemperatureController",
description = "Temperature regulation",
tangodevice = tango_ls340 + "/t_control1",
pollinterval = 2,
maxage = 5,
abslimits = (0, 300),
precision = 0.01,
),
ls340_heaterrange = device("nicos.devices.tango.DigitalOutput",
description = "Temperature regulation",
tangodevice = tango_ls340 + "/t_range1",
unit = '',
fmtstr = '%d',
),
T_ls340_A = device("nicos.devices.tango.Sensor",
description = "Sensor A",
tangodevice = tango_ls340 + "/t_sensor1",
pollinterval = 2,
maxage = 5,
),
T_ls340_B = device("nicos.devices.tango.Sensor",
description = "Sensor B",
tangodevice = tango_ls340 + "/t_sensor2",
pollinterval = 2,
maxage = 5,
),
T_ls340_C = device("nicos.devices.tango.Sensor",
description = "Sensor C",
tangodevice = tango_ls340 + "/t_sensor3",
pollinterval = 2,
maxage = 5,
),
T_ls340_D = device("nicos.devices.tango.Sensor",
description = "Sensor D",
tangodevice = tango_ls340 + "/t_sensor4",
pollinterval = 2,
maxage = 5,
),
)
alias_config = {
"T": {
"T_%s" % setupname: 100
},
"Ts":
{
"T_%s_A" % setupname: 110,
"T_%s_B" % setupname: 100,
"T_%s_C" % setupname: 90,
"T_%s_D" % setupname: 80,
"T_%s" % setupname: 120,
},
}
| 27.384615 | 67 | 0.556742 |
9eb21104afcd04d6c9c512722905ae606fd60b0c | 1,609 | py | Python | display/ATOLED1_XPRO-128x32/module.py | rbryson74/gfx | 656a3b443187c91e19cb78551a74a29ab0691de4 | [
"0BSD"
] | null | null | null | display/ATOLED1_XPRO-128x32/module.py | rbryson74/gfx | 656a3b443187c91e19cb78551a74a29ab0691de4 | [
"0BSD"
] | null | null | null | display/ATOLED1_XPRO-128x32/module.py | rbryson74/gfx | 656a3b443187c91e19cb78551a74a29ab0691de4 | [
"0BSD"
] | null | null | null | # coding: utf-8
##############################################################################
# Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries.
#
# Subject to your compliance with these terms, you may use Microchip software
# and any derivatives exclusively with Microchip products. It is your
# responsibility to comply with third party license terms applicable to your
# use of third party software (including open source software) that may
# accompany Microchip software.
#
# THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
# EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
# WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
# PARTICULAR PURPOSE.
#
# IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
# INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
# WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
# BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
# FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
# ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
# THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
##############################################################################
def loadModule():
component = Module.CreateComponent("gfx_disp_atoled1-xpro_128x32", "OLED1 Xplained Pro", "/Graphics/Displays/", "atoled1-xpro.py")
component.setDisplayType("128x32 OLED1 Xplained Pro")
component.addCapability("gfx_display", "Graphics Display", False) | 57.464286 | 131 | 0.709758 |
e0be7b9b8eaacfeccb4957aae305855b472b15e2 | 3,001 | py | Python | vega/core/evaluator/evaluator.py | qixiuai/vega | 3e6588ea4aedb03e3594a549a97ffdb86adb88d1 | [
"MIT"
] | null | null | null | vega/core/evaluator/evaluator.py | qixiuai/vega | 3e6588ea4aedb03e3594a549a97ffdb86adb88d1 | [
"MIT"
] | null | null | null | vega/core/evaluator/evaluator.py | qixiuai/vega | 3e6588ea4aedb03e3594a549a97ffdb86adb88d1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Evaluate used to do evaluate process."""
import os
import copy
import logging
from vega.core.common.class_factory import ClassFactory, ClassType
from vega.core.trainer.distributed_worker import DistributedWorker
from vega.core.trainer.utils import WorkerTypes
@ClassFactory.register(ClassType.EVALUATOR)
class Evaluator(DistributedWorker):
"""Evaluator.
:param worker_info: worker_info
:type worker_info: dict, default to None
"""
def __init__(self, worker_info=None):
"""Init Evaluator."""
super(Evaluator, self).__init__(self.cfg)
Evaluator.__worker_id__ = Evaluator.__worker_id__ + 1
self._worker_id = Evaluator.__worker_id__
# for init ids
self.worker_type = WorkerTypes.EVALUATOR
self.worker_info = worker_info
if worker_info is not None:
self.step_name = self.worker_info["step_name"]
self.worker_id = self.worker_info["worker_id"]
# main evalutors setting
self.sub_worker_list = []
@property
def size(self):
"""Return the size of current evaluator list."""
return len(self.sub_worker_list)
def add_evaluator(self, evaluator):
"""Add a sub-evaluator to this evaluator.
:param evaluator: Description of parameter `evaluator`.
:type evaluator: object,
"""
if not isinstance(evaluator, DistributedWorker):
return
elif evaluator.worker_type is not None:
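            # Deep-copy the template evaluator so each sub-evaluator carries its own worker info.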
sub_evaluator = copy.deepcopy(evaluator)
sub_evaluator.worker_info = self.worker_info
if self.worker_info is not None:
sub_evaluator.step_name = self.worker_info["step_name"]
sub_evaluator.worker_id = self.worker_info["worker_id"]
self.sub_worker_list.append(sub_evaluator)
return
def set_worker_info(self, worker_info):
"""Set current evaluator's worker_info.
:param worker_info: Description of parameter `worker_info`.
:type worker_info: dict,
"""
if worker_info is None:
raise ValueError("worker_info should not be None type!")
self.worker_info = worker_info
self.step_name = self.worker_info["step_name"]
self.worker_id = self.worker_info["worker_id"]
for sub_evaluator in self.sub_worker_list:
sub_evaluator.worker_info = self.worker_info
sub_evaluator.step_name = self.worker_info["step_name"]
sub_evaluator.worker_id = self.worker_info["worker_id"]
return
| 37.049383 | 72 | 0.679773 |
9d7d9a5f6c899a713f7cb84302381f17c226c78a | 25,567 | py | Python | src/ion/util/preload.py | scionrep/scioncc_new | 086be085b69711ee24c4c86ed42f2109ca0db027 | [
"BSD-2-Clause"
] | 2 | 2015-10-05T20:36:35.000Z | 2018-11-21T11:45:24.000Z | src/ion/util/preload.py | scionrep/scioncc_new | 086be085b69711ee24c4c86ed42f2109ca0db027 | [
"BSD-2-Clause"
] | 21 | 2015-03-18T14:39:32.000Z | 2016-07-01T17:16:29.000Z | src/ion/util/preload.py | scionrep/scioncc_new | 086be085b69711ee24c4c86ed42f2109ca0db027 | [
"BSD-2-Clause"
] | 12 | 2015-03-18T10:53:49.000Z | 2018-06-21T11:19:57.000Z | #!/usr/bin/env python
"""Utility to bulk load resources into the system, e.g. for initial preload"""
__author__ = 'Michael Meisinger'
import json
import yaml
import re
import os
from pyon.core import MSG_HEADER_ACTOR, MSG_HEADER_ROLES, MSG_HEADER_VALID
from pyon.core.bootstrap import get_service_registry
from pyon.core.governance import get_system_actor
from pyon.ion.identifier import create_unique_resource_id, create_unique_association_id
from pyon.ion.resource import get_restype_lcsm, create_access_args
from pyon.public import CFG, log, BadRequest, Inconsistent, NotFound, IonObject, RT, OT, AS, LCS, named_any, get_safe, get_ion_ts, PRED
from ion.util.parse_utils import get_typed_value
# Well known action config keys
KEY_SCENARIO = "scenario"
KEY_ID = "id"
KEY_OWNER = "owner"
KEY_LCSTATE = "lcstate"
KEY_ORGS = "orgs"
# Well known aliases
ID_ORG_ION = "ORG_ION"
ID_SYSTEM_ACTOR = "USER_SYSTEM"
UUID_RE = '^[0-9a-fA-F]{32}$'
class Preloader(object):
def initialize_preloader(self, process, preload_cfg):
log.info("Initialize preloader")
self.process = process
self.preload_cfg = preload_cfg or {}
self._init_preload()
self.rr = self.process.container.resource_registry
self.bulk = self.preload_cfg.get("bulk", False) is True
# Loads internal bootstrapped resource ids that will be referenced during preload
self._load_system_ids()
# Load existing resources by preload ID
self._prepare_incremental()
def _init_preload(self):
self.obj_classes = {} # Cache of class for object types
self.object_definitions = None # Dict of preload rows before processing
self.resource_ids = {} # Holds a mapping of preload IDs to internal resource ids
self.resource_objs = {} # Holds a mapping of preload IDs to the actual resource objects
self.resource_assocs = {} # Holds a mapping of existing associations list by predicate
self.bulk_resources = {} # Keeps resource objects to be bulk inserted/updated
self.bulk_associations = {} # Keeps association objects to be bulk inserted/updated
self.bulk_existing = set() # This keeps the ids of the bulk objects to update instead of delete
def _read_preload_file(self, filename, safe_load=False):
is_json = filename.lower().endswith(".json")
with open(filename, "r") as f:
if is_json:
content_obj = json.load(f)
return content_obj
file_content = f.read()
if safe_load:
content_obj = yaml.safe_load(file_content)
else:
content_obj = yaml.load(file_content)
return content_obj
def preload_master(self, filename, skip_steps=None):
"""Executes a preload master file"""
log.info("Preloading from master file: %s", filename)
master_cfg = self._read_preload_file(filename)
if not "preload_type" in master_cfg or master_cfg["preload_type"] != "steps":
raise BadRequest("Invalid preload steps file")
if "actions" in master_cfg:
# Shorthand notation for one step in master
step_filename = filename
self._execute_step("default", step_filename, skip_steps)
return
for step in master_cfg["steps"]:
if skip_steps and step in skip_steps:
log.info("Skipping step %s" % step)
continue
step_filename = "%s/%s.yml" % (os.path.dirname(filename), step)
self._execute_step(step, step_filename, skip_steps)
def _execute_step(self, step, filename, skip_steps):
"""Executes a preload step file"""
step_cfg = self._read_preload_file(filename, safe_load=True)
if not "preload_type" in step_cfg or step_cfg["preload_type"] not in ("actions", "steps"):
raise BadRequest("Invalid preload actions file")
if skip_steps and step_cfg["preload_type"] == "actions" and step_cfg.get("requires", ""):
if any([rs in skip_steps for rs in step_cfg["requires"].split(",")]):
log.info("Skipping step %s - required step was skipped" % step)
skip_steps.append(step)
return
for action in step_cfg["actions"]:
try:
self._execute_action(action)
except Exception as ex:
log.warn("Action failed: " + str(ex), exc_info=True)
self.commit_bulk()
def _execute_action(self, action):
"""Executes a preload action"""
action_type = action["action"]
#log.debug("Preload action %s id=%s", action_type, action.get("id", ""))
scope, func_type = action_type.split(":", 1)
default_funcname = "_load_%s_%s" % (scope, func_type)
action_func = getattr(self, default_funcname, None)
if not action_func:
action_funcname = self.preload_cfg["action_plugins"].get(action_type, {})
if not action_funcname:
log.warn("Unknown action: %s", action_type)
return
action_func = getattr(self, action_funcname, None)
if not action_func:
log.warn("Action function %s not found for action %s", action_funcname, action_type)
return
action_func(action)
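    # Illustrative sketch (not part of the original source): _execute_action()
    # dispatches on the "action" value. A hypothetical entry such as
    #   {"action": "resource:actor_identity", "id": "USER_1", ...}
    # is first matched against a method named _load_resource_actor_identity
    # and, failing that, against the "action_plugins" mapping in the preload
    # config.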
# -------------------------------------------------------------------------
def _load_system_ids(self):
"""Read some system objects for later reference"""
org_objs, _ = self.rr.find_resources(name="ION", restype=RT.Org, id_only=False)
if not org_objs:
raise BadRequest("ION org not found. Was system force_cleaned since bootstrap?")
ion_org_id = org_objs[0]._id
self._register_id(ID_ORG_ION, ion_org_id, org_objs[0])
system_actor = get_system_actor()
system_actor_id = system_actor._id if system_actor else 'anonymous'
self._register_id(ID_SYSTEM_ACTOR, system_actor_id, system_actor if system_actor else None)
def _prepare_incremental(self):
"""
Look in the resource registry for any resources that have a preload ID on them so that
they can be referenced under this preload ID during this load run.
"""
log.debug("Loading prior preloaded resources for reference")
access_args = create_access_args("SUPERUSER", ["SUPERUSER"])
res_objs, res_keys = self.rr.find_resources_ext(alt_id_ns="PRE", id_only=False, access_args=access_args)
res_preload_ids = [key['alt_id'] for key in res_keys]
res_ids = [obj._id for obj in res_objs]
log.debug("Found %s previously preloaded resources", len(res_objs))
res_assocs = self.rr.find_associations(predicate="*", id_only=False)
[self.resource_assocs.setdefault(assoc["p"], []).append(assoc) for assoc in res_assocs]
log.debug("Found %s existing associations", len(res_assocs))
existing_resources = dict(zip(res_preload_ids, res_objs))
if len(existing_resources) != len(res_objs):
log.error("Stored preload IDs are NOT UNIQUE!!! This causes random links to existing resources")
res_id_mapping = dict(zip(res_preload_ids, res_ids))
self.resource_ids.update(res_id_mapping)
res_obj_mapping = dict(zip(res_preload_ids, res_objs))
self.resource_objs.update(res_obj_mapping)
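    # Illustrative note (not part of the original source): the "PRE" alt_id
    # namespace queried above is the one written during resource creation,
    # where create_object_from_cfg() appends entries such as
    # "PRE:ORG_FACILITY_1" (alias made up for illustration) to alt_ids, so a
    # re-run of the preload finds and updates existing resources instead of
    # creating duplicates.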
def create_object_from_cfg(self, cfg, objtype, key="resource", prefix="", existing_obj=None):
"""
Construct an IonObject of a determined type from given config dict with attributes.
Convert all attributes according to their schema target type. Supports nested objects.
Supports edit of objects of same type.
"""
log.trace("Create object type=%s, prefix=%s", objtype, prefix)
if objtype == "dict":
schema = None
else:
schema = self._get_object_class(objtype)._schema
obj_fields = {} # Attributes for IonObject creation as dict
nested_done = set() # Names of attributes with nested objects already created
obj_cfg = get_safe(cfg, key)
for subkey, value in obj_cfg.iteritems():
if subkey.startswith(prefix):
attr = subkey[len(prefix):]
if '.' in attr: # We are a parent entry
# TODO: Make sure to not create nested object multiple times
slidx = attr.find('.')
nested_obj_field = attr[:slidx]
parent_field = attr[:slidx+1]
nested_prefix = prefix + parent_field # prefix plus nested object name
if '[' in nested_obj_field and nested_obj_field[-1] == ']':
sqidx = nested_obj_field.find('[')
nested_obj_type = nested_obj_field[sqidx+1:-1]
nested_obj_field = nested_obj_field[:sqidx]
elif objtype == "dict":
nested_obj_type = "dict"
else:
nested_obj_type = schema[nested_obj_field]['type']
# Make sure to not create the same nested object twice
if parent_field in nested_done:
continue
# Support direct indexing in a list
list_idx = -1
if nested_obj_type.startswith("list/"):
_, list_idx, nested_obj_type = nested_obj_type.split("/")
list_idx = int(list_idx)
log.trace("Get nested object field=%s type=%s, prefix=%s", nested_obj_field, nested_obj_type, prefix)
nested_obj = self.create_object_from_cfg(cfg, nested_obj_type, key, nested_prefix)
if list_idx >= 0:
my_list = obj_fields.setdefault(nested_obj_field, [])
if list_idx >= len(my_list):
my_list[len(my_list):list_idx] = [None]*(list_idx-len(my_list)+1)
my_list[list_idx] = nested_obj
else:
obj_fields[nested_obj_field] = nested_obj
nested_done.add(parent_field)
elif objtype == "dict":
# TODO: What about type?
obj_fields[attr] = value
elif attr in schema: # We are the leaf attribute
try:
if value:
fieldvalue = get_typed_value(value, schema[attr])
obj_fields[attr] = fieldvalue
except Exception:
log.warn("Object type=%s, prefix=%s, field=%s cannot be converted to type=%s. Value=%s",
objtype, prefix, attr, schema[attr]['type'], value, exc_info=True)
#fieldvalue = str(fieldvalue)
else:
# warn about unknown fields just once -- not on each row
log.warn("Skipping unknown field in %s: %s%s", objtype, prefix, attr)
if objtype == "dict":
obj = obj_fields
else:
if existing_obj:
# Edit attributes
if existing_obj.type_ != objtype:
raise Inconsistent("Cannot edit resource. Type mismatch old=%s, new=%s" % (existing_obj.type_, objtype))
# TODO: Don't edit empty nested attributes
for attr in list(obj_fields.keys()):
if not obj_fields[attr]:
del obj_fields[attr]
for attr in ('alt_ids','_id','_rev','type_'):
if attr in obj_fields:
del obj_fields[attr]
existing_obj.__dict__.update(obj_fields)
log.trace("Update object type %s using field names %s", objtype, obj_fields.keys())
obj = existing_obj
else:
if cfg.get(KEY_ID, None) and 'alt_ids' in schema:
if 'alt_ids' in obj_fields:
obj_fields['alt_ids'].append("PRE:"+cfg[KEY_ID])
else:
obj_fields['alt_ids'] = ["PRE:"+cfg[KEY_ID]]
log.trace("Create object type %s from field names %s", objtype, obj_fields.keys())
obj = IonObject(objtype, **obj_fields)
return obj
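    # Illustrative sketch (not part of the original source; attribute names are
    # made up): with prefix="" and key="resource", a config fragment like
    #   {"resource": {"name": "Example sensor",
    #                 "geo_center.lat": "45.0",
    #                 "geo_center.lon": "-120.0"}}
    # produces one nested object for the dotted "geo_center" attribute plus the
    # plain "name" leaf, with every leaf value converted via get_typed_value()
    # against the object schema.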
def _get_object_class(self, objtype):
if objtype in self.obj_classes:
return self.obj_classes[objtype]
try:
obj_class = named_any("interface.objects.%s" % objtype)
self.obj_classes[objtype] = obj_class
return obj_class
except Exception:
log.error('failed to find class for type %s' % objtype)
def _get_service_client(self, service):
return get_service_registry().services[service].client(process=self.process)
def _register_id(self, alias, resid, res_obj=None, is_update=False):
"""Keep preload resource in internal dict for later reference"""
if not is_update and alias in self.resource_ids:
raise BadRequest("ID alias %s used twice" % alias)
self.resource_ids[alias] = resid
self.resource_objs[alias] = res_obj
log.trace("Added resource alias=%s to id=%s", alias, resid)
def _read_resource_id(self, res_id):
existing_obj = self.rr.read(res_id)
self.resource_objs[res_id] = existing_obj
self.resource_ids[res_id] = res_id
return existing_obj
def _get_resource_id(self, alias_id):
"""Returns resource ID from preload alias ID, scanning also for real resource IDs to be loaded"""
if alias_id in self.resource_ids:
return self.resource_ids[alias_id]
elif re.match(UUID_RE, alias_id):
# This is obviously an ID of a real resource - let it fail if not existing
self._read_resource_id(alias_id)
log.debug("Referencing existing resource via direct ID: %s", alias_id)
return alias_id
else:
raise KeyError(alias_id)
def _get_resource_obj(self, res_id, silent=False):
"""Returns a resource object from one of the memory locations for given preload or internal ID"""
if self.bulk and res_id in self.bulk_resources:
return self.bulk_resources[res_id]
elif res_id in self.resource_objs:
return self.resource_objs[res_id]
else:
# Real ID not alias - reverse lookup
alias_ids = [alias_id for alias_id,int_id in self.resource_ids.iteritems() if int_id==res_id]
if alias_ids:
return self.resource_objs[alias_ids[0]]
if not silent:
log.debug("_get_resource_obj(): No object found for '%s'", res_id)
return None
def _resource_exists(self, res_id):
if not res_id:
return None
res = self._get_resource_obj(res_id, silent=True)
return res is not None
def _has_association(self, sub, pred, obj):
"""Returns True if the described associated already exists."""
for assoc in self.resource_assocs.get(pred, []):
if assoc.s == sub and assoc.o == obj:
return True
return False
def _update_resource_obj(self, res_id):
"""Updates an existing resource object"""
res_obj = self._get_resource_obj(res_id)
self.rr.update(res_obj)
log.debug("Updating resource %s (pre=%s id=%s): '%s'", res_obj.type_, res_id, res_obj._id, res_obj.name)
def _get_alt_id(self, res_obj, prefix):
alt_ids = getattr(res_obj, 'alt_ids', [])
for alt_id in alt_ids:
if alt_id.startswith(prefix+":"):
alt_id_str = alt_id[len(prefix)+1:]
return alt_id_str
def _get_op_headers(self, owner_id, force_user=False):
headers = {}
if owner_id:
owner_id = self.resource_ids[owner_id]
headers[MSG_HEADER_ACTOR] = owner_id
headers[MSG_HEADER_ROLES] = {'ION': ['SUPERUSER', 'MODERATOR']}
headers[MSG_HEADER_VALID] = '0'
elif force_user:
return self._get_system_actor_headers()
return headers
def _get_system_actor_headers(self):
return {MSG_HEADER_ACTOR: self.resource_ids[ID_SYSTEM_ACTOR],
MSG_HEADER_ROLES: {'ION': ['SUPERUSER', 'MODERATOR']},
MSG_HEADER_VALID: '0'}
def basic_resource_create(self, cfg, restype, svcname, svcop, key="resource",
set_attributes=None, support_bulk=False, **kwargs):
"""
Orchestration method doing the following:
- create an object from a row,
- add any defined constraints,
- make a service call to create resource for given object,
- share resource in a given Org
- store newly created resource id and obj for future reference
- (optional) support bulk create/update
"""
res_id_alias = cfg[KEY_ID]
existing_obj = None
if res_id_alias in self.resource_ids:
# TODO: Catch case when ID used twice
existing_obj = self.resource_objs[res_id_alias]
elif re.match(UUID_RE, res_id_alias):
# This is obviously an ID of a real resource
try:
existing_obj = self._read_resource_id(res_id_alias)
log.debug("Updating existing resource via direct ID: %s", res_id_alias)
except NotFound as nf:
pass # Ok it was not there after all
try:
res_obj = self.create_object_from_cfg(cfg, restype, key, "", existing_obj=existing_obj)
except Exception as ex:
log.exception("Error creating object")
raise
if set_attributes:
for attr, attr_val in set_attributes.iteritems():
setattr(res_obj, attr, attr_val)
if existing_obj:
res_id = self.resource_ids[res_id_alias]
if self.bulk and support_bulk:
self.bulk_resources[res_id] = res_obj
self.bulk_existing.add(res_id) # Make sure to remember which objects are existing
else:
# TODO: Use the appropriate service call here
self.rr.update(res_obj)
else:
if self.bulk and support_bulk:
res_id = self._create_bulk_resource(res_obj, res_id_alias)
headers = self._get_op_headers(cfg.get(KEY_OWNER, None))
self._resource_assign_owner(headers, res_obj)
self._resource_advance_lcs(cfg, res_id)
else:
svc_client = self._get_service_client(svcname)
headers = self._get_op_headers(cfg.get(KEY_OWNER, None), force_user=True)
res_id = getattr(svc_client, svcop)(res_obj, headers=headers, **kwargs)
if res_id:
if svcname == "resource_registry" and svcop == "create":
res_id = res_id[0]
res_obj._id = res_id
self._register_id(res_id_alias, res_id, res_obj)
self._resource_assign_org(cfg, res_id)
return res_id
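    # Illustrative sketch (not part of the original source; aliases are made
    # up): a cfg dict handled by basic_resource_create() typically carries the
    # well-known keys defined at module level, e.g.
    #   {"id": "ORG_FACILITY_1", "owner": "USER_SYSTEM", "orgs": "ORG_ION",
    #    "lcstate": "DEPLOYED_AVAILABLE", "resource": {"name": "Facility org"}}
    # where "id" becomes the preload alias, "owner"/"orgs" refer to earlier
    # aliases, and "lcstate" is split into maturity and availability parts.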
def _create_bulk_resource(self, res_obj, res_alias=None):
if not hasattr(res_obj, "_id"):
res_obj._id = create_unique_resource_id()
ts = get_ion_ts()
if hasattr(res_obj, "ts_created") and not res_obj.ts_created:
res_obj.ts_created = ts
if hasattr(res_obj, "ts_updated") and not res_obj.ts_updated:
res_obj.ts_updated = ts
res_id = res_obj._id
self.bulk_resources[res_id] = res_obj
if res_alias:
self._register_id(res_alias, res_id, res_obj)
return res_id
def _resource_advance_lcs(self, cfg, res_id):
"""
Change lifecycle state of object to requested state. Supports bulk.
"""
res_obj = self._get_resource_obj(res_id)
restype = res_obj.type_
lcsm = get_restype_lcsm(restype)
initial_lcmat = lcsm.initial_state if lcsm else LCS.DEPLOYED
initial_lcav = lcsm.initial_availability if lcsm else AS.AVAILABLE
lcstate = cfg.get(KEY_LCSTATE, None)
if lcstate:
row_lcmat, row_lcav = lcstate.split("_", 1)
if self.bulk and res_id in self.bulk_resources:
self.bulk_resources[res_id].lcstate = row_lcmat
self.bulk_resources[res_id].availability = row_lcav
else:
if row_lcmat != initial_lcmat: # Vertical transition
self.rr.set_lifecycle_state(res_id, row_lcmat)
if row_lcav != initial_lcav: # Horizontal transition
self.rr.set_lifecycle_state(res_id, row_lcav)
elif self.bulk and res_id in self.bulk_resources:
# Set the lcs to resource type appropriate initial values
self.bulk_resources[res_id].lcstate = initial_lcmat
self.bulk_resources[res_id].availability = initial_lcav
def _resource_assign_org(self, cfg, res_id):
"""
Shares the resource in the given orgs. Supports bulk.
"""
org_ids = cfg.get(KEY_ORGS, None)
if org_ids:
org_ids = get_typed_value(org_ids, targettype="simplelist")
for org_id in org_ids:
org_res_id = self.resource_ids[org_id]
if self.bulk and res_id in self.bulk_resources:
# Note: org_id is alias, res_id is internal ID
org_obj = self._get_resource_obj(org_id)
res_obj = self._get_resource_obj(res_id)
# Create association to given Org
assoc_obj = self._create_association(org_obj, PRED.hasResource, res_obj, support_bulk=True)
else:
svc_client = self._get_service_client("org_management")
svc_client.share_resource(org_res_id, res_id, headers=self._get_system_actor_headers())
def _resource_assign_owner(self, headers, res_obj):
if self.bulk and 'ion-actor-id' in headers:
owner_id = headers['ion-actor-id']
user_obj = self._get_resource_obj(owner_id)
if owner_id and owner_id != 'anonymous':
self._create_association(res_obj, PRED.hasOwner, user_obj, support_bulk=True)
def basic_associations_create(self, cfg, res_alias, support_bulk=False):
for assoc in cfg.get("associations", []):
direction, other_id, predicate = assoc.split(",")
res_id = self.resource_ids[res_alias]
other_res_id = self.resource_ids[other_id]
if direction == "TO":
self._create_association(res_id, predicate, other_res_id, support_bulk=support_bulk)
elif direction == "FROM":
self._create_association(other_res_id, predicate, res_id, support_bulk=support_bulk)
def _create_association(self, subject=None, predicate=None, obj=None, support_bulk=False):
"""
Create an association between two IonObjects with a given predicate.
Supports bulk mode
"""
if self.bulk and support_bulk:
if not subject or not predicate or not obj:
raise BadRequest("Association must have all elements set: %s/%s/%s" % (subject, predicate, obj))
if isinstance(subject, basestring):
subject = self._get_resource_obj(subject)
if "_id" not in subject:
raise BadRequest("Subject id not available")
subject_id = subject._id
st = subject.type_
if isinstance(obj, basestring):
obj = self._get_resource_obj(obj)
if "_id" not in obj:
raise BadRequest("Object id not available")
object_id = obj._id
ot = obj.type_
assoc_id = create_unique_association_id()
assoc_obj = IonObject("Association",
s=subject_id, st=st,
p=predicate,
o=object_id, ot=ot,
ts=get_ion_ts())
assoc_obj._id = assoc_id
self.bulk_associations[assoc_id] = assoc_obj
return assoc_id, '1-norev'
else:
return self.rr.create_association(subject, predicate, obj)
def commit_bulk(self):
if not self.bulk_resources and not self.bulk_associations:
return
# Perform the create for resources
res_new = [obj for obj in self.bulk_resources.values() if obj["_id"] not in self.bulk_existing]
res = self.rr.rr_store.create_mult(res_new, allow_ids=True)
# Perform the update for resources
res_upd = [obj for obj in self.bulk_resources.values() if obj["_id"] in self.bulk_existing]
res = self.rr.rr_store.update_mult(res_upd)
# Perform the create for associations
assoc_new = [obj for obj in self.bulk_associations.values()]
res = self.rr.rr_store.create_mult(assoc_new, allow_ids=True)
log.info("Bulk stored {} resource objects ({} updates) and {} associations".format(len(res_new), len(res_upd), len(assoc_new)))
self.bulk_resources.clear()
self.bulk_associations.clear()
self.bulk_existing.clear()
| 45.33156 | 135 | 0.606993 |
36f8af0e316e9fcd58ebd2fae1981fb5bcd562ac | 5,422 | py | Python | tests/test_builder.py | wilhelmer/lunr.py | f455dfd84616d49c6d47ec3d4f5d3441228faba9 | [
"MIT"
] | null | null | null | tests/test_builder.py | wilhelmer/lunr.py | f455dfd84616d49c6d47ec3d4f5d3441228faba9 | [
"MIT"
] | null | null | null | tests/test_builder.py | wilhelmer/lunr.py | f455dfd84616d49c6d47ec3d4f5d3441228faba9 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from builtins import str
import pytest
from lunr.builder import Builder
from lunr.token_set import TokenSet
from lunr.index import Index
from lunr.vector import Vector
def _assert_deep_keys(dict_, keys):
d = dict_
for key in keys.split("."):
d_keys_as_str = [str(k) for k in d]
assert key in d_keys_as_str
d = d[key]
class TestBuilderBuild:
def setup_method(self, method):
self.builder = Builder()
doc = {"id": "id", "title": "test", "body": "missing"}
self.builder.ref("id")
self.builder.field("title")
self.builder.add(doc)
self.index = self.builder.build()
def test_adds_tokens_to_inverted_index(self):
_assert_deep_keys(self.builder.inverted_index, "test.title.id")
def test_builds_vector_space_of_the_document_fields(self):
assert "title/id" in self.builder.field_vectors
assert isinstance(self.builder.field_vectors["title/id"], Vector)
def test_skips_fields_not_defined_for_indexing(self):
assert "missing" not in self.builder.inverted_index
def test_builds_a_token_set_for_the_corpus(self):
needle = TokenSet.from_string("test")
assert "test" in self.builder.token_set.intersect(needle).to_list()
def test_calculates_document_count(self):
assert self.builder.average_field_length["title"] == 1
def test_index_is_returned(self):
assert isinstance(self.index, Index)
class TestBuilderAdd:
def test_builder_casts_docrefs_to_strings(self):
self.builder = Builder()
self.builder.ref("id")
self.builder.field("title")
self.builder.add(dict(id=123, title="test", body="missing"))
_assert_deep_keys(self.builder.inverted_index, "test.title.123")
def test_builder_metadata_whitelist_includes_metadata_in_index(self):
self.builder = Builder()
self.builder.ref("id")
self.builder.field("title")
self.builder.metadata_whitelist = ["position"]
self.builder.add(dict(id="a", title="test", body="missing"))
self.builder.add(dict(id="b", title="another test", body="missing"))
assert self.builder.inverted_index["test"]["title"]["a"] == {
"position": [[0, 4]]
}
assert self.builder.inverted_index["test"]["title"]["b"] == {
"position": [[8, 4]]
}
def test_builder_field_raises_if_contains_slash(self):
self.builder = Builder()
with pytest.raises(ValueError):
self.builder.field("foo/bar")
def test_builder_extracts_nested_properties_from_document(self):
self.builder = Builder()
self.builder.field("name", extractor=lambda d: d["person"]["name"])
self.builder.add({"id": "id", "person": {"name": "bob"}})
assert self.builder.inverted_index["bob"]["name"]["id"] == {}
def test_builder_field_term_frequency_and_length(self):
self.builder = Builder()
self.builder.ref("id")
self.builder.field("title")
self.builder.add(dict(id="a", title="test a testing test", body="missing"))
assert self.builder.field_term_frequencies == {
"title/a": {"test": 2, "a": 1, "testing": 1}
}
assert self.builder.field_lengths == {"title/a": 4}
class TestBuilderUse:
def setup_method(self, method):
self.builder = Builder()
def test_calls_plugin_function(self):
def plugin(*args):
assert True
self.builder.use(plugin)
def test_plugin_is_called_with_builder_as_first_argument(self):
def plugin(builder):
assert builder is self.builder
self.builder.use(plugin)
def test_forwards_arguments_to_the_plugin(self):
def plugin(builder, *args, **kwargs):
assert args == (1, 2, 3)
assert kwargs == {"foo": "bar"}
self.builder.use(plugin, 1, 2, 3, foo="bar")
class TestBuilderK1:
def test_k1_default_value(self):
builder = Builder()
assert builder._k1 == 1.2
def test_k1_can_be_set(self):
builder = Builder()
builder.k1(1.6)
assert builder._k1 == 1.6
class TestBuilderB:
def test_b_default_value(self):
builder = Builder()
assert builder._b == 0.75
def test_b_within_range(self):
builder = Builder()
builder.b(0.5)
assert builder._b == 0.5
def test_b_less_than_zero(self):
builder = Builder()
builder.b(-1)
assert builder._b == 0
def test_b_higher_than_one(self):
builder = Builder()
builder.b(1.5)
assert builder._b == 1
class TestBuilderRef:
def test_default_reference(self):
builder = Builder()
assert builder._ref == "id"
def test_defining_a_reference_field(self):
builder = Builder()
builder.ref("foo")
assert builder._ref == "foo"
class TestBuilderField:
def test_define_fields_to_index(self):
builder = Builder()
builder.field("foo")
assert len(builder._fields) == 1
assert builder._fields["foo"].name == "foo"
assert builder._fields["foo"].boost == 1
assert builder._fields["foo"].extractor is None
assert repr(builder._fields["foo"]) == '<Field "foo" boost="1">'
assert hash(builder._fields["foo"]) == hash("foo")
| 29.791209 | 83 | 0.634821 |
608b2ab05c753bd6a568f1f76678fb6e7af44286 | 8,908 | py | Python | .history/pages/intro_20220303161551.py | rypaik/Streamlit_Ref | 5ce11cecbe8307238463c126b88b3beed66c99fa | [
"MIT"
] | null | null | null | .history/pages/intro_20220303161551.py | rypaik/Streamlit_Ref | 5ce11cecbe8307238463c126b88b3beed66c99fa | [
"MIT"
] | null | null | null | .history/pages/intro_20220303161551.py | rypaik/Streamlit_Ref | 5ce11cecbe8307238463c126b88b3beed66c99fa | [
"MIT"
] | null | null | null | """
Off Multipage Cheatsheet
https://github.com/daniellewisDL/streamlit-cheat-sheet
@daniellewisDL : https://github.com/daniellewisDL
"""
import streamlit as st
from pathlib import Path
import base64
from modules.toc import *
# Initial page config
st.set_page_config(
page_title='Code Compendium Intro Page',
layout="wide",
# initial_sidebar_state="expanded",
)
# col2.title("Table of contents")
# col2.write("http://localhost:8502/#display-progress-and-status")
# toc.header("Header 1")
# toc.header("Header 2")
# toc.subheader("Subheader 1")
# toc.subheader("Subheader 2")
# toc.generate()
# Thanks to streamlitopedia for the following code snippet
def img_to_bytes(img_path):
img_bytes = Path(img_path).read_bytes()
encoded = base64.b64encode(img_bytes).decode()
return encoded
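# Illustrative usage (not part of the original file): the base64 string from
# img_to_bytes() is meant to be dropped into an inline <img> tag, e.g.
#   st.markdown("<img src='data:image/png;base64,{}' width=32>".format(
#       img_to_bytes("logomark_website.png")), unsafe_allow_html=True)
# ("logomark_website.png" is a placeholder path, as in the commented-out
# sidebar code below).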
# sidebar
# def cs_sidebar():
# st.sidebar.markdown('''[<img src='data:image/png;base64,{}' class='img-fluid' width=32 height=32>](https://streamlit.io/)'''.format(img_to_bytes("logomark_website.png")), unsafe_allow_html=True)
# st.sidebar.header('Streamlit cheat sheet')
# st.sidebar.markdown('''
# <small>Summary of the [docs](https://docs.streamlit.io/en/stable/api.html), as of [Streamlit v1.0.0](https://www.streamlit.io/).</small>
# ''', unsafe_allow_html=True)
# st.sidebar.markdown('__How to install and import__')
# st.sidebar.code('$ pip install streamlit')
# st.sidebar.markdown('Import convention')
# st.sidebar.code('>>> import streamlit as st')
# st.sidebar.markdown('__Add widgets to sidebar__')
# st.sidebar.code('''
# st.sidebar.<widget>
# >>> a = st.sidebar.radio(\'R:\',[1,2])
# ''')
# st.sidebar.markdown('__Command line__')
# st.sidebar.code('''
# $ streamlit --help
# $ streamlit run your_script.py
# $ streamlit hello
# $ streamlit config show
# $ streamlit cache clear
# $ streamlit docs
# $ streamlit --version
# ''')
# st.sidebar.markdown('__Pre-release features__')
# st.sidebar.markdown('[Beta and experimental features](https://docs.streamlit.io/en/stable/api.html#beta-and-experimental-features)')
# st.sidebar.code('''
# pip uninstall streamlit
# pip install streamlit-nightly --upgrade
# ''')
# st.sidebar.markdown('''<small>[st.cheat_sheet v1.0.0](https://github.com/daniellewisDL/streamlit-cheat-sheet) | Oct 2021</small>''', unsafe_allow_html=True)
# return None
##########################
# Main body of cheat sheet
##########################
def cs_body():
col1, col2 = st.columns(2)
col1.title('Ryan Paik Coding Compendium')
col1.markdown('''
----------
#### “*You don't learn to walk by following rules. You learn by doing, and by falling over.*”
#### ~ Richard Branson
--------
''')
col1.subheader("Welcome to my Code Compendium.")
col1.markdown('''
This website/webapp is my personal cheatsheet of all the code snippets that I have needed over the past 2 years. It ended up being a quick detour into Streamlit while I was building Flask APIs.
-----
#### **Programming is only as deep as you want to dive in.**
This webapp features the basic code snippets from all the "googling" I have done while programming.
I have taken the plunge and have created my own markdown notebooks organizing information from quick solution tidbits to documentation for programming languages.
Please visit my github for practical code and my research notebooks:
*[rypaik (Ryan Paik) · GitHub](https://github.com/rypaik)*
If you would like access to my Gist please email me.
ryanpaik@protonmail.com
-----
##### **Bio:**
Currently a Sophomore at University of Illinois at Urbana-Champaign
Working Nights on my degree from the System Engineering Program
##### **Hobbies:**
Trying to become a real guitar hero minus the game system, playing Valorant with the St Mark's crew, getting interesting eats no matter where I am, and playing toss with my baseball field rat of a cousin.
The newest hobby is figuring out what I can build with all the new breakthroughs in technology.
##### **Currently Working On**
Frameworks and Languages:
- Flask, Django, FastAPI, PyTorch, Streamlit, OpenCV, shell scripting, Python, C++
Databases:
- Postgres, Redis, MongoDB, and applicable ORMs
When I can get up for Air:
- React, swift(ios), Rust, GO!!
 - Find a team to get a paper into arXiv
**This site will be constantly updated as long as I program. Feel free to pass on the URL.**
''')
# col2.subheader('Display interactive widgets')
# col2.code('''
# st.button('Hit me')
# st.download_button('On the dl', data)
# st.checkbox('Check me out')
# st.radio('Radio', [1,2,3])
# st.selectbox('Select', [1,2,3])
# st.multiselect('Multiselect', [1,2,3])
# st.slider('Slide me', min_value=0, max_value=10)
# st.select_slider('Slide to select', options=[1,'2'])
# st.text_input('Enter some text')
# st.number_input('Enter a number')
# st.text_area('Area for textual entry')
# st.date_input('Date input')
# st.time_input('Time entry')
# st.file_uploader('File uploader')
# st.color_picker('Pick a color')
# ''')
# col2.write('Use widgets\' returned values in variables:')
# col2.code('''
# >>> for i in range(int(st.number_input('Num:'))): foo()
# >>> if st.sidebar.selectbox('I:',['f']) == 'f': b()
# >>> my_slider_val = st.slider('Quinn Mallory', 1, 88)
# >>> st.write(slider_val)
# ''')
# # Control flow
# col2.subheader('Control flow')
# col2.code('''
# st.stop()
# ''')
# # Lay out your app
# col2.subheader('Lay out your app')
# col2.code('''
# st.form('my_form_identifier')
# st.form_submit_button('Submit to me')
# st.container()
# st.columns(spec)
# >>> col1, col2 = st.columns(2)
# >>> col1.subheader('Columnisation')
# st.expander('Expander')
# >>> with st.expander('Expand'):
# >>> st.write('Juicy deets')
# ''')
# col2.write('Batch widgets together in a form:')
# col2.code('''
# >>> with st.form(key='my_form'):
# >>> text_input = st.text_input(label='Enter some text')
# >>> submit_button = st.form_submit_button(label='Submit')
# ''')
# # Display code
# col2.subheader('Display code')
# col2.code('''
# st.echo()
# >>> with st.echo():
# >>> st.write('Code will be executed and printed')
# ''')
# # Display progress and status
# col2.subheader('Display progress and status')
# col2.code('''
# st.progress(progress_variable_1_to_100)
# st.spinner()
# >>> with st.spinner(text='In progress'):
# >>> time.sleep(5)
# >>> st.success('Done')
# st.balloons()
# st.error('Error message')
# st.warning('Warning message')
# st.info('Info message')
# st.success('Success message')
# st.exception(e)
# ''')
# # Placeholders, help, and options
# col2.subheader('Placeholders, help, and options')
# col2.code('''
# st.empty()
# >>> my_placeholder = st.empty()
# >>> my_placeholder.text('Replaced!')
# st.help(pandas.DataFrame)
# st.get_option(key)
# st.set_option(key, value)
# st.set_page_config(layout='wide')
# ''')
# # Mutate data
# col2.subheader('Mutate data')
# col2.code('''
# DeltaGenerator.add_rows(data)
# >>> my_table = st.table(df1)
# >>> my_table.add_rows(df2)
# >>> my_chart = st.line_chart(df1)
# >>> my_chart.add_rows(df2)
# ''')
# # Optimize performance
# col2.subheader('Optimize performance')
# col2.code('''
# @st.cache
# >>> @st.cache
# ... def fetch_and_clean_data(url):
# ... # Mutate data at url
# ... return data
# >>> # Executes d1 as first time
# >>> d1 = fetch_and_clean_data(ref1)
# >>> # Does not execute d1; returns cached value, d1==d2
# >>> d2 = fetch_and_clean_data(ref1)
# >>> # Different arg, so function d1 executes
# >>> d3 = fetch_and_clean_data(ref2)
# ''')
# col2.subheader('Other key parts of the API')
# col2.markdown('''
# <small>[State API](https://docs.streamlit.io/en/stable/session_state_api.html)</small><br>
# <small>[Theme option reference](https://docs.streamlit.io/en/stable/theme_options.html)</small><br>
# <small>[Components API reference](https://docs.streamlit.io/en/stable/develop_streamlit_components.html)</small><br>
# <small>[API cheat sheet](https://share.streamlit.io/daniellewisdl/streamlit-cheat-sheet/app.py)</small><br>
# ''', unsafe_allow_html=True)
# Column 3 TOC Generator
# col3.subheader('test')
# toc = Toc(col3)
# # col2.title("Table of contents")
# col3.write("http://localhost:8502/#display-progress-and-status", unsafe_allow_html=True)
# toc.header("Header 1")
# toc.header("Header 2")
# toc.generate()
# toc.subheader("Subheader 1")
# toc.subheader("Subheader 2")
# toc.generate()
# return None
# Run main()
# if __name__ == '__main__':
# main()
# def main():
def app():
# cs_sidebar()
cs_body()
return None
| 27.24159 | 206 | 0.653907 |
0cd549d67b1e0191ee9b21133b4a2a23e265d5c3 | 1,072 | py | Python | Guessing_Game.py | 224alpha/Python | e413cc5a53751191df2ce146f061a6460f6661e0 | [
"MIT"
] | null | null | null | Guessing_Game.py | 224alpha/Python | e413cc5a53751191df2ce146f061a6460f6661e0 | [
"MIT"
] | null | null | null | Guessing_Game.py | 224alpha/Python | e413cc5a53751191df2ce146f061a6460f6661e0 | [
"MIT"
] | null | null | null | import random
a = comGuess = random.randint(0, 100) # a and comGuess are initialised with a random number between 0 and 100
while True: # loop will run until the break statement is reached (user enters the right answer)
userGuess = int(input("Enter your guessed no. b/w 0-100:")) # user input for guessing the number
    if userGuess < comGuess: # if the number guessed by the user is less than the random number, the user is told to guess higher and comGuess is changed to a new random number between a and 100
print("Guess Higher")
comGuess = random.randint(a, 100)
a += 1
    elif userGuess > comGuess: # if the number guessed by the user is greater than the random number, the user is told to guess lower and comGuess is changed to a new random number between 0 and a
print("Guess Lower")
comGuess = random.randint(0, a)
a += 1
else: # if guessed correctly the loop will break and the player will win
print("Guessed Corectly")
break
| 53.6 | 212 | 0.688433 |
a387680839dfcd1d35c2aa419260ca85929f7d39 | 2,590 | py | Python | hw/ip/otbn/dv/otbnsim/sim/decode.py | y-srini/opentitan | b46a08d07671c9d6c020e54fb44424f1611c43a0 | [
"Apache-2.0"
] | null | null | null | hw/ip/otbn/dv/otbnsim/sim/decode.py | y-srini/opentitan | b46a08d07671c9d6c020e54fb44424f1611c43a0 | [
"Apache-2.0"
] | null | null | null | hw/ip/otbn/dv/otbnsim/sim/decode.py | y-srini/opentitan | b46a08d07671c9d6c020e54fb44424f1611c43a0 | [
"Apache-2.0"
] | 1 | 2022-01-27T08:49:59.000Z | 2022-01-27T08:49:59.000Z | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
'''Code to load instruction words into a simulator'''
import struct
from typing import List, Optional, Iterator
from .constants import ErrBits
from .isa import INSNS_FILE, OTBNInsn
from .insn import INSN_CLASSES
from .state import OTBNState
MNEM_TO_CLASS = {cls.insn.mnemonic: cls for cls in INSN_CLASSES}
class IllegalInsn(OTBNInsn):
'''A catch-all subclass of Instruction for bad data
This handles anything that doesn't decode correctly. Doing so for OTBN is
much easier than if we wanted to support compressed-mode (RV32IC), because
we don't need to worry about whether we have 16 or 32 bits of rubbish.
Note that we declare this with an opcode of zero. Note that this implies
the bottom two bits are 0, which would imply a compressed instruction, so
we know this doesn't match any real instruction.
'''
def __init__(self, pc: int, raw: int, msg: str) -> None:
super().__init__(raw, {})
self.msg = msg
# Override the memoized disassembly for the instruction, avoiding us
# disassembling the underlying DummyInsn.
self._disasm = (pc, '?? 0x{:08x}'.format(raw))
def execute(self, state: OTBNState) -> Optional[Iterator[None]]:
state.stop_at_end_of_cycle(ErrBits.ILLEGAL_INSN)
return None
def _decode_word(pc: int, word: int) -> OTBNInsn:
mnem = INSNS_FILE.mnem_for_word(word)
if mnem is None:
return IllegalInsn(pc, word, 'No legal decoding')
cls = MNEM_TO_CLASS.get(mnem)
if cls is None:
return IllegalInsn(pc, word, f'No insn class for mnemonic {mnem}')
# Decode the instruction. We know that we have an encoding (we checked in
# get_insn_masks).
assert cls.insn.encoding is not None
enc_vals = cls.insn.encoding.extract_operands(word)
# Make sense of these encoded values as "operand values" (doing any
# shifting, sign interpretation etc.)
op_vals = cls.insn.enc_vals_to_op_vals(pc, enc_vals)
return cls(word, op_vals)
def decode_bytes(base_addr: int, data: bytes) -> List[OTBNInsn]:
'''Decode instruction bytes as instructions'''
assert len(data) & 3 == 0
return [_decode_word(base_addr + 4 * offset, int_val[0])
for offset, int_val in enumerate(struct.iter_unpack('<I', data))]
def decode_file(base_addr: int, path: str) -> List[OTBNInsn]:
with open(path, 'rb') as handle:
return decode_bytes(base_addr, handle.read())
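# Illustrative usage (not part of the original file): decoding a raw
# little-endian IMEM image loaded at address 0 might look like
#   insns = decode_file(0, '/path/to/imem.bin')   # placeholder path
# where each list entry is an OTBNInsn subclass instance (an IllegalInsn for
# words that fail to decode).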
| 35 | 78 | 0.702703 |
d7c051818740014092db03de2bcd9da455fe5973 | 56,729 | py | Python | nova/api/openstack/compute/plugins/v3/servers.py | bdelliott/nova | 3912d7aae21dfff5a42dd71bd45ef5c5f3b1a82b | [
"Apache-2.0"
] | null | null | null | nova/api/openstack/compute/plugins/v3/servers.py | bdelliott/nova | 3912d7aae21dfff5a42dd71bd45ef5c5f3b1a82b | [
"Apache-2.0"
] | null | null | null | nova/api/openstack/compute/plugins/v3/servers.py | bdelliott/nova | 3912d7aae21dfff5a42dd71bd45ef5c5f3b1a82b | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import re
import stevedore
from oslo.config import cfg
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.plugins.v3 import ips
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.compute import flavors
from nova import exception
from nova.image import glance
from nova.objects import instance as instance_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import policy
from nova import utils
CONF = cfg.CONF
CONF.import_opt('enable_instance_password',
'nova.api.openstack.compute.servers')
CONF.import_opt('network_api_class', 'nova.network')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
CONF.import_opt('extensions_blacklist', 'nova.api.openstack', group='osapi_v3')
CONF.import_opt('extensions_whitelist', 'nova.api.openstack', group='osapi_v3')
LOG = logging.getLogger(__name__)
def make_fault(elem):
fault = xmlutil.SubTemplateElement(elem, 'fault', selector='fault')
fault.set('code')
fault.set('created')
msg = xmlutil.SubTemplateElement(fault, 'message')
msg.text = 'message'
det = xmlutil.SubTemplateElement(fault, 'details')
det.text = 'details'
def make_server(elem, detailed=False):
elem.set('name')
elem.set('id')
if detailed:
elem.set('user_id')
elem.set('tenant_id')
elem.set('updated')
elem.set('created')
elem.set('host_id')
elem.set('status')
elem.set('progress')
elem.set('reservation_id')
# Attach image node
image = xmlutil.SubTemplateElement(elem, 'image', selector='image')
image.set('id')
xmlutil.make_links(image, 'links')
# Attach flavor node
flavor = xmlutil.SubTemplateElement(elem, 'flavor', selector='flavor')
flavor.set('id')
xmlutil.make_links(flavor, 'links')
# Attach fault node
make_fault(elem)
# Attach metadata node
elem.append(common.MetadataTemplate())
# Attach addresses node
elem.append(ips.AddressesTemplate())
xmlutil.make_links(elem, 'links')
server_nsmap = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM}
class ServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root, detailed=True)
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class MinimalServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
xmlutil.make_links(root, 'servers_links')
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem, detailed=True)
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServerAdminPassTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('admin_password')
return xmlutil.SlaveTemplate(root, 1, nsmap=server_nsmap)
def FullServerTemplate():
master = ServerTemplate()
master.attach(ServerAdminPassTemplate())
return master
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted server create requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
metadata_deserializer = common.MetadataXMLDeserializer()
want_controller = True
def __init__(self, controller):
self.controller = controller
def _extract_server(self, node):
"""Marshal the server attribute of a parsed request."""
server = {}
server_node = self.find_first_child_named(node, 'server')
attributes = ["name", "image_ref", "flavor_ref", "admin_password",
"key_name"]
for attr in attributes:
if server_node.getAttribute(attr):
server[attr] = server_node.getAttribute(attr)
metadata_node = self.find_first_child_named(server_node, "metadata")
if metadata_node is not None:
server["metadata"] = self.extract_metadata(metadata_node)
networks = self._extract_networks(server_node)
if networks is not None:
server["networks"] = networks
if self.controller:
self.controller.server_create_xml_deserialize(server_node, server)
return server
def _extract_networks(self, server_node):
"""Marshal the networks attribute of a parsed request."""
node = self.find_first_child_named(server_node, "networks")
if node is not None:
networks = []
for network_node in self.find_children_named(node,
"network"):
item = {}
if network_node.hasAttribute("uuid"):
item["uuid"] = network_node.getAttribute("uuid")
if network_node.hasAttribute("fixed_ip"):
item["fixed_ip"] = network_node.getAttribute("fixed_ip")
if network_node.hasAttribute("port"):
item["port"] = network_node.getAttribute("port")
networks.append(item)
return networks
else:
return None
class ActionDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted server action requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
def default(self, string):
dom = xmlutil.safe_minidom_parse_string(string)
action_node = dom.childNodes[0]
action_name = action_node.tagName
action_deserializer = {
'create_image': self._action_create_image,
'reboot': self._action_reboot,
'rebuild': self._action_rebuild,
'resize': self._action_resize,
'confirm_resize': self._action_confirm_resize,
'revert_resize': self._action_revert_resize,
}.get(action_name, super(ActionDeserializer, self).default)
action_data = action_deserializer(action_node)
return {'body': {action_name: action_data}}
def _action_create_image(self, node):
return self._deserialize_image_action(node, ('name',))
def _action_reboot(self, node):
if not node.hasAttribute("type"):
raise AttributeError("No reboot type was specified in request")
return {"type": node.getAttribute("type")}
def _action_rebuild(self, node):
rebuild = {}
if node.hasAttribute("name"):
name = node.getAttribute("name")
if not name:
raise AttributeError("Name cannot be blank")
rebuild['name'] = name
metadata_node = self.find_first_child_named(node, "metadata")
if metadata_node is not None:
rebuild["metadata"] = self.extract_metadata(metadata_node)
if not node.hasAttribute("image_ref"):
raise AttributeError("No image_ref was specified in request")
rebuild["image_ref"] = node.getAttribute("image_ref")
if node.hasAttribute("admin_password"):
rebuild["admin_password"] = node.getAttribute("admin_password")
if self.controller:
self.controller.server_rebuild_xml_deserialize(node, rebuild)
return rebuild
def _action_resize(self, node):
resize = {}
if node.hasAttribute("flavor_ref"):
resize["flavor_ref"] = node.getAttribute("flavor_ref")
else:
raise AttributeError("No flavor_ref was specified in request")
return resize
def _action_confirm_resize(self, node):
return None
def _action_revert_resize(self, node):
return None
def _deserialize_image_action(self, node, allowed_attributes):
data = {}
for attribute in allowed_attributes:
value = node.getAttribute(attribute)
if value:
data[attribute] = value
metadata_node = self.find_first_child_named(node, 'metadata')
if metadata_node is not None:
metadata = self.metadata_deserializer.extract_metadata(
metadata_node)
data['metadata'] = metadata
return data
class CreateDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted server create requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
def default(self, string):
"""Deserialize an xml-formatted server create request."""
dom = xmlutil.safe_minidom_parse_string(string)
server = self._extract_server(dom)
return {'body': {'server': server}}
class ServersController(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
EXTENSION_CREATE_NAMESPACE = 'nova.api.v3.extensions.server.create'
EXTENSION_DESERIALIZE_EXTRACT_SERVER_NAMESPACE = (
'nova.api.v3.extensions.server.create.deserialize')
EXTENSION_REBUILD_NAMESPACE = 'nova.api.v3.extensions.server.rebuild'
EXTENSION_DESERIALIZE_EXTRACT_REBUILD_NAMESPACE = (
'nova.api.v3.extensions.server.rebuild.deserialize')
EXTENSION_UPDATE_NAMESPACE = 'nova.api.v3.extensions.server.update'
_view_builder_class = views_servers.ViewBuilderV3
@staticmethod
def _add_location(robj):
# Just in case...
if 'server' not in robj.obj:
return robj
link = filter(lambda l: l['rel'] == 'self',
robj.obj['server']['links'])
if link:
robj['Location'] = link[0]['href'].encode('utf-8')
# Convenience return
return robj
def __init__(self, **kwargs):
def _check_load_extension(required_function):
def check_whiteblack_lists(ext):
# Check whitelist is either empty or if not then the extension
# is in the whitelist
if (not CONF.osapi_v3.extensions_whitelist or
ext.obj.alias in CONF.osapi_v3.extensions_whitelist):
# Check the extension is not in the blacklist
if ext.obj.alias not in CONF.osapi_v3.extensions_blacklist:
return True
else:
LOG.warning(_("Not loading %s because it is "
"in the blacklist"), ext.obj.alias)
return False
else:
LOG.warning(
_("Not loading %s because it is not in the whitelist"),
ext.obj.alias)
return False
def check_load_extension(ext):
if isinstance(ext.obj, extensions.V3APIExtensionBase):
# Filter out for the existence of the required
# function here rather than on every request. We
# don't have a new abstract base class to reduce
# duplication in the extensions as they may want
# to implement multiple server (and other) entry
# points if hasattr(ext.obj, 'server_create'):
if hasattr(ext.obj, required_function):
LOG.debug(_('extension %(ext_alias)s detected by '
'servers extension for function %(func)s'),
{'ext_alias': ext.obj.alias,
'func': required_function})
return check_whiteblack_lists(ext)
else:
LOG.debug(
_('extension %(ext_alias)s is missing %(func)s'),
{'ext_alias': ext.obj.alias,
'func': required_function})
return False
else:
return False
return check_load_extension
self.extension_info = kwargs.pop('extension_info')
super(ServersController, self).__init__(**kwargs)
self.compute_api = compute.API()
# Look for implementation of extension point of server creation
self.create_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_CREATE_NAMESPACE,
check_func=_check_load_extension('server_create'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.create_extension_manager):
LOG.debug(_("Did not find any server create extensions"))
# Look for implementation of extension point of server create
# XML deserialization
self.create_xml_deserialize_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_DESERIALIZE_EXTRACT_SERVER_NAMESPACE,
check_func=_check_load_extension(
'server_xml_extract_server_deserialize'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.create_xml_deserialize_manager):
LOG.debug(_("Did not find any server create xml deserializer"
" extensions"))
# Look for implementation of extension point of server rebuild
self.rebuild_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_REBUILD_NAMESPACE,
check_func=_check_load_extension('server_rebuild'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.rebuild_extension_manager):
LOG.debug(_("Did not find any server rebuild extensions"))
# Look for implementation of extension point of server rebuild
# XML deserialization
self.rebuild_xml_deserialize_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_DESERIALIZE_EXTRACT_REBUILD_NAMESPACE,
check_func=_check_load_extension(
'server_xml_extract_rebuild_deserialize'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.rebuild_xml_deserialize_manager):
LOG.debug(_("Did not find any server rebuild xml deserializer"
" extensions"))
# Look for implementation of extension point of server update
self.update_extension_manager = \
stevedore.enabled.EnabledExtensionManager(
namespace=self.EXTENSION_UPDATE_NAMESPACE,
check_func=_check_load_extension('server_update'),
invoke_on_load=True,
invoke_kwds={"extension_info": self.extension_info},
propagate_map_exceptions=True)
if not list(self.update_extension_manager):
LOG.debug(_("Did not find any server update extensions"))
@wsgi.serializers(xml=MinimalServersTemplate)
def index(self, req):
"""Returns a list of server names and ids for a given user."""
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
@wsgi.serializers(xml=ServersTemplate)
def detail(self, req):
"""Returns a list of server details for a given user."""
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
def _get_servers(self, req, is_detail):
"""Returns a list of servers, based on any search options specified."""
search_opts = {}
search_opts.update(req.GET)
context = req.environ['nova.context']
remove_invalid_options(context, search_opts,
self._get_server_search_options())
# Verify search by 'status' contains a valid status.
# Convert it to filter by vm_state or task_state for compute_api.
status = search_opts.pop('status', None)
if status is not None:
vm_state, task_state = common.task_and_vm_state_from_status(status)
if not vm_state and not task_state:
return {'servers': []}
search_opts['vm_state'] = vm_state
# When we search by vm state, task state will return 'default'.
# So we don't need task_state search_opt.
if 'default' not in task_state:
search_opts['task_state'] = task_state
if 'changes_since' in search_opts:
try:
parsed = timeutils.parse_isotime(search_opts['changes_since'])
except ValueError:
msg = _('Invalid changes_since value')
raise exc.HTTPBadRequest(explanation=msg)
search_opts['changes_since'] = parsed
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
# to filter out deleted instances by setting the filter ourselves.
# ... Unless 'changes_since' is specified, because 'changes_since'
# should return recently deleted images according to the API spec.
if 'deleted' not in search_opts:
if 'changes_since' not in search_opts:
# No 'changes_since', so we only want non-deleted servers
search_opts['deleted'] = False
if 'changes_since' in search_opts:
search_opts['changes-since'] = search_opts.pop('changes_since')
if search_opts.get("vm_state") == ['deleted']:
if context.is_admin:
search_opts['deleted'] = True
else:
msg = _("Only administrators may list deleted instances")
raise exc.HTTPBadRequest(explanation=msg)
# If tenant_id is passed as a search parameter this should
# imply that all_tenants is also enabled unless explicitly
# disabled. Note that the tenant_id parameter is filtered out
# by remove_invalid_options above unless the requestor is an
# admin.
if 'tenant_id' in search_opts and not 'all_tenants' in search_opts:
# We do not need to add the all_tenants flag if the tenant
# id associated with the token is the tenant id
# specified. This is done so a request that does not need
# the all_tenants flag does not fail because of lack of
# policy permission for compute:get_all_tenants when it
# doesn't actually need it.
if context.project_id != search_opts.get('tenant_id'):
search_opts['all_tenants'] = 1
# If all tenants is passed with 0 or false as the value
# then remove it from the search options. Nothing passed as
# the value for all_tenants is considered to enable the feature
all_tenants = search_opts.get('all_tenants')
if all_tenants:
try:
if not strutils.bool_from_string(all_tenants, True):
del search_opts['all_tenants']
except ValueError as err:
raise exception.InvalidInput(str(err))
if 'all_tenants' in search_opts:
policy.enforce(context, 'compute:get_all_tenants',
{'project_id': context.project_id,
'user_id': context.user_id})
del search_opts['all_tenants']
else:
if context.project_id:
search_opts['project_id'] = context.project_id
else:
search_opts['user_id'] = context.user_id
limit, marker = common.get_limit_and_marker(req)
try:
instance_list = self.compute_api.get_all(context,
search_opts=search_opts,
limit=limit,
marker=marker,
want_objects=True)
except exception.MarkerNotFound:
msg = _('marker [%s] not found') % marker
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
log_msg = _("Flavor '%s' could not be found ")
LOG.debug(log_msg, search_opts['flavor'])
instance_list = []
if is_detail:
instance_list.fill_faults()
response = self._view_builder.detail(req, instance_list)
else:
response = self._view_builder.index(req, instance_list)
req.cache_db_instances(instance_list)
return response
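    # Illustrative sketch (not part of the original source): a request such as
    #   GET /v3/servers/detail?status=ACTIVE&changes_since=2013-10-01T00:00:00Z
    # reaches _get_servers() with search_opts={'status': ..., 'changes_since': ...};
    # 'status' is mapped onto vm_state/task_state filters and 'changes_since'
    # is parsed into a datetime before compute_api.get_all() is called.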
def _get_server(self, context, req, instance_uuid):
"""Utility function for looking up an instance by uuid."""
try:
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
req.cache_db_instance(instance)
return instance
def _check_string_length(self, value, name, max_length=None):
try:
if isinstance(value, six.string_types):
value = value.strip()
utils.check_string_length(value, name, min_length=1,
max_length=max_length)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
def _validate_server_name(self, value):
self._check_string_length(value, 'Server name', max_length=255)
def _validate_device_name(self, value):
self._check_string_length(value, 'Device name', max_length=255)
if ' ' in value:
msg = _("Device name cannot include spaces.")
raise exc.HTTPBadRequest(explanation=msg)
def _get_requested_networks(self, requested_networks):
"""Create a list of requested networks from the networks attribute."""
networks = []
for network in requested_networks:
try:
# fixed IP address is optional
# if the fixed IP address is not provided then
# it will use one of the available IP address from the network
address = network.get('fixed_ip', None)
if address is not None and not utils.is_valid_ipv4(address):
msg = _("Invalid fixed IP address (%s)") % address
raise exc.HTTPBadRequest(explanation=msg)
port_id = network.get('port', None)
if port_id:
network_uuid = None
if not utils.is_neutron():
# port parameter is only for neutron v2.0
msg = _("Unknown argument: port")
raise exc.HTTPBadRequest(explanation=msg)
if not uuidutils.is_uuid_like(port_id):
msg = _("Bad port format: port uuid is "
"not in proper format "
"(%s)") % port_id
raise exc.HTTPBadRequest(explanation=msg)
if address is not None:
msg = _("Specified Fixed IP '%(addr)s' cannot be used "
"with port '%(port)s': port already has "
"a Fixed IP allocated.") % {"addr": address,
"port": port_id}
raise exc.HTTPBadRequest(explanation=msg)
else:
network_uuid = network['uuid']
if not port_id and not uuidutils.is_uuid_like(network_uuid):
br_uuid = network_uuid.split('-', 1)[-1]
if not uuidutils.is_uuid_like(br_uuid):
msg = _("Bad networks format: network uuid is "
"not in proper format "
"(%s)") % network_uuid
raise exc.HTTPBadRequest(explanation=msg)
# For neutronv2, requested_networks
# should be tuple of (network_uuid, fixed_ip, port_id)
if utils.is_neutron():
networks.append((network_uuid, address, port_id))
else:
# check if the network id is already present in the list,
# we don't want duplicate networks to be passed
# at the boot time
for id, ip in networks:
if id == network_uuid:
expl = (_("Duplicate networks"
" (%s) are not allowed") %
network_uuid)
raise exc.HTTPBadRequest(explanation=expl)
networks.append((network_uuid, address))
except KeyError as key:
expl = _('Bad network format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
return networks
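    # Illustrative sketch (not part of the original source; UUIDs are made up):
    # the requested_networks argument handled above comes from the request body
    # as a list of dicts, e.g.
    #   [{"uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", "fixed_ip": "10.0.0.5"},
    #    {"port": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"}]
    # and is normalised to (network_uuid, fixed_ip, port_id) tuples for neutron
    # or (network_uuid, fixed_ip) tuples for nova-network.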
# NOTE(vish): Without this regex, b64decode will happily
# ignore illegal bytes in the base64 encoded
# data.
B64_REGEX = re.compile('^(?:[A-Za-z0-9+\/]{4})*'
'(?:[A-Za-z0-9+\/]{2}=='
'|[A-Za-z0-9+\/]{3}=)?$')
def _decode_base64(self, data):
data = re.sub(r'\s', '', data)
if not self.B64_REGEX.match(data):
return None
try:
return base64.b64decode(data)
except TypeError:
return None
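    # Illustrative sketch (not part of the original source):
    #   self._decode_base64('aGVsbG8=')      -> 'hello'
    #   self._decode_base64('not base64!!')  -> None (rejected by B64_REGEX)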
@wsgi.serializers(xml=ServerTemplate)
def show(self, req, id):
"""Returns server details by server id."""
try:
context = req.environ['nova.context']
instance = self.compute_api.get(context, id, want_objects=True)
req.cache_db_instance(instance)
return self._view_builder.show(req, instance)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Creates a new server for a given user."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPBadRequest(_("The request body is invalid"))
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
if 'name' not in server_dict:
msg = _("Server name is not defined")
raise exc.HTTPBadRequest(explanation=msg)
name = server_dict['name']
self._validate_server_name(name)
name = name.strip()
# Arguments to be passed to instance create function
create_kwargs = {}
# Query extensions which want to manipulate the keyword
# arguments.
# NOTE(cyeoh): This is the hook that extensions use
# to replace the extension specific code below.
# When the extensions are ported this will also result
# in some convenience function from this class being
# moved to the extension
if list(self.create_extension_manager):
self.create_extension_manager.map(self._create_extension_point,
server_dict, create_kwargs)
image_uuid = self._image_from_req_data(server_dict, create_kwargs)
# NOTE(cyeoh): Although an extension can set
# return_reservation_id in order to request that a reservation
# id be returned to the client instead of the newly created
# instance information we do not want to pass this parameter
# to the compute create call which always returns both. We use
# this flag after the instance create call to determine what
# to return to the client
return_reservation_id = create_kwargs.pop('return_reservation_id',
False)
requested_networks = None
# TODO(cyeoh): bp v3-api-core-as-extensions
# Replace with an extension point when the os-networks
# extension is ported. Currently reworked
# to take into account is_neutron
#if (self.ext_mgr.is_loaded('os-networks')
# or utils.is_neutron()):
# requested_networks = server_dict.get('networks')
if utils.is_neutron():
requested_networks = server_dict.get('networks')
if requested_networks is not None:
requested_networks = self._get_requested_networks(
requested_networks)
try:
flavor_id = self._flavor_id_from_req_data(body)
except ValueError as error:
msg = _("Invalid flavor_ref provided.")
raise exc.HTTPBadRequest(explanation=msg)
try:
inst_type = flavors.get_flavor_by_flavor_id(
flavor_id, ctxt=context, read_deleted="no")
(instances, resv_id) = self.compute_api.create(context,
inst_type,
image_uuid,
display_name=name,
display_description=name,
metadata=server_dict.get('metadata', {}),
admin_password=password,
requested_networks=requested_networks,
**create_kwargs)
except (exception.QuotaError,
exception.PortLimitExceeded) as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.ImageNotFound as error:
msg = _("Can not find requested image")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound as error:
msg = _("Invalid flavor_ref provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound as error:
msg = _("Invalid key_name provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ConfigDriveInvalidValue:
msg = _("Invalid config_drive provided.")
raise exc.HTTPBadRequest(explanation=msg)
except rpc_common.RemoteError as err:
msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
'err_msg': err.value}
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
msg = "UnicodeError: %s" % unicode(error)
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata,
exception.InvalidRequest,
exception.MultiplePortsNotApplicable,
exception.InstanceUserDataMalformed,
exception.PortNotFound,
exception.SecurityGroupNotFound,
exception.NetworkNotFound) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse,
exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
# If the caller wanted a reservation_id, return it
if return_reservation_id:
return wsgi.ResponseObject(
{'servers_reservation': {'reservation_id': resv_id}},
xml=wsgi.XMLDictSerializer)
req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if CONF.enable_instance_password:
server['server']['admin_password'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj)
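    # A hedged sketch of a minimal request body that create() above accepts;
    # the key names come from the code, the values are purely illustrative
    # ('networks' is only honored when neutron is in use):
    #   {"server": {"name": "test-vm",
    #               "image_ref": "9f86d081-884c-4d63-a1ba-1746bbf8745c",
    #               "flavor_ref": "1",
    #               "networks": [{"uuid": "a87cc70a-3e15-4acf-8205-9b711a3531b7"}],
    #               "metadata": {"purpose": "demo"}}}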
def _create_extension_point(self, ext, server_dict, create_kwargs):
handler = ext.obj
LOG.debug(_("Running _create_extension_point for %s"), ext.obj)
handler.server_create(server_dict, create_kwargs)
def _rebuild_extension_point(self, ext, rebuild_dict, rebuild_kwargs):
handler = ext.obj
LOG.debug(_("Running _rebuild_extension_point for %s"), ext.obj)
handler.server_rebuild(rebuild_dict, rebuild_kwargs)
def _resize_extension_point(self, ext, resize_dict, resize_kwargs):
handler = ext.obj
LOG.debug(_("Running _resize_extension_point for %s"), ext.obj)
handler.server_resize(resize_dict, resize_kwargs)
def _update_extension_point(self, ext, update_dict, update_kwargs):
handler = ext.obj
LOG.debug(_("Running _update_extension_point for %s"), ext.obj)
handler.server_update(update_dict, update_kwargs)
def _delete(self, context, req, instance_uuid):
instance = self._get_server(context, req, instance_uuid)
if CONF.reclaim_instance_interval:
try:
self.compute_api.soft_delete(context, instance)
except exception.InstanceInvalidState:
# Note(yufang521247): instance which has never been active
# is not allowed to be soft_deleted. Thus we have to call
# delete() to clean up the instance.
self.compute_api.delete(context, instance)
else:
self.compute_api.delete(context, instance)
@wsgi.serializers(xml=ServerTemplate)
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPBadRequest(_("The request body is invalid"))
ctxt = req.environ['nova.context']
update_dict = {}
if 'name' in body['server']:
name = body['server']['name']
self._validate_server_name(name)
update_dict['display_name'] = name.strip()
if 'host_id' in body['server']:
msg = _("host_id cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
if list(self.update_extension_manager):
self.update_extension_manager.map(self._update_extension_point,
body['server'], update_dict)
try:
instance = self.compute_api.get(ctxt, id, want_objects=True)
req.cache_db_instance(instance)
policy.enforce(ctxt, 'compute:update', instance)
instance.update(update_dict)
instance.save()
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
return self._view_builder.show(req, instance)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('confirm_resize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirm_resize')
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('revert_resize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Flavor used by the instance could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revert_resize')
return webob.Response(status_int=202)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('reboot')
def _action_reboot(self, req, id, body):
if 'reboot' in body and 'type' in body['reboot']:
if not isinstance(body['reboot']['type'], six.string_types):
msg = _("Argument 'type' for reboot must be a string")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
valid_reboot_types = ['HARD', 'SOFT']
reboot_type = body['reboot']['type'].upper()
if not valid_reboot_types.count(reboot_type):
msg = _("Argument 'type' for reboot is not HARD or SOFT")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
else:
msg = _("Missing argument 'type' for reboot")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot')
return webob.Response(status_int=202)
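    # Illustrative action body for the reboot handler above (value is an example):
    #   {"reboot": {"type": "HARD"}}   # or "SOFT"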
def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.QuotaError as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.FlavorNotFound:
msg = _("Unable to locate requested flavor.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeToSameFlavor:
msg = _("Resize requires a flavor change.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resize')
except exception.ImageNotAuthorized:
msg = _("You are not authorized to access the image "
"the instance was started with.")
raise exc.HTTPUnauthorized(explanation=msg)
except exception.ImageNotFound:
msg = _("Image that the instance was started "
"with could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.Invalid:
msg = _("Invalid instance image.")
raise exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@wsgi.response(204)
def delete(self, req, id):
"""Destroys a server."""
try:
self._delete(req.environ['nova.context'], req, id)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete')
def _image_uuid_from_href(self, image_href):
# If the image href was generated by nova api, strip image_href
# down to an id and use the default glance connection params
image_uuid = image_href.split('/').pop()
if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid image_ref provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
def _image_from_req_data(self, server_dict, create_kwargs):
"""Get image data from the request or raise appropriate
exceptions.
The field image_ref is mandatory when no block devices have been
defined and must be a proper uuid when present.
"""
image_href = server_dict.get('image_ref')
if not image_href and create_kwargs.get('block_device_mapping'):
return ''
elif image_href:
return self._image_uuid_from_href(unicode(image_href))
else:
msg = _("Missing image_ref attribute")
raise exc.HTTPBadRequest(explanation=msg)
def _flavor_id_from_req_data(self, data):
try:
flavor_ref = data['server']['flavor_ref']
except (TypeError, KeyError):
msg = _("Missing flavor_ref attribute")
raise exc.HTTPBadRequest(explanation=msg)
return common.get_id_from_href(flavor_ref)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('resize')
def _action_resize(self, req, id, body):
"""Resizes a given instance to the flavor size requested."""
resize_dict = body['resize']
try:
flavor_ref = str(resize_dict["flavor_ref"])
if not flavor_ref:
msg = _("Resize request has invalid 'flavor_ref' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
except (KeyError, TypeError):
msg = _("Resize requests require 'flavor_ref' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
resize_kwargs = {}
return self._resize(req, id, flavor_ref, **resize_kwargs)
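    # Illustrative action body for the resize handler above (flavor id is an example):
    #   {"resize": {"flavor_ref": "2"}}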
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('rebuild')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
try:
rebuild_dict = body['rebuild']
except (KeyError, TypeError):
msg = _('Invalid request body')
raise exc.HTTPBadRequest(explanation=msg)
try:
image_href = rebuild_dict["image_ref"]
except (KeyError, TypeError):
msg = _("Could not parse image_ref from request.")
raise exc.HTTPBadRequest(explanation=msg)
image_href = self._image_uuid_from_href(image_href)
password = self._get_server_admin_password(rebuild_dict)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
attr_map = {
'name': 'display_name',
'metadata': 'metadata',
}
if 'name' in rebuild_dict:
self._validate_server_name(rebuild_dict['name'])
rebuild_kwargs = {}
if list(self.rebuild_extension_manager):
self.rebuild_extension_manager.map(self._rebuild_extension_point,
rebuild_dict, rebuild_kwargs)
for request_attribute, instance_attribute in attr_map.items():
try:
rebuild_kwargs[instance_attribute] = rebuild_dict[
request_attribute]
except (KeyError, TypeError):
pass
try:
self.compute_api.rebuild(context,
instance,
image_href,
password,
**rebuild_kwargs)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rebuild')
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.ImageNotFound:
msg = _("Cannot find image for rebuild")
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id)
view = self._view_builder.show(req, instance)
# Add on the admin_password attribute since the view doesn't do it
# unless instance passwords are disabled
if CONF.enable_instance_password:
view['server']['admin_password'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('create_image')
@common.check_snapshots_enabled
def _action_create_image(self, req, id, body):
"""Snapshot a server instance."""
context = req.environ['nova.context']
entity = body.get("create_image", {})
image_name = entity.get("name")
if not image_name:
msg = _("create_image entity requires name attribute")
raise exc.HTTPBadRequest(explanation=msg)
props = {}
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
try:
props.update(metadata)
except ValueError:
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
instance = self._get_server(context, req, id)
bdms = self.compute_api.get_instance_bdms(context, instance,
legacy=False)
try:
if self.compute_api.is_volume_backed_instance(context, instance,
bdms):
img = instance['image_ref']
if not img:
# NOTE(Vincent Hou) The private method
# _get_bdm_image_metadata only works, when boot
# device is set to 'vda'. It needs to be fixed later,
# but tentatively we use it here.
image_meta = {'properties': self.compute_api.
_get_bdm_image_metadata(context, bdms,
legacy_bdm=False)}
else:
src_image = self.compute_api.\
image_service.show(context, img)
image_meta = dict(src_image)
image = self.compute_api.snapshot_volume_backed(
context,
instance,
image_meta,
image_name,
extra_properties=props)
else:
image = self.compute_api.snapshot(context,
instance,
image_name,
extra_properties=props)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'create_image')
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
# build location of newly-created image entity
image_id = str(image['id'])
image_ref = glance.generate_image_url(image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp
def _get_server_admin_password(self, server):
"""Determine the admin password for a server on creation."""
try:
password = server['admin_password']
self._validate_admin_password(password)
except KeyError:
password = utils.generate_password()
except ValueError:
raise exc.HTTPBadRequest(explanation=_("Invalid admin_password"))
return password
def _validate_admin_password(self, password):
if not isinstance(password, six.string_types):
raise ValueError()
def _get_server_search_options(self):
"""Return server search options allowed by non-admin."""
return ('reservation_id', 'name', 'status', 'image', 'flavor',
'ip', 'changes_since', 'all_tenants')
def _server_create_xml_deserialize_extension_point(self, ext, server_node,
server_dict):
handler = ext.obj
LOG.debug(_("Running create xml deserialize ep for %s"),
handler.alias)
handler.server_xml_extract_server_deserialize(server_node,
server_dict)
def server_create_xml_deserialize(self, server_node, server):
if list(self.create_xml_deserialize_manager):
self.create_xml_deserialize_manager.map(
self._server_create_xml_deserialize_extension_point,
server_node, server)
def _server_rebuild_xml_deserialize_extension_point(self, ext,
rebuild_node,
rebuild_dict):
handler = ext.obj
LOG.debug(_("Running rebuild xml deserialize ep for %s"),
handler.alias)
handler.server_xml_extract_rebuild_deserialize(rebuild_node,
rebuild_dict)
def server_rebuild_xml_deserialize(self, rebuild_node, rebuild_dict):
if list(self.rebuild_xml_deserialize_manager):
self.rebuild_xml_deserialize_manager.map(
self._server_rebuild_xml_deserialize_extension_point,
rebuild_node, rebuild_dict)
def _server_resize_xml_deserialize_extension_point(self, ext, resize_node,
resize_dict):
handler = ext.obj
LOG.debug(_("Running rebuild xml deserialize ep for %s"),
handler.alias)
handler.server_xml_extract_resize_deserialize(resize_node, resize_dict)
def server_resize_xml_deserialize(self, resize_node, resize_dict):
if list(self.resize_xml_deserialize_manager):
self.resize_xml_deserialize_manager.map(
self._server_resize_xml_deserialize_extension_point,
resize_node, resize_dict)
def _get_instance(self, context, instance_uuid):
try:
attrs = ['system_metadata', 'metadata']
return instance_obj.Instance.get_by_uuid(context, instance_uuid,
expected_attrs=attrs)
except exception.InstanceNotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
@extensions.expected_errors((404, 409))
@wsgi.action('start')
def _start_server(self, req, id, body):
"""Start an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
LOG.debug(_('start instance'), instance=instance)
try:
self.compute_api.start(context, instance)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
return webob.Response(status_int=202)
@extensions.expected_errors((404, 409))
@wsgi.action('stop')
def _stop_server(self, req, id, body):
"""Stop an instance."""
context = req.environ['nova.context']
instance = self._get_instance(context, id)
LOG.debug(_('stop instance'), instance=instance)
try:
self.compute_api.stop(context, instance)
except (exception.InstanceNotReady, exception.InstanceIsLocked) as e:
raise webob.exc.HTTPConflict(explanation=e.format_message())
return webob.Response(status_int=202)
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
LOG.debug(_("Removing options '%s' from query"),
", ".join(unknown_options))
for opt in unknown_options:
search_options.pop(opt, None)
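# A hedged example of the filtering above (option names are illustrative):
#   search_options = {'status': 'ACTIVE', 'host': 'compute-01'}
#   remove_invalid_options(ctx, search_options, ('status', 'name'))
#   # for a non-admin context, 'host' is stripped and only 'status' survives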
class Servers(extensions.V3APIExtensionBase):
"""Servers."""
name = "Servers"
alias = "servers"
namespace = "http://docs.openstack.org/compute/core/servers/v3"
version = 1
def get_resources(self):
member_actions = {'action': 'POST'}
collection_actions = {'detail': 'GET'}
resources = [
extensions.ResourceExtension(
'servers',
ServersController(extension_info=self.extension_info),
member_name='server', collection_actions=collection_actions,
member_actions=member_actions)]
return resources
def get_controller_extensions(self):
return []
| 41.7125 | 79 | 0.606921 |
f3e4186e4ce0f54a933c82a0961ce39e8d414054 | 1,940 | py | Python | tools/dealData.py | adnappp/trackerByDetection | e05297c8f50298ed375492178101927ef6d816d7 | [
"MIT"
] | null | null | null | tools/dealData.py | adnappp/trackerByDetection | e05297c8f50298ed375492178101927ef6d816d7 | [
"MIT"
] | 1 | 2018-12-27T03:44:55.000Z | 2019-01-08T02:58:52.000Z | tools/dealData.py | adnappp/trackerByDetection | e05297c8f50298ed375492178101927ef6d816d7 | [
"MIT"
] | 1 | 2019-09-19T06:10:02.000Z | 2019-09-19T06:10:02.000Z | import os
import random
'''This script selects the training, validation and test splits from an existing dataset; the txt files it writes contain the xml/image file basenames, e.g. 00001'''
trainval_percent = 0.66
train_percent = 0.5
xmlfilepath = '/home/chenxingli/dengtaAI/my-tf-faster-rcnn/data/VOCdevkit2007/VOC2007/Annotations'
# imagefilepath is not actually used below; only one of the xml or image folders is needed, so this could be removed
imagefilepath = '/home/chenxingli/dengtaAI/my-tf-faster-rcnn/data/VOCdevkit2007/VOC2007/JPEGImages'
# txtsavepath is the empty folder created earlier; it is not used here because the output paths below are written out explicitly instead
txtsavepath = '/home/chenxingli/dengtaAI/my-tf-faster-rcnn/data/VOCdevkit2007/VOC2007/ImageSets/Main'
total_xml = os.listdir(xmlfilepath) # collect all the xml files
num=len(total_xml) # number of xml files
list_xml =range(num) # renamed from list to list_xml (naming convention); simply the indices 0..num-1
tv=int(num*trainval_percent) # take 66% of the xml files as the train+val set
tr=int(tv*train_percent) # then take 50% of the train+val set as the training set
trainval= random.sample(list_xml,tv) # randomly pick 66% of all xml files; trainval holds the chosen indices
train=random.sample(trainval,tr) # same again, for the training subset
# this repeats the path in txtsavepath above; txtsavepath could be reused instead
ftrainval = open('/home/chenxingli/dengtaAI/my-tf-faster-rcnn/data/VOCdevkit2007/VOC2007/ImageSets/Main/trainval.txt', 'w')
ftest = open('/home/chenxingli/dengtaAI/my-tf-faster-rcnn/data/VOCdevkit2007/VOC2007/ImageSets/Main/test.txt', 'w')
ftrain = open('/home/chenxingli/dengtaAI/my-tf-faster-rcnn/data/VOCdevkit2007/VOC2007/ImageSets/Main/train.txt', 'w')
fval = open('/home/chenxingli/dengtaAI/my-tf-faster-rcnn/data/VOCdevkit2007/VOC2007/ImageSets/Main/val.txt', 'w')
def deal():
    for i in list_xml:
        # iterate over all xml files
        name=total_xml[i].split(".")[0]+ '\n'
        if i in trainval: # this index was selected for the train+val set
            ftrainval.write(name) # write it to trainval.txt
            if i in train: # 50% of the train+val set goes to train.txt
                ftrain.write(name)
            else: # the other 50% of the train+val set goes to the validation set
                fval.write(name)
        else: # the remaining 34% goes to the test set test.txt
            ftest.write(name)
    # close the files
    ftrainval.close()
    ftrain.close()
    fval.close()
    ftest.close()
if __name__ == "__main__":
deal() | 42.173913 | 123 | 0.743299 |
872271437a8e958bdf3fb85aa67438fe8d412690 | 140 | py | Python | crud/apps.py | OuroborosD/03-PiggoV2 | 0fdabfeca3a29cf0cfb87f120506ad517ee75ce6 | [
"MIT"
] | null | null | null | crud/apps.py | OuroborosD/03-PiggoV2 | 0fdabfeca3a29cf0cfb87f120506ad517ee75ce6 | [
"MIT"
] | 18 | 2021-07-01T08:35:13.000Z | 2021-07-25T08:18:09.000Z | crud/apps.py | OuroborosD/03-PiggoV2 | 0fdabfeca3a29cf0cfb87f120506ad517ee75ce6 | [
"MIT"
] | 1 | 2021-07-13T05:12:14.000Z | 2021-07-13T05:12:14.000Z | from django.apps import AppConfig
class CrudConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'crud'
| 20 | 56 | 0.75 |
f4d11b25edc600cf5f80b9b468ac4f591e23463b | 2,490 | py | Python | src/data_standardization/tests/test_standardization_util.py | nsteins/crash-model | 521a63e48a561298e694432d74caa3c385f913b6 | [
"MIT"
] | 54 | 2018-06-21T18:48:34.000Z | 2020-04-15T23:07:20.000Z | src/data_standardization/tests/test_standardization_util.py | nsteins/crash-model | 521a63e48a561298e694432d74caa3c385f913b6 | [
"MIT"
] | 172 | 2018-06-14T17:33:41.000Z | 2020-06-15T16:45:15.000Z | src/data_standardization/tests/test_standardization_util.py | nsteins/crash-model | 521a63e48a561298e694432d74caa3c385f913b6 | [
"MIT"
] | 25 | 2017-02-13T21:45:07.000Z | 2018-06-13T19:41:38.000Z | from .. import standardization_util
import json
import os
import pytz
TEST_FP = os.path.dirname(os.path.abspath(__file__))
def test_parse_date():
timezone = pytz.timezone('America/New_York')
assert standardization_util.parse_date(
'01/08/2009 08:53:00 PM', timezone) == '2009-01-08T20:53:00-05:00'
assert standardization_util.parse_date(
'01/08/2009',
timezone,
time='08:53:00 PM') == '2009-01-08T20:53:00-05:00'
assert standardization_util.parse_date(
'01/08/2009',
timezone,
time='75180',
time_format='seconds') == '2009-01-08T20:53:00-05:00'
assert standardization_util.parse_date('01/08/2009 unk', timezone) \
is None
assert standardization_util.parse_date(
'01/08/2009',
timezone,
time='0201',
time_format='military') == '2009-01-08T02:01:00-05:00'
assert standardization_util.parse_date(
'01/08/2009',
timezone,
time='1201',
time_format='military') == '2009-01-08T12:01:00-05:00'
assert standardization_util.parse_date(
'01/08/2009',
timezone,
time='9999',
time_format='military') == '2009-01-08T00:00:00-05:00'
# Test daylight savings time
assert standardization_util.parse_date(
'08/08/2009 08:53:00 PM', timezone) == '2009-08-08T20:53:00-04:00'
# Test UTC conversion
assert standardization_util.parse_date(
'2009-01-08T08:53:00.000Z', timezone) == '2009-01-08T03:53:00-05:00'
def test_parse_address():
address = "29 OXFORD ST\n" + \
"Cambridge, MA\n" + \
"(42.37857940800046, -71.11657724799966)"
street, lat, lon = standardization_util.parse_address(address)
assert street == 'OXFORD ST'
assert lat == 42.37857940800046
assert lon == -71.11657724799966
def test_validate_and_write_schema(tmpdir):
tmppath = tmpdir.strpath
values = [{
"id": "1",
"dateOccurred": "2009-01-08T20:53:00Z",
"location": {
"latitude": 42.37857940800046,
"longitude": -71.11657724799966
}
}]
print(values)
standardization_util.validate_and_write_schema(
os.path.join(TEST_FP, 'test-schema.json'),
values,
os.path.join(tmppath, 'test.json')
)
# Now load the json back and make sure it matches
items = json.load(open(os.path.join(tmppath, 'test.json')))
assert items == values
| 27.362637 | 76 | 0.618876 |
eebc5a5c9aed284c2058c643d59182a108bcaffc | 4,467 | py | Python | napatrackmater/napari_animation/_qt/animation_widget.py | kapoorlabs/NapaTrackMater | ff1c5203ac589dff415edc0eb4388c40b3610b61 | [
"BSD-3-Clause"
] | null | null | null | napatrackmater/napari_animation/_qt/animation_widget.py | kapoorlabs/NapaTrackMater | ff1c5203ac589dff415edc0eb4388c40b3610b61 | [
"BSD-3-Clause"
] | null | null | null | napatrackmater/napari_animation/_qt/animation_widget.py | kapoorlabs/NapaTrackMater | ff1c5203ac589dff415edc0eb4388c40b3610b61 | [
"BSD-3-Clause"
] | null | null | null | from qtpy.QtWidgets import QWidget, QLabel, QVBoxLayout, QLineEdit, QPushButton
from ..animation import Animation
from ..easing import Easing
from .frame_widget import FrameWidget
class AnimationWidget(QWidget):
"""Widget for interatviely making animations using the napari viewer.
Parameters
----------
viewer : napari.Viewer
napari viewer.
Attributes
----------
key_frames : list of dict
List of viewer state dictionaries.
frame : int
Currently shown key frame.
"""
def __init__(self, viewer: 'napari.viewer.Viewer', savedir : None, trackid:0, T:0, parent=None):
super().__init__(parent=parent)
self._layout = QVBoxLayout()
self.setLayout(self._layout)
self._layout.addWidget(QLabel('Animation Wizard', parent=self))
self.frameWidget = FrameWidget(parent=self)
self._layout.addWidget(self.frameWidget)
self.captureButton = QPushButton('Capture Frame', parent=self)
self.captureButton.clicked.connect(self._capture_keyframe_callback)
self._layout.addWidget(self.captureButton)
self._layout.addStretch(1)
self.pathText = QLineEdit(parent=self)
self.pathText.setText(savedir + 'Track' + str(trackid) +'.mp4')
self._layout.addWidget(self.pathText)
self.saveButton = QPushButton('Save Animation', parent=self)
self.saveButton.clicked.connect(self._save_callback)
self._layout.addWidget(self.saveButton)
# Create animation
self.animation = Animation(viewer,savedir,trackid, T)
# establish key bindings
self._add_callbacks()
def _add_callbacks(self):
"""Bind keys"""
self.animation.viewer.bind_key("Alt-f", self._capture_keyframe_callback)
self.animation.viewer.bind_key("Alt-r", self._replace_keyframe_callback)
self.animation.viewer.bind_key("Alt-d", self._delete_keyframe_callback)
self.animation.viewer.bind_key("Alt-a", self._key_adv_frame)
self.animation.viewer.bind_key("Alt-b", self._key_back_frame)
def _release_callbacks(self):
"""Release keys"""
self.animation.viewer.bind_key("Alt-f", None)
self.animation.viewer.bind_key("Alt-r", None)
self.animation.viewer.bind_key("Alt-d", None)
self.animation.viewer.bind_key("Alt-a", None)
self.animation.viewer.bind_key("Alt-b", None)
def _get_interpolation_steps(self):
return int(self.frameWidget.stepsSpinBox.value())
def _get_easing_function(self):
easing_name = str(self.frameWidget.easeComboBox.currentText())
easing_func = Easing[easing_name.upper()].value
return easing_func
def _set_current_frame(self):
return self.frameWidget.frameSpinBox.setValue(self.animation.frame)
def _capture_keyframe_callback(self, event=None):
"""Record current key-frame"""
self.animation.capture_keyframe(steps=self._get_interpolation_steps(),
ease=self._get_easing_function())
self._set_current_frame()
def _replace_keyframe_callback(self, event=None):
"""Replace current key-frame with new view"""
self.animation.capture_keyframe(steps=self._get_interpolation_steps(), ease=self._get_easing_function(), insert=False)
self._set_current_frame()
def _delete_keyframe_callback(self, event=None):
"""Delete current key-frame"""
self.animation.key_frames.pop(self.animation.frame)
self.animation.frame = (self.animation.frame - 1) % len(self.animation.key_frames)
self.animation.set_to_keyframe(self.animation.frame)
self._set_current_frame()
def _key_adv_frame(self, event=None):
"""Go forwards in key-frame list"""
new_frame = (self.animation.frame + 1) % len(self.animation.key_frames)
self.animation.set_to_keyframe(new_frame)
self._set_current_frame()
def _key_back_frame(self, event=None):
"""Go backwards in key-frame list"""
new_frame = (self.animation.frame - 1) % len(self.animation.key_frames)
self.animation.set_to_keyframe(new_frame)
self._set_current_frame()
def _save_callback(self, event=None):
path = self.pathText.text()
print('Saving animation to', path)
self.animation.animate(path)
def close(self):
self._release_callbacks()
super().close()
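# A hedged usage sketch (paths and ids below are placeholders):
#   import napari
#   viewer = napari.Viewer()
#   widget = AnimationWidget(viewer, savedir='/tmp/animations/', trackid=7, T=100)
#   viewer.window.add_dock_widget(widget, area='right')
# Note that savedir is joined by plain string concatenation above, so it needs
# a trailing slash.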
| 35.452381 | 126 | 0.67786 |
372cfe1e798bc9902725696c98e6b1c98d500260 | 80 | py | Python | location_field/__init__.py | Gerleff/django-location-field | 092e7e352146f54f602b50b53ae0cd3f82860db3 | [
"MIT"
] | 443 | 2015-01-12T12:33:30.000Z | 2022-03-31T07:23:23.000Z | location_field/__init__.py | Gerleff/django-location-field | 092e7e352146f54f602b50b53ae0cd3f82860db3 | [
"MIT"
] | 104 | 2015-03-24T09:38:32.000Z | 2022-03-22T20:20:49.000Z | location_field/__init__.py | Gerleff/django-location-field | 092e7e352146f54f602b50b53ae0cd3f82860db3 | [
"MIT"
] | 122 | 2015-02-22T20:17:00.000Z | 2022-03-23T10:22:45.000Z | __version__ = '2.1.0'
default_app_config = 'location_field.apps.DefaultConfig'
| 20 | 56 | 0.7875 |
187dfd78347d1a89cf64c3aa57db110366569837 | 2,459 | py | Python | flask-backend/misc/cards-update/generate_precons.py | rafauke/vdb | bdbdaa1f762dd741f7462a959bf618306699c33b | [
"MIT"
] | null | null | null | flask-backend/misc/cards-update/generate_precons.py | rafauke/vdb | bdbdaa1f762dd741f7462a959bf618306699c33b | [
"MIT"
] | null | null | null | flask-backend/misc/cards-update/generate_precons.py | rafauke/vdb | bdbdaa1f762dd741f7462a959bf618306699c33b | [
"MIT"
] | null | null | null | import json
import re
bundles = {
"V5": {
"PM": {},
"PN": {},
"PTo": {},
"PTr": {},
"PV": {},
},
"SP": {
"LB": {},
"PwN": {},
"DoF": {},
"PoS": {},
},
"HttBR": {
"A": {},
"B": {},
},
"KoTR": {
"A": {},
"B": {},
},
"25th": {
"": {},
},
"FB": {
"PM": {},
"PN": {},
"PTo": {},
"PTr": {},
"PV": {},
},
"Anthology": {
"": {},
},
"LK": {
"": {},
},
"HttB": {
"PKia": {},
"PSam": {},
"PSal": {},
"PGar": {},
},
"KoT": {
"PB": {},
"PM": {},
"PT": {},
"PV": {},
},
"BSC": {
"X": {},
},
"LotN": {
"PA": {},
"PS": {},
"PG": {},
"PR": {},
},
"Third": {
"PB": {},
"PM": {},
"PTr": {},
"PTz": {},
},
"LoB": {
"PA": {},
"PG": {},
"PI": {},
"PO": {},
},
"KMW": {
"PAl": {},
"PAn": {},
"PB": {},
"PG": {},
},
"Tenth": {
"A": {},
"B": {},
},
"BH": {
"PM": {},
"PN": {},
"PTo": {},
"PTr": {},
},
"Anarchs": {
"PAB": {},
"PAG": {},
"PG": {},
},
"CE": {
"PB": {},
"PM": {},
"PN": {},
"PTo": {},
"PTr": {},
"PV": {},
},
"FN": {
"PA": {},
"PS": {},
"PG": {},
"PR": {},
},
"SW": {
"PB": {},
"PL": {},
"PT": {},
"PV": {},
},
}
with open("vtescrypt.json", "r") as crypt_file, open("vteslib.json", "r+") as library_file, open("preconDecks.json", "w") as precons_file:
crypt = json.load(crypt_file)
library = json.load(library_file)
for card in crypt + library:
for card_set, card_precons in card["Set"].items():
if card_set in bundles:
for precon in bundles[card_set].keys():
if precon in card_precons:
bundles[card_set][precon][card["Id"]] = int(card_precons[precon])
    # json.dump(bundles, precons_file, separators=(',', ':'))  # compact output
    # The indented dump below is used instead, for readable output (e.g. for debugging)
json.dump(bundles, precons_file, indent=4, separators=(',', ':'))
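    # Illustrative shape of the generated preconDecks.json (card ids and counts
    # below are hypothetical):
    #   {"V5": {"PM": {"100038": 2, "100327": 1}, "PN": {...}, ...}, ...}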
| 18.628788 | 138 | 0.286295 |
f4ff95010fc0e089bb479262a5173199f1835ed6 | 4,169 | py | Python | sonic/venv2/Lib/site-packages/pip/_internal/network/utils.py | yoyomonem/sonic-pygame | 5449ff862f03265473b7647583ccc48d9103a284 | [
"MIT"
] | 1,738 | 2017-09-21T10:59:12.000Z | 2022-03-31T21:05:46.000Z | sonic/venv2/Lib/site-packages/pip/_internal/network/utils.py | yoyomonem/sonic-pygame | 5449ff862f03265473b7647583ccc48d9103a284 | [
"MIT"
] | 427 | 2017-09-29T22:54:36.000Z | 2022-02-15T19:26:50.000Z | sonic/venv2/Lib/site-packages/pip/_internal/network/utils.py | yoyomonem/sonic-pygame | 5449ff862f03265473b7647583ccc48d9103a284 | [
"MIT"
] | 671 | 2017-09-21T08:04:01.000Z | 2022-03-29T14:30:07.000Z | from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
from pip._internal.exceptions import NetworkConnectionError
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Dict, Iterator
# The following comments and HTTP headers were originally added by
# Donald Stufft in git commit 22c562429a61bb77172039e480873fb239dd8c03.
#
# We use Accept-Encoding: identity here because requests defaults to
# accepting compressed responses. This breaks in a variety of ways
# depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible file
# and will leave the file alone and with an empty Content-Encoding
# - Some servers will notice that the file is already compressed and
# will leave the file alone, adding a Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take a file
# that's already been compressed and compress it again, and set
# the Content-Encoding: gzip header
# By setting this to request only the identity encoding we're hoping
# to eliminate the third case. Hopefully there does not exist a server
# which when given a file will notice it is already compressed and that
# you're not asking for a compressed file and will then decompress it
# before sending because if that's the case I don't think it'll ever be
# possible to make this work.
HEADERS = {'Accept-Encoding': 'identity'} # type: Dict[str, str]
def raise_for_status(resp):
# type: (Response) -> None
http_error_msg = ''
if isinstance(resp.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings.
try:
reason = resp.reason.decode('utf-8')
except UnicodeDecodeError:
reason = resp.reason.decode('iso-8859-1')
else:
reason = resp.reason
if 400 <= resp.status_code < 500:
http_error_msg = '%s Client Error: %s for url: %s' % (
resp.status_code, reason, resp.url)
elif 500 <= resp.status_code < 600:
http_error_msg = '%s Server Error: %s for url: %s' % (
resp.status_code, reason, resp.url)
if http_error_msg:
raise NetworkConnectionError(http_error_msg, response=resp)
def response_chunks(response, chunk_size=CONTENT_CHUNK_SIZE):
# type: (Response, int) -> Iterator[bytes]
"""Given a requests Response, provide the data chunks.
"""
try:
# Special case for urllib3.
for chunk in response.raw.stream(
chunk_size,
# We use decode_content=False here because we don't
# want urllib3 to mess with the raw bytes we get
# from the server. If we decompress inside of
# urllib3 then we cannot verify the checksum
# because the checksum will be of the compressed
# file. This breakage will only occur if the
# server adds a Content-Encoding header, which
# depends on how the server was configured:
# - Some servers will notice that the file isn't a
# compressible file and will leave the file alone
# and with an empty Content-Encoding
# - Some servers will notice that the file is
# already compressed and will leave the file
# alone and will add a Content-Encoding: gzip
# header
# - Some servers won't notice anything at all and
# will take a file that's already been compressed
# and compress it again and set the
# Content-Encoding: gzip header
#
# By setting this not to decode automatically we
# hope to eliminate problems with the second case.
decode_content=False,
):
yield chunk
except AttributeError:
# Standard file-like object.
while True:
chunk = response.raw.read(chunk_size)
if not chunk:
break
yield chunk
| 42.540816 | 71 | 0.66131 |
0edbb03569cb5450efa52e225bd7629acb947758 | 875 | py | Python | frameworks/helloworld/tests/test_web_url.py | krisis/dcos-commons | 715d27f3e43a5e25b8ecb4beed97333b136fdd9a | [
"Apache-2.0"
] | 1 | 2021-01-06T21:14:00.000Z | 2021-01-06T21:14:00.000Z | frameworks/helloworld/tests/test_web_url.py | krisis/dcos-commons | 715d27f3e43a5e25b8ecb4beed97333b136fdd9a | [
"Apache-2.0"
] | null | null | null | frameworks/helloworld/tests/test_web_url.py | krisis/dcos-commons | 715d27f3e43a5e25b8ecb4beed97333b136fdd9a | [
"Apache-2.0"
] | null | null | null | import pytest
import sdk_install as install
import sdk_plan as plan
import sdk_utils
from tests.config import (
PACKAGE_NAME
)
def setup_module(module):
install.uninstall(PACKAGE_NAME)
options = {
"service": {
"spec_file": "examples/web-url.yml"
}
}
# this config produces 1 hello's + 0 world's:
install.install(PACKAGE_NAME, 1, additional_options=options)
def teardown_module(module):
install.uninstall(PACKAGE_NAME)
@pytest.mark.sanity
def test_deploy():
plan.wait_for_completed_deployment(PACKAGE_NAME)
deployment_plan = plan.get_deployment_plan(PACKAGE_NAME).json()
sdk_utils.out("deployment_plan: " + str(deployment_plan))
assert(len(deployment_plan['phases']) == 1)
assert(deployment_plan['phases'][0]['name'] == 'hello')
assert(len(deployment_plan['phases'][0]['steps']) == 1)
| 23.026316 | 67 | 0.697143 |
e10122890681340aa2f7c3ff925d1133a8e22023 | 540 | py | Python | 04 Row iteration vs column iteration demo.py | jpmaldonado/python4finance | d21f772e79f9b1b10ecc71c69d088c69c3bea1fc | [
"MIT"
] | null | null | null | 04 Row iteration vs column iteration demo.py | jpmaldonado/python4finance | d21f772e79f9b1b10ecc71c69d088c69c3bea1fc | [
"MIT"
] | null | null | null | 04 Row iteration vs column iteration demo.py | jpmaldonado/python4finance | d21f772e79f9b1b10ecc71c69d088c69c3bea1fc | [
"MIT"
] | null | null | null | from timeit import timeit
setup = """
import numpy as np
a = np.arange(100000000).reshape(10000, 10000)
def contiguous_sum(x):
for i in range(x.shape[0]):
x[i].sum()
def non_contiguous_sum(x):
for i in range(x.shape[-1]):
x[:, i].sum()
"""
n=100
time_contiguous = timeit('contiguous_sum(a)', setup=setup, number=n) / n
time_non_contiguous = timeit('non_contiguous_sum(a)', setup=setup, number=n) / n
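# NumPy arrays are C-ordered (row-major) by default, so x[i] sums a contiguous
# block of memory while x[:, i] has to stride across rows; the contiguous row
# sums are therefore expected to come out noticeably faster in the timings
# printed below.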
print("Contiguous: {:.4f}s per loop".format(time_contiguous))
print("Non-contiguous: {:.4f}s per loop".format(time_non_contiguous)) | 28.421053 | 80 | 0.707407 |
b68b3e48dfc886d0a87e5df68b1c04d5a707ad2e | 553 | py | Python | words.py | Sandeep6262/Hangman | 0a7cc2d1f4a9bf4ff01039fb178f215135e5c070 | [
"Apache-2.0"
] | null | null | null | words.py | Sandeep6262/Hangman | 0a7cc2d1f4a9bf4ff01039fb178f215135e5c070 | [
"Apache-2.0"
] | null | null | null | words.py | Sandeep6262/Hangman | 0a7cc2d1f4a9bf4ff01039fb178f215135e5c070 | [
"Apache-2.0"
] | null | null | null | import string
import random
def load_words():
"""
    This function loads the full list of words from the words file.
"""
WORDLIST_FILENAME = "words.txt"
inFile = open(WORDLIST_FILENAME,'r')
line = inFile.readline()
    word_list = line.split()  # split the line on whitespace
return word_list
def choose_word():
"""
word_list (list): list of words (strings)
    This function returns one randomly chosen word.
"""
word_list = load_words()
secret_word = random.choice(word_list)
secret_word = secret_word.lower()
return secret_word
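# A hedged usage sketch: words.txt must sit next to this script with all words
# on its first line, separated by whitespace (load_words only reads one line).
#   print(choose_word())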
| 23.041667 | 64 | 0.674503 |
e4e008f705f98888e5802250f36f268eb2509ecd | 436 | py | Python | lazy.py | rrmhearts/python-playground | 0dd938280bbfa3a21a876d34c66112593809af3d | [
"MIT"
] | null | null | null | lazy.py | rrmhearts/python-playground | 0dd938280bbfa3a21a876d34c66112593809af3d | [
"MIT"
] | null | null | null | lazy.py | rrmhearts/python-playground | 0dd938280bbfa3a21a876d34c66112593809af3d | [
"MIT"
] | null | null | null | def Naturals(n):
yield n
yield from Naturals(n+1)
s = Naturals(1)
print("Natural #s", next(s), next(s), next(s), next(s))
def sieve(s):
n = next(s)
yield n
yield from sieve(i for i in s if i%n != 0)
p = sieve(Naturals(2))
print("Prime #s", next(p), next(p), next(p), \
next(p), next(p), next(p), next(p), next(p))
def gensend():
item = yield
yield item
g = gensend()
next(g)
print ( g.send("hello")) | 18.956522 | 55 | 0.573394 |
05b1cda5a72ba39ab16147f144157d3bc1f68537 | 8,161 | py | Python | pysmartt/smartt_client.py | s10i/smartt_client_python | 16dc8b0ba77b760e21262c6ff410e4f91b6af25c | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2016-12-05T01:30:02.000Z | 2016-12-05T01:30:02.000Z | pysmartt/smartt_client.py | s10i/smartt_client_python | 16dc8b0ba77b760e21262c6ff410e4f91b6af25c | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | pysmartt/smartt_client.py | s10i/smartt_client_python | 16dc8b0ba77b760e21262c6ff410e4f91b6af25c | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-05-29T01:02:03.000Z | 2019-05-29T01:02:03.000Z |
# Standard library imports
import socket
import ssl
import select
# Local imports
from smartt_simple_protocol import SmarttSimpleProtocol
from smartt_client_functions import setupSmarttFunctions
class SmarttClientException(BaseException):
pass
##############################################################################
### SmarttClient class - encapsulates connection and communication with Smartt
### server, preseting a nice and easy to use API to the user
class SmarttClient(object):
##########################################################################
#############
# API Enums #
#############
marketNames = [
"Bovespa",
"BMF"
]
orderStatuses = [
"canceled",
"executed",
"hung",
"hung_cancellable",
"hung_pending",
"partially_canceled",
"partially_executed",
"partially_executed_cancellable",
"rejected",
"expired"
]
ordersEventsTypes = [
"order_sent",
"order_canceled",
"order_changed",
"order_executed",
"order_expired"
]
stopOrderStatuses = [
"canceled_by_client",
"canceled_expired_option",
"canceled_not_allowed_market",
"canceled_not_enough_balance",
"canceled_not_positioned",
"canceled_order_limit_exceeded",
"hung",
"sent",
"expired"
]
stopOrdersEventsTypes = [
"stop_order_sent",
"stop_order_canceled",
"stop_order_triggered",
"stop_order_expired"
]
validityTypes = [
"HJ",
"DE",
"AC"
]
##########################################################################
### Init function - connects to the server (possibly initializing the SSL
### protocol as well) and setups the protocol handler
def __init__(self, host="smarttbot-server.smarttbot.com", port=5060, use_ssl=True,
print_raw_messages=False):
self.host = host
self.port = port
self.smartt_socket = socket.create_connection((self.host, self.port))
if use_ssl:
self.smartt_socket = ssl.wrap_socket(self.smartt_socket,
ssl_version=ssl.PROTOCOL_TLSv1)
self.protocol = SmarttSimpleProtocol(self.smartt_socket.recv,
self.smartt_socket.send,
print_raw_messages)
# Generic Wrapper for all Smartt functions - sends the function message
# and returns the response (next message from the server)
def smarttFunction(self, message):
self.protocol.send(message)
response = self.protocol.receive()
if len(response) > 0 and response[0] == "ERROR":
if len(response) != 3:
print("STRANGE! Error response doesn't have 3 values: %s" %
str(response))
raise SmarttClientException( response[0] +
"(" + response[1] + "): " +
response[2] )
return response
##########################################################################
### Generic messages (list of strings) handling ###
###################################################
def sendMessage(self, message):
self.protocol.send(message)
def receiveMessage(self):
return self.protocol.receive()
##########################################################################
##########################################################################
### Raw messages handling ###
#############################
def sendRawMessage(self, message):
self.smartt_socket.send(message)
# Reads everything available until timing out
def receiveRawMessage(self):
# Read in chunks of at most 4K - the magical number for recv calls :)
receive_size = 4096
# Timeout of half a second - just enough so that a continuous
# transmission from the server isn't missed (totally arbitrary choice)
select_timeout = 0.5
# Has to receive something, so just use the blocking function
data = self.smartt_socket.recv(receive_size)
# Wait and check for data, if available, read, if times out, stops
while len(select.select([self.smartt_socket], [], [],
select_timeout)[0]) > 0:
data += self.smartt_socket.recv(receive_size)
return data
##########################################################################
##########################################################################
### Helper functions ###
########################
def checkAttributes(self, attributes, possibleValues):
for attribute in attributes:
if attribute not in possibleValues:
raise SmarttClientException("Invalid attribute: " + attribute)
def formatAttributes(self, name, attributes, possibleValues):
if not attributes:
return ""
self.checkAttributes(attributes, possibleValues)
return self.formatString(name, ",".join(attributes))
def formatString(self, name, value, optional=True):
if value is None:
if not optional:
raise SmarttClientException("Non-optional parameter is NULL: "
+ name)
else:
return []
return [("%s=%s" % (name, value))]
def formatInteger(self, name, value, optional=True):
formattedValue = (str(int(value))
if value is not None else None)
return self.formatString(name, formattedValue, optional)
def formatDecimal2(self, name, value, optional=True):
formattedValue = (("%.2f" % float(value))
if value is not None else None)
return self.formatString(name, formattedValue, optional)
def formatDecimal6(self, name, value, optional=True):
formattedValue = (("%.6f" % float(value))
if value is not None else None)
return self.formatString(name, formattedValue, optional)
def formatDatetime(self, name, value, optional=True):
formattedValue = (value.strftime("%Y-%m-%d %H:%M:%S")
if value is not None else None)
return self.formatString(name, formattedValue, optional)
def formatDate(self, name, value, optional=True):
formattedValue = (value.strftime("%Y-%m-%d")
if value is not None else None)
return self.formatString(name, formattedValue, optional)
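    # Illustrative behaviour of the format helpers above (names and values are
    # made up):
    #   self.formatString("client_id", "abc")                -> ["client_id=abc"]
    #   self.formatDecimal2("price", 10.5)                   -> ["price=10.50"]
    #   self.formatDate("date", datetime.date(2017, 1, 31))  -> ["date=2017-01-31"]
    #   self.formatString("optional_field", None)            -> []  (optional=True)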
def formatBoolean(self, name, value, falseAndTrueValues=["no", "yes"], optional=True):
formattedValue = None
if value == 0 or value is False or value == falseAndTrueValues[0]:
formattedValue = "0"
elif value == 1 or value is True or value == falseAndTrueValues[1]:
formattedValue = "1"
elif value is not None:
raise SmarttClientException("Invalid boolean value '" + name +
"': " + value)
return self.formatString(name, formattedValue, optional)
def formatEnum(self, name, value, enumValues, optional=True):
if value is not None and value not in enumValues:
raise SmarttClientException("Invalid '" + name +
"' parameter value: " + value)
return self.formatString(name, value, optional)
def formatDictResponse(self, values, attributes, defaultAttributes=[]):
if not attributes:
attributes = defaultAttributes
return dict(zip(attributes, values))
def formatListOfDictsResponse(self, values, attributes, defaultAttributes):
if not attributes:
attributes = defaultAttributes
k = len(attributes)
return [self.formatDictResponse(values[i:i + k], attributes) for i in
range(0, len(values), k)]
setupSmarttFunctions(SmarttClient)
| 35.482609 | 90 | 0.539762 |
c0180712169827517723e6047746595f1fe08c8f | 56,172 | py | Python | lib/anitraintools.py | vwcruzeiro/ANI-Tools | bc44c65589148b6e6dd8065d63e73ead00d8ac9d | [
"MIT"
] | null | null | null | lib/anitraintools.py | vwcruzeiro/ANI-Tools | bc44c65589148b6e6dd8065d63e73ead00d8ac9d | [
"MIT"
] | null | null | null | lib/anitraintools.py | vwcruzeiro/ANI-Tools | bc44c65589148b6e6dd8065d63e73ead00d8ac9d | [
"MIT"
] | null | null | null | import hdnntools as hdt
import pyanitools as pyt
import pyNeuroChem as pync
from pyNeuroChem import cachegenerator as cg
import numpy as np
from scipy.integrate import quad
import pandas as pd
from time import sleep
import subprocess
import random
import re
import os
from multiprocessing import Process
import shutil
import copy
conv_au_ev = 27.21138505
def interval(v, S):
ps = 0.0
ds = 1.0 / float(S)
for s in range(S):
if v > ps and v <= ps + ds:
return s
ps = ps + ds
def get_train_stats(Nn,train_root):
# rerr = re.compile('EPOCH\s+?(\d+?)\n[\s\S]+?E \(kcal\/mol\)\s+?(\d+?\.\d+?)\s+?(\d+?\.\d+?)\s+?(\d+?\.\d+?)\n\s+?dE \(kcal\/mol\)\s+?(\d+?\.\d+?)\s+?(\d+?\.\d+?)\s+?(\d+?\.\d+?)\n[\s\S]+?Current best:\s+?(\d+?)\n[\s\S]+?Learning Rate:\s+?(\S+?)\n[\s\S]+?TotalEpoch:\s+([\s\S]+?)\n')
# rerr = re.compile('EPOCH\s+?(\d+?)\s+?\n[\s\S]+?E \(kcal\/mol\)\s+?(\S+?)\s+?(\S+?)\s+?(\S+?)\n\s+?dE \(kcal\/mol\)\s+?(\S+?)\s+?(\S+?)\s+?(\S+?)\n')
rblk = re.compile('=+?\n([\s\S]+?=+?\n[\s\S]+?(?:=|Deleting))')
repo = re.compile('EPOCH\s+?(\d+?)\s+?\n')
rerr = re.compile('\s+?(\S+?\s+?\(\S+?)\s+?((?:\d|inf)\S*?)\s+?((?:\d|inf)\S*?)\s+?((?:\d|inf)\S*?)\n')
rtme = re.compile('TotalEpoch:\s+?(\d+?)\s+?dy\.\s+?(\d+?)\s+?hr\.\s+?(\d+?)\s+?mn\.\s+?(\d+?\.\d+?)\s+?sc\.')
comp = re.compile('Termination Criterion Met')
allnets = []
completed = []
for index in range(Nn):
#print('reading:', train_root + 'train' + str(index) + '/' + 'output.opt')
if os.path.isfile(train_root + 'train' + str(index) + '/' + 'output.opt'):
optfile = open(train_root + 'train' + str(index) + '/' + 'output.opt', 'r').read()
matches = re.findall(rblk, optfile)
run = dict({'EPOCH': [], 'RTIME': [], 'ERROR': dict()})
for i, data in enumerate(matches):
run['EPOCH'].append(int(re.search(repo, data).group(1)))
m = re.search(rtme, data)
run['RTIME'].append(86400.0 * float(m.group(1)) +
3600.0 * float(m.group(2)) +
60.0 * float(m.group(3)) +
float(m.group(4)))
err = re.findall(rerr, data)
for e in err:
if e[0] in run['ERROR']:
run['ERROR'][e[0]].append(np.array([float(e[1]), float(e[2]), float(e[3])], dtype=np.float64))
else:
run['ERROR'].update(
{e[0]: [np.array([float(e[1]), float(e[2]), float(e[3])], dtype=np.float64)]})
for key in run['ERROR'].keys():
run['ERROR'][key] = np.vstack(run['ERROR'][key])
if re.match(comp, optfile):
completed.append(True)
else:
completed.append(False)
allnets.append(run)
else:
completed.append(False)
return allnets, completed
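# Sketch of the structure returned above: one dict per network, e.g.
#   {'EPOCH': [0, 1, ...],            # epoch indices parsed from output.opt
#    'RTIME': [38.2, ...],            # wall time per epoch, in seconds
#    'ERROR': {label: ndarray(epochs, 3), ...}}  # three error columns per label
# together with a parallel `completed` list of booleans (one entry per network).
# The label strings depend on what the training code writes to output.opt.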
def get_train_stats_ind(index,train_root):
# rerr = re.compile('EPOCH\s+?(\d+?)\n[\s\S]+?E \(kcal\/mol\)\s+?(\d+?\.\d+?)\s+?(\d+?\.\d+?)\s+?(\d+?\.\d+?)\n\s+?dE \(kcal\/mol\)\s+?(\d+?\.\d+?)\s+?(\d+?\.\d+?)\s+?(\d+?\.\d+?)\n[\s\S]+?Current best:\s+?(\d+?)\n[\s\S]+?Learning Rate:\s+?(\S+?)\n[\s\S]+?TotalEpoch:\s+([\s\S]+?)\n')
# rerr = re.compile('EPOCH\s+?(\d+?)\s+?\n[\s\S]+?E \(kcal\/mol\)\s+?(\S+?)\s+?(\S+?)\s+?(\S+?)\n\s+?dE \(kcal\/mol\)\s+?(\S+?)\s+?(\S+?)\s+?(\S+?)\n')
rblk = re.compile('=+?\n([\s\S]+?=+?\n[\s\S]+?(?:=|Deleting))')
repo = re.compile('EPOCH\s+?(\d+?)\s+?\n')
rerr = re.compile('\s+?(\S+?\s+?\(\S+?)\s+?((?:\d|inf)\S*?)\s+?((?:\d|inf)\S*?)\s+?((?:\d|inf)\S*?)\n')
rtme = re.compile('TotalEpoch:\s+?(\d+?)\s+?dy\.\s+?(\d+?)\s+?hr\.\s+?(\d+?)\s+?mn\.\s+?(\d+?\.\d+?)\s+?sc\.')
comp = re.compile('Termination Criterion Met')
allnets = []
completed = False
#print('reading:', train_root + 'train' + str(index) + '/' + 'output.opt')
if os.path.isfile(train_root + 'train' + str(index) + '/' + 'output.opt'):
optfile = open(train_root + 'train' + str(index) + '/' + 'output.opt', 'r').read()
matches = re.findall(rblk, optfile)
run = dict({'EPOCH': [], 'RTIME': [], 'ERROR': dict()})
for i, data in enumerate(matches):
run['EPOCH'].append(int(re.search(repo, data).group(1)))
m = re.search(rtme, data)
run['RTIME'].append(86400.0 * float(m.group(1)) +
3600.0 * float(m.group(2)) +
60.0 * float(m.group(3)) +
float(m.group(4)))
err = re.findall(rerr, data)
for e in err:
if e[0] in run['ERROR']:
run['ERROR'][e[0]].append(np.array([float(e[1]), float(e[2]), float(e[3])], dtype=np.float64))
else:
run['ERROR'].update(
{e[0]: [np.array([float(e[1]), float(e[2]), float(e[3])], dtype=np.float64)]})
for key in run['ERROR'].keys():
run['ERROR'][key] = np.vstack(run['ERROR'][key])
if re.match(comp, optfile):
completed = True
else:
completed = False
allnets.append(run)
else:
completed = False
    return allnets, completed
class ANITesterTool:
def load_models(self):
self.ncl = [pync.molecule(self.cnstfile, self.saefile, self.model_path + 'train' + str(i) + '/networks/', self.gpuid, False) for i in range(self.ens_size)]
def __init__(self,model_path,ens_size,gpuid):
self.model_path = model_path
self.ens_size = ens_size
self.gpuid = gpuid
self.cnstfile = model_path+[f for f in os.listdir(self.model_path) if f[-7:] == '.params'][0]
self.saefile = model_path+[f for f in os.listdir(self.model_path) if f[-4:] == '.dat'][0]
self.load_models()
def evaluate_individual_testset(self,energy_key='energies',force_key='forces',forces=False,pbc=True,remove_sae=True):
self.Evals = []
self.Fvals = []
for i,nc in enumerate(self.ncl):
adl = pyt.anidataloader(self.model_path+'/testset/testset'+str(i)+'.h5')
Evals_ind = []
Fvals_ind = []
for data in adl:
S = data['species']
X = data['coordinates']
C = data['cell']
E = conv_au_ev*data[energy_key]
F = conv_au_ev*data[force_key]
if remove_sae:
Esae = conv_au_ev*hdt.compute_sae(self.saefile,S)
else:
Esae = 0.0
for x,c,e,f in zip(X,C,E,F):
if pbc is True:
pbc_inv = np.linalg.inv(c).astype(np.float64)
nc.setMolecule(coords=np.array(x,dtype=np.float64), types=list(S))
nc.setPBC(bool(True), bool(True), bool(True))
nc.setCell(np.array(c,dtype=np.float64),pbc_inv)
else:
nc.setMolecule(coords=np.array(x, dtype=np.float64), types=list(S))
Eani = conv_au_ev*nc.energy().copy()[0]
if forces:
Fani = conv_au_ev*nc.force().copy()
else:
Fani = f
if pbc is True:
Evals_ind.append(np.array([Eani-Esae,e-Esae])/len(S))
else:
Evals_ind.append(np.array([Eani-Esae,e-Esae]))
Fvals_ind.append(np.stack([Fani.flatten(),f.flatten()]).T)
self.Evals.append(np.stack(Evals_ind))
self.Fvals.append(np.vstack(Fvals_ind))
return self.Evals,self.Fvals
def evaluate_individual_dataset(self,dataset_file,energy_key='energies',force_key='forces',forces=False,pbc=True,remove_sae=True):
self.Evals = []
self.Fvals = []
for i,nc in enumerate(self.ncl):
adl = pyt.anidataloader(dataset_file)
Evals_ind = []
Fvals_ind = []
for data in adl:
S = data['species']
X = data['coordinates']
E = conv_au_ev*data[energy_key]
if pbc:
C = data['cell']
else:
C = np.zeros(shape=(E.size,3,3),dtype=np.float64)
if forces:
F = conv_au_ev*data[force_key]
else:
F = np.zeros(shape=X.shape,dtype=np.float64)
if remove_sae:
Esae = conv_au_ev*hdt.compute_sae(self.saefile,S)
else:
Esae = 0.0
for x,c,e,f in zip(X,C,E,F):
if pbc is True:
pbc_inv = np.linalg.inv(c).astype(np.float64)
nc.setMolecule(coords=np.array(x,dtype=np.float64), types=list(S))
nc.setPBC(bool(True), bool(True), bool(True))
nc.setCell(np.array(c,dtype=np.float64),pbc_inv)
else:
nc.setMolecule(coords=np.array(x, dtype=np.float64), types=list(S))
Eani = conv_au_ev*nc.energy().copy()[0]
if forces:
Fani = conv_au_ev*nc.force().copy()
else:
Fani = f
if pbc is True:
Evals_ind.append(np.array([Eani-Esae,e-Esae])/len(S))
else:
Evals_ind.append(np.array([Eani-Esae,e-Esae]))
Fvals_ind.append(np.stack([Fani.flatten(),f.flatten()]).T)
self.Evals.append(np.stack(Evals_ind))
self.Fvals.append(np.vstack(Fvals_ind))
return self.Evals,self.Fvals
def build_ind_error_dataframe(self):
d = {'Emae':[],'Erms':[],'Fmae':[],'Frms':[],}
for i,(e,f) in enumerate(zip(self.Evals,self.Fvals)):
d['Emae'].append(1000.0*hdt.calculatemeanabserror(e[:,0],e[:,1]))
d['Erms'].append(1000.0*hdt.calculaterootmeansqrerror(e[:,0],e[:,1]))
d['Fmae'].append(hdt.calculatemeanabserror(f[:,0],f[:,1]))
d['Frms'].append(hdt.calculaterootmeansqrerror(f[:,0],f[:,1]))
df = pd.DataFrame(data=d)
df.loc['Avg.'] = df.mean()
return df
def plot_corr_dist(self, Xa, Xp, inset=True,linfit=True, xlabel='$F_{dft}$' + r' $(kcal \times mol^{-1} \times \AA^{-1})$', ylabel='$F_{dft}$' + r' $(kcal \times mol^{-1} \times \AA^{-1})$', figsize=[13,10]):
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from matplotlib.backends.backend_pdf import PdfPages
cmap = mpl.cm.viridis
Fmx = Xa.max()
Fmn = Xa.min()
label_size = 14
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['ytick.labelsize'] = label_size
fig, ax = plt.subplots(figsize=figsize)
# Plot ground truth line
if linfit:
ax.plot([Fmn, Fmx], [Fmn, Fmx], '--', c='r', linewidth=3)
# Set labels
ax.set_xlabel(xlabel, fontsize=22)
ax.set_ylabel(ylabel, fontsize=22)
#cmap = mpl.cm.viridis
#cmap = mpl.cm.brg
# Plot 2d Histogram
if linfit:
bins = ax.hist2d(Xa, Xp, bins=200, norm=LogNorm(), range= [[Xa.min(), Xa.max()], [Xp.min(), Xp.max()]], cmap=cmap)
else:
bins = ax.hist2d(Xa, Xp, bins=200, norm=LogNorm(), cmap=cmap)
# Build color bar
#cbaxes = fig.add_axes([0.91, 0.1, 0.03, 0.8])
cb1 = fig.colorbar(bins[-1], cmap=cmap)
cb1.set_label('Count', fontsize=16)
# Annotate with errors
PMAE = hdt.calculatemeanabserror(Xa, Xp)
PRMS = hdt.calculaterootmeansqrerror(Xa, Xp)
if linfit:
ax.text(0.75*((Fmx-Fmn))+Fmn, 0.43*((Fmx-Fmn))+Fmn, 'MAE='+"{:.3f}".format(PMAE)+'\nRMSE='+"{:.3f}".format(PRMS), fontsize=20,
bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5})
if not linfit:
plt.vlines(x=0.0,ymin=130,ymax=300,linestyle='--',color='red')
if inset:
axins = zoomed_inset_axes(ax, 2.2, loc=2) # zoom = 6
sz = 6
axins.hist2d(Xa, Xp,bins=50, range=[[Fmn/sz, Fmx/sz], [Fmn/sz, Fmx/sz]], norm=LogNorm(), cmap=cmap)
axins.plot([Xa.min(), Xa.max()], [Xa.min(), Xa.max()], '--', c='r', linewidth=3)
# sub region of the original image
x1, x2, y1, y2 = Fmn/sz, Fmx/sz, Fmn/sz, Fmx/sz
axins.set_xlim(x1, x2)
axins.set_ylim(y1, y2)
axins.yaxis.tick_right()
plt.xticks(visible=True)
plt.yticks(visible=True)
mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="0.5")
Ferr = Xa - Xp
std = np.std(Ferr)
men = np.mean(Ferr)
axh = plt.axes([.49, .16, .235, .235])
            axh.hist(Ferr, bins=75, range=[men-4*std, men+4*std], density=True)
axh.set_title('Difference distribution')
#plt.draw()
plt.show()
def plot_corr_dist_ax(ax, Xa, Xp, errors=False,linfit=True, xlabel='$F_{dft}$' + r' $(kcal \times mol^{-1} \times \AA^{-1})$', ylabel='$F_{dft}$' + r' $(kcal \times mol^{-1} \times \AA^{-1})$'):
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from matplotlib.backends.backend_pdf import PdfPages
cmap = mpl.cm.viridis
Fmx = Xa.max()
Fmn = Xa.min()
label_size = 14
#mpl.rcParams['xtick.labelsize'] = label_size
#mpl.rcParams['ytick.labelsize'] = label_size
# Plot ground truth line
if linfit:
ax.plot([Fmn, Fmx], [Fmn, Fmx], '--', c='r', linewidth=1)
# Set labels
#ax.set_xlabel(xlabel, fontsize=18)
#ax.set_ylabel(ylabel, fontsize=18)
# Plot 2d Histogram
if linfit:
bins = ax.hist2d(Xa, Xp, bins=200, norm=LogNorm(), range= [[Xa.min(), Xa.max()], [Xp.min(), Xp.max()]], cmap=cmap)
else:
bins = ax.hist2d(Xa, Xp, bins=200, norm=LogNorm(), cmap=cmap)
# Build color bar
#cbaxes = fig.add_axes([0.91, 0.1, 0.03, 0.8])
#cb1 = ax.colorbar(bins[-1], cmap=cmap)
#cb1.set_label('Count', fontsize=16)
# Annotate with errors
PMAE = hdt.calculatemeanabserror(Xa, Xp)
PRMS = hdt.calculaterootmeansqrerror(Xa, Xp)
if errors:
ax.text(0.55*((Fmx-Fmn))+Fmn, 0.2*((Fmx-Fmn))+Fmn, 'MAE='+"{:.3f}".format(PMAE)+'\nRMSE='+"{:.3f}".format(PRMS), fontsize=20,
bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5})
#if not linfit:
# plt.vlines(x=0.0,ymin=130,ymax=300,linestyle='--',color='red')
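# A minimal, hypothetical usage sketch for ANITesterTool (the model path, ensemble size and GPU id
# below are placeholders, not values taken from this module):
#
#     tool = ANITesterTool('/path/to/model/', ens_size=5, gpuid=0)
#     Evals, Fvals = tool.evaluate_individual_testset(forces=True, pbc=False)
#     print(tool.build_ind_error_dataframe())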
class anitrainerparamsdesigner():
def __init__(self, elements, Nrr, Rcr, Nar=0, Nzt=0, Rca=3.5, Xst=0.7, Charge=False, Repuls=False, ACA=False, descriptor="ANI_NORMAL"):
self.params = {"elm":elements,
"Nrr":Nrr,
"Rcr":Rcr,
"Nar":Nar,
"Nzt":Nzt,
"Rca":Rca,
"Xst":Xst,
"ACA":ACA,
"Crg":Charge,
"Rps":Repuls,
"Dsc":descriptor
}
# ------------------------------------------
# Radial Function Cos
# ------------------------------------------
def cutoffcos(self, X, Rc):
Xt = X
for i in range(0, Xt.shape[0]):
if Xt[i] > Rc:
Xt[i] = Rc
return 0.5 * (np.cos((np.pi * Xt) / Rc) + 1.0)
# ------------------------------------------
# Radial Function Cos
# ------------------------------------------
def radialfunction(self, X, eta, Rs):
return np.exp(-eta * (X - Rs) ** 2.0)
# ------------------------------------------
# Radial Build Functions
# ------------------------------------------
def radialfunctioncos(self, X, eta, Rc, Rs):
return self.radialfunction(X, eta, Rs) * self.cutoffcos(X, Rc)
def compute_overlap(self, eta, Rs1, Rs2):
func1 = lambda x: self.radialfunction(x, eta, Rs1)
func2 = lambda x: self.radialfunction(x, eta, Rs2)
funcC = lambda x: min(func1(x),func2(x))
i_f1 = quad(func1, -10, 20)[0]
i_fC = quad(funcC, -10, 20)
return i_fC[0]/i_f1
def determine_eta(self, req_olap, Rs1, Rs2, dx = 0.1):
eta = 0.1
olap = 1.0
while olap > req_olap:
eta = eta + 0.1
olap = self.compute_overlap(eta, Rs1, Rs2)
return eta
def obtain_radial_parameters(self, Nrr, Xst, Rcr):
ShfR = np.zeros(Nrr)
for i in range(0, Nrr):
stepsize = (Rcr - Xst) / float(Nrr)
step = i * stepsize + Xst
ShfR[i] = step
eta = self.determine_eta(0.4, ShfR[0], ShfR[1])
return ShfR, eta
def get_Rradial_parameters(self):
Nrr = self.params['Nrr']
Xst = self.params['Xst']
Rcr = self.params['Rcr']
return self.obtain_radial_parameters(Nrr, Xst, Rcr)
def get_Aradial_parameters(self):
Nar = self.params['Nar']
Xst = self.params['Xst']
Rca = self.params['Rca']
return self.obtain_radial_parameters(Nar, Xst, Rca)
def plot_radial_funcs(self, Shf, Eta, Rc):
for sr in Shf:
X = np.linspace(0, Rc, 1000, endpoint=True)
Y = self.radialfunctioncos(X, Eta, Rc, sr)
plt.plot(X, Y, color='red', linewidth=2)
plt.show()
def plot_Rradial_funcs(self):
        ShfR, EtaR = self.get_Rradial_parameters()
self.plot_radial_funcs(ShfR, EtaR, self.params['Rcr'])
def plot_Aradial_funcs(self):
        ShfA, EtaA = self.get_Aradial_parameters()
self.plot_radial_funcs(ShfA, EtaA, self.params['Rca'])
# ------------------------------------------
# Angular Build Functions
# ------------------------------------------
def angularfunction(self, T, zeta, lam, Ts):
F = 0.5 * (2.0 ** (1.0 - zeta)) * ((1.0 + lam * np.cos(T - Ts)) ** zeta)
return F
def compute_overlap_angular(self, zeta, Zs1, Zs2):
func1 = lambda x: self.angularfunction(x, zeta, 1, Zs1)
func2 = lambda x: self.angularfunction(x, zeta, 1, Zs2)
funcC = lambda x: min(func1(x),func2(x))
i_f1 = quad(func1, -6, 6)[0]
i_fC = quad(funcC, -6, 6)
return i_fC[0]/i_f1
def determine_zeta(self, req_olap, Zs1, Zs2, dx = 0.1):
zeta = 4.0
olap = 1.0
while olap > req_olap:
zeta = zeta + dx
olap = self.compute_overlap_angular(zeta, Zs1, Zs2)
return zeta
def obtain_angular_parameters(self, Nzt):
ShfZ = np.zeros(Nzt)
for i in range(0, Nzt):
stepsize = np.pi / float(Nzt)
step = i * stepsize + stepsize/2.0
ShfZ[i] = step
zeta = self.determine_zeta(0.35, ShfZ[0], ShfZ[1])
return ShfZ, zeta
def get_angular_parameters(self):
Nzt = self.params['Nzt']
return self.obtain_angular_parameters(Nzt)
def build_angular_plots(self, ShfZ, Zeta):
for sz in ShfZ:
X = np.linspace(0, np.pi, 1000, endpoint=True)
Y = self.angularfunction(X, Zeta, 1, sz)
plt.plot(X, Y, color='red', linewidth=2)
plt.show()
def plot_angular_funcs(self):
ShfZ, Zeta = self.get_angular_parameters()
self.build_angular_plots(ShfZ, Zeta)
# ------------------------------------------
# Get a file name
# ------------------------------------------
def get_filename(self):
return "r"+"".join(self.params["elm"])+"-" + "{0:.1f}".format(self.params["Rcr"]) + "R_" \
+ str(self.params["Nrr"]) + "-"\
+ "{0:.1f}".format(self.params["Rca"]) + "A_a" \
+ str(self.params["Nar"]) + "-" \
+ str(self.params["Nzt"]) + ".params"
# ------------------------------------------
# Print data to file
# ------------------------------------------
def printdatatofile(self, f, title, X, N):
f.write(title + ' = [')
for i in range(0, N):
if i < N - 1:
s = "{:.7e}".format(X[i]) + ','
else:
s = "{:.7e}".format(X[i])
f.write(s)
f.write(']\n')
def get_aev_size(self):
Na = len(self.params['elm'])
Nar = self.params['Nar']
Nzt = self.params['Nzt']
Nrr = self.params['Nrr']
Nat = Nar * (Na * (Na + 1) / 2) * Nzt
Nrt = Nrr * Na
return int(Nat + Nrt)
# ------------------------------------------
# Create params file
# ------------------------------------------
def create_params_file(self, path):
ShfR,EtaR = self.get_Rradial_parameters()
if self.params["Nzt"] is not 0 or self.params["Nar"] is not 0:
ShfA,EtaA = self.get_Aradial_parameters()
ShfZ,Zeta = self.get_angular_parameters()
Rcr = self.params['Rcr']
Rca = self.params['Rca']
f = open(path+"/"+self.get_filename(),"w")
f.write('DESC = ' + self.params['Dsc'] + '\n')
f.write('TM = ' + str(1) + '\n')
f.write('CG = ' + str(1 if self.params['Crg'] else 0) + '\n')
f.write('RP = ' + str(1 if self.params['Rps'] else 0) + '\n')
f.write('AC = ' + str(1 if self.params['ACA'] else 0) + '\n')
f.write('Rcr = ' + "{:.4e}".format(Rcr) + '\n')
f.write('Rca = ' + "{:.4e}".format(Rca) + '\n')
self.printdatatofile(f, 'EtaR', [EtaR], 1)
self.printdatatofile(f, 'ShfR', ShfR, ShfR.size)
if self.params["Nzt"] is not 0 or self.params["Nar"] is not 0:
self.printdatatofile(f, 'Zeta', [Zeta], 1)
self.printdatatofile(f, 'ShfZ', ShfZ, ShfZ.size)
self.printdatatofile(f, 'EtaA', [EtaA], 1)
self.printdatatofile(f, 'ShfA', ShfA, ShfA.size)
f.write('Atyp = [' + ",".join(self.params['elm']) + ']\n')
f.close()
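# A minimal, hypothetical usage sketch for anitrainerparamsdesigner (the element list, shell
# counts, cutoffs and output directory below are placeholders):
#
#     designer = anitrainerparamsdesigner(['H', 'C', 'N', 'O'], Nrr=16, Rcr=5.2, Nar=4, Nzt=8, Rca=3.5)
#     print(designer.get_aev_size())            # AEV length implied by these choices
#     designer.create_params_file('/path/to/output/dir')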
class anitrainerinputdesigner:
def __init__(self):
self.params = {"sflparamsfile": None, # AEV parameters file
"ntwkStoreDir": "networks/", # Store network dir
"atomEnergyFile": None, # Atomic energy shift file
"nmax": 0, # Max training iterations
"tolr": 50, # Annealing tolerance (patience)
"emult": 0.5, # Annealing multiplier
"eta": 0.001, # Learning rate
"tcrit": 1.0e-5, # eta termination crit.
"tmax": 0, # Maximum time (0 = inf)
"tbtchsz": 2048, # training batch size
"vbtchsz": 2048, # validation batch size
"gpuid": 0, # Default GPU id (is overridden by -g flag for HDAtomNNP-Trainer exe)
"ntwshr": 0, # Use a single network for all types... (THIS IS BROKEN, DO NOT USE)
"nkde": 2, # Energy delta regularization
"energy": 1, # Enable/disable energy training
"force": 0, # Enable/disable force training
"dipole": 0, # Enable/disable dipole training
"charge": 0, # Enable/disable charge training
"acachg": 0, # Enable/disable ACA charge training
"fmult": 1.0, # Multiplier of force cost
"pbc": 0, # Use PBC in training (Warning, this only works for data with a single rect. box size)
"cmult": 1.0, # Charge cost multiplier (CHARGE TRAINING BROKEN IN CURRENT VERSION)
"runtype": "ANNP_CREATE_HDNN_AND_TRAIN", # DO NOT CHANGE - For NeuroChem backend
"adptlrn": "OFF",
"decrate": 0.9,
"moment": "ADAM",
"mu": 0.99
}
self.layers = dict()
def add_layer(self, atomtype, layer_dict):
layer_dict.update({"type": 0})
if atomtype not in self.layers:
self.layers[atomtype] = [layer_dict]
else:
self.layers[atomtype].append(layer_dict)
def set_parameter(self, key, value):
self.params[key] = value
def print_layer_parameters(self):
for ak in self.layers.keys():
print('Species:', ak)
for l in self.layers[ak]:
print(' -', l)
def print_training_parameters(self):
print(self.params)
def __get_value_string__(self, value):
if type(value) == float:
string = "{0:10.7e}".format(value)
else:
string = str(value)
return string
def __build_network_str__(self, iptsize):
network = "network_setup {\n"
network += " inputsize=" + str(iptsize) + ";\n"
for ak in self.layers.keys():
network += " atom_net " + ak + " $\n"
if int(self.params["dipole"]) != 0 or int(self.params["charge"]) != 0:
self.layers[ak].append({"nodes": 12, "activation": 6, "type": 0})
#self.layers[ak].append({"nodes": 2, "activation": 6, "type": 0})
elif int(self.params["acachg"]) != 0:
self.layers[ak].append({"nodes": 2, "activation": 6, "type": 0})
else:
self.layers[ak].append({"nodes": 1, "activation": 6, "type": 0})
for l in self.layers[ak]:
network += " layer [\n"
for key in l.keys():
network += " " + key + "=" + self.__get_value_string__(l[key]) + ";\n"
network += " ]\n"
network += " $\n"
network += "}\n"
return network
def write_input_file(self, file, iptsize):
f = open(file, 'w')
for key in self.params.keys():
f.write(key + '=' + self.__get_value_string__(self.params[key]) + '\n')
f.write(self.__build_network_str__(iptsize))
f.close()
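# A minimal, hypothetical usage sketch for anitrainerinputdesigner (layer sizes, activation codes,
# file names and the AEV input size below are placeholders):
#
#     ipt = anitrainerinputdesigner()
#     ipt.set_parameter('atomEnergyFile', 'sae_linfit.dat')
#     for element in ['H', 'C', 'N', 'O']:
#         ipt.add_layer(element, {"nodes": 128, "activation": 9})
#         ipt.add_layer(element, {"nodes": 112, "activation": 9})
#     ipt.write_input_file('inputtrain.ipt', iptsize=384)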
class alaniensembletrainer():
def __init__(self, train_root, netdict, input_builder, h5dir, Nn, random_seed=-1):
if random_seed != -1:
np.random.seed(random_seed)
self.train_root = train_root
# self.train_pref = train_pref
self.h5dir = h5dir
self.Nn = Nn
self.netdict = netdict
self.iptbuilder = input_builder
if h5dir is not None:
self.h5file = [f for f in os.listdir(self.h5dir) if f.rsplit('.', 1)[1] == 'h5']
# print(self.h5dir,self.h5file)
def build_training_cache(self, forces=True):
store_dir = self.train_root + "cache-data-"
N = self.Nn
for i in range(N):
if not os.path.exists(store_dir + str(i)):
os.mkdir(store_dir + str(i))
if os.path.exists(store_dir + str(i) + '/../testset/testset' + str(i) + '.h5'):
os.remove(store_dir + str(i) + '/../testset/testset' + str(i) + '.h5')
if not os.path.exists(store_dir + str(i) + '/../testset'):
os.mkdir(store_dir + str(i) + '/../testset')
cachet = [cg('_train', self.netdict['saefile'], store_dir + str(r) + '/', False) for r in range(N)]
cachev = [cg('_valid', self.netdict['saefile'], store_dir + str(r) + '/', False) for r in range(N)]
testh5 = [pyt.datapacker(store_dir + str(r) + '/../testset/testset' + str(r) + '.h5') for r in range(N)]
Nd = np.zeros(N, dtype=np.int32)
Nbf = 0
for f, fn in enumerate(self.h5file):
print('Processing file(' + str(f + 1) + ' of ' + str(len(self.h5file)) + '):', fn)
adl = pyt.anidataloader(self.h5dir + fn)
To = adl.size()
Ndc = 0
Fmt = []
Emt = []
for c, data in enumerate(adl):
Pn = data['path'] + '_' + str(f).zfill(6) + '_' + str(c).zfill(6)
# Progress indicator
# sys.stdout.write("\r%d%% %s" % (int(100 * c / float(To)), Pn))
# sys.stdout.flush()
# print(data.keys())
# Extract the data
X = data['coordinates']
E = data['energies']
S = data['species']
# 0.0 forces if key doesnt exist
if forces:
F = data['forces']
else:
F = 0.0 * X
Fmt.append(np.max(np.linalg.norm(F, axis=2), axis=1))
Emt.append(E)
Mv = np.max(np.linalg.norm(F, axis=2), axis=1)
index = np.where(Mv > 10.5)[0]
indexk = np.where(Mv <= 10.5)[0]
Nbf += index.size
# CLear forces
X = X[indexk]
F = F[indexk]
E = E[indexk]
Esae = hdt.compute_sae(self.netdict['saefile'], S)
hidx = np.where(np.abs(E - Esae) > 10.0)
lidx = np.where(np.abs(E - Esae) <= 10.0)
if hidx[0].size > 0:
print(' -(' + str(c).zfill(3) + ')High energies detected:\n ', E[hidx])
X = X[lidx]
E = E[lidx]
F = F[lidx]
Ndc += E.size
if (set(S).issubset(self.netdict['atomtyp'])):
# if (set(S).issubset(['C', 'N', 'O', 'H', 'F', 'S', 'Cl'])):
# Random mask
R = np.random.uniform(0.0, 1.0, E.shape[0])
idx = np.array([interval(r, N) for r in R])
# Build random split lists
split = []
for j in range(N):
split.append([i for i, s in enumerate(idx) if s == j])
nd = len([i for i, s in enumerate(idx) if s == j])
Nd[j] = Nd[j] + nd
# Store data
for i, t, v, te in zip(range(N), cachet, cachev, testh5):
## Store training data
X_t = np.array(np.concatenate([X[s] for j, s in enumerate(split) if j != i]), order='C',
dtype=np.float32)
F_t = np.array(np.concatenate([F[s] for j, s in enumerate(split) if j != i]), order='C',
dtype=np.float32)
E_t = np.array(np.concatenate([E[s] for j, s in enumerate(split) if j != i]), order='C',
dtype=np.float64)
if E_t.shape[0] != 0:
t.insertdata(X_t, F_t, E_t, list(S))
## Split test/valid data and store\
# tv_split = np.array_split(split[i], 2)
## Store Validation
if np.array(split[i]).size > 0:
X_v = np.array(X[split[i]], order='C', dtype=np.float32)
F_v = np.array(F[split[i]], order='C', dtype=np.float32)
E_v = np.array(E[split[i]], order='C', dtype=np.float64)
if E_v.shape[0] != 0:
v.insertdata(X_v, F_v, E_v, list(S))
## Store testset
# if tv_split[1].size > 0:
# X_te = np.array(X[split[i]], order='C', dtype=np.float32)
# F_te = np.array(F[split[i]], order='C', dtype=np.float32)
# E_te = np.array(E[split[i]], order='C', dtype=np.float64)
# if E_te.shape[0] != 0:
# te.store_data(Pn, coordinates=X_te, forces=F_te, energies=E_te, species=list(S))
# sys.stdout.write("\r%d%%" % int(100))
# print(" Data Kept: ", Ndc, 'High Force: ', Nbf)
# sys.stdout.flush()
# print("")
# Print some stats
print('Data count:', Nd)
print('Data split:', 100.0 * Nd / np.sum(Nd), '%')
# Save train and valid meta file and cleanup testh5
for t, v, th in zip(cachet, cachev, testh5):
t.makemetadata()
v.makemetadata()
th.cleanup()
def sae_linear_fitting(self, Ekey='energies', energy_unit=1.0, Eax0sum=False):
from sklearn import linear_model
print('Performing linear fitting...')
datadir = self.h5dir
sae_out = self.netdict['saefile']
smap = dict()
for i, Z in enumerate(self.netdict['atomtyp']):
smap.update({Z: i})
Na = len(smap)
files = os.listdir(datadir)
X = []
y = []
for f in files[0:20]:
print(f)
adl = pyt.anidataloader(datadir + f)
for data in adl:
# print(data['path'])
S = data['species']
if data[Ekey].size > 0:
if Eax0sum:
E = energy_unit * np.sum(np.array(data[Ekey], order='C', dtype=np.float64), axis=1)
else:
E = energy_unit * np.array(data[Ekey], order='C', dtype=np.float64)
S = S[0:data['coordinates'].shape[1]]
unique, counts = np.unique(S, return_counts=True)
x = np.zeros(Na, dtype=np.float64)
for u, c in zip(unique, counts):
x[smap[u]] = c
for e in E:
X.append(np.array(x))
y.append(np.array(e))
X = np.array(X)
y = np.array(y).reshape(-1, 1)
lin = linear_model.LinearRegression(fit_intercept=False)
lin.fit(X, y)
coef = lin.coef_
print(coef)
sae = open(sae_out, 'w')
for i, c in enumerate(coef[0]):
sae.write(next(key for key, value in smap.items() if value == i) + ',' + str(i) + '=' + str(c) + '\n')
sae.close()
print('Linear fitting complete.')
def build_strided_training_cache(self, Nblocks, Nvalid, Ntest, build_test=True,
Ekey='energies', energy_unit=1.0,
forces=True, grad=False, Fkey='forces', forces_unit=1.0,
dipole=False, dipole_unit=1.0, Dkey='dipoles',
charge=False, charge_unit=1.0, Ckey='charges',
solvent=False,
pbc=False,
Eax0sum=False, rmhighe=True,rmhighf=False,force_exact_split=False):
if not os.path.isfile(self.netdict['saefile']):
self.sae_linear_fitting(Ekey=Ekey, energy_unit=energy_unit, Eax0sum=Eax0sum)
h5d = self.h5dir
store_dir = self.train_root + "cache-data-"
N = self.Nn
Ntrain = Nblocks - Nvalid - Ntest
if Nblocks % N != 0:
raise ValueError('Error: number of networks must evenly divide number of blocks.')
Nstride = Nblocks / N
for i in range(N):
if not os.path.exists(store_dir + str(i)):
os.mkdir(store_dir + str(i))
if build_test:
if os.path.exists(store_dir + str(i) + '/../testset/testset' + str(i) + '.h5'):
os.remove(store_dir + str(i) + '/../testset/testset' + str(i) + '.h5')
if not os.path.exists(store_dir + str(i) + '/../testset'):
os.mkdir(store_dir + str(i) + '/../testset')
cachet = [cg('_train', self.netdict['saefile'], store_dir + str(r) + '/', False) for r in range(N)]
cachev = [cg('_valid', self.netdict['saefile'], store_dir + str(r) + '/', False) for r in range(N)]
if build_test:
testh5 = [pyt.datapacker(store_dir + str(r) + '/../testset/testset' + str(r) + '.h5') for r in range(N)]
if rmhighe:
dE = []
for f in self.h5file:
adl = pyt.anidataloader(h5d + f)
for data in adl:
S = data['species']
E = data[Ekey]
X = data['coordinates']
Esae = hdt.compute_sae(self.netdict['saefile'], S)
dE.append((E - Esae) / np.sqrt(len(S)))
dE = np.concatenate(dE)
cidx = np.where(np.abs(dE) < 15.0)
std = dE[cidx].std()
men = np.mean(dE[cidx])
print(men, std, men + std)
idx = np.intersect1d(np.where(dE >= -np.abs(8 * std + men))[0], np.where(dE <= np.abs(8 * std + men))[0])
cnt = idx.size
print('DATADIST: ', dE.size, cnt, (dE.size - cnt), 100.0 * ((dE.size - cnt) / dE.size))
E = []
data_count = np.zeros((N, 3), dtype=np.int32)
for f in self.h5file:
adl = pyt.anidataloader(h5d + f)
for data in adl:
# print(data['path'],data['energies'].size)
S = data['species']
if data[Ekey].size > 0 and (set(S).issubset(self.netdict['atomtyp'])):
X = np.array(data['coordinates'], order='C', dtype=np.float32)
if Eax0sum:
E = energy_unit * np.sum(np.array(data[Ekey], order='C', dtype=np.float64), axis=1)
else:
E = energy_unit * np.array(data[Ekey], order='C', dtype=np.float64)
Sv = np.zeros((E.size,7),dtype=np.float32)
if solvent:
Sv = np.array(data['solvent'], order='C', dtype=np.float32)
if forces and not grad:
F = forces_unit * np.array(data[Fkey], order='C', dtype=np.float32)
elif forces and grad:
F = -forces_unit * np.array(data[Fkey], order='C', dtype=np.float32)
else:
F = 0.0 * X
D = np.zeros((E.size,3),dtype=np.float32)
if dipole:
D = dipole_unit * np.array(data[Dkey], order='C', dtype=np.float32).reshape(E.size,3)
else:
D = 0.0 * D
P = np.zeros((E.size,3,3),dtype=np.float32)
if pbc:
P = np.array(data['cell'], order='C', dtype=np.float32).reshape(E.size,3,3)
else:
P = 0.0 * P
C = np.zeros((E.size,X.shape[1]),dtype=np.float32)
if charge:
C = charge_unit * np.array(data[Ckey], order='C', dtype=np.float32).reshape(E.size,len(S))
else:
C = 0.0 * C
if rmhighe:
Esae = hdt.compute_sae(self.netdict['saefile'], S)
ind_dE = (E - Esae) / np.sqrt(len(S))
hidx = np.union1d(np.where(ind_dE < -(9.0 * std + men))[0],
np.where(ind_dE > (9.0 * std + men))[0])
lidx = np.intersect1d(np.where(ind_dE >= -(9.0 * std + men))[0],
np.where(ind_dE <= (9.0 * std + men))[0])
if hidx.size > 0:
print(' -(' + f + ':' + data['path'] + ')High energies detected:\n ',
(E[hidx] - Esae) / np.sqrt(len(S)))
X = X[lidx]
E = E[lidx]
F = F[lidx]
D = D[lidx]
C = C[lidx]
P = P[lidx]
#Sv = Sv[lidx]
if rmhighf:
hfidx = np.where(np.abs(F) > 2.0)
if hfidx[0].size > 0:
print('High force:',hfidx)
hfidx = np.all(np.abs(F).reshape(E.size,-1) <= 2.0,axis=1)
X = X[hfidx]
E = E[hfidx]
F = F[hfidx]
D = D[hfidx]
C = C[hfidx]
P = P[hfidx]
#Sv = Sv[hfidx]
# Build random split index
ridx = np.random.randint(0, Nblocks, size=E.size)
Didx = [np.argsort(ridx)[np.where(ridx == i)] for i in range(Nblocks)]
# Build training cache
for nid, cache in enumerate(cachet):
set_idx = np.concatenate(
[Didx[((bid + nid * int(Nstride)) % Nblocks)] for bid in range(Ntrain)])
if set_idx.size != 0:
data_count[nid, 0] += set_idx.size
#print("Py tDIPOLE1:\n",D[set_idx][0:3],D.shape)
#print("Py tDIPOLE2:\n",D[set_idx][-3:],D.shape)
#cache.insertdata(X[set_idx], F[set_idx], C[set_idx], D[set_idx], E[set_idx], list(S))
#cache.insertdata(X[set_idx], F[set_idx], C[set_idx], D[set_idx], P[set_idx], E[set_idx], Sv[set_idx], list(S))
cache.insertdata(X[set_idx], F[set_idx], C[set_idx], D[set_idx], P[set_idx], E[set_idx], list(S))
for nid, cache in enumerate(cachev):
set_idx = np.concatenate(
[Didx[(Ntrain + bid + nid * int(Nstride)) % Nblocks] for bid in range(Nvalid)])
if set_idx.size != 0:
data_count[nid, 1] += set_idx.size
#print("Py vDIPOLE1:\n",D[set_idx][0:3],D.shape)
#print("Py vDIPOLE2:\n",D[set_idx][-3:],D.shape)
#cache.insertdata(X[set_idx], F[set_idx], C[set_idx], D[set_idx], E[set_idx], list(S))
#cache.insertdata(X[set_idx], F[set_idx], C[set_idx], D[set_idx], P[set_idx], E[set_idx],Sv[set_idx], list(S))
cache.insertdata(X[set_idx], F[set_idx], C[set_idx], D[set_idx], P[set_idx], E[set_idx], list(S))
if build_test:
for nid, th5 in enumerate(testh5):
set_idx = np.concatenate(
[Didx[(Ntrain + Nvalid + bid + nid * int(Nstride)) % Nblocks] for bid in range(Ntest)])
if set_idx.size != 0:
data_count[nid, 2] += set_idx.size
#th5.store_data(f + data['path'], coordinates=X[set_idx], forces=F[set_idx], charges=C[set_idx], dipoles=D[set_idx], cell=P[set_idx],energies=E[set_idx], species=list(S))
#th5.store_data(f + data['path'], coordinates=X[set_idx], forces=F[set_idx], charges=C[set_idx], dipoles=D[set_idx],
th5.store_data(f + data['path'], coordinates=X[set_idx], forces=F[set_idx], charges=C[set_idx], dipoles=D[set_idx], cell=P[set_idx],energies=E[set_idx], species=list(S))
#th5.store_data(f + data['path'], coordinates=X[set_idx], forces=F[set_idx], charges=C[set_idx], dipoles=D[set_idx], cell=P[set_idx],energies=E[set_idx],solvent=Sv[set_idx], species=list(S))
# Save train and valid meta file and cleanup testh5
for t, v in zip(cachet, cachev):
t.makemetadata()
v.makemetadata()
if build_test:
for th in testh5:
th.cleanup()
print(' Train ', ' Valid ', ' Test ')
print(data_count)
print('Training set built.')
def build_strided_training_cache_ind(self, ids, rseed, Nblocks, Nvalid, Ntest, build_test=True,
Ekey='energies', energy_unit=1.0,
forces=True, grad=False, Fkey='forces', forces_unit=1.0,
dipole=False, dipole_unit=1.0, Dkey='dipoles',
charge=False, charge_unit=1.0, Ckey='charges',
pbc=False,
Eax0sum=False, rmhighe=True,rmhighf=False,force_exact_split=False):
np.random.seed(rseed)
if not os.path.isfile(self.netdict['saefile']):
self.sae_linear_fitting(Ekey=Ekey, energy_unit=energy_unit, Eax0sum=Eax0sum)
h5d = self.h5dir
store_dir = self.train_root + "cache-data-"
N = self.Nn
Ntrain = Nblocks - Nvalid - Ntest
if Nblocks % N != 0:
raise ValueError('Error: number of networks must evenly divide number of blocks.')
Nstride = Nblocks / N
if not os.path.exists(store_dir + str(ids)):
os.mkdir(store_dir + str(ids))
if build_test:
if os.path.exists(store_dir + str(ids) + '/../testset/testset' + str(ids) + '.h5'):
os.remove(store_dir + str(ids) + '/../testset/testset' + str(ids) + '.h5')
if not os.path.exists(store_dir + str(ids) + '/../testset'):
os.mkdir(store_dir + str(ids) + '/../testset')
cachet = cg('_train', self.netdict['saefile'], store_dir + str(ids) + '/', False)
cachev = cg('_valid', self.netdict['saefile'], store_dir + str(ids) + '/', False)
if build_test:
testh5 = pyt.datapacker(store_dir + str(ids) + '/../testset/testset' + str(ids) + '.h5')
if rmhighe:
dE = []
for f in self.h5file:
adl = pyt.anidataloader(h5d + f)
for data in adl:
S = data['species']
E = data[Ekey]
X = data['coordinates']
Esae = hdt.compute_sae(self.netdict['saefile'], S)
dE.append((E - Esae) / np.sqrt(len(S)))
dE = np.concatenate(dE)
cidx = np.where(np.abs(dE) < 15.0)
std = np.abs(dE[cidx]).std()
men = np.mean(dE[cidx])
print(men, std, men + std)
idx = np.intersect1d(np.where(dE >= -np.abs(15 * std + men))[0], np.where(dE <= np.abs(11 * std + men))[0])
cnt = idx.size
print('DATADIST: ', dE.size, cnt, (dE.size - cnt), 100.0 * ((dE.size - cnt) / dE.size))
E = []
data_count = np.zeros((N, 3), dtype=np.int32)
for f in self.h5file:
adl = pyt.anidataloader(h5d + f)
for data in adl:
# print(data['path'],data['energies'].size)
S = data['species']
if data[Ekey].size > 0 and (set(S).issubset(self.netdict['atomtyp'])):
X = np.array(data['coordinates'], order='C', dtype=np.float32)
if Eax0sum:
E = energy_unit * np.sum(np.array(data[Ekey], order='C', dtype=np.float64), axis=1)
else:
E = energy_unit * np.array(data[Ekey], order='C', dtype=np.float64)
if forces and not grad:
F = forces_unit * np.array(data[Fkey], order='C', dtype=np.float32)
elif forces and grad:
F = -forces_unit * np.array(data[Fkey], order='C', dtype=np.float32)
else:
F = 0.0 * X
D = np.zeros((E.size,3),dtype=np.float32)
if dipole:
D = dipole_unit * np.array(data[Dkey], order='C', dtype=np.float32).reshape(E.size,3)
else:
D = 0.0 * D
P = np.zeros((E.size,3,3),dtype=np.float32)
if pbc:
P = np.array(data['cell'], order='C', dtype=np.float32).reshape(E.size,3,3)
else:
P = 0.0 * P
C = np.zeros((E.size,X.shape[1]),dtype=np.float32)
if charge:
C = charge_unit * np.array(data[Ckey], order='C', dtype=np.float32).reshape(E.size,len(S))
else:
C = 0.0 * C
if rmhighe:
Esae = hdt.compute_sae(self.netdict['saefile'], S)
ind_dE = (E - Esae) / np.sqrt(len(S))
hidx = np.union1d(np.where(ind_dE < -(15.0 * std + men))[0],
np.where(ind_dE > (11.0 * std + men))[0])
lidx = np.intersect1d(np.where(ind_dE >= -(15.0 * std + men))[0],
np.where(ind_dE <= (11.0 * std + men))[0])
if hidx.size > 0:
print(' -(' + f + ':' + data['path'] + ') High energies detected:\n ',
(E[hidx] - Esae) / np.sqrt(len(S)))
X = X[lidx]
E = E[lidx]
F = F[lidx]
D = D[lidx]
C = C[lidx]
P = P[lidx]
if rmhighf:
hfidx = np.where(np.abs(F) > rmhighf)
if hfidx[0].size > 0:
print('High force:',hfidx)
hfidx = np.all(np.abs(F).reshape(E.size,-1) <= rmhighf,axis=1)
X = X[hfidx]
E = E[hfidx]
F = F[hfidx]
D = D[hfidx]
C = C[hfidx]
P = P[hfidx]
# Build random split index
ridx = np.random.randint(0, Nblocks, size=E.size)
Didx = [np.argsort(ridx)[np.where(ridx == i)] for i in range(Nblocks)]
# Build training cache
#for nid, cache in enumerate(cachet):
set_idx = np.concatenate(
[Didx[((bid + ids * int(Nstride)) % Nblocks)] for bid in range(Ntrain)])
if set_idx.size != 0:
data_count[ids, 0] += set_idx.size
cachet.insertdata(X[set_idx], F[set_idx], C[set_idx], D[set_idx], P[set_idx], E[set_idx], list(S))
#for nid, cache in enumerate(cachev):
set_idx = np.concatenate(
[Didx[(Ntrain + bid + ids * int(Nstride)) % Nblocks] for bid in range(Nvalid)])
if set_idx.size != 0:
data_count[ids, 1] += set_idx.size
cachev.insertdata(X[set_idx], F[set_idx], C[set_idx], D[set_idx], P[set_idx], E[set_idx], list(S))
if build_test:
#for nid, th5 in enumerate(testh5):
set_idx = np.concatenate(
[Didx[(Ntrain + Nvalid + bid + ids * int(Nstride)) % Nblocks] for bid in range(Ntest)])
if set_idx.size != 0:
data_count[ids, 2] += set_idx.size
testh5.store_data(f + data['path'], coordinates=X[set_idx], forces=F[set_idx], charges=C[set_idx], dipoles=D[set_idx], cell=P[set_idx], energies=E[set_idx], species=list(S))
# Save train and valid meta file and cleanup testh5
cachet.makemetadata()
cachev.makemetadata()
if build_test:
testh5.cleanup()
#print(' Train ', ' Valid ', ' Test ')
#print(data_count[ids])
#print(ids,'Training set built.')
def train_ensemble(self, GPUList, remove_existing=False):
print('Training Ensemble...')
processes = []
indicies = np.array_split(np.arange(self.Nn), len(GPUList))
seeds = np.array_split(np.random.randint(low=0,high=2**32,size=self.Nn), len(GPUList))
for gpu, (idc,seedl) in enumerate(zip(indicies,seeds)):
processes.append(Process(target=self.train_network, args=(GPUList[gpu], idc, seedl, remove_existing)))
processes[-1].start()
# self.train_network(pyncdict, trdict, layers, id, i)
for p in processes:
p.join()
print('Training Complete.')
def train_ensemble_single(self, gpuid, ntwkids, remove_existing=False, random_seed = 0):
print('Training Single Model From Ensemble...')
np.random.seed(random_seed)
random_seeds = np.random.randint(0,2**32,size=len(ntwkids))
self.train_network(gpuid, ntwkids, random_seeds, remove_existing)
print('Training Complete.')
def train_network(self, gpuid, indicies, seeds, remove_existing=False):
for index,seed in zip(indicies,seeds):
pyncdict = dict()
pyncdict['wkdir'] = self.train_root + 'train' + str(index) + '/'
pyncdict['ntwkStoreDir'] = self.train_root + 'train' + str(index) + '/' + 'networks/'
pyncdict['datadir'] = self.train_root + "cache-data-" + str(index) + '/'
pyncdict['gpuid'] = str(gpuid)
if not os.path.exists(pyncdict['wkdir']):
os.mkdir(pyncdict['wkdir'])
if remove_existing:
shutil.rmtree(pyncdict['ntwkStoreDir'])
if not os.path.exists(pyncdict['ntwkStoreDir']):
os.mkdir(pyncdict['ntwkStoreDir'])
outputfile = pyncdict['wkdir'] + 'output.opt'
ibuild = copy.deepcopy(self.iptbuilder)
ibuild.set_parameter('seed',str(seed))
nfile = pyncdict['wkdir']+'inputtrain.ipt'
ibuild.write_input_file(nfile,iptsize=self.netdict["iptsize"])
shutil.copy2(self.netdict['cnstfile'], pyncdict['wkdir'])
shutil.copy2(self.netdict['saefile'], pyncdict['wkdir'])
if "/" in nfile:
nfile = nfile.rsplit("/", 1)[1]
command = "cd " + pyncdict['wkdir'] + " && HDAtomNNP-Trainer -i " + nfile + " -d " + pyncdict[
'datadir'] + " -p 1.0 -m -g " + pyncdict['gpuid'] + " > output.opt"
proc = subprocess.Popen(command, shell=True)
proc.communicate()
if 'Termination Criterion Met!' not in open(pyncdict['wkdir']+'output.opt','r').read():
with open(pyncdict['wkdir']+"output.opt",'a+') as output:
output.write("\n!!!TRAINING FAILED TO COMPLETE!!!\n")
print(' -Model', index, 'complete')
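# A minimal, hypothetical end-to-end sketch for alaniensembletrainer (every path, size and
# dictionary value below is a placeholder; the keys shown are the ones this class actually reads:
# 'cnstfile', 'saefile', 'iptsize' and 'atomtyp', and ipt_builder stands for an
# anitrainerinputdesigner instance):
#
#     netdict = {'cnstfile': '/path/rHCNO.params', 'saefile': '/path/sae_linfit.dat',
#                'iptsize': 384, 'atomtyp': ['H', 'C', 'N', 'O']}
#     trainer = alaniensembletrainer('/path/train_root/', netdict, ipt_builder, '/path/h5dir/', Nn=8)
#     trainer.build_strided_training_cache(Nblocks=16, Nvalid=1, Ntest=1, forces=True)
#     trainer.train_ensemble(GPUList=[0, 1, 2, 3])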
| 41.825763 | 288 | 0.470875 |
247970d5deb8e0d2edd1cdde14bc56163e22c0b6 | 3,918 | py | Python | probe/modules/metadata/yara/plugin.py | quarkslab/irma | 29d8baa4e27bacaf7aa9dd570c16e5268ae6237c | [
"Apache-2.0"
] | 248 | 2015-01-08T09:36:44.000Z | 2022-01-12T10:29:21.000Z | probe/modules/metadata/yara/plugin.py | quarkslab/irma | 29d8baa4e27bacaf7aa9dd570c16e5268ae6237c | [
"Apache-2.0"
] | 50 | 2015-01-09T08:31:57.000Z | 2022-03-30T10:41:13.000Z | probe/modules/metadata/yara/plugin.py | quarkslab/irma | 29d8baa4e27bacaf7aa9dd570c16e5268ae6237c | [
"Apache-2.0"
] | 74 | 2015-01-05T09:11:21.000Z | 2022-03-29T02:16:54.000Z | #
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import os
import sys
from configparser import ConfigParser
from datetime import datetime
from irma.common.utils.utils import timestamp
from irma.common.plugins import PluginBase
from irma.common.plugins import ModuleDependency, FileDependency
from irma.common.plugin_result import PluginResult
from irma.common.base.utils import IrmaProbeType
class YaraPlugin(PluginBase):
class YaraResult:
ERROR = -1
FOUND = 1
NOT_FOUND = 0
# =================
# plugin metadata
# =================
_plugin_name_ = "Yara"
_plugin_display_name_ = "Yara"
_plugin_author_ = "Bryan Nolen @BryanNolen"
_plugin_version_ = "1.0.0"
_plugin_category_ = IrmaProbeType.metadata
_plugin_description_ = "Plugin to run files against yara rules"
_plugin_dependencies_ = [
ModuleDependency(
'yara',
help='Requires yara 3 or greater and matching yara-python'
),
FileDependency(
os.path.join(os.path.dirname(__file__), 'config.ini')
)
]
# =============
# constructor
# =============
def __init__(self, rule_path=None):
# load default configuration file
config = ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'config.ini'))
# override default values if specified
if rule_path is None:
self.rule_path = config.get('Yara', 'rule_path')
else:
self.rule_path = rule_path
self.rules = sys.modules['yara'].compile(filepath=self.rule_path)
def get_file_report(self, filename):
try:
results = (False, self.rules.match(filename, timeout=60))
except Exception as e:
results = (True, type(e).__name__ + " : " + str(e))
finally:
return results
# ==================
# probe interfaces
# ==================
def run(self, paths):
results = PluginResult(name=type(self).plugin_display_name,
type=type(self).plugin_category,
version=None)
try:
# get the report, automatically append results
started = timestamp(datetime.utcnow())
(error_raised, response) = self.get_file_report(paths)
stopped = timestamp(datetime.utcnow())
results.duration = stopped - started
# check eventually for errors
if error_raised:
results.status = self.YaraResult.ERROR
results.error = response
elif response.__len__() == 0:
results.status = self.YaraResult.NOT_FOUND
else:
results.status = self.YaraResult.FOUND
match_string = ""
matches = []
            if results.status == self.YaraResult.FOUND:
for match in response:
match_string = "{0}, {1}".format(match_string, match)
matches.append("{0!s}".format(match))
results.results = None
if not error_raised:
# results.results = {'Matches': "{0}".format(match_string)}
results.results = {'Matches': matches}
except Exception as e:
results.status = self.YaraResult.ERROR
results.results = type(e).__name__ + " : " + str(e)
return results
| 33.487179 | 75 | 0.595712 |
5c529be7de1e08e946abd08ba3b7a5b69b284f14 | 76 | py | Python | flux/__init__.py | j-fdion/cyme | ee9d1c106bc27e4514d661285e2515f19c3db6e9 | [
"MIT"
] | null | null | null | flux/__init__.py | j-fdion/cyme | ee9d1c106bc27e4514d661285e2515f19c3db6e9 | [
"MIT"
] | null | null | null | flux/__init__.py | j-fdion/cyme | ee9d1c106bc27e4514d661285e2515f19c3db6e9 | [
"MIT"
] | null | null | null | """ Outils lies au flux de materiel. """
from . import machine, bt_machine
| 19 | 40 | 0.697368 |
0160bc3fe6da8685fd8bbf8d88c3da9f60954c6b | 6,361 | py | Python | lib/models/multiple_correct_mcq_multee_esim.py | StonyBrookNLP/multee | ab78ca6708bee810c584e83e5ec61ab324bf9802 | [
"Apache-2.0"
] | 27 | 2019-04-24T00:31:18.000Z | 2021-07-08T07:54:47.000Z | lib/models/multiple_correct_mcq_multee_esim.py | StonyBrookNLP/multee | ab78ca6708bee810c584e83e5ec61ab324bf9802 | [
"Apache-2.0"
] | 1 | 2019-05-10T15:15:05.000Z | 2019-06-07T00:42:03.000Z | lib/models/multiple_correct_mcq_multee_esim.py | StonyBrookNLP/multee | ab78ca6708bee810c584e83e5ec61ab324bf9802 | [
"Apache-2.0"
] | 5 | 2019-04-24T05:55:32.000Z | 2021-06-16T13:28:51.000Z | from typing import Dict, Optional, List
import copy
import re
from overrides import overrides
import torch
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward
from allennlp.modules import Seq2SeqEncoder, SimilarityFunction, TextFieldEmbedder
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.modules.similarity_functions.dot_product import DotProductSimilarity
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn.util import get_text_field_mask
from allennlp.training.metrics import CategoricalAccuracy, F1Measure
from lib.modules import CoverageLoss
from lib.nn.util import unbind_tensor_dict
from lib.models.multee_esim import MulteeEsim
@Model.register("multiple_correct_mcq_multee_esim")
class MultipleCorrectMcqMulteeEsim(MulteeEsim):
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
encoder: Seq2SeqEncoder,
projection_feedforward: FeedForward,
inference_encoder: Seq2SeqEncoder,
output_feedforward: FeedForward,
output_logit: FeedForward,
final_feedforward: FeedForward,
coverage_loss: CoverageLoss,
similarity_function: SimilarityFunction = DotProductSimilarity(),
dropout: float = 0.5,
contextualize_pair_comparators: bool = False,
pair_context_encoder: Seq2SeqEncoder = None,
pair_feedforward: FeedForward = None,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super().__init__(vocab=vocab,
text_field_embedder=text_field_embedder,
encoder=encoder,
similarity_function=similarity_function,
projection_feedforward=projection_feedforward,
inference_encoder=inference_encoder,
output_feedforward=output_feedforward,
output_logit=output_logit,
final_feedforward=final_feedforward,
coverage_loss=coverage_loss,
contextualize_pair_comparators=contextualize_pair_comparators,
pair_context_encoder=pair_context_encoder,
pair_feedforward=pair_feedforward,
dropout=dropout,
initializer=initializer,
regularizer=regularizer)
self._ignore_index = -1
self._answer_loss = torch.nn.CrossEntropyLoss(ignore_index=self._ignore_index)
self._coverage_loss = coverage_loss
self._accuracy = CategoricalAccuracy()
self._entailment_f1 = F1Measure(self._label2idx["entailment"])
@overrides
def forward(self, # type: ignore
premises: Dict[str, torch.LongTensor],
hypotheses: Dict[str, torch.LongTensor],
paragraph: Dict[str, torch.LongTensor],
answer_correctness_mask: torch.IntTensor = None,
relevance_presence_mask: torch.Tensor = None) -> Dict[str, torch.Tensor]:
hypothesis_list = unbind_tensor_dict(hypotheses, dim=1)
label_logits = []
premises_attentions = []
premises_aggregation_attentions = []
coverage_losses = []
for hypothesis in hypothesis_list:
output_dict = super().forward(premises=premises, hypothesis=hypothesis,
paragraph=paragraph, relevance_presence_mask=relevance_presence_mask)
individual_logit = output_dict["label_logits"]
label_logits.append(individual_logit)
premises_attention = output_dict["premises_attention"]
premises_attentions.append(premises_attention)
premises_aggregation_attention = output_dict.get("premises_aggregation_attention", None)
premises_aggregation_attentions.append(premises_aggregation_attention)
if relevance_presence_mask is not None:
coverage_loss = output_dict["coverage_loss"]
coverage_losses.append(coverage_loss)
label_logits = torch.stack(label_logits, dim=1)
premises_attentions = torch.stack(premises_attentions, dim=1)
premises_aggregation_attentions = torch.stack(premises_aggregation_attentions, dim=1)
if relevance_presence_mask is not None:
coverage_losses = torch.stack(coverage_losses, dim=0)
label_probs = torch.nn.functional.softmax(label_logits, dim=-1)
output_dict = {"label_logits": label_logits[:, :, self._label2idx["entailment"]],
"label_probs": label_probs[:, :, self._label2idx["entailment"]],
"premises_attentions": premises_attentions,
"premises_aggregation_attentions": premises_aggregation_attentions}
if answer_correctness_mask is not None:
label = ((answer_correctness_mask == 1).long()*self._label2idx["entailment"]
+ (answer_correctness_mask == 0).long()*self._label2idx["neutral"]
+ (answer_correctness_mask == -1).long()*self._ignore_index)
loss = self._answer_loss(label_logits.reshape((-1, label_logits.shape[-1])), label.reshape((-1)))
# coverage loss
if relevance_presence_mask is not None:
loss += coverage_losses.mean()
output_dict["loss"] = loss
mask = answer_correctness_mask != -1
self._accuracy(label_logits, label, mask)
self._entailment_f1(label_logits, label, mask)
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
accuracy_metric = self._accuracy.get_metric(reset)
entailment_f1_metric = self._entailment_f1.get_metric(reset)
return {'_accuracy': accuracy_metric,
'_entailment_prec': entailment_f1_metric[0],
'_entailment_rec': entailment_f1_metric[1],
'entailment_f1': entailment_f1_metric[2]}
| 49.310078 | 109 | 0.656972 |
36409369acd3a63587e34036e6239a81008ce0f9 | 6,077 | py | Python | src/m8_still_more_mutation.py | callahrs/SequencesAndMutation | 88bbb3fc44984f5a029f75f4d7080fd274662200 | [
"MIT"
] | null | null | null | src/m8_still_more_mutation.py | callahrs/SequencesAndMutation | 88bbb3fc44984f5a029f75f4d7080fd274662200 | [
"MIT"
] | null | null | null | src/m8_still_more_mutation.py | callahrs/SequencesAndMutation | 88bbb3fc44984f5a029f75f4d7080fd274662200 | [
"MIT"
] | null | null | null | """
This module lets you practice MUTATION of lists.
In this module, you mutate by DELETING elements of a list.
Authors: David Mutchler, Amanda Stouder, Chandan Rupakheti, Katie Dion,
Claude Anderson, Delvin Defoe, Curt Clifton, their colleagues,
and Riley Callahan.
""" # Done: 1. PUT YOUR NAME IN THE ABOVE LINE.
import m6_mutation
def main():
run_test_RETURN_delete_negatives()
run_test_MUTATE_delete_negatives()
def run_test_RETURN_delete_negatives():
""" Tests the RETURN_delete_negatives function. """
print()
print('--------------------------------')
print('Testing RETURN_delete_negatives:')
print('--------------------------------')
# ------------------------------------------------------------------
# Test 1:
# ------------------------------------------------------------------
run_test_number = 1
original_argument = [-30.2, 50, 12.5, -1, -5, 8, 0]
correct_argument_value_after_function_call = original_argument.copy()
correct_returned_value = [50, 12.5, 8, 0]
m6_mutation.run_test(RETURN_delete_negatives,
original_argument,
run_test_number,
correct_returned_value,
correct_returned_value)
# ------------------------------------------------------------------
# Test 2:
# ------------------------------------------------------------------
run_test_number = 2
original_argument = [2, 0, -9, 1, -30]
correct_argument_value_after_function_call = original_argument.copy()
correct_returned_value = [2, 0, 1]
m6_mutation.run_test(RETURN_delete_negatives,
original_argument,
run_test_number,
correct_returned_value,
correct_returned_value)
def RETURN_delete_negatives(numbers):
"""
Returns a NEW list that is the same as the given list of numbers,
but with each negative number in the list DELETED from the list.
For example, if the given list is [-30.2, 50, 12.5, -1, -5, 8, 0].
then the returned list is the NEW list [50, 12.5, 8, 0].
This function must NOT mutate the given list.
Precondition:
:type numbers: list
where the list is a list of numbers.
"""
# TODO: 2. First, READ THE ABOVE TEST CODE.
# Make sure that you understand it.
# In particular, note how it calls the run_test function
# from the module m6_mutation by using the notation:
# m6_mutation.run_test(...)
# Then, IMPLEMENT and test THIS FUNCTION
# (using the above code for testing).
newlist = numbers
count = 0
for k in range(len(numbers)):
if numbers[k - count] < 0:
del newlist[k - count]
count = count + 1
return newlist
def run_test_MUTATE_delete_negatives():
""" Tests the MUTATE_delete_negatives function. """
print()
print('--------------------------------')
print('Testing MUTATE_delete_negatives:')
print('--------------------------------')
# ------------------------------------------------------------------
# Test 1:
# ------------------------------------------------------------------
run_test_number = 1
original_argument = [-30.2, 50, 12.5, -1, -5, 8, 0]
correct_argument_value_after_function_call = [50, 12.5, 8, 0]
correct_returned_value = None
m6_mutation.run_test(MUTATE_delete_negatives,
original_argument,
run_test_number,
correct_returned_value,
correct_argument_value_after_function_call)
# ------------------------------------------------------------------
# Test 2:
# ------------------------------------------------------------------
run_test_number = 2
original_argument = [2, 0, -9, 1, -30]
correct_argument_value_after_function_call = [2, 0, 1]
correct_returned_value = None
m6_mutation.run_test(MUTATE_delete_negatives,
original_argument,
run_test_number,
correct_returned_value,
correct_argument_value_after_function_call)
def MUTATE_delete_negatives(numbers):
"""
MUTATES the given list of numbers so that each negative number
in the list is DELETED from the list.
For example, if the given list is [-30.2, 50, 12.5, -1, -5, 8, 0].
then that list is MUTATED to become [50, 12.5, 8, 0].
This function MAY use ONE additional list beyond the given list
(but see if you can solve the problem WITHOUT any additional lists).
The function must NOT return anything (other than the default None).
Precondition: The argument is a list of numbers.
"""
# Done: 3. First, READ THE ABOVE TEST CODE.
# Make sure that you understand it.
# In particular, note how it calls the run_test function
# from the module m6_mutation by using the notation:
# m6_mutation.run_test(...)
# Then, IMPLEMENT and test THIS FUNCTION
# (using the above code for testing).
#
# HINT: This problem is MUCH harder than it would appear,
# for various quite-subtle reasons.
# Take a stab at this problem,
# then ask for help as needed.
# HINT #2: Why might it be wise to start at the end and
# work backwards through the list to the beginning?
count = 0
for k in range(len(numbers)):
if numbers[k - count] < 0:
del numbers[k - count]
count = count + 1
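# A small, illustrative alternative (not called by the tests above) that follows HINT #2:
# iterating from the END of the list toward the beginning means a deletion never shifts the
# index of any element still to be visited, so no deletion counter is needed.
def MUTATE_delete_negatives_backwards_sketch(numbers):
    """ Same contract as MUTATE_delete_negatives; shown only for comparison. """
    for k in range(len(numbers) - 1, -1, -1):
        if numbers[k] < 0:
            del numbers[k]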
# ----------------------------------------------------------------------
# If this module is running at the top level (as opposed to being
# imported by another module), then call the 'main' function.
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
| 37.745342 | 73 | 0.530031 |
22f0dc850272634d0dd10d548d658874d07cb3a6 | 8,418 | py | Python | epson_printer/epsonprinter.py | alex071995/python-epson-printer | 7d89b2f21bc76d2cc4d5ad548e19a356ca92fbc5 | [
"MIT"
] | 48 | 2015-02-13T19:58:40.000Z | 2022-01-13T21:03:35.000Z | epson_printer/epsonprinter.py | alex071995/python-epson-printer | 7d89b2f21bc76d2cc4d5ad548e19a356ca92fbc5 | [
"MIT"
] | 12 | 2015-01-03T11:24:48.000Z | 2018-05-08T13:38:23.000Z | epson_printer/epsonprinter.py | alex071995/python-epson-printer | 7d89b2f21bc76d2cc4d5ad548e19a356ca92fbc5 | [
"MIT"
] | 25 | 2015-02-06T21:50:01.000Z | 2022-01-13T22:25:13.000Z | from __future__ import division
import math
import io
import base64
import numpy as np
import usb.core
from functools import wraps, reduce
from PIL import Image
ESC = 27
GS = 29
FULL_PAPER_CUT = [
GS,
86, # V
0] # \0
UNDERLINE_OFF = [
ESC,
45, # -
0]
BOLD_ON = [
ESC,
69, # E
1]
BOLD_OFF = [
ESC,
69, # E
0]
DEFAULT_LINE_SPACING = [
ESC,
50] # 2
CENTER = [
ESC,
97, # a
1]
LEFT_JUSTIFIED = [
ESC,
97, # a
0]
RIGHT_JUSTIFIED = [
ESC,
97, # a
2]
def linefeed(lines=1):
return [
ESC, # ESC
100, # d
lines]
def underline_on(weight):
return [
ESC,
45, # -
weight]
def set_line_spacing(dots):
return [
ESC,
51, # 3
dots]
def set_text_size(width_magnification, height_magnification):
if width_magnification < 0 or width_magnification > 7:
raise Exception("Width magnification should be between 0(x1) and 7(x8)")
if height_magnification < 0 or height_magnification > 7:
raise Exception("Height magnification should be between 0(x1) and 7(x8)")
n = 16 * width_magnification + height_magnification
byte_array = [
GS,
33, # !
n]
return byte_array
def set_print_speed(speed):
byte_array = [
GS, # GS
40, # (
75, # K
2,
0,
50,
speed]
return byte_array
class PrintableImage:
"""
Container for image data ready to be sent to the printer
The transformation from bitmap data to PrintableImage data is explained at the link below:
http://nicholas.piasecki.name/blog/2009/12/sending-a-bit-image-to-an-epson-tm-t88iii-receipt-printer-using-c-and-escpos/
"""
def __init__(self, data, height):
self.data = data
self.height = height
@classmethod
def from_image(cls, image):
"""
Create a PrintableImage from a PIL Image
:param image: a PIL Image
:return:
"""
(w, h) = image.size
# Thermal paper is 512 pixels wide
if w > 512:
ratio = 512. / w
h = int(h * ratio)
image = image.resize((512, h), Image.ANTIALIAS)
if image.mode != '1':
image = image.convert('1')
pixels = np.array(list(image.getdata())).reshape(h, w)
# Add white pixels so that image fits into bytes
extra_rows = int(math.ceil(h / 24)) * 24 - h
extra_pixels = np.ones((extra_rows, w), dtype=bool)
pixels = np.vstack((pixels, extra_pixels))
h += extra_rows
        nb_stripes = h // 24  # integer count: h was padded above to a multiple of 24
pixels = pixels.reshape(nb_stripes, 24, w).swapaxes(1, 2).reshape(-1, 8)
nh = int(w / 256)
nl = w % 256
data = []
pixels = np.invert(np.packbits(pixels))
stripes = np.split(pixels, nb_stripes)
for stripe in stripes:
data.extend([
ESC,
42, # *
33, # double density mode
nl,
nh])
data.extend(stripe)
data.extend([
27, # ESC
74, # J
48])
# account for double density mode
height = h * 2
return cls(data, height)
def append(self, other):
"""
Append a Printable Image at the end of the current instance.
:param other: another PrintableImage
:return: PrintableImage containing data from both self and other
"""
self.data.extend(other.data)
self.height = self.height + other.height
return self
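# A minimal, hypothetical usage sketch (the image file name is a placeholder):
#
#     logo = Image.open('logo.png')
#     printable = PrintableImage.from_image(logo)
#     printable.append(PrintableImage.from_image(logo))   # chain several images into one job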
class EpsonPrinter:
""" An Epson thermal printer based on ESC/POS"""
printer = None
def __init__(self, id_vendor, id_product, out_ep=0x01):
"""
@param id_vendor : Vendor ID
@param id_product : Product ID
@param interface : USB device interface
@param in_ep : Input end point
@param out_ep : Output end point
"""
self.out_ep = out_ep
# Search device on USB tree and set is as printer
self.printer = usb.core.find(idVendor=id_vendor, idProduct=id_product)
if self.printer is None:
raise ValueError("Printer not found. Make sure the cable is plugged in.")
if self.printer.is_kernel_driver_active(0):
try:
self.printer.detach_kernel_driver(0)
except usb.core.USBError as e:
print("Could not detatch kernel driver: %s" % str(e))
try:
self.printer.set_configuration()
self.printer.reset()
except usb.core.USBError as e:
print("Could not set configuration: %s" % str(e))
def write_this(func):
"""
Decorator that writes the bytes to the wire
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
byte_array = func(self, *args, **kwargs)
self.write_bytes(byte_array)
return wrapper
def write_bytes(self, byte_array):
msg = ''.join([chr(b) for b in byte_array])
self.write(msg)
def write(self, msg):
self.printer.write(self.out_ep, msg, timeout=20000)
def print_text(self, msg):
self.write(msg)
@write_this
def linefeed(self, lines=1):
"""Feed by the specified number of lines."""
return linefeed(lines)
@write_this
def cut(self):
"""Full paper cut."""
return FULL_PAPER_CUT
@write_this
def print_image(self, printable_image):
dyl = printable_image.height % 256
dyh = int(printable_image.height / 256)
# Set the size of the print area
byte_array = [
ESC,
87, # W
46, # xL
0, # xH
0, # yL
0, # yH
0, # dxL
2, # dxH
dyl,
dyh]
# Enter page mode
byte_array.extend([
27,
76])
byte_array.extend(printable_image.data)
# Return to standard mode
byte_array.append(12)
return byte_array
def print_images(self, *printable_images):
"""
This method allows printing several images in one shot. This is useful if the client code does not want the
printer to make pause during printing
"""
printable_image = reduce(lambda x, y: x.append(y), list(printable_images))
self.print_image(printable_image)
def print_image_from_file(self, image_file, rotate=False):
image = Image.open(image_file)
if rotate:
image = image.rotate(180)
printable_image = PrintableImage.from_image(image)
self.print_image(printable_image)
def print_image_from_buffer(self, data, rotate=False):
image = Image.open(io.BytesIO(base64.b64decode(data)))
if rotate:
image = image.rotate(180)
printable_image = PrintableImage.from_image(image)
self.print_image(printable_image)
@write_this
def underline_on(self, weight=1):
""" Activate underline
weight = 0 1-dot-width
weight = 1 2-dots-width
"""
return underline_on(weight)
@write_this
def underline_off(self):
return UNDERLINE_OFF
@write_this
def bold_on(self):
return BOLD_ON
@write_this
def bold_off(self):
return BOLD_OFF
@write_this
def set_line_spacing(self, dots):
"""Set line spacing with a given number of dots. Default is 30."""
return set_line_spacing(dots)
@write_this
def set_default_line_spacing(self):
return DEFAULT_LINE_SPACING
@write_this
def set_text_size(self, width_magnification, height_magnification):
"""Set the text size. width_magnification and height_magnification can
be between 0(x1) and 7(x8).
"""
return set_text_size(width_magnification, height_magnification)
@write_this
def center(self):
return CENTER
@write_this
def left_justified(self):
return LEFT_JUSTIFIED
@write_this
def right_justified(self):
return RIGHT_JUSTIFIED
@write_this
def set_print_speed(self, speed):
return set_print_speed(speed)
| 25.432024 | 124 | 0.570919 |
6c8cf9e1323a6a7451e292a1d5c7c3ed8dc2528c | 176 | py | Python | scripts/item/consume_2434601.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | [
"MIT"
] | null | null | null | scripts/item/consume_2434601.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | [
"MIT"
] | null | null | null | scripts/item/consume_2434601.py | Snewmy/swordie | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | [
"MIT"
] | null | null | null | # Transparent Damage Skins
success = sm.addDamageSkin(2434601)
if success:
sm.chat("The Transparent Damage Skins has been added to your account's damage skin collection.")
| 35.2 | 100 | 0.778409 |
05aa638fcb804a3f163b28a5c089553d716d2d19 | 1,287 | py | Python | gcloud/taskflow3/migrations/0002_taskflowinstance_template_source.py | gangh/bk-sops | 29f4b4915be42650c2eeee637e0cf798e4066f09 | [
"Apache-2.0"
] | 1 | 2019-12-23T07:23:35.000Z | 2019-12-23T07:23:35.000Z | gcloud/taskflow3/migrations/0002_taskflowinstance_template_source.py | bk-sops/bk-sops | 9f5950b13473bf7b5032528b20016b7a571bb3cd | [
"Apache-2.0"
] | 9 | 2020-02-12T03:15:49.000Z | 2021-06-10T22:04:51.000Z | gcloud/taskflow3/migrations/0002_taskflowinstance_template_source.py | bk-sops/bk-sops | 9f5950b13473bf7b5032528b20016b7a571bb3cd | [
"Apache-2.0"
] | 1 | 2022-01-17T11:32:05.000Z | 2022-01-17T11:32:05.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('taskflow3', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='taskflowinstance',
name='template_source',
field=models.CharField(default=b'business', max_length=32, verbose_name='\u6d41\u7a0b\u6a21\u677f\u6765\u6e90', choices=[(b'business', '\u4e1a\u52a1\u6d41\u7a0b'), (b'common', '\u516c\u5171\u6d41\u7a0b')]),
),
]
| 40.21875 | 218 | 0.729604 |
a63e9b71542f477b6076f63231e379cc54465f49 | 42,617 | py | Python | acoular/grids.py | ishine/acoular | 4d790517adb38dc012b1f06966262b94f3625358 | [
"BSD-3-Clause"
] | 294 | 2015-03-24T09:19:12.000Z | 2022-03-11T02:59:11.000Z | acoular/grids.py | ishine/acoular | 4d790517adb38dc012b1f06966262b94f3625358 | [
"BSD-3-Clause"
] | 45 | 2015-11-06T15:15:22.000Z | 2022-03-18T07:05:30.000Z | acoular/grids.py | ishine/acoular | 4d790517adb38dc012b1f06966262b94f3625358 | [
"BSD-3-Clause"
] | 100 | 2015-05-05T15:18:57.000Z | 2022-03-21T09:48:05.000Z | # -*- coding: utf-8 -*-
#pylint: disable-msg=E0611, E1101, C0103, R0901, R0902, R0903, R0904, W0232
#------------------------------------------------------------------------------
# Copyright (c) 2007-2021, Acoular Development Team.
#------------------------------------------------------------------------------
"""Implements support for two- and threedimensional grids
.. autosummary::
:toctree: generated/
Grid
RectGrid
RectGrid3D
ImportGrid
LineGrid
MergeGrid
Sector
RectSector
CircSector
PolySector
ConvexSector
MultiSector
"""
# imports from other packages
from numpy import mgrid, s_, array, arange, isscalar, absolute, ones, argmin,\
    zeros, where, asfarray, concatenate, sum, ma, ones_like, inf, copysign, fabs, append, \
    tile, newaxis
from numpy.linalg import norm
from traits.api import HasPrivateTraits, Float, Property, Any, \
    property_depends_on, cached_property, Bool, List, Instance, File, on_trait_change, \
CArray, Tuple, Int
from traits.trait_errors import TraitError
#from matplotlib.path import Path
from scipy.spatial import Delaunay
from os import path
from .internal import digest
def in_hull(p, hull, border= True, tol = 0 ):
"""
test if points in `p` are in `hull`
`p` should be a `NxK` coordinates of `N` points in `K` dimensions
`hull` is either a scipy.spatial.Delaunay object or the `MxK` array of the
coordinates of `M` points in `K`dimensions for which Delaunay triangulation
will be computed
"""
if not isinstance(hull,Delaunay):
hull = Delaunay(hull)
if border:
return hull.find_simplex(p,tol = tol)>=0
else:
return hull.find_simplex(p,tol = tol)>0
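# Minimal usage sketch (illustrative, not part of the original module):
#   square = array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
#   in_hull(array([[0.5, 0.5], [2.0, 2.0]]), square)   # -> [True, False]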
def _det(xvert, yvert):
'''Compute twice the area of the triangle defined by points with using
determinant formula.
Input parameters:
xvert -- A vector of nodal x-coords (array-like).
yvert -- A vector of nodal y-coords (array-like).
Output parameters:
Twice the area of the triangle defined by the points.
Notes:
_det is positive if points define polygon in anticlockwise order.
_det is negative if points define polygon in clockwise order.
_det is zero if at least two of the points are concident or if
all points are collinear.
'''
xvert = asfarray(xvert)
yvert = asfarray(yvert)
x_prev = concatenate(([xvert[-1]], xvert[:-1]))
y_prev = concatenate(([yvert[-1]], yvert[:-1]))
return sum(yvert * x_prev - xvert * y_prev, axis=0)
class Polygon:
'''Polygon object.
Input parameters:
x -- A sequence of nodal x-coords.
y -- A sequence of nodal y-coords.
'''
def __init__(self, x, y):
if len(x) != len(y):
raise IndexError('x and y must be equally sized.')
self.x = asfarray(x)
self.y = asfarray(y)
# Closes the polygon if were open
x1, y1 = x[0], y[0]
xn, yn = x[-1], y[-1]
if x1 != xn or y1 != yn:
self.x = concatenate((self.x, [x1]))
self.y = concatenate((self.y, [y1]))
# Anti-clockwise coordinates
if _det(self.x, self.y) < 0:
self.x = self.x[::-1]
self.y = self.y[::-1]
def is_inside(self, xpoint, ypoint, smalld=1e-12):
'''Check if point is inside a general polygon.
Input parameters:
xpoint -- The x-coord of the point to be tested.
ypoint -- The y-coords of the point to be tested.
smalld -- A small float number.
xpoint and ypoint could be scalars or array-like sequences.
Output parameters:
mindst -- The distance from the point to the nearest point of the
polygon.
If mindst < 0 then point is outside the polygon.
If mindst = 0 then point in on a side of the polygon.
If mindst > 0 then point is inside the polygon.
Notes:
An improved version of the algorithm of Nordbeck and Rydstedt.
REF: SLOAN, S.W. (1985): A point-in-polygon program. Adv. Eng.
Software, Vol 7, No. 1, pp 45-47.
'''
xpoint = asfarray(xpoint)
ypoint = asfarray(ypoint)
# Scalar to array
if xpoint.shape is tuple():
xpoint = array([xpoint], dtype=float)
ypoint = array([ypoint], dtype=float)
scalar = True
else:
scalar = False
# Check consistency
if xpoint.shape != ypoint.shape:
raise IndexError('x and y has different shapes')
# If snear = True: Dist to nearest side < nearest vertex
# If snear = False: Dist to nearest vertex < nearest side
snear = ma.masked_all(xpoint.shape, dtype=bool)
# Initialize arrays
mindst = ones_like(xpoint, dtype=float) * inf
j = ma.masked_all(xpoint.shape, dtype=int)
x = self.x
y = self.y
n = len(x) - 1 # Number of sides/vertices defining the polygon
# Loop over each side defining polygon
for i in range(n):
d = ones_like(xpoint, dtype=float) * inf
# Start of side has coords (x1, y1)
# End of side has coords (x2, y2)
# Point has coords (xpoint, ypoint)
x1 = x[i]
y1 = y[i]
x21 = x[i + 1] - x1
y21 = y[i + 1] - y1
x1p = x1 - xpoint
y1p = y1 - ypoint
# Points on infinite line defined by
# x = x1 + t * (x1 - x2)
# y = y1 + t * (y1 - y2)
# where
# t = 0 at (x1, y1)
# t = 1 at (x2, y2)
# Find where normal passing through (xpoint, ypoint) intersects
# infinite line
t = -(x1p * x21 + y1p * y21) / (x21 ** 2 + y21 ** 2)
tlt0 = t < 0
tle1 = (0 <= t) & (t <= 1)
# Normal intersects side
d[tle1] = ((x1p[tle1] + t[tle1] * x21) ** 2 +
(y1p[tle1] + t[tle1] * y21) ** 2)
# Normal does not intersects side
# Point is closest to vertex (x1, y1)
# Compute square of distance to this vertex
d[tlt0] = x1p[tlt0] ** 2 + y1p[tlt0] ** 2
# Store distances
mask = d < mindst
mindst[mask] = d[mask]
j[mask] = i
# Point is closer to (x1, y1) than any other vertex or side
snear[mask & tlt0] = False
# Point is closer to this side than to any other side or vertex
snear[mask & tle1] = True
if ma.count(snear) != snear.size:
raise IndexError('Error computing distances')
mindst **= 0.5
# Point is closer to its nearest vertex than its nearest side, check if
# nearest vertex is concave.
# If the nearest vertex is concave then point is inside the polygon,
# else the point is outside the polygon.
jo = j.copy()
jo[j == 0] -= 1
area = _det([x[j + 1], x[j], x[jo - 1]], [y[j + 1], y[j], y[jo - 1]])
mindst[~snear] = copysign(mindst, area)[~snear]
# Point is closer to its nearest side than to its nearest vertex, check
# if point is to left or right of this side.
# If point is to left of side it is inside polygon, else point is
# outside polygon.
area = _det([x[j], x[j + 1], xpoint], [y[j], y[j + 1], ypoint])
mindst[snear] = copysign(mindst, area)[snear]
# Point is on side of polygon
mindst[fabs(mindst) < smalld] = 0
# If input values were scalar then the output should be too
if scalar:
mindst = float(mindst)
return mindst
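    # Sign convention example (illustrative, not part of the original module):
    #   poly = Polygon([0, 1, 1, 0], [0, 0, 1, 1])
    #   poly.is_inside(0.5, 0.5)   # > 0  -> inside
    #   poly.is_inside(0.5, 0.0)   # == 0 -> on the border
    #   poly.is_inside(2.0, 2.0)   # < 0  -> outside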
class Grid( HasPrivateTraits ):
"""
Virtual base class for grid geometries.
Defines the common interface for all grid classes and
provides facilities to query grid properties and related data. This class
    may be used as a base for specialized grid implementations. It should not
be used directly as it contains no real functionality.
"""
#: Overall number of grid points. Readonly; is set automatically when
#: other grid defining properties are set
size = Property(desc="overall number of grid points")
#: Shape of grid. Readonly, gives the shape as tuple, useful for cartesian
#: grids
shape = Property(desc="grid shape as tuple")
#: Grid positions as (3, :attr:`size`) array of floats, without invalid
#: microphones; readonly.
gpos = Property(desc="x, y, z positions of grid points")
# internal identifier
digest = Property
def _get_digest( self ):
return ''
# 'digest' is a placeholder for other properties in derived classes,
# necessary to trigger the depends on mechanism
@property_depends_on('digest')
def _get_size ( self ):
return 1
# 'digest' is a placeholder for other properties in derived classes
@property_depends_on('digest')
def _get_shape ( self ):
return (1, 1)
@property_depends_on('digest')
def _get_gpos( self ):
return array([[0.], [0.], [0.]])
def pos ( self ):
"""
Calculates grid co-ordinates.
Deprecated; use :attr:`gpos` attribute instead.
Returns
-------
array of floats of shape (3, :attr:`size`)
The grid point x, y, z-coordinates in one array.
"""
return self.gpos# array([[0.], [0.], [0.]])
def subdomain (self, sector) :
"""
Queries the indices for a subdomain in the grid.
Allows arbitrary subdomains of type :class:`Sector`
Parameters
----------
sector : :class:`Sector`
Sector describing the subdomain.
Returns
-------
2-tuple of arrays of integers or of numpy slice objects
The indices that can be used to mask/select the grid subdomain from
an array with the same shape as the grid.
"""
xpos = self.gpos
# construct grid-shaped array with "True" entries where sector is
xyi = sector.contains(xpos).reshape(self.shape)
# return indices of "True" entries
return where(xyi)
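    # Hypothetical usage (not part of the original module): with a concrete
    # grid such as RectGrid below, grid.subdomain(CircSector(x=0, y=0, r=0.3))
    # yields index arrays selecting all grid points inside that circle from a
    # result array shaped like the grid.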
class RectGrid( Grid ):
"""
Provides a cartesian 2D grid for the beamforming results.
The grid has square or nearly square cells and is on a plane perpendicular
to the z-axis. It is defined by lower and upper x- and y-limits and the
z co-ordinate.
"""
#: The lower x-limit that defines the grid, defaults to -1.
x_min = Float(-1.0,
desc="minimum x-value")
#: The upper x-limit that defines the grid, defaults to 1.
x_max = Float(1.0,
desc="maximum x-value")
#: The lower y-limit that defines the grid, defaults to -1.
y_min = Float(-1.0,
desc="minimum y-value")
#: The upper y-limit that defines the grid, defaults to 1.
y_max = Float(1.0,
desc="maximum y-value")
#: The z co-ordinate that defines the grid, defaults to 1.
z = Float(1.0,
desc="position on z-axis")
#: The cell side length for the grid, defaults to 0.1.
increment = Float(0.1,
desc="step size")
#: Number of grid points along x-axis, readonly.
nxsteps = Property(
desc="number of grid points along x-axis")
#: Number of grid points along y-axis, readonly.
nysteps = Property(
desc="number of grid points along y-axis")
# internal identifier
digest = Property(
depends_on = ['x_min', 'x_max', 'y_min', 'y_max', 'z', 'increment']
)
@property_depends_on('nxsteps, nysteps')
def _get_size ( self ):
return self.nxsteps*self.nysteps
@property_depends_on('nxsteps, nysteps')
def _get_shape ( self ):
return (self.nxsteps, self.nysteps)
@property_depends_on('x_min, x_max, increment')
def _get_nxsteps ( self ):
i = abs(self.increment)
if i != 0:
return int(round((abs(self.x_max-self.x_min)+i)/i))
return 1
@property_depends_on('y_min, y_max, increment')
def _get_nysteps ( self ):
i = abs(self.increment)
if i != 0:
return int(round((abs(self.y_max-self.y_min)+i)/i))
return 1
@cached_property
def _get_digest( self ):
return digest( self )
@property_depends_on('x_min, x_max, y_min, y_max, increment')
def _get_gpos ( self ):
"""
Calculates grid co-ordinates.
Returns
-------
array of floats of shape (3, :attr:`~Grid.size`)
The grid point x, y, z-coordinates in one array.
"""
bpos = mgrid[self.x_min:self.x_max:self.nxsteps*1j, \
self.y_min:self.y_max:self.nysteps*1j, \
self.z:self.z+0.1]
bpos.resize((3, self.size))
return bpos
def index ( self, x, y ):
"""
Queries the indices for a grid point near a certain co-ordinate.
This can be used to query results or co-ordinates at/near a certain
co-ordinate.
Parameters
----------
x, y : float
The co-ordinates for which the indices are queried.
Returns
-------
2-tuple of integers
The indices that give the grid point nearest to the given x, y
co-ordinates from an array with the same shape as the grid.
"""
if x < self.x_min or x > self.x_max:
raise ValueError("x-value out of range")
if y < self.y_min or y > self.y_max:
raise ValueError("y-value out of range")
xi = int((x-self.x_min)/self.increment+0.5)
yi = int((y-self.y_min)/self.increment+0.5)
return xi, yi
def indices ( self, *r):
"""
Queries the indices for a subdomain in the grid.
        Allows either rectangular, circular or polygonal subdomains.
This can be used to mask or to query results from a certain
sector or subdomain.
Parameters
----------
x1, y1, x2, y2, ... : float
If three parameters are given, then a circular sector is assumed
that is given by its center (x1, y1) and the radius x2.
            If four parameters are given, then a rectangular sector is
assumed that is given by two corners (x1, y1) and (x2, y2).
If more parameters are given, the subdomain is assumed to have
            polygonal shape with corners at (x_n, y_n).
Returns
-------
2-tuple of arrays of integers or of numpy slice objects
The indices that can be used to mask/select the grid subdomain from
an array with the same shape as the grid.
"""
if len(r) == 3: # only 3 values given -> use x,y,radius method
xpos = self.gpos
xis = []
yis = []
dr2 = (xpos[0, :]-r[0])**2 + (xpos[1, :]-r[1])**2
# array with true/false entries
inds = dr2 <= r[2]**2
for np in arange(self.size)[inds]: # np -- points in x2-circle
xi, yi = self.index(xpos[0, np], xpos[1, np])
xis += [xi]
yis += [yi]
if not (xis and yis): # if no points in circle, take nearest one
return self.index(r[0], r[1])
else:
return array(xis), array(yis)
elif len(r) == 4: # rectangular subdomain - old functionality
xi1, yi1 = self.index(min(r[0], r[2]), min(r[1], r[3]))
xi2, yi2 = self.index(max(r[0], r[2]), max(r[1], r[3]))
return s_[xi1:xi2+1], s_[yi1:yi2+1]
else: # use enveloping polygon
xpos = self.gpos
xis = []
yis = []
#replaced matplotlib Path by numpy
#p = Path(array(r).reshape(-1,2))
#inds = p.contains_points()
#inds = in_poly(xpos[:2,:].T,array(r).reshape(-1,2))
poly = Polygon(array(r).reshape(-1,2)[:,0],array(r).reshape(-1,2)[:,1])
dists = poly.is_inside(xpos[0,:],xpos[1,:])
inds = dists >= 0
for np in arange(self.size)[inds]: # np -- points in x2-circle
xi, yi = self.index(xpos[0, np], xpos[1, np])
xis += [xi]
yis += [yi]
if not (xis and yis): # if no points inside, take nearest to center
center = array(r).reshape(-1,2).mean(0)
return self.index(center[0], center[1])
else:
return array(xis), array(yis)
#return arange(self.size)[inds]
def extend (self) :
"""
        The extent of the grid in pylab.imshow-compatible form.
Returns
-------
4-tuple of floats
            The extent of the grid as a tuple (x_min, x_max, y_min, y_max).
"""
return (self.x_min, self.x_max, self.y_min, self.y_max)
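    # Usage sketch (values are illustrative, not from the original module):
    #   g = RectGrid(x_min=-0.2, x_max=0.2, y_min=-0.2, y_max=0.2,
    #                z=0.3, increment=0.01)
    #   g.shape                     # -> (41, 41)
    #   g.index(0.0, 0.0)           # grid indices nearest to (0, 0)
    #   g.indices(0.0, 0.0, 0.05)   # indices inside a circle of radius 0.05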
class RectGrid3D( RectGrid):
"""
Provides a cartesian 3D grid for the beamforming results.
The grid has cubic or nearly cubic cells. It is defined by lower and upper
x-, y- and z-limits.
"""
#: The lower z-limit that defines the grid, defaults to -1.
z_min = Float(-1.0,
desc="minimum z-value")
#: The upper z-limit that defines the grid, defaults to 1.
z_max = Float(1.0,
desc="maximum z-value")
    #: Number of grid points along z-axis, readonly.
    nzsteps = Property(
        desc="number of grid points along z-axis")
# Private trait for increment handling
_increment = Any(0.1)
#: The cell side length for the grid. This can either be a scalar (same
#: increments in all 3 dimensions) or a (3,) array of floats with
#: respective increments in x,y, and z-direction (in m).
#: Defaults to 0.1.
increment = Property(desc="step size")
def _get_increment(self):
return self._increment
def _set_increment(self, increment):
if isscalar(increment):
try:
self._increment = absolute(float(increment))
except:
raise TraitError(args=self,
name='increment',
info='Float or CArray(3,)',
value=increment)
elif len(increment) == 3:
self._increment = array(increment,dtype=float)
else:
raise(TraitError(args=self,
name='increment',
info='Float or CArray(3,)',
value=increment))
# Respective increments in x,y, and z-direction (in m).
# Deprecated: Use :attr:`~RectGrid.increment` for this functionality
increment3D = Property(desc="3D step sizes")
def _get_increment3D(self):
if isscalar(self._increment):
return array([self._increment,self._increment,self._increment])
else:
return self._increment
def _set_increment3D(self, inc):
if not isscalar(inc) and len(inc) == 3:
self._increment = array(inc,dtype=float)
else:
raise(TraitError(args=self,
name='increment3D',
info='CArray(3,)',
value=inc))
# internal identifier
digest = Property(
depends_on = ['x_min', 'x_max', 'y_min', 'y_max', 'z_min', 'z_max', \
'_increment']
)
@property_depends_on('nxsteps, nysteps, nzsteps')
def _get_size ( self ):
return self.nxsteps*self.nysteps*self.nzsteps
@property_depends_on('nxsteps, nysteps, nzsteps')
def _get_shape ( self ):
return (self.nxsteps, self.nysteps, self.nzsteps)
@property_depends_on('x_min, x_max, increment3D')
def _get_nxsteps ( self ):
i = abs(self.increment3D[0])
if i != 0:
return int(round((abs(self.x_max-self.x_min)+i)/i))
return 1
@property_depends_on('y_min, y_max, increment3D')
def _get_nysteps ( self ):
i = abs(self.increment3D[1])
if i != 0:
return int(round((abs(self.y_max-self.y_min)+i)/i))
return 1
@property_depends_on('z_min, z_max, increment3D')
def _get_nzsteps ( self ):
i = abs(self.increment3D[2])
if i != 0:
return int(round((abs(self.z_max-self.z_min)+i)/i))
return 1
@property_depends_on('digest')
def _get_gpos ( self ):
"""
Calculates grid co-ordinates.
Returns
-------
array of floats of shape (3, :attr:`~Grid.size`)
The grid point x, y, z-coordinates in one array.
"""
bpos = mgrid[self.x_min:self.x_max:self.nxsteps*1j, \
self.y_min:self.y_max:self.nysteps*1j, \
self.z_min:self.z_max:self.nzsteps*1j]
bpos.resize((3, self.size))
return bpos
@cached_property
def _get_digest( self ):
return digest( self )
def index ( self, x, y, z ):
"""
Queries the indices for a grid point near a certain co-ordinate.
This can be used to query results or co-ordinates at/near a certain
co-ordinate.
Parameters
----------
x, y, z : float
The co-ordinates for which the indices is queried.
Returns
-------
3-tuple of integers
The indices that give the grid point nearest to the given x, y, z
co-ordinates from an array with the same shape as the grid.
"""
if x < self.x_min or x > self.x_max:
raise ValueError("x-value out of range %f (%f, %f)" % \
(x,self.x_min,self.x_max))
if y < self.y_min or y > self.y_max:
raise ValueError("y-value out of range %f (%f, %f)" % \
(y,self.y_min,self.y_max))
if z < self.z_min or z > self.z_max:
raise ValueError("z-value out of range %f (%f, %f)" % \
(z,self.z_min,self.z_max))
xi = int(round((x-self.x_min)/self.increment3D[0]))
yi = int(round((y-self.y_min)/self.increment3D[1]))
zi = int(round((z-self.z_min)/self.increment3D[2]))
return xi, yi, zi
def indices ( self, x1, y1, z1, x2, y2, z2 ):
"""
Queries the indices for a subdomain in the grid.
Allows box-shaped subdomains. This can be used to
mask or to query results from a certain sector or subdomain.
Parameters
----------
x1, y1, z1, x2, y2, z2 : float
A box-shaped sector is assumed that is given by two corners
(x1,y1,z1) and (x2,y2,z2).
Returns
-------
3-tuple of numpy slice objects
The indices that can be used to mask/select the grid subdomain from
an array with the same shape as the grid.
"""
xi1, yi1, zi1 = self.index(min(x1, x2), min(y1, y2), min(z1, z2))
xi2, yi2, zi2 = self.index(max(x1, x2), max(y1, y2), max(z1, z2))
return s_[xi1:xi2+1], s_[yi1:yi2+1], s_[zi1:zi2+1]
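    # Usage sketch (values are illustrative, not from the original module):
    #   g = RectGrid3D(x_min=-0.1, x_max=0.1, y_min=-0.1, y_max=0.1,
    #                  z_min=0.2, z_max=0.4, increment=[0.02, 0.02, 0.05])
    #   g.shape   # -> (11, 11, 5)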
class ImportGrid( Grid ):
"""
Loads a 3D grid from xml file.
"""
# internal identifier
digest = Property
    #: Name of the .xml-file from which to read the data.
from_file = File(filter=['*.xml'],
desc="name of the xml file to import")
gpos_file = CArray(dtype=float,
desc="x, y, z position of all Grid Points")
#: Basename of the .xml-file, without the extension; is set automatically / readonly.
basename = Property( depends_on = 'from_file',
desc="basename of xml file")
# internal identifier
digest = Property(
depends_on = ['from_file']
)
@cached_property
def _get_basename( self ):
return path.splitext(path.basename(self.from_file))[0]
@cached_property
def _get_digest( self ):
return digest( self )
# 'digest' is a placeholder for other properties in derived classes,
# necessary to trigger the depends on mechanism
@property_depends_on('basename')
def _get_size ( self ):
return self.gpos.shape[-1]
# 'digest' is a placeholder for other properties in derived classes
@property_depends_on('basename')
def _get_shape ( self ):
return self.gpos.shape[-1]
@property_depends_on('basename')
def _get_gpos( self ):
return self.gpos_file
subgrids = CArray(
desc="names of subgrids for each point")
@on_trait_change('basename')
def import_gpos( self ):
"""
Import the microphone positions from .xml file.
Called when :attr:`basename` changes.
"""
if not path.isfile(self.from_file):
# no file there
self.gpos_file = array([], 'd')
return
import xml.dom.minidom
doc = xml.dom.minidom.parse(self.from_file)
names = []
xyz = []
for el in doc.getElementsByTagName('pos'):
names.append(el.getAttribute('subgrid'))
xyz.append(list(map(lambda a : float(el.getAttribute(a)), 'xyz')))
self.gpos_file = array(xyz, 'd').swapaxes(0, 1)
self.subgrids = array(names)
def index ( self, x, y ):
"""
Queries the indices for a grid point near a certain co-ordinate.
This can be used to query results or co-ordinates at/near a certain
co-ordinate.
Parameters
----------
x, y : float
The co-ordinates for which the indices are queried.
Returns
-------
2-tuple of integers
The indices that give the grid point nearest to the given x, y
co-ordinates from an array with the same shape as the grid.
"""
if x < self.x_min or x > self.x_max:
raise ValueError("x-value out of range")
if y < self.y_min or y > self.y_max:
raise ValueError("y-value out of range")
xi = int((x-self.x_min)/self.increment+0.5)
yi = int((y-self.y_min)/self.increment+0.5)
return xi, yi
def indices ( self, *r):
"""
Queries the indices for a subdomain in the grid.
        Allows either rectangular, circular or polygonal subdomains.
This can be used to mask or to query results from a certain
sector or subdomain.
Parameters
----------
x1, y1, x2, y2, ... : float
If three parameters are given, then a circular sector is assumed
that is given by its center (x1, y1) and the radius x2.
            If four parameters are given, then a rectangular sector is
assumed that is given by two corners (x1, y1) and (x2, y2).
If more parameters are given, the subdomain is assumed to have
            polygonal shape with corners at (x_n, y_n).
Returns
-------
2-tuple of arrays of integers or of numpy slice objects
The indices that can be used to mask/select the grid subdomain from
an array with the same shape as the grid.
"""
if len(r) == 3: # only 3 values given -> use x,y,radius method
xpos = self.gpos
xis = []
yis = []
dr2 = (xpos[0, :]-r[0])**2 + (xpos[1, :]-r[1])**2
# array with true/false entries
inds = dr2 <= r[2]**2
for np in arange(self.size)[inds]: # np -- points in x2-circle
xi, yi = self.index(xpos[0, np], xpos[1, np])
xis += [xi]
yis += [yi]
if not (xis and yis): # if no points in circle, take nearest one
return self.index(r[0], r[1])
else:
return array(xis), array(yis)
elif len(r) == 4: # rectangular subdomain - old functionality
xi1, yi1 = self.index(min(r[0], r[2]), min(r[1], r[3]))
xi2, yi2 = self.index(max(r[0], r[2]), max(r[1], r[3]))
return s_[xi1:xi2+1], s_[yi1:yi2+1]
else: # use enveloping polygon
xpos = self.gpos
xis = []
yis = []
#replaced matplotlib Path by numpy
#p = Path(array(r).reshape(-1,2))
#inds = p.contains_points()
#inds = in_poly(xpos[:2,:].T,array(r).reshape(-1,2))
poly = Polygon(array(r).reshape(-1,2)[:,0],array(r).reshape(-1,2)[:,1])
dists = poly.is_inside(xpos[0,:],xpos[1,:])
inds = dists >= 0
for np in arange(self.size)[inds]: # np -- points in x2-circle
xi, yi = self.index(xpos[0, np], xpos[1, np])
xis += [xi]
yis += [yi]
if not (xis and yis): # if no points inside, take nearest to center
center = array(r).reshape(-1,2).mean(0)
return self.index(center[0], center[1])
else:
return array(xis), array(yis)
#return arange(self.size)[inds]
class LineGrid( Grid ):
"""
Class for Line grid geometries.
"""
    #: Starting point of the grid
loc = Tuple((0.0, 0.0, 0.0))
#: Vector to define the orientation of the line source
direction = Tuple((1.0, 0.0, 0.0),
desc="Line orientation ")
    #: Length of the line source in meters
    length = Float(1, desc="length of the line source")
    #: Number of grid points on the line.
    numpoints = Int(1, desc="number of grid points")
#: Overall number of grid points. Readonly; is set automatically when
#: other grid defining properties are set
size = Property(desc="overall number of grid points")
#: Grid positions as (3, :attr:`size`) array of floats, without invalid
#: microphones; readonly.
gpos = Property(desc="x, y, z positions of grid points")
digest = Property(
depends_on = ['loc', 'direction', 'length', 'numpoints', 'size']
)
@cached_property
def _get_digest( self ):
return digest( self )
# 'digest' is a placeholder for other properties in derived classes,
# necessary to trigger the depends on mechanism
@property_depends_on('numpoints')
def _get_size ( self ):
return self.gpos.shape[-1]
# 'digest' is a placeholder for other properties in derived classes
@property_depends_on('numpoints')
def _get_shape ( self ):
return self.gpos.shape[-1]
@property_depends_on('numpoints,length,direction,loc')
def _get_gpos( self ):
dist = self.length / self.numpoints
loc = array(self.loc, dtype = float).reshape((3, 1))
direc_n = self.direction/norm(self.direction)
pos = zeros((self.numpoints,3))
for s in range(self.numpoints):
pos[s] = (loc.T+direc_n*dist*s)
return pos.T
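    # Usage sketch (illustrative, not from the original module):
    #   line = LineGrid(loc=(0.0, 0.0, 0.5), direction=(1.0, 0.0, 0.0),
    #                   length=1.0, numpoints=5)
    #   line.gpos.shape   # -> (3, 5), points spaced length/numpoints apart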
class MergeGrid( Grid ):
"""
Base class for merging different grid geometries.
"""
    #: List of grids to be merged;
    #: each grid gets its own subdomain in the merged grid.
grids = List(desc="list of grids")
grid_digest = Property(desc="digest of the merged grids")
subgrids = Property(desc="names of subgrids for each point")
# internal identifier
digest = Property(
depends_on = ['grids','grid_digest']
)
@cached_property
def _get_digest( self ):
return digest( self )
@cached_property
def _get_grid_digest( self ):
griddigest = []
for grid in self.grids:
griddigest.append(grid.digest)
return griddigest
# 'digest' is a placeholder for other properties in derived classes,
# necessary to trigger the depends on mechanism
@property_depends_on('digest')
def _get_size ( self ):
return self.gpos.shape[-1]
# 'digest' is a placeholder for other properties in derived classes
@property_depends_on('digest')
def _get_shape ( self ):
return self.gpos.shape[-1]
@property_depends_on('digest')
def _get_subgrids( self ):
subgrids = zeros((1,0),dtype=str)
for grid in self.grids:
subgrids = append(subgrids,tile(grid.__class__.__name__+grid.digest,grid.size))
return subgrids[:,newaxis].T
@property_depends_on('digest')
def _get_gpos( self ):
bpos = zeros((3,0))
#subgrids = zeros((1,0))
for grid in self.grids:
bpos = append(bpos,grid.gpos, axis = 1)
#subgrids = append(subgrids,str(grid))
return bpos
class Sector( HasPrivateTraits ):
"""
Base class for sector types.
Defines the common interface for all sector classes. This class
    may be used as a base for diverse sector implementations. If used
directly, it implements a sector encompassing the whole grid.
"""
#: Boolean flag, if 'True' (default), grid points lying on the sector border are included.
include_border = Bool(True,
desc="include points on the border")
#: Absolute tolerance for sector border
abs_tol = Float(1e-12,
desc="absolute tolerance for sector border")
#: Boolean flag, if 'True' (default), the nearest grid point is returned if none is inside the sector.
default_nearest = Bool(True,
desc="return nearest grid point to center of none inside sector")
def contains ( self, pos ):
"""
Queries whether the coordinates in a given array lie within the
defined sector.
For this sector type, any position is valid.
Parameters
----------
pos : array of floats
Array with the shape 3x[number of gridpoints] containing the
grid positions
Returns
-------
array of bools with as many entries as columns in pos
Array indicating which of the given positions lie within the
given sector
"""
return ones(pos.shape[1], dtype=bool)
class RectSector( Sector ):
"""
Class for defining a rectangular sector.
    Can be used for 2D Grids for defining a rectangular sector or
for 3D grids for a rectangular cylinder sector parallel to the z-axis.
"""
#: The lower x position of the rectangle
x_min = Float(-1.0,
desc="minimum x position of the rectangle")
#: The upper x position of the rectangle
x_max = Float(1.0,
desc="maximum x position of the rectangle")
#: The lower y position of the rectangle
y_min = Float(-1.0,
desc="minimum y position of the rectangle")
#: The upper y position of the rectangle
y_max = Float(1.0,
desc="maximum y position of the rectangle")
def contains ( self, pos ):
"""
Queries whether the coordinates in a given array lie within the
rectangular sector.
If no coordinate is inside, the nearest one to the rectangle center
is returned if :attr:`~Sector.default_nearest` is True.
Parameters
----------
pos : array of floats
Array with the shape 3x[number of gridpoints] containing the
grid positions
Returns
-------
array of bools with as many entries as columns in pos
Array indicating which of the given positions lie within the
given sector
"""
# make sure xmin is minimum etc
xmin = min(self.x_min,self.x_max)
xmax = max(self.x_min,self.x_max)
ymin = min(self.y_min,self.y_max)
ymax = max(self.y_min,self.y_max)
abs_tol = self.abs_tol
# get pos indices inside rectangle (* == and)
if self.include_border:
inds = (pos[0, :] - xmin > -abs_tol) * \
(pos[0, :] - xmax < abs_tol) * \
(pos[1, :] - ymin > -abs_tol) * \
(pos[1, :] - ymax < abs_tol)
else:
inds = (pos[0, :] - xmin > abs_tol) * \
(pos[0, :] - xmax < -abs_tol) * \
(pos[1, :] - ymin > abs_tol) * \
(pos[1, :] - ymax < -abs_tol)
# if none inside, take nearest
if ~inds.any() and self.default_nearest:
x = (xmin + xmax) / 2.0
y = (ymin + ymax) / 2.0
dr2 = (pos[0, :] - x)**2 + (pos[1, :] - y)**2
inds[argmin(dr2)] = True
return inds.astype(bool)
class CircSector( Sector ):
"""
Class for defining a circular sector.
    Can be used for 2D Grids for defining a circular sector or
for 3D grids for a cylindrical sector parallel to the z-axis.
"""
#: x position of the circle center
x = Float(0.0,
desc="x position of the circle center")
#: y position of the circle center
y = Float(0.0,
desc="y position of the circle center")
#: radius of the circle
r = Float(1.0,
desc="radius of the circle")
def contains ( self, pos ):
"""
Queries whether the coordinates in a given array lie within the
circular sector.
If no coordinate is inside, the nearest one outside is returned
if :attr:`~Sector.default_nearest` is True.
Parameters
----------
pos : array of floats
Array with the shape 3x[number of gridpoints] containing the
grid positions
Returns
-------
array of bools with as many entries as columns in pos
Array indicating which of the given positions lie within the
given sector
"""
dr2 = (pos[0, :]-self.x)**2 + (pos[1, :]-self.y)**2
# which points are in the circle?
if self.include_border:
inds = (dr2 - self.r**2) < self.abs_tol
else:
inds = (dr2 - self.r**2) < -self.abs_tol
        # if there's no point inside
if ~inds.any() and self.default_nearest:
inds[argmin(dr2)] = True
return inds
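    # Usage sketch (illustrative, not from the original module):
    #   sec = CircSector(x=0.1, y=0.0, r=0.05)
    #   mask = sec.contains(grid.gpos)   # boolean mask over the grid points;
    #   # with default_nearest=True at least one entry is always True.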
class PolySector( Sector ):
"""
Class for defining a polygon sector.
    Can be used for 2D Grids for defining a polygon sector.
"""
# x1, y1, x2, y2, ... xn, yn :
edges = List( Float )
def contains ( self, pos ):
"""
Queries whether the coordinates in a given array lie within the
        polygon sector.
        If no coordinate is inside, the nearest one to the polygon center
is returned if :attr:`~Sector.default_nearest` is True.
Parameters
----------
pos : array of floats
Array with the shape 3x[number of gridpoints] containing the
grid positions
Returns
-------
array of bools with as many entries as columns in pos
Array indicating which of the given positions lie within the
given sector
"""
poly = Polygon(array(self.edges).reshape(-1,2)[:,0],array(self.edges).reshape(-1,2)[:,1])
dists = poly.is_inside(pos[0,:],pos[1,:])
if self.include_border:
inds = dists >= -self.abs_tol
else:
inds = dists > 0
        # if none inside, take the grid point nearest to the polygon center
        if ~inds.any() and self.default_nearest:
            center = array(self.edges).reshape(-1, 2).mean(0)
            dr2 = (pos[0, :] - center[0])**2 + (pos[1, :] - center[1])**2
            inds[argmin(dr2)] = True
return inds
class ConvexSector( Sector ):
"""
Class for defining a convex hull sector.
    Can be used for 2D Grids for defining a convex hull sector.
"""
# x1, y1, x2, y2, ... xn, yn :
edges = List( Float )
def contains ( self, pos ):
"""
Queries whether the coordinates in a given array lie within the
convex sector.
        If no coordinate is inside, the nearest one to the hull center
is returned if :attr:`~Sector.default_nearest` is True.
Parameters
----------
pos : array of floats
Array with the shape 3x[number of gridpoints] containing the
grid positions
Returns
-------
array of bools with as many entries as columns in pos
Array indicating which of the given positions lie within the
given sector
"""
inds = in_hull(pos[:2,:].T, array(self.edges).reshape(-1,2), \
border = self.include_border ,tol = self.abs_tol)
        # if none inside, take the grid point nearest to the hull center
        if ~inds.any() and self.default_nearest:
            center = array(self.edges).reshape(-1, 2).mean(0)
            dr2 = (pos[0, :] - center[0])**2 + (pos[1, :] - center[1])**2
            inds[argmin(dr2)] = True
return inds
class MultiSector(Sector):
"""
Class for defining a sector consisting of multiple sectors.
Can be used to sum over different sectors. Takes a list of sectors
and returns the points contained in each sector.
"""
#: List of :class:`acoular.grids.Sector` objects
#: to be mixed.
sectors = List(Instance(Sector))
def contains ( self, pos ):
"""
Queries whether the coordinates in a given array lie within any
of the sub-sectors.
Parameters
----------
pos : array of floats
Array with the shape 3x[number of gridpoints] containing the
grid positions
Returns
-------
array of bools with as many entries as columns in pos
Array indicating which of the given positions lie within the
sectors
"""
# initialize with only "False" entries
inds = zeros(pos.shape[1], dtype=bool)
# add points contained in each sector
for sec in self.sectors:
inds += sec.contains(pos)
return inds.astype(bool)
| 34.011971 | 106 | 0.557524 |
3c3aa3e7d642d29a59713e659adec8638bd57a74 | 218 | py | Python | tests/test_eth.py | polyswarm/polyswarmd | b732d60f0f829cc355c1f938bbe6de69f9985098 | [
"MIT"
] | 14 | 2018-04-16T18:04:23.000Z | 2019-11-26T06:39:23.000Z | tests/test_eth.py | polyswarm/polyswarmd | b732d60f0f829cc355c1f938bbe6de69f9985098 | [
"MIT"
] | 227 | 2018-04-03T01:10:34.000Z | 2021-03-25T21:49:58.000Z | tests/test_eth.py | polyswarm/polyswarmd | b732d60f0f829cc355c1f938bbe6de69f9985098 | [
"MIT"
] | 2 | 2018-04-23T18:37:47.000Z | 2021-04-26T10:58:39.000Z | from .utils import heck
def test_get_nonce(client, token_address):
response = client.get('/nonce', query_string={'account': token_address}).json
assert response == heck({'result': heck.UINT, 'status': 'OK'})
| 31.142857 | 81 | 0.701835 |
5263c4d9aa384574b85287d2a21b9455044d170c | 3,361 | py | Python | Lib/site-packages/chainer/functions/array/select_item.py | km-t/dcpython | c0fcd5557691004d7d9d22a662d90e52ecc5f34f | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/site-packages/chainer/functions/array/select_item.py | km-t/dcpython | c0fcd5557691004d7d9d22a662d90e52ecc5f34f | [
"CNRI-Python-GPL-Compatible"
] | 11 | 2020-01-28T22:49:05.000Z | 2022-03-11T23:50:27.000Z | Lib/site-packages/chainer/functions/array/select_item.py | km-t/dcpython | c0fcd5557691004d7d9d22a662d90e52ecc5f34f | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
class SelectItem(function_node.FunctionNode):
"""Select elements stored in given indices."""
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x', 't'))
x_type, t_type = in_types
type_check.expect(
t_type.dtype.kind == 'i',
x_type.ndim == 2,
t_type.ndim == 1,
x_type.shape[0] == t_type.shape[0],
)
def forward(self, inputs):
self.retain_inputs((1,))
x, t = inputs
self._in_shape = x.shape
self._in_dtype = x.dtype
if chainer.is_debug():
if not ((0 <= t).all() and
(t < x.shape[1]).all()):
                msg = 'Each label `t` needs to satisfy `0 <= t < x.shape[1]`'
raise ValueError(msg)
xp = backend.get_array_module(x)
if xp is numpy:
# This code is equivalent to `t.choose(x.T)`, but `numpy.choose`
# does not work when `x.shape[1] > 32`.
return x[six.moves.range(t.size), t],
else:
y = cuda.elementwise(
'S t, raw T x',
'T y',
'int ind[] = {i, t}; y = x[ind];',
'getitem_fwd'
)(t, x)
return y,
def backward(self, indexes, gy):
t = self.get_retained_inputs()[0]
ret = []
if 0 in indexes:
gx = Assign(self._in_shape, self._in_dtype, t).apply(gy)[0]
ret.append(gx)
if 1 in indexes:
ret.append(None)
return ret
class Assign(function_node.FunctionNode):
def __init__(self, shape, dtype, t):
self.shape = shape
self.dtype = dtype
self.t = t.data
def forward_cpu(self, inputs):
gx = numpy.zeros(self.shape, self.dtype)
gx[six.moves.range(self.t.size), self.t] = inputs[0]
return gx,
def forward_gpu(self, inputs):
gx = cuda.cupy.zeros(self.shape, self.dtype)
gx = cuda.elementwise(
'S t, T gloss',
'raw T gx',
'int ind[] = {i, t}; gx[ind] = gloss;',
'getitem_bwd'
)(self.t, inputs[0], gx)
return gx,
def backward(self, indexes, gy):
return SelectItem().apply((gy[0], self.t))
def select_item(x, t):
"""Select elements stored in given indices.
    This function returns ``t.choose(x.T)``, which means
``y[i] == x[i, t[i]]`` for all ``i``.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable storing arrays. A two-dimensional float array.
t (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable storing index numbers. A one-dimensional int array.
Length of the ``t`` should be equal to ``x.shape[0]``.
Returns:
~chainer.Variable: Variable that holds ``t``-th element of ``x``.
.. admonition:: Example
>>> x = np.array([[0, 1, 2], [3, 4, 5]], np.float32)
>>> t = np.array([0, 2], np.int32)
>>> y = F.select_item(x, t)
>>> y.shape
(2,)
>>> y.data
array([0., 5.], dtype=float32)
"""
return SelectItem().apply((x, t))[0]
| 28.974138 | 77 | 0.528117 |
a93a2174e6f94a762b488bad5f32b936500c5034 | 906 | py | Python | vn.rpc/testClient.py | AuthenticBladeRunner/PyTrader | d58bd5999fc29852106462bb5d1afa72e3f3f496 | [
"MIT"
] | null | null | null | vn.rpc/testClient.py | AuthenticBladeRunner/PyTrader | d58bd5999fc29852106462bb5d1afa72e3f3f496 | [
"MIT"
] | null | null | null | vn.rpc/testClient.py | AuthenticBladeRunner/PyTrader | d58bd5999fc29852106462bb5d1afa72e3f3f496 | [
"MIT"
] | null | null | null | # encoding: UTF-8
from time import sleep
from vnrpc import RpcClient
########################################################################
class TestClient(RpcClient):
""""""
#----------------------------------------------------------------------
def __init__(self, reqAddress, subAddress):
"""Constructor"""
super(TestClient, self).__init__(reqAddress, subAddress)
#----------------------------------------------------------------------
def callback(self, topic, data):
"""回调函数实现"""
print('client received topic:', topic, ', data:', data)
if __name__ == '__main__':
reqAddress = 'tcp://localhost:2014'
subAddress = 'tcp://localhost:0602'
tc = TestClient(reqAddress, subAddress)
tc.subscribeTopic('')
tc.start()
while 1:
print(tc.add(1, 3))
sleep(2) | 27.454545 | 76 | 0.428256 |
8c1a571b7aa8054e093f5f89f618c466b7200845 | 547 | py | Python | deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/MESA/pack_invert.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 210 | 2016-04-09T14:26:00.000Z | 2022-03-25T18:36:19.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/MESA/pack_invert.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 72 | 2016-09-04T09:30:19.000Z | 2022-03-27T17:06:53.000Z | deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/MESA/pack_invert.py | ShujaKhalid/deep-rl | 99c6ba6c3095d1bfdab81bd01395ced96bddd611 | [
"MIT"
] | 64 | 2016-04-09T14:26:49.000Z | 2022-03-21T11:19:47.000Z | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_MESA_pack_invert'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_MESA_pack_invert',error_checker=_errors._error_checker)
GL_PACK_INVERT_MESA=_C('GL_PACK_INVERT_MESA',0x8758)
| 34.1875 | 113 | 0.798903 |
1d1f7a33ec6070a282e2ace9158fb1cc6911f94d | 590 | py | Python | tests/barebones/conf.py | choldgraf/sphinx-basic-ng | 678a10d27891ae1df58c584d0a55775204308cff | [
"MIT"
] | null | null | null | tests/barebones/conf.py | choldgraf/sphinx-basic-ng | 678a10d27891ae1df58c584d0a55775204308cff | [
"MIT"
] | null | null | null | tests/barebones/conf.py | choldgraf/sphinx-basic-ng | 678a10d27891ae1df58c584d0a55775204308cff | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# Full list of options can be found in the Sphinx documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
#
# -- Project information -----------------------------------------------------
#
project = "sphinx-basic-ng demo"
copyright = "2021, Pradyun Gedam"
author = "Pradyun Gedam"
#
# -- General configuration ---------------------------------------------------
#
extensions = ["myst_parser"]
#
# -- Options for HTML output -------------------------------------------------
#
html_theme = "basic-ng"
| 23.6 | 78 | 0.530508 |
969991e574cc708ffe66eb6888e60dfbc0111492 | 3,276 | py | Python | setup.py | zopefoundation/z3c.wizard | 3dba9adf3f2173a50708163aa9cc42f6917bba5a | [
"ZPL-2.1"
] | 1 | 2019-05-22T04:20:44.000Z | 2019-05-22T04:20:44.000Z | setup.py | zopefoundation/z3c.wizard | 3dba9adf3f2173a50708163aa9cc42f6917bba5a | [
"ZPL-2.1"
] | 3 | 2020-11-08T09:37:22.000Z | 2020-12-17T07:00:18.000Z | setup.py | zopefoundation/z3c.wizard | 3dba9adf3f2173a50708163aa9cc42f6917bba5a | [
"ZPL-2.1"
] | 1 | 2019-01-08T15:56:42.000Z | 2019-01-08T15:56:42.000Z | ##############################################################################
#
# Copyright (c) 2007 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import os
from setuptools import setup, find_packages
def read(*rnames):
with open(os.path.join(os.path.dirname(__file__), *rnames)) as f:
return f.read()
setup(
name='z3c.wizard',
version='1.2.dev0',
author="Roger Ineichen and the Zope Community",
author_email="zope-dev@zope.org",
description="Wizard based on z3c.form for for Zope3",
long_description='\n\n'.join([
read('README.rst'),
'.. contents::',
read('src', 'z3c', 'wizard', 'README.rst'),
read('src', 'z3c', 'wizard', 'zcml.rst'),
read('CHANGES.rst'),
]),
license="ZPL 2.1",
keywords="zope zope3 z3c form wizard",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Zope Public License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Natural Language :: English',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP',
'Framework :: Zope :: 3',
],
url='https://github.com/zopefoundation/z3c.wizard',
packages=find_packages('src'),
include_package_data=True,
package_dir={'': 'src'},
namespace_packages=['z3c'],
python_requires=', '.join([
'>=2.7',
'!=3.0.*',
'!=3.1.*',
'!=3.2.*',
'!=3.3.*',
'!=3.4.*',
]),
extras_require=dict(
test=[
'z3c.macro',
'zope.app.pagetemplate',
'zope.app.testing',
'zope.browserresource',
'zope.publisher',
'zope.testing',
'zope.testrunner',
],
),
install_requires=[
'setuptools',
'z3c.form >= 2.0',
'z3c.formui',
'z3c.pagelet',
'zope.browserpage',
'zope.component',
'zope.configuration',
'zope.event',
'zope.i18nmessageid',
'zope.interface',
'zope.lifecycleevent',
'zope.location',
'zope.publisher',
'zope.schema',
'zope.security',
'zope.traversing',
],
zip_safe=False,
)
| 32.117647 | 78 | 0.54304 |
22e7466d6b3191a9641d99d3cb8324e0c8053dee | 7,191 | py | Python | blueair/cli.py | thedjinn/blueair-py | 6b10a3f7c2a13560af682ceef8c631b249bb41ed | [
"MIT"
] | 1 | 2021-12-19T02:07:17.000Z | 2021-12-19T02:07:17.000Z | blueair/cli.py | aijayadams/blueair-py | 8887748d26c5ea1add1afaa287d8cbf04d692c6c | [
"MIT"
] | 1 | 2021-12-18T14:35:04.000Z | 2021-12-18T14:35:04.000Z | blueair/cli.py | aijayadams/blueair-py | 8887748d26c5ea1add1afaa287d8cbf04d692c6c | [
"MIT"
] | 2 | 2021-02-20T02:11:00.000Z | 2021-06-14T05:55:07.000Z | """This module contains the Blueair command line client."""
import argparse
import coloredlogs # type: ignore
import logging
import matplotlib
import matplotlib.pyplot as pyplot
import pandas as pd
import tzlocal
from datetime import datetime, timedelta, timezone
from itertools import chain, groupby
from time import sleep
from typing import Any, Dict, Sequence, List
from .blueair import BlueAir
from .database import Database
logger = logging.getLogger(__name__)
def _tabularize(it: Sequence[Dict[str, Any]], titles: List[str], field_keys: List[str]) -> None:
# Get maximum string lengths per dict key
grouped = groupby(sorted(chain(*(i.items() for i in it)), key=lambda x: x[0]), lambda x: x[0])
lengths = {k: max(len(str(w)) for _, w in v) for k, v in grouped}
title_mapping = dict(zip(field_keys, titles))
lengths = {k: max(v, len(title_mapping[k])) for k, v in lengths.items()}
print(" ".join(title.ljust(lengths[field]) for title, field in zip(titles, field_keys)))
print(" ".join("-" * lengths[field] for field in field_keys))
for entry in it:
print(" ".join(str(entry[field]).ljust(lengths[field]) for field in field_keys))
def _collect_measurements(blueair: BlueAir, database: Database, device_uuid: str) -> None:
now = datetime.now(timezone.utc)
start_timestamp = database.get_latest_timestamp() or int((now - timedelta(days=10)).timestamp())
end_timestamp = int(now.timestamp())
measurements = blueair.get_data_points_between(device_uuid, start_timestamp + 1, end_timestamp)
for measurement in measurements:
database.insert_measurement(**measurement)
database.commit()
logger.info(f"Collected {len(measurements)} new measurements")
def _plot_measurements(database: Database, filename: str) -> None:
measurements = database.get_all_measurements()
# get the local timezone
zone = tzlocal.get_localzone().zone
# Create a dataframe for plotting
dataframe = pd.DataFrame(measurements)
dataframe["timestamp"] = dataframe["timestamp"].apply(lambda timestamp: datetime.utcfromtimestamp(timestamp))
dataframe["timestamp"] = dataframe["timestamp"].dt.tz_localize("UTC").dt.tz_convert(zone) # type: ignore
matplotlib.rcParams["timezone"] = zone # type: ignore
pyplot.style.use("bmh") # type: ignore
fig, axs = pyplot.subplots(4, 1, figsize=(10, 10), constrained_layout=True) # type: ignore
axs[0].plot(dataframe["timestamp"], dataframe["pm25"])
axs[0].set_title("PM 2.5")
axs[0].set_ylabel("ug/m3")
axs[0].set_ylim(bottom=0)
axs[0].set_facecolor("#ffffff")
axs[0].margins(x=0.01, y=0.1)
axs[0].axhspan(0, 10, facecolor="#8AE3CE40")
axs[0].axhspan(10, 20, facecolor="#D1E5A840")
axs[0].axhspan(20, 35, facecolor="#FEDC9A40")
axs[0].axhspan(35, 80, facecolor="#FAC07E40")
axs[0].axhspan(80, 1000, facecolor="#FA897E40")
locator = matplotlib.dates.AutoDateLocator() # type: ignore
axs[0].get_xaxis().set_major_locator(locator)
axs[0].get_xaxis().set_major_formatter(matplotlib.dates.ConciseDateFormatter(locator)) # type: ignore
axs[1].plot(dataframe["timestamp"], dataframe["voc"])
axs[1].sharex(axs[0])
axs[1].set_title("VOC")
axs[1].set_ylabel("ppb")
axs[1].set_ylim(bottom=0)
axs[1].set_facecolor("#ffffff")
axs[1].margins(x=0.01, y=0.1)
axs[1].axhspan(0, 200, facecolor="#8AE3CE40")
axs[1].axhspan(200, 400, facecolor="#D1E5A840")
axs[1].axhspan(400, 600, facecolor="#FEDC9A40")
axs[1].axhspan(600, 800, facecolor="#FAC07E40")
axs[1].axhspan(800, 10000, facecolor="#FA897E40")
axs[2].plot(dataframe["timestamp"], dataframe["temperature"])
axs[2].sharex(axs[0])
axs[2].set_title("Temperature")
axs[2].set_ylabel("C")
axs[2].margins(x=0.01, y=0.1)
axs[3].plot(dataframe["timestamp"], dataframe["humidity"])
axs[3].sharex(axs[0])
axs[3].set_title("Relative Humidity")
axs[3].set_ylabel("%")
axs[3].margins(x=0.01, y=0.1)
pyplot.savefig(filename)
pyplot.show()
def run() -> None:
"""Run the Blueair command line client."""
# Create argument parser
parser = argparse.ArgumentParser(description="An example application using the Python BlueAir client that collects and graphs measurements.")
parser.add_argument("--email", help="The username for the BlueAir account")
parser.add_argument("--password", help="The password for the BlueAir acount")
parser.add_argument("--list-devices", action="store_true", help="List the available devices for the account and exit")
parser.add_argument("--list-attributes", action="store_true", help="List the available attributes for the device and exit")
parser.add_argument("--uuid", help="The device UUID to use for collecting measurements")
parser.add_argument("--interval", type=int, metavar="N", help="Collect measurements every N seconds")
parser.add_argument("--output", default="chart.png", help="The filename to use for the generated chart (defaults to chart.png)")
parser.add_argument("--database", default="blueair.db", help="The filename to use for the SQLite database (defaults to blueair.db)")
parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
args = parser.parse_args()
# Configure logger
coloredlogs.install(
level=args.verbose and logging.INFO or logging.WARNING,
fmt="%(asctime)s %(name)s[%(process)d] %(levelname)s %(message)s"
)
if not args.email or not args.password:
raise RuntimeError("Must provide both email and password")
blueair = BlueAir(args.email, args.password)
# Handle device list mode
if args.list_devices:
devices = blueair.get_devices()
_tabularize(
devices,
["UUID", "User ID", "MAC Address", "Device Name"],
["uuid", "userId", "mac", "name"]
)
exit()
# Get UUID or fetch from device list if not specified
uuid = args.uuid
if not uuid:
devices = blueair.get_devices()
if not devices:
raise RuntimeError("No devices found")
elif len(devices) != 1:
raise RuntimeError("Found multiple devices, use --uuid argument to specify which one to use")
else:
uuid = devices[0]["uuid"]
# Handle attributes list mode
if args.list_attributes:
for key, value in blueair.get_attributes(uuid).items():
print(f"{key}: {value}")
exit()
# Initialize database session
database = Database(filename=args.database)
# Loop if interval is specified
if args.interval:
while True:
logger.info("Collecting measurements")
_collect_measurements(blueair, database, uuid)
logger.info("Generating chart")
_plot_measurements(database, args.output)
logger.info(f"Waiting {args.interval} seconds")
sleep(args.interval)
else:
logger.info("Collecting measurements")
_collect_measurements(blueair, database, uuid)
logger.info("Generating chart")
_plot_measurements(database, args.output)
| 38.25 | 145 | 0.675428 |
5f1e5899162cb0de5b5191babaad037fed445f63 | 547 | py | Python | homeworks/BurgersEquation/templates/plot_error.py | padomu/NPDECODES | d2bc5b0d2d5e76e4d5b8ab6948c82f902211182e | [
"MIT"
] | 15 | 2019-04-29T11:28:56.000Z | 2022-03-22T05:10:58.000Z | homeworks/BurgersEquation/templates/plot_error.py | padomu/NPDECODES | d2bc5b0d2d5e76e4d5b8ab6948c82f902211182e | [
"MIT"
] | 12 | 2020-02-29T15:05:58.000Z | 2022-02-21T13:51:07.000Z | homeworks/BurgersEquation/templates/plot_error.py | padomu/NPDECODES | d2bc5b0d2d5e76e4d5b8ab6948c82f902211182e | [
"MIT"
] | 26 | 2020-01-09T15:59:23.000Z | 2022-03-24T16:27:33.000Z | from numpy import genfromtxt
from matplotlib.pyplot import figure, legend, loglog, savefig, xlabel, ylabel
from sys import argv
input_file = str(argv[1])
output_file = str(argv[2])
data = genfromtxt(input_file, delimiter=',')
h = data[0]
error_short = data[1]
error_long = data[2]
x = [0.01, 0.1]
fig = figure()
loglog(h, error_short, 'o-', label='T = 0.3')
loglog(h, error_long, 'o-', label='T = 3.0')
loglog(x, x, '--', label='slope 1')
xlabel('mesh-width h')
ylabel('error')
legend()
savefig(output_file)
print('Generated ' + output_file)
| 21.88 | 77 | 0.685558 |