hexsha (string, 40) | size (int64, 3-1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, 3-972) | max_stars_repo_name (string, 6-130) | max_stars_repo_head_hexsha (string, 40-78) | max_stars_repo_licenses (sequence, 1-10) | max_stars_count (int64, 1-191k, ⌀) | max_stars_repo_stars_event_min_datetime (string, 24, ⌀) | max_stars_repo_stars_event_max_datetime (string, 24, ⌀) | max_issues_repo_path (string, 3-972) | max_issues_repo_name (string, 6-130) | max_issues_repo_head_hexsha (string, 40-78) | max_issues_repo_licenses (sequence, 1-10) | max_issues_count (int64, 1-116k, ⌀) | max_issues_repo_issues_event_min_datetime (string, 24, ⌀) | max_issues_repo_issues_event_max_datetime (string, 24, ⌀) | max_forks_repo_path (string, 3-972) | max_forks_repo_name (string, 6-130) | max_forks_repo_head_hexsha (string, 40-78) | max_forks_repo_licenses (sequence, 1-10) | max_forks_count (int64, 1-105k, ⌀) | max_forks_repo_forks_event_min_datetime (string, 24, ⌀) | max_forks_repo_forks_event_max_datetime (string, 24, ⌀) | content (string, 3-1.03M) | avg_line_length (float64, 1.13-941k) | max_line_length (int64, 2-941k) | alphanum_fraction (float64, 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b11deac5da9eefb744eefe58f9e242d6b3092af3 | 1,228 | py | Python | xmitgcm/test/test_file_utils.py | fraserwg/xmitgcm | 177a7e62630a1552f5402ab51ba28dbcf802b5dd | [
"MIT"
] | 28 | 2016-10-17T07:23:39.000Z | 2019-08-29T06:38:50.000Z | xmitgcm/test/test_file_utils.py | fraserwg/xmitgcm | 177a7e62630a1552f5402ab51ba28dbcf802b5dd | [
"MIT"
] | 160 | 2016-10-12T03:04:23.000Z | 2019-09-23T22:28:21.000Z | xmitgcm/test/test_file_utils.py | fraserwg/xmitgcm | 177a7e62630a1552f5402ab51ba28dbcf802b5dd | [
"MIT"
] | 53 | 2016-10-12T01:27:07.000Z | 2019-08-06T18:35:39.000Z | import pytest
from xmitgcm import file_utils
@pytest.fixture(scope="session")
def directory_with_files(tmpdir_factory):
temppath = tmpdir_factory.mktemp("xmitgcm_test_data")
temppath.join('bar.0000000001.meta').ensure(file=True)
temppath.join('baz.data').ensure(file=True)
return temppath
def test_listdir(directory_with_files):
path = str(directory_with_files)
assert sorted(file_utils.listdir(path)) == sorted(['bar.0000000001.meta', 'baz.data'])
def test_listdir_startswith(directory_with_files):
path = str(directory_with_files)
assert file_utils.listdir_startswith(path, 'bar') == ['bar.0000000001.meta']
def test_listdir_endswith(directory_with_files):
path = str(directory_with_files)
assert file_utils.listdir_endswith(path, '.data') == ['baz.data']
def test_listdir_startsandendswith(directory_with_files):
path = str(directory_with_files)
assert file_utils.listdir_startsandendswith(path, 'bar', '.meta') == ['bar.0000000001.meta']
def test_listdir_fnmatch(directory_with_files):
path = str(directory_with_files)
assert file_utils.listdir_fnmatch(path, '*.??????????.meta') == ['bar.0000000001.meta']
def test_clear_cache():
file_utils.clear_cache()
| 36.117647 | 96 | 0.753257 |
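The test fixtures above pin down the behaviour of xmitgcm's `file_utils` listing helpers (`listdir`, `listdir_startswith`, `listdir_endswith`, `listdir_startsandendswith`, `listdir_fnmatch`, and a `clear_cache` hook that suggests results are cached). As a rough sketch of what such filters do, not xmitgcm's actual implementation, the same behaviour can be expressed with `os.listdir` and `fnmatch`:

```python
# Illustrative sketch only: equivalent filtering behaviour, without caching.
import fnmatch
import os


def listdir_startswith(path, prefix):
    # Entries in `path` whose names begin with `prefix`.
    return [f for f in os.listdir(path) if f.startswith(prefix)]


def listdir_fnmatch(path, pattern):
    # Entries in `path` matching a shell-style wildcard pattern,
    # e.g. '*.??????????.meta' for iteration-numbered MITgcm files.
    return [f for f in os.listdir(path) if fnmatch.fnmatch(f, pattern)]
```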
f07376c24b718a99fc05884460a46e8c64477121 | 3,836 | py | Python | huaweicloud-sdk-lts/huaweicloudsdklts/v2/model/update_log_group_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-lts/huaweicloudsdklts/v2/model/update_log_group_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-lts/huaweicloudsdklts/v2/model/update_log_group_request.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateLogGroupRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'log_group_id': 'str',
'body': 'UpdateLogGroupParams'
}
attribute_map = {
'log_group_id': 'log_group_id',
'body': 'body'
}
def __init__(self, log_group_id=None, body=None):
"""UpdateLogGroupRequest - a model defined in huaweicloud sdk"""
self._log_group_id = None
self._body = None
self.discriminator = None
self.log_group_id = log_group_id
if body is not None:
self.body = body
@property
def log_group_id(self):
"""Gets the log_group_id of this UpdateLogGroupRequest.
        Log group ID. For how to obtain it, see: Obtaining the Account ID, Project ID, Log Group ID, and Log Stream ID (https://support.huaweicloud.com/api-lts/lts_api_0006.html)
:return: The log_group_id of this UpdateLogGroupRequest.
:rtype: str
"""
return self._log_group_id
@log_group_id.setter
def log_group_id(self, log_group_id):
"""Sets the log_group_id of this UpdateLogGroupRequest.
        Log group ID. For how to obtain it, see: Obtaining the Account ID, Project ID, Log Group ID, and Log Stream ID (https://support.huaweicloud.com/api-lts/lts_api_0006.html)
:param log_group_id: The log_group_id of this UpdateLogGroupRequest.
:type: str
"""
self._log_group_id = log_group_id
@property
def body(self):
"""Gets the body of this UpdateLogGroupRequest.
:return: The body of this UpdateLogGroupRequest.
:rtype: UpdateLogGroupParams
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this UpdateLogGroupRequest.
:param body: The body of this UpdateLogGroupRequest.
:type: UpdateLogGroupParams
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateLogGroupRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.597122 | 104 | 0.574035 |
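A minimal usage sketch for the model above, assuming the `huaweicloudsdklts` package is installed so the class is importable from the module path shown in the row metadata; the ID value is a placeholder:

```python
# Hypothetical usage sketch: assumes huaweicloudsdklts is installed; the
# log group ID below is a placeholder, not a real resource.
from huaweicloudsdklts.v2.model.update_log_group_request import UpdateLogGroupRequest

request = UpdateLogGroupRequest(log_group_id="my-log-group-id")
print(request.to_dict())
# -> {'log_group_id': 'my-log-group-id', 'body': None}
# In a real update call, `body` would carry an UpdateLogGroupParams instance
# holding the fields to change.
```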
14c3d25400be2cdd7f67fc6b09caa02e6180cfa5 | 4,795 | py | Python | pycity_resilience/ga/parser/parse_city_to_ind.py | RWTH-EBC/pyCity_resilience | e0ff1e137adb0edec0509b8b50ca0931e761a9b4 | [
"MIT"
] | 3 | 2020-06-18T02:01:09.000Z | 2020-10-20T13:57:10.000Z | pycity_resilience/ga/parser/parse_city_to_ind.py | RWTH-EBC/pyCity_resilience | e0ff1e137adb0edec0509b8b50ca0931e761a9b4 | [
"MIT"
] | null | null | null | pycity_resilience/ga/parser/parse_city_to_ind.py | RWTH-EBC/pyCity_resilience | e0ff1e137adb0edec0509b8b50ca0931e761a9b4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Parse city information to individuum of GA run
"""
from __future__ import division
import pycity_calc.toolbox.networks.network_ops as netop
import pycity_resilience.ga.verify.check_validity as checkval
def parse_city_to_ind_dict(city, list_build_ids=None,
check_validity=True):
"""
Returns GA individuum dict with city information
Parameters
----------
city : object
City object of pyCity_calc. Should hold demand energy systems
list_build_ids : list (of ints)
List with building node ids (default: None). If None, searches for
all building node ids in city.
check_validity : bool, optional
Checks if generated dict_ind is valid (default: True)
Returns
-------
dict_ind : dict
Dictionary to form individuum for GA run
"""
dict_ind = {}
if list_build_ids is None:
list_build_ids = city.get_list_build_entity_node_ids()
# Initialize empty dicts for every building node id
    for n in list_build_ids:
dict_ind[n] = {}
# Add energy system
for n in list_build_ids:
build = city.nodes[n]['entity']
if build.bes.hasBattery:
dict_ind[n]['bat'] = build.bes.battery.capacity # in Joule
else:
dict_ind[n]['bat'] = 0
if build.bes.hasBoiler:
dict_ind[n]['boi'] = build.bes.boiler.qNominal # in Watt
else:
dict_ind[n]['boi'] = 0
if build.bes.hasChp:
dict_ind[n]['chp'] = build.bes.chp.qNominal # in Watt
else:
dict_ind[n]['chp'] = 0
if build.bes.hasElectricalHeater:
dict_ind[n]['eh'] = build.bes.electricalHeater.qNominal # in Watt
else:
dict_ind[n]['eh'] = 0
if build.bes.hasHeatpump:
# Distinguish between air/water (aw) and water/water (ww) hp
if build.bes.heatpump.hp_type == 'aw':
dict_ind[n]['hp_aw'] = build.bes.heatpump.qNominal # in Watt
dict_ind[n]['hp_ww'] = 0
elif build.bes.heatpump.hp_type == 'ww':
dict_ind[n]['hp_ww'] = build.bes.heatpump.qNominal # in Watt
dict_ind[n]['hp_aw'] = 0
else: # pragma: no cover
msg = 'Unknown heat pump hp_type. Can only be aw or ww.'
raise AssertionError(msg)
else:
dict_ind[n]['hp_aw'] = 0
dict_ind[n]['hp_ww'] = 0
if build.bes.hasPv:
dict_ind[n]['pv'] = build.bes.pv.area # in m2
else:
dict_ind[n]['pv'] = 0
if build.bes.hasTes:
dict_ind[n]['tes'] = build.bes.tes.capacity # in kg
else:
dict_ind[n]['tes'] = 0
# Add LHN information
list_lhn_subnetworks = netop.\
get_list_with_energy_net_con_node_ids(city=city,
build_node_only=True)
if list_lhn_subnetworks is None:
list_lhn_subnetworks = []
else:
# Sort original list_lhn_subnetworks
list_lhn_subnetworks.sort()
dict_ind['lhn'] = list_lhn_subnetworks
# Check_validity
if check_validity: # pragma: no cover
if checkval.check_ind_is_valid(ind=dict_ind) is False:
msg = 'Generated dict_ind is invalid!'
raise AssertionError(msg)
return dict_ind
def hand_over_dict(dict_ind=None, city=None, list_build_ids=None):
"""
Hand over individual city dict to GA. If dict is None and city is set,
generates dict out of city object.
parse_city_to_ind_dict would be enough to generate individuum dicts.
    However, hand_over_dicts also allows handing over other dicts in a simple way.
Parameters
----------
dict_ind : dict
Dict for city individuum (default: None). If None, tries to generate
dict_ind based on given city object
city : object
City object of pyCity_calc. Should hold energy systems (default: None)
list_build_ids : list (of ints)
List with building node ids (default: None). If None, searches for
all building node ids in city.
Returns
-------
dict_ind : dict
Dict for city individuum (default: None). If None, tries to generate
dict_ind based on given city object
"""
if dict_ind is None:
if city is None: # pragma: no cover
msg = 'City cannot be None, if dict_ind is None. You have to ' \
'hand over a city object instance!'
raise AssertionError(msg)
else:
dict_ind = parse_city_to_ind_dict(city=city,
list_build_ids=list_build_ids)
return dict_ind
| 31.754967 | 78 | 0.594995 |
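To make the structure of the returned individuum concrete, here is a hand-built `dict_ind` of the shape `parse_city_to_ind_dict` produces. The building node ids and all capacity values are invented for illustration; units follow the code above (boiler/CHP/EH/heat pump sizes in Watt, battery in Joule, PV in m2, TES in kg):

```python
# Hand-built example of the individuum dict; node ids and values are made up.
dict_ind = {
    1001: {'bat': 0, 'boi': 20000.0, 'chp': 0, 'eh': 0,        # boi/chp/eh in W
           'hp_aw': 0, 'hp_ww': 0, 'pv': 30.0, 'tes': 500.0},  # pv in m2, tes in kg
    1002: {'bat': 0, 'boi': 0, 'chp': 10000.0, 'eh': 0,
           'hp_aw': 0, 'hp_ww': 0, 'pv': 0, 'tes': 1000.0},
    'lhn': [[1001, 1002]],  # one LHN subnetwork connecting both buildings
}
```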
49640a36511038cfe83285c33da019bd628d3ae3 | 1,061 | py | Python | {{cookiecutter.project_slug}}/tests/locustfile.py | Steamboat/cookiecutter-devops | 6f07329c9e54b76e671a0308d343d2d9ebff5343 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/tests/locustfile.py | Steamboat/cookiecutter-devops | 6f07329c9e54b76e671a0308d343d2d9ebff5343 | [
"BSD-3-Clause"
] | null | null | null | {{cookiecutter.project_slug}}/tests/locustfile.py | Steamboat/cookiecutter-devops | 6f07329c9e54b76e671a0308d343d2d9ebff5343 | [
"BSD-3-Clause"
] | null | null | null | """
Locust is used for load testing services with fake users.
We configure different behaviors for users and then flood the server with an increasing number of these
artificial users to look at the performance impact of different behaviors.
"""
import time
import random
from locust import HttpUser, task, between
from api import global_config
from fixtures import PreloadedEnv, get_webdriver
class QuickstartUser(HttpUser):
wait_time = between(2.5, 10)
driver = None
def on_start(self):
self.driver = get_webdriver(is_headless=True, remote_url=global_config.WEBDRIVER_URL)
time.sleep(0.5)
self.driver.get(global_config.SERVER_URL)
time.sleep(0.5)
@task(10)
def homepage_bounce(self):
"""
User customizes a book and gets the pdf printable
"""
env = PreloadedEnv(driver=self.driver, server_url=global_config.SERVER_URL)
self.driver.get(env.server_url + "/index")
time.sleep(random.random())
env.scroll_to(2000)
time.sleep(random.random())
| 29.472222 | 103 | 0.705938 |
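The `QuickstartUser` above drives a Selenium webdriver through project-specific fixtures. For contrast, a minimal plain-HTTP Locust user needs only the built-in `self.client` session; this sketch uses standard Locust APIs and a hypothetical `/index` path:

```python
# Minimal plain-HTTP Locust user; the /index path is a stand-in.
from locust import HttpUser, task, between


class PlainHttpUser(HttpUser):
    # Simulated users pause 1-5 seconds between tasks.
    wait_time = between(1, 5)

    @task
    def load_index(self):
        # self.client is the requests-like session HttpUser provides; the path
        # is resolved against the host given on the locust command line.
        self.client.get("/index")
```

Such a file is typically run with something like `locust -f tests/locustfile.py --headless -u 10 -r 2 --host https://your-server`, with exact flags depending on the Locust version.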
141d7378cc582163ff86a24b798297394d55b3f7 | 8,048 | py | Python | designate/tests/test_notification_handler/test_nova.py | cleosson-daitan/designate | 37b3fa2ea454183e4a5c6a099eca5129959d10e1 | [
"Apache-2.0"
] | 1 | 2022-02-18T11:19:35.000Z | 2022-02-18T11:19:35.000Z | designate/tests/test_notification_handler/test_nova.py | sapcc/designate | c3f084751006a2fe7562f137930542c4759d6fd9 | [
"Apache-2.0"
] | null | null | null | designate/tests/test_notification_handler/test_nova.py | sapcc/designate | c3f084751006a2fe7562f137930542c4759d6fd9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_log import log as logging
from designate.tests import TestCase
from designate.notification_handler.nova import NovaFixedHandler
from designate.tests.test_notification_handler import \
NotificationHandlerMixin
LOG = logging.getLogger(__name__)
class NovaFixedHandlerTest(TestCase, NotificationHandlerMixin):
def setUp(self):
super(NovaFixedHandlerTest, self).setUp()
zone = self.create_zone()
self.zone_id = zone['id']
self.config(zone_id=zone['id'], group='handler:nova_fixed')
self.config(formatv4=['%(host)s.%(zone)s',
'%(host)s.foo.%(zone)s'],
formatv6=['%(host)s.%(zone)s',
'%(host)s.foo.%(zone)s'],
group='handler:nova_fixed')
self.plugin = NovaFixedHandler()
def test_instance_create_end(self):
event_type = 'compute.instance.create.end'
fixture = self.get_notification_fixture('nova', event_type)
self.assertIn(event_type, self.plugin.get_event_types())
criterion = {'zone_id': self.zone_id}
# Ensure we start with 2 records
records = self.central_service.find_records(self.admin_context,
criterion)
# Should only be SOA and NS records
self.assertEqual(2, len(records))
self.plugin.process_notification(
self.admin_context.to_dict(), event_type, fixture['payload'])
# Ensure we now have exactly 1 more record
records = self.central_service.find_records(self.admin_context,
criterion)
self.assertEqual(4, len(records))
def test_instance_create_end_utf8(self):
self.config(formatv4=['%(display_name)s.%(zone)s'],
formatv6=['%(display_name)s.%(zone)s'],
group='handler:nova_fixed')
event_type = 'compute.instance.create.end'
fixture = self.get_notification_fixture('nova', event_type)
# Set the instance display_name to a string containing UTF8.
fixture['payload']['display_name'] = u'Test↟Instance'
self.assertIn(event_type, self.plugin.get_event_types())
criterion = {'zone_id': self.zone_id}
# Ensure we start with 2 records
recordsets = self.central_service.find_recordsets(
self.admin_context, criterion)
# Should only be SOA and NS recordsets
self.assertEqual(2, len(recordsets))
self.plugin.process_notification(
self.admin_context.to_dict(), event_type, fixture['payload'])
# Ensure we now have exactly 1 more recordset
recordsets = self.central_service.find_recordsets(
self.admin_context, criterion)
self.assertEqual(3, len(recordsets))
# Ensure the created record was correctly converted per IDN rules.
criterion['type'] = 'A'
recordsets = self.central_service.find_recordsets(
self.admin_context, criterion)
self.assertEqual('xn--testinstance-q83g.example.com.',
recordsets[0].name)
def test_instance_delete_start(self):
# Prepare for the test
start_event_type = 'compute.instance.create.end'
start_fixture = self.get_notification_fixture('nova', start_event_type)
self.plugin.process_notification(self.admin_context.to_dict(),
start_event_type,
start_fixture['payload'])
# Now - Onto the real test
event_type = 'compute.instance.delete.start'
fixture = self.get_notification_fixture('nova', event_type)
self.assertIn(event_type, self.plugin.get_event_types())
criterion = {'zone_id': self.zone_id}
# Ensure we start with at least 1 record, plus NS and SOA
records = self.central_service.find_records(self.admin_context,
criterion)
self.assertEqual(4, len(records))
self.plugin.process_notification(
self.admin_context.to_dict(), event_type, fixture['payload'])
# Simulate the record having been deleted on the backend
zone_serial = self.central_service.get_zone(
self.admin_context, self.zone_id).serial
self.central_service.update_status(
self.admin_context, self.zone_id, "SUCCESS", zone_serial)
# Ensure we now have exactly 0 records, plus NS and SOA
records = self.central_service.find_records(self.admin_context,
criterion)
self.assertEqual(2, len(records))
def test_label_in_format_v4_v6(self):
event_type = 'compute.instance.create.end'
self.config(formatv4=['%(label)s.example.com.'],
formatv6=['%(label)s.example.com.'],
group='handler:nova_fixed')
fixture = self.get_notification_fixture('nova', event_type)
with mock.patch.object(
self.plugin, '_create_or_update_recordset') as finder:
with mock.patch.object(self.plugin.central_api,
'create_record'):
finder.return_value = {'id': 'fakeid'}
self.plugin.process_notification(
self.admin_context.to_dict(),
event_type, fixture['payload'])
finder.assert_called_once_with(
mock.ANY, mock.ANY, type='A', zone_id=self.zone_id,
name='private.example.com.')
def test_formatv4(self):
event_type = 'compute.instance.create.end'
self.config(formatv4=['%(label)s-v4.example.com.'],
group='handler:nova_fixed')
fixture = self.get_notification_fixture('nova', event_type)
with mock.patch.object(
self.plugin, '_create_or_update_recordset') as finder:
with mock.patch.object(self.plugin.central_api,
'create_record'):
finder.return_value = {'id': 'fakeid'}
self.plugin.process_notification(
self.admin_context.to_dict(),
event_type, fixture['payload'])
finder.assert_called_once_with(
mock.ANY, mock.ANY, type='A', zone_id=self.zone_id,
name='private-v4.example.com.')
def test_formatv6(self):
event_type = 'compute.instance.create.end'
self.config(formatv6=['%(label)s-v6.example.com.'],
group='handler:nova_fixed')
fixture = self.get_notification_fixture('nova', event_type)
with mock.patch.object(
self.plugin, '_create_or_update_recordset') as finder:
with mock.patch.object(self.plugin.central_api,
'create_record'):
finder.return_value = {'id': 'fakeid'}
self.plugin.process_notification(
self.admin_context.to_dict(),
event_type, fixture['payload_v6'])
finder.assert_called_once_with(
mock.ANY, mock.ANY, type='AAAA', zone_id=self.zone_id,
name='private-v6.example.com.')
| 41.271795 | 79 | 0.608598 |
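The tests above configure `formatv4`/`formatv6` patterns such as `'%(host)s.%(zone)s'` and `'%(label)s-v4.example.com.'`. A small illustration of how such patterns expand into record names via ordinary %-style substitution (the real handler additionally resolves the zone and the payload fields from the notification):

```python
# Illustration only: the keys mirror those exercised in the tests above.
pattern = '%(label)s-v4.example.com.'
print(pattern % {'label': 'private'})                      # private-v4.example.com.

pattern = '%(host)s.%(zone)s'
print(pattern % {'host': 'vm1', 'zone': 'example.com.'})   # vm1.example.com.
```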
127030d5bf2be4ba3616374738a20df04623e082 | 2,936 | py | Python | codebleu/graph_generator/typeparsing/inheritancerewrite.py | JetBrains-Research/metrics-evaluation | 6e3696d11b9efcc7b4403f94b84651caed247649 | [
"Apache-2.0"
] | 2 | 2020-07-14T17:00:28.000Z | 2020-08-13T10:19:52.000Z | codebleu/graph_generator/typeparsing/inheritancerewrite.py | JetBrains-Research/metrics-evaluation | 6e3696d11b9efcc7b4403f94b84651caed247649 | [
"Apache-2.0"
] | 3 | 2020-07-24T12:29:24.000Z | 2021-06-28T17:07:45.000Z | codebleu/graph_generator/typeparsing/inheritancerewrite.py | JetBrains-Research/metrics-evaluation | 6e3696d11b9efcc7b4403f94b84651caed247649 | [
"Apache-2.0"
] | null | null | null | from itertools import product
from typing import Callable, Iterator, Set
import random
from codebleu.graph_generator.typeparsing.nodes import TypeAnnotationNode, SubscriptAnnotationNode, TupleAnnotationNode, \
ListAnnotationNode, AttributeAnnotationNode, IndexAnnotationNode, ElipsisAnnotationNode
from codebleu.graph_generator.typeparsing.visitor import TypeAnnotationVisitor
__all__ = ['DirectInheritanceRewriting']
class DirectInheritanceRewriting(TypeAnnotationVisitor):
"""Replace Nodes their direct is-a relationships"""
def __init__(self, is_a_info: Callable[[TypeAnnotationNode], Iterator[TypeAnnotationNode]],
non_generic_types: Set[TypeAnnotationNode], limit_combinations_to: int=10000):
self.__is_a = is_a_info
self.__non_generic_types = non_generic_types
self.__limit_combinations_to = limit_combinations_to
def visit_subscript_annotation(self, node: SubscriptAnnotationNode):
value_node_options = node.value.accept_visitor(self)
if node.slice is None:
slice_node_options = [None]
else:
slice_node_options = node.slice.accept_visitor(self)
all_children = []
for v in value_node_options:
if v in self.__non_generic_types:
all_children.append(v)
continue
for s in slice_node_options:
all_children.append(SubscriptAnnotationNode(v, s))
return all_children
def visit_tuple_annotation(self, node: TupleAnnotationNode):
all_elements_options = [e.accept_visitor(self) for e in node.elements]
r = [TupleAnnotationNode(t) for t in product(*all_elements_options)]
if len(r) > self.__limit_combinations_to:
random.shuffle(r)
return r[:self.__limit_combinations_to]
return r
def visit_name_annotation(self, node):
return [node] + list(self.__is_a(node))
def visit_list_annotation(self, node: ListAnnotationNode):
all_elements_options = [e.accept_visitor(self) for e in node.elements]
r = [ListAnnotationNode(t) for t in product(*all_elements_options)]
if len(r) > self.__limit_combinations_to:
random.shuffle(r)
return r[:self.__limit_combinations_to]
return r
def visit_attribute_annotation(self, node: AttributeAnnotationNode):
v = [node] + list(self.__is_a(node))
return v
def visit_index_annotation(self, node: IndexAnnotationNode):
next_values = node.value.accept_visitor(self)
return [IndexAnnotationNode(v) for v in next_values]
def visit_elipsis_annotation(self, node: ElipsisAnnotationNode):
return [node]
def visit_name_constant_annotation(self, node):
return [node]
def visit_unknown_annotation(self, node):
return [node]
| 40.219178 | 123 | 0.688692 |
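The visitor above enumerates every combination of rewritten child annotations with `itertools.product` and truncates the result at `limit_combinations_to`. A standalone sketch of that combine-then-truncate pattern, with made-up option lists and a limit of 4:

```python
# Sketch of the combine-then-truncate pattern used by the visitor above.
import random
from itertools import product

options_per_element = [['int', 'float'], ['str', 'bytes', 'Any']]  # invented options
limit = 4  # stands in for limit_combinations_to

combos = list(product(*options_per_element))
if len(combos) > limit:
    random.shuffle(combos)
    combos = combos[:limit]
print(combos)
```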
823691ea75deef3597541b930e5ca63bb9944bb2 | 18,474 | py | Python | accel/cuda/ops_cuda.py | dandycheung/tinygrad | c8b569a8c74ffc1e3fd21cda2bca04749e7c500a | [
"MIT"
] | null | null | null | accel/cuda/ops_cuda.py | dandycheung/tinygrad | c8b569a8c74ffc1e3fd21cda2bca04749e7c500a | [
"MIT"
] | null | null | null | accel/cuda/ops_cuda.py | dandycheung/tinygrad | c8b569a8c74ffc1e3fd21cda2bca04749e7c500a | [
"MIT"
] | null | null | null | import numpy as np
from functools import lru_cache
from tinygrad.tensor import Function
from tinygrad.helpers import binary_broadcast
import pycuda.driver as cuda
from pycuda.compiler import SourceModule
import pycuda.autoinit
dev = cuda.Context.get_device()
MAX_THREADS_BLOCK = dev.get_attribute(cuda.device_attribute.MAX_THREADS_PER_BLOCK)
i32 = np.int32
class CudaBuffer:
def __init__(self, shape, hostbuf=None):
self.shape = tuple(shape)
self.dtype = np.float32
self.sz = int(np.prod(shape)*4)
self.buf = cuda.mem_alloc(self.sz)
if hostbuf is not None:
if isinstance(hostbuf, CudaBuffer):
self.buf = hostbuf.buf
else:
cuda.memcpy_htod(self.buf, hostbuf.flatten().astype(np.float32))
@staticmethod
def fromCPU(data):
if data.dtype != np.float32:
raise Exception('Only float32 is supported')
return CudaBuffer(data.shape, data)
def toCPU(self):
ret = np.empty(self.shape).astype(np.float32)
cuda.memcpy_dtoh(ret, self.buf)
return ret
def buffer_new(shape, zero=False):
return CudaBuffer(shape, hostbuf=None if not zero else np.zeros(shape, dtype=np.float32))
def buffer_np(x):
x = np.array(x).astype(np.float32)
buf = cuda.mem_alloc(int(np.prod(x.shape)*4))
cuda.memcpy_htod(buf, x)
return buf
def get_block_grid(shape=None, nelem=None):
if shape is not None:
nelem = int(np.prod(shape))
block = (int([nelem, MAX_THREADS_BLOCK][int(nelem > MAX_THREADS_BLOCK)]), 1, 1)
grid = (int(1+(nelem-1)//MAX_THREADS_BLOCK), 1)
return block, grid
# ************* unary ops *************
def unary_op(code, x):
block, grid = get_block_grid(x.shape)
mod = SourceModule(f"""
__global__ void unop(float *dest, float *a_g, int bufsz) {{
const int i = blockDim.x*blockIdx.x+threadIdx.x;
if (i < bufsz) {{
float a = a_g[i];
dest[i] = {code};
}}
}}
""")
unop = mod.get_function("unop")
ret = buffer_new(x.shape)
unop(ret.buf, x.buf, i32(np.prod(x.shape)), block=block, grid=grid)
return ret
class ReLU(Function):
def forward(ctx, input):
ctx.save_for_backward(input)
return unary_op('max(a, (float)0.)', input)
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return binary_op('a * (b >= 0)', grad_output, input)
class Log(Function):
def forward(ctx, input):
ctx.save_for_backward(input)
return unary_op('log(a)', input)
def backward(ctx, grad_output):
input, = ctx.saved_tensors
return binary_op('a / b', grad_output, input)
class Exp(Function):
def forward(ctx, input):
ret = unary_op('exp(a)', input)
ctx.save_for_backward(ret)
return ret
def backward(ctx, grad_output):
ret, = ctx.saved_tensors
return binary_op('a * b', grad_output, ret)
# ************* reduce ops *************
def reduce_prg(x, axis, code, code2, start):
rshape = x.shape[:1] if sum(x.shape[:axis+1]) == axis else list(x.shape[:axis]) + list(x.shape[axis+1:])
stride = np.prod(x.shape[axis+1:], dtype=i32)
bstride = np.prod(x.shape[axis:], dtype=i32) # stride to next "block"
nsums = np.prod(rshape, dtype=i32)
block, grid = get_block_grid(nelem=nsums)
mod = SourceModule(f"""
__global__ void prg(float *res, float *a_g, int stride, int bstride, int axis_dim, int nsums) {{
const int d_idx = blockIdx.x*blockDim.x + threadIdx.x;
const int idx = d_idx%stride;
const int n = d_idx/stride;
float out = {start};
if (d_idx<nsums) {{
for (int i=0; i<axis_dim ; i++) {{
float a = a_g[idx+stride*i+n*bstride];
{code};
}}
res[d_idx] = {code2};
}}
}}
""")
prg = mod.get_function("prg")
ret = buffer_new(rshape)
prg(ret.buf, x.buf, stride, bstride, i32(x.shape[axis]), nsums, block=block, grid=grid)
return ret
def reduce_op(code, code2, ret, axis=None, start='0.0'):
if axis is None:
axis = list(range(len(ret.shape)))
new_shape = (1,)
else:
new_shape = np.array(ret.shape)
new_shape[axis if isinstance(axis, int) else list(axis)] = 1
new_shape = tuple(new_shape)
axis = sorted(axis)
# reduces one axis at a time
for i in range(len(axis)):
ret = reduce_prg(ret, axis[i]-i, code, code2, start)
ret.shape = new_shape
return ret
class Sum(Function):
def forward(ctx, input, axis=None):
ctx.save_for_backward(input, axis)
return reduce_op("out += a", "out", input, axis=axis)
def backward(ctx, grad_output):
input, _ = ctx.saved_tensors
ret=binary_op('a+b', grad_output, buffer_new(input.shape, zero=True))
return ret
class Max(Function):
def forward(ctx, input, axis=None):
ret = reduce_op("out = max(a,out)", "out", input, axis=axis, start="-INFINITY")
ctx.save_for_backward(input, axis, ret)
return ret
def backward(ctx, grad_output):
input, axis, ret = ctx.saved_tensors
ret2 = binary_op("1.0*(a==b)", input, ret)
div = reduce_op("out += a", "out+1e-10", ret2, axis=axis)
ret3 = binary_op("a/b", ret2, div)
return binary_op('a*b', ret3, grad_output)
# ************* binary ops *************
@lru_cache
def get_binop_prg(code, complist):
ndims = len(complist)
args = "".join([f", int d{i}" for i in range(ndims)] + [f", int p{i}" for i in range(ndims-1)])
compute_idx_rets = "".join([f"\n int idx_ret{i} = (gid0 / {f'p{i}' if i < ndims-1 else '1'}) % d{i};" for i in range(ndims)])
idx_exprs = ["0", "0"] # [idx_x, idx_y]
for i in range(ndims):
for j in range(2):
if complist[i][j]:
idx_exprs[j] = f"idx_ret{i} + d{i}*({idx_exprs[j]})"
mod = SourceModule(f"""
__global__ void binop(float *res, float *a_g, float *b_g, int bufsz{args}) {{
const int gid0 = blockIdx.x*blockDim.x + threadIdx.x;{compute_idx_rets}
float a = a_g[{idx_exprs[0]}];
float b = b_g[{idx_exprs[1]}];
if (gid0 < bufsz) {{
res[gid0] = {code};
}}
}}
""")
return mod
def binary_op(code, x, y):
shape_ret, dimlist, complist = binary_broadcast(x.shape, y.shape)
block, grid = get_block_grid(shape_ret)
prod_list = np.array(dimlist, dtype=i32)[-1::-1].cumprod(dtype=i32)[-1::-1] # take cumprod from back to front
prg = get_binop_prg(code, tuple(complist)).get_function('binop')
ret = buffer_new(shape_ret, zero=True)
prg(ret.buf, x.buf, y.buf, i32(np.prod(shape_ret)), *dimlist, *(prod_list[1:]), block=block, grid=grid)
return ret
def unbroadcast(out, in_sh):
sum_axis = [i for i in range(len(in_sh)) if in_sh[i]==1 and out.shape[i]>1] if in_sh != (1,) else None
return reduce_op("out += a", "out", out, sum_axis)
class Add(Function):
def forward(ctx, x, y):
ctx.save_for_backward(x.shape, y.shape)
return binary_op('a+b', x, y)
def backward(ctx, grad_output):
grad_x, grad_y = grad_output, grad_output
shape_x, shape_y = ctx.saved_tensors
return unbroadcast(grad_x, shape_x), unbroadcast(grad_y, shape_y)
class Sub(Function):
def forward(ctx, x, y):
ctx.save_for_backward(x.shape, y.shape)
return binary_op('a-b', x, y)
def backward(ctx, grad_output):
grad_x, grad_y = grad_output, unary_op('-a', grad_output)
shape_x, shape_y = ctx.saved_tensors
return unbroadcast(grad_x, shape_x), unbroadcast(grad_y, shape_y)
class Mul(Function):
def forward(ctx, x, y):
ctx.save_for_backward(x, y)
return binary_op('a*b', x, y)
def backward(ctx, grad_output):
x,y = ctx.saved_tensors
grad_x = binary_op('a*b', y, grad_output)
grad_y = binary_op('a*b', x, grad_output)
return unbroadcast(grad_x, x.shape), unbroadcast(grad_y, y.shape)
class Pow(Function):
def forward(ctx, x, y):
ctx.save_for_backward(x, y)
return binary_op('pow(a,b)', x, y)
def backward(ctx, grad_output):
x,y = ctx.saved_tensors
grad_x = binary_op('a*b', grad_output,
binary_op('b * (pow((float)a, (float)(b-1.0)))', x, y))
grad_y = binary_op('a*b', grad_output,
binary_op('pow(a, (float)b) * log(a);', x, y))
return unbroadcast(grad_x, x.shape), unbroadcast(grad_y, y.shape)
# ************* movement ops *************
class Reshape(Function):
def forward(ctx, x, shape):
ctx.save_for_backward(x.shape)
shape = tuple(-np.prod(x.shape) // np.prod(shape) if s == -1 else s for s in shape)
r = CudaBuffer(shape, hostbuf=x) # NOTE: this is not a copy
assert np.prod(x.shape) == np.prod(r.shape)
return r
def backward(ctx, grad_output):
in_shape, = ctx.saved_tensors
return CudaBuffer(in_shape, hostbuf=grad_output)
def perm_axis(inp, order):
osize = np.array(inp.shape)[list(order)]
ret = buffer_new(osize)
nthr = int(np.prod(osize))
block, grid = get_block_grid(osize)
perm = SourceModule("""
__global__ void perm(float *a_g, float *res_g, int n_axis,
float *shape, float *order, int bufsz) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
int gi = gid;
int idx = 0;
if (gid < bufsz) {
for(int i = n_axis-1; i>-1; i--) {
int stride = 1;
for(int j=(int)order[i]+1; j<n_axis; j++) stride *= (int)shape[j];
idx += (gi % (int)shape[(int)order[i]])*stride;
gi /= (int)shape[(int)order[i]];
}
res_g[gid] = a_g[idx];
}
}""").get_function("perm")
perm(inp.buf, ret.buf, i32(len(osize)),
buffer_np(np.array(inp.shape, dtype=np.float32)),
buffer_np(np.array(order, dtype=np.float32)), i32(nthr),
block=block, grid=grid)
return ret
class Transpose(Function):
def forward(ctx, x, order=(1,0)):
ctx.save_for_backward(order)
return perm_axis(x, order)
def backward(ctx, grad_output):
return perm_axis(grad_output, np.argsort(ctx.order))
# TODO: merge this with perm axis
def inner_slice(x, arg):
shift = [y[0] for y in arg]
oshape = [y[1]-y[0] for y in arg]
ret = buffer_new(oshape)
nthr = int(np.prod(oshape))
block, grid = get_block_grid(oshape)
gslice = SourceModule("""
__global__ void gslice(float *input, float *output, int prod, int n_dims,
float *shape_x, float *shape_ret, float *shift, int bufsz) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
if (gid < bufsz) {
int iptr = 0;
int zero = 1;
for (int dim = 0; dim < n_dims; dim++) {
prod /= (int)shape_ret[dim];
int sidx = (gid / prod) % (int)shape_ret[dim] + (int)shift[dim];
zero &= (sidx >= 0 && sidx < (int)shape_x[dim]);
iptr = (iptr * (int)shape_x[dim]) + sidx;
}
output[gid] = zero ? input[iptr] : 0.0;
}
}""").get_function('gslice')
gslice(x.buf, ret.buf, i32(np.prod(ret.shape)), i32(len(ret.shape)),
buffer_np(np.array(x.shape, dtype=np.int32)),
buffer_np(np.array(ret.shape, dtype=np.int32)),
buffer_np(np.array(shift, dtype=np.int32)), i32(nthr),
block=block, grid=grid)
return ret
class Slice(Function):
def forward(ctx, x, arg=None):
ctx.save_for_backward(x.shape)
return inner_slice(x, arg)
def backward(ctx, grad_output):
shape, = ctx.saved_tensors
narg = [(0-p[0], grad_output.shape[i]+(shape[i]-p[1])) for i,p in enumerate(ctx.arg)]
return inner_slice(grad_output, narg)
# ************* processing ops *************
class Matmul(Function):
def forward(ctx, input, weight):
assert input.shape[-1] == weight.shape[-2]
cnt = np.prod(input.shape[0:-2]) if len(input.shape) > 2 else 1
isize, msize, osize = i32(input.shape[-2]), i32(input.shape[-1]), i32(weight.shape[-1])
ret = buffer_new(list(input.shape[0:-2])+[isize, osize])
nthr = int(np.prod(ret.shape))
block, grid = get_block_grid(nelem=nthr)
matmul = SourceModule("""
__global__ void matmul(float *input, float *weight, float *res,
int isize, int is0, int is1, int msize,
int ws0, int ws1, int osize, int bufsz) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
int stride = gid/(osize*isize);
int X = gid%isize; // isize
int Y = (gid/isize)%osize; // osize
int ind = X * osize + Y + isize*osize*stride;
if (ind < bufsz) {
float ret = 0.0;
for (int x = 0; x < msize; x++) {
ret += input[X * is0 + x * is1 + isize*msize*stride] *
weight[Y * ws0 + x * ws1 + msize*osize*stride];
}
res[ind] = ret;
}
}""").get_function('matmul')
ctx.save_for_backward(input, weight, matmul, cnt)
# (isize,msize) x (msize,osize) = (isize,osize)
matmul(input.buf, weight.buf, ret.buf, i32(isize),
i32(msize), i32(1), i32(msize), i32(1), i32(osize),
i32(osize), i32(nthr), block=block, grid=grid)
return ret
def backward(ctx, grad_output):
input, weight, matmul, cnt = ctx.saved_tensors
isize, msize, osize = i32(input.shape[-2]), i32(input.shape[-1]), i32(weight.shape[-1])
grad_input = buffer_new(input.shape)
grad_weight = buffer_new(weight.shape)
nthr = int(np.prod(grad_input.shape))
block, grid = get_block_grid(nelem=nthr)
# (isize,osize) x (msize,osize) = (isize,msize)
matmul(grad_output.buf, weight.buf, grad_input.buf, i32(isize),
i32(osize), i32(1), i32(osize), i32(osize), i32(1),
i32(msize), i32(nthr), block=block, grid=grid)
nthr = int(np.prod(grad_weight.shape))
block, grid = get_block_grid(nelem=nthr)
# (isize,msize) x (isize,osize) = (msize,osize)
matmul(input.buf, grad_output.buf, grad_weight.buf, i32(msize), i32(1),
i32(msize), i32(isize), i32(1), i32(osize),
i32(osize), i32(nthr), block=block, grid=grid)
return grad_input, grad_weight
class Conv2D(Function):
def forward(ctx, x, w, stride=1, groups=1):
if isinstance(ctx.stride, int): ctx.stride = (ctx.stride, ctx.stride)
cout,cin,H,W = w.shape
ys,xs = ctx.stride
bs,cin_,iy,ix = x.shape
oy,ox = (iy-(H-ys))//ys, (ix-(W-xs))//xs
if cin*ctx.groups != cin_:
raise Exception(f"Input Tensor shape {x.shape} does not match the shape of the weights {w.shape}. ({cin*ctx.groups} vs. {cin_})")
assert cout % ctx.groups == 0
rcout = cout//ctx.groups
ctx.save_for_backward(x,w)
# output buffer
ret = buffer_new((bs, cout, oy, ox))
nthr = int(np.prod(ret.shape))
block, grid = get_block_grid(nelem=nthr)
conv = SourceModule("""
__global__ void conv(float *input, float *weight, float *output,
int H, int W, int groups, int rcout, int cin, int oy, int ox, int iy, int ix, int ys, int xs, int bs, int bufsz) {
const int gid = blockIdx.x*blockDim.x + threadIdx.x;
int B = gid/(groups*rcout*oy*ox); // range 0-bs
int g = (gid/(rcout*oy*ox))%groups;
int c = (gid/(oy*ox))%rcout;
int Y = gid%oy; // range 0-oy
int X = (gid/oy)%ox; // range 0-ox
int IY = Y*ys;
int IX = X*xs;
int ind = B*groups*rcout*oy*ox + g*rcout*oy*ox + c*oy*ox + Y*ox + X;
if (ind < bufsz) {
float acc = 0.0;
for (int ci = 0; ci < cin; ci++) {
for (int y = IY; y < IY+H; y++) {
for (int x = IX; x < IX+W; x++) {
acc += input[B*groups*cin*iy*ix + g*cin*iy*ix + ci*iy*ix + y*ix + x] *
weight[g*rcout*cin*H*W + c*cin*H*W + ci*H*W + (y-IY)*W + (x-IX)];
}
}
}
output[ind] = acc;
}
}""").get_function('conv')
conv(x.buf, w.buf, ret.buf,
i32(H), i32(W), i32(groups), i32(rcout), i32(cin),
i32(oy), i32(ox), i32(iy), i32(ix), i32(ys), i32(xs), i32(bs), i32(nthr),
block=block, grid=grid)
return ret
def backward(ctx, grad_output):
bs,_,oy,ox = grad_output.shape
x, w = ctx.saved_tensors
cout,cin,H,W = w.shape
ys,xs = ctx.stride
bs,cin_,iy,ix = x.shape
oy,ox = (iy-(H-ys))//ys, (ix-(W-xs))//xs
assert cin*ctx.groups == cin_
assert cout % ctx.groups == 0
rcout = cout//ctx.groups
convw = SourceModule("""
__global__ void convw(float *tensx, float *ggg, float *dw,
int H, int W, int groups, int rcout, int cin, int oy, int ox, int iy, int ix, int ys, int xs, int bs, int bufsz) {
const int gid = blockIdx.x*blockDim.x + threadIdx.x;
int g = gid/(rcout*cin*H*W); // range 0-groups
int c = (gid/(cin*H*W))%rcout; // range 0-rcout
int ci = (gid/(H*W))%cin; // range 0-cin
int y = gid%H; // range 0-H
int x = (gid/H)%W; // range 0-W
int ind = (gid/(H*W))*H*W + y*W + x;
if (ind < bufsz) {
float acc = 0.0;
for (int Y = 0; Y < oy; Y++) {
for (int X = 0; X < ox; X++) {
for (int B = 0; B < bs; B++) {
acc += ggg[B*groups*rcout*oy*ox + +g*rcout*oy*ox + c*oy*ox + Y*ox + X] *
tensx[B*groups*cin*iy*ix + g*cin*iy*ix + ci*iy*ix + (Y*ys+y)*ix + X*xs+x];
}
}
}
dw[ind] = acc;
}
}""").get_function('convw')
convx = SourceModule("""
__global__ void convx(float *tensw, float *ggg, float *dx,
int H, int W, int groups, int rcout, int cin, int oy, int ox, int iy, int ix, int ys, int xs, int bs, int bufsz) {
const int gid = blockIdx.x*blockDim.x + threadIdx.x;
int B = gid/(groups*cin);
int g = gid%groups;
int ci = (gid/groups)%cin;
for (int Y = 0; Y < oy; Y++) {
for (int X = 0; X < ox; X++) {
for (int y = 0; y < H; y++) {
for (int x = 0; x < W; x++) {
int ind = B*groups*cin*iy*ix + g*cin*iy*ix + ci*iy*ix + (Y*ys+y)*ix + X*xs+x;
if (ind < bufsz) {
float acc = 0.0;
for (int c = 0; c < rcout; c++) {
acc += ggg[B*groups*rcout*oy*ox + g*rcout*oy*ox + c*oy*ox + Y*ox + X] *
tensw[g*rcout*cin*H*W + c*cin*H*W + ci*H*W + y*W + x];
}
dx[ind] += acc;
}
}
}
}
}
}
""").get_function('convx')
conv_args = i32(H), i32(W), i32(ctx.groups), i32(rcout), i32(cin), i32(oy), i32(ox), i32(iy), i32(ix), i32(ys), i32(xs), i32(bs)
dw = buffer_new(w.shape)
nthr = int(ctx.groups*rcout*cin*H*W)
block, grid = get_block_grid(nelem=nthr)
convw(x.buf, grad_output.buf, dw.buf, *conv_args, i32(np.prod(w.shape)), block=block, grid=grid)
dx = buffer_new(x.shape, True)
nthr = int(bs*ctx.groups*cin)
block, grid = get_block_grid(nelem=nthr)
convx(w.buf, grad_output.buf, dx.buf, *conv_args, i32(np.prod(x.shape)), block=block, grid=grid)
return dx, dw
| 32.813499 | 135 | 0.598733 |
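A hypothetical end-to-end sketch of the buffer and kernel helpers defined above, assuming a CUDA-capable GPU, a working pycuda install, and that the module is importable as `ops_cuda`:

```python
# Hypothetical usage sketch: requires a CUDA GPU and pycuda; the module name
# ops_cuda is an assumption about how this file is imported.
import numpy as np
from ops_cuda import CudaBuffer, unary_op

x = CudaBuffer.fromCPU(np.array([[-1.0, 2.0], [3.0, -4.0]], dtype=np.float32))
y = unary_op('max(a, (float)0.)', x)   # same expression ReLU.forward uses
print(y.toCPU())                       # [[0. 2.] [3. 0.]]
```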
069daccf9b6e57b2c71a629321c344a5d9cc5a79 | 13,770 | py | Python | aries_cloudagent/core/tests/test_dispatcher.py | ldej/aries-cloudagent-python | 25b7a9c08921e67b0962c434102489884ac403b2 | [
"Apache-2.0"
] | null | null | null | aries_cloudagent/core/tests/test_dispatcher.py | ldej/aries-cloudagent-python | 25b7a9c08921e67b0962c434102489884ac403b2 | [
"Apache-2.0"
] | 1 | 2020-03-06T12:11:29.000Z | 2020-03-06T12:11:29.000Z | aries_cloudagent/core/tests/test_dispatcher.py | ldej/aries-cloudagent-python | 25b7a9c08921e67b0962c434102489884ac403b2 | [
"Apache-2.0"
] | null | null | null | import asyncio
import json
from asynctest import TestCase as AsyncTestCase, mock as async_mock
from marshmallow import EXCLUDE
from ...config.injection_context import InjectionContext
from ...connections.models.connection_record import ConnectionRecord
from ...core.protocol_registry import ProtocolRegistry
from ...messaging.agent_message import AgentMessage, AgentMessageSchema
from ...messaging.responder import MockResponder
from ...messaging.util import datetime_now
from ...protocols.didcomm_prefix import DIDCommPrefix
from ...protocols.problem_report.v1_0.message import ProblemReport
from ...transport.inbound.message import InboundMessage
from ...transport.inbound.receipt import MessageReceipt
from ...transport.outbound.message import OutboundMessage
from .. import dispatcher as test_module
def make_context() -> InjectionContext:
context = InjectionContext()
context.injector.bind_instance(ProtocolRegistry, ProtocolRegistry())
collector = test_module.Collector()
context.injector.bind_instance(test_module.Collector, collector)
return context
def make_inbound(payload) -> InboundMessage:
return InboundMessage(payload, MessageReceipt(thread_id="dummy-thread"))
class Receiver:
def __init__(self):
self.messages = []
async def send(
self,
context: InjectionContext,
message: OutboundMessage,
inbound: InboundMessage = None,
):
self.messages.append((context, message, inbound))
class StubAgentMessage(AgentMessage):
class Meta:
handler_class = "StubAgentMessageHandler"
schema_class = "StubAgentMessageSchema"
message_type = "proto-name/1.1/message-type"
class StubAgentMessageSchema(AgentMessageSchema):
class Meta:
model_class = StubAgentMessage
unknown = EXCLUDE
class StubAgentMessageHandler:
async def handle(self, context, responder):
pass
class StubV1_2AgentMessage(AgentMessage):
class Meta:
handler_class = "StubV1_2AgentMessageHandler"
schema_class = "StubV1_2AgentMessageSchema"
message_type = "proto-name/1.2/message-type"
class StubV1_2AgentMessageSchema(AgentMessageSchema):
class Meta:
model_class = StubV1_2AgentMessage
        unknown = EXCLUDE
class StubV1_2AgentMessageHandler:
async def handle(self, context, responder):
pass
class TestDispatcher(AsyncTestCase):
async def test_dispatch(self):
context = make_context()
context.enforce_typing = False
registry = await context.inject(ProtocolRegistry)
registry.register_message_types(
{
pfx.qualify(StubAgentMessage.Meta.message_type): StubAgentMessage
for pfx in DIDCommPrefix
}
)
dispatcher = test_module.Dispatcher(context)
await dispatcher.setup()
rcv = Receiver()
message = {
"@type": DIDCommPrefix.qualify_current(StubAgentMessage.Meta.message_type)
}
with async_mock.patch.object(
StubAgentMessageHandler, "handle", autospec=True
) as handler_mock, async_mock.patch.object(
test_module, "ConnectionManager", autospec=True
) as conn_mgr_mock:
conn_mgr_mock.return_value = async_mock.MagicMock(
find_inbound_connection=async_mock.CoroutineMock(
return_value=async_mock.MagicMock(connection_id="dummy")
)
)
await dispatcher.queue_message(make_inbound(message), rcv.send)
await dispatcher.task_queue
handler_mock.assert_awaited_once()
assert isinstance(handler_mock.call_args[0][1].message, StubAgentMessage)
assert isinstance(
handler_mock.call_args[0][2], test_module.DispatcherResponder
)
async def test_dispatch_versioned_message(self):
context = make_context()
context.enforce_typing = False
registry = await context.inject(ProtocolRegistry)
registry.register_message_types(
{
DIDCommPrefix.qualify_current(
StubAgentMessage.Meta.message_type
): StubAgentMessage
},
version_definition={
"major_version": 1,
"minimum_minor_version": 0,
"current_minor_version": 1,
"path": "v1_1",
},
)
dispatcher = test_module.Dispatcher(context)
await dispatcher.setup()
rcv = Receiver()
message = {
"@type": DIDCommPrefix.qualify_current(StubAgentMessage.Meta.message_type)
}
with async_mock.patch.object(
StubAgentMessageHandler, "handle", autospec=True
) as handler_mock:
await dispatcher.queue_message(make_inbound(message), rcv.send)
await dispatcher.task_queue
handler_mock.assert_awaited_once()
assert isinstance(handler_mock.call_args[0][1].message, StubAgentMessage)
assert isinstance(
handler_mock.call_args[0][2], test_module.DispatcherResponder
)
async def test_dispatch_versioned_message_no_message_class(self):
context = make_context()
context.enforce_typing = False
registry = await context.inject(ProtocolRegistry)
registry.register_message_types(
{
DIDCommPrefix.qualify_current(
StubAgentMessage.Meta.message_type
): StubAgentMessage
},
version_definition={
"major_version": 1,
"minimum_minor_version": 0,
"current_minor_version": 1,
"path": "v1_1",
},
)
dispatcher = test_module.Dispatcher(context)
await dispatcher.setup()
rcv = Receiver()
message = {"@type": "proto-name/1.1/no-such-message-type"}
with async_mock.patch.object(
StubAgentMessageHandler, "handle", autospec=True
) as handler_mock:
await dispatcher.queue_message(make_inbound(message), rcv.send)
await dispatcher.task_queue
assert rcv.messages and isinstance(rcv.messages[0][1], OutboundMessage)
payload = json.loads(rcv.messages[0][1].payload)
assert payload["@type"] == DIDCommPrefix.qualify_current(
ProblemReport.Meta.message_type
)
async def test_dispatch_versioned_message_message_class_deserialize_x(self):
context = make_context()
context.enforce_typing = False
registry = await context.inject(ProtocolRegistry)
registry.register_message_types(
{
DIDCommPrefix.qualify_current(
StubAgentMessage.Meta.message_type
): StubAgentMessage
},
version_definition={
"major_version": 1,
"minimum_minor_version": 0,
"current_minor_version": 1,
"path": "v1_1",
},
)
dispatcher = test_module.Dispatcher(context)
await dispatcher.setup()
rcv = Receiver()
message = {"@type": "proto-name/1.1/no-such-message-type"}
with async_mock.patch.object(
StubAgentMessageHandler, "handle", autospec=True
) as handler_mock, async_mock.patch.object(
registry, "resolve_message_class", async_mock.MagicMock()
) as mock_resolve:
mock_resolve.return_value = async_mock.MagicMock(
deserialize=async_mock.MagicMock(
side_effect=test_module.BaseModelError()
)
)
await dispatcher.queue_message(make_inbound(message), rcv.send)
await dispatcher.task_queue
assert rcv.messages and isinstance(rcv.messages[0][1], OutboundMessage)
payload = json.loads(rcv.messages[0][1].payload)
assert payload["@type"] == DIDCommPrefix.qualify_current(
ProblemReport.Meta.message_type
)
async def test_dispatch_versioned_message_handle_greater_succeeds(self):
context = make_context()
context.enforce_typing = False
registry = await context.inject(ProtocolRegistry)
registry.register_message_types(
{
DIDCommPrefix.qualify_current(
StubAgentMessage.Meta.message_type
): StubAgentMessage
},
version_definition={
"major_version": 1,
"minimum_minor_version": 0,
"current_minor_version": 1,
"path": "v1_1",
},
)
dispatcher = test_module.Dispatcher(context)
await dispatcher.setup()
rcv = Receiver()
message = {
"@type": DIDCommPrefix.qualify_current(
StubV1_2AgentMessage.Meta.message_type
)
}
with async_mock.patch.object(
StubAgentMessageHandler, "handle", autospec=True
) as handler_mock:
await dispatcher.queue_message(make_inbound(message), rcv.send)
await dispatcher.task_queue
handler_mock.assert_awaited_once()
assert isinstance(handler_mock.call_args[0][1].message, StubAgentMessage)
assert isinstance(
handler_mock.call_args[0][2], test_module.DispatcherResponder
)
async def test_dispatch_versioned_message_fail(self):
context = make_context()
context.enforce_typing = False
registry = await context.inject(ProtocolRegistry)
registry.register_message_types(
{
DIDCommPrefix.qualify_current(
StubV1_2AgentMessage.Meta.message_type
): StubV1_2AgentMessage
},
version_definition={
"major_version": 1,
"minimum_minor_version": 2,
"current_minor_version": 2,
"path": "v1_2",
},
)
dispatcher = test_module.Dispatcher(context)
await dispatcher.setup()
rcv = Receiver()
message = {
"@type": DIDCommPrefix.qualify_current(StubAgentMessage.Meta.message_type)
}
with async_mock.patch.object(
StubAgentMessageHandler, "handle", autospec=True
) as handler_mock:
await dispatcher.queue_message(make_inbound(message), rcv.send)
await dispatcher.task_queue
assert rcv.messages and isinstance(rcv.messages[0][1], OutboundMessage)
payload = json.loads(rcv.messages[0][1].payload)
assert payload["@type"] == DIDCommPrefix.qualify_current(
ProblemReport.Meta.message_type
)
async def test_bad_message_dispatch(self):
dispatcher = test_module.Dispatcher(make_context())
await dispatcher.setup()
rcv = Receiver()
bad_message = {"bad": "message"}
await dispatcher.queue_message(make_inbound(bad_message), rcv.send)
await dispatcher.task_queue
assert rcv.messages and isinstance(rcv.messages[0][1], OutboundMessage)
payload = json.loads(rcv.messages[0][1].payload)
assert payload["@type"] == DIDCommPrefix.qualify_current(
ProblemReport.Meta.message_type
)
async def test_dispatch_log(self):
context = make_context()
context.enforce_typing = False
registry = await context.inject(ProtocolRegistry)
registry.register_message_types(
{
DIDCommPrefix.qualify_current(
StubAgentMessage.Meta.message_type
): StubAgentMessage
},
)
dispatcher = test_module.Dispatcher(context)
await dispatcher.setup()
mock_task = async_mock.MagicMock(
exc_info=(KeyError, KeyError("sample exception"), "..."),
ident="abc",
timing={
"queued": 1234567890,
"unqueued": 1234567899,
"started": 1234567901,
"ended": 1234567999,
},
)
dispatcher.log_task(mock_task)
async def test_create_outbound_send_webhook(self):
context = make_context()
context.message_receipt = async_mock.MagicMock(in_time=datetime_now())
context.settings = {"timing.enabled": True}
message = StubAgentMessage()
responder = test_module.DispatcherResponder(
context, message, None, async_mock.CoroutineMock()
)
result = await responder.create_outbound(message)
assert json.loads(result.payload)["@type"] == DIDCommPrefix.qualify_current(
StubAgentMessage.Meta.message_type
)
await responder.send_webhook("topic", "payload")
async def test_create_send_outbound(self):
message = StubAgentMessage()
responder = MockResponder()
outbound_message = await responder.create_outbound(message)
await responder.send_outbound(outbound_message)
assert len(responder.messages) == 1
async def test_create_enc_outbound(self):
context = make_context()
message = b"abc123xyz7890000"
responder = test_module.DispatcherResponder(
context, message, None, async_mock.CoroutineMock()
)
with async_mock.patch.object(
responder, "send_outbound", async_mock.CoroutineMock()
) as mock_send_outbound:
await responder.send(message)
assert mock_send_outbound.called_once()
| 36.62234 | 86 | 0.627596 |
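The fixtures above repeatedly perform the same registration step; isolated, it looks like the following sketch, which assumes `aries_cloudagent` is installed and that the `StubAgentMessage` class from this test module is in scope:

```python
# Sketch of the message-type registration step; assumes aries_cloudagent is
# installed and StubAgentMessage is defined as in the test module above.
from aries_cloudagent.core.protocol_registry import ProtocolRegistry
from aries_cloudagent.protocols.didcomm_prefix import DIDCommPrefix

registry = ProtocolRegistry()
registry.register_message_types(
    {
        pfx.qualify(StubAgentMessage.Meta.message_type): StubAgentMessage
        for pfx in DIDCommPrefix
    }
)
```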
77eb95970e197bf9037c938536f534a47d58cec2 | 434 | py | Python | taggit/migrations/0004_taggeditem_add_unique_index.py | texastribune/django-taggit | 3b57f28b2c67886632a6737f39dfe0f08dca101e | [
"BSD-3-Clause"
] | null | null | null | taggit/migrations/0004_taggeditem_add_unique_index.py | texastribune/django-taggit | 3b57f28b2c67886632a6737f39dfe0f08dca101e | [
"BSD-3-Clause"
] | null | null | null | taggit/migrations/0004_taggeditem_add_unique_index.py | texastribune/django-taggit | 3b57f28b2c67886632a6737f39dfe0f08dca101e | [
"BSD-3-Clause"
] | 1 | 2018-03-15T22:52:09.000Z | 2018-03-15T22:52:09.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
("taggit", "0003_rm_unique_tagname"),
]
operations = [
migrations.AlterUniqueTogether(
name="taggeditem", unique_together={("content_type", "object_id", "tag")}
)
]
| 22.842105 | 85 | 0.645161 |
0300d0c0835aec715e1a6d74d6c3b0b24378d7f7 | 2,291 | py | Python | dkron/migrations/0001_initial_20210729.py | surface-security/django-dkron | e85810cfebfc51ad6143befebf758b000662494e | [
"MIT"
] | 1 | 2022-01-24T10:29:14.000Z | 2022-01-24T10:29:14.000Z | dkron/migrations/0001_initial_20210729.py | surface-security/django-dkron | e85810cfebfc51ad6143befebf758b000662494e | [
"MIT"
] | 6 | 2022-01-17T16:50:38.000Z | 2022-03-23T17:46:33.000Z | dkron/migrations/0001_initial_20210729.py | surface-security/django-dkron | e85810cfebfc51ad6143befebf758b000662494e | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-07-29 08:53
from django.db import migrations, models
def data_fix(apps, schema_editor):
apps.get_model("notifications", "Event").objects.update_or_create(name='dkron_failed_job')
class Migration(migrations.Migration):
initial = True
replaces = [
('dkron', '0001_initial_20210326'),
('dkron', '0004_remove_job_retries'),
('dkron', '0001_initial_v2'),
('dkron', '0001_initial'),
('dkron', '0002_auto_20190103_1758'),
('dkron', '0003_auto_20190110_1550'),
('dkron', '0004_auto_20190110_1804'),
('dkron', '0005_auto_20190213_1700'),
('dkron', '0006_job_retries'),
('dkron', '0001_squashed_0006_job_retries'),
('dkron', '0003_create_notify_event'),
('dkron', '0005_auto_20201104_0945'),
('dkron', '0002_job_notify_on_error'),
]
dependencies = [
('notifications', '0001_initial_20210326'),
]
operations = [
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
(
'schedule',
models.CharField(
help_text='https://dkron.io/usage/cron-spec/ or "@parent JOBNAME" for dependent jobs',
max_length=255,
),
),
('command', models.CharField(max_length=255)),
('description', models.CharField(blank=True, max_length=255, null=True)),
('enabled', models.BooleanField(default=True)),
('use_shell', models.BooleanField(default=False, help_text='/bin/sh -c "..."')),
('last_run_date', models.DateTimeField(blank=True, editable=False, null=True)),
('last_run_success', models.BooleanField(editable=False, null=True)),
('notify_on_error', models.BooleanField(default=True)),
],
options={
'permissions': (('can_use_dashboard', 'Can use the dashboard'),),
},
),
migrations.RunPython(data_fix),
]
| 37.557377 | 114 | 0.566565 |
d36282fa3954ceab125882e1f061b4281e15c49b | 3,844 | py | Python | venv/lib/python3.8/site-packages/pyqtgraph/SignalProxy.py | willBear/willBear-Fundamental_Analysis | bc67eb1e69dcf6765c0b77314d37f7f165a7318f | [
"MIT"
] | 150 | 2018-03-27T16:45:37.000Z | 2022-03-30T03:47:56.000Z | venv/lib/python3.8/site-packages/pyqtgraph/SignalProxy.py | willBear/willBear-Fundamental_Analysis | bc67eb1e69dcf6765c0b77314d37f7f165a7318f | [
"MIT"
] | 34 | 2018-09-28T00:01:59.000Z | 2022-03-21T15:40:02.000Z | venv/lib/python3.8/site-packages/pyqtgraph/SignalProxy.py | willBear/willBear-Fundamental_Analysis | bc67eb1e69dcf6765c0b77314d37f7f165a7318f | [
"MIT"
] | 40 | 2018-04-06T19:42:21.000Z | 2022-01-11T00:34:17.000Z | # -*- coding: utf-8 -*-
from .Qt import QtCore
from .ptime import time
from . import ThreadsafeTimer
import weakref
__all__ = ['SignalProxy']
class SignalProxy(QtCore.QObject):
"""Object which collects rapid-fire signals and condenses them
into a single signal or a rate-limited stream of signals.
Used, for example, to prevent a SpinBox from generating multiple
signals when the mouse wheel is rolled over it.
Emits sigDelayed after input signals have stopped for a certain period of time.
"""
sigDelayed = QtCore.Signal(object)
def __init__(self, signal, delay=0.3, rateLimit=0, slot=None):
"""Initialization arguments:
signal - a bound Signal or pyqtSignal instance
delay - Time (in seconds) to wait for signals to stop before emitting (default 0.3s)
slot - Optional function to connect sigDelayed to.
rateLimit - (signals/sec) if greater than 0, this allows signals to stream out at a
steady rate while they are being received.
"""
QtCore.QObject.__init__(self)
signal.connect(self.signalReceived)
self.signal = signal
self.delay = delay
self.rateLimit = rateLimit
self.args = None
self.timer = ThreadsafeTimer.ThreadsafeTimer()
self.timer.timeout.connect(self.flush)
self.block = False
self.slot = weakref.ref(slot)
self.lastFlushTime = None
if slot is not None:
self.sigDelayed.connect(slot)
def setDelay(self, delay):
self.delay = delay
def signalReceived(self, *args):
"""Received signal. Cancel previous timer and store args to be forwarded later."""
if self.block:
return
self.args = args
if self.rateLimit == 0:
self.timer.stop()
self.timer.start((self.delay*1000)+1)
else:
now = time()
if self.lastFlushTime is None:
leakTime = 0
else:
lastFlush = self.lastFlushTime
leakTime = max(0, (lastFlush + (1.0 / self.rateLimit)) - now)
self.timer.stop()
self.timer.start((min(leakTime, self.delay)*1000)+1)
def flush(self):
"""If there is a signal queued up, send it now."""
if self.args is None or self.block:
return False
#self.emit(self.signal, *self.args)
self.sigDelayed.emit(self.args)
self.args = None
self.timer.stop()
self.lastFlushTime = time()
return True
def disconnect(self):
self.block = True
try:
self.signal.disconnect(self.signalReceived)
except:
pass
try:
self.sigDelayed.disconnect(self.slot())
except:
pass
#def proxyConnect(source, signal, slot, delay=0.3):
#"""Connect a signal to a slot with delay. Returns the SignalProxy
#object that was created. Be sure to store this object so it is not
#garbage-collected immediately."""
#sp = SignalProxy(source, signal, delay)
#if source is None:
#sp.connect(sp, QtCore.SIGNAL('signal'), slot)
#else:
#sp.connect(sp, signal, slot)
#return sp
if __name__ == '__main__':
from .Qt import QtGui
app = QtGui.QApplication([])
win = QtGui.QMainWindow()
spin = QtGui.QSpinBox()
win.setCentralWidget(spin)
win.show()
def fn(*args):
print("Raw signal:", args)
def fn2(*args):
print("Delayed signal:", args)
spin.valueChanged.connect(fn)
#proxy = proxyConnect(spin, QtCore.SIGNAL('valueChanged(int)'), fn)
proxy = SignalProxy(spin.valueChanged, delay=0.5, slot=fn2)
| 32.302521 | 92 | 0.593913 |
906195b9188fbe70fb6cd70ea3b28e5efe8c015e | 6,570 | py | Python | nobos_commons/feature_preparations/feature_vec_producers/from_skeleton_joints/potion/color_joint_heatmap_builder_full_img.py | noboevbo/nobos_commons | 471e52e10fd2228c106777c72d8439e58b047003 | ["MIT"] | 2 | 2020-06-03T16:28:44.000Z | 2020-10-10T03:07:23.000Z | nobos_commons/feature_preparations/feature_vec_producers/from_skeleton_joints/potion/color_joint_heatmap_builder_full_img.py | noboevbo/nobos_commons | 471e52e10fd2228c106777c72d8439e58b047003 | ["MIT"] | null | null | null | nobos_commons/feature_preparations/feature_vec_producers/from_skeleton_joints/potion/color_joint_heatmap_builder_full_img.py | noboevbo/nobos_commons | 471e52e10fd2228c106777c72d8439e58b047003 | ["MIT"] | 4 | 2020-10-10T03:07:25.000Z | 2021-09-30T01:11:02.000Z | from typing import List, Dict
import numpy as np
from nobos_commons.augmentations.joint_augmenter import JointAugmenter
from nobos_commons.data_structures.dimension import ImageSize
from nobos_commons.data_structures.human import Human
class JointColorHeatmapResult(object):
__slots__ = ['human_id', 'joint_color_heatmaps']
def __init__(self, human_id: str, joint_color_heatmaps: np.ndarray):
self.human_id = human_id
self.joint_color_heatmaps = joint_color_heatmaps
class ColorJointHeatmapBuilderAbsolutJointPositions(object):
__slots__ = ['__num_joints', '__heatmap_size', 'image_size', '__sigma', '__heatmap_radius', '__blank_image',
'__color_channels', '__color_scheme']
def __init__(self, num_joints: int, heatmap_size: ImageSize, image_size: ImageSize, color_scheme: List[List[int]]):
self.__num_joints = num_joints
self.image_size = np.array([image_size.width, image_size.height])
self.__heatmap_size = np.array([heatmap_size.width, heatmap_size.height])
self.__sigma = 2
self.__heatmap_radius = self.__sigma * 2
self.__color_scheme = color_scheme
self.__color_channels = len(color_scheme[0])
self.__blank_image = np.zeros([num_joints, heatmap_size.height, heatmap_size.width, self.__color_channels])
def get_color_joint_heatmaps_for_buffers(self, human_pose_results: List[Human], joint_augmenter: JointAugmenter = None) -> np.ndarray:
result: np.ndarray = None
if len(human_pose_results) == len(self.__color_scheme):
color_img = np.zeros(self.__blank_image.shape)
for human_num in range(0, len(human_pose_results)):
human = human_pose_results[human_num]
if human is None:
color_img += self.__blank_image
else:
joint_list = human.joint_list
if joint_augmenter is not None:
joint_list = joint_augmenter.get_augmented_joint_list(joint_list)
color_img += self.get_joints_as_color_img(joint_list, self.__color_scheme[human_num])
if color_img.max() == 0:
return color_img # TODO ... thats bad, we shouldnt have this in our dataset / we should augment other than that
result = (color_img / color_img.max()) * 255
return result
def get_joints_as_color_img(self, joint_list: List[List[int]], color: List[int]):
assert len(color) == self.__color_channels and len(joint_list) == self.__num_joints
return self.__get_joint_images_gauss(joint_list, color)
# TODO: Create a method which cuts the image on max by setting 0/0 to x_min/y_min and heat_x/heat_y bei x_max/y_max
# TODO: Scale every joint accordingly to this. Will replaced this feat stride by image size / heatmap size
def __get_joint_images_gauss(self, joints: List[List[int]], color: List[int]):
num_color_channels = len(color)
target_weight = np.ones((self.__num_joints, 1), dtype=np.float32)
target = np.zeros((self.__num_joints,
3,
self.__heatmap_size[1],
self.__heatmap_size[0]
),
dtype=np.float32)
for joint_id in range(self.__num_joints):
feat_stride = self.image_size / self.__heatmap_size
heatmap_joint_center_x = int(joints[joint_id][0] / feat_stride[0] + 0.5) # TODO: Why +0.5???????
heatmap_joint_center_y = int(joints[joint_id][1] / feat_stride[1] + 0.5)
heatmap_joint_upper_left = [int(heatmap_joint_center_x - self.__heatmap_radius),
int(heatmap_joint_center_y - self.__heatmap_radius)]
heatmap_joint_bottom_right = [int(heatmap_joint_center_x + self.__heatmap_radius + 1),
int(heatmap_joint_center_y + self.__heatmap_radius + 1)]
# Check that any part of the gaussian is in-bounds
if heatmap_joint_upper_left[0] >= self.__heatmap_size[0] or heatmap_joint_upper_left[1] >= \
self.__heatmap_size[1] \
or heatmap_joint_bottom_right[0] < 0 or heatmap_joint_bottom_right[1] < 0:
# If not, just return the image as is
target_weight[joint_id] = 0
continue
# # Generate gaussian
size = 2 * self.__heatmap_radius + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
# The gaussian is not normalized, we want the center value to equal 1
g = np.exp(
- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * self.__sigma ** 2)) # TODO: Sigma correct w heatmap size?
# Usable gaussian range
g_x = max(0, -heatmap_joint_upper_left[0]), min(heatmap_joint_bottom_right[0], self.__heatmap_size[0]) - \
heatmap_joint_upper_left[0]
g_y = max(0, -heatmap_joint_upper_left[1]), min(heatmap_joint_bottom_right[1], self.__heatmap_size[1]) - \
heatmap_joint_upper_left[1]
# Image range
img_x = max(0, heatmap_joint_upper_left[0]), min(heatmap_joint_bottom_right[0], self.__heatmap_size[0])
img_y = max(0, heatmap_joint_upper_left[1]), min(heatmap_joint_bottom_right[1], self.__heatmap_size[1])
v = target_weight[joint_id]
if v > 0.5:
for channel_num in range(0, num_color_channels):
target[joint_id][channel_num][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \
g[g_y[0]:g_y[1], g_x[0]:g_x[1]] * color[channel_num]
target = np.transpose(target, (0, 2, 3, 1))
return target
def colorize_heatmaps(self, heatmaps: np.ndarray, color: List[int], channels: int = 3, ):
color_array = np.array([[[color]]])
clipped = np.clip(heatmaps, 0, 1)
clipped = np.squeeze(clipped, axis=0)
clipped = clipped[:, :, :, None] * np.ones(3, dtype=int)[None, None, None, :]
color_map = clipped * color_array
return color_map
def colorize_heatmaps_by_scheme(self, heatmaps: np.ndarray, color_scheme: np.ndarray, channels: int = 3, ):
clipped = np.clip(heatmaps, 0, 1)
clipped = clipped[:, :, :, :, None] * np.ones(3, dtype=int)[None, None, None, None, :]
color_map = np.matmul(clipped, color_scheme)
return color_map
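# Usage sketch (constructor values are illustrative and the ImageSize signature is assumed
# to take width/height): build colour heatmaps for two tracked humans, drawing the first
# in red and the second in green.
#   builder = ColorJointHeatmapBuilderAbsolutJointPositions(
#       num_joints=18, heatmap_size=ImageSize(64, 64), image_size=ImageSize(1280, 720),
#       color_scheme=[[255, 0, 0], [0, 255, 0]])
#   frame = builder.get_color_joint_heatmaps_for_buffers([human_a, human_b])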
| 53.852459 | 138 | 0.62618 |
af56d9e1bd403187039f03a0491d80dfcd4c7497 | 7,276 | py | Python | model/models/saq.py | Xiatian-Zhu/FEAT | 48331e00dec8b1aa20f6cd7c397cef16f06ea2f6 | ["MIT"] | null | null | null | model/models/saq.py | Xiatian-Zhu/FEAT | 48331e00dec8b1aa20f6cd7c397cef16f06ea2f6 | ["MIT"] | null | null | null | model/models/saq.py | Xiatian-Zhu/FEAT | 48331e00dec8b1aa20f6cd7c397cef16f06ea2f6 | ["MIT"] | null | null | null | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from model.models import FewShotModel
# No-Reg for FEAT-STAR here
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v):
attn = torch.bmm(q, k.transpose(1, 2))
# print(f'**==> attn: {attn.shape}')
attn = attn / self.temperature
log_attn = F.log_softmax(attn, 2)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn, log_attn
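# For reference, the forward pass above computes standard scaled dot-product attention,
# output = softmax(Q K^T / temperature) V, with dropout applied to the attention weights;
# MultiHeadAttention below instantiates it with temperature = sqrt(d_k).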
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
self.layer_norm = nn.LayerNorm(d_model)
self.fc = nn.Linear(n_head * d_v, d_model)
nn.init.xavier_normal_(self.fc.weight)
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size()
# print(f"attn: len_q={len_q}, len_k={len_k}, len_v={len_v}")
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
# print(f'o==> Q: {q.shape}')
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
# print(f'o==> K: {k.shape}')
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
# print(f'o==> V: {v.shape}')
output, attn, log_attn = self.attention(q, k, v)
output = output.view(n_head, sz_b, len_q, d_v)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
output = self.dropout(self.fc(output))
output = self.layer_norm(output + residual)
return output
class SAQ(FewShotModel):
def __init__(self, args):
super().__init__(args)
if args.backbone_class == 'ConvNet':
hdim = 64
elif args.backbone_class == 'Res12':
hdim = 640
elif args.backbone_class == 'Res18':
hdim = 512
elif args.backbone_class == 'WRN':
hdim = 640
else:
            raise ValueError('Unknown backbone_class: {}'.format(args.backbone_class))
self.slf_attn = MultiHeadAttention(1, hdim, hdim, hdim, dropout=0.5)
def _forward(self, instance_embs, support_idx, query_idx):
emb_dim = instance_embs.size(-1)
# import pdb
# print(f'aa: {instance_embs.shape}')
# print(f'bb: {support_idx.shape}')
# print(support_idx)
# print(f'cc: {query_idx.shape}')
# print(query_idx)
num_batch = support_idx.shape[0]
num_shot =support_idx.shape[1]
num_way = support_idx.shape[-1]
num_support = int(np.prod(support_idx.shape[-2:])) # support_idx.shape[1] * num_way
num_query = int(np.prod(query_idx.shape[-2:]))
# print(f"o==>num_way={num_way}, num_support={num_support}, num_query={num_query}")
# organize support/query data
support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
# print(f'dd: {support.shape}')
query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))
# print(f'ee: {query.shape}')
# get mean of the support
# proto = support.mean(dim=1) # Ntask x NK x d
# print(f'ff: {proto.shape}')
# num_batch = proto.shape[0]
num_proto = num_way # proto.shape[1]
# num_query = np.prod(query_idx.shape[-2:])
# query: (num_batch, num_query, num_proto, num_emb)
# proto: (num_batch, num_proto, num_emb)
query = query.view(-1, emb_dim).unsqueeze(1)
# print(f'gg: {query.shape}')
support = support.view(num_batch, num_support, emb_dim)
# print(f'Support A: {support.shape}')
support = support.unsqueeze(1).expand(num_batch, num_query, num_support, emb_dim)
# print(f'Support B: {support.shape}')
support = support.view(num_batch*num_query, num_support, emb_dim)
# print(f'Support C: {support.shape}')
combined_ = torch.cat([support, query], 1)
# print(f'COmbined_ A: {combined_.shape}')
combined_1 = self.slf_attn(combined_, support, support)
# pdb.set_trace()
# os_ = self.slf_attn(support, support, support)
# print(f'COmbined_ B: {combined_1.shape}')
support, query = combined_1.split(num_support, 1)
# print(f'COmbined_ D: {support.shape}')
# print(f'COmbined_ E: {query.shape}')
support = support.view(num_batch*num_query, num_shot, num_way, emb_dim)
proto = support.mean(dim=1)
# print(f'support F: {support.shape}')
# print(f'support G: {proto.shape}')
# pdb.set_trace()
# proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
# print(f'A: {proto.shape}')
# proto = proto.view(num_batch*num_query, num_proto, emb_dim)
# print(f'B: {proto.shape}')
# refine by Transformer
# combined = torch.cat([proto, query], 1) # Nk x (N + 1) x d, batch_size = NK
# print(f'C: {combined.shape}')
# combined = self.slf_attn(combined, combined, combined)
# # compute distance for all batches
# proto, query = combined.split(num_proto, 1)
# print(f'D: {proto.shape}')
# print(f'E: {query.shape}')
if self.args.use_euclidean:
query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
# print(f'F: {query.shape}')
logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
# print(f'G: {logits.shape}')
else: # cosine similarity: more memory efficient
proto = F.normalize(proto, dim=-1) # normalize for cosine distance
logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
logits = logits.view(-1, num_proto)
return logits, None
| 39.759563 | 114 | 0.584112 |
c314f9e21920c8aa41e3c64699cd92ed48a64a0f | 6,933 | py | Python | nni/tools/nnictl/tensorboard_utils.py | HarshCasper/nni | 291bbbba9f296382015a77b2c88eb5db5b44bf94 | ["MIT"] | 1 | 2022-03-03T06:04:34.000Z | 2022-03-03T06:04:34.000Z | nni/tools/nnictl/tensorboard_utils.py | HarshCasper/nni | 291bbbba9f296382015a77b2c88eb5db5b44bf94 | ["MIT"] | null | null | null | nni/tools/nnictl/tensorboard_utils.py | HarshCasper/nni | 291bbbba9f296382015a77b2c88eb5db5b44bf94 | ["MIT"] | 1 | 2020-11-15T20:10:26.000Z | 2020-11-15T20:10:26.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import json
import re
import tempfile
from subprocess import call, Popen
from .rest_utils import rest_get, check_rest_server_quick, check_response
from .config_utils import Config, Experiments
from .url_utils import trial_jobs_url, get_local_urls
from .constants import REST_TIME_OUT
from .common_utils import print_normal, print_error, print_green, detect_process, detect_port, check_tensorboard_version
from .nnictl_utils import check_experiment_id
from .ssh_utils import create_ssh_sftp_client, copy_remote_directory_to_local
def parse_log_path(args, trial_content):
'''parse log path'''
path_list = []
host_list = []
for trial in trial_content:
if args.trial_id and args.trial_id != 'all' and trial.get('trialJobId') != args.trial_id:
continue
pattern = r'(?P<head>.+)://(?P<host>.+):(?P<path>.*)'
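        # Illustrative logPath (hypothetical value): 'sftp://10.0.0.1:/home/user/nni-experiments/xyz/trials/abc'
        # matches as host='10.0.0.1' and path='/home/user/nni-experiments/xyz/trials/abc'.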
match = re.search(pattern, trial['logPath'])
if match:
path_list.append(match.group('path'))
host_list.append(match.group('host'))
if not path_list:
print_error('Trial id %s error!' % args.trial_id)
exit(1)
return path_list, host_list
def copy_data_from_remote(args, nni_config, trial_content, path_list, host_list, temp_nni_path):
    '''Use an SSH client to copy data from the remote machine to the local machine'''
machine_list = nni_config.get_config('experimentConfig').get('machineList')
machine_dict = {}
local_path_list = []
for machine in machine_list:
machine_dict[machine['ip']] = {'port': machine['port'], 'passwd': machine['passwd'], 'username': machine['username'],
'sshKeyPath': machine.get('sshKeyPath'), 'passphrase': machine.get('passphrase')}
for index, host in enumerate(host_list):
local_path = os.path.join(temp_nni_path, trial_content[index].get('trialJobId'))
local_path_list.append(local_path)
print_normal('Copying log data from %s to %s' % (host + ':' + path_list[index], local_path))
sftp = create_ssh_sftp_client(host, machine_dict[host]['port'], machine_dict[host]['username'], machine_dict[host]['passwd'],
machine_dict[host]['sshKeyPath'], machine_dict[host]['passphrase'])
copy_remote_directory_to_local(sftp, path_list[index], local_path)
print_normal('Copy done!')
return local_path_list
def get_path_list(args, nni_config, trial_content, temp_nni_path):
'''get path list according to different platform'''
path_list, host_list = parse_log_path(args, trial_content)
platform = nni_config.get_config('experimentConfig').get('trainingServicePlatform')
if platform == 'local':
print_normal('Log path: %s' % ' '.join(path_list))
return path_list
elif platform == 'remote':
path_list = copy_data_from_remote(args, nni_config, trial_content, path_list, host_list, temp_nni_path)
print_normal('Log path: %s' % ' '.join(path_list))
return path_list
else:
print_error('Not supported platform!')
exit(1)
def format_tensorboard_log_path(path_list):
new_path_list = []
for index, value in enumerate(path_list):
new_path_list.append('name%d:%s' % (index + 1, value))
return ','.join(new_path_list)
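# For example (hypothetical paths), format_tensorboard_log_path(['/tmp/t1', '/tmp/t2'])
# returns 'name1:/tmp/t1,name2:/tmp/t2', the comma-separated "name:path" form that
# tensorboard's --logdir/--logdir_spec option expects for multiple named runs.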
def start_tensorboard_process(args, nni_config, path_list, temp_nni_path):
'''call cmds to start tensorboard process in local machine'''
if detect_port(args.port):
print_error('Port %s is used by another process, please reset port!' % str(args.port))
exit(1)
with open(os.path.join(temp_nni_path, 'tensorboard_stdout'), 'a+') as stdout_file, \
open(os.path.join(temp_nni_path, 'tensorboard_stderr'), 'a+') as stderr_file:
log_dir_cmd = '--logdir_spec' if check_tensorboard_version() >= '2.0' else '--logdir'
cmds = ['tensorboard', log_dir_cmd, format_tensorboard_log_path(path_list), '--port', str(args.port)]
tensorboard_process = Popen(cmds, stdout=stdout_file, stderr=stderr_file)
url_list = get_local_urls(args.port)
print_green('Start tensorboard success!')
print_normal('Tensorboard urls: ' + ' '.join(url_list))
tensorboard_process_pid_list = nni_config.get_config('tensorboardPidList')
if tensorboard_process_pid_list is None:
tensorboard_process_pid_list = [tensorboard_process.pid]
else:
tensorboard_process_pid_list.append(tensorboard_process.pid)
nni_config.set_config('tensorboardPidList', tensorboard_process_pid_list)
def stop_tensorboard(args):
'''stop tensorboard'''
experiment_id = check_experiment_id(args)
experiment_config = Experiments()
experiment_dict = experiment_config.get_all_experiments()
config_file_name = experiment_dict[experiment_id]['fileName']
nni_config = Config(config_file_name)
tensorboard_pid_list = nni_config.get_config('tensorboardPidList')
if tensorboard_pid_list:
for tensorboard_pid in tensorboard_pid_list:
try:
cmds = ['kill', '-9', str(tensorboard_pid)]
call(cmds)
except Exception as exception:
print_error(exception)
nni_config.set_config('tensorboardPidList', [])
print_normal('Stop tensorboard success!')
else:
print_error('No tensorboard configuration!')
def start_tensorboard(args):
'''start tensorboard'''
experiment_id = check_experiment_id(args)
experiment_config = Experiments()
experiment_dict = experiment_config.get_all_experiments()
config_file_name = experiment_dict[experiment_id]['fileName']
nni_config = Config(config_file_name)
rest_port = nni_config.get_config('restServerPort')
rest_pid = nni_config.get_config('restServerPid')
if not detect_process(rest_pid):
print_error('Experiment is not running...')
return
running, response = check_rest_server_quick(rest_port)
trial_content = None
if running:
response = rest_get(trial_jobs_url(rest_port), REST_TIME_OUT)
if response and check_response(response):
trial_content = json.loads(response.text)
else:
print_error('List trial failed...')
else:
print_error('Restful server is not running...')
if not trial_content:
print_error('No trial information!')
exit(1)
if len(trial_content) > 1 and not args.trial_id:
print_error('There are multiple trials, please set trial id!')
exit(1)
experiment_id = nni_config.get_config('experimentId')
temp_nni_path = os.path.join(tempfile.gettempdir(), 'nni', experiment_id)
os.makedirs(temp_nni_path, exist_ok=True)
path_list = get_path_list(args, nni_config, trial_content, temp_nni_path)
start_tensorboard_process(args, nni_config, path_list, temp_nni_path)
| 46.844595 | 133 | 0.700274 |
5d88e6c923c61ec1483db058ca39378d23885408 | 8,258 | py | Python | google/cloud/forseti/services/inventory/base/cloudasset.py | ogreface/forseti-security | a7a3573183fa1416c605dad683587717795fe13b | [
"Apache-2.0"
] | null | null | null | google/cloud/forseti/services/inventory/base/cloudasset.py | ogreface/forseti-security | a7a3573183fa1416c605dad683587717795fe13b | [
"Apache-2.0"
] | null | null | null | google/cloud/forseti/services/inventory/base/cloudasset.py | ogreface/forseti-security | a7a3573183fa1416c605dad683587717795fe13b | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Forseti Inventory Cloud Asset API integration."""
import os
import time
import concurrent.futures
from googleapiclient import errors
from google.cloud.forseti.common.gcp_api import cloudasset
from google.cloud.forseti.common.gcp_api import errors as api_errors
from google.cloud.forseti.common.util import file_loader
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.services.inventory.storage import CaiDataAccess
LOGGER = logger.get_logger(__name__)
CONTENT_TYPES = ['RESOURCE', 'IAM_POLICY']
# Any asset type referenced in cai_gcp_client.py needs to be added here.
DEFAULT_ASSET_TYPES = [
'google.appengine.Application',
'google.appengine.Service',
'google.appengine.Version',
'google.cloud.bigquery.Dataset',
'google.cloud.billing.BillingAccount',
'google.cloud.dataproc.Cluster',
'google.cloud.dns.ManagedZone',
'google.cloud.dns.Policy',
'google.cloud.kms.CryptoKey',
'google.cloud.kms.CryptoKeyVersion',
'google.cloud.kms.KeyRing',
'google.cloud.resourcemanager.Folder',
'google.cloud.resourcemanager.Organization',
'google.cloud.resourcemanager.Project',
'google.cloud.sql.Instance',
'google.cloud.storage.Bucket',
'google.compute.Autoscaler',
'google.compute.BackendBucket',
'google.compute.BackendService',
'google.compute.Disk',
'google.compute.Firewall',
'google.compute.ForwardingRule',
'google.compute.HealthCheck',
'google.compute.HttpHealthCheck',
'google.compute.HttpsHealthCheck',
'google.compute.Image',
'google.compute.Instance',
'google.compute.InstanceGroup',
'google.compute.InstanceGroupManager',
'google.compute.InstanceTemplate',
'google.compute.License',
'google.compute.Network',
'google.compute.Project',
'google.compute.Router',
'google.compute.Snapshot',
'google.compute.SslCertificate',
'google.compute.Subnetwork',
'google.compute.TargetHttpProxy',
'google.compute.TargetHttpsProxy',
'google.compute.TargetInstance',
'google.compute.TargetPool',
'google.compute.TargetSslProxy',
'google.compute.TargetTcpProxy',
'google.compute.TargetVpnGateway',
'google.compute.UrlMap',
'google.compute.VpnTunnel',
'google.container.Cluster',
'google.iam.Role',
'google.iam.ServiceAccount',
'google.pubsub.Subscription',
'google.pubsub.Topic',
'google.spanner.Database',
'google.spanner.Instance',
]
def load_cloudasset_data(session, config):
"""Export asset data from Cloud Asset API and load into storage.
Args:
session (object): Database session.
config (object): Inventory configuration on server.
Returns:
int: The count of assets imported into the database, or None if there
is an error.
"""
# Start by ensuring that there is no existing CAI data in storage.
_clear_cai_data(session)
cloudasset_client = cloudasset.CloudAssetClient(
config.get_api_quota_configs())
imported_assets = 0
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
futures = []
for content_type in CONTENT_TYPES:
futures.append(executor.submit(_export_assets,
cloudasset_client,
config,
content_type))
for future in concurrent.futures.as_completed(futures):
temporary_file = ''
try:
temporary_file = future.result()
if not temporary_file:
return _clear_cai_data(session)
LOGGER.debug('Importing Cloud Asset data from %s to database.',
temporary_file)
with open(temporary_file, 'r') as cai_data:
rows = CaiDataAccess.populate_cai_data(cai_data, session)
imported_assets += rows
LOGGER.info('%s assets imported to database.', rows)
finally:
if temporary_file:
os.unlink(temporary_file)
return imported_assets
def _export_assets(cloudasset_client, config, content_type):
"""Worker function for exporting assets and downloading dump from GCS.
Args:
cloudasset_client (CloudAssetClient): CloudAsset API client interface.
config (object): Inventory configuration on server.
content_type (ContentTypes): The content type to export.
Returns:
str: The path to the temporary file downloaded from GCS or None on
error.
"""
asset_types = config.get_cai_asset_types()
if not asset_types:
asset_types = DEFAULT_ASSET_TYPES
timeout = config.get_cai_timeout()
root_id = config.get_root_resource_id()
timestamp = int(time.time())
export_path = _get_gcs_path(config.get_cai_gcs_path(),
content_type,
root_id,
timestamp)
try:
LOGGER.info('Starting Cloud Asset export for %s under %s to GCS object '
'%s.', content_type, root_id, export_path)
if asset_types:
LOGGER.info('Limiting export to the following asset types: %s',
asset_types)
results = cloudasset_client.export_assets(root_id,
export_path,
content_type=content_type,
asset_types=asset_types,
blocking=True,
timeout=timeout)
LOGGER.debug('Cloud Asset export for %s under %s to GCS '
'object %s completed, result: %s.',
content_type, root_id, export_path, results)
except api_errors.ApiExecutionError as e:
LOGGER.warn('API Error getting cloud asset data: %s', e)
return None
except api_errors.OperationTimeoutError as e:
LOGGER.warn('Timeout getting cloud asset data: %s', e)
return None
if 'error' in results:
LOGGER.error('Export of cloud asset data had an error, aborting: '
'%s', results)
return None
try:
LOGGER.debug('Downloading Cloud Asset data from GCS to disk.')
return file_loader.copy_file_from_gcs(export_path)
except errors.HttpError as e:
LOGGER.warn('Download of CAI dump from GCS failed: %s', e)
return None
def _clear_cai_data(session):
"""Clear CAI data from storage.
Args:
session (object): Database session.
"""
LOGGER.debug('Deleting Cloud Asset data from database.')
count = CaiDataAccess.clear_cai_data(session)
LOGGER.debug('%s assets deleted from database.', count)
return None
def _get_gcs_path(base_path, content_type, root_id, timestamp):
"""Generate a GCS object path for CAI dump.
Args:
base_path (str): The GCS bucket, starting with 'gs://'.
content_type (str): The Cloud Asset content type for this export.
root_id (str): The root resource ID for this export.
timestamp (int): The timestamp for this export.
Returns:
str: The full path to a GCS object to store export the data to.
"""
return '{}/{}-{}-{}.dump'.format(base_path,
root_id.replace('/', '-'),
content_type.lower(),
timestamp)
| 36.866071 | 80 | 0.634052 |
731c8c2828977381819ed54fe4242b428e4487c2 | 5,821 | py | Python | tests/nlu/test_components.py | ektai/rasa | 0d02ad952b8c42187155fed82083cc8e60d9d064 | [
"Apache-2.0"
] | 1 | 2020-10-14T18:09:10.000Z | 2020-10-14T18:09:10.000Z | tests/nlu/test_components.py | gokulsg/rasa | 3868797ecd785970c2eaeced71911de1d54a2cbe | [
"Apache-2.0"
] | 187 | 2020-02-25T16:07:06.000Z | 2022-03-01T13:42:41.000Z | tests/nlu/test_components.py | gokulsg/rasa | 3868797ecd785970c2eaeced71911de1d54a2cbe | [
"Apache-2.0"
] | null | null | null | import pytest
from rasa.nlu import registry, train
from rasa.nlu.components import find_unavailable_packages
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.model import Interpreter, Metadata
@pytest.mark.parametrize("component_class", registry.component_classes)
def test_no_components_with_same_name(component_class):
"""The name of the components need to be unique as they will
be referenced by name when defining processing pipelines."""
names = [cls.name for cls in registry.component_classes]
assert (
names.count(component_class.name) == 1
), f"There is more than one component named {component_class.name}"
@pytest.mark.parametrize("component_class", registry.component_classes)
def test_all_required_components_can_be_satisfied(component_class):
"""Checks that all required_components are present in the registry."""
def _required_component_in_registry(component):
for previous_component in registry.component_classes:
if issubclass(previous_component, component):
return True
return False
missing_components = []
for required_component in component_class.required_components():
if not _required_component_in_registry(required_component):
missing_components.append(required_component.name)
assert missing_components == [], (
f"There is no required components {missing_components} "
f"for '{component_class.name}'."
)
def test_find_unavailable_packages():
unavailable = find_unavailable_packages(
["my_made_up_package_name", "io", "foo_bar", "foo_bar"]
)
assert unavailable == {"my_made_up_package_name", "foo_bar"}
def test_builder_create_by_module_path(component_builder, blank_config):
from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer
path = "rasa.nlu.featurizers.sparse_featurizer.regex_featurizer.RegexFeaturizer"
component_config = {"name": path}
component = component_builder.create_component(component_config, blank_config)
assert type(component) == RegexFeaturizer
@pytest.mark.parametrize(
"test_input, expected_output, error",
[
("my_made_up_component", "Cannot find class", Exception),
(
"rasa.nlu.featurizers.regex_featurizer.MadeUpClass",
"Failed to find class",
Exception,
),
("made.up.path.RegexFeaturizer", "No module named", ModuleNotFoundError),
],
)
def test_create_component_exception_messages(
component_builder, blank_config, test_input, expected_output, error
):
with pytest.raises(error):
component_config = {"name": test_input}
component_builder.create_component(component_config, blank_config)
def test_builder_load_unknown(component_builder):
with pytest.raises(Exception) as excinfo:
component_meta = {"name": "my_made_up_componment"}
component_builder.load_component(component_meta, "", Metadata({}, None))
assert "Cannot find class" in str(excinfo.value)
async def test_example_component(component_builder, tmp_path):
_config = RasaNLUModelConfig(
{"pipeline": [{"name": "tests.nlu.example_component.MyComponent"}]}
)
(trainer, trained, persisted_path) = await train(
_config,
data="./data/examples/rasa/demo-rasa.json",
path=str(tmp_path),
component_builder=component_builder,
)
assert trainer.pipeline
loaded = Interpreter.load(persisted_path, component_builder)
assert loaded.parse("test") is not None
@pytest.mark.parametrize(
"supported_language_list, not_supported_language_list, language, expected",
[
# in following comments: VAL stands for any valid setting
# for language is `None`
(None, None, None, True),
# (None, None)
(None, None, "en", True),
# (VAL, None)
(["en"], None, "en", True),
(["en"], None, "zh", False),
# (VAL, [])
(["en"], [], "en", True),
(["en"], [], "zh", False),
# (None, VAL)
(None, ["en"], "en", False),
(None, ["en"], "zh", True),
# ([], VAL)
([], ["en"], "en", False),
([], ["en"], "zh", True),
],
)
def test_can_handle_language_logically_correctness(
supported_language_list, not_supported_language_list, language, expected
):
from rasa.nlu.components import Component
SampleComponent = type(
"SampleComponent",
(Component,),
{
"supported_language_list": supported_language_list,
"not_supported_language_list": not_supported_language_list,
},
)
assert SampleComponent.can_handle_language(language) == expected
@pytest.mark.parametrize(
"supported_language_list, not_supported_language_list, expected_exec_msg",
[
# in following comments: VAL stands for any valid setting
# (None, [])
(None, [], "Empty lists for both"),
# ([], None)
([], None, "Empty lists for both"),
# ([], [])
([], [], "Empty lists for both"),
# (VAL, VAL)
(["en"], ["zh"], "Only one of"),
],
)
def test_can_handle_language_guard_clause(
supported_language_list, not_supported_language_list, expected_exec_msg
):
from rasa.nlu.components import Component
from rasa.shared.exceptions import RasaException
SampleComponent = type(
"SampleComponent",
(Component,),
{
"supported_language_list": supported_language_list,
"not_supported_language_list": not_supported_language_list,
},
)
with pytest.raises(RasaException) as excinfo:
SampleComponent.can_handle_language("random_string")
assert expected_exec_msg in str(excinfo.value)
| 33.262857 | 87 | 0.675485 |
28a802054ba1634de60aac01a13100d865762c8a | 22 | py | Python | venv/Lib/site-packages/folium/_version.py | star10919/drf | 77c005794087484d72ffc0d76612a6ac9845821e | ["BSD-3-Clause"] | 4 | 2015-02-03T06:48:43.000Z | 2015-05-27T15:31:56.000Z | venv/Lib/site-packages/folium/_version.py | star10919/drf | 77c005794087484d72ffc0d76612a6ac9845821e | ["BSD-3-Clause"] | 13 | 2015-01-08T14:12:38.000Z | 2021-06-10T17:44:37.000Z | venv/Lib/site-packages/folium/_version.py | star10919/drf | 77c005794087484d72ffc0d76612a6ac9845821e | ["BSD-3-Clause"] | 1 | 2018-02-25T17:50:55.000Z | 2018-02-25T17:50:55.000Z | __version__ = "0.12.1" | 22 | 22 | 0.681818 |
b7c5f6d810a4377c4d3d325bb5ccc2b618107486 | 10,732 | py | Python | ports/tm4c123/board/make-pins.py | JohKiener/micropython | 96af9ce5ac35a7026ed4e92272694eb35c5e7f32 | ["MIT"] | 6 | 2019-07-23T04:18:43.000Z | 2022-03-26T12:03:08.000Z | ports/tm4c123/board/make-pins.py | JohKiener/micropython | 96af9ce5ac35a7026ed4e92272694eb35c5e7f32 | ["MIT"] | 5 | 2019-07-26T09:06:38.000Z | 2022-02-07T16:43:52.000Z | ports/tm4c123/board/make-pins.py | JohKiener/micropython | 96af9ce5ac35a7026ed4e92272694eb35c5e7f32 | ["MIT"] | 13 | 2019-07-30T10:17:10.000Z | 2022-02-08T08:04:52.000Z | #!/usr/bin/env python
"""Generates the pins file for the TM4C123."""
from __future__ import print_function
import argparse
import sys
import csv
SUPPORTED_AFS = {
'UART' : ('TX', 'RX', 'RTS', 'CTS'), #UART
'SSI' : ('CLK', 'TX', 'RX', 'FSS'), #SPI
'I2C' : ('SDA', 'SCL'),
'TIM' : ('CCP0', 'CCP1'), #16 bit Timer
'WTIM' : ('CCP0', 'CCP1'), #32 bit Wide Timer
'MTRL' : ('PWM0', 'PWM1', 'PWM2', 'PWM3', 'PWM4', 'PWM5','PWM6','PWM7', 'FAULT0'), # Motion Control
'ADC' : ( 'AIN0','AIN1','AIN2','AIN3','AIN4','AIN5','AIN6','AIN7','AIN8','AIN9','AIN10', 'AIN11'),
'COMP' : ('NEG','POS', 'OUT' ), # Analog Comparator
'QEI' : ('PHA0', 'PHA1', 'PHB0', 'PHB1', 'IDX0', 'IDX1'), # Quadrature Encoder Interface
'TR' : ('CLK', 'D0', 'D1'), # Trace
'CAN' : ('TX', 'RX'),
'NMI' : (''),
'JTAG' : ('TDO', 'SWO', 'TDI', 'TMS', 'SWDIO', 'TCK', 'SWCLK'),
'USB' : ('DM', 'DP', 'EPEN', 'ID', 'PFLT', 'VBUS')
}
SINGLE_UNIT_AF = ('NMI','TR') # These do not have Unit numbers
NO_PREFIX_UNIT_AF = ('ADC', 'QEI', 'JTAG') # these units dont have the unit type in the af name
AF_SHORT_DICT = {
'UART' : 'U',
'TIM' : 'T',
'WTIM' : 'WT',
'MTRL' : 'M',
'COMP' : 'C'
} # some af names are shortened in the datasheet and driverlib
def parse_port_pin(name_str):
"""Parses a string and returns a (port, port_pin) tuple."""
if len(name_str) < 3:
raise ValueError("Expecting pin name to be at least 3 characters")
if name_str[0] != 'P':
raise ValueError("Expecting pin name to start with P")
if name_str[1] < 'A' or name_str[1] > 'F':
raise ValueError("Expecting pin port to be between A and F")
port = name_str[1]
pin_str = name_str[2:]
if not pin_str.isdigit():
raise ValueError("Expecting numeric pin number.")
return (port, int(pin_str))
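# Example: parse_port_pin('PA5') returns ('A', 5); names shorter than 3 characters, not
# starting with 'P', using a port outside A-F, or with a non-numeric pin part raise ValueError.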
class AF:
"""Holds the description of an alternate function"""
def __init__(self, name, idx, fn, unit, type, pin_name):
self.name = name
self.idx = idx
"""AF from 0 to 9 and 14 to 15"""
if self.idx > 15 or (10 <= self.idx <= 13):
self.idx = -1
self.fn = fn
self.unit = unit
self.type = type
self.pin_name = pin_name
if fn in AF_SHORT_DICT:
self.short = AF_SHORT_DICT[fn] + str(unit)
elif fn in NO_PREFIX_UNIT_AF:
self.short = ''
elif unit < 0 :
self.short = fn
else:
self.short = fn + str(unit)
def print(self):
if self.idx == 0:
print (' AF_AN({:14s}, {:4d}, {:4s}, {:4d}, {:6s}), // {}'.format(self.name, self.idx, self.fn, self.unit, self.type, self.name))
elif self.short == '':
print (' AF_SL({:14s}, {:4d}, {:4s}, {:4d}, {:12s}, {:3s}), // {}'.format(self.name, self.idx, self.fn, self.unit, self.type, self.pin_name, self.name))
else:
print (' AF_ML({:14s}, {:4d}, {:4s}, {:4d}, {:6s}, {:4s}, {:3s}), // {}'.format(self.name, self.idx, self.fn, self.unit, self.type, self.short, self.pin_name, self.name))
class Pin:
"""Holds the information associated with a pin."""
def __init__(self, name, port, port_pin, def_af, pin_num):
self.name = name
self.port = port
self.port_pin = port_pin
self.pin_num = pin_num
self.def_af = def_af
self.board_pin = False
self.afs = []
def add_af(self, af):
self.afs.append(af)
def print(self):
print('// {}'.format(self.name))
if len(self.afs):
print('const pin_af_obj_t pin_{}_af[] = {{'.format(self.name))
for af in self.afs:
af.print()
print('};')
print('const pin_obj_t pin_{:3s}_obj = PIN({:6s}, {}, {:3d}, {:2d}, pin_{}_af, {}, {});\n'.format(
self.name, self.name, self.port, self.port_pin, self.pin_num, self.name, self.def_af, len(self.afs)))
else:
print('const pin_obj_t pin_{:3s}_obj = PIN({:6s}, {}, {:3d}, {:2d}, NULL, 0, 0);\n'.format(
self.name, self.name, self.port, self.port_pin, self.pin_num))
def print_header(self, hdr_file):
hdr_file.write('extern const pin_obj_t pin_{:3s}_obj;\n#define pin_{:3s} (&pin_{:3s}_obj)\n\n'.format(self.name,self.name,self.name))
class Pins:
def __init__(self):
self.board_pins = [] # list of pin objects
def find_pin(self, port, port_pin):
for pin in self.board_pins:
if pin.port == port and pin.port_pin == port_pin:
return pin
def find_pin_by_num(self, pin_num):
for pin in self.board_pins:
if pin.pin_num == pin_num:
return pin
def find_pin_by_name(self, name):
for pin in self.board_pins:
if pin.name == name:
return pin
def parse_af_file(self, filename, pin_col, pinname_col, defaf_col, af_start_col):
with open(filename, 'r') as csvfile:
rows = csv.reader(csvfile)
for row in rows:
try:
(port_num, port_pin) = parse_port_pin(row[pinname_col])
except:
continue
if not row[pin_col].isdigit():
raise ValueError("Invalid pin number {:s} in row {:s}".format(row[pin_col], row))
pin_num = int(row[pin_col])
# find the default af
if row[defaf_col] != '' and row[defaf_col] != row[pinname_col]:
for cell in row[af_start_col:]:
if cell == row[defaf_col]:
def_af = row[af_start_col:].index(cell)
break
else:
def_af = 0
pin = Pin(row[pinname_col], port_num, port_pin, def_af, pin_num)
self.board_pins.append(pin)
af_idx = 0
for af in row[af_start_col:]:
af_splitted = af.split('_')
fn_name = af_splitted[0].rstrip('0123456789')
if fn_name in SUPPORTED_AFS:
try:
type_name = af_splitted[1]
except:
type_name = ''
if type_name in SUPPORTED_AFS[fn_name]:
if fn_name in SINGLE_UNIT_AF: # Dont have numbers
unit_idx = -1
elif fn_name in NO_PREFIX_UNIT_AF:
unit_idx = -1
else:
unit_idx = af_splitted[0][-1]
pin.add_af(AF(af, af_idx, fn_name, int(unit_idx), type_name, pin.name))
af_idx += 1
def parse_board_file(self, filename, cpu_pin_col):
with open(filename, 'r') as csvfile:
rows = csv.reader(csvfile)
for row in rows:
if row[cpu_pin_col].isdigit():
pin = self.find_pin_by_num(int(row[cpu_pin_col]))
else:
pin = self.find_pin_by_name(row[cpu_pin_col])
if pin:
pin.board_pin = True
def print_named(self, label, pins):
print('')
print('STATIC const mp_rom_map_elem_t pin_{:s}_pins_locals_dict_table[] = {{'.format(label))
for pin in pins:
if pin.board_pin:
print(' {{ MP_ROM_QSTR(MP_QSTR_{:3s}), MP_ROM_PTR(pin_{:3s}) }},'.format(pin.name, pin.name))
print('};')
print('MP_DEFINE_CONST_DICT(pin_{:s}_pins_locals_dict, pin_{:s}_pins_locals_dict_table);'.format(label, label))
def print(self):
for pin in self.board_pins:
if pin.board_pin:
pin.print()
self.print_named('board', self.board_pins)
self.print_named('cpu', self.board_pins)
print('')
def print_header(self, hdr_filename):
with open(hdr_filename, 'wt') as hdr_file:
for pin in self.board_pins:
if pin.board_pin:
pin.print_header(hdr_file)
def print_qstr(self, qstr_filename):
with open(qstr_filename, 'wt') as qstr_file:
pin_qstr_set = set([])
af_qstr_set = set([])
for pin in self.board_pins:
if pin.board_pin:
pin_qstr_set |= set([pin.name])
for af in pin.afs:
af_qstr_set |= set([af.name])
print('// Board pins', file=qstr_file)
for qstr in sorted(pin_qstr_set):
print('Q({})'.format(qstr), file=qstr_file)
print('\n// Pin AFs', file=qstr_file)
for qstr in sorted(af_qstr_set):
print('Q({})'.format(qstr), file=qstr_file)
def main():
parser = argparse.ArgumentParser(
prog="make-pins.py",
usage="%(prog)s [options] [command]",
description="Generate board specific pin file"
)
parser.add_argument(
"-a", "--af",
dest="af_filename",
help="Specifies the alternate function file for the chip",
default="tm4c123_af.csv"
)
parser.add_argument(
"-b", "--board",
dest="board_filename",
help="Specifies the board file",
)
parser.add_argument(
"-p", "--prefix",
dest="prefix_filename",
help="Specifies beginning portion of generated pins file",
default="tm4c123_prefix.c"
)
parser.add_argument(
"-q", "--qstr",
dest="qstr_filename",
help="Specifies name of generated qstr header file",
default="build/pins_qstr.h"
)
parser.add_argument(
"-r", "--hdr",
dest="hdr_filename",
help="Specifies name of generated pin header file",
default="build/pins.h"
)
args = parser.parse_args(sys.argv[1:])
pins = Pins()
print('// This file was automatically generated by make-pins.py')
print('//')
if args.af_filename:
print('// --af {:s}'.format(args.af_filename))
pins.parse_af_file(args.af_filename, 0, 1, 2, 3)
if args.board_filename:
print('// --board {:s}'.format(args.board_filename))
pins.parse_board_file(args.board_filename, 1)
if args.prefix_filename:
print('// --prefix {:s}'.format(args.prefix_filename))
print('')
with open(args.prefix_filename, 'r') as prefix_file:
print(prefix_file.read())
pins.print()
pins.print_qstr(args.qstr_filename)
pins.print_header(args.hdr_filename)
if __name__ == "__main__":
main()
| 37.393728 | 188 | 0.53196 |
2e75eaad1eeceadc338fcc59970801a09b2cea5c | 5,117 | py | Python | bin/metrics-netscaler.py | sensu-plugins/sensu-plugins-netscaler | 6c11db7ca98657013bc01964dd8a432f9f15c4c2 | ["MIT"] | null | null | null | bin/metrics-netscaler.py | sensu-plugins/sensu-plugins-netscaler | 6c11db7ca98657013bc01964dd8a432f9f15c4c2 | ["MIT"] | 2 | 2018-11-01T16:00:05.000Z | 2021-06-25T15:16:40.000Z | bin/metrics-netscaler.py | sensu-plugins/sensu-plugins-netscaler | 6c11db7ca98657013bc01964dd8a432f9f15c4c2 | ["MIT"] | 2 | 2015-08-11T22:48:16.000Z | 2018-04-10T22:05:40.000Z | #!/usr/bin/env python
#
# DESCRIPTION:
# Grabs stats from a netscaler appliance via the Nitro REST API.
# Prints to STDOUT in graphite format thus meant for a TCP handler
# To find out what each stat means download the Nitro SDK
# http://support.citrix.com/proddocs/topic/netscaler-main-api-10-map/ns-nitro-rest-feat-stat-api-ref.html
# You should also be able to get the stats docs in a PDF that can be downloaded
# from your netscaler web UI.
#
# OUTPUT:
# Graphite plain-text format (name value timestamp\n)
#
# DEPENDENCIES:
# Python 2.7 (untested on python 3 but should work fine)
# Python Requests (http://docs.python-requests.org)
#
# LICENSE:
# Jaime Gago contact@jaimegago.com
# Released under the same terms as Sensu (the MIT license); see LICENSE
# for details.
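# Example of an emitted line (stat name and value are illustrative):
#   netscaler.ns.tcpcurclientconn 123 1546300800
# i.e. "<graphite_scheme>.<stat path> <value> <unix timestamp>"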
import logging
import logging.handlers
import optparse
import requests
import sys
import time
FLAT_STATS_ENDPOINTS = [
'ns',
'cmp',
'ssl',
'system'
]
STATS_WITH_IDS = [
{
'endpoint' : 'lbvserver',
'identifier' : 'name'
},
{
'endpoint' : 'Interface',
'identifier' : 'id'
}
]
FAILURE_CONSTANT = 1
def set_syslog():
'''Set a syslog logger'''
try:
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(pathname)s: %(message)s")
handler = logging.handlers.SysLogHandler(address = '/dev/log')
handler.setFormatter(formatter)
logger.addHandler(handler)
except Exception:
logging.critical("Failed to configure syslog handler")
sys.exit(1)
return logger
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def graphite_printer(stats, graphite_scheme):
now = time.time()
for stat in stats:
print "%s.%s %s %d" % (graphite_scheme, stat, stats[stat], now)
def get_flat_stats(flat_stats_end_points, nitro_version, netscaler, user,
password, logger):
nitro_rest_api = 'http://%s/nitro/%s/stat/' % (netscaler, nitro_version)
flat_stats = {}
for flat_stat_end_point in flat_stats_end_points:
url = nitro_rest_api + flat_stat_end_point
try:
response = requests.get(url, auth=(user, password))
except Exception as e:
logger.critical('Could not get JSON from %s' % url)
logger.critical(e)
sys.exit(FAILURE_CONSTANT)
data = response.json()
for flat_stat in data[flat_stat_end_point]:
value = data[flat_stat_end_point][flat_stat]
if isfloat(value):
flat_stats[flat_stat_end_point+ '.' + flat_stat] = value
return flat_stats
def get_stats_with_ids(stat_with_ids_end_point, stat_identifier, nitro_version,
netscaler, user, password, logger):
nitro_rest_api = 'http://%s/nitro/%s/stat/' % (netscaler, nitro_version)
url = nitro_rest_api + stat_with_ids_end_point
stats_with_ids = {}
try:
response = requests.get(url, auth=(user, password))
except Exception as e:
logger.critical('Could not get JSON from %s' % url)
logger.critical(e)
sys.exit(FAILURE_CONSTANT)
data = response.json()
for stats in data[stat_with_ids_end_point]:
stat_id = stats[stat_identifier]
stat_id_alnum = ''.join(e for e in stat_id if e.isalnum())
for stat in stats:
value = stats[stat]
if isfloat(value):
stat_name = stat_with_ids_end_point + '.' + stat_id_alnum + '.' + stat
stats_with_ids[stat_name] = value
return stats_with_ids
def main():
parser = optparse.OptionParser()
parser.add_option('-n', '--netscaler',
help = 'netscaler (IP or FQDN) to collect stats from',
dest = 'netscaler',
metavar = 'netscaler')
parser.add_option('-u', '--user',
help = 'netscaler user with access to nitro rest',
dest = 'user',
metavar = 'USER')
parser.add_option('-p', '--password',
help = 'netscaler user password',
dest = 'password',
metavar = 'PASSWORD')
parser.add_option('-s', '--graphite_scheme',
help = 'graphite scheme to prepend, default to <netscaler>',
default = 'netscaler',
dest = 'graphite_scheme',
metavar = 'GRAPHITE_SCHEME')
parser.add_option('-v', '--nitro-version',
help = 'nitro REST API version, defaults to v1',
default = 'v1',
dest = 'nitro_version',
metavar = 'NITRO_VERSION')
(options, args) = parser.parse_args()
if not options.netscaler or not options.user or not options.password:
print 'A netscaler, user and password are required'
sys.exit(FAILURE_CONSTANT)
nitro_version = options.nitro_version
netscaler = options.netscaler
user = options.user
password = options.password
logger = set_syslog()
flat_stats = get_flat_stats(FLAT_STATS_ENDPOINTS, nitro_version, netscaler, user,
password, logger)
graphite_printer(flat_stats, options.graphite_scheme)
for stat_with_ids in STATS_WITH_IDS:
stats_with_ids = get_stats_with_ids(stat_with_ids['endpoint'],
stat_with_ids['identifier'], nitro_version, netscaler, user, password,
logger)
graphite_printer(stats_with_ids, options.graphite_scheme)
if __name__ == '__main__':
main()
| 29.073864 | 105 | 0.688098 |
6a682c53c2b7b045ae5a8a20260721ecc46889ea | 4,278 | py | Python | src/mobile.py | mjvakili/footprinter | 60f36a05c136f5635be287c452d6bf590fe5c961 | ["MIT"] | null | null | null | src/mobile.py | mjvakili/footprinter | 60f36a05c136f5635be287c452d6bf590fe5c961 | ["MIT"] | null | null | null | src/mobile.py | mjvakili/footprinter | 60f36a05c136f5635be287c452d6bf590fe5c961 | ["MIT"] | 2 | 2020-05-23T04:43:22.000Z | 2020-07-21T09:59:10.000Z | import tensorflow as tf
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Input, ReLU, Conv2DTranspose, BatchNormalization, Dropout, Concatenate
from tensorflow.keras.activations import relu
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.models import load_model
def Encoder(input_shape=(128, 128, 3)):
base_model = MobileNetV2(input_shape=input_shape, include_top=False)
# Use the activations of these layers for the Skip connection
layer_names = [
'block_1_expand_relu', # OUTPUT_SHAPE: (BS, 64, 64, 96)
'block_3_expand_relu', # OUTPUT_SHAPE: (BS, 32, 32, 144)
'block_6_expand_relu', # OUTPUT_SHAPE: (BS, 16, 16, 192)
'block_13_expand_relu', # OUTPUT_SHAPE: (BS, 8, 8, 576)
'block_16_project' # OUTPUT_SHAPE: (BS, 4, 4, 320)
]
layers = [base_model.get_layer(name).output for name in layer_names]
# Create the feature extraction encoder with 5 outputs
# The last output is the input of the decoder
# the 4th, 3rd, 2nd, and 1st outputs are the 1st, 2nd, 3rd, and 4th skip connections the decoder
down_stack = Model(inputs=base_model.input, outputs=layers)
# Make it non-trainable
down_stack.trainable = False
return down_stack
def upsampler_block(nfilters, size=3, strides=2, norm_type='batchnorm', apply_dropout=False):
"""
source: https://github.com/tensorflow/examples/blob/master/tensorflow_examples/models/pix2pix/pix2pix.py#L220
Upsamples an input with a Conv2DTranspose followed by Batchnorm, Dropout and Relu activation
Conv2DTranspose => Batchnorm => Dropout => Relu
Args: nfilters: number of filters
size: filter size
norm_type: Normalization type; either 'batchnorm' or 'instancenorm'.
apply_dropout: If True, adds the dropout layer
Returns:
An upsampler Sequential Model : (nrows, ncols) --> (new_nrows, new_ncols)
       new_nrows = nrows * strides   (padding='same', so each spatial dim scales by `strides`)
       new_ncols = ncols * strides
See: https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
"""
initializer = tf.random_normal_initializer(0., 0.02)
result = Sequential()
    result.add(Conv2DTranspose(nfilters, size, strides=strides,
padding='same',
kernel_initializer = "he_normal",
use_bias=False))
if norm_type.lower() == 'batchnorm':
result.add(BatchNormalization())
elif norm_type.lower() == 'instancenorm':
        result.add(InstanceNormalization())  # assumes an InstanceNormalization layer is available, e.g. tensorflow_addons.layers.InstanceNormalization
if apply_dropout:
result.add(Dropout(0.5))
result.add(ReLU())
return result
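# Shape sketch (assumes a TF 2.x runtime): with strides=2 and padding='same' each spatial
# dimension is doubled, e.g. upsampler_block(512)(tf.zeros((1, 4, 4, 320))).shape == (1, 8, 8, 512).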
def unet_model(input_shape=(128, 128, 3)):
decoder = [
upsampler_block(512, 3), # (BS, 4, 4, 320) -> (BS, 8, 8, 512) + (BS, 8, 8, 576) = (BS, 8, 8, 1088)
upsampler_block(256, 3), # (BS, 8, 8, 1088) -> (BS, 16, 16, 256) + (BS, 16, 16, 192) = (BS, 16, 16, 448)
upsampler_block(128, 3), # (BS, 16, 16, 448) -> (BS, 32, 32, 128) + (BS, 32, 32, 144) = (BS, 32, 32, 172)
upsampler_block(64, 3) # (BS, 32, 32, 172) -> (BS, 64, 64, 64) + (BS, 64, 64, 96) = (BS, 64, 64, 160)
]
inputs = Input(shape=input_shape)
encoder = Encoder(input_shape)
encoder_outputs = encoder(inputs)
#skip connections are ordered from last to first
encoder_skips_backward, x = encoder_outputs[:-1], encoder_outputs[-1]
#reorder the skip connection
encoder_skips_forward = reversed(encoder_skips_backward)
for upsampler, encoder_skip in zip(decoder, encoder_skips_forward):
#Upsample with the unit upsampling block
x = upsampler(x)
#Concatenate the upsampled tensor with the skip connections
x = Concatenate()([x, encoder_skip])
#One last upsampling layer to predict the target binary mask: (BS, 64, 64, 160) -> (BS, 128, 128, 1)
outputs = Conv2DTranspose(1, 3, strides=2, padding="same", activation = "sigmoid")(x)
unet_model = Model(inputs = inputs, outputs = outputs)
return unet_model
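# Minimal usage sketch (assumes TF 2.x and downloadable MobileNetV2 weights):
#   model = unet_model((128, 128, 3))
#   model(tf.zeros((1, 128, 128, 3))).shape  # -> (1, 128, 128, 1), a sigmoid mask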
| 43.212121 | 121 | 0.634409 |
cc6743d3f9b4baeec14de8d86477361a18c93f76 | 13,805 | py | Python | testing_CLSA.py | cacoool/CLSA-Retina | 0001cf38bc5984d742c1093a718a279738aefa4b | ["MIT"] | 1 | 2021-09-19T08:31:20.000Z | 2021-09-19T08:31:20.000Z | testing_CLSA.py | cacoool/CLSA-Retina | 0001cf38bc5984d742c1093a718a279738aefa4b | ["MIT"] | null | null | null | testing_CLSA.py | cacoool/CLSA-Retina | 0001cf38bc5984d742c1093a718a279738aefa4b | ["MIT"] | null | null | null | ###################################################
#
# Script to
# - Calculate prediction of the test datasets
# - Calculate the parameters to evaluate the prediction
#
##################################################
#Python
import numpy as np
import torch
import os
import h5py
import cv2 as cv
import pandas as pd
from torch.utils.data import DataLoader,Dataset
from tqdm import tqdm
import time
from scipy import stats
import random
import matplotlib.cm as cm
import copy
from PIL import Image
from sklearn.metrics import roc_auc_score, confusion_matrix, roc_curve
# helper_functions imports
from helper_functions.loadConfig import loadConfig
from helper_functions.resumeCheckpoint import resumeCheckpoint
from datasets.CLSA_dataloader import CLSA_dataloader_test
from sklearn.metrics import r2_score, mean_absolute_error
# define pyplot parameters
import matplotlib.pylab as pylab
params = {'legend.fontsize': 15,
'axes.labelsize': 15,
'axes.titlesize':15,
'xtick.labelsize':15,
'ytick.labelsize':15}
pylab.rcParams.update(params)
def baseline_rolling_window(a, window):
bins = np.linspace(start=np.floor(np.min(a)), stop=np.ceil(np.max(a)), num=1+(np.ceil(np.max(a))-np.floor(np.min(a))).astype(np.uint8))
hist = np.histogram(a, bins=bins)[0]
windows_hist = np.lib.stride_tricks.sliding_window_view(hist, window_shape=window)
sum_windows = np.sum(windows_hist, axis=1)
bin = np.argmax(sum_windows)
windows_bins = np.lib.stride_tricks.sliding_window_view(bins, window_shape=window)
return np.average(windows_bins[bin])
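# Usage sketch (values are hypothetical): baseline_rolling_window([60, 61, 61, 62, 80], window=3)
# bins the values into 1-unit-wide bins, picks the densest 3-bin window ([60, 61, 62]) and
# returns the average of those bin edges, i.e. 61.0.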
def test(expName, upper, var_n):
preds = []
targs = []
id_list = []
#========= Load settings from Config file ==========
hyperparameters = loadConfig('./config.txt')
hyperparameters['name'] = expName
path_dataset = hyperparameters['path_dataset']
bs = hyperparameters['batch_size']
#========== Define parameters here ==========
device = hyperparameters['device']
assert os.path.isdir('experiences/' + expName)
# HyperX
test_set = CLSA_dataloader_test(path_dataset, var_n, upper, hyperparameters=hyperparameters)
test_loader = DataLoader(test_set, batch_size=bs, shuffle=False, num_workers=8)
#========= Define model ============
num_classes = 1
from torch import nn
import torch.backends.cudnn as cudnn
from torchvision import models
from efficientnet_pytorch import EfficientNet
from efficientnet_pytorch.utils import MemoryEfficientSwish
from models.metaEfficient import get_metaEfficient
from models.attention_model import AttentionNet
# net = AttentionNet(2)
# net = nn.Sequential(nn.AdaptiveAvgPool2d((587, 587)), net)
# ========== Define model here ==========
# Attention
# from models.attention_model import AttentionNet
# net = AttentionNet(n_output=num_classes)
# net = nn.Sequential(nn.AdaptiveAvgPool2d((587, 587)), net)
# model_name = "AttentionNet"
# Meta EfficientNet
# net = get_metaEfficient(n_features=301)
# model_name = "MetaEfficientNetB3"
# Meta
# net = nn.Sequential(nn.Linear(301, 500),
# nn.BatchNorm1d(500),
# nn.ReLU(),
# nn.Dropout(p=0.2),
# nn.Linear(500, 250), # FC layer output will have 250 features
# nn.BatchNorm1d(250),
# nn.ReLU(),
# nn.Dropout(p=0.2),
# nn.Linear(250, 1),
# nn.ReLU())
# model_name = "MetaNet"
# EfficientNet
net = EfficientNet.from_pretrained('efficientnet-b3', in_channels=3)
net = nn.Sequential(nn.AdaptiveAvgPool2d((300, 300)), net, nn.Linear(1000, 500), nn.ReLU(), nn.Dropout(p=0.2), nn.Linear(500, num_classes), MemoryEfficientSwish())
if hyperparameters['device'] == 'cuda':
net.cuda()
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True
_, _, net = resumeCheckpoint(net, hyperparameters)
net.eval()
with torch.no_grad():
for (inputs, targets, meta, id) in tqdm(test_loader):
inputs, targets, meta = inputs.to(device), targets.to(device), meta.to(device)
# inputs = [inputs, meta]
# outputs = net(meta)
outputs = net(inputs)
# outputs, amap = net(inputs)
preds.extend(outputs.squeeze().detach().cpu().numpy())
targs.extend(targets.squeeze().detach().cpu().numpy())
id_list.extend(id)
# Saliency Batch Size must be 1
# amap = cv.resize(amap1.cpu().detach().numpy().squeeze(), (587, 587))
# new_amap = ((amap - amap.min()) * (1/(amap.max() - amap.min()) * 255)).astype('uint8')
# print(ori.shape)
# org_im = Image.fromarray(cv.resize(ori.cpu().detach().numpy().squeeze(), (587, 587)))
# cmaps= ['spring']#'spring', 'seismic',
# for cmap in cmaps:
# print(cmap)
# color_map = cm.get_cmap(cmap)
# heat = color_map(new_amap)
# # Change alpha channel in colormap to make sure original image is displayed
# heatmap = copy.copy(heat)
# heatmap[:, :, 3] = 0.7
# heatmap = Image.fromarray((heatmap*255).astype(np.uint8))
#
# # Apply heatmap on iamge
# heatmap_on_image = Image.new("RGBA", org_im.size)
# heatmap_on_image = Image.alpha_composite(heatmap_on_image, org_im.convert('RGBA'))
# heatmap_on_image = Image.alpha_composite(heatmap_on_image, heatmap)
# # heat = cv.applyColorMap(new_amap, cv.COLORMAP_PINK)
# # fin = cv.addWeighted(heat[:, :, 0:3], 0.5, cv.resize(ori.cpu().detach().numpy().squeeze(), (587, 587)), 0.5, 0)
# # heatmap_on_image.show()
# # cv.imshow("test", np.array(heatmap_on_image))
# cv.imshow("test", np.hstack([np.array(org_im), (heat[:, :, 0:3]*255).astype(np.uint8), np.array(heatmap_on_image)[:, :, 0:3]]))
# cv.waitKeyEx()
import matplotlib.pyplot as plt
preds = np.array(preds, dtype=np.float32).squeeze()#-100)/10
targs = np.array(targs, dtype=np.float32).squeeze()#-100)/10
id = np.array(id_list).squeeze()
# COMBINE PRED FROM 2 FUNDUS INTO ONE FOR CLASSIFICATION TASK
# df = pd.DataFrame(id)
# df[1] = preds[:, 0]
# df[2] = preds[:, 1]
# df[3] = df.groupby(0)[1].transform('mean')
# df[4] = df.groupby(0)[2].transform('mean')
# df[5] = targs[:, 0]
# df[6] = targs[:, 1]
# df = df.drop([1, 2], axis=1)
# df = df.drop_duplicates()
# preds = np.array([df[3],df[4]]).transpose()
# targs = np.array([df[5],df[6]]).transpose()
# COMBINE PRED FROM 2 FUNDUS INTO ONE FOR REGRESSION TASK
import pandas as pd
df = pd.DataFrame(id)
df[1] = preds
df[2] = df.groupby(0)[1].transform('mean')
df[3] = targs
df = df.drop([1], axis=1)
df = df.drop_duplicates()
preds = np.array(df[2])
targs = np.array(df[3])
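    # Editor's note (illustrative, not part of the original script): the groupby /
    # transform('mean') block above collapses the two per-eye predictions that share one
    # participant id into a single averaged prediction per participant, e.g. (hypothetical values)
    #     id    pred          ->    id    mean pred   target
    #     P001  52.1                P001  53.0        54.0
    #     P001  53.9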
# ROC Curve
# fpr_real, tpr_real, _ = roc_curve(targs.argmax(axis=1), preds[:,1], drop_intermediate=True)
# roc = roc_auc_score(np.array(targs).argmax(axis=1), np.array(preds).argmax(axis=1))
# fpr_50, tpr_50, _ = roc_curve(targs.argmax(axis=1), np.zeros_like(targs.argmax(axis=1)))
# fig, ax = plt.subplots()
# ax.plot(fpr_real, tpr_real, 'b-', label=('Sex (AUC=' + str(roc)[0:4]+')'))
# ax.plot(fpr_50, tpr_50, 'k:')
# legend = ax.legend(loc='lower right', fontsize='x-large')
# plt.xlabel("False Positive Rate")
# plt.ylabel("True Positive Rate")
# plt.show()
# np.save("fpr_real_apoe4_combined.npy", fpr_real)
# np.save("tpr_real_apoe4_combined.npy", tpr_real)
# Bootstrap for CIs for classification
# det_roc = []
# for i in range(2000):
# indices = np.array([(random.randrange(0, len(preds))) for i in range(len(preds))])
# det_roc.append(roc_auc_score(np.array(targs[indices]).argmax(axis=1), np.array(preds[indices]).argmax(axis=1)))
# det_roc = np.array(det_roc)
# alpha = 0.95
# # R2
# p = ((1.0-alpha)/2.0) * 100
# lower_r = max(0.0, np.percentile(det_roc, p))
# p = (alpha+((1.0-alpha)/2.0)) * 100
# upper_r = min(1.0, np.percentile(det_roc, p))
# print('%.1f confidence interval for R2 %.3f%% and %.3f%%' % (alpha*100, lower_r, upper_r))
#
# time.sleep(1)
# print("Model Roc: " + str(det_roc))
# Bootstrap for CIs for regression
det_r = []
det_mae = []
for i in range(2000):
indices = np.array([(random.randrange(0, len(preds))) for i in range(len(preds))])
det_r.append(r2_score(targs[indices], preds[indices]))
det_mae.append(mean_absolute_error(targs[indices], preds[indices]))
det_r = np.array(det_r)
det_mae = np.array(det_mae)
alpha = 0.95
# R2
p = ((1.0-alpha)/2.0) * 100
lower_r = max(0.0, np.percentile(det_r, p))
p = (alpha+((1.0-alpha)/2.0)) * 100
upper_r = min(1.0, np.percentile(det_r, p))
print('%.1f confidence interval for R2 %.3f%% and %.3f%%' % (alpha*100, lower_r, upper_r))
# MAE
p = ((1.0-alpha)/2.0) * 100
lower_m = np.percentile(det_mae, p)
p = (alpha+((1.0-alpha)/2.0)) * 100
upper_m = np.percentile(det_mae, p)
print('%.1f confidence interval for MAE %.2f%% and %.2f%%' % (alpha*100, lower_m, upper_m))
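    # Editor's note (added for clarity): this is a standard percentile bootstrap over
    # 2000 resamples; with alpha = 0.95 the bounds are simply the 2.5th and 97.5th
    # percentiles of the resampled R2 / MAE values:
    #     p_lower = ((1.0 - alpha) / 2.0) * 100          # = 2.5
    #     p_upper = (alpha + (1.0 - alpha) / 2.0) * 100  # = 97.5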
mean_targs = np.ones_like(targs) * np.mean(targs)
mae = mean_absolute_error(targs, preds)
r2 = r2_score(targs, preds)
mae_baseline = mean_absolute_error(targs, mean_targs)
r2_baseline = r2_score(targs, mean_targs)
time.sleep(1)
print("Model R2: " + str(r2))
print("Model MAE: " + str(mae))
print("Baseline R2: " + str(r2_baseline))
print("Baseline MAE: " + str(mae_baseline))
#
#
# total = len(preds)
# one_counter = 0
# three_counter = 0
# five_counter = 0
# ten_counter = 0
# fifteen_counter = 0
# for i, pred in enumerate(preds):
# diff = np.abs(pred - targs[i])
# if diff <= 1:
# one_counter += 1
# if diff <= 3:
# three_counter += 1
# if diff <= 5:
# five_counter += 1
# if diff <= 10:
# ten_counter += 1
# if diff <= 15:
# fifteen_counter += 1
# print("Model +/- 1 : " + str(one_counter/total*100))
# print("Model +/- 3 : " + str(three_counter/total*100))
# print("Model +/- 5 : " + str(five_counter/total*100))
# print("Model +/- 10 : " + str(ten_counter/total*100))
# print("Model +/- 15 : " + str(fifteen_counter/total*100))
#
# # calculate baseline accuracy
# b1 = baseline_rolling_window(targs, 2)
# b3 = baseline_rolling_window(targs, 6)
# b5 = baseline_rolling_window(targs, 10)
# b10 = baseline_rolling_window(targs, 20)
# b15 = baseline_rolling_window(targs, 30)
# b_one_counter = 0
# b_three_counter = 0
# b_five_counter = 0
# b_ten_counter = 0
# b_fifteen_counter = 0
# for targ in targs:
# if np.abs(targ-b1) <= 1:
# b_one_counter += 1
# if np.abs(targ-b3) <= 3:
# b_three_counter += 1
# if np.abs(targ-b5) <= 5:
# b_five_counter += 1
# if np.abs(targ-b10) <= 10:
# b_ten_counter += 1
# if np.abs(targ-b15) <= 15:
# b_fifteen_counter += 1
# print("Baseline +/- 1 : " + str(b_one_counter/total*100))
# print("Baseline +/- 3 : " + str(b_three_counter/total*100))
# print("Baseline +/- 5 : " + str(b_five_counter/total*100))
# print("Baseline +/- 10 : " + str(b_ten_counter/total*100))
# print("Baseline +/- 15 : " + str(b_fifteen_counter/total*100))
#
# print("P-value +/- 1 : " + str(stats.binom_test(one_counter, total, b_one_counter/total, 'greater')))
# print("P-value +/- 3 : " + str(stats.binom_test(three_counter, total, b_three_counter/total, 'greater')))
# print("P-value +/- 5 : " + str(stats.binom_test(five_counter, total, b_five_counter/total, 'greater')))
# print("P-value +/- 10 : " + str(stats.binom_test(ten_counter, total, b_ten_counter/total, 'greater')))
# print("P-value +/- 15 : " + str(stats.binom_test(fifteen_counter, total, b_fifteen_counter/total, 'greater')))
# results = np.array([
# lower_r,
# upper_r,
# lower_m,
# upper_m,
# r2,
# mae,
# r2_baseline,
# mae_baseline,
# one_counter/total*100,
# three_counter/total*100,
# five_counter/total*100,
# ten_counter/total*100,
# fifteen_counter/total*100,
# b_one_counter/total*100,
# b_three_counter/total*100,
# b_five_counter/total*100,
# b_ten_counter/total*100,
# b_fifteen_counter/total*100,
# stats.binom_test(one_counter, total, b_one_counter/total, 'greater'),
# stats.binom_test(three_counter, total, b_three_counter/total, 'greater'),
# stats.binom_test(five_counter, total, b_five_counter/total, 'greater'),
# stats.binom_test(ten_counter, total, b_ten_counter/total, 'greater'),
# stats.binom_test(fifteen_counter, total, b_fifteen_counter/total, 'greater')
# ])
# np.savetxt(expName + "_results.csv", results, delimiter=",")
if __name__ == '__main__':
var_n = 28
expName = "Test_EfficientNetB3_0.001_32_CLSA_CFA_GRAHAM_GoodAndUsable"
upper = 21860
    test(expName, upper, var_n)
 | 38.561453 | 167 | 0.596378 |
b4c7cc19c3dfc52fe47020b4d3b337f1204c0d43 | 4,803 | py | Python | tensorflow_datasets/core/features/dataset_feature.py | shashwat9kumar/datasets | 99b055408025f8e934fcbb0fc054488aa087ebfb | ["Apache-2.0"] | 1 | 2021-05-10T10:41:27.000Z | 2021-05-10T10:41:27.000Z | tensorflow_datasets/core/features/dataset_feature.py | shashwat9kumar/datasets | 99b055408025f8e934fcbb0fc054488aa087ebfb | ["Apache-2.0"] | null | null | null | tensorflow_datasets/core/features/dataset_feature.py | shashwat9kumar/datasets | 99b055408025f8e934fcbb0fc054488aa087ebfb | ["Apache-2.0"] | 1 | 2021-07-04T11:07:35.000Z | 2021-07-04T11:07:35.000Z |
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset feature for nested datasets."""
import functools
from typing import Any, Dict, Iterator, Union
import tensorflow.compat.v2 as tf
from tensorflow_datasets.core.features import feature as feature_lib
from tensorflow_datasets.core.features import sequence_feature
from tensorflow_datasets.core.utils import type_utils
class Dataset(sequence_feature.Sequence):
"""A Dataset feature encodes a nested dataset.
`Dataset` corresponds to a dataset of `tfds.features.FeatureConnector`. Using
`tfds.features.Dataset` will return a nested `tf.data.Dataset` inside the
top-level `tf.data.Dataset` returned by `tfds.load`. At generation time, an
iterable over the dataset elements is given.
This is an experimental feature. Currently, only one level of nesting is
supported and TF1 graph is not supported either.
Example:
At construction time (inside `_info`):
```python
  features=tfds.features.FeaturesDict({
      'agent_id': tf.string,
      'episode': tfds.features.Dataset({
          'observation': tfds.features.Image(),
          'reward': tf.int32,
}),
})
```
Will return:
```
{
'agent_id': tf.Tensor(shape=(), dtype=tf.string),
'episode': tf.data.Dataset(element_spec={
'observation': tf.Tensor(shape=(None, None, 3), dtype=tf.uint8),
'reward': tf.Tensor(shape=(), dtype=tf.int32),
}),
}
```
The nested dataset can be used as:
```
for e in tfds.load(...): # {'agent_id': tf.Tensor, 'episode': tf.data.Dataset}
for step in e['episode']: # Each episode is a nested `tf.data.Dataset`
step['observation']
```
During generation, it accept any `Iterable`/`Iterator`, like
```python
yield _, {
'agent_id': agent_name
'episode': ({'observation': ..., 'reward': ...} for _ in range(10)),
}
```
Or a dictionary of `Iterable`, like
```python
yield _, {
'agent_id': agent_name
'episode': {'observation': np.ones(10), 'reward': np.ones(10)} ,
}
```
"""
# TODO(tfds): Add support for TF1 graph mode.
def get_tensor_info(self):
"""Shape of one element of the dataset."""
# Add the dataset level
tensor_info = self._feature.get_tensor_info()
return tf.nest.map_structure(_add_dataset_lvl, tensor_info)
def get_serialized_info(self):
# Add the dataset level and the number of elements in the dataset
tensor_info = super().get_serialized_info()
return tf.nest.map_structure(_add_dataset_lvl, tensor_info)
def encode_example(self, example_ds: Union[Iterator[type_utils.TreeDict[Any]],
Dict[str, Any]]):
if isinstance(example_ds, dict):
dict_list = sequence_feature.transpose_dict_list(example_ds)
else:
dict_list = example_ds
# Encode each individual element
ds_elements = [
self.feature.encode_example(example) for example in dict_list
]
# Empty datasets return empty arrays
if not ds_elements:
return tf.nest.map_structure(sequence_feature.build_empty_np,
self.get_serialized_info())
# Then convert back list[nested dict] => nested dict[list]
encoded = sequence_feature.stack_nested(ds_elements)
return encoded
def decode_example(self, serialized_example, decoders=None):
# NOTE: By using from_tensor_slices we remove the possibility of nested
# datasets.
# Gets the decoding function of the inner feature to apply it to the
# elements of the dataset.
decode_fn = self.feature.decode_example
if decoders:
decode_fn = functools.partial(decode_fn, decoders=decoders)
ds = tf.data.Dataset.from_tensor_slices(serialized_example).map(
decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
return ds
def _flatten(self, x):
"""See base class for details."""
return [x]
def _nest(self, list_x):
"""See base class for details."""
assert len(list_x) == 1
return list_x[0]
def _add_dataset_lvl(tensor_info):
"""Add the dataset nesting level to the tensor_info."""
tensor_info = feature_lib.TensorInfo.copy_from(tensor_info)
tensor_info.dataset_lvl += 1
return tensor_info
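# Editor's sketch (assumed usage, not part of this module): inside a DatasetBuilder the
# nested feature above is typically declared and fed like
#     features = tfds.features.FeaturesDict({
#         'episode': tfds.features.Dataset({'reward': tf.int32}),
#     })
#     ...
#     yield key, {'episode': ({'reward': r} for r in rewards)}
# and each top-level example then decodes 'episode' back into a tf.data.Dataset.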
| 31.392157 | 80 | 0.693317 |
d641e75afc77d99f8f3e2ad8403c651859e795d6 | 723 | py | Python | src/backend/common/queries/subscription_query.py | guineawheek/ftc-data-take-2 | 337bff2077eadb3bd6bbebd153cbb6181c99516f | ["MIT"] | null | null | null | src/backend/common/queries/subscription_query.py | guineawheek/ftc-data-take-2 | 337bff2077eadb3bd6bbebd153cbb6181c99516f | ["MIT"] | null | null | null | src/backend/common/queries/subscription_query.py | guineawheek/ftc-data-take-2 | 337bff2077eadb3bd6bbebd153cbb6181c99516f | ["MIT"] | null | null | null |
from typing import List
from backend.common.models.account import Account
from backend.common.models.subscription import Subscription
from backend.common.queries.database_query import DatabaseQuery
from backend.common.tasklets import typed_tasklet
class SubscriptionQuery(DatabaseQuery[List[Subscription]]):
def __init__(self, account: Account, keys_only: bool = False) -> None:
super().__init__(account=account, keys_only=keys_only)
@typed_tasklet
def _query_async(
self, account: Account, keys_only: bool = False
) -> List[Subscription]:
subscription_query = Subscription.query(ancestor=account.key)
return (yield (subscription_query.fetch_async(keys_only=keys_only)))
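# Editor's sketch (assumed usage, not part of this module): DatabaseQuery subclasses
# are normally executed through the helpers provided by the base class, e.g.
#     subscriptions = SubscriptionQuery(account).fetch()
#     future = SubscriptionQuery(account, keys_only=True).fetch_async()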
| 38.052632 | 76 | 0.767635 |
2315615ca540ba823ced82ab8e74ef2bb041be03 | 3,138 | py | Python | homeassistant/components/sensor/neurio_energy.py | TastyPi/home-assistant | aa1e4c564cb8660bf6b7637bc25317ee58869214 | ["MIT"] | 13 | 2017-02-01T13:25:34.000Z | 2022-01-26T01:30:39.000Z | homeassistant/components/sensor/neurio_energy.py | 1Forward1Back/home-assistant | ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6 | ["MIT"] | 9 | 2017-07-26T18:05:32.000Z | 2021-12-05T14:16:34.000Z | homeassistant/components/sensor/neurio_energy.py | 1Forward1Back/home-assistant | ce24ef0c20dea0fd671d6f2c2a8b1456b4b66ba6 | ["MIT"] | 21 | 2017-07-26T17:09:40.000Z | 2022-03-27T22:37:22.000Z |
"""
Support for monitoring a Neurio hub.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.neurio_energy/
"""
import logging
import requests.exceptions
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_API_KEY, CONF_NAME)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['neurio==0.2.10']
_LOGGER = logging.getLogger(__name__)
CONF_API_SECRET = 'api_secret'
CONF_SENSOR_ID = 'sensor_id'
DEFAULT_NAME = 'Energy Usage'
ICON = 'mdi:flash'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_API_SECRET): cv.string,
vol.Optional(CONF_SENSOR_ID): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
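# Editor's sketch (assumed configuration, not part of this module): with the schema
# above, the platform is configured in configuration.yaml roughly as
#     sensor:
#       - platform: neurio_energy
#         api_key: YOUR_API_KEY
#         api_secret: YOUR_API_SECRET
#         sensor_id: YOUR_SENSOR_ID   # optional; auto-detected when omitted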
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Neurio sensor."""
name = config.get(CONF_NAME)
api_key = config.get(CONF_API_KEY)
api_secret = config.get(CONF_API_SECRET)
sensor_id = config.get(CONF_SENSOR_ID)
if not sensor_id:
import neurio
neurio_tp = neurio.TokenProvider(key=api_key, secret=api_secret)
neurio_client = neurio.Client(token_provider=neurio_tp)
user_info = neurio_client.get_user_information()
        _LOGGER.warning('Sensor ID auto-detected, set sensor_id: "%s"',
user_info["locations"][0]["sensors"][0]["sensorId"])
sensor_id = user_info["locations"][0]["sensors"][0]["sensorId"]
add_devices([NeurioEnergy(api_key, api_secret, name, sensor_id)])
class NeurioEnergy(Entity):
"""Implementation of an Neurio energy."""
def __init__(self, api_key, api_secret, name, sensor_id):
"""Initialize the sensor."""
self._name = name
self.api_key = api_key
self.api_secret = api_secret
self.sensor_id = sensor_id
self._state = None
self._unit_of_measurement = 'W'
@property
def name(self):
"""Return the name of th sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
def update(self):
"""Get the Neurio monitor data from the web service."""
import neurio
try:
neurio_tp = neurio.TokenProvider(
key=self.api_key, secret=self.api_secret)
neurio_client = neurio.Client(token_provider=neurio_tp)
sample = neurio_client.get_samples_live_last(
sensor_id=self.sensor_id)
self._state = sample['consumptionPower']
except (requests.exceptions.RequestException, ValueError):
_LOGGER.warning('Could not update status for %s', self.name)
| 31.69697 | 76 | 0.680688 |
553da1dc6640940fadd1eb5860d056fb848515ec | 1,506 | py | Python | test/SCONS_LIB_DIR.py | datalogics-staylor/scons | 4c48deb6947066e53aac7d86621a7ec17f3b4034 | ["MIT"] | 3 | 2017-01-06T09:26:23.000Z | 2017-03-04T04:13:20.000Z | test/SCONS_LIB_DIR.py | datalogics-staylor/scons | 4c48deb6947066e53aac7d86621a7ec17f3b4034 | ["MIT"] | 2 | 2015-10-27T20:17:24.000Z | 2016-08-04T21:49:56.000Z | test/SCONS_LIB_DIR.py | datalogics-staylor/scons | 4c48deb6947066e53aac7d86621a7ec17f3b4034 | ["MIT"] | 4 | 2015-03-31T16:09:15.000Z | 2021-08-04T12:41:47.000Z |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
import os
import string
test = TestSCons.TestSCons()
test.subdir('SCons')
test.write(['SCons','Script.py'], """
def main ():
print "SCons.Script"
""")
test.write(['SCons','__init__.py'], """
""")
expect = "SCons.Script\n"
os.environ['SCONS_LIB_DIR'] = test.workpath()
test.run(stdout = expect)
test.pass_test()
| 29.529412 | 73 | 0.75166 |
ef0f90b13f7f863df4d4c2b1f7542f313aad1515 | 864 | py | Python | cvdd/preprocessors/stopwords.py | altescy/cvdd | 57e4fe0fd30a6d2b67651ce076b63a9a8a6e7c7a | ["MIT"] | 5 | 2021-07-11T08:40:43.000Z | 2021-07-19T05:08:11.000Z | cvdd/preprocessors/stopwords.py | altescy/cvdd | 57e4fe0fd30a6d2b67651ce076b63a9a8a6e7c7a | ["MIT"] | null | null | null | cvdd/preprocessors/stopwords.py | altescy/cvdd | 57e4fe0fd30a6d2b67651ce076b63a9a8a6e7c7a | ["MIT"] | null | null | null |
from typing import List, Optional
import nltk
from allennlp.data.tokenizers import Tokenizer, WhitespaceTokenizer
from xallennlp.data.preprocessors import Preprocessor
@Preprocessor.register("stopwords")
class Stopwords(Preprocessor[str, str]): # type: ignore[misc]
def __init__(
self,
stopwords: Optional[List[str]] = None,
tokenizer: Optional[Tokenizer] = None,
) -> None:
super().__init__()
nltk.download("stopwords")
self._stopwords = set(stopwords or nltk.corpus.stopwords.words("english"))
self._tokenizer = tokenizer or WhitespaceTokenizer()
def __call__(self, data: str) -> str:
tokens = [
token.text
for token in self._tokenizer.tokenize(data)
if token.text and token.text not in self._stopwords
]
return " ".join(tokens)
| 32 | 82 | 0.658565 |
5ba45c7fc60aed8a9c89b679aa7452368a3b98a9 | 5,855 | py | Python | libcloud/common/osc.py | bopopescu/libcloud-bak | ead66a1e0e49bb70cbaa9ad335885cad3e82fe47 | ["Apache-2.0"] | null | null | null | libcloud/common/osc.py | bopopescu/libcloud-bak | ead66a1e0e49bb70cbaa9ad335885cad3e82fe47 | ["Apache-2.0"] | null | null | null | libcloud/common/osc.py | bopopescu/libcloud-bak | ead66a1e0e49bb70cbaa9ad335885cad3e82fe47 | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import hashlib
import hmac
try:
import simplejson as json
except ImportError:
import json
from libcloud.utils.py3 import urlquote
__all__ = [
'OSCRequestSignerAlgorithmV4',
]
class OSCRequestSigner(object):
"""
    Class which handles signing outgoing Outscale (OSC) requests.
"""
def __init__(self, access_key, access_secret, version, connection):
"""
:param access_key: Access key.
:type access_key: ``str``
:param access_secret: Access secret.
:type access_secret: ``str``
:param version: API version.
:type version: ``str``
:param connection: Connection instance.
:type connection: :class:`Connection`
"""
self.access_key = access_key
self.access_secret = access_secret
self.version = version
self.connection = connection
class OSCRequestSignerAlgorithmV4(OSCRequestSigner):
@staticmethod
def sign(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
@staticmethod
def _get_signed_headers(headers):
return ';'.join([k.lower() for k in sorted(headers.keys())])
@staticmethod
def _get_canonical_headers(headers):
return '\n'.join([':'.join([k.lower(), str(v).strip()])
for k, v in sorted(headers.items())]) + '\n'
@staticmethod
def _get_request_params(params):
return '&'.join(["%s=%s" %
(urlquote(k, safe=''), urlquote(str(v), safe='~'))
for k, v in sorted(params.items())])
def get_request_headers(self, service_name, region, action,
data=None):
date = datetime.utcnow()
host = "{}.{}.outscale.com".format(service_name, region)
headers = {
'Content-Type': "application/json; charset=utf-8",
'X-Osc-Date': date.strftime('%Y%m%dT%H%M%SZ'),
'Host': host,
}
path = "/{}/{}/{}".format(self.connection.service_name, self.version, action)
sig = self._get_authorization_v4_header(
headers=headers,
dt=date,
method='POST',
path=path,
data=data
)
headers.update({'Authorization': sig})
return headers
def _get_authorization_v4_header(self, headers, dt, method='GET',
path='/', data=None):
credentials_scope = self._get_credential_scope(dt=dt)
signed_headers = self._get_signed_headers(headers=headers)
signature = self._get_signature(headers=headers, dt=dt,
method=method, path=path,
data=data)
return 'OSC4-HMAC-SHA256 Credential=%(u)s/%(c)s, ' \
'SignedHeaders=%(sh)s, Signature=%(s)s' % {
'u': self.access_key,
'c': credentials_scope,
'sh': signed_headers,
's': signature
}
def _get_signature(self, headers, dt, method, path, data):
string_to_sign = self._get_string_to_sign(headers=headers, dt=dt,
method=method, path=path,
data=data)
signing_key = self._get_key_to_sign_with(self.access_secret, dt)
return hmac.new(signing_key, string_to_sign.encode('utf-8'),
hashlib.sha256).hexdigest()
def _get_key_to_sign_with(self, key, dt):
dt = dt.strftime('%Y%m%d')
k_date = self.sign(('OSC4' + key).encode('utf-8'), dt)
k_region = self.sign(k_date, self.connection.region_name)
k_service = self.sign(k_region, self.connection.service_name)
return self.sign(k_service, 'osc4_request')
def _get_string_to_sign(self, headers, dt, method, path, data):
canonical_request = self._get_canonical_request(headers=headers,
method=method,
path=path,
data=data)
return 'OSC4-HMAC-SHA256' + '\n' \
+ dt.strftime('%Y%m%dT%H%M%SZ') + '\n' \
+ self._get_credential_scope(dt) + '\n' \
+ hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()
def _get_credential_scope(self, dt):
return '/'.join([dt.strftime('%Y%m%d'),
self.connection.region_name,
self.connection.service_name,
'osc4_request'])
def _get_canonical_request(self, headers, method, path, data="{}"):
data = data if data else "{}"
return '\n'.join([
method,
path,
self._get_request_params({}),
self._get_canonical_headers(headers),
self._get_signed_headers(headers),
hashlib.sha256(data.encode('utf-8')).hexdigest()
])
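# Editor's sketch (assumed usage, not part of this module): given a connection object
# exposing `region_name` and `service_name`, signed headers for a POST body would be
# obtained roughly like
#     signer = OSCRequestSignerAlgorithmV4(access_key, access_secret,
#                                          version='latest', connection=conn)
#     headers = signer.get_request_headers(service_name=conn.service_name,
#                                          region=conn.region_name,
#                                          action='ReadVms',
#                                          data=json.dumps({}))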
| 38.267974 | 85 | 0.574722 |
385a609fbfbc9ac89da769b434eca2f5ad6c7156 | 4,085 | py | Python | catalog/views.py | jaimish11/django_locallibrary | f3f4364a41f0a20f47e865dd89947f90aad5172c | ["MIT"] | null | null | null | catalog/views.py | jaimish11/django_locallibrary | f3f4364a41f0a20f47e865dd89947f90aad5172c | ["MIT"] | null | null | null | catalog/views.py | jaimish11/django_locallibrary | f3f4364a41f0a20f47e865dd89947f90aad5172c | ["MIT"] | null | null | null |
from django.shortcuts import render
from catalog.models import Book, Author, BookInstance, Genre
from django.views import generic
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.mixins import PermissionRequiredMixin
import datetime
from django.contrib.auth.decorators import permission_required
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from catalog.forms import RenewBookForm
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
def index(request):
"""View function for home page of site"""
#Generate counts of some of the main objects
num_books = Book.objects.all().count()
num_instances = BookInstance.objects.all().count()
#Available books (status = 'a')
num_instances_available = BookInstance.objects.filter(status__exact = 'a').count()
#The all() is implied by default
num_authors = Author.objects.count()
#The Science Fiction Genre
num_science_fiction = Genre.objects.filter(name__iexact = 'Science Fiction').count()
#Number of visits to this view, as counted by the session variable
num_visits = request.session.get('num_visits', 0)
request.session['num_visits'] = num_visits + 1
context = {
'num_books': num_books,
'num_instances': num_instances,
'num_instances_available': num_instances_available,
'num_authors': num_authors,
'num_science_fiction': num_science_fiction,
'num_visits': num_visits,
}
#Render the HTML template index.html with the data in the context variable
return render(request, 'index.html', context = context)
class BookListView(generic.ListView):
model = Book
paginate_by = 2
class BookDetailView(generic.DetailView):
model = Book
class AuthorListView(generic.ListView):
model = Author
paginate_by = 10
class AuthorDetailView(generic.DetailView):
model = Author
class LoanedBooksByUserListView(LoginRequiredMixin, generic.ListView):
"""Generic class-based view listing books on loan to current user"""
model = BookInstance
template_name = 'catalog/bookinstance_list_borrowed_user.html'
paginate_by = 4
def get_queryset(self):
return BookInstance.objects.filter(borrower = self.request.user).filter(status__exact = 'o').order_by('due_back')
class AllBorrowedBooksListView(PermissionRequiredMixin, generic.ListView):
model = BookInstance
permission_required = 'catalog.can_mark_returned'
template_name = 'catalog/bookinstance_list_borrowed_all.html'
paginate_by = 4
def get_queryset(self):
return BookInstance.objects.filter(status__exact = 'o').order_by('due_back')
@permission_required('catalog.can_mark_returned')
def renew_book_librarian(request, pk):
"""View function for renewing a specific BookInstance by librarian"""
book_instance = get_object_or_404(BookInstance, pk = pk)
    #If this is a POST request then process the form data
if request.method == 'POST':
#Create a form instance and populate it with data from the request
form = RenewBookForm(request.POST)
#Check if form is valid
if form.is_valid():
#process the data in form.cleaned_data as required
book_instance.due_back = form.cleaned_data['renewal_date']
book_instance.save()
return HttpResponseRedirect(reverse('all-borrowed'))
else:
proposed_renewal_date = datetime.date.today() + datetime.timedelta(weeks = 3)
form = RenewBookForm(initial = {'renewal_date': proposed_renewal_date})
context = {
'form': form,
'book_instance': book_instance,
}
return render(request, 'catalog/book_renew_librarian.html', context)
class AuthorCreate(CreateView):
model = Author
fields = '__all__'
class AuthorUpdate(UpdateView):
model = Author
fields = ['first_name', 'last_name', 'date_of_birth', 'date_of_death']
class AuthorDelete(DeleteView):
model = Author
success_url = reverse_lazy('authors')
class BookCreate(CreateView):
model = Book
fields = '__all__'
class BookUpdate(UpdateView):
model = Book
fields = '__all__'
class BookDelete(DeleteView):
model = Book
success_url = reverse_lazy('book')
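# Editor's sketch (assumed urls.py, not part of this file): these views are normally
# wired to named URL patterns that match the reverse()/reverse_lazy() calls above, e.g.
#     path('borrowed/', views.AllBorrowedBooksListView.as_view(), name='all-borrowed'),
#     path('book/<uuid:pk>/renew/', views.renew_book_librarian, name='renew-book-librarian'),
#     path('author/create/', views.AuthorCreate.as_view(), name='author-create'),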
| 30.94697 | 115 | 0.778703 |
d9102105fa7e10822e94dad7462837dbcdbf11b0 | 1,492 | py | Python | src/command_modules/azure-cli-acr/azure/cli/command_modules/acr/_validators.py | enterstudio/azure-cli | b0504c3b634e17f1afc944a9572864a40da6bc18 | ["MIT"] | null | null | null | src/command_modules/azure-cli-acr/azure/cli/command_modules/acr/_validators.py | enterstudio/azure-cli | b0504c3b634e17f1afc944a9572864a40da6bc18 | ["MIT"] | null | null | null | src/command_modules/azure-cli-acr/azure/cli/command_modules/acr/_validators.py | enterstudio/azure-cli | b0504c3b634e17f1afc944a9572864a40da6bc18 | ["MIT"] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core._util import CLIError
from ._factory import (
get_arm_service_client,
get_acr_service_client
)
import azure.cli.core.azlogging as azlogging
logger = azlogging.get_az_logger(__name__)
def validate_registry_name(namespace):
if namespace.registry_name:
client = get_acr_service_client().registries
registry_name = namespace.registry_name
result = client.check_name_availability(registry_name)
if not result.name_available: #pylint: disable=no-member
raise CLIError(result.message) #pylint: disable=no-member
def validate_resource_group_name(namespace):
if namespace.resource_group_name:
client = get_arm_service_client()
resource_group_name = namespace.resource_group_name
if not client.resource_groups.check_existence(resource_group_name):
logger.warning('Command to create a resource group:')
logger.warning(' az group create -n <name> -l <location>')
raise CLIError(
'The resource group {} does not exist in the current subscription.'\
.format(resource_group_name))
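# Editor's note (assumed wiring, not part of this module): these validators are attached
# to command arguments when the ACR commands are registered (e.g. via
# register_cli_argument(..., validator=validate_registry_name)), so they run automatically
# before the command handler executes.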
| 40.324324 | 94 | 0.632038 |
f63b3b0278eecbd9acba83718f4efb242847bc36 | 63,836 | py | Python | src/sage/geometry/riemannian_manifolds/parametrized_surface3d.py | qedhandle/sage | 8453ffb849b047893b6c61dd09176a84c9133342 | ["BSL-1.0"] | null | null | null | src/sage/geometry/riemannian_manifolds/parametrized_surface3d.py | qedhandle/sage | 8453ffb849b047893b6c61dd09176a84c9133342 | ["BSL-1.0"] | null | null | null | src/sage/geometry/riemannian_manifolds/parametrized_surface3d.py | qedhandle/sage | 8453ffb849b047893b6c61dd09176a84c9133342 | ["BSL-1.0"] | null | null | null |
"""
Differential Geometry of Parametrized Surfaces
AUTHORS:
- Mikhail Malakhaltsev (2010-09-25): initial version
- Joris Vankerschaver (2010-10-25): implementation, doctests
"""
# ****************************************************************************
# Copyright (C) 2010 Mikhail Malakhaltsev <mikarm@gmail.com>
# Copyright (C) 2010 Joris Vankerschaver <joris.vankerschaver@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
# ****************************************************************************
from itertools import product
from sage.structure.sage_object import SageObject
from sage.modules.free_module_element import vector
from sage.matrix.constructor import matrix
from sage.calculus.functional import diff
from sage.functions.other import sqrt
from sage.misc.cachefunc import cached_method
from sage.symbolic.ring import SR
from sage.symbolic.constants import pi
def _simplify_full_rad(f):
"""
Helper function to conveniently call :meth:`simplify_full` and
:meth:`canonicalize_radical` in succession.
INPUT:
- ``f`` - a symbolic expression.
EXAMPLES::
sage: from sage.geometry.riemannian_manifolds.parametrized_surface3d import _simplify_full_rad
sage: _simplify_full_rad(sqrt(x^2)/x)
1
"""
return f.simplify_full().canonicalize_radical()
class ParametrizedSurface3D(SageObject):
r"""
Class representing a parametrized two-dimensional surface in
Euclidian three-space. Provides methods for calculating the main
geometrical objects related to such a surface, such as the first
and the second fundamental form, the total (Gaussian) and the mean
curvature, the geodesic curves, parallel transport, etc.
INPUT:
- ``surface_equation`` -- a 3-tuple of functions specifying a parametric
representation of the surface.
- ``variables`` -- a 2-tuple of intrinsic coordinates `(u, v)` on the
surface, with `u` and `v` symbolic variables, or a 2-tuple of triples
$(u, u_{min}, u_{max})$,
$(v, v_{min}, v_{max})$ when the parameter range
for the coordinates is known.
- ``name`` -- name of the surface (optional).
.. note::
Throughout the documentation, we use the Einstein summation
convention: whenever an index appears twice, once as a
subscript, and once as a superscript, summation over that index
is implied. For instance, `g_{ij} g^{jk}` stands for `\sum_j
g_{ij}g^{jk}`.
EXAMPLES:
We give several examples of standard surfaces in differential
geometry. First, let's construct an elliptic paraboloid by
explicitly specifying its parametric equation::
sage: u, v = var('u,v', domain='real')
sage: eparaboloid = ParametrizedSurface3D((u, v, u^2 + v^2), (u, v),'elliptic paraboloid'); eparaboloid
Parametrized surface ('elliptic paraboloid') with equation (u, v, u^2 + v^2)
When the ranges for the intrinsic coordinates are known, they can be
specified explicitly. This is mainly useful for plotting. Here we
construct half of an ellipsoid::
sage: u1, u2 = var ('u1, u2', domain='real')
sage: coords = ((u1, -pi/2, pi/2), (u2, 0, pi))
sage: ellipsoid_eq = (cos(u1)*cos(u2), 2*sin(u1)*cos(u2), 3*sin(u2))
sage: ellipsoid = ParametrizedSurface3D(ellipsoid_eq, coords, 'ellipsoid'); ellipsoid
Parametrized surface ('ellipsoid') with equation (cos(u1)*cos(u2), 2*cos(u2)*sin(u1), 3*sin(u2))
sage: ellipsoid.plot()
Graphics3d Object
Standard surfaces can be constructed using the ``surfaces`` generator::
sage: klein = surfaces.Klein(); klein
Parametrized surface ('Klein bottle') with equation (-(sin(1/2*u)*sin(2*v) - cos(1/2*u)*sin(v) - 1)*cos(u), -(sin(1/2*u)*sin(2*v) - cos(1/2*u)*sin(v) - 1)*sin(u), cos(1/2*u)*sin(2*v) + sin(1/2*u)*sin(v))
Latex representation of the surfaces::
sage: u, v = var('u, v', domain='real')
sage: sphere = ParametrizedSurface3D((cos(u)*cos(v), sin(u)*cos(v), sin(v)), (u, v), 'sphere')
sage: print(latex(sphere))
\left(\cos\left(u\right) \cos\left(v\right), \cos\left(v\right) \sin\left(u\right), \sin\left(v\right)\right)
sage: print(sphere._latex_())
\left(\cos\left(u\right) \cos\left(v\right), \cos\left(v\right) \sin\left(u\right), \sin\left(v\right)\right)
sage: print(sphere)
Parametrized surface ('sphere') with equation (cos(u)*cos(v), cos(v)*sin(u), sin(v))
To plot a parametric surface, use the :meth:`plot` member function::
sage: enneper = surfaces.Enneper(); enneper
Parametrized surface ('Enneper's surface') with equation (-1/9*(u^2 - 3*v^2 - 3)*u, -1/9*(3*u^2 - v^2 + 3)*v, 1/3*u^2 - 1/3*v^2)
sage: enneper.plot(aspect_ratio='automatic')
Graphics3d Object
We construct an ellipsoid whose axes are given by symbolic variables `a`,
`b` and `c`, and find the natural frame of tangent vectors,
expressed in intrinsic coordinates. Note that the result is a
dictionary of vector fields::
sage: a, b, c = var('a, b, c', domain='real')
sage: u1, u2 = var('u1, u2', domain='real')
sage: ellipsoid_eq = (a*cos(u1)*cos(u2), b*sin(u1)*cos(u2), c*sin(u2))
sage: ellipsoid = ParametrizedSurface3D(ellipsoid_eq, (u1, u2), 'Symbolic ellipsoid'); ellipsoid
Parametrized surface ('Symbolic ellipsoid') with equation (a*cos(u1)*cos(u2), b*cos(u2)*sin(u1), c*sin(u2))
sage: ellipsoid.natural_frame()
{1: (-a*cos(u2)*sin(u1), b*cos(u1)*cos(u2), 0), 2: (-a*cos(u1)*sin(u2), -b*sin(u1)*sin(u2), c*cos(u2))}
We find the normal vector field to the surface. The normal vector
field is the vector product of the vectors of the natural frame,
and is given by::
sage: ellipsoid.normal_vector()
(b*c*cos(u1)*cos(u2)^2, a*c*cos(u2)^2*sin(u1), a*b*cos(u2)*sin(u2))
By default, the normal vector field is not normalized. To obtain
the unit normal vector field of the elliptic paraboloid, we put::
sage: u, v = var('u,v', domain='real')
sage: eparaboloid = ParametrizedSurface3D([u,v,u^2+v^2],[u,v],'elliptic paraboloid')
sage: eparaboloid.normal_vector(normalized=True)
(-2*u/sqrt(4*u^2 + 4*v^2 + 1), -2*v/sqrt(4*u^2 + 4*v^2 + 1), 1/sqrt(4*u^2 + 4*v^2 + 1))
Now let us compute the coefficients of the first fundamental form of the torus::
sage: u, v = var('u, v', domain='real')
sage: a, b = var('a, b', domain='real')
sage: torus = ParametrizedSurface3D(((a + b*cos(u))*cos(v),(a + b*cos(u))*sin(v), b*sin(u)),[u,v],'torus')
sage: torus.first_fundamental_form_coefficients()
{(1, 1): b^2, (1, 2): 0, (2, 1): 0, (2, 2): b^2*cos(u)^2 + 2*a*b*cos(u) + a^2}
The first fundamental form can be used to compute the length of a
curve on the surface. For example, let us find the length of the
curve $u^1 = t$, $u^2 = t$, $t \in [0,2\pi]$, on the ellipsoid
with axes $a=1$, $b=1.5$ and $c=1$. So we take the curve::
sage: t = var('t', domain='real')
sage: u1 = t
sage: u2 = t
Then find the tangent vector::
sage: du1 = diff(u1,t)
sage: du2 = diff(u2,t)
sage: du = vector([du1, du2]); du
(1, 1)
Once we specify numerical values for the axes of the ellipsoid, we can
determine the numerical value of the length integral::
sage: L = sqrt(ellipsoid.first_fundamental_form(du, du).substitute(u1=u1,u2=u2))
sage: numerical_integral(L.substitute(a=2, b=1.5, c=1),0,1)[0] # rel tol 1e-11
2.00127905972
We find the area of the sphere of radius $R$::
sage: R = var('R', domain='real')
sage: u, v = var('u,v', domain='real')
sage: assume(R>0)
sage: assume(cos(v)>0)
sage: sphere = ParametrizedSurface3D([R*cos(u)*cos(v),R*sin(u)*cos(v),R*sin(v)],[u,v],'sphere')
sage: integral(integral(sphere.area_form(),u,0,2*pi),v,-pi/2,pi/2)
4*pi*R^2
We can find an orthonormal frame field $\{e_1, e_2\}$ of a surface
and calculate its structure functions. Let us first determine the
orthonormal frame field for the elliptic paraboloid::
sage: u, v = var('u,v', domain='real')
sage: eparaboloid = ParametrizedSurface3D([u,v,u^2+v^2],[u,v],'elliptic paraboloid')
sage: eparaboloid.orthonormal_frame()
{1: (1/sqrt(4*u^2 + 1), 0, 2*u/sqrt(4*u^2 + 1)), 2: (-4*u*v/(sqrt(4*u^2 + 4*v^2 + 1)*sqrt(4*u^2 + 1)), sqrt(4*u^2 + 1)/sqrt(4*u^2 + 4*v^2 + 1), 2*v/(sqrt(4*u^2 + 4*v^2 + 1)*sqrt(4*u^2 + 1)))}
We can express the orthogonal frame field both in exterior
coordinates (i.e. expressed as vector field fields in the ambient
space $\RR^3$, the default) or in intrinsic coordinates
(with respect to the natural frame). Here we use intrinsic
coordinates::
sage: eparaboloid.orthonormal_frame(coordinates='int')
{1: (1/sqrt(4*u^2 + 1), 0), 2: (-4*u*v/(sqrt(4*u^2 + 4*v^2 + 1)*sqrt(4*u^2 + 1)), sqrt(4*u^2 + 1)/sqrt(4*u^2 + 4*v^2 + 1))}
Using the orthonormal frame in interior coordinates, we can calculate
the structure functions $c^k_{ij}$ of the surface, defined by
$[e_i,e_j] = c^k_{ij} e_k$, where $[e_i, e_j]$ represents the Lie
bracket of two frame vector fields $e_i, e_j$. For the
elliptic paraboloid, we get::
sage: EE = eparaboloid.orthonormal_frame(coordinates='int')
sage: E1 = EE[1]; E2 = EE[2]
sage: CC = eparaboloid.frame_structure_functions(E1,E2)
sage: CC[1,2,1].simplify_full()
4*sqrt(4*u^2 + 4*v^2 + 1)*v/((16*u^4 + 4*(4*u^2 + 1)*v^2 + 8*u^2 + 1)*sqrt(4*u^2 + 1))
We compute the Gaussian and mean curvatures of the sphere::
sage: sphere = surfaces.Sphere(); sphere
Parametrized surface ('Sphere') with equation (cos(u)*cos(v), cos(v)*sin(u), sin(v))
sage: K = sphere.gauss_curvature(); K # Not tested -- see trac 12737
1
sage: H = sphere.mean_curvature(); H # Not tested -- see trac 12737
-1
We can easily generate a color plot of the Gaussian curvature of a surface.
Here we deal with the ellipsoid::
sage: u1, u2 = var('u1,u2', domain='real')
sage: u = [u1,u2]
sage: ellipsoid_equation(u1,u2) = [2*cos(u1)*cos(u2),1.5*cos(u1)*sin(u2),sin(u1)]
sage: ellipsoid = ParametrizedSurface3D(ellipsoid_equation(u1,u2), [u1, u2],'ellipsoid')
sage: # set intervals for variables and the number of division points
sage: u1min, u1max = -1.5, 1.5
sage: u2min, u2max = 0, 6.28
sage: u1num, u2num = 10, 20
sage: # make the arguments array
sage: from numpy import linspace
sage: u1_array = linspace(u1min, u1max, u1num)
sage: u2_array = linspace(u2min, u2max, u2num)
sage: u_array = [ (uu1,uu2) for uu1 in u1_array for uu2 in u2_array]
sage: # Find the gaussian curvature
sage: K(u1,u2) = ellipsoid.gauss_curvature()
sage: # Make array of K values
sage: K_array = [K(uu[0],uu[1]) for uu in u_array]
sage: # Find minimum and max of the gauss curvature
sage: K_max = max(K_array)
sage: K_min = min(K_array)
sage: # Make the array of color coefficients
sage: cc_array = [ (ccc - K_min)/(K_max - K_min) for ccc in K_array ]
sage: points_array = [ellipsoid_equation(u_array[counter][0],u_array[counter][1]) for counter in range(0,len(u_array)) ]
sage: curvature_ellipsoid_plot = sum( point([xx for xx in points_array[counter]],color=hue(cc_array[counter]/2)) for counter in range(0,len(u_array)) )
sage: curvature_ellipsoid_plot.show(aspect_ratio=1)
We can find the principal curvatures and principal directions of the
elliptic paraboloid::
sage: u, v = var('u, v', domain='real')
sage: eparaboloid = ParametrizedSurface3D([u, v, u^2+v^2], [u, v], 'elliptic paraboloid')
sage: pd = eparaboloid.principal_directions(); pd
[(2*sqrt(4*u^2 + 4*v^2 + 1)/(16*u^4 + 16*v^4 + 8*(4*u^2 + 1)*v^2 + 8*u^2 + 1), [(1, v/u)], 1), (2/sqrt(4*u^2 + 4*v^2 + 1), [(1, -u/v)], 1)]
We extract the principal curvatures::
sage: k1 = pd[0][0].simplify_full()
sage: k1
2*sqrt(4*u^2 + 4*v^2 + 1)/(16*u^4 + 16*v^4 + 8*(4*u^2 + 1)*v^2 + 8*u^2 + 1)
sage: k2 = pd[1][0].simplify_full()
sage: k2
2/sqrt(4*u^2 + 4*v^2 + 1)
and check them by comparison with the Gaussian and mean curvature
expressed in terms of the principal curvatures::
sage: K = eparaboloid.gauss_curvature().simplify_full()
sage: K
4/(16*u^4 + 16*v^4 + 8*(4*u^2 + 1)*v^2 + 8*u^2 + 1)
sage: H = eparaboloid.mean_curvature().simplify_full()
sage: H
2*(2*u^2 + 2*v^2 + 1)/(4*u^2 + 4*v^2 + 1)^(3/2)
sage: (K - k1*k2).simplify_full()
0
sage: (2*H - k1 - k2).simplify_full()
0
We can find the intrinsic (local coordinates) of the principal directions::
sage: pd[0][1]
[(1, v/u)]
sage: pd[1][1]
[(1, -u/v)]
The ParametrizedSurface3D class also contains functionality to
compute the coefficients of the second fundamental form, the shape
operator, the rotation on the surface at a given angle, the
connection coefficients. One can also calculate numerically the
geodesics and the parallel translation along a curve.
Here we compute a number of geodesics on the sphere emanating
from the point ``(1, 0, 0)``, in various directions. The geodesics
intersect again in the antipodal point ``(-1, 0, 0)``, indicating
that these points are conjugate::
sage: S = surfaces.Sphere()
sage: g1 = [c[-1] for c in S.geodesics_numerical((0,0),(1,0),(0,2*pi,100))]
sage: g2 = [c[-1] for c in S.geodesics_numerical((0,0),(cos(pi/3),sin(pi/3)),(0,2*pi,100))]
sage: g3 = [c[-1] for c in S.geodesics_numerical((0,0),(cos(2*pi/3),sin(2*pi/3)),(0,2*pi,100))]
sage: (S.plot(opacity=0.3) + line3d(g1,color='red') + line3d(g2,color='red') + line3d(g3,color='red')).show()
"""
def __init__(self, equation, variables, name=None):
r"""
See ``ParametrizedSurface3D`` for full documentation.
.. note::
The orientation of the surface is determined by the
parametrization, that is, the natural frame with positive
orientation is given by `\partial_1 \vec r`, `\partial_2 \vec
r`.
EXAMPLES::
sage: u, v = var('u,v', domain='real')
sage: eq = (3*u + 3*u*v^2 - u^3, 3*v + 3*u^2*v - v^3, 3*(u^2-v^2))
sage: enneper = ParametrizedSurface3D(eq, (u, v),'Enneper Surface'); enneper
Parametrized surface ('Enneper Surface') with equation (-u^3 + 3*u*v^2 + 3*u, 3*u^2*v - v^3 + 3*v, 3*u^2 - 3*v^2)
"""
self.equation = tuple(equation)
if len(variables[0]):
self.variables_range = (variables[0][1:3], variables[1][1:3])
self.variables_list = (variables[0][0], variables[1][0])
else:
self.variables_range = None
self.variables_list = variables
self.variables = {1:self.variables_list[0], 2:self.variables_list[1]}
self.name = name
def _latex_(self):
r"""
Return the LaTeX representation of this parametrized surface.
EXAMPLES::
sage: u, v = var('u, v')
sage: sphere = ParametrizedSurface3D((cos(u)*cos(v), sin(u)*cos(v), sin(v)), (u, v),'sphere')
sage: latex(sphere)
\left(\cos\left(u\right) \cos\left(v\right), \cos\left(v\right) \sin\left(u\right), \sin\left(v\right)\right)
sage: sphere._latex_()
\left(\cos\left(u\right) \cos\left(v\right), \cos\left(v\right) \sin\left(u\right), \sin\left(v\right)\right)
"""
from sage.misc.latex import latex
return latex(self.equation)
def _repr_(self):
r"""
Returns the string representation of this parametrized surface.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: eq = (3*u + 3*u*v^2 - u^3, 3*v + 3*u^2*v - v^3, 3*(u^2-v^2))
sage: enneper = ParametrizedSurface3D(eq,[u,v],'enneper_surface')
sage: print(enneper)
Parametrized surface ('enneper_surface') with equation (-u^3 + 3*u*v^2 + 3*u, 3*u^2*v - v^3 + 3*v, 3*u^2 - 3*v^2)
sage: enneper._repr_()
"Parametrized surface ('enneper_surface') with equation (-u^3 + 3*u*v^2 + 3*u, 3*u^2*v - v^3 + 3*v, 3*u^2 - 3*v^2)"
"""
name = 'Parametrized surface'
if self.name is not None:
name += " ('%s')" % self.name
s ='%(designation)s with equation %(eq)s' % \
{'designation': name, 'eq': str(self.equation)}
return s
def point(self, coords):
r"""
Returns a point on the surface given its intrinsic coordinates.
INPUT:
- ``coords`` - 2-tuple specifying the intrinsic coordinates ``(u, v)`` of the point.
OUTPUT:
- 3-vector specifying the coordinates in `\RR^3` of the point.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: torus = ParametrizedSurface3D(((2 + cos(u))*cos(v),(2 + cos(u))*sin(v), sin(u)),[u,v],'torus')
sage: torus.point((0, pi/2))
(0, 3, 0)
sage: torus.point((pi/2, pi))
(-2, 0, 1)
sage: torus.point((pi, pi/2))
(0, 1, 0)
"""
d = dict(zip(self.variables_list, coords))
return vector([f.subs(d) for f in self.equation])
def tangent_vector(self, coords, components):
r"""
Returns the components of a tangent vector given the intrinsic
coordinates of the base point and the components of the vector
in the intrinsic frame.
INPUT:
- ``coords`` - 2-tuple specifying the intrinsic coordinates ``(u, v)`` of the point.
- ``components`` - 2-tuple specifying the components of the tangent vector in the intrinsic coordinate frame.
OUTPUT:
- 3-vector specifying the components in `\RR^3` of the vector.
EXAMPLES:
We compute two tangent vectors to Enneper's surface along the
coordinate lines and check that their cross product gives the
normal vector::
sage: u, v = var('u,v', domain='real')
sage: eq = (3*u + 3*u*v^2 - u^3, 3*v + 3*u^2*v - v^3, 3*(u^2-v^2))
sage: e = ParametrizedSurface3D(eq, (u, v),'Enneper Surface')
sage: w1 = e.tangent_vector((1, 2), (1, 0)); w1
(12, 12, 6)
sage: w2 = e.tangent_vector((1, 2), (0, 1)); w2
(12, -6, -12)
sage: w1.cross_product(w2)
(-108, 216, -216)
sage: n = e.normal_vector().subs({u: 1, v: 2}); n
(-108, 216, -216)
sage: n == w1.cross_product(w2)
True
"""
components = vector(components)
d = dict(zip(self.variables_list, coords))
jacobian = matrix([[f.diff(u).subs(d) for u in self.variables_list]
for f in self.equation])
return jacobian * components
def plot(self, urange=None, vrange=None, **kwds):
r"""
Enable easy plotting directly from the surface class.
The optional keywords ``urange`` and ``vrange`` specify the range for
the surface parameters `u` and `v`. If either of these parameters
is ``None``, the method checks whether a parameter range was
specified when the surface was created. If not, the default of
$(0, 2 \pi)$ is used.
INPUT:
- ``urange`` - 2-tuple specifying the parameter range for `u`.
- ``vrange`` - 2-tuple specifying the parameter range for `v`.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: eq = (3*u + 3*u*v^2 - u^3, 3*v + 3*u^2*v - v^3, 3*(u^2-v^2))
sage: enneper = ParametrizedSurface3D(eq, (u, v), 'Enneper Surface')
sage: enneper.plot((-5, 5), (-5, 5))
Graphics3d Object
"""
from sage.plot.plot3d.parametric_plot3d import parametric_plot3d
if self.variables_range is None:
if urange is None:
urange = (0, 2*pi)
if vrange is None:
vrange = (0, 2*pi)
else:
if urange is None:
urange = self.variables_range[0]
if vrange is None:
vrange = self.variables_range[1]
urange3 = (self.variables[1],) + tuple(urange)
vrange3 = (self.variables[2],) + tuple(vrange)
P = parametric_plot3d(self.equation, urange3, vrange3, **kwds)
return P
@cached_method
def natural_frame(self):
"""
Returns the natural tangent frame on the parametrized surface.
The vectors of this frame are tangent to the coordinate lines
on the surface.
OUTPUT:
- The natural frame as a dictionary.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: eparaboloid = ParametrizedSurface3D((u, v, u^2+v^2), (u, v), 'elliptic paraboloid')
sage: eparaboloid.natural_frame()
{1: (1, 0, 2*u), 2: (0, 1, 2*v)}
"""
dr1 = \
vector([_simplify_full_rad( diff(f,self.variables[1]) )
for f in self.equation])
dr2 = \
vector([_simplify_full_rad( diff(f,self.variables[2]) )
for f in self.equation])
return {1:dr1, 2:dr2}
@cached_method
def normal_vector(self, normalized=False):
"""
Returns the normal vector field of the parametrized surface.
INPUT:
- ``normalized`` - default ``False`` - specifies whether the normal vector should be normalized.
OUTPUT:
- Normal vector field.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: eparaboloid = ParametrizedSurface3D((u, v, u^2 + v^2), (u, v), 'elliptic paraboloid')
sage: eparaboloid.normal_vector(normalized=False)
(-2*u, -2*v, 1)
sage: eparaboloid.normal_vector(normalized=True)
(-2*u/sqrt(4*u^2 + 4*v^2 + 1), -2*v/sqrt(4*u^2 + 4*v^2 + 1), 1/sqrt(4*u^2 + 4*v^2 + 1))
"""
dr = self.natural_frame()
normal = dr[1].cross_product(dr[2])
if normalized:
normal /= normal.norm()
return _simplify_full_rad(normal)
@cached_method
def _compute_first_fundamental_form_coefficient(self, index):
"""
Helper function to compute coefficients of the first fundamental form.
Do not call this method directly; instead use
``first_fundamental_form_coefficient``.
This method is cached, and expects its argument to be a list.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: eparaboloid = ParametrizedSurface3D((u, v, u^2+v^2), (u, v))
sage: eparaboloid._compute_first_fundamental_form_coefficient((1,2))
4*u*v
"""
dr = self.natural_frame()
return _simplify_full_rad(dr[index[0]]*dr[index[1]])
def first_fundamental_form_coefficient(self, index):
r"""
Compute a single component $g_{ij}$ of the first fundamental form. If
the parametric representation of the surface is given by the vector
function $\vec r(u^i)$, where $u^i$, $i = 1, 2$ are curvilinear
coordinates, then $g_{ij} = \frac{\partial \vec r}{\partial u^i} \cdot \frac{\partial \vec r}{\partial u^j}$.
INPUT:
- ``index`` - tuple ``(i, j)`` specifying the index of the component $g_{ij}$.
OUTPUT:
- Component $g_{ij}$ of the first fundamental form
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: eparaboloid = ParametrizedSurface3D((u, v, u^2+v^2), (u, v))
sage: eparaboloid.first_fundamental_form_coefficient((1,2))
4*u*v
When the index is invalid, an error is raised::
sage: u, v = var('u, v', domain='real')
sage: eparaboloid = ParametrizedSurface3D((u, v, u^2+v^2), (u, v))
sage: eparaboloid.first_fundamental_form_coefficient((1,5))
Traceback (most recent call last):
...
ValueError: Index (1, 5) out of bounds.
"""
index = tuple(sorted(index))
if len(index) == 2 and all(i == 1 or i == 2 for i in index):
return self._compute_first_fundamental_form_coefficient(index)
else:
raise ValueError("Index %s out of bounds." % str(index))
def first_fundamental_form_coefficients(self):
r"""
Returns the coefficients of the first fundamental form as a dictionary.
The keys are tuples $(i, j)$, where $i$ and $j$ range over $1, 2$,
while the values are the corresponding coefficients $g_{ij}$.
OUTPUT:
- Dictionary of first fundamental form coefficients.
EXAMPLES::
sage: u, v = var('u,v', domain='real')
sage: sphere = ParametrizedSurface3D((cos(u)*cos(v), sin(u)*cos(v), sin(v)), (u, v), 'sphere')
sage: sphere.first_fundamental_form_coefficients()
{(1, 1): cos(v)^2, (1, 2): 0, (2, 1): 0, (2, 2): 1}
"""
coefficients = {}
for index in product((1, 2), repeat=2):
coefficients[index] = \
self._compute_first_fundamental_form_coefficient(index)
return coefficients
def first_fundamental_form(self, vector1, vector2):
r"""
Evaluate the first fundamental form on two vectors expressed with
respect to the natural coordinate frame on the surface. In other words,
if the vectors are $v = (v^1, v^2)$ and $w = (w^1, w^2)$, calculate
$g_{11} v^1 w^1 + g_{12}(v^1 w^2 + v^2 w^1) + g_{22} v^2 w^2$, with
$g_{ij}$ the coefficients of the first fundamental form.
INPUT:
- ``vector1``, ``vector2`` - vectors on the surface.
OUTPUT:
- First fundamental form evaluated on the input vectors.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: v1, v2, w1, w2 = var('v1, v2, w1, w2', domain='real')
sage: sphere = ParametrizedSurface3D((cos(u)*cos(v), sin(u)*cos(v), sin(v)), (u, v),'sphere')
sage: sphere.first_fundamental_form(vector([v1,v2]),vector([w1,w2]))
v1*w1*cos(v)^2 + v2*w2
sage: vv = vector([1,2])
sage: sphere.first_fundamental_form(vv,vv)
cos(v)^2 + 4
sage: sphere.first_fundamental_form([1,1],[2,1])
2*cos(v)^2 + 1
"""
gamma = self.first_fundamental_form_coefficients()
return sum(gamma[(i,j)] * vector1[i - 1] * vector2[j - 1]
for i, j in product((1, 2), repeat=2))
def area_form_squared(self):
"""
Returns the square of the coefficient of the area form on the surface.
In terms of the coefficients $g_{ij}$ (where $i, j = 1, 2$) of the
first fundamental form, this invariant is given by
$A^2 = g_{11}g_{22} - g_{12}^2$.
See also :meth:`.area_form`.
OUTPUT:
- Square of the area form
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: sphere = ParametrizedSurface3D([cos(u)*cos(v),sin(u)*cos(v),sin(v)],[u,v],'sphere')
sage: sphere.area_form_squared()
cos(v)^2
"""
gamma = self.first_fundamental_form_coefficients()
sq = gamma[(1,1)] * gamma[(2,2)] - gamma[(1,2)]**2
return _simplify_full_rad(sq)
def area_form(self):
r"""
Returns the coefficient of the area form on the surface. In terms of
the coefficients $g_{ij}$ (where $i, j = 1, 2$) of the first
fundamental form, the coefficient of the area form is given by
$A = \sqrt{g_{11}g_{22} - g_{12}^2}$.
See also :meth:`.area_form_squared`.
OUTPUT:
- Coefficient of the area form
EXAMPLES::
sage: u, v = var('u,v', domain='real')
sage: sphere = ParametrizedSurface3D([cos(u)*cos(v),sin(u)*cos(v),sin(v)],[u,v],'sphere')
sage: sphere.area_form()
cos(v)
"""
f = abs(sqrt(self.area_form_squared()))
return _simplify_full_rad(f)
def first_fundamental_form_inverse_coefficients(self):
r"""
Returns the coefficients $g^{ij}$ of the inverse of the fundamental
form, as a dictionary. The inverse coefficients are defined by
$g^{ij} g_{jk} = \delta^i_k$ with $\delta^i_k$ the Kronecker
delta.
OUTPUT:
- Dictionary of the inverse coefficients.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: sphere = ParametrizedSurface3D([cos(u)*cos(v),sin(u)*cos(v),sin(v)],[u,v],'sphere')
sage: sphere.first_fundamental_form_inverse_coefficients()
{(1, 1): cos(v)^(-2), (1, 2): 0, (2, 1): 0, (2, 2): 1}
"""
g = self.first_fundamental_form_coefficients()
D = g[(1,1)] * g[(2,2)] - g[(1,2)]**2
gi11 = _simplify_full_rad(g[(2,2)]/D)
gi12 = _simplify_full_rad(-g[(1,2)]/D)
gi21 = gi12
gi22 = _simplify_full_rad(g[(1,1)]/D)
return {(1,1): gi11, (1,2): gi12, (2,1): gi21, (2,2): gi22}
def first_fundamental_form_inverse_coefficient(self, index):
r"""
Returns a specific component $g^{ij}$ of the inverse of the fundamental
form.
INPUT:
- ``index`` - tuple ``(i, j)`` specifying the index of the component $g^{ij}$.
OUTPUT:
- Component of the inverse of the fundamental form.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: sphere = ParametrizedSurface3D([cos(u)*cos(v),sin(u)*cos(v),sin(v)],[u,v],'sphere')
sage: sphere.first_fundamental_form_inverse_coefficient((1, 2))
0
sage: sphere.first_fundamental_form_inverse_coefficient((1, 1))
cos(v)^(-2)
"""
index = tuple(sorted(index))
if len(index) == 2 and all(i == 1 or i == 2 for i in index):
return self.first_fundamental_form_inverse_coefficients()[index]
else:
raise ValueError("Index %s out of bounds." % str(index))
@cached_method
def rotation(self,theta):
r"""
Gives the matrix of the rotation operator over a given angle $\theta$
with respect to the natural frame.
INPUT:
- ``theta`` - rotation angle
OUTPUT:
- Rotation matrix with respect to the natural frame.
ALGORITHM:
The operator of rotation over $\pi/2$ is $J^i_j = g^{ik}\omega_{jk}$,
where $\omega$ is the area form. The operator of rotation over an
angle $\theta$ is $\cos(\theta) I + \sin(\theta) J$.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: assume(cos(v)>0)
sage: sphere = ParametrizedSurface3D([cos(u)*cos(v),sin(u)*cos(v),sin(v)],[u,v],'sphere')
We first compute the matrix of rotation over $\pi/3$::
sage: rotation = sphere.rotation(pi/3); rotation
[ 1/2 -1/2*sqrt(3)/cos(v)]
[ 1/2*sqrt(3)*cos(v) 1/2]
We verify that three successive rotations over $\pi/3$ yield minus the identity::
sage: rotation^3
[-1 0]
[ 0 -1]
"""
from sage.functions.trig import sin, cos
gi = self.first_fundamental_form_inverse_coefficients()
w12 = self.area_form()
R11 = (cos(theta) + sin(theta)*gi[1,2]*w12).simplify_full()
R12 = (- sin(theta)*gi[1,1]*w12).simplify_full()
R21 = (sin(theta)*gi[2,2]*w12).simplify_full()
R22 = (cos(theta) - sin(theta)*gi[2,1]*w12).simplify_full()
return matrix([[R11,R12],[R21,R22]])
@cached_method
def orthonormal_frame(self, coordinates='ext'):
r"""
Returns the orthonormal frame field on the surface, expressed either
in exterior coordinates (i.e. expressed as vector fields in the
ambient space $\mathbb{R}^3$, the default) or interior coordinates
(with respect to the natural frame).
INPUT:
- ``coordinates`` - either ``ext`` (default) or ``int``.
OUTPUT:
- Orthonormal frame field as a dictionary.
ALGORITHM:
We normalize the first vector $\vec e_1$ of the natural frame and then
get the second frame vector as $\vec e_2 = [\vec n, \vec e_1]$, where
$\vec n$ is the unit normal to the surface.
EXAMPLES::
sage: u, v = var('u,v', domain='real')
sage: assume(cos(v)>0)
sage: sphere = ParametrizedSurface3D([cos(u)*cos(v), sin(u)*cos(v), sin(v)], [u, v],'sphere')
sage: frame = sphere.orthonormal_frame(); frame
{1: (-sin(u), cos(u), 0), 2: (-cos(u)*sin(v), -sin(u)*sin(v), cos(v))}
sage: (frame[1]*frame[1]).simplify_full()
1
sage: (frame[1]*frame[2]).simplify_full()
0
sage: frame[1] == sphere.orthonormal_frame_vector(1)
True
We compute the orthonormal frame with respect to the natural frame on
the surface::
sage: frame_int = sphere.orthonormal_frame(coordinates='int'); frame_int
{1: (1/cos(v), 0), 2: (0, 1)}
sage: sphere.first_fundamental_form(frame_int[1], frame_int[1])
1
sage: sphere.first_fundamental_form(frame_int[1], frame_int[2])
0
sage: sphere.first_fundamental_form(frame_int[2], frame_int[2])
1
"""
from sage.symbolic.constants import pi
if coordinates not in ['ext', 'int']:
raise ValueError("Coordinate system must be exterior ('ext') "
"or interior ('int').")
c = self.first_fundamental_form_coefficient([1,1])
if coordinates == 'ext':
f1 = self.natural_frame()[1]
E1 = _simplify_full_rad(f1/sqrt(c))
E2 = _simplify_full_rad(
self.normal_vector(normalized=True).cross_product(E1))
else:
E1 = vector([_simplify_full_rad(1/sqrt(c)), 0])
E2 = (self.rotation(pi/2)*E1).simplify_full()
return {1:E1, 2:E2}
def orthonormal_frame_vector(self, index, coordinates='ext'):
r"""
Returns a specific basis vector field of the orthonormal frame field on
the surface, expressed in exterior or interior coordinates. See
:meth:`orthonormal_frame` for more details.
INPUT:
- ``index`` - index of the basis vector;
- ``coordinates`` - either ``ext`` (default) or ``int``.
OUTPUT:
- Orthonormal frame vector field.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: assume(cos(v)>0)
sage: sphere = ParametrizedSurface3D([cos(u)*cos(v),sin(u)*cos(v),sin(v)],[u,v],'sphere')
sage: V1 = sphere.orthonormal_frame_vector(1); V1
(-sin(u), cos(u), 0)
sage: V2 = sphere.orthonormal_frame_vector(2); V2
(-cos(u)*sin(v), -sin(u)*sin(v), cos(v))
sage: (V1*V1).simplify_full()
1
sage: (V1*V2).simplify_full()
0
sage: n = sphere.normal_vector(normalized=True)
sage: (V1.cross_product(V2) - n).simplify_full()
(0, 0, 0)
"""
return self.orthonormal_frame(coordinates)[index]
def lie_bracket(self, v, w):
r"""
Returns the Lie bracket of two vector fields that are tangent
to the surface. The vector fields should be given in intrinsic
coordinates, i.e. with respect to the natural frame.
INPUT:
- ``v`` and ``w`` - vector fields on the surface, expressed
as pairs of functions or as vectors of length 2.
OUTPUT:
- The Lie bracket $[v, w]$.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: assume(cos(v)>0)
sage: sphere = ParametrizedSurface3D([cos(u)*cos(v),sin(u)*cos(v),sin(v)],[u,v],'sphere')
sage: sphere.lie_bracket([u,v],[-v,u])
(0, 0)
sage: EE_int = sphere.orthonormal_frame(coordinates='int')
sage: sphere.lie_bracket(EE_int[1],EE_int[2])
(sin(v)/cos(v)^2, 0)
"""
v = vector(SR, v)
w = vector(SR, w)
variables = self.variables_list
Dv = matrix([[_simplify_full_rad(diff(component, u))
for u in variables] for component in v])
Dw = matrix([[_simplify_full_rad(diff(component, u))
for u in variables] for component in w])
return vector(Dv*w - Dw*v).simplify_full()
def frame_structure_functions(self, e1, e2):
r"""
Returns the structure functions $c^k_{ij}$ for a frame field
$e_1, e_2$, i.e. a pair of vector fields on the surface which are
linearly independent at each point. The structure functions are
defined using the Lie bracket by $[e_i,e_j] = c^k_{ij}e_k$.
INPUT:
- ``e1``, ``e2`` - vector fields in intrinsic coordinates on
the surface, expressed as pairs of functions, or as vectors of
length 2.
OUTPUT:
- Dictionary of structure functions, where the key ``(i, j, k)`` refers to
the structure function $c_{i,j}^k$.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: assume(cos(v) > 0)
sage: sphere = ParametrizedSurface3D([cos(u)*cos(v), sin(u)*cos(v), sin(v)], [u, v], 'sphere')
sage: sphere.frame_structure_functions([u, v], [-v, u])
{(1, 1, 1): 0,
(1, 1, 2): 0,
(1, 2, 1): 0,
(1, 2, 2): 0,
(2, 1, 1): 0,
(2, 1, 2): 0,
(2, 2, 1): 0,
(2, 2, 2): 0}
We construct the structure functions of the orthonormal frame on the
surface::
sage: EE_int = sphere.orthonormal_frame(coordinates='int')
sage: CC = sphere.frame_structure_functions(EE_int[1],EE_int[2]); CC
{(1, 1, 1): 0,
(1, 1, 2): 0,
(1, 2, 1): sin(v)/cos(v),
(1, 2, 2): 0,
(2, 1, 1): -sin(v)/cos(v),
(2, 1, 2): 0,
(2, 2, 1): 0,
(2, 2, 2): 0}
sage: sphere.lie_bracket(EE_int[1],EE_int[2]) - CC[(1,2,1)]*EE_int[1] - CC[(1,2,2)]*EE_int[2]
(0, 0)
"""
e1 = vector(SR, e1)
e2 = vector(SR, e2)
lie_bracket = self.lie_bracket(e1, e2).simplify_full()
transformation = matrix(SR, [e1, e2]).transpose()
w = (transformation.inverse()*lie_bracket).simplify_full()
return {(1,1,1): 0, (1,1,2): 0, (1,2,1): w[0], (1,2,2): w[1],
(2,1,1): -w[0], (2,1,2): -w[1], (2,2,1): 0, (2,2,2): 0}
@cached_method
def _compute_second_order_frame_element(self, index):
"""
Compute an element of the second order frame of the surface. See
:meth:`second_order_natural_frame` for more details.
This method expects its arguments in tuple form for caching.
As it does no input checking, it should not be called directly.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: paraboloid = ParametrizedSurface3D([u, v, u^2 + v^2], [u,v], 'paraboloid')
sage: paraboloid._compute_second_order_frame_element((1, 2))
(0, 0, 0)
sage: paraboloid._compute_second_order_frame_element((2, 2))
(0, 0, 2)
"""
variables = [self.variables[i] for i in index]
ddr_element = vector([_simplify_full_rad(diff(f, variables))
for f in self.equation])
return ddr_element
def second_order_natural_frame(self):
r"""
Returns the second-order frame of the surface, i.e. computes the
second-order derivatives (with respect to the parameters on the
surface) of the parametric expression $\vec r = \vec r(u^1,u^2)$
of the surface.
OUTPUT:
- Dictionary where the keys are 2-tuples ``(i, j)`` and the values are the corresponding derivatives $r_{ij}$.
EXAMPLES:
We compute the second-order natural frame of the sphere::
sage: u, v = var('u, v', domain='real')
sage: sphere = ParametrizedSurface3D([cos(u)*cos(v),sin(u)*cos(v),sin(v)],[u,v],'sphere')
sage: sphere.second_order_natural_frame()
{(1, 1): (-cos(u)*cos(v), -cos(v)*sin(u), 0),
(1, 2): (sin(u)*sin(v), -cos(u)*sin(v), 0),
(2, 1): (sin(u)*sin(v), -cos(u)*sin(v), 0),
(2, 2): (-cos(u)*cos(v), -cos(v)*sin(u), -sin(v))}
"""
vectors = {}
for index in product((1, 2), repeat=2):
sorted_index = tuple(sorted(index))
vectors[index] = \
self._compute_second_order_frame_element(sorted_index)
return vectors
def second_order_natural_frame_element(self, index):
r"""
Returns a vector in the second-order frame of the surface, i.e.
computes the second-order derivatives of the parametric expression
$\vec{r}$ of the surface with respect to the parameters listed in the
argument.
INPUT:
- ``index`` - a 2-tuple ``(i, j)`` specifying the element of the second-order frame.
OUTPUT:
- The second-order derivative $r_{ij}$.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: sphere = ParametrizedSurface3D([cos(u)*cos(v),sin(u)*cos(v),sin(v)],[u,v],'sphere')
sage: sphere.second_order_natural_frame_element((1, 2))
(sin(u)*sin(v), -cos(u)*sin(v), 0)
"""
index = tuple(sorted(index))
if len(index) == 2 and all(i == 1 or i == 2 for i in index):
return self._compute_second_order_frame_element(index)
else:
raise ValueError("Index %s out of bounds." % str(index))
@cached_method
def _compute_second_fundamental_form_coefficient(self, index):
"""
Compute a coefficient of the second fundamental form of the surface.
See ``second_fundamental_form_coefficient`` for more details.
This method expects its arguments in tuple form for caching. As it
does no input checking, it should not be called directly.
EXAMPLES::
sage: u, v = var('u,v', domain='real')
sage: paraboloid = ParametrizedSurface3D([u, v, u^2+v^2], [u, v], 'paraboloid')
sage: paraboloid._compute_second_fundamental_form_coefficient((1,1))
2/sqrt(4*u^2 + 4*v^2 + 1)
"""
N = self.normal_vector(normalized=True)
v = self.second_order_natural_frame_element(index)
return _simplify_full_rad(v*N)
def second_fundamental_form_coefficient(self, index):
r"""
Returns the coefficient $h_{ij}$ of the second fundamental form
corresponding to the index $(i, j)$. If the equation of the surface
is $\vec{r}(u^1, u^2)$, then $h_{ij} = \vec{r}_{u^i u^j} \cdot \vec{n}$,
where $\vec{n}$ is the unit normal.
INPUT:
- ``index`` - a 2-tuple ``(i, j)``
OUTPUT:
- Component $h_{ij}$ of the second fundamental form.
EXAMPLES::
sage: u, v = var('u,v', domain='real')
sage: assume(cos(v)>0)
sage: sphere = ParametrizedSurface3D([cos(u)*cos(v),sin(u)*cos(v),sin(v)],[u,v],'sphere')
sage: sphere.second_fundamental_form_coefficient((1, 1))
-cos(v)^2
sage: sphere.second_fundamental_form_coefficient((2, 1))
0
"""
index = tuple(index)
if len(index) == 2 and all(i == 1 or i == 2 for i in index):
return self._compute_second_fundamental_form_coefficient(index)
else:
raise ValueError("Index %s out of bounds." % str(index))
def second_fundamental_form_coefficients(self):
"""
Returns the coefficients $h_{ij}$ of the second fundamental form as
a dictionary, where the keys are the indices $(i, j)$ and the values
are the corresponding components $h_{ij}$.
When only one component is needed, consider instead the function
:meth:`second_fundamental_form_coefficient`.
OUTPUT:
Dictionary of second fundamental form coefficients.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: assume(cos(v)>0)
sage: sphere = ParametrizedSurface3D([cos(u)*cos(v),sin(u)*cos(v),sin(v)],[u,v],'sphere')
sage: sphere.second_fundamental_form_coefficients()
{(1, 1): -cos(v)^2, (1, 2): 0, (2, 1): 0, (2, 2): -1}
"""
coefficients = {}
for index in product((1, 2), repeat=2):
coefficients[index] = \
self._compute_second_fundamental_form_coefficient(index)
return coefficients
def second_fundamental_form(self,vector1,vector2):
r"""
Evaluates the second fundamental form on two vectors on the surface.
If the vectors are given by $v=(v^1,v^2)$ and $w=(w^1,w^2)$, the
result of this function is $h_{11} v^1 w^1 + h_{12}(v^1 w^2 + v^2 w^1) + h_{22} v^2 w^2$.
INPUT:
- ``vector1``, ``vector2`` - 2-tuples representing the input vectors.
OUTPUT:
- Value of the second fundamental form evaluated on the given vectors.
EXAMPLES:
We evaluate the second fundamental form on two symbolic vectors::
sage: u, v = var('u, v', domain='real')
sage: v1, v2, w1, w2 = var('v1, v2, w1, w2', domain='real')
sage: assume(cos(v) > 0)
sage: sphere = ParametrizedSurface3D([cos(u)*cos(v),sin(u)*cos(v),sin(v)],[u,v],'sphere')
sage: sphere.second_fundamental_form(vector([v1, v2]), vector([w1, w2]))
-v1*w1*cos(v)^2 - v2*w2
We evaluate the second fundamental form on vectors with numerical
components::
sage: vect = vector([1,2])
sage: sphere.second_fundamental_form(vect, vect)
-cos(v)^2 - 4
sage: sphere.second_fundamental_form([1,1], [2,1])
-2*cos(v)^2 - 1
"""
hh = self.second_fundamental_form_coefficients()
return sum(hh[(i, j)] * vector1[i - 1] * vector2[j - 1]
for (i, j) in product((1, 2), repeat=2))
def gauss_curvature(self):
r"""
Finds the gaussian curvature of the surface, given by
$K = \frac{h_{11}h_{22} - h_{12}^2}{g_{11}g_{22} - g_{12}^2}$,
where $g_{ij}$ and $h_{ij}$ are the coefficients of the first
and second fundamental form, respectively.
OUTPUT:
- Gaussian curvature of the surface.
EXAMPLES::
sage: R = var('R')
sage: assume(R>0)
sage: u, v = var('u,v', domain='real')
sage: assume(cos(v)>0)
sage: sphere = ParametrizedSurface3D([R*cos(u)*cos(v),R*sin(u)*cos(v),R*sin(v)],[u,v],'sphere')
sage: sphere.gauss_curvature()
R^(-2)
"""
hh = self.second_fundamental_form_coefficients()
return _simplify_full_rad(
(hh[(1,1)] * hh[(2,2)] - hh[(1,2)]**2)/self.area_form_squared())
def mean_curvature(self):
r"""
Finds the mean curvature of the surface, given by
$H = \frac{1}{2}\frac{g_{22}h_{11} - 2g_{12}h_{12} + g_{11}h_{22}}{g_{11}g_{22} - g_{12}^2}$,
where $g_{ij}$ and $h_{ij}$ are the components of the first and second
fundamental forms, respectively.
OUTPUT:
- Mean curvature of the surface
EXAMPLES::
sage: R = var('R')
sage: assume(R>0)
sage: u, v = var('u,v', domain='real')
sage: assume(cos(v)>0)
sage: sphere = ParametrizedSurface3D([R*cos(u)*cos(v),R*sin(u)*cos(v),R*sin(v)],[u,v],'sphere')
sage: sphere.mean_curvature()
-1/R
"""
gg = self.first_fundamental_form_coefficients()
hh = self.second_fundamental_form_coefficients()
denom = 2*self.area_form_squared()
numer = (gg[(2,2)]*hh[(1,1)] - 2*gg[(1,2)]*hh[(1,2)] +
gg[(1,1)]*hh[(2,2)]).simplify_full()
return _simplify_full_rad(numer/denom)
@cached_method
def shape_operator_coefficients(self):
r"""
Returns the components of the shape operator of the surface as a
dictionary. See ``shape_operator`` for more information.
OUTPUT:
- Dictionary where the keys are two-tuples ``(i, j)``, with values the
corresponding component of the shape operator.
EXAMPLES::
sage: R = var('R')
sage: u, v = var('u,v', domain='real')
sage: assume(cos(v)>0)
sage: sphere = ParametrizedSurface3D([R*cos(u)*cos(v),R*sin(u)*cos(v),R*sin(v)],[u,v],'sphere')
sage: sphere.shape_operator_coefficients()
{(1, 1): -1/R, (1, 2): 0, (2, 1): 0, (2, 2): -1/R}
"""
gi = self.first_fundamental_form_inverse_coefficients()
hh = self.second_fundamental_form_coefficients()
sh_op11 = _simplify_full_rad(gi[(1,1)]*hh[(1,1)] + gi[(1,2)]*hh[(1,2)])
sh_op12 = _simplify_full_rad(gi[(1,1)]*hh[(2,1)] + gi[(1,2)]*hh[(2,2)])
sh_op21 = _simplify_full_rad(gi[(2,1)]*hh[(1,1)] + gi[(2,2)]*hh[(1,2)])
sh_op22 = _simplify_full_rad(gi[(2,1)]*hh[(2,1)] + gi[(2,2)]*hh[(2,2)])
return {(1,1): sh_op11, (1,2): sh_op12, (2,1): sh_op21, (2,2): sh_op22}
def shape_operator(self):
r"""
Returns the shape operator of the surface as a matrix. The shape
operator is defined as the derivative of the Gauss map, and is
computed here in terms of the first and second fundamental form by
means of the Weingarten equations.
OUTPUT:
- Matrix of the shape operator
EXAMPLES::
sage: R = var('R')
sage: assume(R>0)
sage: u, v = var('u,v', domain='real')
sage: assume(cos(v)>0)
sage: sphere = ParametrizedSurface3D([R*cos(u)*cos(v),R*sin(u)*cos(v),R*sin(v)],[u,v],'sphere')
sage: S = sphere.shape_operator(); S
[-1/R 0]
[ 0 -1/R]
The eigenvalues of the shape operator are the principal curvatures of
the surface::
sage: u, v = var('u,v', domain='real')
sage: paraboloid = ParametrizedSurface3D([u, v, u^2+v^2], [u, v], 'paraboloid')
sage: S = paraboloid.shape_operator(); S
[2*(4*v^2 + 1)/(4*u^2 + 4*v^2 + 1)^(3/2) -8*u*v/(4*u^2 + 4*v^2 + 1)^(3/2)]
[ -8*u*v/(4*u^2 + 4*v^2 + 1)^(3/2) 2*(4*u^2 + 1)/(4*u^2 + 4*v^2 + 1)^(3/2)]
sage: S.eigenvalues()
[2*sqrt(4*u^2 + 4*v^2 + 1)/(16*u^4 + 16*v^4 + 8*(4*u^2 + 1)*v^2 + 8*u^2 + 1), 2/sqrt(4*u^2 + 4*v^2 + 1)]
"""
shop = self.shape_operator_coefficients()
shop_matrix=matrix([[shop[(1,1)],shop[(1,2)]],
[shop[(2,1)],shop[(2,2)]]])
return shop_matrix
def principal_directions(self):
r"""
Finds the principal curvatures and principal directions of the surface
OUTPUT:
For each principal curvature, returns a list of the form
$(\rho, V, n)$, where $\rho$ is the principal curvature,
$V$ is the corresponding principal direction, and $n$ is
the multiplicity.
EXAMPLES::
sage: u, v = var('u, v', domain='real')
sage: R, r = var('R,r', domain='real')
sage: assume(R>r,r>0)
sage: torus = ParametrizedSurface3D([(R+r*cos(v))*cos(u),(R+r*cos(v))*sin(u),r*sin(v)],[u,v],'torus')
sage: torus.principal_directions()
[(-cos(v)/(r*cos(v) + R), [(1, 0)], 1), (-1/r, [(0, 1)], 1)]
::
sage: u, v = var('u, v', domain='real')
sage: V = vector([u*cos(u+v), u*sin(u+v), u+v])
sage: helicoid = ParametrizedSurface3D(V, (u, v))
sage: helicoid.principal_directions()
[(-1/(u^2 + 1), [(1, -(u^2 - sqrt(u^2 + 1) + 1)/(u^2 + 1))], 1),
(1/(u^2 + 1), [(1, -(u^2 + sqrt(u^2 + 1) + 1)/(u^2 + 1))], 1)]
"""
return self.shape_operator().eigenvectors_right()
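# Editorial note (not part of the original source): the values returned above
# are the eigenvalues of the shape operator, so with this module's sign
# conventions their product reproduces ``gauss_curvature()`` and their average
# reproduces ``mean_curvature()``. A minimal cross-check sketch, reusing the
# paraboloid from the example::
#
#     k1, k2 = [rho for rho, vecs, mult in paraboloid.principal_directions()]
#     (k1*k2 - paraboloid.gauss_curvature()).simplify_full()       # expect 0
#     ((k1 + k2)/2 - paraboloid.mean_curvature()).simplify_full()  # expect 0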
@cached_method
def connection_coefficients(self):
r"""
Computes the connection coefficients or Christoffel symbols
$\Gamma^k_{ij}$ of the surface. If the coefficients of the first
fundamental form are given by $g_{ij}$ (where $i, j = 1, 2$), then
$\Gamma^k_{ij} = \frac{1}{2} g^{kl} \left( \frac{\partial g_{li}}{\partial x^j}
- \frac{\partial g_{ij}}{\partial x^l}
+ \frac{\partial g_{lj}}{\partial x^i} \right)$.
Here, $(g^{kl})$ is the inverse of the matrix $(g_{ij})$, with
$i, j = 1, 2$.
OUTPUT:
Dictionary of connection coefficients, where the keys are 3-tuples
$(i,j,k)$ and the values are the corresponding coefficients
$\Gamma^k_{ij}$.
EXAMPLES::
sage: r = var('r')
sage: assume(r > 0)
sage: u, v = var('u,v', domain='real')
sage: assume(cos(v)>0)
sage: sphere = ParametrizedSurface3D([r*cos(u)*cos(v),r*sin(u)*cos(v),r*sin(v)],[u,v],'sphere')
sage: sphere.connection_coefficients()
{(1, 1, 1): 0,
(1, 1, 2): cos(v)*sin(v),
(1, 2, 1): -sin(v)/cos(v),
(1, 2, 2): 0,
(2, 1, 1): -sin(v)/cos(v),
(2, 1, 2): 0,
(2, 2, 1): 0,
(2, 2, 2): 0}
"""
x = self.variables
gg = self.first_fundamental_form_coefficients()
gi = self.first_fundamental_form_inverse_coefficients()
dg = {}
for i,j,k in product((1, 2), repeat=3):
dg[(i,j,k)] = _simplify_full_rad(gg[(j,k)].differentiate(x[i]))
structfun={}
for i,j,k in product((1, 2), repeat=3):
structfun[(i,j,k)] = sum(gi[(k,s)]*(dg[(i,j,s)] + dg[(j,i,s)]
-dg[(s,i,j)])/2
for s in (1,2))
structfun[(i,j,k)] = _simplify_full_rad(structfun[(i,j,k)])
return structfun
@cached_method
def _create_geodesic_ode_system(self):
r"""
Helper method to create a fast floating-point version of the
geodesic equations, used by :meth:`geodesics_numerical`.
EXAMPLES::
sage: p, q = var('p,q', domain='real')
sage: sphere = ParametrizedSurface3D([cos(q)*cos(p),sin(q)*cos(p),sin(p)],[p,q],'sphere')
sage: ode = sphere._create_geodesic_ode_system()
sage: ode.function(0.0, (1.0, 0.0, 1.0, 1.0))
[1.00000000000000, 1.00000000000000, -0.4546487134128409, 3.114815449309804]
"""
from sage.ext.fast_eval import fast_float
from sage.calculus.ode import ode_solver
u1 = self.variables[1]
u2 = self.variables[2]
v1 = SR.var('v1', domain='real')
v2 = SR.var('v2', domain='real')
C = self.connection_coefficients()
dv1 = - C[(1,1,1)]*v1**2 - 2*C[(1,2,1)]*v1*v2 - C[(2,2,1)]*v2**2
dv2 = - C[(1,1,2)]*v1**2 - 2*C[(1,2,2)]*v1*v2 - C[(2,2,2)]*v2**2
fun1 = fast_float(dv1, str(u1), str(u2), str(v1), str(v2))
fun2 = fast_float(dv2, str(u1), str(u2), str(v1), str(v2))
geodesic_ode = ode_solver()
geodesic_ode.function = (
lambda t, u1_u2_v1_v2:
[u1_u2_v1_v2[2], u1_u2_v1_v2[3], fun1(*u1_u2_v1_v2), fun2(*u1_u2_v1_v2)])
return geodesic_ode
def geodesics_numerical(self, p0, v0, tinterval):
r"""
Numerical integration of the geodesic equations. Explicitly, the
geodesic equations are given by
$\frac{d^2 u^i}{dt^2} + \Gamma^i_{jk} \frac{d u^j}{dt} \frac{d u^k}{dt} = 0$.
Solving these equations gives the coordinates $(u^1, u^2)$ of
the geodesic on the surface. The coordinates in space can
then be found by substituting $(u^1, u^2)$ into the vector
$\vec{r}(u^1, u^2)$ representing the surface.
ALGORITHM:
The geodesic equations are integrated forward in time using
the ode solvers from ``sage.calculus.ode``. See the member
function ``_create_geodesic_ode_system`` for more details.
INPUT:
- ``p0`` - 2-tuple with coordinates of the initial point.
- ``v0`` - 2-tuple with components of the initial tangent vector to the geodesic.
- ``tinterval`` - List ``[a, b, M]``, where ``(a,b)`` is the domain of the geodesic and ``M`` is the number of subdivision points used when returning the solution.
OUTPUT:
List of lists ``[t, [u1(t), u2(t)], [v1(t), v2(t)], [x1(t), x2(t), x3(t)]]``, where
- ``t`` is a subdivision point;
- ``[u1(t), u2(t)]`` are the intrinsic coordinates of the geodesic point;
- ``[v1(t), v2(t)]`` are the intrinsic coordinates of the tangent vector to the geodesic;
- ``[x1(t), x2(t), x3(t)]`` are the coordinates of the geodesic point in the three-dimensional space.
EXAMPLES::
sage: p, q = var('p,q', domain='real')
sage: assume(cos(q)>0)
sage: sphere = ParametrizedSurface3D([cos(q)*cos(p),sin(q)*cos(p),sin(p)],[p,q],'sphere')
sage: geodesic = sphere.geodesics_numerical([0.0,0.0],[1.0,1.0],[0,2*pi,5])
sage: times, points, tangent_vectors, ext_points = zip(*geodesic)
sage: round4 = lambda vec: [N(x, digits=4) for x in vec] # helper function to round to 4 digits
sage: round4(times)
[0.0000, 1.257, 2.513, 3.770, 5.027, 6.283]
sage: [round4(p) for p in points]
[[0.0000, 0.0000], [0.7644, 1.859], [-0.2876, 3.442], [-0.6137, 5.502], [0.5464, 6.937], [0.3714, 9.025]]
sage: [round4(p) for p in ext_points]
[[1.000, 0.0000, 0.0000], [-0.2049, 0.6921, 0.6921], [-0.9160, -0.2836, -0.2836], [0.5803, -0.5759, -0.5759], [0.6782, 0.5196, 0.5196], [-0.8582, 0.3629, 0.3629]]
"""
solver = self._create_geodesic_ode_system()
t_interval, n = tinterval[0:2], tinterval[2]
solver.y_0 = [p0[0], p0[1], v0[0], v0[1]]
solver.ode_solve(t_span=t_interval, num_points=n)
parsed_solution = \
[[vec[0], vec[1][0:2], vec[1][2:], self.point(vec[1])]
for vec in solver.solution]
return parsed_solution
@cached_method
def _create_pt_ode_system(self, curve, t):
"""
Helper method to create a fast floating-point version of the parallel
transport equations, used by ``parallel_translation_numerical``.
INPUT:
- ``curve`` - curve in intrinsic coordinates along which to do parallel transport.
- ``t`` - curve parameter
EXAMPLES::
sage: p, q = var('p,q', domain='real')
sage: sphere = ParametrizedSurface3D([cos(q)*cos(p),sin(q)*cos(p),sin(p)],[p,q],'sphere')
sage: s = var('s')
sage: ode = sphere._create_pt_ode_system((s, s), s)
sage: ode.function(0.0, (1.0, 1.0))
[-0.0, 0.0]
"""
from sage.ext.fast_eval import fast_float
from sage.calculus.ode import ode_solver
u1 = self.variables[1]
u2 = self.variables[2]
v1 = SR.var('v1', domain='real')
v2 = SR.var('v2', domain='real')
du1 = diff(curve[0], t)
du2 = diff(curve[1], t)
C = self.connection_coefficients()
for coef in C:
C[coef] = C[coef].subs({u1: curve[0], u2: curve[1]})
dv1 = - C[(1,1,1)]*v1*du1 - C[(1,2,1)]*(du1*v2 + du2*v1) - \
C[(2,2,1)]*du2*v2
dv2 = - C[(1,1,2)]*v1*du1 - C[(1,2,2)]*(du1*v2 + du2*v1) - \
C[(2,2,2)]*du2*v2
fun1 = fast_float(dv1, str(t), str(v1), str(v2))
fun2 = fast_float(dv2, str(t), str(v1), str(v2))
pt_ode = ode_solver()
pt_ode.function = lambda t, v1_v2: [fun1(t, v1_v2[0], v1_v2[1]), fun2(t, v1_v2[0], v1_v2[1])]
return pt_ode
def parallel_translation_numerical(self,curve,t,v0,tinterval):
r"""
Numerically solves the equations for parallel translation of a vector
along a curve on the surface. Explicitly, the equations for parallel
translation are given by
$\frac{d u^i}{dt} + u^j \frac{d c^k}{dt} \Gamma^i_{jk} = 0$,
where $\Gamma^i_{jk}$ are the connection coefficients of the surface,
the vector to be transported has components $u^j$ and the curve along
which to transport has components $c^k$.
ALGORITHM:
The parallel transport equations are integrated forward in time using
the ode solvers from ``sage.calculus.ode``. See :meth:`_create_pt_ode_system`
for more details.
INPUT:
- ``curve`` - 2-tuple of functions which determine the curve with respect to
the local coordinate system;
- ``t`` - symbolic variable denoting the curve parameter;
- ``v0`` - 2-tuple representing the initial vector;
- ``tinterval`` - list ``[a, b, N]``, where ``(a, b)`` is the domain of the curve
and ``N`` is the number of subdivision points.
OUTPUT:
The list consisting of lists ``[t, [v1(t), v2(t)]]``, where
- ``t`` is a subdivision point;
- ``[v1(t), v2(t)]`` is the list of coordinates of the vector parallel translated
along the curve.
EXAMPLES::
sage: p, q = var('p,q', domain='real')
sage: v = [p,q]
sage: assume(cos(q)>0)
sage: sphere = ParametrizedSurface3D([cos(q)*cos(p),sin(q)*cos(p),sin(p)],v,'sphere')
sage: s = var('s')
sage: vector_field = sphere.parallel_translation_numerical([s,s],s,[1.0,1.0],[0.0, pi/4, 5])
sage: times, components = zip(*vector_field)
sage: round4 = lambda vec: [N(x, digits=4) for x in vec] # helper function to round to 4 digits
sage: round4(times)
[0.0000, 0.1571, 0.3142, 0.4712, 0.6283, 0.7854]
sage: [round4(v) for v in components]
[[1.000, 1.000], [0.9876, 1.025], [0.9499, 1.102], [0.8853, 1.238], [0.7920, 1.448], [0.6687, 1.762]]
"""
solver = self._create_pt_ode_system(tuple(curve), t)
t_interval, n = tinterval[0:2], tinterval[2]
solver.y_0 = v0
solver.ode_solve(t_span=t_interval, num_points=n)
return solver.solution
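# Editorial usage sketch (not part of the original source): the symbolic and
# numerical helpers above compose into a short end-to-end session; mirroring
# the sphere example used throughout the docstrings::
#
#     p, q = var('p,q', domain='real')
#     assume(cos(q) > 0)
#     sphere = ParametrizedSurface3D([cos(q)*cos(p), sin(q)*cos(p), sin(p)], [p, q], 'sphere')
#     sphere.gauss_curvature()                                          # symbolic invariant
#     sphere.geodesics_numerical([0.0, 0.0], [1.0, 1.0], [0, 2*pi, 5])  # numerical geodesic
#     s = var('s')
#     sphere.parallel_translation_numerical([s, s], s, [1.0, 1.0], [0.0, pi/4, 5])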
| 37.222157 | 211 | 0.562676 |
79ca4318625bec7cf7dba96845318180110b7df1 | 13,108 | py | Python | test/test_keypair.py | nathanwhit/py-substrate-interface | 285fdb5e87f580157913398ea19c807abb1cde07 | [
"Apache-2.0"
] | null | null | null | test/test_keypair.py | nathanwhit/py-substrate-interface | 285fdb5e87f580157913398ea19c807abb1cde07 | [
"Apache-2.0"
] | null | null | null | test/test_keypair.py | nathanwhit/py-substrate-interface | 285fdb5e87f580157913398ea19c807abb1cde07 | [
"Apache-2.0"
] | null | null | null | # Python Substrate Interface Library
#
# Copyright 2018-2020 Stichting Polkascan (Polkascan Foundation).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from substrateinterface.constants import DEV_PHRASE
from substrateinterface.key import extract_derive_path
from substrateinterface.exceptions import ConfigurationError
from scalecodec.base import ScaleBytes
from substrateinterface import Keypair, KeypairType
from bip39 import bip39_validate
class KeyPairTestCase(unittest.TestCase):
def test_generate_mnemonic(self):
mnemonic = Keypair.generate_mnemonic()
self.assertTrue(bip39_validate(mnemonic))
def test_invalid_mnemonic(self):
mnemonic = "This is an invalid mnemonic"
self.assertFalse(bip39_validate(mnemonic))
def test_create_sr25519_keypair(self):
mnemonic = "old leopard transfer rib spatial phone calm indicate online fire caution review"
keypair = Keypair.create_from_mnemonic(mnemonic, ss58_format=0)
self.assertEqual(keypair.ss58_address, "16ADqpMa4yzfmWs3nuTSMhfZ2ckeGtvqhPWCNqECEGDcGgU2")
def test_only_provide_ss58_address(self):
keypair = Keypair(ss58_address='16ADqpMa4yzfmWs3nuTSMhfZ2ckeGtvqhPWCNqECEGDcGgU2')
self.assertEqual(keypair.public_key, bytes.fromhex('e4359ad3e2716c539a1d663ebd0a51bdc5c98a12e663bb4c4402db47828c9446'))
def test_only_provide_public_key(self):
keypair = Keypair(
public_key=bytes.fromhex('e4359ad3e2716c539a1d663ebd0a51bdc5c98a12e663bb4c4402db47828c9446'),
ss58_format=0
)
self.assertEqual(keypair.ss58_address, '16ADqpMa4yzfmWs3nuTSMhfZ2ckeGtvqhPWCNqECEGDcGgU2')
def test_provide_no_ss58_address_and_public_key(self):
self.assertRaises(ValueError, Keypair)
def test_incorrect_private_key_length_sr25519(self):
self.assertRaises(
ValueError, Keypair, private_key='0x23', ss58_address='16ADqpMa4yzfmWs3nuTSMhfZ2ckeGtvqhPWCNqECEGDcGgU2'
)
def test_incorrect_public_key(self):
self.assertRaises(ValueError, Keypair, public_key='0x23')
def test_sign_and_verify(self):
mnemonic = Keypair.generate_mnemonic()
keypair = Keypair.create_from_mnemonic(mnemonic)
signature = keypair.sign("Test123")
self.assertTrue(keypair.verify("Test123", signature))
def test_sign_and_verify_hex_data(self):
mnemonic = Keypair.generate_mnemonic()
keypair = Keypair.create_from_mnemonic(mnemonic)
signature = keypair.sign("0x1234")
self.assertTrue(keypair.verify("0x1234", signature))
def test_sign_and_verify_scale_bytes(self):
mnemonic = Keypair.generate_mnemonic()
keypair = Keypair.create_from_mnemonic(mnemonic)
data = ScaleBytes('0x1234')
signature = keypair.sign(data)
self.assertTrue(keypair.verify(data, signature))
def test_sign_missing_private_key(self):
keypair = Keypair(ss58_address="5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY")
self.assertRaises(ConfigurationError, keypair.sign, "0x1234")
def test_sign_unsupported_crypto_type(self):
keypair = Keypair.create_from_private_key(
ss58_address='16ADqpMa4yzfmWs3nuTSMhfZ2ckeGtvqhPWCNqECEGDcGgU2',
private_key='0x1f1995bdf3a17b60626a26cfe6f564b337d46056b7a1281b64c649d592ccda0a9cffd34d9fb01cae1fba61aeed184c817442a2186d5172416729a4b54dd4b84e',
crypto_type=3
)
self.assertRaises(ConfigurationError, keypair.sign, "0x1234")
def test_verify_unsupported_crypto_type(self):
keypair = Keypair.create_from_private_key(
ss58_address='16ADqpMa4yzfmWs3nuTSMhfZ2ckeGtvqhPWCNqECEGDcGgU2',
private_key='0x1f1995bdf3a17b60626a26cfe6f564b337d46056b7a1281b64c649d592ccda0a9cffd34d9fb01cae1fba61aeed184c817442a2186d5172416729a4b54dd4b84e',
crypto_type=3
)
self.assertRaises(ConfigurationError, keypair.verify, "0x1234", '0x1234')
def test_sign_and_verify_incorrect_signature(self):
mnemonic = Keypair.generate_mnemonic()
keypair = Keypair.create_from_mnemonic(mnemonic)
signature = "0x4c291bfb0bb9c1274e86d4b666d13b2ac99a0bacc04a4846fb8ea50bda114677f83c1f164af58fc184451e5140cc8160c4de626163b11451d3bbb208a1889f8a"
self.assertFalse(keypair.verify("Test123", signature))
def test_sign_and_verify_invalid_signature(self):
mnemonic = Keypair.generate_mnemonic()
keypair = Keypair.create_from_mnemonic(mnemonic)
signature = "Test"
self.assertRaises(TypeError, keypair.verify, "Test123", signature)
def test_sign_and_verify_invalid_message(self):
mnemonic = Keypair.generate_mnemonic()
keypair = Keypair.create_from_mnemonic(mnemonic)
signature = keypair.sign("Test123")
self.assertFalse(keypair.verify("OtherMessage", signature))
def test_create_ed25519_keypair(self):
mnemonic = "old leopard transfer rib spatial phone calm indicate online fire caution review"
keypair = Keypair.create_from_mnemonic(mnemonic, ss58_format=0, crypto_type=KeypairType.ED25519)
self.assertEqual("16dYRUXznyhvWHS1ktUENGfNAEjCawyDzHRtN9AdFnJRc38h", keypair.ss58_address)
def test_sign_and_verify_ed25519(self):
mnemonic = Keypair.generate_mnemonic()
keypair = Keypair.create_from_mnemonic(mnemonic, crypto_type=KeypairType.ED25519)
signature = keypair.sign("Test123")
self.assertTrue(keypair.verify("Test123", signature))
def test_sign_and_verify_invalid_signature_ed25519(self):
mnemonic = Keypair.generate_mnemonic()
keypair = Keypair.create_from_mnemonic(mnemonic, crypto_type=KeypairType.ED25519)
signature = "0x4c291bfb0bb9c1274e86d4b666d13b2ac99a0bacc04a4846fb8ea50bda114677f83c1f164af58fc184451e5140cc8160c4de626163b11451d3bbb208a1889f8a"
self.assertFalse(keypair.verify("Test123", signature))
def test_create_ecdsa_keypair_private_key(self):
private_key = bytes.fromhex("b516d07cbf975a08adf9465c4864b6d7e348b04c241db5eb8f24d89de629d387")
keypair = Keypair.create_from_private_key(private_key=private_key, crypto_type=KeypairType.ECDSA)
self.assertEqual("0xc6A0d8799D596BDd5C30E9ACbe2c63F37c142e35", keypair.ss58_address)
self.assertEqual(bytes.fromhex("c6A0d8799D596BDd5C30E9ACbe2c63F37c142e35"), keypair.public_key)
def test_create_ecdsa_keypair_mnemonic(self):
mnemonic = "old leopard transfer rib spatial phone calm indicate online fire caution review"
# m/44'/60'/0'/0/0
keypair = Keypair.create_from_mnemonic(mnemonic, crypto_type=KeypairType.ECDSA)
self.assertEqual("0xc6A0d8799D596BDd5C30E9ACbe2c63F37c142e35", keypair.ss58_address)
def test_create_ecdsa_keypair_uri(self):
mnemonic = "old leopard transfer rib spatial phone calm indicate online fire caution review"
suri_0 = f"{mnemonic}/m/44'/60'/0'/0/0"
keypair = Keypair.create_from_uri(suri_0, crypto_type=KeypairType.ECDSA)
self.assertEqual("0xc6A0d8799D596BDd5C30E9ACbe2c63F37c142e35", keypair.ss58_address)
suri_1 = f"{mnemonic}/m/44'/60'/0'/0/1"
keypair = Keypair.create_from_uri(suri_1, crypto_type=KeypairType.ECDSA)
self.assertEqual("0x571DCd75Cd50852db08951e3A173aC23e44F05c9", keypair.ss58_address)
def test_sign_and_verify_ecdsa(self):
mnemonic = Keypair.generate_mnemonic()
keypair = Keypair.create_from_mnemonic(mnemonic, crypto_type=KeypairType.ECDSA)
signature = keypair.sign("Test123")
self.assertTrue(keypair.verify("Test123", signature))
def test_sign_and_verify_invalid_signature_ecdsa(self):
mnemonic = Keypair.generate_mnemonic()
keypair = Keypair.create_from_mnemonic(mnemonic, crypto_type=KeypairType.ECDSA)
signature = "0x24ff874fddab207ac6cae6a5bfe6e3542bb561abc98a22d1cfd7f8396927cf6d4962e198b5d599cf598b3c14cca98ab16d12569b666e8d33899c46d0d814a58200"
self.assertFalse(keypair.verify("Test123", signature))
def test_unsupport_crypto_type(self):
self.assertRaises(
ValueError, Keypair.create_from_seed,
seed_hex='0xda3cf5b1e9144931a0f0db65664aab662673b099415a7f8121b7245fb0be4143',
crypto_type=2
)
def test_create_keypair_from_private_key(self):
keypair = Keypair.create_from_private_key(
ss58_address='16ADqpMa4yzfmWs3nuTSMhfZ2ckeGtvqhPWCNqECEGDcGgU2',
private_key='0x1f1995bdf3a17b60626a26cfe6f564b337d46056b7a1281b64c649d592ccda0a9cffd34d9fb01cae1fba61aeed184c817442a2186d5172416729a4b54dd4b84e'
)
self.assertEqual(keypair.public_key, bytes.fromhex('e4359ad3e2716c539a1d663ebd0a51bdc5c98a12e663bb4c4402db47828c9446'))
def test_hdkd_hard_path(self):
mnemonic = 'old leopard transfer rib spatial phone calm indicate online fire caution review'
derivation_address = '5FEiH8iuDUw271xbqWTWuB6WrDjv5dnCeDX1CyHubAniXDNN'
derivation_path = '//Alice'
derived_keypair = Keypair.create_from_uri(mnemonic + derivation_path)
self.assertEqual(derivation_address, derived_keypair.ss58_address)
def test_hdkd_soft_path(self):
mnemonic = 'old leopard transfer rib spatial phone calm indicate online fire caution review'
derivation_address = '5GNXbA46ma5dg19GXdiKi5JH3mnkZ8Yea3bBtZAvj7t99P9i'
derivation_path = '/Alice'
derived_keypair = Keypair.create_from_uri(mnemonic + derivation_path)
self.assertEqual(derivation_address, derived_keypair.ss58_address)
def test_hdkd_default_to_dev_mnemonic(self):
derivation_address = '5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY'
derivation_path = '//Alice'
derived_keypair = Keypair.create_from_uri(derivation_path)
self.assertEqual(derivation_address, derived_keypair.ss58_address)
def test_hdkd_create_uri_correct_ss58format(self):
derivation_address = 'HNZata7iMYWmk5RvZRTiAsSDhV8366zq2YGb3tLH5Upf74F'
derivation_path = '//Alice'
derived_keypair = Keypair.create_from_uri(derivation_path, ss58_format=2)
self.assertEqual(derived_keypair.ss58_format, 2)
self.assertEqual(derived_keypair.ss58_address, derivation_address)
def test_hdkd_nested_hard_soft_path(self):
derivation_address = '5CJGwWiKXSE16WJaxBdPZhWqUYkotgenLUALv7ZvqQ4TXeqf'
derivation_path = '//Bob/test'
derived_keypair = Keypair.create_from_uri(derivation_path)
self.assertEqual(derivation_address, derived_keypair.ss58_address)
def test_hdkd_nested_soft_hard_path(self):
derivation_address = '5Cwc8tShrshDJUp1P1M21dKUTcYQpV9GcfSa4hUBNmMdV3Cx'
derivation_path = '/Bob//test'
derived_keypair = Keypair.create_from_uri(derivation_path)
self.assertEqual(derivation_address, derived_keypair.ss58_address)
def test_hdkd_nested_numeric_hard_path(self):
derivation_address = '5Fc3qszVcAXHAmjjm61KcxqvV1kh91jpydE476NjjnJneNdP'
derivation_path = '//polkadot//0'
derived_keypair = Keypair.create_from_uri(derivation_path)
self.assertEqual(derivation_address, derived_keypair.ss58_address)
def test_hdkd_nested_numeric2_hard_path(self):
derivation_address = '5Dr9GrefZzxfeHovyiKUXKYGKRRiTbPhfLo14iYcHKNccN9q'
derivation_path = '//1//5000'
derived_keypair = Keypair.create_from_uri(derivation_path)
self.assertEqual(derivation_address, derived_keypair.ss58_address)
def test_hdkd_path_gt_32_bytes(self):
derivation_address = '5GR5pfZeNs1uQiSWVxZaQiZou3wdZiX894eqgvfNfHbEh7W2'
derivation_path = '//PathNameLongerThan32BytesWhichShouldBeHashed'
derived_keypair = Keypair.create_from_uri(derivation_path)
self.assertEqual(derivation_address, derived_keypair.ss58_address)
def test_hdkd_path_eq_32_bytes(self):
derivation_address = '5Ea3JZpvsBN34jiQwVrLiMh5ypaHtPSyM2DWQvLmSRioEmyk'
derivation_path = '//rococo-validator-profiled-node-0'
derived_keypair = Keypair.create_from_uri(derivation_path)
self.assertEqual(derivation_address, derived_keypair.ss58_address)
def test_hdkd_unsupported_password(self):
self.assertRaises(NotImplementedError, Keypair.create_from_uri, DEV_PHRASE + '///test')
def test_reconstruct_path_fail(self):
self.assertRaises(ValueError, extract_derive_path, 'no_slashes')
self.assertRaises(ValueError, extract_derive_path, '//')
if __name__ == '__main__':
unittest.main()
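# Editorial note (not part of the original file): these are plain ``unittest``
# test cases, so — assuming the test directory is importable as a package —
# this module can also be run selectively with, for example:
#
#     python -m unittest test.test_keypair -v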
| 44.283784 | 157 | 0.763198 |
ef40c0fcb0e8fff7c25f9d5a4fb738962a87d58b | 6,253 | py | Python | cloudify_azure/resources/storage/disk.py | marrowne/cloudify-azure-plugin | 26b7794149f0b0586a06411abc7c64a13fd8c7eb | [
"Apache-2.0"
] | 2 | 2016-07-25T14:21:27.000Z | 2016-10-26T19:47:23.000Z | cloudify_azure/resources/storage/disk.py | marrowne/cloudify-azure-plugin | 26b7794149f0b0586a06411abc7c64a13fd8c7eb | [
"Apache-2.0"
] | 43 | 2017-05-18T12:31:42.000Z | 2019-01-08T09:20:42.000Z | cloudify_azure/resources/storage/disk.py | marrowne/cloudify-azure-plugin | 26b7794149f0b0586a06411abc7c64a13fd8c7eb | [
"Apache-2.0"
] | 13 | 2015-07-09T10:49:55.000Z | 2021-05-06T09:24:30.000Z | # #######
# Copyright (c) 2016-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
resources.storage.Disk
~~~~~~~~~~~~~~~~~~~~~~
Microsoft Azure Storage Disk interface
"""
import random
import string
from azure.storage.common.cloudstorageaccount import CloudStorageAccount
from cloudify import ctx
from cloudify.decorators import operation
from cloudify.exceptions import RecoverableError, NonRecoverableError
from cloudify_azure import (constants, utils)
from azure_sdk.resources.storage.storage_account import StorageAccount
def disk_name_generator():
"""Generates a unique Disk resource name"""
return ''.join(random.choice(string.ascii_lowercase + string.digits)
for i in range(random.randint(32, 76))) + '.vhd'
def data_disk_exists(pageblobsvc, disk_container, disk_name):
"""
Checks if a Data Disk already exists
:rtype: `azure.storage.blob.models.Blob` or `None`
:returns: Azure Page Blob object if the Data Disk
exists or None if it does not
"""
ctx.logger.debug('Checking if Data Disk "{0}/{1}" exists'
.format(disk_container, disk_name))
try:
props = pageblobsvc.get_blob_properties(disk_container, disk_name)
ctx.logger.debug('Data Disk "{0}/{1}" exists'
.format(disk_container, disk_name))
return props
except Exception:
ctx.logger.debug('Data Disk "{0}/{1}" does not exist'
.format(disk_container, disk_name))
return None
def get_cloud_storage_account(_ctx=ctx):
"""Gets the Azure Blob storage service"""
# Get the storage account
storage_account = utils.get_parent(
_ctx.instance,
rel_type=constants.REL_CONTAINED_IN_SA
)
resource_group_name = utils.get_resource_group(_ctx)
storage_account_name = utils.get_resource_name(_ctx=storage_account)
azure_config = _ctx.node.properties.get("azure_config")
if not azure_config.get("subscription_id"):
azure_config = ctx.node.properties.get('client_config')
else:
ctx.logger.warn("azure_config is deprecated please use client_config, "
"in later version it will be removed")
# Get the storage account keys
keys = StorageAccount(azure_config, _ctx.logger).list_keys(
resource_group_name, storage_account_name)
if not keys or not keys.get("key1"):
raise RecoverableError(
'StorageAccount reported no usable authentication keys')
# Get an interface to the Storage Account
storage_account_key = keys.get("key1")
return CloudStorageAccount(
account_name=storage_account_name,
account_key=storage_account_key)
@operation(resumable=True)
def create_data_disk(ctx, **_):
"""Uses an existing, or creates a new, Data Disk placeholder"""
res_cfg = ctx.node.properties.get("resource_config", {})
disk_name = utils.get_resource_name(ctx)
disk_container = res_cfg.get('container_name')
# Validation
if ctx.node.properties.get('use_external_resource', False):
if not disk_name:
raise NonRecoverableError(
'"use_external_resource" specified without '
'a resource "name"')
if not disk_container:
raise NonRecoverableError(
'"use_external_resource" specified without '
'a resource "container_name"')
# Get the storage account
csa = get_cloud_storage_account()
# Get an interface to the Page Blob Service
pageblobsvc = csa.create_page_blob_service()
# Generate a VHD Data Disk name if needed
if not disk_name:
ctx.logger.info('Generating a new Data Disk name')
for _ in range(0, 10):
tmpname = disk_name_generator()
if not data_disk_exists(pageblobsvc, disk_container, tmpname):
disk_name = tmpname
break
# Set the runtime properties
ctx.instance.runtime_properties['name'] = disk_name
ctx.instance.runtime_properties['diskSizeGB'] = \
res_cfg.get('size')
ctx.instance.runtime_properties['container'] = \
disk_container
ctx.instance.runtime_properties['uri'] = (
'https://{0}.blob.{1}/{2}/{3}'.format(
csa.account_name, constants.CONN_STORAGE_ENDPOINT,
disk_container, disk_name)
)
@operation(resumable=True)
def delete_data_disk(ctx, **_):
"""Deletes a Data Disk"""
res_cfg = ctx.node.properties.get("resource_config", {})
disk_name = ctx.instance.runtime_properties.get('name')
disk_container = ctx.instance.runtime_properties.get('container')
# If we're not deleting the disk, skip the lifecycle operation
if ctx.node.properties.get('use_external_resource', False) or \
not res_cfg.get('force_delete', False):
return
# Validate the name exists
if not disk_name or not disk_container:
raise NonRecoverableError(
'Attempted to delete Data Disk without a name or '
'container name specified')
# Get the storage account
csa = get_cloud_storage_account()
# Get an interface to the Page Blob Service
pageblobsvc = csa.create_page_blob_service()
# Delete the blob
ctx.logger.info('Deleting Data Disk "{0}/{1}"'
.format(disk_container, disk_name))
pageblobsvc.delete_blob(disk_container, disk_name)
for prop in ['name', 'diskSizeGB', 'container', 'uri']:
try:
del ctx.instance.runtime_properties[prop]
except (KeyError, IndexError):
ctx.logger.debug(
'Attempted to delete property {0} but failed.'.format(
prop))
| 39.327044 | 79 | 0.674556 |
774b1a3a7184242a08a7d55360058e8edd2a8ebf | 480 | py | Python | modules/attention.py | yd1996/PartialComparison | 0e5765de001134ea61696393b2eff4b65eaaf733 | [
"MIT"
] | 4 | 2019-08-16T08:54:57.000Z | 2020-07-22T09:11:37.000Z | modules/attention.py | yd1996/PartialComparison | 0e5765de001134ea61696393b2eff4b65eaaf733 | [
"MIT"
] | 1 | 2020-11-16T10:51:05.000Z | 2020-11-16T10:51:05.000Z | modules/attention.py | yd1996/PartialComparison | 0e5765de001134ea61696393b2eff4b65eaaf733 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
class AttentionBase(nn.Module):
def __init__(self, dim):
super(AttentionBase, self).__init__()
self.dim = dim
def calcuate_attention_weight(self, query, key):
raise NotImplementedError
def forward(self, query, memory):
attention_weight = self.calcuate_attention_weight(query, memory)
weighted_memory = torch.einsum("ijk,ij->ik", [memory, attention_weight])
return weighted_memory | 26.666667 | 80 | 0.69375 |
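# Editorial sketch (not part of the original module): a minimal concrete
# subclass, assuming a scaled dot-product scoring rule, to illustrate the
# contract of ``calcuate_attention_weight`` used by ``forward`` above.
# The name ``DotProductAttention`` is hypothetical.
#
#     class DotProductAttention(AttentionBase):
#         def calcuate_attention_weight(self, query, key):
#             # query: (batch, dim); key/memory: (batch, seq_len, dim)
#             scores = torch.einsum("ik,ijk->ij", [query, key]) / (self.dim ** 0.5)
#             # softmax over seq_len so each row of weights sums to 1
#             return torch.softmax(scores, dim=-1)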
7a9c51bf1a27870e4f880b3c5d697d5831473eaa | 11,315 | py | Python | cello/scanpy_cello.py | Ann-Holmes/CellO | bc2192a2d27e0859f6df885a6fc246e26e54a7b0 | [
"MIT"
] | 42 | 2019-05-14T19:04:38.000Z | 2022-03-06T12:57:00.000Z | cello/scanpy_cello.py | Ann-Holmes/CellO | bc2192a2d27e0859f6df885a6fc246e26e54a7b0 | [
"MIT"
] | 16 | 2020-08-04T12:34:08.000Z | 2022-03-31T22:30:48.000Z | cello/scanpy_cello.py | Ann-Holmes/CellO | bc2192a2d27e0859f6df885a6fc246e26e54a7b0 | [
"MIT"
] | 6 | 2019-05-13T15:57:03.000Z | 2022-03-18T02:17:05.000Z | """\
Classify human cell against the Cell Ontology using CellO.
Here we implement a function for running CellO following the conventions
in Scanpy's external API (https://scanpy.readthedocs.io/en/stable/external/).
Author: Matthew Bernstein
Email: mbernstein@morgridge.org
"""
from anndata import AnnData
import dill
from collections import defaultdict
import pandas as pd
import io
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
from .plot_annotations import probabilities_on_graph
from . import ontology_utils as ou
from . import cello as ce
def cello(
adata: AnnData,
clust_key: str = 'leiden',
rsrc_loc: str = '.',
algo: str = 'IR',
out_prefix: str = None,
model_file: str = None,
log_dir: str = None,
term_ids: bool = False,
remove_anatomical_subterms: list = None
):
"""\
CellO [Bernstein21]_.
Hierarchical cell type classification of human cells with the Cell Ontology.
For more information, tutorials, and bug reports, visit the `CellO
GitHub page <https://github.com/deweylab/CellO>`__.
Parameters
----------
adata
Annotated data matrix. CellO requires that expression data has
been normalized using log(TPM+1). For droplet-based assays, this
is equivalent to log(CPM+1).
clust_key
Key-name of the cluster annotations in `adata.obs`.
rsrc_loc
The path to the CellO resources file. The CellO resources
contain pre-trained models, gene symbol mappings, and
training sets for constructing CellO classifiers. If the
CellO resources are not located at this provided location,
they will be downloaded automatically. The resources require
approximately 5GB of disk space.
algo
The name of the algorithm to use for hierarchical classification
against the Cell Ontology. Use `IR` for Isotonic Regression and
`CLR` for Cascaded Logistic Regression.
out_prefix
The output prefix of the trained model. If the pre-trained models
are not compatible with the input dataset do to model expecting
different genes than that included in the input data, then a new
model will be trained using CellO's training set. The output model
will be written to `<output_pref>.model.dill`. If `None` the
newly trained model will not be saved to disk.
model_file
The path to the a trained CellO classifier to use for classification.
CellO model files end in the suffix `model.dill`.
log_dir
Directory in which to write log files. If `None`, no log files will
be written.
term_ids
If `True`, output will use Cell Ontology term ID's.
If `False`, output will use human readable cell type names.
remove_anatomical_subterms
A list of Uberon Ontology term ID's used to filter CellO's output
according to anatomical entities. For example, to blacklist all
cell types specific only to lung and liver, one would supply the
list `['UBERON:0002048', 'UBERON:0002107']`.
Returns
-------
Updates `adata.obs` with CellO's output. Specifically, `adata.obs` will
have two columns for every cell type. A column `<cell_type> (probability)`
that stores the probability that each cell is of `<cell_type>` and a column
`<cell_type> (binary)` that stores a 1 if the cell is predicted to be of
`<cell_type>` and 0 otherwise. `adata.obs` will also have a column called
`Most specific cell type` with the term ID or name (depending on whether
`term_ids` is set to True or False respectively) of the most-specific cell
type classification for each cell.
Examples
--------
>>> from anndata import AnnData
>>> import scanpy as sc
>>> import scanpy.external as sce
>>> adata = sc.datasets.pbmc3k()
>>> adata.X = adata.X.todense()
>>> sc.pp.normalize_total(adata, target_sum=1e6)
>>> sc.pp.log1p(adata)
>>> sc.pp.pca(adata)
>>> sc.pp.neighbors(adata)
>>> sc.tl.leiden(adata, resolution=2.0) # Perform clustering
>>> adata.var['gene_symbols'] = adata.var.index
>>> adata.var = adata.var.set_index('gene_ids') # Set the Ensembl gene ID's as primary gene identifiers
>>> sce.tl.cello(adata, 'leiden', '.') # Run CellO
>>> sc.tl.umap(adata)
>>> sc.pl.umap(adata, color='Most specific cell type') # Create UMAP plot with cells colored by cell type
"""
try:
import cello as ce
except ImportError:
raise ImportError(
'You need to install the package `cello`: please run `pip install '
'--user cello` in a terminal.'
)
# Load the model
if model_file:
print('Loading model from {}...'.format(model_file))
with open(model_file, 'rb') as f:
mod=dill.load(f)
else:
# Load or train a model
mod = ce._retrieve_pretrained_model(adata, algo, rsrc_loc)
if mod is None:
mod = ce.train_model(
adata,
rsrc_loc,
algo=algo,
log_dir=log_dir
)
if out_prefix:
out_model_f = '{}.model.dill'.format(out_prefix)
print('Writing trained model to {}'.format(out_model_f))
with open(out_model_f, 'wb') as f:
dill.dump(mod, f)
else:
print("No argument to 'out_prefix' was provided. Trained model will not be saved.")
# Run classification
results_df, finalized_binary_results_df, ms_results_df = ce.predict(
adata,
mod,
algo=algo,
clust_key=clust_key,
rsrc_loc=rsrc_loc,
log_dir=log_dir,
remove_anatomical_subterms=remove_anatomical_subterms
)
# Merge results into AnnData object
if term_ids:
column_to_term_id = {
'{} (probability)'.format(c): c
for c in results_df.columns
}
results_df.columns = [
'{} (probability)'.format(c)
for c in results_df.columns
]
finalized_binary_results_df.columns = [
'{} (binary)'.format(c)
for c in finalized_binary_results_df.columns
]
else:
column_to_term_id = {
'{} (probability)'.format(ou.cell_ontology().id_to_term[c].name): c
for c in results_df.columns
}
results_df.columns = [
'{} (probability)'.format(
ou.cell_ontology().id_to_term[c].name
)
for c in results_df.columns
]
finalized_binary_results_df.columns = [
'{} (binary)'.format(
ou.cell_ontology().id_to_term[c].name
)
for c in finalized_binary_results_df.columns
]
ms_results_df['most_specific_cell_type'] = [
ou.cell_ontology().id_to_term[c].name
for c in ms_results_df['most_specific_cell_type']
]
drop_cols = [
col
for col in adata.obs.columns
if '(probability)' in str(col)
or '(binary)' in str(col)
or col == 'Most specific cell type'
]
adata.obs = adata.obs.drop(drop_cols, axis=1)
finalized_binary_results_df = finalized_binary_results_df.astype(bool).astype(str).astype('category')
adata.obs = adata.obs.join(results_df).join(finalized_binary_results_df)
adata.uns['CellO_column_mappings'] = column_to_term_id
# ms_results_df already contains term IDs (when term_ids is True) or
# human-readable names (when term_ids is False), so assign it directly.
adata.obs['Most specific cell type'] = ms_results_df['most_specific_cell_type']
def normalize_and_cluster(
adata: AnnData,
n_pca_components: int = 50,
n_neighbors: int = 15,
n_top_genes: int = 10000,
cluster_res: float = 2.0
):
"""
Normalize and cluster an expression matrix in units of raw UMI counts.
Parameters
----------
adata
Annotated data matrix. Expected units are raw UMI counts.
n_pca_components (default 50)
Number of principal components to use when running PCA. PCA is
is used to reduce noise and speed up computation when clustering.
n_neighbors (default 15)
Number of neighbors to use for computing the nearest-neighbors
graph. Clustering is performed using community detection on this
nearest-neighbors graph.
n_top_genes (default 10000)
Number of genes selected for computing the nearest-neighbors graph
and for clustering.
cluster_res (default 2.0)
Cluster resolution for the Leiden community detection algorithm.
A higher resolution produces more fine-grained, smaller clusters.
"""
try:
import scanpy as sc
except ImportError:
sys.exit("The function 'normalize_and_cluster' requires that scanpy package be installed. To install scanpy, run 'pip install scanpy'")
sc.pp.normalize_total(adata, target_sum=1e6)
sc.pp.log1p(adata)
sc.pp.highly_variable_genes(adata, n_top_genes=n_top_genes)
sc.pp.pca(adata, n_comps=n_pca_components, use_highly_variable=True)
sc.pp.neighbors(adata, n_neighbors=n_neighbors)
sc.tl.leiden(adata, resolution=cluster_res)
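# Editorial usage sketch (not part of the original module), assuming raw UMI
# counts in ``adata.X``; this convenience wrapper prepares the input expected
# by ``cello`` (log(CPM+1) values plus a 'leiden' clustering):
#
#     import scanpy as sc
#     adata = sc.datasets.pbmc3k()                    # raw counts
#     adata.X = adata.X.todense()
#     adata.var['gene_symbols'] = adata.var.index
#     adata.var = adata.var.set_index('gene_ids')     # Ensembl IDs as primary identifiers
#     normalize_and_cluster(adata, cluster_res=2.0)   # adds adata.obs['leiden']
#     cello(adata, clust_key='leiden', rsrc_loc='.')  # adds CellO annotations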
def cello_probs(adata, cell_or_clust, rsrc_loc, p_thresh, width=10, height=10, clust_key=None, dpi=300):
results_df = adata.obs[[col for col in adata.uns['CellO_column_mappings']]]
results_df.columns = [
adata.uns['CellO_column_mappings'][c] for c in results_df.columns
]
# Plot based on cluster ID
if clust_key:
try:
assert cell_or_clust in set(adata.obs[clust_key])
except AssertionError:
raise KeyError(f"Error plotting probabilities on graph. Cluster {clust_key} not found in `adata.obs` columns.")
# Take subset of results dataframe for each column
clust_to_indices = defaultdict(lambda: [])
for index, clust in zip(adata.obs.index, adata.obs[clust_key]):
clust_to_indices[clust].append(index)
clusts = sorted(clust_to_indices.keys())
results_df = pd.DataFrame(
[
results_df.loc[clust_to_indices[clust][0]]
for clust in clusts
],
index=clusts,
columns=results_df.columns
)
g = probabilities_on_graph(
cell_or_clust,
results_df,
rsrc_loc,
p_thresh=p_thresh
)
f = io.BytesIO(g.draw(format='png', prog='dot', args=f'-Gdpi={dpi}'))
fig, ax = plt.subplots(figsize=(width, height))
im = mpimg.imread(f)
plt.xticks([])
plt.yticks([])
plt.imshow(im)
plt.show()
return fig, ax
def write_to_tsv(adata, filename):
"""
Write CellO's output to a TSV file.
"""
keep_cols = [
col
for col in adata.obs.columns
if '(probability)' in col
or '(binary)' in col
or 'Most specific cell type' in col
]
df = adata.obs[keep_cols]
df.to_csv(filename, sep='\t')
| 35.806962 | 143 | 0.632965 |
e9928d00c97cf1f0de98100477e574f1b78a874a | 13,272 | py | Python | log_mito_act/model_631.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_mito_act/model_631.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_mito_act/model_631.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('ParpC')
Monomer('Xiap', ['Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd'])
Monomer('C3pro', ['Apop'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 157750.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None) + BidU(C8A=None) | C8A(BidU=1) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1) % BidU(C8A=1) >> C8A(BidU=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None) | Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None) >> Xiap(Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None), C8A_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None), C8pro_0)
Initial(C3pro(Apop=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
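# Illustrative simulation sketch (added for clarity, not part of the exported model):
# PySB's self-export makes the assembled model available in this module as `model`,
# so it can be integrated with an ODE solver; the time span below is an arbitrary
# choice for illustration.
if __name__ == '__main__':
    import numpy as np
    from pysb.simulator import ScipyOdeSimulator
    tspan = np.linspace(0, 20000, 200)
    traj = ScipyOdeSimulator(model, tspan=tspan).run()
    # Observables declared above are accessible by name, e.g. cleaved PARP:
    print(traj.observables['ParpC_obs'][-1])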
| 79.473054 | 614 | 0.80975 |
03f5487645815406e5d8a6e27ee372c27f52e316 | 14,090 | py | Python | ansible/venv/lib/python2.7/site-packages/ansible/plugins/connection/kubectl.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | null | null | null | ansible/venv/lib/python2.7/site-packages/ansible/plugins/connection/kubectl.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | null | null | null | ansible/venv/lib/python2.7/site-packages/ansible/plugins/connection/kubectl.py | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 0fb18850afe0d8609693ba4b23f29c7cda17d97f | [
"MIT"
] | null | null | null | # Based on the docker connection plugin
#
# Connection plugin for configuring kubernetes containers with kubectl
# (c) 2017, XuXinkun <xuxinkun@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
author:
- xuxinkun
connection: kubectl
short_description: Execute tasks in pods running on Kubernetes.
description:
- Use the kubectl exec command to run tasks in, or put/fetch files to, pods running on the Kubernetes
container platform.
version_added: "2.5"
requirements:
- kubectl (go binary)
options:
kubectl_pod:
description:
- Pod name. Required when the host name does not match pod name.
default: ''
vars:
- name: ansible_kubectl_pod
env:
- name: K8S_AUTH_POD
kubectl_container:
description:
- Container name. Required when a pod contains more than one container.
default: ''
vars:
- name: ansible_kubectl_container
env:
- name: K8S_AUTH_CONTAINER
kubectl_namespace:
description:
- The namespace of the pod
default: ''
vars:
- name: ansible_kubectl_namespace
env:
- name: K8S_AUTH_NAMESPACE
kubectl_extra_args:
description:
- Extra arguments to pass to the kubectl command line.
- Please be aware that this passes information directly on the command line and it could expose sensitive data.
default: ''
vars:
- name: ansible_kubectl_extra_args
env:
- name: K8S_AUTH_EXTRA_ARGS
kubectl_kubeconfig:
description:
- Path to a kubectl config file. Defaults to I(~/.kube/config)
default: ''
vars:
- name: ansible_kubectl_kubeconfig
- name: ansible_kubectl_config
env:
- name: K8S_AUTH_KUBECONFIG
kubectl_context:
description:
- The name of a context found in the K8s config file.
default: ''
vars:
- name: ansible_kubectl_context
env:
          - name: K8S_AUTH_CONTEXT
kubectl_host:
description:
- URL for accessing the API.
default: ''
vars:
- name: ansible_kubectl_host
- name: ansible_kubectl_server
env:
- name: K8S_AUTH_HOST
- name: K8S_AUTH_SERVER
kubectl_username:
description:
- Provide a username for authenticating with the API.
default: ''
vars:
- name: ansible_kubectl_username
- name: ansible_kubectl_user
env:
- name: K8S_AUTH_USERNAME
kubectl_password:
description:
- Provide a password for authenticating with the API.
- Please be aware that this passes information directly on the command line and it could expose sensitive data.
We recommend using the file based authentication options instead.
default: ''
vars:
- name: ansible_kubectl_password
env:
- name: K8S_AUTH_PASSWORD
kubectl_token:
description:
- API authentication bearer token.
- Please be aware that this passes information directly on the command line and it could expose sensitive data.
We recommend using the file based authentication options instead.
vars:
- name: ansible_kubectl_token
- name: ansible_kubectl_api_key
env:
- name: K8S_AUTH_TOKEN
- name: K8S_AUTH_API_KEY
client_cert:
description:
- Path to a certificate used to authenticate with the API.
default: ''
vars:
- name: ansible_kubectl_cert_file
- name: ansible_kubectl_client_cert
env:
- name: K8S_AUTH_CERT_FILE
aliases: [ kubectl_cert_file ]
client_key:
description:
- Path to a key file used to authenticate with the API.
default: ''
vars:
- name: ansible_kubectl_key_file
- name: ansible_kubectl_client_key
env:
- name: K8S_AUTH_KEY_FILE
aliases: [ kubectl_key_file ]
ca_cert:
description:
- Path to a CA certificate used to authenticate with the API.
default: ''
vars:
- name: ansible_kubectl_ssl_ca_cert
- name: ansible_kubectl_ca_cert
env:
- name: K8S_AUTH_SSL_CA_CERT
aliases: [ kubectl_ssl_ca_cert ]
validate_certs:
description:
- Whether or not to verify the API server's SSL certificate. Defaults to I(true).
default: ''
vars:
- name: ansible_kubectl_verify_ssl
- name: ansible_kubectl_validate_certs
env:
- name: K8S_AUTH_VERIFY_SSL
aliases: [ kubectl_verify_ssl ]
"""
import distutils.spawn
import os
import os.path
import subprocess
import ansible.constants as C
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils._text import to_bytes
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.utils.display import Display
display = Display()
CONNECTION_TRANSPORT = 'kubectl'
CONNECTION_OPTIONS = {
'kubectl_container': '-c',
'kubectl_namespace': '-n',
'kubectl_kubeconfig': '--kubeconfig',
'kubectl_context': '--context',
'kubectl_host': '--server',
'kubectl_username': '--username',
'kubectl_password': '--password',
'client_cert': '--client-certificate',
'client_key': '--client-key',
'ca_cert': '--certificate-authority',
'validate_certs': '--insecure-skip-tls-verify',
'kubectl_token': '--token'
}
class Connection(ConnectionBase):
''' Local kubectl based connections '''
transport = CONNECTION_TRANSPORT
connection_options = CONNECTION_OPTIONS
documentation = DOCUMENTATION
has_pipelining = True
transport_cmd = None
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
# Note: kubectl runs commands as the user that started the container.
# It is impossible to set the remote user for a kubectl connection.
cmd_arg = '{0}_command'.format(self.transport)
if cmd_arg in kwargs:
self.transport_cmd = kwargs[cmd_arg]
else:
self.transport_cmd = distutils.spawn.find_executable(self.transport)
if not self.transport_cmd:
raise AnsibleError("{0} command not found in PATH".format(self.transport))
def _build_exec_cmd(self, cmd):
""" Build the local kubectl exec command to run cmd on remote_host
"""
local_cmd = [self.transport_cmd]
# Build command options based on doc string
doc_yaml = AnsibleLoader(self.documentation).get_single_data()
for key in doc_yaml.get('options'):
if key.endswith('verify_ssl') and self.get_option(key) != '':
# Translate verify_ssl to skip_verify_ssl, and output as string
skip_verify_ssl = not self.get_option(key)
local_cmd.append(u'{0}={1}'.format(self.connection_options[key], str(skip_verify_ssl).lower()))
elif not key.endswith('container') and self.get_option(key) and self.connection_options.get(key):
cmd_arg = self.connection_options[key]
local_cmd += [cmd_arg, self.get_option(key)]
extra_args_name = u'{0}_extra_args'.format(self.transport)
if self.get_option(extra_args_name):
local_cmd += self.get_option(extra_args_name).split(' ')
pod = self.get_option(u'{0}_pod'.format(self.transport))
if not pod:
pod = self._play_context.remote_addr
# -i is needed to keep stdin open which allows pipelining to work
local_cmd += ['exec', '-i', pod]
# if the pod has more than one container, then container is required
container_arg_name = u'{0}_container'.format(self.transport)
if self.get_option(container_arg_name):
local_cmd += ['-c', self.get_option(container_arg_name)]
local_cmd += ['--'] + cmd
return local_cmd
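    # Illustrative example (not from the original source, values hypothetical): with
    # ansible_kubectl_namespace=web and ansible_kubectl_container=app, executing a
    # task against a pod named "frontend-1" composes a command roughly like
    #   kubectl -n web exec -i frontend-1 -c app -- /bin/sh -c '<wrapped command>'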
def _connect(self, port=None):
""" Connect to the container. Nothing to do """
super(Connection, self)._connect()
if not self._connected:
display.vvv(u"ESTABLISH {0} CONNECTION".format(self.transport), host=self._play_context.remote_addr)
self._connected = True
def exec_command(self, cmd, in_data=None, sudoable=False):
""" Run a command in the container """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])
display.vvv("EXEC %s" % (local_cmd,), host=self._play_context.remote_addr)
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate(in_data)
return (p.returncode, stdout, stderr)
def _prefix_login_path(self, remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
'''
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
def put_file(self, in_path, out_path):
""" Transfer a file from local to the container """
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
out_path = self._prefix_login_path(out_path)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound(
"file or module does not exist: %s" % in_path)
out_path = shlex_quote(out_path)
# kubectl doesn't have native support for copying files into
# running containers, so we use kubectl exec to implement this
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
if not os.fstat(in_file.fileno()).st_size:
count = ' count=0'
else:
count = ''
args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
try:
p = subprocess.Popen(args, stdin=in_file,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
raise AnsibleError("kubectl connection requires dd command in the container to put files")
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
def fetch_file(self, in_path, out_path):
""" Fetch a file from container to local. """
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
in_path = self._prefix_login_path(in_path)
out_dir = os.path.dirname(out_path)
# kubectl doesn't have native support for fetching files from
# running containers, so we use kubectl exec to implement this
args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
try:
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=out_file, stderr=subprocess.PIPE)
except OSError:
raise AnsibleError(
"{0} connection requires dd command in the container to fetch files".format(self.transport)
)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
if actual_out_path != out_path:
os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
def close(self):
""" Terminate the connection. Nothing to do for kubectl"""
super(Connection, self).close()
self._connected = False
| 38.922652 | 127 | 0.634918 |
9961d010cfa91fcfa280ba946beb8aad37feb79f | 21,357 | py | Python | DFND_DeiT-imagenet.py | IIGROUP/AttentionProbe | b2c88b064452741a7ccc6660a4b090743013cc73 | [
"MIT"
] | 11 | 2022-01-23T15:09:09.000Z | 2022-03-18T10:27:04.000Z | DFND_DeiT-imagenet.py | Wang-jiahao/AttentionProbe | 41a3cc0d5454ec5bba78c3dace9cded00da8cff9 | [
"MIT"
] | null | null | null | DFND_DeiT-imagenet.py | Wang-jiahao/AttentionProbe | 41a3cc0d5454ec5bba78c3dace9cded00da8cff9 | [
"MIT"
] | null | null | null | #Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
#This program is free software; you can redistribute it and/or modify it under the terms of the BSD 3-Clause License.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the BSD 3-Clause License for more details.
import os
cpu_num = 4
import resnet
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
# from torch.autograd import Variable
from resnet import ResNet18,ResNet34
from torchvision.datasets import CIFAR100,ImageFolder,CIFAR10
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from timm.models import create_model
import vision_transformer
from loss import kdloss, csloss, patch_attention_probe_loss, robust_kdloss
from utils import accuracy, AverageMeter
from functools import partial
import random
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import pdb
import numpy as np
import warnings
os.environ['OMP_NUM_THREADS'] = str(cpu_num)
os.environ['OPENBLAS_NUM_THREADS'] = str(cpu_num)
os.environ['MKL_NUM_THREADS'] = str(cpu_num)
os.environ['VECLIB_MAXIMUM_THREADS'] = str(cpu_num)
os.environ['NUMEXPR_NUM_THREADS'] = str(cpu_num)
torch.set_num_threads(cpu_num)
warnings.filterwarnings('ignore')
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--num_select', type=int, default=600000)
parser.add_argument('--data_cifar', type=str, default='/home/wjh19/database/cifar10/')
parser.add_argument('--data_imagenet', type=str, default='/home/wjh19/database/imagenet/train/')
parser.add_argument('--teacher', default='deit_base_patch4_32_teacher', type=str, metavar='MODEL',
help='Name of teacher model to train (default: "regnety_160"')
parser.add_argument('--teacher_dir', type=str, default='/home/wjh19/mage/DFND_DeiT/output/cifar10/teacher/checkpoint.pth')
parser.add_argument('--nb_classes', type=int, default=10, help='number of classes')
parser.add_argument('--lr_S', type=float, default=7.5e-4, help='learning rate')
parser.add_argument('--robust', action='store_true', default=False,
help='Robust distillation enabled (if avail)')
parser.add_argument('--attnprobe_sel', action='store_true', default=False,
help='Distillation by attention prime enabled (if avail)')
parser.add_argument('--random', action='store_true', default=False,
help='Randomly select wild data (if avail)')
parser.add_argument('--attnprobe_dist', action='store_true', default=False,
help='Distillation by attention prime enabled (if avail)')
parser.add_argument('--attnlier', type=float, default=0.05, help='weight of attention layer to sample the wild data')
parser.add_argument('--outlier', type=float, default=0.9, help='weight of output layer to sample the wild data')
parser.add_argument('--patchattn', type=float, default=0.8, help='weight of patch attention loss')
parser.add_argument('--pos_num', type=int, default=129)
parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10','cifar100','imagenet','mnist'])
parser.add_argument('--epochs', type=float, default=800)
parser.add_argument('--output_dir', type=str, default='/home/wjh19/mage/DFND_DeiT/output/cifar10/')
parser.add_argument('--selected_file', type=str, default='/home/wjh19/mage/DFND_DeiT/selected/cifar10/')
parser.add_argument('--schedule', default=[200, 300], nargs='*', type=int,
help='learning rate schedule (when to drop lr by 10x)')
parser.add_argument('--cos', action='store_true',
help='use cosine lr schedule')
args,_ = parser.parse_known_args()
acc = 0
acc_best = 0
teacher = None
assert args.teacher_dir, 'need to specify teacher-path when using distillation'
print(f"Creating teacher model: {args.teacher}")
teacher = create_model(
args.teacher,
pretrained=False,
num_classes=args.nb_classes,
)
if args.dataset == 'imagenet':
embed_dim = 768
num_heads = 12
img_size = 224
else:
embed_dim = 384
num_heads = 3
img_size = 32
checkpoint = torch.load(args.teacher_dir, map_location='cpu')
teacher.load_state_dict(checkpoint['model'])
teacher.cuda()
teacher.eval()
teacher = nn.DataParallel(teacher)
# teacher = torch.load(args.teacher_dir + 'teacher').cuda()
# teacher.eval()
for parameter in teacher.parameters():
parameter.requires_grad = False
def get_class_weight(model, dataloader, num_classes=10, T=1):
classes_outputs = np.zeros(num_classes)
model.eval()
if os.path.exists(args.selected_file + 'class_weights.pth'):
class_weights = torch.load(args.selected_file + 'class_weights.pth')
else:
for i,(inputs, labels) in enumerate(dataloader):
inputs = inputs.cuda()
with torch.set_grad_enabled(False):
outputs, output_feature_t = model(inputs)
outputs = F.softmax(outputs/T, dim=1)
for j in range(inputs.shape[0]):
classes_outputs += outputs[j].cpu().data.numpy()
class_weights = 1/classes_outputs
weights_sum = np.sum(class_weights)
class_weights /= weights_sum
class_weights *= num_classes
torch.save(class_weights, args.selected_file + 'class_weights.pth')
return class_weights
def perturb(weight, epsilon=0.1, perturb_num=1):
weights = []
weights.append(weight)
for i in range(perturb_num):
p = np.random.rand(weight.shape[0]) * epsilon
weight_new = weight + p
weights.append(weight_new)
return weights
normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
std=[0.2023, 0.1994, 0.2010])
data_train = ImageFolder(args.data_imagenet, transforms.Compose([
transforms.Resize((img_size,img_size)),
transforms.ToTensor(),
normalize,
]))
data_train_transform = ImageFolder(args.data_imagenet, transforms.Compose([
transforms.Resize((img_size,img_size)),
transforms.RandomCrop(img_size, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
transform_train = transforms.Compose([
transforms.RandomCrop(img_size, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_train_imagenet = transforms.Compose([
transforms.Resize(224),
transforms.RandomCrop(224, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
transform_test_imagenet = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
if args.dataset == 'cifar100':
data_test = CIFAR100(args.data_cifar,
train=False,
transform=transform_test)
teacher_acc = torch.tensor([0.7630])
n_classes = 100
if args.dataset == 'cifar10':
data_test = CIFAR10(args.data_cifar,
train=False,
transform=transform_test)
teacher_acc = torch.tensor([0.9665])
n_classes = 10
if args.dataset == 'imagenet':
label_trainset = ImageFolder(os.path.join(args.data_cifar, 'train'),
transform_train_imagenet)
data_test = ImageFolder(os.path.join(args.data_cifar, 'val'),
transform_test_imagenet)
teacher_acc = torch.tensor([0.8118])
n_classes = 1000
if os.path.exists(args.selected_file + 'labelset_index_dict.pth'):
labelset_index = torch.load(args.selected_file + 'labelset_index_dict.pth')['positive_index']
else:
labelset_index = []
classnum = np.ones(n_classes) * args.pos_num
i = 0
sample_idx = 0
while(np.sum(classnum) > 0):
image = label_trainset[i][0]
label = label_trainset[i][1]
if(classnum[label] > 0):
labelset_index.append(i)
classnum[label] -= 1
sample_idx += 1
i += 1
print('Sample %d from the original dataset.' % sample_idx)
labelset_index_dict = {}
labelset_index_dict['positive_index'] = labelset_index
torch.save(labelset_index_dict, args.selected_file + 'labelset_index_dict.pth')
print("Positive data has been sampled from the original dataset!")
label_train_subset = torch.utils.data.Subset(label_trainset, labelset_index)
data_test_loader = DataLoader(data_test, batch_size=1000, num_workers=0)
noise_adaptation = torch.nn.Parameter(torch.zeros(n_classes,n_classes-1))
def noisy(noise_adaptation):
# noise_adaptation_softmax: (n_classes,n_classes-1)
noise_adaptation_softmax = torch.nn.functional.softmax(noise_adaptation,dim=1) * (1 - teacher_acc)
# noise_adaptation_layer: (n_classes,n_classes)
noise_adaptation_layer = torch.zeros(n_classes,n_classes)
for i in range(n_classes):
if i == 0:
noise_adaptation_layer[i] = torch.cat([teacher_acc,noise_adaptation_softmax[i][i:]])
        elif i == n_classes-1:
noise_adaptation_layer[i] = torch.cat([noise_adaptation_softmax[i][:i],teacher_acc])
else:
noise_adaptation_layer[i] = torch.cat([noise_adaptation_softmax[i][:i],teacher_acc,noise_adaptation_softmax[i][i:]])
# noise_adaptation_layer: (n_classes,n_classes)
return noise_adaptation_layer.cuda()
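# Illustrative sketch (added for clarity, not part of the original code): with
# n_classes=3 and teacher_acc=0.9, every row i of the matrix returned above keeps
# the fixed teacher accuracy on the diagonal and spreads the learned residual mass
# (softmax-normalised, scaled by 1 - teacher_acc) over the off-diagonal entries, e.g.
#   row 0 = [0.90, a01, a02]  with  a01 + a02 = 0.10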
# net = ResNet18(n_classes).cuda()
if args.dataset == 'imagenet':
print("Creating student model: deiT_tiny_patch16_224")
net = vision_transformer.TeacherVisionTransformer(img_size=224, patch_size=16, in_chans=3, num_classes=args.nb_classes, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6)).cuda()
else:
print("Creating student model: deiT_xtiny_patch4_32")
net = vision_transformer.TeacherVisionTransformer(img_size=32, patch_size=4, in_chans=3, num_classes=args.nb_classes, embed_dim=128, depth=12, num_heads=2, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6)).cuda()
net = torch.nn.DataParallel(net)
criterion = torch.nn.CrossEntropyLoss().cuda()
celoss = torch.nn.CrossEntropyLoss(reduction = 'none').cuda()
# optimizer = torch.optim.SGD(list(net.parameters()), lr=0.1, momentum=0.9, weight_decay=5e-4)
optimizer = torch.optim.AdamW(net.parameters(), lr=args.lr_S, weight_decay=0.025)
optimizer_noise = torch.optim.Adam([noise_adaptation], lr=0.001)
data_train_loader_noshuffle = DataLoader(data_train, batch_size=256, shuffle=False, num_workers=8)
def identify_attnlier(checkpoint, embed_dim, num_heads):
value_blk3 = []
value_blk7 = []
# pred_list = []
attn_inputs_blk3 = []
attn_inputs_blk7 = []
index = 0
embed_dim = int(embed_dim/num_heads)
scale = embed_dim ** -0.5
teacher.eval()
# Obtain weights and bias
# linear_weight_blk_3: (1152, 384). linear_bias_blk_3: (1152)
linear_weight_blk_3 = checkpoint['model']['blocks.3.attn.qkv.weight'].cuda()
linear_bias_blk_3 = checkpoint['model']['blocks.3.attn.qkv.bias'].cuda()
# linear_weight_q_blk_3, linear_weight_k_blk_3, linear_weight_v_blk_3 = torch.split(linear_weight_blk_3, [384, 384, 384], dim=0)
linear_weight_blk_7 = checkpoint["model"]['blocks.7.attn.qkv.weight'].cuda()
linear_bias_blk_7 = checkpoint["model"]['blocks.7.attn.qkv.bias'].cuda()
# linear_weight_q_blk_7, linear_weight_k_blk_7, linear_weight_v_blk_7 = torch.split(linear_weight_blk_7, [384, 384, 384], dim=0)
hooksadd = [
teacher.module.blocks[3].attn.register_forward_hook(
lambda self, input, output: attn_inputs_blk3.append(input)
),
teacher.module.blocks[7].attn.register_forward_hook(
lambda self, input, output: attn_inputs_blk7.append(input)
),
]
for i,(inputs, labels) in enumerate(data_train_loader_noshuffle):
inputs = inputs.cuda()
outputs, output_feature = teacher(inputs)
# calculate input × weights and view the shape
# B, N, C = 256, 65, 384
B, N, C = attn_inputs_blk3[index][0].shape
uniform = (torch.ones(B, N-1)/(N-1)).float().cuda()
qkv_blk_3 = torch.bmm(attn_inputs_blk3[0][0], linear_weight_blk_3.unsqueeze(0).repeat(B, 1, 1).permute(0, 2, 1)) + linear_bias_blk_3
qkv_blk_3 = qkv_blk_3.reshape(B, N, 3, num_heads, embed_dim).permute(2, 0, 3, 1, 4)
q_blk_3, k_blk_3, v_blk_3 = qkv_blk_3[0], qkv_blk_3[1], qkv_blk_3[2] # make torchscript happy (cannot use tensor as tuple)
# attn_blk_3: (B, num_heads, N, N) = (256, num_heads, 65, 65)
attn_blk_3 = (q_blk_3 @ k_blk_3.transpose(-2, -1)) * scale
attn_blk_3 = attn_blk_3.softmax(dim=-1)
# attnprime_blk_3: (B, N-1) = (256, 64)
attnprime_blk_3 = attn_blk_3[:,0,0,1:]
qkv_blk_7 = torch.bmm(attn_inputs_blk7[0][0], linear_weight_blk_7.unsqueeze(0).repeat(B, 1, 1).permute(0, 2, 1)) + linear_bias_blk_7
qkv_blk_7 = qkv_blk_7.reshape(B, N, 3, num_heads, embed_dim).permute(2, 0, 3, 1, 4)
q_blk_7, k_blk_7, v_blk_7 = qkv_blk_7[0], qkv_blk_7[1], qkv_blk_7[2] # make torchscript happy (cannot use tensor as tuple)
# attn_blk_7: (B, num_heads, N, N)
attn_blk_7 = (q_blk_7 @ k_blk_7.transpose(-2, -1)) * scale
attn_blk_7 = attn_blk_7.softmax(dim=-1)
# attnprime_blk_7: (B, N-1)
attnprime_blk_7 = attn_blk_7[:,0,0,1:]
loss_blk3 = csloss(attnprime_blk_3, uniform)
loss_blk7 = csloss(attnprime_blk_7, uniform)
value_blk3.append(loss_blk3.detach().clone())
value_blk7.append(loss_blk7.detach().clone())
attn_inputs_blk3.clear()
attn_inputs_blk7.clear()
print('Considering attnlier of batch %d from the wild massive unlabeled dataset.' % i)
for hook in hooksadd:
hook.remove()
return torch.cat(value_blk3,dim=0), torch.cat(value_blk7,dim=0)
def identify_outlier():
value = []
pred_list = []
index = 0
teacher.eval()
for i,(inputs, labels) in enumerate(data_train_loader_noshuffle):
inputs = inputs.cuda()
# outputs: (bs, n_classes)
outputs, output_feature = teacher(inputs)
        # pred: (bs,)
pred = outputs.data.max(1)[1]
loss = celoss(outputs, pred)
value.append(loss.detach().clone())
index += inputs.shape[0]
pred_list.append(pred)
print('Considering outlier of batch %d from the wild massive unlabeled dataset.' % i)
return torch.cat(value,dim=0), torch.cat(pred_list,dim=0)
def train(epoch, trainloader, nll, class_weights):
net.train()
loss_list, batch_list = [], []
interval = len(trainloader) // 6
for i, (images, labels) in enumerate(trainloader):
images, labels = images.cuda(), labels.cuda()
optimizer.zero_grad()
optimizer_noise.zero_grad()
output, output_feature_s = net(images)
output_t, output_feature_t = teacher(images)
output_t = output_t.detach()
output_feature_t = output_feature_t.detach()
pred = output_t.data.max(1)[1]
preds_t = pred.cpu().data.numpy()
if args.robust:
for class_weight in class_weights:
weights = torch.from_numpy(class_weight[preds_t]).float().cuda()
loss = robust_kdloss(output, output_t, weights)
else:
loss = kdloss(output, output_t)
output_s = F.softmax(output, dim=1)
output_s_adaptation = torch.matmul(output_s, noisy(noise_adaptation))
loss += nll(torch.log(output_s_adaptation), pred)
if args.attnprobe_dist:
loss_patch_attn = args.patchattn * patch_attention_probe_loss(output_feature_t, output_feature_s)
loss += loss_patch_attn
loss_list.append(loss.data.item())
batch_list.append(i+1)
if (i % interval) == 0:
if args.attnprobe_dist:
print('Train - Epoch %d, Batch: %d, Loss: %f, Loss_attn: %f' % (epoch, i, loss.data.item(), loss_patch_attn.data.item()))
else:
print('Train - Epoch %d, Batch: %d, Loss: %f' % (epoch, i, loss.data.item()))
loss.backward()
optimizer.step()
optimizer_noise.step()
def test(epoch):
global acc, acc_best, epoch_best
net.eval()
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
total_correct = 0
avg_loss = 0.0
with torch.no_grad():
for i, (images, labels) in enumerate(data_test_loader):
images, labels = images.cuda(), labels.cuda()
output, output_feature_s = net(images)
avg_loss += criterion(output, labels).sum()
pred = output.data.max(1)[1]
total_correct += pred.eq(labels.data.view_as(pred)).sum()
acc1, acc5 = accuracy(output, labels, topk=(1, 5))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
avg_loss /= len(data_test)
acc = float(total_correct) / len(data_test)
if acc_best < acc:
torch.save(net.state_dict(), args.output_dir + 'student/' + 'checkpoint.pth')
acc_best = acc
epoch_best = epoch
print('Test Avg. Loss: %f, Accuracy: %f. Epoch: %d' % (avg_loss.data.item(), acc, epoch))
print('******** ******** ********')
print('Test Avg Best. Accuracy: %f. Epoch: %d' % (acc_best, epoch_best))
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1, top5=top5))
def train_and_test(epoch, trainloader3, nll, class_weight):
train(epoch, trainloader3, nll, class_weight)
test(epoch)
# def adjust_learning_rate(optimizer, epoch, max_epoch):
# """For resnet, the lr starts from 0.1, and is divided by 10 at 80 and 120 epochs"""
# if epoch < (max_epoch/200.0*80.0):
# lr = 0.1
# elif epoch < (max_epoch/200.0*160.0):
# lr = 0.01
# else:
# lr = 0.001
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
def adjust_learning_rate(optimizer, epoch, max_epoch, args):
"""Decay the learning rate based on schedule"""
lr = args.lr_S
if args.cos: # cosine lr schedule
lr *= 0.5 * (1. + math.cos(math.pi * epoch / max_epoch))
else: # stepwise lr schedule
for milestone in args.schedule:
lr *= 0.1 if epoch >= milestone else 1.
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def get_positive(value_blk3, value_blk7, value_out, args):
positive_index = []
if args.attnprobe_sel:
value = value_out
else:
value = args.attnlier * (value_blk3 + value_blk7) + args.outlier * value_out
if args.random:
print('randomly selected!')
positive_index = torch.tensor(random.sample(range(1281167), args.num_select))
else:
positive_index = value.topk(args.num_select,largest=False)[1]
return positive_index
def main():
global acc_best
if os.path.exists(args.selected_file + 'value_out.pth'):
value_out = torch.load(args.selected_file + 'value_out.pth').cuda()
pred_out = torch.load(args.selected_file + 'pred_out.pth').cuda()
value_blk3 = torch.load(args.selected_file + 'value_blk3.pth').cuda()
value_blk7 = torch.load(args.selected_file + 'value_blk7.pth').cuda()
# value_numpy = np.loadtxt(args.selected_file + '/value.txt')
# value = torch.Tensor(value_numpy)
# pred_numpy = np.loadtxt(args.selected_file + '/pred.txt')
# pred = torch.Tensor(pred_numpy)
else:
value_blk3, value_blk7 = identify_attnlier(checkpoint, embed_dim, num_heads)
value_out, pred_out = identify_outlier()
torch.save(value_out, args.selected_file + 'value_out.pth')
torch.save(pred_out, args.selected_file + 'pred_out.pth')
torch.save(value_blk3, args.selected_file + 'value_blk3.pth')
torch.save(value_blk7, args.selected_file + 'value_blk7.pth')
# np.savetxt(args.selected_file + '/value.txt', value.numpy(), fmt='%d',delimiter=None)
# np.savetxt(args.selected_file + '/pred.txt', pred.numpy(), fmt='%d',delimiter=None)
positive_index = get_positive(value_blk3, value_blk7, value_out, args)
nll = torch.nn.NLLLoss().cuda()
positive_index = positive_index.tolist()
data_train_select = torch.utils.data.Subset(data_train_transform, positive_index)
trainloader3 = torch.utils.data.DataLoader(label_train_subset + data_train_select, batch_size=256, shuffle=True, num_workers=8, pin_memory=True)
class_weight = get_class_weight(teacher, trainloader3, num_classes=args.nb_classes)
print(class_weight)
class_weights = perturb(class_weight)
epoch = int(640000/args.num_select * 512)
for e in range(1, epoch):
adjust_learning_rate(optimizer, e, epoch, args)
train_and_test(e, trainloader3, nll, class_weights)
print(acc_best)
if __name__ == '__main__':
main()
| 41.958743 | 240 | 0.67472 |
d6dd7b41e29df538c719fa74798776e4c68a1f00 | 19,166 | py | Python | py/desispec/workflow/proctable.py | desihub/desispec | 22b84030c9c4f87080cb09900456bd0db0f6b4f0 | [
"BSD-3-Clause"
] | 24 | 2015-09-29T06:06:29.000Z | 2022-01-14T07:31:45.000Z | py/desispec/workflow/proctable.py | desihub/desispec | 22b84030c9c4f87080cb09900456bd0db0f6b4f0 | [
"BSD-3-Clause"
] | 1,452 | 2015-02-26T00:14:23.000Z | 2022-03-31T23:35:10.000Z | py/desispec/workflow/proctable.py | desihub/desispec | 22b84030c9c4f87080cb09900456bd0db0f6b4f0 | [
"BSD-3-Clause"
] | 25 | 2015-02-06T21:39:13.000Z | 2022-02-22T14:16:31.000Z | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import os
from astropy.table import Table, vstack
from collections import OrderedDict
## Import some helper functions, you can see their definitions by uncommenting the bash shell command
from desispec.workflow.exptable import default_obstypes_for_exptable
from desispec.workflow.utils import define_variable_from_environment, pathjoin
from desispec.io.util import difference_camwords, parse_badamps, create_camword, decode_camword
from desiutil.log import get_logger
###############################################
##### Processing Table Column Definitions #####
###############################################
## To eventually be turned into a full-fledged data model. For now, a brief description.
# EXPID, int, the exposure ID's associated with the job. Always a np.array, even if a single exposure.
# OBSTYPE, string, the obstype as defined by ICS.
# TILEID, int, the TILEID of the tile the exposure observed.
# NIGHT, int, the night of the observation.
# BADAMPS, string, comma list of "{camera}{petal}{amp}", i.e. "[brz][0-9][ABCD]". Example: 'b7D,z8A'
# in the csv this is saved as a semicolon separated list
# LASTSTEP, string, the last step the pipeline should run through for the given exposure. Inclusive of last step.
# EXPFLAG, np.ndarray, set of flags that describe the exposure.
# PROCCAMWORD, string, The result of difference_camword(CAMWORD,BADCAMWORD) from those exposure table entries.
# This summarizes the cameras that should be processed for the given exposure/job
# CALIBRATOR, int, A 0 signifies that the job is not associated with a calibration exposure. 1 means that it is.
# INTID, int, an internally generated ID for a single job within a production. Only unique within a production and
#             not guaranteed to be the same between different production runs (e.g. between a daily
#             run and a large batch reprocessing run).
# OBSDESC, string, describes the observation in more detail than obstype. Currently only used for DITHER on dither tiles.
# JOBDESC, string, describes the job that the row defines. For a single science exposure that could be 'prestdstar' or
#                  'poststdstar'. For joint science that would be 'stdstarfit'. For individual arcs it is 'arc', for
#                  joint arcs it is 'psfnight'. For individual flats it is 'flat', for joint flats it is 'psfnightly'.
# LATEST_QID, int, the most recent Slurm ID assigned to the submitted job.
# SUBMIT_DATE, int, the 'unix time' of the job submission in seconds (int(time.time())).
# STATUS, string, the most recent Slurm status of the job. See docstring of desispec.workflow.queue.get_resubmission_states
# for a list and description.
# SCRIPTNAME, string, the name of the script submitted to Slurm. Due to astropy table constraints, this is truncated
# to a maximum of 40 characters.
# INT_DEP_IDS, np.array, internal ID's of all jobs that are dependencies for the current row. I.e. inputs to the current job.
# LATEST_DEP_QID, np.array, the most recent Slurm ID's for the dependencies jobs uniquely identified by internal ID's
# in INT_DEP_IDS
# ALL_QIDS, np.array, a list of all Slurm ID's associated with submissions of this job. Useful if multiple submissions
# were made because of node failures or any other issues that were later resolved (or not resolved).
##################################################
def get_processing_table_column_defs(return_default_values=False, overlap_only=False, unique_only=False):
"""
Contains the column names, data types, and default row values for a DESI processing table. It returns
the names and datatypes with the defaults being given with an optional flag. Returned as 2 (or 3) lists.
Args:
return_default_values, bool. True if you want the default values returned.
overlap_only, bool. Only return the columns that are common to both processing and exposure tables.
unique_only, bool. Only return columns that are not found in an exposure table.
Returns:
colnames, list. List of column names for an processing table.
coldtypes, list. List of column datatypes for the names in colnames.
coldeflts, list. Optionally returned if return_default_values is True. List of default values for the
corresponding colnames.
"""
## Define the column names for the internal production table and their respective datatypes, split in two
## only for readability's sake
colnames1 = ['EXPID' , 'OBSTYPE', 'TILEID', 'NIGHT' ]
coltypes1 = [np.ndarray , 'S10' , int , int ]
coldeflt1 = [np.ndarray(shape=0).astype(int), 'unknown', -99 , 20000101]
colnames1 += ['BADAMPS', 'LASTSTEP', 'EXPFLAG' ]
coltypes1 += ['S30' , 'S30' , np.ndarray ]
coldeflt1 += ['' , 'all' , np.array([], dtype=str)]
colnames2 = [ 'PROCCAMWORD' ,'CALIBRATOR', 'INTID', 'OBSDESC', 'JOBDESC', 'LATEST_QID']
coltypes2 = [ 'S40' , np.int8 , int , 'S16' , 'S12' , int ]
coldeflt2 = [ 'a0123456789' , 0 , -99 , '' , 'unknown', -99 ]
colnames2 += [ 'SUBMIT_DATE', 'STATUS', 'SCRIPTNAME']
coltypes2 += [ int , 'S10' , 'S40' ]
coldeflt2 += [ -99 , 'U' , '' ]
colnames2 += ['INT_DEP_IDS' , 'LATEST_DEP_QID' , 'ALL_QIDS' ]
coltypes2 += [np.ndarray , np.ndarray , np.ndarray ]
coldeflt2 += [np.ndarray(shape=0).astype(int), np.ndarray(shape=0).astype(int), np.ndarray(shape=0).astype(int)]
colnames = colnames1 + colnames2
coldtypes = coltypes1 + coltypes2
coldeflts = coldeflt1 + coldeflt2
if return_default_values:
if overlap_only:
return colnames1, coltypes1, coldeflt1
elif unique_only:
return colnames2, coltypes2, coldeflt2
else:
return colnames, coldtypes, coldeflts
else:
if overlap_only:
return colnames1, coltypes1
elif unique_only:
return colnames2, coltypes2
else:
return colnames, coldtypes
def default_exptypes_for_proctable():
"""
Defines the exposure types to be recognized by the workflow and saved in the processing table by default.
Returns:
list. A list of default obstypes to be included in a processing table.
"""
## Define the science types to be included in the exposure table (case insensitive)
return ['arc','flat','twilight','science','sci','dither']
def get_processing_table_name(specprod=None, prodmod=None, extension='csv'):
"""
Defines the default processing name given the specprod of the production and the optional extension.
Args:
specprod, str or None. The name of the production. If None, it will be taken from the environment variable.
prodmod, str. Additional str that can be added to the production table name to further differentiate it.
Used in daily workflow to add the night to the name and make it unique from other nightly tables.
extension, str. The extension (and therefore data format) without a leading period of the saved table.
Default is 'csv'.
Returns:
str. The processing table name given the input night and extension.
"""
if specprod is None:
specprod = define_variable_from_environment(env_name='SPECPROD',
var_descr="Use SPECPROD for unique processing table directories")
if prodmod is not None:
prodname_modifier = '-' + str(prodmod)
elif 'SPECPROD_MOD' in os.environ:
prodname_modifier = '-' + os.environ['SPECPROD_MOD']
else:
prodname_modifier = ''
return f'processing_table_{specprod}{prodname_modifier}.{extension}'
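# Illustrative examples (added for clarity; assumes SPECPROD_MOD is not set in the environment):
#   get_processing_table_name(specprod='daily')                    -> 'processing_table_daily.csv'
#   get_processing_table_name(specprod='daily', prodmod=20201231)  -> 'processing_table_daily-20201231.csv'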
def get_processing_table_path(specprod=None):
"""
Defines the default path to save a processing table. If specprod is not given, the environment variable
'SPECPROD' must exist.
Args:
specprod, str or None. The name of the production. If None, it will be taken from the environment variable.
Returns:
str. The full path to the directory where the processing table should be written (or is already written). This
             does not include the filename.
"""
if specprod is None:
specprod = define_variable_from_environment(env_name='SPECPROD',
var_descr="Use SPECPROD for unique processing table directories")
basedir = define_variable_from_environment(env_name='DESI_SPECTRO_REDUX',
var_descr="The specprod path")
path = pathjoin(basedir, specprod, 'processing_tables')
return path
def get_processing_table_pathname(specprod=None, prodmod=None, extension='csv'): # base_path,specprod
"""
Defines the default pathname to save a processing table.
Args:
specprod, str or None. The name of the production. If None, it will be taken from the environment variable.
prodmod, str. Additional str that can be added to the production table name to further differentiate it.
Used in daily workflow to add the night to the name and make it unique from other nightly tables.
extension, str. The extension (and therefore data format) without a leading period of the saved table.
Default is 'csv'.
Returns:
str. The full pathname where the processing table should be written (or is already written). This
includes the filename.
"""
if specprod is None:
specprod = define_variable_from_environment(env_name='SPECPROD',
var_descr="Use SPECPROD for unique processing table directories")
path = get_processing_table_path(specprod)
table_name = get_processing_table_name(specprod, prodmod, extension)
return pathjoin(path, table_name)
def instantiate_processing_table(colnames=None, coldtypes=None, rows=None):
"""
Create an empty processing table with proper column names and datatypes. If rows is given, it inserts the rows
into the table, otherwise it returns a table with no rows.
Args:
        colnames, list. List of column names for a processing table.
coldtypes, list. List of column datatypes for the names in colnames.
rows, list or np.array of Table.Rows or dicts. An iterable set of Table.Row's or dicts with keys/colnames and value
pairs that match the default column names and data types of the
default exposure table.
Returns:
processing_table, Table. An astropy Table with the column names and data types for a DESI workflow processing
table. If the input rows was not None, it contains those rows, otherwise it has no rows.
"""
## Define the column names for the exposure table and their respective datatypes
if colnames is None or coldtypes is None:
colnames, coldtypes = get_processing_table_column_defs()
processing_table = Table(names=colnames, dtype=coldtypes)
if rows is not None:
for row in rows:
processing_table.add_row(row)
return processing_table
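# Illustrative usage sketch (added for clarity, not part of the original module):
#   ptable = instantiate_processing_table()        # empty table with the default schema
#   names, dtypes, defaults = get_processing_table_column_defs(return_default_values=True)
#   ptable.add_row(dict(zip(names, defaults)))     # one row populated entirely with default values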
def exptable_to_proctable(input_exptable, obstypes=None):
"""
Converts an exposure table to a processing table and an unprocessed table. The columns unique to a processing table
are filled with default values. If comments are made in COMMENTS or HEADERERR, those will be adjusted in the values
stored in the processing table.
Args:
input_exptable, Table. An exposure table. Each row will be converted to a row of an processing table. If
comments are made in COMMENTS or HEADERERR, those will be adjusted in the values
stored in the processing table.
obstypes, list or np.array. Optional. A list of exposure OBSTYPE's that should be processed (and therefore
added to the processing table).
Returns:
processing_table, Table. The output processing table. Each row corresponds with an exposure that should be
processed.
unprocessed_table, Table. The output unprocessed table. Each row is an exposure that should not be processed.
"""
log = get_logger()
exptable = input_exptable.copy()
if obstypes is None:
obstypes = default_obstypes_for_exptable()
## Define the column names for the exposure table and their respective datatypes
colnames, coldtypes, coldefaults = get_processing_table_column_defs(return_default_values=True)
# for col in ['COMMENTS']: #'HEADERERR',
# if col in exptable.colnames:
# for ii, arr in enumerate(exptable[col]):
# for item in arr:
# clean_item = item.strip(' \t')
# if len(clean_item) > 6:
# keyval = None
# for symb in [':', '=']:
# if symb in clean_item:
# keyval = [val.strip(' ') for val in clean_item.split(symb)]
# break
# if keyval is not None and len(keyval) == 2 and keyval[0].upper() in exptable.colnames:
# key, newval = keyval[0].upper(), keyval[1]
# expid, oldval = exptable['EXPID'][ii], exptable[key][ii]
# log.info(
# f'Found a requested correction to ExpID {expid}: Changing {key} val from {oldval} to {newval}')
# exptable[key][ii] = newval
good_exps = (exptable['EXPFLAG'] == 0)
good_types = np.array([val in obstypes for val in exptable['OBSTYPE']]).astype(bool)
good = (good_exps & good_types)
good_table = exptable[good]
unprocessed_table = exptable[~good]
## Remove columns that aren't relevant to processing, they will be added back in the production tables for
## end user viewing
for col in ['REQRA', 'REQDEC', 'TARGTRA', 'TARGTDEC', 'HEADERERR', 'COMMENTS', 'BADEXP']:
if col in exptable.colnames:
good_table.remove_column(col)
if len(good_table) > 0:
rows = []
for erow in good_table:
prow = erow_to_prow(erow)#, colnames, coldtypes, coldefaults)
rows.append(prow)
processing_table = Table(names=colnames, dtype=coldtypes, rows=rows)
else:
processing_table = Table(names=colnames, dtype=coldtypes)
return processing_table, unprocessed_table
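# Illustrative usage sketch (the exposure table is assumed to be loaded elsewhere and the
# obstypes list shown is only an example):
def _example_exptable_to_proctable(exptable):
    proc_table, unproc_table = exptable_to_proctable(exptable, obstypes=['arc', 'flat', 'science'])
    return proc_table, unproc_table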
def erow_to_prow(erow):#, colnames=None, coldtypes=None, coldefaults=None, joinsymb='|'):
"""
Converts an exposure table row to a processing table row. The columns unique to a processing table
are filled with default values. If comments are made in COMMENTS or HEADERERR, those are ignored.
Args:
erow, Table.Row or dict. An exposure table row. The row will be converted to a row of an processing table.
If comments are made in COMMENTS or HEADERERR, those are ignored.
Returns:
prow, dict. The output processing table row.
"""
log = get_logger()
erow = table_row_to_dict(erow)
row_names = list(erow.keys())
## Define the column names for the exposure table and their respective datatypes
#if colnames is None:
colnames, coldtypes, coldefaults = get_processing_table_column_defs(return_default_values=True)
colnames, coldtypes, coldefaults = np.array(colnames,dtype=object), \
np.array(coldtypes,dtype=object), \
np.array(coldefaults,dtype=object)
prow = dict()
for nam, typ, defval in zip(colnames, coldtypes, coldefaults):
if nam == 'PROCCAMWORD':
if 'BADCAMWORD' in row_names:
badcamword = erow['BADCAMWORD']
else:
badcamword = ''
prow[nam] = difference_camwords(erow['CAMWORD'],badcamword)
elif nam == 'OBSDESC':
if nam in colnames:
prow[nam] = coldefaults[colnames == nam][0]
else:
prow[nam] = ''
for word in ['dither', 'acquisition', 'focus', 'test']:
if 'PROGRAM' in row_names and word in erow['PROGRAM'].lower():
prow[nam] = word
elif nam == 'EXPID':
prow[nam] = np.array([erow[nam]])
elif nam in row_names:
prow[nam] = erow[nam]
else:
prow[nam] = defval
    ## For obstypes that aren't science, BADAMPS loses its relevance. For processing,
## convert those into bad cameras in BADCAMWORD, so the cameras aren't processed.
## Otherwise we'll have nightly calibrations with only half the fibers useful.
if prow['OBSTYPE'] != 'science' and prow['BADAMPS'] != '':
badcams = []
for (camera, petal, amplifier) in parse_badamps(prow['BADAMPS']):
badcams.append(f'{camera}{petal}')
newbadcamword = create_camword(badcams)
log.info("For nonsscience exposure: {}, converting BADAMPS={} to bad cameras={}.".format( erow['EXPID'],
prow['BADAMPS'],
newbadcamword ) )
prow['PROCCAMWORD'] = difference_camwords(prow['PROCCAMWORD'],newbadcamword)
prow['BADAMPS'] = ''
return prow
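# Illustrative sketch (column values are hypothetical; the real column sets come from the
# exposure and processing table definitions):
#
#   erow = {'EXPID': 123, 'OBSTYPE': 'arc', 'CAMWORD': 'a0123456789',
#           'BADCAMWORD': '', 'BADAMPS': '', 'PROGRAM': 'calibration arcs'}
#   prow = erow_to_prow(erow)
#   # prow['EXPID'] comes back wrapped as np.array([123]); prow['PROCCAMWORD'] is CAMWORD
#   # minus anything listed in BADCAMWORD; columns missing from erow fall back to defaults.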
def table_row_to_dict(table_row):
"""
Helper function to convert a table row to a dictionary, which is much easier to work with for some applications
Args:
table_row, Table.Row or dict. The row of an astropy table that you want to convert into a dictionary where
each key is a column name and the values are the column entry.
Returns:
out, dict. Dictionary where each key is a column name and the values are the column entry.
"""
if type(table_row) is Table.Row:
out = {coln: table_row[coln] for coln in table_row.colnames}
return out
elif type(table_row) in [dict, OrderedDict]:
return table_row
else:
log = get_logger()
typ = type(table_row)
log.error(f"Received table_row of type {typ}, can't convert to a dictionary. Exiting.")
raise TypeError(f"Received table_row of type {typ}, can't convert to a dictionary. Exiting.") | 51.8 | 129 | 0.630596 |
b383b2828ba05c95ace2da8c52a020e73d856776 | 5,299 | py | Python | src/compas_rcf/docker/docker_cmds.py | feihln/compas_rcf | 90d25dbc1fd8aa30c67a2888ef8896bc27add23e | [
"MIT"
] | null | null | null | src/compas_rcf/docker/docker_cmds.py | feihln/compas_rcf | 90d25dbc1fd8aa30c67a2888ef8896bc27add23e | [
"MIT"
] | 21 | 2020-04-01T10:00:59.000Z | 2020-04-23T14:08:05.000Z | src/compas_rcf/docker/docker_cmds.py | feihln/compas_rcf | 90d25dbc1fd8aa30c67a2888ef8896bc27add23e | [
"MIT"
] | null | null | null | """Docker compose commands to be used from python scripts."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import shlex
import subprocess
import sys
log = logging.getLogger(__name__)
def _setup_env_vars(env_vars):
list_vars = []
for key in env_vars:
if os.name == "nt":
list_vars.append("set")
list_vars.append("{}={}".format(key.upper(), env_vars[key]))
list_vars.append("&&")
return list_vars
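# Illustrative sketch (hypothetical variable; exact rendering depends on the platform):
#
#   >>> _setup_env_vars({"ros_ip": "192.168.0.2"})   # on Windows (os.name == "nt")
#   ['set', 'ROS_IP=192.168.0.2', '&&']
#
# On other platforms the leading "set" token is omitted.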
def _run(cmd, check_output=False, print_output=True, **kwargs):
if sys.version_info.major < 3:
if check_output:
subprocess.check_call(cmd, universal_newlines=print_output, **kwargs)
else:
subprocess.call(cmd, universal_newlines=print_output, **kwargs)
else:
subprocess.run(cmd, check=check_output, text=print_output, **kwargs)
def compose_up(
path,
force_recreate=False,
remove_orphans=False,
ignore_orphans=True,
print_output=True,
check_output=True,
env_vars={},
):
"""Run ``docker-compose up`` for specified compose file.
Parameters
----------
path : :class:`os.PathLike` or :class:`str`
Path to compose file.
force_recreate : :class:`bool`, optional
Force recreation of containers specified in ``docker-compose`` file.
Defaults to ``False``.
remove_orphans : :class:`bool`, optional
Remove orphaned containers. Defaults to ``False``.
ignore_orphans : :class:`bool`, optional
Don't warn about orphaned containers (useful since the use of multiple
compose files produces false positives for this check). Defaults to
``True``.
print_output : :class:`bool`, optional
        Print ``stdout`` and ``stderr`` generated by ``docker-compose`` command.
Defaults to ``True``.
check_output : :class:`bool`, optional
Raise if ``docker-compose`` fails. Defaults to ``True``.
env_vars : :class:`dict`, optional
Environment variables to set before running ``docker-compose``
"""
run_kwargs = {}
run_kwargs.update({"check_output": check_output})
run_kwargs.update({"print_output": print_output})
cmd = ["docker-compose", "--file", str(path), "up", "--detach"]
log.debug("Env vars: {}".format(env_vars))
if ignore_orphans:
env_vars.update({"COMPOSE_IGNORE_ORPHANS": "true"})
if len(env_vars) > 0:
cmd = _setup_env_vars(env_vars) + cmd
run_kwargs.update({"shell": True})
if force_recreate:
cmd.append("--force-recreate")
if remove_orphans:
cmd.append("--remove-orphans")
log.debug("Command to run: {}".format(cmd))
_run(cmd, **run_kwargs)
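# Illustrative usage sketch (the compose file path and environment variable are hypothetical):
def _example_compose_up():
    compose_up(
        "docker-compose.yml",
        force_recreate=True,
        env_vars={"ros_ip": "192.168.0.2"},
    )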
def compose_down(path, check_output=True, print_output=True):
"""Run ``docker-compose down`` for specified compose file.
Parameters
----------
path : :class:`os.PathLike` or :class:`str`
Path to compose file
print_output : :class:`bool`, optional
        Print ``stdout`` and ``stderr`` generated by ``docker-compose`` command.
Defaults to ``True``.
check_output : :class:`bool`, optional
Raise if ``docker-compose`` fails. Defaults to ``True``.
"""
cmd = ["docker-compose", "--file", str(path), "down"]
log.debug("Running compose down for {}".format(path))
_run(cmd, check_output=check_output, print_output=print_output)
def restart_container(container_name, check_output=True, print_output=True):
"""Run ``docker restart`` for specified container.
Parameters
----------
container_name : :class:`str`
Name of container to restart.
print_output : :class:`bool`, optional
        Print ``stdout`` and ``stderr`` generated by the ``docker`` command.
Defaults to ``True``.
check_output : :class:`bool`, optional
Raise if ``docker-compose`` fails. Defaults to ``True``.
"""
cmd_str = "docker-compose --file {} down".format(container_name)
cmd_str = "docker restart {}".format(container_name)
cmd = shlex.split(cmd_str)
log.debug("Restarting {}".format(container_name))
_run(cmd, check_output=check_output, print_output=print_output)
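# Illustrative usage sketch (hypothetical container name and compose file): restart one
# container, then tear the stack down with the helpers above.
def _example_container_lifecycle():
    restart_container("ros-master")
    compose_down("docker-compose.yml")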
def kill_and_prune_all_containers(
print_output=True,
check_output=True,
env_vars={},
):
"""Kills and prunes all docker containers. Watch out, a bit brutal.
Parameters
----------
print_output : :class:`bool`, optional
        Print ``stdout`` and ``stderr`` generated by the ``docker`` command.
Defaults to ``True``.
check_output : :class:`bool`, optional
Raise if ``docker-compose`` fails. Defaults to ``True``.
env_vars : :class:`dict`, optional
Environment variables to set before running ``docker``
"""
run_kwargs = {}
run_kwargs.update({"check_output": check_output})
run_kwargs.update({"print_output": print_output})
cmd = '''for /f "tokens=* usebackq" %i in (`docker ps -aq`) do docker container rm -f %i'''
log.debug("Env vars: {}".format(env_vars))
if len(env_vars) > 0:
        # cmd is a shell string here (not a list), so prepend the env var statements
        # as plain text; _run below is already invoked with shell=True.
        cmd = " ".join(_setup_env_vars(env_vars)) + " " + cmd
log.debug("Command to run: {}".format(cmd))
_run(cmd, shell=True, **run_kwargs)
| 31.541667 | 95 | 0.645594 |
5c033ec80d31f669ceddc8ec7b383568424e1081 | 207 | py | Python | kvdroid/jclass/android/webkit.py | wilsenmuts/Kvdroid | 9ab29c54e761ce4cfaa9d8f847257c4a975fbe0f | [
"MIT"
] | 27 | 2021-03-09T21:39:43.000Z | 2022-01-25T22:55:34.000Z | kvdroid/jclass/android/webkit.py | wilsenmuts/Kvdroid | 9ab29c54e761ce4cfaa9d8f847257c4a975fbe0f | [
"MIT"
] | 10 | 2021-03-22T20:38:17.000Z | 2021-12-23T21:06:40.000Z | kvdroid/jclass/android/webkit.py | wilsenmuts/Kvdroid | 9ab29c54e761ce4cfaa9d8f847257c4a975fbe0f | [
"MIT"
] | 7 | 2021-03-23T07:55:47.000Z | 2021-12-17T09:32:35.000Z | from jnius import autoclass
from kvdroid.jclass import _class_call
def CookieManager(*args, instantiate: bool = False):
return _class_call(autoclass("android.webkit.CookieManager"), args, instantiate)
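# Illustrative usage sketch; assumes this runs on Android where the Java class is
# available, and that calling CookieManager() with no arguments hands back the
# underlying Java class so the usual singleton accessor applies:
#
#   cookie_manager = CookieManager().getInstance()
#   cookie_manager.setAcceptCookie(True)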
| 29.571429 | 84 | 0.797101 |
befff23a9189022295a078385c6c901ba9789d20 | 16,019 | py | Python | tests/test_x_provider.py | Swapnilr1/pyoidc | 2feb099f4f4e26047888437aea1226237d05ebba | [
"Apache-2.0"
] | null | null | null | tests/test_x_provider.py | Swapnilr1/pyoidc | 2feb099f4f4e26047888437aea1226237d05ebba | [
"Apache-2.0"
] | null | null | null | tests/test_x_provider.py | Swapnilr1/pyoidc | 2feb099f4f4e26047888437aea1226237d05ebba | [
"Apache-2.0"
] | null | null | null | from future.backports.urllib.parse import parse_qs
from future.backports.urllib.parse import urlparse
import json
import time
import pytest
from oic import rndstr
from oic.extension.client import Client
from oic.extension.message import TokenIntrospectionRequest
from oic.extension.message import TokenIntrospectionResponse
from oic.extension.message import TokenRevocationRequest
from oic.extension.provider import Provider
from oic.extension.token import JWTToken
from oic.oauth2.message import AccessTokenRequest
from oic.oauth2.message import AccessTokenResponse
from oic.oauth2.message import AuthorizationRequest
from oic.oauth2.message import AuthorizationResponse
from oic.oauth2.message import TokenErrorResponse
from oic.utils.authn.authn_context import AuthnBroker
from oic.utils.authn.client import verify_client
from oic.utils.authn.user import UserAuthnMethod
from oic.utils.authz import Implicit
from oic.utils.keyio import KeyBundle
from oic.utils.keyio import KeyJar
from oic.utils.sdb import DefaultToken
from oic.utils.sdb import SessionDB
from oic.utils.sdb import lv_pack
from oic.utils.sdb import lv_unpack
CLIENT_CONFIG = {
"client_id": "client1",
'config': {'issuer': 'https://example.com/as'}
}
CONSUMER_CONFIG = {
"authz_page": "/authz",
"flow_type": "code",
"scope": [],
"response_type": "code",
}
ISSUER = "https://connect-op.heroku.com"
SERVER_INFO = {
"version": "3.0",
"issuer": ISSUER,
"authorization_endpoint": "http://localhost:8088/authorization",
"token_endpoint": "http://localhost:8088/token",
"flows_supported": ["code", "token", "code token"],
}
CDB = {
"a1b2c3": {
"password": "hemligt",
"client_secret": "drickyoughurt",
"redirect_uris": [("http://localhost:8087/authz", None)],
'token_endpoint_auth_method': 'client_secret_post',
'response_types': ['code', 'token']
},
"client1": {
"client_secret": "hemlighet",
"redirect_uris": [("http://localhost:8087/authz", None)],
'token_endpoint_auth_method': 'client_secret_post',
'response_types': ['code', 'token']
}
}
JWKS = {"keys": [
{
"d": "vT9bnSZ63uIdaVsmZjrbmcvrDZG-_qzVQ1KmrSSC398sLJiyaQKRPkmBRvV"
"-MGxW1MVPeCkhnSULCRgtqHq"
"-zQxMeCviSScHTKOuDYJfwMB5qdOE3FkuqPMsEVf6EXYaSd90"
"-O6GOA88LBCPNR4iKxsrQ6LNkawwiJoPw7muK3TbQk9HzuznF8WDkt72CQFxd4eT"
"6wJ97xpaIgxZce0oRmFcLYkQ4A0pgVhF42zxJjJDIBj_ZrSl5_qZIgiE76PV4hjH"
"t9Nv4ZveabObnNbyz9YOiWHiOLdYZGmixHuauM98NK8udMxI6IuOkRypFhJzaQZF"
"wMroa7ZNZF-mm78VYQ",
"dp":
"wLqivLfMc0FBhGFFRTb6WWzDpVZukcgOEQGb8wW3knmNEpgch699WQ4ZY_ws1xSbv"
"QZtbx7MaIBXpn3qT1LYZosoP5oHVTAvdg6G8I7zgWyqj-nG4evciuoeAa1Ff52h4-"
"J1moZ6FF2GelLdjXHoCbjIBjz_VljelSqOk5Sh5HU",
"dq": "KXIUYNfDxwxv3A_w1t9Ohm92gOs-UJdI3_IVpe4FauCDrJ4mqgsnTisA15KY"
"-9fCEvKfqG571WK6EKpBcxaRrqSU0ekpBvgJx8o3MGlqXWj-Lw0co8N9_"
"-fo1rYx_8g-wCRrm5zeA5pYJdwdhOBnmKOqw_GsXJEcYeUod1xkcfU",
"e": "AQAB",
"ext": "true",
"key_ops": "sign",
"kty": "RSA",
"n": "wl0DPln-EFLqr_Ftn6A87wEQAUVbpZsUTN2OCEsJV0nhlvmX3GUzyZx5UXdlM3Dz68PfUWCgfx67Il6sURqWVCnjnU-_gr3GeDyzedj-"
"lZejnBx-lEy_3j6B98SbcDfkJF6saXnPd7_kgilJT1_g-EVI9ifFB1cxZXHCd2WBeRABSCprAlCglF-YmnUeeDs5K32z2ckVjadF9BG2"
"7CO5UfNq0K8jI9Yj_coOhM9dRNrQ9UVZNdQVG-bAIDhB2y2o3ASGwqchHouIxv5YZNGS0SMJL5t0edh483q1tSWPqBw-ZeryLztOedBB"
"zSuJk7QDmL1B6B7KKUIrlUYJmVsYzw",
"p": "6MEg5Di_IFiPGKvMFRjyx2t7YAOQ4KfdIkU_Khny1t1eCG5O07omPe_jLU8I5fPaD5F5HhWExLNureHD4K6LB18JPE3VE8chQROiRSNP"
"Zo1-faUvHu-Dy0pr7I-TS8pl_P3vop1KelIbGwXhzPIRKQMqCEKi3tLJt4R_MQ18Dx0",
"q": "1cZVPpUbf4p5n4cMv_kERCPh3cieMs4aVojgh3feAiJiLwWWL9Pc43oJUekK44aWMnbs68Y4kqXtc52PMtBDzVp0Gjt0lCY3M7MYRVI4"
"JhtknqvQynMKQ2nKs3VldvVfY2SxyUmnRyEolQUGRA7rRMUyPb4AXhSR7oroRrJD59s",
"qi": "50PhyaqbLSczhipWiYy149sLsGlx9cX0tnGMswy1JLam7nBvH4"
"-MWB2oGwD2hmG-YN66q-xXBS9CVDLZZrj1sonRTQPtWE"
"-zuZqds6_NVlk2Ge4_IAA3TZ9tvIfM5FZVTOQsExu3_LX8FGCspWC1R"
"-zDqT45Y9bpaCwxekluO7Q",
'kid': 'sign1'
}, {
"k":
b"YTEyZjBlMDgxMGI4YWU4Y2JjZDFiYTFlZTBjYzljNDU3YWM0ZWNiNzhmNmFlYTNkNTY0NzMzYjE",
"kty": "oct",
"use": "sig"
}]}
def _eq(l1, l2):
return set(l1) == set(l2)
def test_lv_pack_unpack():
lst = ['foo', 'kaka', 'banan', 'jordgubb']
s = lv_pack(*lst)
r = lv_unpack(s)
assert r == lst
class DummyAuthn(UserAuthnMethod):
def __init__(self, srv, user):
UserAuthnMethod.__init__(self, srv)
self.user = user
def authenticated_as(self, cookie=None, **kwargs):
return {"uid": self.user}, time.time()
AUTHN_BROKER = AuthnBroker()
AUTHN_BROKER.add("UNDEFINED", DummyAuthn(None, "username"))
# dealing with authorization
AUTHZ = Implicit()
class TestProvider(object):
@pytest.fixture(autouse=True)
def create_provider(self):
kb = KeyBundle(JWKS["keys"])
kj = KeyJar()
kj.issuer_keys[''] = [kb]
_sdb = SessionDB(
"https://example.com/",
db={},
code_factory=DefaultToken('supersecret', 'verybadpassword',
typ='A', lifetime=600),
token_factory=JWTToken('T', keyjar=kj,
lt_pattern={'code': 3600, 'token': 900},
iss='https://example.com/as',
sign_alg='RS256'),
refresh_token_factory=JWTToken(
'R', keyjar=kj, lt_pattern={'': 24 * 3600},
iss='https://example.com/as')
)
# name, sdb, cdb, authn_broker, authz, client_authn,
self.provider = Provider("as", _sdb, CDB, AUTHN_BROKER, AUTHZ,
verify_client,
baseurl='https://example.com/as')
def test_authorization_endpoint_faulty_redirect_uri(self):
bib = {"state": "id-6da9ca0cc23959f5f33e8becd9b08cae",
# faulty redirect uri
"redirect_uri": "http://localhost:8087/cb",
"response_type": ["code"],
"client_id": "a1b2c3"}
arq = AuthorizationRequest(**bib)
resp = self.provider.authorization_endpoint(request=arq.to_urlencoded())
assert resp.status_code == 400
msg = json.loads(resp.message)
assert msg["error"] == "invalid_request"
def test_authenticated(self):
client = Client(**CLIENT_CONFIG)
client.authorization_endpoint = 'https://example.com/as'
sid = rndstr(8)
args = {
'redirect_uri': "http://localhost:8087/authz",
"state": sid, "response_type": 'code'}
url, body, ht_args, csi = client.request_info(
AuthorizationRequest, 'GET', request_args=args)
resp = self.provider.authorization_endpoint(urlparse(url).query)
assert resp.status_code == 303
resp = urlparse(resp.message).query
aresp = client.parse_authz_response(resp)
assert isinstance(aresp, AuthorizationResponse)
assert _eq(aresp.keys(), ['state', 'code', 'client_id', 'iss'])
assert _eq(client.grant[sid].keys(), ['tokens', 'code', 'exp_in',
'seed', 'id_token',
'grant_expiration_time'])
def test_authenticated_token(self):
client = Client(**CLIENT_CONFIG)
client.authorization_endpoint = 'https://example.com/as'
sid = rndstr(8)
args = {'redirect_uri': "http://localhost:8087/authz", "state": sid,
"response_type": 'token'}
url, body, ht_args, csi = client.request_info(AuthorizationRequest,
'GET', request_args=args)
QUERY_STRING = url.split("?")[1]
resp = self.provider.authorization_endpoint(QUERY_STRING)
auth_resp = parse_qs(urlparse(resp.message).fragment)
assert "access_token" in auth_resp
assert auth_resp["token_type"][0] == "Bearer"
def test_token_endpoint(self):
authreq = AuthorizationRequest(state="state",
redirect_uri="http://example.com/authz",
client_id="client1")
_sdb = self.provider.sdb
sid = _sdb.access_token.key(user="sub", areq=authreq)
access_grant = _sdb.token_factory['code'](sid=sid)
_sdb[sid] = {
"oauth_state": "authz",
"sub": "sub",
"authzreq": authreq.to_json(),
"client_id": "client1",
"code": access_grant,
"code_used": False,
"redirect_uri": "http://example.com/authz",
'response_type': ['code']
}
# Construct Access token request
areq = AccessTokenRequest(code=access_grant,
redirect_uri="http://example.com/authz",
client_id="client1",
client_secret="hemlighet",
grant_type='authorization_code')
resp = self.provider.token_endpoint(request=areq.to_urlencoded())
atr = AccessTokenResponse().deserialize(resp.message, "json")
assert _eq(atr.keys(), ['access_token', 'token_type'])
def test_token_endpoint_no_cache(self):
authreq = AuthorizationRequest(state="state",
redirect_uri="http://example.com/authz",
client_id="client1")
_sdb = self.provider.sdb
sid = _sdb.access_token.key(user="sub", areq=authreq)
access_grant = _sdb.token_factory['code'](sid=sid)
_sdb[sid] = {
"oauth_state": "authz",
"sub": "sub",
"authzreq": authreq.to_json(),
"client_id": "client1",
"code": access_grant,
"code_used": False,
"redirect_uri": "http://example.com/authz",
'response_type': ['code']
}
# Construct Access token request
areq = AccessTokenRequest(code=access_grant,
redirect_uri="http://example.com/authz",
client_id="client1",
client_secret="hemlighet",
grant_type='authorization_code')
resp = self.provider.token_endpoint(request=areq.to_urlencoded())
assert resp.headers == [('Pragma', 'no-cache'), ('Cache-Control', 'no-store'),
('Content-type', 'application/json')]
def test_token_endpoint_unauth(self):
authreq = AuthorizationRequest(state="state",
redirect_uri="http://example.com/authz",
client_id="client1",
response_type='code')
_sdb = self.provider.sdb
sid = _sdb.access_token.key(user="sub", areq=authreq)
access_grant = _sdb.token_factory['code'](sid=sid)
_sdb[sid] = {
"oauth_state": "authz",
"sub": "sub",
"authzreq": authreq.to_json(),
"client_id": "client1",
"code": access_grant,
"code_used": False,
"redirect_uri": "http://example.com/authz",
'response_type': ['code']
}
# Construct Access token request
areq = AccessTokenRequest(code=access_grant,
redirect_uri="http://example.com/authz",
client_id="client2",
client_secret="hemlighet",
grant_type='authorization_code')
resp = self.provider.token_endpoint(request=areq.to_urlencoded())
atr = TokenErrorResponse().deserialize(resp.message, "json")
assert _eq(atr.keys(), ['error_description', 'error'])
def test_token_introspection(self):
authreq = AuthorizationRequest(state="state",
redirect_uri="http://example.com/authz",
client_id="client1")
_sdb = self.provider.sdb
sid = _sdb.access_token.key(user="sub", areq=authreq)
access_grant = _sdb.token_factory['code'](sid=sid)
_sdb[sid] = {
"oauth_state": "authz",
"sub": "sub",
"authzreq": authreq.to_json(),
"client_id": "client1",
"code": access_grant,
"code_used": False,
"redirect_uri": "http://example.com/authz",
'response_type': ['code']
}
# Construct Access token request
areq = AccessTokenRequest(code=access_grant,
redirect_uri="http://example.com/authz",
client_id="client1",
client_secret="hemlighet",
grant_type='authorization_code')
resp = self.provider.token_endpoint(request=areq.to_urlencoded())
atr = AccessTokenResponse().deserialize(resp.message, "json")
req = TokenIntrospectionRequest(token=atr['access_token'],
client_id="client1",
client_secret="hemlighet",
token_type_hint='access_token')
resp = self.provider.introspection_endpoint(request=req.to_urlencoded())
assert resp
ti_resp = TokenIntrospectionResponse().deserialize(resp.message, 'json')
assert ti_resp['active'] is True
def test_token_revocation_and_introspection(self):
authreq = AuthorizationRequest(state="state",
redirect_uri="http://example.com/authz",
client_id="client1")
_sdb = self.provider.sdb
sid = _sdb.access_token.key(user="sub", areq=authreq)
access_grant = _sdb.token_factory['code'](sid=sid)
_sdb[sid] = {
"oauth_state": "authz",
"sub": "sub",
"authzreq": authreq.to_json(),
"client_id": "client1",
"code": access_grant,
"code_used": False,
"redirect_uri": "http://example.com/authz",
'response_type': ['code']
}
# Construct Access token request
areq = AccessTokenRequest(code=access_grant,
redirect_uri="http://example.com/authz",
client_id="client1",
client_secret="hemlighet",
grant_type='authorization_code')
resp = self.provider.token_endpoint(request=areq.to_urlencoded())
atr = AccessTokenResponse().deserialize(resp.message, "json")
req = TokenRevocationRequest(token=atr['access_token'],
client_id="client1",
client_secret="hemlighet",
token_type_hint='access_token')
resp = self.provider.revocation_endpoint(request=req.to_urlencoded())
assert resp.status_code == 200
req = TokenIntrospectionRequest(token=atr['access_token'],
client_id="client1",
client_secret="hemlighet",
token_type_hint='access_token')
resp = self.provider.introspection_endpoint(request=req.to_urlencoded())
assert resp
ti_resp = TokenIntrospectionResponse().deserialize(resp.message, 'json')
assert ti_resp['active'] is False
| 41.392765 | 119 | 0.584493 |
34fb61e982f4f68ff9f8443e070a44b09d46beaa | 2,852 | py | Python | tests/test_utils.py | Rails-on-HPC/subway | bacdc387fce66aab78753aa48f36ec1a79d9d64b | [
"MIT"
] | 4 | 2020-03-02T01:34:28.000Z | 2021-02-22T07:55:28.000Z | tests/test_utils.py | Rails-on-HPC/subway | bacdc387fce66aab78753aa48f36ec1a79d9d64b | [
"MIT"
] | null | null | null | tests/test_utils.py | Rails-on-HPC/subway | bacdc387fce66aab78753aa48f36ec1a79d9d64b | [
"MIT"
] | 1 | 2020-09-23T01:55:15.000Z | 2020-09-23T01:55:15.000Z | import sys
import os
import pytest
from functools import partial
from datetime import datetime
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from subway.utils import (
replace_wildcard,
simple_template_render,
statement_parser,
md5file,
flatten_dict,
editor,
)
def test_replace_wildcard():
def translate_func(translate_dict, s):
return translate_dict.get(s, "")
caps_dict = {"a": "A", "b": "B", "c": "C", "d": "DD"}
translate_pfunc = partial(translate_func, caps_dict)
r = partial(replace_wildcard, translate_pfunc)
assert r("") == ""
assert r("a") == "a"
assert r("abC") == "abC"
for i in range(8):
assert r("%" * i) == "%" * i
assert r("%%a") == "%a"
assert r("%%%a") == "%%a"
assert r("%a") == "A"
assert r("%ab") == "Ab"
assert r("%eb") == "b"
assert r("sf%c%dm%f") == "sfCDDm"
assert r("%%ab%") == "%ab%"
assert r("%Aal%cc%%") == "alCc%%"
assert r("%ddD\n%%a") == "DDdD\n%a"
assert r("slurm-%a.out") == "slurm-A.out"
assert r("r%%%%a%a%%a%") == "r%%%aA%a%"
@pytest.mark.parametrize(
"template",
[["{a}b{c}d", "AbCd"], ["{c}c{c}", "CcC"], ["a", "a"], ["{b}", "B"]],
indirect=True,
)
def test_template_render(template):
simple_template_render(
os.path.join(os.path.dirname(__file__), "test.template"),
os.path.join(os.path.dirname(__file__), "test.out"),
{"a": "A", "b": "B", "c": "C"},
)
# with open(os.path.join(os.path.dirname(__file__), "test.out"), "r") as f:
# s = f.read()
# assert s == "AbCd"
def test_statement_parser():
stdans1 = {
"a": ("=", "b"),
"cc": ("=", "ad"),
"e": (">", 17),
"f": ("<=", datetime(2020, 2, 2)),
}
assert statement_parser("a=b;cc=ad e>17; f<=datetime(2020,2,2)") == stdans1
assert statement_parser("a=b;cc=ad e>17; f<=datetime(2020,2,2)") == stdans1
assert statement_parser("a=b; cc=ad e>17; f<=datetime(2020,2,2)") == stdans1
assert statement_parser("a=b cc=ad e>17 f<=datetime(2020,2,2)") == stdans1
def test_md5file():
assert (
md5file(os.path.join(os.path.dirname(__file__), ".subway", "history.json"))
== "ac9501d73459152e7e1c2ce11b6d9a7b"
)
def test_flatten_dict():
assert flatten_dict({"a": 1, "b": "cc"}, parent_key="h") == {"ha": 1, "hb": "cc"}
assert flatten_dict(
{"d": [{"m": "n", "k": ["zz"]}, 2, [3, 4]], "c": 1}, sep="~"
) == {
"c": 1,
"d~list_0~k~list_0": "zz",
"d~list_0~m": "n",
"d~list_1": 2,
"d~list_2~list_0": 3,
"d~list_2~list_1": 4,
}
def _run(name):
if name[0].endswith("vim"):
raise OSError("no vim found")
def test_editor():
editor(os.path.join(os.path.dirname(__file__), ".subway", "config.json"), _run=_run)
| 28.237624 | 88 | 0.538569 |
652b64a7eb9cefde1b5de55ef1bab9f60952c0c4 | 1,456 | py | Python | app/core/tests/test_admin.py | sfbattles/recipe-app-api | 7c2e893e91e4165cdd7294db71f74196ca299e7b | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | sfbattles/recipe-app-api | 7c2e893e91e4165cdd7294db71f74196ca299e7b | [
"MIT"
] | null | null | null | app/core/tests/test_admin.py | sfbattles/recipe-app-api | 7c2e893e91e4165cdd7294db71f74196ca299e7b | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
"""Setup function get testcase for testing """
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='richard.long@gmail.com',
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='sfbattles@gmail.com',
password='password123',
name='richard long'
)
def test_users_listed(self):
"""Test that user are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url) # run http get on url
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""Test that the user edit page work"""
url = reverse('admin:core_user_change', args=[self.user.id])
# /admin/core/user/1 sample url that will be created
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| 33.860465 | 68 | 0.64217 |
b21b2edb482cf990bd44696f19ad7272f1a0c0de | 11,180 | py | Python | seqGAN-tensorflow-master/discriminator.py | pwnsauce8/kiv-bpini-web | 2572ffd5b8fdd49944dd84faf9fa689ed1d13018 | [
"MIT"
] | null | null | null | seqGAN-tensorflow-master/discriminator.py | pwnsauce8/kiv-bpini-web | 2572ffd5b8fdd49944dd84faf9fa689ed1d13018 | [
"MIT"
] | 4 | 2020-09-26T01:05:08.000Z | 2022-02-10T02:03:29.000Z | seqGAN-tensorflow-master/discriminator.py | pwnsauce8/kiv-bpini-web | 2572ffd5b8fdd49944dd84faf9fa689ed1d13018 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
import time
from utils import SummaryHelper
# An alternative to tf.nn.rnn_cell._linear function, which has been removed in Tensorflow 1.0.1
# The highway layer is borrowed from https://github.com/mkroutikov/tf-lstm-char-cnn
def linear(input_, output_size, scope=None):
'''
Linear map: output[k] = sum_i(Matrix[k, i] * input_[i] ) + Bias[k]
Args:
input_: a tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
scope: VariableScope for the created subgraph; defaults to "Linear".
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(input_[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
'''
shape = input_.get_shape().as_list()
if len(shape) != 2:
raise ValueError("Linear is expecting 2D arguments: %s" % str(shape))
if not shape[1]:
raise ValueError("Linear expects shape[1] of arguments: %s" % str(shape))
input_size = shape[1]
# Now the computation.
with tf.variable_scope(scope or "SimpleLinear"):
matrix = tf.get_variable("Matrix", [output_size, input_size], dtype=input_.dtype)
bias_term = tf.get_variable("Bias", [output_size], dtype=input_.dtype)
return tf.matmul(input_, tf.transpose(matrix)) + bias_term
def highway(input_, size, num_layers=1, bias=-2.0, f=tf.nn.relu, scope='Highway'):
"""Highway Network (cf. http://arxiv.org/abs/1505.00387).
t = sigmoid(Wy + b)
z = t * g(Wy + b) + (1 - t) * y
where g is nonlinearity, t is transform gate, and (1 - t) is carry gate.
"""
with tf.variable_scope(scope):
for idx in range(num_layers):
g = f(linear(input_, size, scope='highway_lin_%d' % idx))
t = tf.sigmoid(linear(input_, size, scope='highway_gate_%d' % idx) + bias)
output = t * g + (1. - t) * input_
input_ = output
return output
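# Illustrative sketch (shapes are hypothetical): the helpers above are typically applied to
# a flat feature tensor, a highway layer followed by a linear output layer, which mirrors
# how the Discriminator below uses them.
def _example_highway_usage(features, num_classes=2):
    with tf.variable_scope("example_highway_usage"):
        h = highway(features, features.get_shape()[1], num_layers=1, bias=0)
        return linear(h, num_classes, scope="output_layer")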
class Discriminator(object):
"""
A CNN for text classification.
Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
"""
def __init__(self, args, data, embed, summary, sequence_length, latest_dir, best_dir):
# Placeholders for input, output and dropout
self.sequence_length = sequence_length
self.args = args
self.data = data
self.num_classes = args.num_classes
self.vocab_size = data.vocab_size
self.filter_sizes = args.dis_filter_sizes
self.num_filters = args.dis_num_filters
self.embedding_size = args.embedding_size
self.l2_reg_lambda = args.dis_l2_reg_lambda
self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x")
self.input_y = tf.placeholder(tf.float32, [None, self.num_classes], name="input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
# Keeping track of l2 regularization loss (optional)
self.l2_loss = tf.constant(0.0)
self.latest_dir = latest_dir
self.best_dir = best_dir
self.latest_saver = None
self.best_saver = None
self.global_step = tf.Variable(0, trainable=False)
# initialize the training process
self.learning_rate = tf.Variable(float(args.dis_lr), trainable=False, dtype=tf.float32)
_, _, _, self.trainSummary, self.devSummary, self.testSummary = summary
def build_discriminator(self):
self.W = tf.Variable(
tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0),
name="W")
self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for filter_size, num_filter in zip(self.filter_sizes, self.num_filters):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
filter_shape = [filter_size, self.embedding_size, 1, num_filter]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[num_filter]), name="b")
conv = tf.nn.conv2d(
self.embedded_chars_expanded,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool(
h,
ksize=[1, self.sequence_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = sum(self.num_filters)
self.h_pool = tf.concat(pooled_outputs, 3)
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
# Add highway
with tf.name_scope("highway"):
self.h_highway = highway(self.h_pool_flat, self.h_pool_flat.get_shape()[1], 1, 0)
# Add dropout
with tf.name_scope("dropout"):
self.h_drop = tf.nn.dropout(self.h_highway, self.dropout_keep_prob)
# Final (unnormalized) scores and predictions
with tf.name_scope("output"):
W = tf.Variable(tf.truncated_normal([num_filters_total, self.num_classes], stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[self.num_classes]), name="b")
self.l2_loss += tf.nn.l2_loss(W)
self.l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
self.ypred_for_auc = tf.nn.softmax(self.scores)
self.predictions = tf.argmax(self.scores, 1, name="predictions")
# CalculateMean cross-entropy loss
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
self.loss = tf.reduce_mean(losses) + self.l2_reg_lambda * self.l2_loss
self.acc = tf.reduce_mean(tf.cast(tf.equal(tf.cast(tf.argmax(self.scores, 1), tf.int32), tf.cast(tf.argmax(self.input_y, 1), tf.int32)), tf.float32))
self.params = [v for v in tf.trainable_variables() if 'discriminator' in v.name]
opt = tf.train.AdamOptimizer(self.learning_rate)
gradients = tf.gradients(self.loss, self.params)
clipped_gradients, self.gradient_norm = tf.clip_by_global_norm(gradients, self.args.grad_clip)
self.update = opt.apply_gradients(zip(clipped_gradients, self.params),
global_step=self.global_step)
def print_parameters(self):
for item in self.params:
print('%s: %s' % (item.name, item.get_shape()))
def step_decoder(self, generator, session, data, forward_only=False):
sentence = []
for s in data["sent"]:
sentence.append(np.concatenate((s[1:],[self.data.word2id["<pad>"]] * (self.sequence_length-len(s[1:]))), 0))
neg_data = generator.generate(session)
feed = {
self.input_x: np.concatenate((np.array(sentence), neg_data), 0) ,
self.input_y: np.concatenate((np.array([[0,1]]*np.shape(sentence)[0]), [[1,0]]*np.shape(neg_data)[0]), 0),
self.dropout_keep_prob: 1.0 if forward_only else self.args.dis_dropout_keep_prob
}
if forward_only:
output_feed = [self.loss, self.acc]
else:
output_feed = [self.loss, self.acc,
self.gradient_norm,
self.update]
return session.run(output_feed, feed)
def evaluate(self, generator, session, data, batch_size, key_name):
loss_step, acc_step = 0., 0.
times = 0
data.restart(key_name, batch_size=batch_size, shuffle=False)
batched_data = data.get_next_batch(key_name)
while batched_data != None:
outputs = self.step_decoder(generator, session, batched_data, forward_only=True)
loss_step += outputs[0]
acc_step += outputs[1]
times += 1
batched_data = data.get_next_batch(key_name)
loss_step /= times
acc_step /= times
return loss_step, acc_step
def store_checkpoint(self, sess, path, key, generator):
if key == "latest":
self.latest_saver.save(sess, path, global_step = self.global_step + generator.global_step)
else:
            self.best_saver.save(sess, path, global_step = self.global_step + generator.global_step)
def train_process(self, generator, data, sess, steps):
loss_step, acc_step, time_step, epoch = 0., 0., 0., 0
data.restart("train", batch_size=self.args.batch_size, shuffle=True)
batched_data = data.get_next_batch("train")
for epoch in range(steps):
while batched_data != None:
global_step = self.global_step.eval()
print("Discriminator Step: " + str(global_step), end='\r')
if global_step % self.args.checkpoint_steps == 0 and global_step != 0:
print("Dis epoch %d global step %d learning rate %.4f step-time %.2f loss_step %.4f acc %.4f"
% (epoch, global_step, self.learning_rate.eval(), time_step, loss_step, acc_step))
self.trainSummary(global_step // self.args.checkpoint_steps, {'loss': loss_step, 'accuracy':acc_step})
devout = self.evaluate(generator, sess, data, self.args.batch_size, "dev")
print(' dev loss: %.4f, acc: %.4f' % (devout[0], devout[1]))
self.devSummary(global_step // self.args.checkpoint_steps, {'loss': devout[0], 'accuracy':devout[1]})
testout = self.evaluate(generator, sess, data, self.args.batch_size, "test")
print(' test loss: %.4f, acc: %.4f' % (testout[0], testout[1]))
self.testSummary(global_step // self.args.checkpoint_steps, {'loss': testout[0], 'accuracy':testout[1]})
loss_step, acc_step, time_step = np.zeros((1,)), .0, .0
self.store_checkpoint(sess, self.latest_dir + '/checkpoint', "latest", generator)
start_time = time.time()
outputs = self.step_decoder(generator, sess, batched_data)
loss_step += outputs[0] / self.args.checkpoint_steps
acc_step += outputs[1] / self.args.checkpoint_steps
time_step += (time.time() - start_time) / self.args.checkpoint_steps
batched_data = data.get_next_batch("train")
data.restart("train", batch_size=self.args.batch_size, shuffle=True)
batched_data = data.get_next_batch("train") | 47.982833 | 161 | 0.613953 |
4846a9a99a1ae6987e3846e0d58c8f742772c07c | 209 | py | Python | xls2csv.py | thcrock/school-report-cards | 46eab255d65fb37d3216e387ac324edae530bc9e | [
"MIT"
] | 12 | 2016-05-09T20:00:39.000Z | 2021-09-09T00:07:53.000Z | xls2csv.py | thcrock/school-report-cards | 46eab255d65fb37d3216e387ac324edae530bc9e | [
"MIT"
] | 7 | 2016-01-11T14:30:03.000Z | 2018-06-12T14:52:13.000Z | xls2csv.py | thcrock/school-report-cards | 46eab255d65fb37d3216e387ac324edae530bc9e | [
"MIT"
] | 2 | 2016-06-18T08:53:02.000Z | 2017-04-01T16:44:35.000Z | import xlrd
import csv
import sys
# Dump the first worksheet of the .xls workbook given as argv[1] to stdout as CSV.
# Usage: python xls2csv.py input.xls > output.csv
with xlrd.open_workbook(sys.argv[1]) as wb:
sh = wb.sheet_by_index(0)
c = csv.writer(sys.stdout)
for r in range(sh.nrows):
c.writerow(sh.row_values(r))
| 20.9 | 43 | 0.665072 |
7da7bc269c7a0c47aa900a11781abdbe49ecd3fc | 6,176 | py | Python | manim_sandbox/utils/mobjects/MyTest.py | pu17/manim_sandor | 1dc213db45e85df9d56de800c59f56388d5d1742 | [
"MIT"
] | null | null | null | manim_sandbox/utils/mobjects/MyTest.py | pu17/manim_sandor | 1dc213db45e85df9d56de800c59f56388d5d1742 | [
"MIT"
] | null | null | null | manim_sandbox/utils/mobjects/MyTest.py | pu17/manim_sandor | 1dc213db45e85df9d56de800c59f56388d5d1742 | [
"MIT"
] | null | null | null | from manimlib import *
class MyText_old(Tex):
CONFIG = {
'default_font': '思源黑体',
}
def __init__(self, *tex_strings, **kwargs):
self.tex_list = tex_strings
TexMobject.__init__(self, *tex_strings, **kwargs)
self.not_replace_texs = ['\\over', ]
self.new_font_texs = VGroup()
def reset_tex_with_font(self):
self.new_font_texs = VGroup()
def get_color_by_tex(self, tex, **kwargs):
parts = self.get_parts_by_tex(tex, **kwargs)
colors = []
for part in parts:
colors.append(part.get_color())
return colors[0]
def set_font_by_tex(self, tex, font, new_tex=None, color=None, **kwargs):
parts_to_font = self.get_parts_by_tex(tex, **kwargs)
if color == None:
color = self.get_color_by_tex(tex)
if new_tex != None:
tex = new_tex
for part in parts_to_font:
tex_new = Text(tex, font=font, color=color)
tex_new.set_height(part.get_height())
# tex_new.set_width(part.get_width())
tex_new.move_to(part)
self.new_font_texs.add(tex_new)
def set_font_by_tex_to_font_map(self, texs_to_font_map, texs_replace_map, **kwargs):
for texs, font in list(texs_to_font_map.items()):
try:
# If the given key behaves like tex_strings
if texs in texs_replace_map:
self.set_font_by_tex(texs, font, new_tex=texs_replace_map[texs], **kwargs)
else:
self.set_font_by_tex(texs, font, **kwargs)
except TypeError:
# If the given key is a tuple
for tex in texs:
if tex in texs_replace_map:
self.set_font_by_tex(texs, font, new_tex=texs_replace_map[texs], **kwargs)
else:
self.set_font_by_tex(texs, font, **kwargs)
def create_default_font_dict(self):
self.default_font_dict = {}
for tex in self.tex_strings:
if not tex in self.not_replace_texs:
self.default_font_dict[tex] = self.default_font
return self.default_font_dict
def get_new_font_texs(self, texs_replace_map, **kwargs):
texs_to_font_map = self.create_default_font_dict()
self.set_font_by_tex_to_font_map(texs_to_font_map, texs_replace_map, **kwargs)
return self.new_font_texs
class MyText(Tex):
CONFIG = {
'default_font': 'SWGothe',
'tex_scale_factor': 1,
}
def __init__(self, *tex_strings, **kwargs):
self.tex_list = tex_strings
TexMobject.__init__(self, *tex_strings, **kwargs)
self.new_font_texs = VGroup()
def reset_tex_with_font(self):
self.new_font_texs = VGroup()
def get_color_by_tex(self, tex, **kwargs):
parts = self.get_parts_by_tex(tex, **kwargs)
colors = []
for part in parts:
colors.append(part.get_color())
return colors[0]
def get_new_font_texs(self, replace_dict):
for i in range(len(self.tex_strings)):
tex = self.tex_strings[i]
color=self.get_color_by_tex(tex)
if tex in replace_dict:
tex = replace_dict[tex]
tex_new = Text(tex, font=self.default_font, color=color)
tex_new.set_height(self[i].get_height())
if tex == '-' or tex == '=':
tex_new.set_width(self[i].get_width(), stretch=True)
tex_new.scale(self.tex_scale_factor)
tex_new.move_to(self[i])
self.new_font_texs.add(tex_new)
return self.new_font_texs
class MyTitle(Text):
# similar to the 'Title' class in manim,
# use Text to replace TextMobject so that we can change font
CONFIG = {
"scale_factor": 1,
"include_underline": True,
"underline_width": FRAME_WIDTH - 2,
# This will override underline_width
"match_underline_width_to_text": False,
"underline_buff": MED_SMALL_BUFF,
}
def __init__(self, *text, **kwargs):
Text.__init__(self, *text, **kwargs)
self.scale(self.scale_factor)
self.to_edge(UP)
if self.include_underline:
underline = Line(LEFT, RIGHT)
underline.next_to(self, DOWN, buff=self.underline_buff)
if self.match_underline_width_to_text:
underline.match_width(self)
else:
underline.set_width(self.underline_width)
self.add(underline)
self.underline = underline
class Test_mytext(Scene):
def construct(self):
color_dict = {'R': PINK, 'd': YELLOW, 'r': ORANGE, '\\theta': BLUE, '\\over': WHITE,
't': BLUE, 'e': GREEN, 'i': RED, '\\sin': WHITE, '\\cos': WHITE}
font_list = ['Comic Sans MS', '庞门正道标题体', 'Consolas', 'SWGothe', 'Rough___Dusty_Chalk',
'SWScrps', '新蒂小丸子体']
origin_formula = TexMobject('f', '(', 't', ')', '=', 'x', '(', 't', ')', '+', 'y', '(', 't', ')', 'i', '=',
'(', 'R', '-', 'r', ')', 'e^{', 'i', 't}', '+', 'd', 'e^{', '-', 'i', '{R', '-',
'r', '\\over', 'r}', 't}').scale(1)\
.set_color_by_tex_to_color_map(color_dict).to_corner(LEFT * 2 + UP * 1.5)
formulas = VGroup(origin_formula)
for i in range(len(font_list)):
formula_i = MyText('f', '(', 't', ')', '=', 'x', '(', 't', ')', '+', 'y', '(', 't', ')', 'i', '=',
'(', 'R', '-', 'r', ')', 'e^{', 'i', 't}', '+', 'd', 'e^{', '-', 'i', '{R', '-',
'r', '\\over', 'r}', 't}', default_font=font_list[i], tex_scale_factor=0.75)
formula_i.set_color_by_tex_to_color_map(color_dict)
replace_dict = {'e^{': 'e', 't}': 't', '{R': 'R', 'r}': 'r', '\\over': '-'}
new_formula = formula_i.get_new_font_texs(replace_dict)
new_formula.to_corner(LEFT * 2 + UP * 1.5).shift(DOWN * 0.8 * (i+1))
formulas.add(new_formula)
self.add(formulas)
self.wait(5)
| 37.889571 | 115 | 0.552623 |
b409ad97f3135c9ef3cd088addfcbe02b5fc3e0d | 8,856 | py | Python | GAN/ebgan64.py | prannayk/thedeeplearning | c5c45c17edb36b31fe25461850974d8310350d80 | [
"MIT"
] | 4 | 2017-08-23T19:28:41.000Z | 2020-02-13T19:04:28.000Z | GAN/ebgan64.py | prannayk/thedeeplearning | c5c45c17edb36b31fe25461850974d8310350d80 | [
"MIT"
] | null | null | null | GAN/ebgan64.py | prannayk/thedeeplearning | c5c45c17edb36b31fe25461850974d8310350d80 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
import scipy.misc
import sys
import time
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)
class EBGAN():
def __init__ (self, batch_size = 50, image_shape = [28,28,1], embedding_size = 128, frames=8, num_class =10, dim1 = 1024, dim2 = 128,
dim3 = 64, dim_channel = 1, dim4=16, learning_rate_1=sys.argv[1], learning_rate_2=sys.argv[2], momentum=sys.argv[3],
scale=10.0):
self.batch_size = batch_size
self.image_shape = image_shape
self.embedding_size = embedding_size
self.num_class = num_class
self.dim1 = dim1
self.dim2 = dim2
self.dim3 = dim3
self.dim4 = dim4
self.learning_rate_1 = float(learning_rate_1)
self.learning_rate_2 = float(learning_rate_2)
self.momentum = float(momentum)
self.scale = scale
self.frames = frames
self.dim_1 = self.image_shape[0]
self.dim_2 = self.image_shape[0] // 2
self.dim_4 = self.image_shape[0] // 4
self.dim_8 = self.image_shape[0] // 8
self.dim_channel = dim_channel
self.device = "/gpu:0"
self.image_size = reduce(lambda x,y : x*y, image_shape)
self.initializer = tf.random_normal_initializer(stddev=0.02)
def batch_normalize(self, X, eps=1e-6,flag=False):
if flag :
if X.get_shape().ndims == 4:
mean, vari = tf.nn.moments(X, [0,1,2], keep_dims=True)
return tf.nn.batch_normalization(X,mean, vari, variance_epsilon=eps)
elif X.get_shape().ndims == 2:
mean, vari = tf.nn.moments(X, 0, keep_dims=True)
return tf.nn.batch_normalization(X, mean, vari, variance_epsilon=eps)
if X.get_shape().ndims == 4 :
mean = tf.reduce_mean(X,[0,1,2])
stddev = tf.reduce_mean(tf.square(X-mean),[0,1,2])
X = (X - mean)/tf.sqrt(stddev + eps)
elif X.get_shape().ndims == 2:
mean = tf.reduce_mean(X,[0])
stddev = tf.reduce_mean(tf.square(X-mean),[0])
X = (X - mean)/tf.sqrt(stddev + eps)
else:
raise NoImplementationForSuchDimensions
return X
def lrelu(self, X):
return LeakyRelu(X)
def generate(self, embedding, classes, scope):
with tf.device(self.device):
ystack = tf.reshape(classes, [self.batch_size,1, 1, self.num_class])
embedding = tf.concat(axis=1, values=[embedding, classes])
h1 = tf.layers.dense(embedding, units=self.dim1, activation=None,
kernel_initializer=self.initializer,
name='dense_1', reuse=scope.reuse)
h1_relu = tf.nn.relu(self.normalize(h1))
h1_concat = tf.concat(axis=1, values=[h1_relu, classes])
h2 = tf.layers.dense(h1_concat, units=self.dim_8*self.dim_8*self.dim2,
activation=None, kernel_initializer=self.initializer,
name='dense_2', reuse=scope.reuse)
h2_relu = tf.nn.relu(self.normalize(h2))
h2_concat = tf.concat(axis=3,
values=[tf.reshape(h2_relu, shape=[self.batch_size,self.dim_8,self.dim_8,self.dim2]),
ystack*tf.ones(shape=[self.batch_size, self.dim_8, self.dim_8,
self.num_class])])
h3 = tf.layers.conv2d_transpose(inputs=h2_concat, filters = 2*self.dim3,
kernel_size=[4,4], strides=[2,2], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name='conv_1')
h3_relu = tf.nn.relu(self.normalize(h3,flag=True))
# print(h3.get_shape())
h3_concat = tf.concat(axis=3,
values=[tf.reshape(h3_relu, shape=[self.batch_size,self.dim_4,self.dim_4,2*self.dim3]),
ystack*tf.ones(shape=[self.batch_size, self.dim_4, self.dim_4, self.num_class])])
h4 = tf.layers.conv2d_transpose(inputs=h3_concat, filters = 2*self.dim4,
kernel_size=[4,4], strides=[2,2], padding='SAME', activation=tf.nn.relu,
kernel_initializer=self.initializer,
reuse=scope.reuse,name="conv_2")
h4_relu = tf.nn.relu(self.normalize(h4,flag=True))
h4_concat = tf.concat(axis=3,
values=[tf.reshape(h4_relu, shape=[self.batch_size,self.dim_2,self.dim_2,2*self.dim4]),
ystack*tf.ones(shape=[self.batch_size, self.dim_2, self.dim_2, self.num_class])])
h5 = tf.layers.conv2d_transpose(inputs=h4_concat, filters = 4*self.dim4,
kernel_size=[4,4], strides=[2,2], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name="conv_3")
h5_relu = tf.nn.relu(self.normalize(h5, flag=True))
h5_concat = tf.concat(axis=3,
values=[h5_relu, ystack*tf.ones(shape=[self.batch_size, self.dim_1, self.dim_1, self.num_class])])
h6 = tf.layers.conv2d_transpose(inputs=h5_concat, filters = self.dim_channel*self.frames,
kernel_size=[5,5], strides=[1,1], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse, name="conv_4")
return tf.nn.sigmoid(h6)
def encoder_image(self, image, scope):
with tf.device(self.device):
LeakyReLU = tf.contrib.keras.layers.LeakyReLU(alpha=0.2)
image_proc = self.normalize(image,flag=True)
h1 = tf.layers.conv2d(image_proc, filters=48, kernel_size=[4,4],
strides=[2,2], padding='SAME',
activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse, name="conv_1")
h1_relu = self.normalize(LeakyReLU(h1))
h2 = tf.layers.conv2d(h1_relu, filters=64, kernel_size=[4,4],
strides=[2,2], padding='SAME',
activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse, name="conv_2")
h2_relu = self.normalize(LeakyReLU(h2))
h3 = tf.layers.conv2d(h2_relu, filters=16, kernel_size=[4,4],
strides=[2,2], padding='SAME',
activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse, name="conv_3")
h3_relu = self.normalize(LeakyReLU(h3))
h3_reshape = tf.reshape(h3_relu, shape=[self.batch_size, self.dim_8[0]*self.dim_8[1]*16])
h4 = tf.layers.dense(h3_reshape, units=self.embedding_size+self.num_class_image,
activation=None,
kernel_initializer=self.initializer,
name='dense_2',
reuse=scope.reuse)
return h4 # no activation over last layer of h4
def decoder_image(self, embedding, zvalue, scope):
with tf.device(self.device):
ystack = tf.reshape(zvalue, shape=[self.batch_size, 1,1 , self.zdimension])
yneed_1 = ystack*tf.ones([self.batch_size, self.dim_4[0], self.dim_4[1], self.zdimension])
yneed_2 = ystack*tf.ones([self.batch_size, self.dim_2[0], self.dim_2[1], self.zdimension])
yneed_3 = ystack*tf.ones([self.batch_size, self.dim_8[0], self.dim_8[1], self.zdimension])
embedding = tf.concat(axis=1, values=[embedding, zvalue])
h1 = tf.layers.dense(embedding, units=1280, activation=None,
kernel_initializer=self.initializer,
name='dense_1', reuse=scope.reuse)
h1_relu = tf.nn.relu(self.normalize(h1))
h1_reshape = tf.reshape(h1_relu, shape=[self.batch_size, self.dim_8[0], self.dim_8[1], 64])
h1_concat = tf.concat(axis=3, values=[h1_reshape,yneed_3])
h2 = tf.layers.conv2d_transpose(inputs=h1_concat, filters = 64,
kernel_size=[5,5], strides=[2,2], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name='conv_1')
h2_relu = tf.nn.relu(self.normalize(h2))
h2_concat = tf.concat(axis=3, values=[h2_relu, yneed_1])
h3 = tf.layers.conv2d_transpose(inputs=h2_concat, filters = 32,
kernel_size=[5,5], strides=[2,2], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name='conv_2')
h3_relu = tf.nn.relu(self.normalize(h3))
h3_concat = tf.concat(axis=3, values=[h3_relu, yneed_2])
h4 = tf.layers.conv2d_transpose(inputs=h3_concat, filters = self.dim_channel,
kernel_size=[5,5], strides=[2,2], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name='conv_3')
return tf.nn.sigmoid(h4)
def discriminate_image(self, image, zvalue, scope):
with tf.device(self.device):
with tf.variable_scope("encoder") as scope:
embedding = self.encoder_image(image, scope)
with tf.variable_scope("decoder") as scope:
                image_reconstr = self.decoder_image(embedding, zvalue, scope)
return tf.sqrt(tf.reduce_mean(tf.square(image - image_reconstr)))
def build_mode(self):
with tf.device(self.device):
embedding = tf.placeholder(tf.float32, [self.batch_size, self.embedding_size])
classes = tf.placeholder(tf.float32, [self.batch_size,self.num_class])
r_image = tf.placeholder(tf.float32,[self.batch_size] + self.image_shape)
real_image = tf.reshape(r_image,[self.batch_size] + self.image_shape)
with tf.variable_scope("generator") as scope:
h4 = self.generate(embedding,classes,scope)
g_image = h4
with tf.variable_scope("discriminator") as scope:
                real_value = self.discriminate_image(real_image, classes, scope)
with tf.variable_scope("discriminator") as scope:
scope.reuse_variables()
                fake_value = self.discriminate_image(g_image, classes, scope)
d_cost = real_value - fake_value
g_cost = fake_value
return embedding, classes, r_image, d_cost, g_cost, fake_value, real_value
| 47.106383 | 134 | 0.715221 |
30113453fe86b1af4400f65820ff4eb1e84b466b | 235,109 | py | Python | cpplint.py | michalliu/chromium-depot_tools | 43e9158650446b1153cb7ac119f6e6d95bf5c0c3 | [
"BSD-3-Clause"
] | null | null | null | cpplint.py | michalliu/chromium-depot_tools | 43e9158650446b1153cb7ac119f6e6d95bf5c0c3 | [
"BSD-3-Clause"
] | null | null | null | cpplint.py | michalliu/chromium-depot_tools | 43e9158650446b1153cb7ac119f6e6d95bf5c0c3 | [
"BSD-3-Clause"
] | 1 | 2019-05-07T22:56:00.000Z | 2019-05-07T22:56:00.000Z | #!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through liner.
"linelength" allows to specify the allowed line length for the project.
CPPLINT.cfg has an effect on files in the same directory and all
sub-directories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all sub-directories.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/indentation_namespace',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# These headers are excluded from [build/include] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
# uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
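# After the two loops above, the table maps a macro name and a comparison
# operator to the corresponding check macro. A small illustration:
#   _CHECK_REPLACEMENT['CHECK']['==']        -> 'CHECK_EQ'
#   _CHECK_REPLACEMENT['EXPECT_TRUE']['<']   -> 'EXPECT_LT'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] -> 'EXPECT_NE'  (inverted sense)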
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
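# For illustration, the pattern matches the keyword in lines such as
# "if (a and b) {" or "x = (p or q);", capturing 'and' / 'or' so the checker
# can suggest '&&' / '||' instead.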
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
if matched:
if matched.group(1):
suppressed_line = linenum + 1
else:
suppressed_line = linenum
category = matched.group(2)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(suppressed_line)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(suppressed_line)
else:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
def ResetNolintSuppressions():
"""Resets the set of NOLINT suppressions to empty."""
_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment.
"""
return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
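# A minimal sketch, not called by cpplint itself, of how the NOLINT helpers
# above fit together: parsing a NOLINT comment records a suppression, which
# IsErrorSuppressedByNolint then honors for that line. The function name and
# sample arguments are purely illustrative.
def _ExampleNolintRoundTrip():
  """Returns True: the runtime/int error on line 12 is suppressed."""
  ResetNolintSuppressions()
  def _IgnoreError(filename, linenum, category, confidence, message):
    pass
  ParseNolintSuppressions('foo.cc', 'long x;  // NOLINT(runtime/int)', 12,
                          _IgnoreError)
  return IsErrorSuppressedByNolint('runtime/int', 12)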
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
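# For illustration: Match anchors at the start of the string, while Search
# scans the whole string, e.g. Match(r'\d+', 'abc123') is None but
# Search(r'\d+', 'abc123') finds '123'. Both share _regexp_compile_cache.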
class _IncludeState(object):
"""Tracks line numbers for includes, and the order in which includes appear.
  include_list contains a list of lists of (header, line number) pairs.
  It's a list of lists rather than just one flat list to make it
easier to update across preprocessor boundaries.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
self.include_list = [[]]
self.ResetSection('')
def FindHeader(self, header):
"""Check if a header has already been included.
Args:
header: header to check.
Returns:
Line number of previous occurrence, or -1 if the header has not
been seen before.
"""
for section_list in self.include_list:
for f in section_list:
if f[0] == header:
return f[1]
return -1
def ResetSection(self, directive):
"""Reset section checking for preprocessor directive.
Args:
directive: preprocessor directive (e.g. "if", "else").
"""
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
# Update list of includes. Note that we never pop from the
# include list.
if directive in ('if', 'ifdef', 'ifndef'):
self.include_list.append([])
elif directive in ('else', 'elif'):
self.include_list[-1] = []
def SetLastHeader(self, header_path):
self._last_header = header_path
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
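  # For illustration, 'Foo-inl.h' canonicalizes to 'foo.h' and 'bar-baz.h' to
  # 'bar_baz.h', so '-inl' suffixes and '-' vs '_' do not affect the ordering
  # comparison.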
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
header_path: Canonicalized header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
# If previous section is different from current section, _last_header will
# be reset to empty string, so it's always less than current header.
#
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
return False
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
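# A minimal sketch, not used by the linter itself, of how _IncludeState flags
# out-of-order includes: a C system header may not follow a C++ system header.
# The function name is purely illustrative.
def _ExampleIncludeOrder():
  """Returns the non-empty message produced for the out-of-order C header."""
  state = _IncludeState()
  assert state.CheckNextIncludeOrder(_C_SYS_HEADER) == ''
  assert state.CheckNextIncludeOrder(_CPP_SYS_HEADER) == ''
  # Going back to a C system header after a C++ one is reported:
  return state.CheckNextIncludeOrder(_C_SYS_HEADER)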
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
# backup of filter list. Used to restore the state after each file.
self._filters_backup = self.filters[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
self.AddFilters(filters)
def AddFilters(self, filters):
""" Adds more filters to the existing list of error-message filters. """
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def BackupFilters(self):
""" Saves the current filter list to backup storage."""
self._filters_backup = self.filters[:]
def RestoreFilters(self):
""" Restores filters previously backed up."""
self.filters = self._filters_backup[:]
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
def _AddFilters(filters):
"""Adds more filter overrides.
Unlike _SetFilters, this function does not reset the current list of filters
available.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.AddFilters(filters)
def _BackupFilters():
""" Saves the current filter list to backup storage."""
_cpplint_state.BackupFilters()
def _RestoreFilters():
""" Restores filters previously backed up."""
_cpplint_state.RestoreFilters()
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
      # base_trigger => 0, 2x => 1, 4x => 2, 8x => 3, ... (capped at 5 below)
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo(object):
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git")) and
not os.path.exists(os.path.join(root_dir, ".hg")) and
not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
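# For illustration, if the active filters were ['-whitespace',
# '+whitespace/braces'], a 'whitespace/braces' error would still be printed
# (the later '+' wins, left to right) while 'whitespace/indent' stays
# filtered out.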
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removal so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side;
# if this doesn't work, we try the left side, but only if there's a
# non-word character on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
_RE_PATTERN_C_COMMENTS + r'\s+|' +
r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
_RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
    line: a partial line of code, from position 0 to n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
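# For illustration, IsCppString('x = "abc') is True (the quote is still open),
# while IsCppString('x = "abc"') is False; escaped quotes are not counted.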
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = '""'
# Look for beginning of a raw string, and replace them with
# empty strings. This is done in a loop to handle multiple raw
# strings on the same line.
while delimiter is None:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if matched:
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
else:
break
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
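# For illustration, a raw string that ends on the same line collapses in
# place: CleanseRawStrings(['const char* s = R"(hi)";']) returns
# ['const char* s = "";'], while the interior lines of a multi-line raw
# string are each replaced by '""'.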
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw_lines member contains all the lines without processing.
  All three members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if _RE_PATTERN_INCLUDE.match(elided):
return elided
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
# Replace quoted strings and digit separators. Both single quotes
# and double quotes are processed in the same loop, otherwise
# nested quotes wouldn't work.
collapsed = ''
while True:
# Find the first quote character
match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
if not match:
collapsed += elided
break
head, quote, tail = match.groups()
if quote == '"':
# Collapse double quoted strings
second_quote = tail.find('"')
if second_quote >= 0:
collapsed += head + '""'
elided = tail[second_quote + 1:]
else:
# Unmatched double quote, don't bother processing the rest
# of the line since this is probably a multiline string.
collapsed += elided
break
else:
# Found single quote, check nearby text to eliminate digit separators.
#
# There is no special handling for floating point here, because
# the integer/fractional/exponent parts would all be parsed
# correctly as long as there are digits on both sides of the
# separator. So we are fine as long as we don't see something
# like "0.'3" (gcc 4.9.0 will not allow this literal).
if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
collapsed += head + match_literal.group(1).replace("'", '')
elided = match_literal.group(2)
else:
second_quote = tail.find('\'')
if second_quote >= 0:
collapsed += head + "''"
elided = tail[second_quote + 1:]
else:
# Unmatched single quote
collapsed += elided
break
return collapsed
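# For illustration, _CollapseStrings('printf("%d\n", i);') returns
# 'printf("", i);': string and character literals are reduced to empty ""
# and '' pairs so later checks are not confused by their contents.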
def FindEndOfExpressionInLine(line, startpos, stack):
"""Find the position just after the end of current parenthesized expression.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
stack: nesting stack at startpos.
Returns:
On finding matching end: (index just after matching end, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at end of this line)
"""
for i in xrange(startpos, len(line)):
char = line[i]
if char in '([{':
# Found start of parenthesized expression, push to expression stack
stack.append(char)
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
if stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
# operator<, don't add to stack
continue
else:
# Tentative start of template argument list
stack.append('<')
elif char in ')]}':
# Found end of parenthesized expression.
#
# If we are currently expecting a matching '>', the pending '<'
# must have been an operator. Remove them from expression stack.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
if ((stack[-1] == '(' and char == ')') or
(stack[-1] == '[' and char == ']') or
(stack[-1] == '{' and char == '}')):
stack.pop()
if not stack:
return (i + 1, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == '>':
# Found potential end of template argument list.
# Ignore "->" and operator functions
if (i > 0 and
(line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
continue
# Pop the stack if there is a matching '<'. Otherwise, ignore
# this '>' since it must be an operator.
if stack:
if stack[-1] == '<':
stack.pop()
if not stack:
return (i + 1, None)
elif char == ';':
      # Found something that looks like the end of a statement. If we are
      # currently expecting a '>', the matching '<' must have been an
      # operator, since template argument lists should not contain statements.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
# Did not find end of expression or unbalanced parentheses on this line
return (-1, stack)
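# For illustration, FindEndOfExpressionInLine('foo(bar(baz))', 3, []) returns
# (13, None): position 3 is the opening '(' and 13 is just past its matching
# ')'. An unfinished line such as 'foo(bar(' returns (-1, ['(', '(']).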
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
TODO(unknown): cpplint spends a fair bit of time matching parentheses.
Ideally we would want to index all opening and closing parentheses once
and have CloseExpression be just a simple lookup, but due to preprocessor
tricks, this is not so easy.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
return (line, clean_lines.NumLines(), -1)
# Check first line
(end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while stack and linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find end of expression before end of file, give up
return (line, clean_lines.NumLines(), -1)
def FindStartOfExpressionInLine(line, endpos, stack):
"""Find position at the matching start of current expression.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
stack: nesting stack at endpos.
Returns:
On finding matching start: (index at matching start, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at beginning of this line)
"""
i = endpos
while i >= 0:
char = line[i]
if char in ')]}':
# Found end of expression, push to expression stack
stack.append(char)
elif char == '>':
# Found potential end of template argument list.
#
# Ignore it if it's a "->" or ">=" or "operator>"
if (i > 0 and
(line[i - 1] == '-' or
Match(r'\s>=\s', line[i - 1:]) or
Search(r'\boperator\s*$', line[0:i]))):
i -= 1
else:
stack.append('>')
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
i -= 1
else:
# If there is a matching '>', we can pop the expression stack.
# Otherwise, ignore this '<' since it must be an operator.
if stack and stack[-1] == '>':
stack.pop()
if not stack:
return (i, None)
elif char in '([{':
# Found start of expression.
#
# If there are any unmatched '>' on the stack, they must be
# operators. Remove those.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
if ((char == '(' and stack[-1] == ')') or
(char == '[' and stack[-1] == ']') or
(char == '{' and stack[-1] == '}')):
stack.pop()
if not stack:
return (i, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == ';':
      # Found something that looks like the end of a statement. If we are
      # currently expecting a '<', the matching '>' must have been an
      # operator, since template argument lists should not contain statements.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
i -= 1
return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if line[pos] not in ')}]>':
return (line, 0, -1)
# Check last line
(start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while stack and linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find start of expression before beginning of file, give up
return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
"""Return the number of leading spaces in line.
Args:
line: A string to check.
Returns:
An integer count of leading spaces, possibly zero.
"""
indent = Match(r'^( *)\S', line)
if indent:
return len(indent.group(1))
else:
return 0
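# For illustration, GetIndentLevel('    int x;') is 4, while a tab-indented
# line yields 0, since only leading spaces are counted.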
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
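# For illustration, assuming src/.git exists (as in the usage text above),
# src/chrome/browser/ui/browser.h maps to the guard
# CHROME_BROWSER_UI_BROWSER_H_ when no --root flag is given.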
def CheckForHeaderGuard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# Don't check for header guards if there are error suppression
# comments somewhere in this file.
#
# Because this is silencing a warning for a nonexistent line, we
# only support the very specific NOLINT(build/header_guard) syntax,
# and not the general NOLINT or NOLINT(*) syntax.
for i in lines:
if Search(r'//\s*NOLINT\(build/header_guard\)', i):
return
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = None
ifndef_linenum = 0
define = None
endif = None
endif_linenum = 0
for linenum, line in enumerate(lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
if not define:
error(filename, 0, 'build/header_guard', 5,
'No #define header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
if define != ifndef:
error(filename, 0, 'build/header_guard', 5,
'#ifndef and #define don\'t match, suggested CPP variable is: %s' %
cppvar)
return
if endif != ('#endif // %s' % cppvar):
error_level = 0
if endif != ('#endif // %s' % (cppvar + '_')):
error_level = 5
ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
error)
error(filename, endif_linenum, 'build/header_guard', error_level,
'#endif line should be "#endif // %s"' % cppvar)
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
# _rand(); // false positive due to substring match.
# ->rand(); // some member function rand().
# ACMRandom rand(seed); // some variable named rand.
# ISAACRandom rand(); // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name. This eliminates constructors and
# member function calls.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
('strtok(', 'strtok_r(',
_UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
  Much code was originally written without consideration for
  multi-threading. Engineers also rely on their old experience; they
  learned POSIX before the threading extensions were added. These tests
  guide engineers toward thread-safe functions (when using POSIX
  directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
# Additional pattern matching check to confirm that this is the
# function we are looking for
if Search(pattern, line):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_func +
'...) instead of ' + single_thread_func +
'...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
  For example, the following function:
void increment_counter(int* count) {
*count++;
}
  is invalid, because it effectively does count++, moving the pointer, and
  should be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
if Search(r'^#define', clean_lines[linenum]):
return True
if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
return True
return False
def IsForwardClassDeclaration(clean_lines, linenum):
return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, seen_open_brace):
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
self.check_namespace_indentation = False
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def IsBlockInfo(self):
"""Returns true if this block is a _BlockInfo.
This is convenient for verifying that an object is an instance of
a _BlockInfo, but not an instance of any of the derived classes.
Returns:
True for this class, False for derived classes.
"""
return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
"""Stores information about an 'extern "C"' block."""
def __init__(self):
_BlockInfo.__init__(self, True)
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, False)
self.name = name
self.starting_linenum = linenum
self.is_derived = False
self.check_namespace_indentation = True
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# Check that closing brace is aligned with beginning of the class.
# Only do this if the closing brace is indented by only whitespaces.
# This means we will not check single-line class definitions.
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, False)
self.name = name or ''
self.starting_linenum = linenum
self.check_namespace_indentation = True
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
    # Check how many lines are enclosed in this namespace. Don't issue
# warning for missing namespace comments if there aren't enough
# lines. However, do apply checks if there is already an end of
# namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
# Note that we accept C style "/* */" comments for terminating
# namespaces, so that code that terminate namespaces inside
# preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
# Besides these, we don't accept anything else, otherwise we might
# get false negatives when existing comment is a substring of the
# expected namespace.
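    # For example, with self.name == 'foo', all of the following are accepted:
    #   }  // namespace foo
    #   }  /* namespace foo */
    #   }  // end of namespace foo.
    # while a mismatched comment such as "}  // foo bar" is flagged once the
    # namespace is long enough to require a closing comment.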
if self.name:
# Named namespace
if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
# If "// namespace anonymous" or "// anonymous namespace (more text)",
# mention "// anonymous namespace" as an acceptable form
if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"'
' or "// anonymous namespace"')
else:
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Top of the previous stack before each Update().
#
# Because the nesting_stack is updated at the end of each line, we
# had to do some convoluted checks to find out what is the current
# scope at the beginning of the line. This check is simplified by
# saving the previous top of nesting stack.
#
# We could save the full stack, but we only need the top. Copying
# the full nesting stack would slow down cpplint by ~10%.
self.previous_stack_top = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def InExternC(self):
"""Check if we are currently one level inside an 'extern "C"' block.
Returns:
True if top of the stack is an extern block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ExternCInfo)
def InClassDeclaration(self):
"""Check if we are currently one level inside a class or struct declaration.
Returns:
True if top of the stack is a class/struct, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ClassInfo)
def InAsmBlock(self):
"""Check if we are currently one level inside an inline ASM block.
Returns:
True if the top of the stack is a block containing inline ASM.
"""
return self.stack and self.stack[-1].inline_asm != _NO_ASM
def InTemplateArgumentList(self, clean_lines, linenum, pos):
"""Check if current position is inside template argument list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: position just after the suspected template argument.
Returns:
True if (linenum, pos) is inside template arguments.
"""
while linenum < clean_lines.NumLines():
# Find the earliest character that might indicate a template argument
line = clean_lines.elided[linenum]
match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
if not match:
linenum += 1
pos = 0
continue
token = match.group(1)
pos += len(match.group(0))
# These things do not look like template argument list:
# class Suspect {
# class Suspect x; }
if token in ('{', '}', ';'): return False
# These things look like template argument list:
# template <class Suspect>
# template <class Suspect = default_value>
# template <class Suspect[]>
# template <class Suspect...>
if token in ('>', '=', '[', ']', '.'): return True
# Check if token is an unmatched '<'.
# If not, move on to the next character.
if token != '<':
pos += 1
if pos >= len(line):
linenum += 1
pos = 0
continue
# We can't be sure if we just find a single '<', and need to
# find the matching '>'.
(_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
if end_pos < 0:
# Not sure if template argument list or syntax error in file
return False
linenum = end_line
pos = end_pos
return False
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
# TODO(unknown): Update() is too long, but we will refactor later.
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remember top of the previous nesting stack.
#
# The stack is always pushed/popped and not modified in place, so
# we can just do a shallow copy instead of copy.deepcopy. Using
# deepcopy would slow down cpplint by ~28%.
if self.stack:
self.previous_stack_top = self.stack[-1]
else:
self.previous_stack_top = None
# Update pp_stack
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match start of namespace. The "\b\s*" below catches namespace
      # declarations even if it isn't followed by whitespace; this
# is so that we don't confuse our namespace checker. The
# missing spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
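    # For the example above, the regexp below captures "class" as group(2) and
    # "Object" as group(3); the LOCKABLE/API decorators are absorbed by the
    # (?:[A-Z_]+\s+)* part, and group(4) is whatever follows the class name.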
class_decl_match = Match(
r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
r'(.*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
# We do not want to accept classes that are actually template arguments:
# template <class Ignore1,
# class Ignore2 = Default<Args>,
# template <Args> class Ignore3>
# void Function() {};
#
# To avoid template argument cases, we scan forward and look for
# an unmatched '>'. If we see one, assume we are inside a
# template argument list.
end_declaration = len(class_decl_match.group(1))
if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
self.stack.append(_ClassInfo(
class_decl_match.group(3), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(4)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
        # If namespace or class hasn't seen an opening brace yet, mark
# namespace/class head as complete. Push a new block onto the
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
elif Match(r'^extern\s*"[^"]*"\s*\{', line):
self.stack.append(_ExternCInfo())
else:
self.stack.append(_BlockInfo(True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
        # Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style. Also look for
# non-single-argument constructors which are also technically valid, but
# strongly suggest something is wrong.
explicit_constructor_match = Match(
r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
r'\(((?:[^()]|\([^()]*\))*)\)'
% re.escape(base_classname),
line)
if explicit_constructor_match:
is_marked_explicit = explicit_constructor_match.group(1)
if not explicit_constructor_match.group(2):
constructor_args = []
else:
constructor_args = explicit_constructor_match.group(2).split(',')
# collapse arguments so that commas in template parameter lists and function
# argument parameter lists don't split arguments in two
i = 0
while i < len(constructor_args):
constructor_arg = constructor_args[i]
while (constructor_arg.count('<') > constructor_arg.count('>') or
constructor_arg.count('(') > constructor_arg.count(')')):
constructor_arg += ',' + constructor_args[i + 1]
del constructor_args[i + 1]
constructor_args[i] = constructor_arg
i += 1
defaulted_args = [arg for arg in constructor_args if '=' in arg]
noarg_constructor = (not constructor_args or # empty arg list
# 'void' arg specifier
(len(constructor_args) == 1 and
constructor_args[0].strip() == 'void'))
onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
not noarg_constructor) or
# all but at most one arg defaulted
(len(constructor_args) >= 1 and
not noarg_constructor and
len(defaulted_args) >= len(constructor_args) - 1))
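    # For example, "Foo(int x, int y = 0)" has one defaulted argument out of
    # two, so it still counts as callable with one argument, while
    # "Foo(std::map<int, int> m)" stays a single argument because the comma
    # inside <> was re-joined by the loop above.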
initializer_list_constructor = bool(
onearg_constructor and
Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
copy_constructor = bool(
onearg_constructor and
Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), constructor_args[0].strip()))
if (not is_marked_explicit and
onearg_constructor and
not initializer_list_constructor and
not copy_constructor):
if defaulted_args:
error(filename, linenum, 'runtime/explicit', 5,
'Constructors callable with one argument '
'should be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 5,
'Single-parameter constructors should be marked explicit.')
elif is_marked_explicit and not onearg_constructor:
if noarg_constructor:
error(filename, linenum, 'runtime/explicit', 5,
'Zero-parameter constructors should not be marked explicit.')
else:
error(filename, linenum, 'runtime/explicit', 0,
'Constructors that require multiple arguments '
'should not be marked explicit.')
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
# pointers and references to arrays and functions coz they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
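  # For example, "Foo( bar)" and "Foo (bar)" are flagged below, while
  # "Foo(bar)" is not; for "if (foo) {" only the text inside the parentheses
  # is inspected under the stricter rules.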
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
      # TODO(unknown): Space after an operator function seems to be a common
# error, silence those for now by restricting them to highest verbosity.
if Search(r'\boperator_*\b', line):
error(filename, linenum, 'whitespace/parens', 0,
'Extra space before ( in function call')
else:
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only white spaces.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error):
is_namespace_indent_item = (
len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
nesting_state.previous_stack_top == nesting_state.stack[-2])
if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
clean_lines.elided, line):
CheckItemIndentationInNamespace(filename, clean_lines.elided,
line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
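# For example, "// TODO(user): fix" yields leading whitespace " ", username
# "(user)" and a single space after the colon; "// TODO: fix" (no username)
# and "//   TODO(user): fix" (too many spaces before TODO) are flagged in
# CheckComment below.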
def CheckComment(line, filename, linenum, next_line_start, error):
"""Checks for common mistakes in comments.
Args:
line: The line in question.
filename: The name of the current file.
linenum: The number of the line to check.
next_line_start: The first non-whitespace column of the next line.
error: The function to call with any errors found.
"""
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# Checks for common mistakes in TODO comments.
comment = line[commentpos:]
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
# If the comment contains an alphanumeric character, there
# should be a space somewhere between it and the //.
if Match(r'//[^ ]*\w', comment):
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
"""Checks for improper use of DISALLOW* macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
if not matched:
return
if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
if nesting_state.stack[-1].access != 'private':
error(filename, linenum, 'readability/constructors', 3,
'%s must be in the private: section' % matched.group(1))
else:
# Found DISALLOW* macro outside a class declaration, or perhaps it
# was used inside a function when it should have been part of the
# class declaration. We could issue a warning here, but it
# probably resulted in a compiler error already.
pass
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings,
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
  # blank lines at the end of a function (ie, right before a line like '}').
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
#
# Also skip blank line checks for 'extern "C"' blocks, which are formatted
# like namespaces.
if (IsBlankLine(line) and
not nesting_state.InNamespaceBody() and
not nesting_state.InExternC()):
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in an 80 column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into an 80 column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
        # simple heuristic here: if the line is indented 4 spaces and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, check comments
next_line_start = 0
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
next_line_start = len(next_line) - len(next_line.lstrip())
CheckComment(line, filename, linenum, next_line_start, error)
# get rid of comments and strings
line = clean_lines.elided[linenum]
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'return []() {};'
if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# In range-based for, we wanted spaces before and after the colon, but
# not around "::" tokens that might appear.
if (Search(r'for *\(.*[^:]:[^: ]', line) or
Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around operators.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Don't try to do spacing checks for operator methods. Do this by
# replacing the troublesome characters with something else,
# preserving column position for all other characters.
#
# The replacement is done repeatedly to avoid false positives from
# operators that call operators.
while True:
match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
if match:
line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
else:
break
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
#
  # If the operator is followed by a comma, assume it's being used in a
# macro context and don't do any checks. This avoids false
# positives.
#
# Note that && is not included here. Those are checked separately
# in CheckRValueReference
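  # For example, "a==b", "x<=y" and "p||q" are flagged below, while "a == b"
  # is not.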
match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
elif not Match(r'#.*include', line):
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
    # technically we should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
if match:
(_, _, end_pos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if end_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
if match:
(_, _, start_pos) = ReverseCloseExpression(
clean_lines, linenum, len(match.group(1)))
if start_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
#
# We also allow operators following an opening parenthesis, since
# those tend to be macros that deal with operators.
match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<([^\s,=])', line)
if (match and match.group(1) != '(' and
not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around parentheses.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# No spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
def CheckCommaSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing near commas and semicolons.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.lines_without_raw_strings
line = clean_lines.elided[linenum]
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
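  # For example, "f(a,b)" is flagged here, while "f(a, b)" is not.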
if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
Search(r',[^,\s]', raw[linenum])):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
# except for few corner cases
  # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
# space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
def CheckBracesSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing near commas.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces. And since you should never have braces at the beginning of a line,
# this is an easy test.
match = Match(r'^(.*[^ ({]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
# ternary = expr ? new type{} : nullptr;
# OuterTemplate<InnerTemplateConstructor<Type>{}>
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<>]:".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
def IsDecltype(clean_lines, linenum, column):
"""Check if the token ending on (linenum, column) is decltype().
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is decltype() expression, False otherwise.
"""
(text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
if start_col < 0:
return False
if Search(r'\bdecltype\s*$', text[0:start_col]):
return True
return False
def IsTemplateParameterList(clean_lines, linenum, column):
"""Check if the token ending on (linenum, column) is the end of template<>.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is end of a template parameter list, False otherwise.
"""
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, column)
if (startpos > -1 and
Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])):
return True
return False
def IsRValueType(clean_lines, nesting_state, linenum, column):
"""Check if the token ending on (linenum, column) is a type.
Assumes that text to the right of the column is "&&" or a function
name.
Args:
clean_lines: A CleansedLines instance containing the file.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is a type, False if we are not sure.
"""
prefix = clean_lines.elided[linenum][0:column]
# Get one word to the left. If we failed to do so, this is most
# likely not a type, since it's unlikely that the type name and "&&"
# would be split across multiple lines.
match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
if not match:
return False
# Check text following the token. If it's "&&>" or "&&," or "&&...", it's
# most likely a rvalue reference used inside a template.
suffix = clean_lines.elided[linenum][column:]
if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix):
return True
# Check for simple type and end of templates:
# int&& variable
# vector<int>&& variable
#
# Because this function is called recursively, we also need to
# recognize pointer and reference types:
# int* Function()
# int& Function()
if match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool',
'short', 'int', 'long', 'signed', 'unsigned',
'float', 'double', 'void', 'auto', '>', '*', '&']:
return True
# If we see a close parenthesis, look for decltype on the other side.
# decltype would unambiguously identify a type, anything else is
# probably a parenthesized expression and not a type.
if match.group(2) == ')':
return IsDecltype(
clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1)
# Check for casts and cv-qualifiers.
# match.group(1) remainder
# -------------- ---------
# const_cast< type&&
# const type&&
# type const&&
if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|'
r'reinterpret_cast\s*<|\w+\s)\s*$',
match.group(1)):
return True
# Look for a preceding symbol that might help differentiate the context.
# These are the cases that would be ambiguous:
# match.group(1) remainder
# -------------- ---------
# Call ( expression &&
# Declaration ( type&&
# sizeof ( type&&
# if ( expression &&
# while ( expression &&
# for ( type&&
# for( ; expression &&
# statement ; type&&
# block { type&&
# constructor { expression &&
start = linenum
line = match.group(1)
match_symbol = None
while start >= 0:
# We want to skip over identifiers and commas to get to a symbol.
# Commas are skipped so that we can find the opening parenthesis
# for function parameter lists.
match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
if match_symbol:
break
start -= 1
line = clean_lines.elided[start]
if not match_symbol:
# Probably the first statement in the file is an rvalue reference
return True
if match_symbol.group(2) == '}':
    # Found closing brace, probably an indication of this:
# block{} type&&
return True
if match_symbol.group(2) == ';':
# Found semicolon, probably one of these:
# for(; expression &&
# statement; type&&
# Look for the previous 'for(' in the previous lines.
before_text = match_symbol.group(1)
for i in xrange(start - 1, max(start - 6, 0), -1):
before_text = clean_lines.elided[i] + before_text
if Search(r'for\s*\([^{};]*$', before_text):
# This is the condition inside a for-loop
return False
# Did not find a for-init-statement before this semicolon, so this
# is probably a new statement and not a condition.
return True
if match_symbol.group(2) == '{':
# Found opening brace, probably one of these:
# block{ type&& = ... ; }
# constructor{ expression && expression }
# Look for a closing brace or a semicolon. If we see a semicolon
    # first, this is probably an rvalue reference.
line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
end = start
depth = 1
while True:
for ch in line:
if ch == ';':
return True
elif ch == '{':
depth += 1
elif ch == '}':
depth -= 1
if depth == 0:
return False
end += 1
if end >= clean_lines.NumLines():
break
line = clean_lines.elided[end]
# Incomplete program?
return False
if match_symbol.group(2) == '(':
# Opening parenthesis. Need to check what's to the left of the
# parenthesis. Look back one extra line for additional context.
before_text = match_symbol.group(1)
if linenum > 1:
before_text = clean_lines.elided[linenum - 1] + before_text
# Patterns that are likely to be types:
# [](type&&
# for (type&&
# sizeof(type&&
# operator=(type&&
#
if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
return True
# Patterns that are likely to be expressions:
# if (expression &&
# while (expression &&
# : initializer(expression &&
# , initializer(expression &&
# ( FunctionCall(expression &&
# + FunctionCall(expression &&
# + (expression &&
#
# The last '+' represents operators such as '+' and '-'.
if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
return False
# Something else. Check that tokens to the left look like
# return_type function_name
match_func = Match(r'^(.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
match_symbol.group(1))
if match_func:
# Check for constructors, which don't have return types.
if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
return True
implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
if (implicit_constructor and
implicit_constructor.group(1) == implicit_constructor.group(2)):
return True
return IsRValueType(clean_lines, nesting_state, linenum,
len(match_func.group(1)))
# Nothing before the function name. If this is inside a block scope,
# this is probably a function call.
return not (nesting_state.previous_stack_top and
nesting_state.previous_stack_top.IsBlockInfo())
if match_symbol.group(2) == '>':
# Possibly a closing bracket, check that what's on the other side
# looks like the start of a template.
return IsTemplateParameterList(
clean_lines, start, len(match_symbol.group(1)))
# Some other symbol, usually something like "a=b&&c". This is most
# likely not a type.
return False
def IsDeletedOrDefault(clean_lines, linenum):
"""Check if current constructor or operator is deleted or default.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if this is a deleted or default constructor.
"""
open_paren = clean_lines.elided[linenum].find('(')
if open_paren < 0:
return False
(close_line, _, close_paren) = CloseExpression(
clean_lines, linenum, open_paren)
if close_paren < 0:
return False
return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:])
def IsRValueAllowed(clean_lines, linenum):
"""Check if RValue reference is allowed on a particular line.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if line is within the region where RValue references are allowed.
"""
# Allow region marked by PUSH/POP macros
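  # That is, anything between a GOOGLE_ALLOW_RVALUE_REFERENCES_PUSH line and
  # the matching GOOGLE_ALLOW_RVALUE_REFERENCES_POP line is exempt from the
  # rvalue-reference warning.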
for i in xrange(linenum, 0, -1):
line = clean_lines.elided[i]
if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
if not line.endswith('PUSH'):
return False
for j in xrange(linenum, clean_lines.NumLines(), 1):
line = clean_lines.elided[j]
if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
return line.endswith('POP')
# Allow operator=
line = clean_lines.elided[linenum]
if Search(r'\boperator\s*=\s*\(', line):
return IsDeletedOrDefault(clean_lines, linenum)
# Allow constructors
match = Match(r'\s*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line)
if match and match.group(1) == match.group(2):
return IsDeletedOrDefault(clean_lines, linenum)
if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line):
return IsDeletedOrDefault(clean_lines, linenum)
if Match(r'\s*[\w<>]+\s*\(', line):
previous_line = 'ReturnType'
if linenum > 0:
previous_line = clean_lines.elided[linenum - 1]
if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line):
return IsDeletedOrDefault(clean_lines, linenum)
return False
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
"""Check for rvalue references.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Find lines missing spaces around &&.
# TODO(unknown): currently we don't check for rvalue references
# with spaces surrounding the && to avoid false positives with
# boolean expressions.
line = clean_lines.elided[linenum]
match = Match(r'^(.*\S)&&', line)
if not match:
match = Match(r'(.*)&&\S', line)
if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)):
return
# Either poorly formed && or an rvalue reference, check the context
# to get a more accurate error message. Mostly we want to determine
# if what's to the left of "&&" is a type or not.
and_pos = len(match.group(1))
if IsRValueType(clean_lines, nesting_state, linenum, and_pos):
if not IsRValueAllowed(clean_lines, linenum):
error(filename, linenum, 'build/c++11', 3,
'RValue references are an unapproved C++ feature.')
else:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around &&')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'else if\s*\(', line): # could be multi-line if
brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
brace_on_right = endline[endpos:].find('{') != -1
if brace_on_left != brace_on_right: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Check single-line if/else bodies. The style guide says 'curly braces are not
# required for single-line statements'. We additionally allow multi-line,
# single statements, but we reject anything with more than one semicolon in
# it. This means that the first semicolon after the if should be at the end of
# its line, and the line after that should have an indent level equal to or
# lower than the if. We also check for ambiguous if/else nesting without
# braces.
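  # Illustrative example of code flagged by this check:
  #   if (cond)
  #     DoFirst();
  #     DoSecond();  // second statement requires braces around the body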
if_else_match = Search(r'\b(if\s*\(|else\b)', line)
if if_else_match and not Match(r'\s*#', line):
if_indent = GetIndentLevel(line)
endline, endlinenum, endpos = line, linenum, if_else_match.end()
if_match = Search(r'\bif\s*\(', line)
if if_match:
# This could be a multiline if condition, so find the end first.
pos = if_match.end() - 1
(endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
# Check for an opening brace, either directly after the if or on the next
# line. If found, this isn't a single-statement conditional.
if (not Match(r'\s*{', endline[endpos:])
and not (Match(r'\s*$', endline[endpos:])
and endlinenum < (len(clean_lines.elided) - 1)
and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
while (endlinenum < len(clean_lines.elided)
and ';' not in clean_lines.elided[endlinenum][endpos:]):
endlinenum += 1
endpos = 0
if endlinenum < len(clean_lines.elided):
endline = clean_lines.elided[endlinenum]
# We allow a mix of whitespace and closing braces (e.g. for one-liner
# methods) and a single \ after the semicolon (for macros)
endpos = endline.find(';')
if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
# Semicolon isn't the last character, there's something trailing.
# Output a warning if the semicolon is not contained inside
# a lambda expression.
if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
endline):
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
elif endlinenum < len(clean_lines.elided) - 1:
# Make sure the next line is dedented
next_line = clean_lines.elided[endlinenum + 1]
next_indent = GetIndentLevel(next_line)
# With ambiguous nested if statements, this will error out on the
# if that *doesn't* match the else, regardless of whether it's the
# inner one or outer one.
if (if_match and Match(r'\s*else\b', next_line)
and next_indent != if_indent):
error(filename, linenum, 'readability/braces', 4,
'Else clause should be indented at the same level as if. '
'Ambiguous nested if/else chains require braces.')
elif next_indent > if_indent:
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
"""Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
  # These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
    # the downside of getting the whitelist wrong is only some extra
    # semicolons, while the downside of getting the blacklist wrong
    # would be compile errors.
#
# In addition to macros, we also don't want to warn on compound
# literals and lambdas.
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
func = Match(r'^(.*\])\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
(func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
Search(r'\s+=\s*$', line_prefix)):
match = None
if (match and
opening_parenthesis[1] > 1 and
Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
# Multi-line lambda-expression
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
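  # Illustrative examples of what gets flagged:
  #   while (Advance());   // empty loop body
  #   if (done);           // empty conditional body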
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
def FindCheckMacro(line):
"""Find a replaceable CHECK-like macro.
Args:
line: line to search on.
Returns:
(macro name, start position), or (None, -1) if no replaceable
macro is found.
"""
for macro in _CHECK_MACROS:
i = line.find(macro)
if i >= 0:
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
if not matched:
continue
return (macro, len(matched.group(1)))
return (None, -1)
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
(check_macro, start_pos) = FindCheckMacro(lines[linenum])
if not check_macro:
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
# If the check macro is followed by something other than a
# semicolon, assume users will log their own custom error messages
# and don't suggest any replacements.
if not Match(r'\s*;', last_line[end_pos:]):
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
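  # Illustrative example: for CHECK(a + b == 42) the loop below ends with
  # lhs of 'a + b ', operator '==' and rhs of ' 42', which later produces a
  # suggestion to use CHECK_EQ instead.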
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
# Check that rhs do not contain logical operators. We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
# it provides a way to workaround this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
classinfo = nesting_state.InnermostClass()
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
  # There are certain situations in which we allow one space, notably for
# section labels, and also lines containing multi-line raw strings.
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(scope_or_label_pattern, cleansed_line) and
not (clean_lines.raw_lines[linenum] != line and
Match(r'^\s*""', line))):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
  # developer's fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
extended_length = int((_line_length * 1.25))
if line_width > extended_length:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than %i characters' %
extended_length)
elif line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckTrailingSemicolon(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckOperatorSpacing(filename, clean_lines, linenum, error)
CheckParenthesisSpacing(filename, clean_lines, linenum, error)
CheckCommaSpacing(filename, clean_lines, linenum, error)
CheckBracesSpacing(filename, clean_lines, linenum, error)
CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
CheckRValueReference(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
  return (filename.endswith('_test.cc') or
          filename.endswith('_unittest.cc') or
          filename.endswith('_regtest.cc'))
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_h = include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
# lives in . , then it's likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
# Only do this check if the included header follows google naming
# conventions. If not, assume that it's a 3rd party API that
# requires special include conventions.
#
# We also make an exception for Lua headers, which follow google
# naming convention but not the include convention.
match = Match(r'#include\s*"([^/]+\.h)"', line)
if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
error(filename, linenum, 'build/include', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
duplicate_line = include_state.FindHeader(include)
if duplicate_line >= 0:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, duplicate_line))
elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
include_state.include_list[-1].append((include, linenum))
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
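    # Illustrative well-ordered includes for foo.cc:
    #   #include "foo/foo.h"
    #   #include <sys/types.h>
    #   #include <vector>
    #   #include "base/other.h"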
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
# Look for any of the stream classes that are part of standard C++.
match = _RE_PATTERN_INCLUDE.match(line)
if match:
include = match.group(2)
if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
# Many unit tests use cout, so we exempt them.
if not _IsTestFilename(filename):
# Suggest a different header for ostream
if include == 'ostream':
error(filename, linenum, 'readability/streams', 3,
'For logging, include "base/logging.h" instead of <ostream>.')
else:
error(filename, linenum, 'readability/streams', 3,
'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
  (, [, or {, and the matching close-punctuation symbol. This properly handles
  nested occurrences of the punctuation, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
  start_pattern must match a string that ends with an opening punctuation symbol.
Args:
    text: The text to extract from. Its comments and strings must be elided.
           It can be a single line or span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(unknown): Audit cpplint.py to see what places could be profitably
  # rewritten to use _GetTextInside (and currently use inferior regexp matching).
  # Map each opening punctuation symbol to its matching closing symbol.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
      'start_pattern must end with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
      'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
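# Illustrative examples: in a parameter list, 'string &str,' matches
# _RE_PATTERN_REF_PARAM, while 'const string &str,' also matches
# _RE_PATTERN_CONST_REF_PARAM and is therefore not reported by
# CheckForNonConstReference below.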
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
if match:
include_state.ResetSection(match.group(1))
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# Perform other checks now that we are sure that this is not an include line
CheckCasts(filename, clean_lines, linenum, error)
CheckGlobalStatic(filename, clean_lines, linenum, error)
CheckPrintf(filename, clean_lines, linenum, error)
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes declare or disable copy/assign
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(unknown): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
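    # Illustrative example: 'int buf[len + 1];' is flagged because 'len' is
    # not a compile-time constant, while 'int buf[kMaxLen * 2];' is accepted.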
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# If DISALLOW_COPY_AND_ASSIGN DISALLOW_IMPLICIT_CONSTRUCTORS is present,
# then it should be the last thing in the class declaration.
match = Match(
(r'\s*'
r'(DISALLOW_(COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
r'\(.*\);$'),
line)
if match and linenum + 1 < clean_lines.NumLines():
next_line = clean_lines.elided[linenum + 1]
# We allow some, but not all, declarations of variables to be present
# in the statement that defines the class. The [\w\*,\s]* fragment of
# the regular expression below allows users to declare instances of
# the class or pointers to instances, but not less common types such
# as function pointers or arrays. It's a tradeoff between allowing
# reasonable code and avoiding trying to parse more C++ using regexps.
if not Search(r'^\s*}[\w\*,\s]*;', next_line):
error(filename, linenum, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
"""Check for unsafe global or static objects.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Match two lines at a time to support multiline declarations
if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
line += clean_lines.elided[linenum + 1].strip()
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Remove false positives:
# - String pointers (as opposed to values).
# string *pointer
# const string *pointer
# string const *pointer
# string *const pointer
#
# - Functions and template specializations.
# string Function<Type>(...
# string Class<Type>::Method(...
#
# - Operators. These are matched separately because operator names
# cross non-word boundaries, and trying to match both operators
# and functions at the same time would decrease accuracy of
# matching identifiers.
# string Class::operator*()
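  # Illustrative example: 'static string kName = "foo";' is flagged, while
  # 'static const char kName[] = "foo";' is not.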
if (match and
not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
def CheckPrintf(filename, clean_lines, linenum, error):
"""Check for printf related issues.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# When snprintf is used, the second argument shouldn't be a literal.
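  # Illustrative example: 'snprintf(buf, 10, "%d", x)' is flagged with a
  # suggestion to use sizeof(buf); 'snprintf(NULL, 0, "%d", x)' is a size
  # query and is left alone.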
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\s*\(', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\s*\(', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
def IsDerivedFunction(clean_lines, linenum):
"""Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains a function with "override"
virt-specifier.
"""
# Scan back a few lines for start of current function
for i in xrange(linenum, max(-1, linenum - 10), -1):
match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
if match:
# Look for "override" after the matching closing parenthesis
line, _, closing_paren = CloseExpression(
clean_lines, i, len(match.group(1)))
return (closing_paren >= 0 and
Search(r'\boverride\b', line[closing_paren:]))
return False
def IsInitializerList(clean_lines, linenum):
"""Check if current line is inside constructor initializer list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line appears to be inside constructor initializer
list, False otherwise.
"""
for i in xrange(linenum, 1, -1):
line = clean_lines.elided[i]
if i == linenum:
remove_function_body = Match(r'^(.*)\{\s*$', line)
if remove_function_body:
line = remove_function_body.group(1)
if Search(r'\s:\s*\w+[({]', line):
      # A lone colon tends to indicate the start of a constructor
      # initializer list. It could also be a ternary operator, which
      # also tends to appear in constructor initializer lists as
# opposed to parameter lists.
return True
if Search(r'\}\s*,\s*$', line):
# A closing brace followed by a comma is probably the end of a
# brace-initialized member in constructor initializer list.
return True
if Search(r'[{};]\s*$', line):
# Found one of the following:
# - A closing brace or semicolon, probably the end of the previous
# function.
# - An opening brace, probably the start of current class or namespace.
#
# Current line is probably not inside an initializer list since
# we saw one of those things without seeing the starting colon.
return False
# Got to the beginning of the file without seeing the start of
# constructor initializer list.
return False
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# If a function is inherited, current function doesn't have much of
# a choice, so any non-const references should not be blamed on
# derived function.
if IsDerivedFunction(clean_lines, linenum):
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
# Note that this only scans back one line, since scanning back
# arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in xrange(startline, linenum + 1):
line += clean_lines.elided[i].strip()
# Check for non-const references in function parameters. A single '&' may
  # be found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
if (nesting_state.previous_stack_top and
not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
# Not at toplevel, not within a class, and not within a namespace
return
# Avoid initializer lists. We only need to scan back from the
# current line for something that starts with ':'.
#
# We don't need to check the current line, since the '&' would
# appear inside the second set of parentheses on the current line as
# opposed to the first set.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 10), -1):
previous_line = clean_lines.elided[i]
if not Search(r'[),]\s*$', previous_line):
break
if Match(r'^\s*:\s+\S', previous_line):
return
# Avoid preprocessors
if Search(r'\\\s*$', line):
return
# Avoid constructor initializer lists
if IsInitializerList(clean_lines, linenum):
return
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
if Search(whitelisted_functions, line):
return
elif not Search(r'\S+\([^)]*$', line):
# Don't see a whitelisted function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
return
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
ReplaceAll(' *<', '<', parameter))
def CheckCasts(filename, clean_lines, linenum, error):
"""Various cast related checks.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
  # Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b'
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
if match and not expecting_function:
matched_type = match.group(2)
# matched_new_or_template is used to silence two false positives:
# - New operators
# - Template arguments with function types
#
# For template arguments, we match on types immediately following
# an opening bracket without any spaces. This is a fast way to
# silence the common case where the function type is the first
# template argument. False negative with less-than comparison is
# avoided because those operators are usually followed by a space.
#
# function<double(double)> // bracket + no space = false positive
# value < double(42) // bracket + space = true positive
matched_new_or_template = match.group(1)
# Avoid arrays by looking for brackets that come after the closing
# parenthesis.
if Match(r'\([^()]+\)\s*\[', match.group(3)):
return
# Other things to ignore:
# - Function pointers
# - Casts to pointer types
# - Placement new
# - Alias declarations
matched_funcptr = match.group(3)
if (matched_new_or_template is None and
not (matched_funcptr and
(Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr) or
matched_funcptr.startswith('(*)'))) and
not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
not Search(r'new\(\S+\)\s*' + matched_type, line)):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
if not expecting_function:
CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
#
# Some non-identifier character is required before the '&' for the
# expression to be recognized as a cast. These are casts:
# expression = &static_cast<int*>(temporary());
# function(&(int*)(temporary()));
#
# This is not a cast:
# reference_type&(int* function_param);
match = Search(
r'(?:[^\w]&\(([^)]+)\)[\w(])|'
r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
if match and match.group(1) != '*':
# Try a better error message when the & is bound to something
# dereferenced by the casted pointer, as opposed to the casted
# pointer itself.
parenthesis_error = False
match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
if match:
_, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
_, y2, x2 = CloseExpression(clean_lines, y1, x1)
if x2 >= 0:
extended_line = clean_lines.elided[y2][x2:]
if y2 < clean_lines.NumLines() - 1:
extended_line += clean_lines.elided[y2 + 1]
if Match(r'\s*(?:->|\[)', extended_line):
parenthesis_error = True
if parenthesis_error:
error(filename, linenum, 'readability/casting', 4,
('Are you taking an address of something dereferenced '
'from a cast? Wrapping the dereferenced expression in '
'parentheses will make the binding more obvious'))
else:
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
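# Illustrative inputs for CheckCasts (assumed C++ source lines, not taken from
# any particular codebase):
#
#   int n = int(3.5);                              // flagged: use static_cast<int>(3.5)
#   value < double(42)                             // flagged: space after '<', so not a template argument
#   function<double(double)> f;                    // not flagged: function type inside a template argument
#   expression = &static_cast<int*>(temporary());  // flagged: taking the address of a cast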
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
line = clean_lines.elided[linenum]
match = Search(pattern, line)
if not match:
return False
# Exclude lines with keywords that tend to look like casts
context = line[0:match.start(1) - 1]
if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
return False
  # Try expanding the current context to see if we are one level of
  # parentheses inside a macro.
if linenum > 0:
for i in xrange(linenum - 1, max(0, linenum - 5), -1):
context = clean_lines.elided[i] + context
if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
return False
# operator++(int) and operator--(int)
if context.endswith(' operator++') or context.endswith(' operator--'):
return False
# A single unnamed argument for a function tends to look like old
# style cast. If we see those, don't issue warnings for deprecated
# casts, instead issue warnings for unnamed arguments where
# appropriate.
#
# These are things that we want warnings for, since the style guide
# explicitly require all parameters to be named:
# Function(int);
# Function(int) {
# ConstMember(int) const;
# ConstMember(int) const {
# ExceptionMember(int) throw (...);
# ExceptionMember(int) throw (...) {
# PureVirtual(int) = 0;
#
# These are functions of some sort, where the compiler would be fine
# if they had named parameters, but people often omit those
# identifiers to reduce clutter:
# (FunctionPointer)(int);
# (FunctionPointer)(int) = value;
# Function((function_pointer_arg)(int))
# Function((function_pointer_arg)(int), int param)
# <TemplateArgument(int)>;
# <(FunctionPointerTemplateArgument)(int)>;
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),])',
remainder):
# Looks like an unnamed parameter.
# Don't warn on any kind of template arguments.
if Match(r'^\s*>', remainder):
return False
# Don't warn on assignments to function pointers, but keep warnings for
# unnamed parameters to pure virtual functions. Note that this pattern
# will also pass on assignments of "0" to function pointers, but the
# preferred values for those would be "nullptr" or "NULL".
matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
if matched_zero and matched_zero.group(1) != '0':
return False
# Don't warn on function pointer declarations. For this we need
# to check what came before the "(type)" string.
if Match(r'.*\)\s*$', line[0:match.start(0)]):
return False
# Don't warn if the parameter is named with block comments, e.g.:
# Function(int /*unused_param*/);
raw_line = clean_lines.raw_lines[linenum]
if '/*' in raw_line:
return False
# Passed all filters, issue warning here.
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return True
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
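# Illustrative inputs for CheckCStyleCast (assumed C++ source lines with no
# enclosing macro on the preceding lines):
#
#   int y = (int)x;                  // flagged: 'Using C-style cast. Use static_cast<int>(...) instead'
#   void Function(int);              // flagged: 'All parameters should be named in a function'
#   void Function(int /*unused*/);   // not flagged: parameter "named" via a block comment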
def ExpectingFunctionArgs(clean_lines, linenum):
"""Checks whether where function type arguments are expected.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if the line at 'linenum' is inside something that expects arguments
of function types.
"""
line = clean_lines.elided[linenum]
return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
(linenum >= 2 and
(Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]) or
Search(r'\bstd::m?function\s*\<\s*$',
clean_lines.elided[linenum - 1]))))
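# Example of a context where ExpectingFunctionArgs returns True (assumed input):
#
#   MOCK_METHOD1(Process, bool(int));   // gMock declares the method via a function
#                                       // type, so 'bool(int)' must not be treated
#                                       // as a deprecated cast.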
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
  The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the .cc file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
if not filename_cc.endswith('.cc'):
return (False, '')
filename_cc = filename_cc[:-len('.cc')]
if filename_cc.endswith('_unittest'):
filename_cc = filename_cc[:-len('_unittest')]
elif filename_cc.endswith('_test'):
filename_cc = filename_cc[:-len('_test')]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
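# Worked examples (the first uses the values from the docstring above):
#
#   FilesBelongToSameModule('/absolute/path/to/base/sysinfo.cc', 'base/sysinfo.h')
#   # -> (True, '/absolute/path/to/')
#   FilesBelongToSameModule('a/b/foo_test.cc', 'a/b/foo.h')
#   # -> (True, '')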
def UpdateIncludeState(filename, include_dict, io=codecs):
"""Fill up the include_dict with new includes found from the file.
Args:
filename: the name of the header to read.
include_dict: a dictionary in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
include_dict.setdefault(include, linenum)
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (linenum, template)
# The following function is just a speed up, no semantics are changed.
    if '<' not in line:  # Reduces the cpu time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's flatten the include_state include_list and copy it into a dictionary.
include_dict = dict([item for sublist in include_state.include_list
for item in sublist])
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_dict is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = include_dict.keys()
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_dict, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cc') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_dict:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
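# Example of the resulting diagnostic (assumed source): a .cc or .h file that
# declares
#   std::less<int> cmp;
# without including <functional> anywhere in its include chain is reported as
#   'Add #include <functional> for less<>'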
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
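# Examples (assumed input):
#   auto p = make_pair<int, int>(1, 2);   // flagged: omit the template arguments
#   auto q = make_pair(1, 2);             // OK: arguments are deduced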
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
"""Check that default lambda captures are not used.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# A lambda introducer specifies a default capture if it starts with "[="
# or if it starts with "[&" _not_ followed by an identifier.
match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line)
if match:
# Found a potential error, check what comes after the lambda-introducer.
# If it's not open parenthesis (for lambda-declarator) or open brace
# (for compound-statement), it's not a lambda.
line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1)))
if pos >= 0 and Match(r'^\s*[{(]', line[pos:]):
error(filename, linenum, 'build/c++11',
4, # 4 = high confidence
'Default lambda captures are an unapproved C++ feature.')
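# Examples (assumed input):
#   auto f = [=](int x) { return x + y; };   // flagged: default capture by value
#   auto g = [&](int x) { return x + y; };   // flagged: default capture by reference
#   auto h = [&y](int x) { return x + y; };  // OK: explicit capture list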
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "virtual" function-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Look for "virtual" on current line.
line = clean_lines.elided[linenum]
virtual = Match(r'^(.*\bvirtual\b)', line)
if not virtual: return
# Look for the next opening parenthesis. This is the start of the
# parameter list (possibly on the next line shortly after virtual).
# TODO(unknown): doesn't work if there are virtual functions with
# decltype() or other things that use parentheses, but csearch suggests
# that this is rare.
end_col = -1
end_line = -1
start_col = len(virtual.group(1))
for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
line = clean_lines.elided[start_line][start_col:]
parameter_list = Match(r'^([^(]*)\(', line)
if parameter_list:
# Match parentheses to find the end of the parameter list
(_, end_line, end_col) = CloseExpression(
clean_lines, start_line, start_col + len(parameter_list.group(1)))
break
start_col = 0
if end_col < 0:
return # Couldn't find end of parameter list, give up
# Look for "override" or "final" after the parameter list
# (possibly on the next few lines).
for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
line = clean_lines.elided[i][end_col:]
match = Search(r'\b(override|final)\b', line)
if match:
error(filename, linenum, 'readability/inheritance', 4,
('"virtual" is redundant since function is '
'already declared as "%s"' % match.group(1)))
# Set end_col to check whole lines after we are done with the
# first line.
end_col = 0
if Search(r'[^\w]\s*$', line):
break
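# Example (assumed input):
#   virtual void Draw() override;   // flagged: 'virtual' is redundant with 'override'
#   void Draw() override;           // OK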
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
"""Check if line contains a redundant "override" or "final" virt-specifier.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Check that at most one of "override" or "final" is present, not both
line = clean_lines.elided[linenum]
if Search(r'\boverride\b', line) and Search(r'\bfinal\b', line):
error(filename, linenum, 'readability/inheritance', 4,
('"override" is redundant since function is '
'already declared as "final"'))
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
"""Checks that the new block is directly in a namespace.
Args:
nesting_state: The _NestingState object that contains info about our state.
is_forward_declaration: If the class is a forward declared class.
Returns:
Whether or not the new block is directly in a namespace.
"""
if is_forward_declaration:
if len(nesting_state.stack) >= 1 and (
isinstance(nesting_state.stack[-1], _NamespaceInfo)):
return True
else:
return False
return (len(nesting_state.stack) > 1 and
nesting_state.stack[-1].check_namespace_indentation and
isinstance(nesting_state.stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
raw_lines_no_comments, linenum):
"""This method determines if we should apply our namespace indentation check.
Args:
nesting_state: The current nesting state.
is_namespace_indent_item: If we just put a new class on the stack, True.
If the top of the stack is not a class, or we did not recently
add the class, False.
raw_lines_no_comments: The lines without the comments.
linenum: The current line number we are processing.
Returns:
True if we should apply our namespace indentation check. Currently, it
only works for classes and namespaces inside of a namespace.
"""
is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
linenum)
if not (is_namespace_indent_item or is_forward_declaration):
return False
# If we are in a macro, we do not want to check the namespace indentation.
if IsMacroDefinition(raw_lines_no_comments, linenum):
return False
return IsBlockInNameSpace(nesting_state, is_forward_declaration)
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
error):
line = raw_lines_no_comments[linenum]
if Match(r'^\s+', line):
error(filename, linenum, 'runtime/indentation_namespace', 4,
'Do not indent within a namespace')
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
error)
if nesting_state.InAsmBlock(): return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
CheckRedundantVirtual(filename, clean_lines, line, error)
CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
"""Flag those c++11 features that we only allow in certain places.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Flag unapproved C++11 headers.
include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
if include and include.group(1) in ('cfenv',
'condition_variable',
'fenv.h',
'future',
'mutex',
'thread',
'chrono',
'ratio',
'regex',
'system_error',
):
error(filename, linenum, 'build/c++11', 5,
('<%s> is an unapproved C++11 header.') % include.group(1))
# The only place where we need to worry about C++11 keywords and library
# features in preprocessor directives is in macro definitions.
if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
# These are classes and free functions. The classes are always
# mentioned as std::*, but we only catch the free functions if
# they're not found by ADL. They're alphabetical by header.
for top_name in (
# type_traits
'alignment_of',
'aligned_union',
# utility
'forward',
):
if Search(r'\bstd::%s\b' % top_name, line):
error(filename, linenum, 'build/c++11', 5,
            ('std::%s is an unapproved C++11 class or function. Send an '
             'example of where it would make your code more readable, and '
             'they may let you use it.') % top_name)
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=[]):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
if file_extension == 'h':
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
FlagCxx11Features(filename, clean_lines, line, error)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
""" Loads the configuration files and processes the config overrides.
Args:
filename: The name of the file being processed by the linter.
Returns:
False if the current |filename| should not be processed further.
"""
abs_filename = os.path.abspath(filename)
cfg_filters = []
keep_looking = True
while keep_looking:
abs_path, base_name = os.path.split(abs_filename)
if not base_name:
break # Reached the root directory.
cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
abs_filename = abs_path
if not os.path.isfile(cfg_file):
continue
try:
with open(cfg_file) as file_handle:
for line in file_handle:
line, _, _ = line.partition('#') # Remove comments.
if not line.strip():
continue
name, _, val = line.partition('=')
name = name.strip()
val = val.strip()
if name == 'set noparent':
keep_looking = False
elif name == 'filter':
cfg_filters.append(val)
elif name == 'exclude_files':
# When matching exclude_files pattern, use the base_name of
# the current file name or the directory name we are processing.
# For example, if we are checking for lint errors in /foo/bar/baz.cc
# and we found the .cfg file at /foo/CPPLINT.cfg, then the config
# file's "exclude_files" filter is meant to be checked against "bar"
# and not "baz" nor "bar/baz.cc".
if base_name:
pattern = re.compile(val)
if pattern.match(base_name):
sys.stderr.write('Ignoring "%s": file excluded by "%s". '
'File path component "%s" matches '
'pattern "%s"\n' %
(filename, cfg_file, base_name, val))
return False
elif name == 'linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
sys.stderr.write('Line length must be numeric.')
else:
sys.stderr.write(
'Invalid configuration option (%s) in file %s\n' %
(name, cfg_file))
except IOError:
sys.stderr.write(
"Skipping config file '%s': Can't open for reading\n" % cfg_file)
keep_looking = False
# Apply all the accumulated filters in reverse order (top-level directory
# config options having the least priority).
  for cfg_filter in reversed(cfg_filters):
    _AddFilters(cfg_filter)
return True
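# Example CPPLINT.cfg understood by ProcessConfigOverrides (option names are the
# ones parsed above; the values shown are hypothetical):
#
#   set noparent
#   filter=-build/include_order,+build/include_alpha
#   exclude_files=.*\.pb\.(h|cc)$
#   linelength=100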
def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
_BackupFilters()
if not ProcessConfigOverrides(filename):
_RestoreFilters()
return
lf_lines = []
crlf_lines = []
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
# Remove trailing '\r'.
# The -1 accounts for the extra trailing blank line we get from split()
for linenum in range(len(lines) - 1):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
crlf_lines.append(linenum + 1)
else:
lf_lines.append(linenum + 1)
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
_RestoreFilters()
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if filename != '-' and file_extension not in _valid_extensions:
sys.stderr.write('Ignoring %s; not a valid file name '
'(%s)\n' % (filename, ', '.join(_valid_extensions)))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
# If end-of-line sequences are a mix of LF and CR-LF, issue
# warnings on the lines with CR.
#
# Don't issue any warnings if all lines are uniformly LF or CR-LF,
# since critique can handle these just fine, and the style guide
# doesn't dictate a particular end of line sequence.
#
# We can't depend on os.linesep to determine what the desired
# end-of-line sequence should be, since that will return the
# server-side end-of-line sequence.
if lf_lines and crlf_lines:
# Warn on every line with CR. An alternative approach might be to
# check whether the file is mostly CRLF or just LF, and warn on the
    # minority; we bias toward LF here since most tools prefer LF.
for linenum in crlf_lines:
Error(filename, linenum, 'whitespace/newline', 1,
'Unexpected \\r (^M) found; better to use only \\n')
sys.stderr.write('Done processing %s\n' % filename)
_RestoreFilters()
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'linelength=',
'extensions='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
        PrintUsage('Line length must be numeric.')
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
        PrintUsage('Extensions must be a comma-separated list.')
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
def main():
filenames = ParseArguments(sys.argv[1:])
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
| 38.260212 | 97 | 0.651523 |
6f6071f661c49cc7cb44af8605a59d851154e2f2 | 850 | py | Python | taotao-cloud-python/taotao-cloud-oldboy/day77-cmdb/day75/day75/urls.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | ["Apache-2.0"] | 47 | 2021-04-13T10:32:13.000Z | 2022-03-31T10:30:30.000Z | taotao-cloud-python/taotao-cloud-oldboy/day77-cmdb/day75/day75/urls.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | ["Apache-2.0"] | 1 | 2021-11-01T07:41:04.000Z | 2021-11-01T07:41:10.000Z | taotao-cloud-python/taotao-cloud-oldboy/day77-cmdb/day75/day75/urls.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | ["Apache-2.0"] | 21 | 2021-04-13T10:32:17.000Z | 2022-03-26T07:43:22.000Z |
"""day75 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include('api.urls')),
url(r'^web/', include('web.urls')),
]
| 35.416667 | 79 | 0.688235 |
4941827248bd34624b6b5e4ee31e2475a1375098 | 14,029 | py | Python | Cython/Utils.py | mbuesch/cython | 14acda8e7e0b4246ef80331146e66d4f5b70c4e8 | ["Apache-2.0"] | 1 | 2020-12-22T08:51:20.000Z | 2020-12-22T08:51:20.000Z | Cython/Utils.py | mbuesch/cython | 14acda8e7e0b4246ef80331146e66d4f5b70c4e8 | ["Apache-2.0"] | null | null | null | Cython/Utils.py | mbuesch/cython | 14acda8e7e0b4246ef80331146e66d4f5b70c4e8 | ["Apache-2.0"] | null | null | null |
"""
Cython -- Things that don't belong
anywhere else in particular
"""
from __future__ import absolute_import
try:
from __builtin__ import basestring
except ImportError:
basestring = str
try:
FileNotFoundError
except NameError:
FileNotFoundError = OSError
import os
import sys
import re
import io
import codecs
import shutil
from contextlib import contextmanager
PACKAGE_FILES = ("__init__.py", "__init__.pyc", "__init__.pyx", "__init__.pxd")
modification_time = os.path.getmtime
_function_caches = []
def clear_function_caches():
for cache in _function_caches:
cache.clear()
def cached_function(f):
cache = {}
_function_caches.append(cache)
uncomputed = object()
def wrapper(*args):
res = cache.get(args, uncomputed)
if res is uncomputed:
res = cache[args] = f(*args)
return res
wrapper.uncached = f
return wrapper
def cached_method(f):
cache_name = '__%s_cache' % f.__name__
def wrapper(self, *args):
cache = getattr(self, cache_name, None)
if cache is None:
cache = {}
setattr(self, cache_name, cache)
if args in cache:
return cache[args]
res = cache[args] = f(self, *args)
return res
return wrapper
def replace_suffix(path, newsuf):
base, _ = os.path.splitext(path)
return base + newsuf
def open_new_file(path):
if os.path.exists(path):
# Make sure to create a new file here so we can
# safely hard link the output files.
os.unlink(path)
# we use the ISO-8859-1 encoding here because we only write pure
# ASCII strings or (e.g. for file names) byte encoded strings as
# Unicode, so we need a direct mapping from the first 256 Unicode
# characters to a byte sequence, which ISO-8859-1 provides
# note: can't use io.open() in Py2 as we may be writing str objects
return codecs.open(path, "w", encoding="ISO-8859-1")
def castrate_file(path, st):
# Remove junk contents from an output file after a
# failed compilation.
# Also sets access and modification times back to
# those specified by st (a stat struct).
try:
f = open_new_file(path)
except EnvironmentError:
pass
else:
f.write(
"#error Do not use this file, it is the result of a failed Cython compilation.\n")
f.close()
if st:
os.utime(path, (st.st_atime, st.st_mtime-1))
def file_newer_than(path, time):
ftime = modification_time(path)
return ftime > time
def safe_makedirs(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def copy_file_to_dir_if_newer(sourcefile, destdir):
"""
Copy file sourcefile to directory destdir (creating it if needed),
preserving metadata. If the destination file exists and is not
older than the source file, the copying is skipped.
"""
destfile = os.path.join(destdir, os.path.basename(sourcefile))
try:
desttime = modification_time(destfile)
except OSError:
# New file does not exist, destdir may or may not exist
safe_makedirs(destdir)
else:
# New file already exists
if not file_newer_than(sourcefile, desttime):
return
shutil.copy2(sourcefile, destfile)
@cached_function
def find_root_package_dir(file_path):
dir = os.path.dirname(file_path)
if file_path == dir:
return dir
elif is_package_dir(dir):
return find_root_package_dir(dir)
else:
return dir
@cached_function
def check_package_dir(dir_path, package_names):
namespace = True
for dirname in package_names:
dir_path = os.path.join(dir_path, dirname)
has_init = contains_init(dir_path)
if not namespace and not has_init:
return None, False
elif has_init:
namespace = False
return dir_path, namespace
@cached_function
def contains_init(dir_path):
for filename in PACKAGE_FILES:
path = os.path.join(dir_path, filename)
if path_exists(path):
return 1
def is_package_dir(dir_path):
if contains_init(dir_path):
return 1
@cached_function
def path_exists(path):
# try on the filesystem first
if os.path.exists(path):
return True
# figure out if a PEP 302 loader is around
try:
loader = __loader__
# XXX the code below assumes a 'zipimport.zipimporter' instance
# XXX should be easy to generalize, but too lazy right now to write it
archive_path = getattr(loader, 'archive', None)
if archive_path:
normpath = os.path.normpath(path)
if normpath.startswith(archive_path):
arcname = normpath[len(archive_path)+1:]
try:
loader.get_data(arcname)
return True
except IOError:
return False
except NameError:
pass
return False
# file name encodings
def decode_filename(filename):
if isinstance(filename, bytes):
try:
filename_encoding = sys.getfilesystemencoding()
if filename_encoding is None:
filename_encoding = sys.getdefaultencoding()
filename = filename.decode(filename_encoding)
except UnicodeDecodeError:
pass
return filename
# support for source file encoding detection
_match_file_encoding = re.compile(br"(\w*coding)[:=]\s*([-\w.]+)").search
def detect_opened_file_encoding(f):
# PEPs 263 and 3120
# Most of the time the first two lines fall in the first couple of hundred chars,
# and this bulk read/split is much faster.
lines = ()
start = b''
while len(lines) < 3:
data = f.read(500)
start += data
lines = start.split(b"\n")
if not data:
break
m = _match_file_encoding(lines[0])
if m and m.group(1) != b'c_string_encoding':
return m.group(2).decode('iso8859-1')
elif len(lines) > 1:
m = _match_file_encoding(lines[1])
if m:
return m.group(2).decode('iso8859-1')
return "UTF-8"
def skip_bom(f):
"""
Read past a BOM at the beginning of a source file.
This could be added to the scanner, but it's *substantially* easier
to keep it at this level.
"""
if f.read(1) != u'\uFEFF':
f.seek(0)
def open_source_file(source_filename, encoding=None, error_handling=None):
stream = None
try:
if encoding is None:
# Most of the time the encoding is not specified, so try hard to open the file only once.
f = io.open(source_filename, 'rb')
encoding = detect_opened_file_encoding(f)
f.seek(0)
stream = io.TextIOWrapper(f, encoding=encoding, errors=error_handling)
else:
stream = io.open(source_filename, encoding=encoding, errors=error_handling)
except OSError:
if os.path.exists(source_filename):
raise # File is there, but something went wrong reading from it.
# Allow source files to be in zip files etc.
try:
loader = __loader__
if source_filename.startswith(loader.archive):
stream = open_source_from_loader(
loader, source_filename,
encoding, error_handling)
except (NameError, AttributeError):
pass
if stream is None:
raise FileNotFoundError(source_filename)
skip_bom(stream)
return stream
def open_source_from_loader(loader,
source_filename,
encoding=None, error_handling=None):
nrmpath = os.path.normpath(source_filename)
arcname = nrmpath[len(loader.archive)+1:]
data = loader.get_data(arcname)
return io.TextIOWrapper(io.BytesIO(data),
encoding=encoding,
errors=error_handling)
def str_to_number(value):
# note: this expects a string as input that was accepted by the
# parser already, with an optional "-" sign in front
is_neg = False
if value[:1] == '-':
is_neg = True
value = value[1:]
if len(value) < 2:
value = int(value, 0)
elif value[0] == '0':
literal_type = value[1] # 0'o' - 0'b' - 0'x'
if literal_type in 'xX':
# hex notation ('0x1AF')
value = int(value[2:], 16)
elif literal_type in 'oO':
# Py3 octal notation ('0o136')
value = int(value[2:], 8)
elif literal_type in 'bB':
# Py3 binary notation ('0b101')
value = int(value[2:], 2)
else:
# Py2 octal notation ('0136')
value = int(value, 8)
else:
value = int(value, 0)
return -value if is_neg else value
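# Examples (values follow from the rules above):
#   str_to_number('0x1AF')   # -> 431
#   str_to_number('-0b101')  # -> -5
#   str_to_number('0136')    # -> 94   (Py2-style octal)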
def long_literal(value):
if isinstance(value, basestring):
value = str_to_number(value)
return not -2**31 <= value < 2**31
@cached_function
def get_cython_cache_dir():
r"""
Return the base directory containing Cython's caches.
Priority:
1. CYTHON_CACHE_DIR
2. (OS X): ~/Library/Caches/Cython
(posix not OS X): XDG_CACHE_HOME/cython if XDG_CACHE_HOME defined
3. ~/.cython
"""
if 'CYTHON_CACHE_DIR' in os.environ:
return os.environ['CYTHON_CACHE_DIR']
parent = None
if os.name == 'posix':
if sys.platform == 'darwin':
parent = os.path.expanduser('~/Library/Caches')
else:
# this could fallback on ~/.cache
parent = os.environ.get('XDG_CACHE_HOME')
if parent and os.path.isdir(parent):
return os.path.join(parent, 'cython')
# last fallback: ~/.cython
return os.path.expanduser(os.path.join('~', '.cython'))
@contextmanager
def captured_fd(stream=2, encoding=None):
pipe_in = t = None
orig_stream = os.dup(stream) # keep copy of original stream
try:
pipe_in, pipe_out = os.pipe()
os.dup2(pipe_out, stream) # replace stream by copy of pipe
try:
os.close(pipe_out) # close original pipe-out stream
data = []
def copy():
try:
while True:
d = os.read(pipe_in, 1000)
if d:
data.append(d)
else:
break
finally:
os.close(pipe_in)
def get_output():
output = b''.join(data)
if encoding:
output = output.decode(encoding)
return output
from threading import Thread
t = Thread(target=copy)
t.daemon = True # just in case
t.start()
yield get_output
finally:
os.dup2(orig_stream, stream) # restore original stream
if t is not None:
t.join()
finally:
os.close(orig_stream)
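# Typical usage sketch (stderr is file descriptor 2); the captured output is
# complete once the with-block has exited and the reader thread has joined:
#
#   with captured_fd(2, encoding='utf8') as get_stderr:
#       ...  # run something that writes to fd 2
#   stderr_text = get_stderr()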
def print_bytes(s, header_text=None, end=b'\n', file=sys.stdout, flush=True):
if header_text:
file.write(header_text) # note: text! => file.write() instead of out.write()
file.flush()
try:
out = file.buffer # Py3
except AttributeError:
out = file # Py2
out.write(s)
if end:
out.write(end)
if flush:
out.flush()
class LazyStr:
def __init__(self, callback):
self.callback = callback
def __str__(self):
return self.callback()
def __repr__(self):
return self.callback()
def __add__(self, right):
return self.callback() + right
def __radd__(self, left):
return left + self.callback()
class OrderedSet(object):
def __init__(self, elements=()):
self._list = []
self._set = set()
self.update(elements)
def __iter__(self):
return iter(self._list)
def update(self, elements):
for e in elements:
self.add(e)
def add(self, e):
if e not in self._set:
self._list.append(e)
self._set.add(e)
# Class decorator that adds a metaclass and recreates the class with it.
# Copied from 'six'.
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
def raise_error_if_module_name_forbidden(full_module_name):
# it is bad idea to call the pyx-file cython.pyx, so fail early
if full_module_name == 'cython' or full_module_name.startswith('cython.'):
raise ValueError('cython is a special module, cannot be used as a module name')
def build_hex_version(version_string):
"""
Parse and translate '4.3a1' into the readable hex representation '0x040300A1' (like PY_VERSION_HEX).
"""
# First, parse '4.12a1' into [4, 12, 0, 0xA01].
digits = []
release_status = 0xF0
for digit in re.split('([.abrc]+)', version_string):
if digit in ('a', 'b', 'rc'):
release_status = {'a': 0xA0, 'b': 0xB0, 'rc': 0xC0}[digit]
digits = (digits + [0, 0])[:3] # 1.2a1 -> 1.2.0a1
elif digit != '.':
digits.append(int(digit))
digits = (digits + [0] * 3)[:4]
digits[3] += release_status
# Then, build a single hex value, two hex digits per version part.
hexversion = 0
for digit in digits:
hexversion = (hexversion << 8) + digit
return '0x%08X' % hexversion
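# Further examples (computed from the parsing rules above):
#   build_hex_version('0.29.21')  # -> '0x001D15F0'  (final release -> 0xF0 status)
#   build_hex_version('3.0a5')    # -> '0x030000A5'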
| 28.227364 | 104 | 0.599045 |
ba28543c6baa617d8dbe73478e807a0dbf9fdea5 | 677 | py | Python | Lib/compiler/consts.py | diogommartins/cinder | 79103e9119cbecef3b085ccf2878f00c26e1d175 | ["CNRI-Python-GPL-Compatible"] | 278 | 2021-08-31T00:46:51.000Z | 2022-02-13T19:43:28.000Z | Lib/compiler/consts.py | diogommartins/cinder | 79103e9119cbecef3b085ccf2878f00c26e1d175 | ["CNRI-Python-GPL-Compatible"] | 9 | 2021-11-05T22:28:43.000Z | 2021-11-23T08:39:04.000Z | Lib/compiler/consts.py | diogommartins/cinder | 79103e9119cbecef3b085ccf2878f00c26e1d175 | ["CNRI-Python-GPL-Compatible"] | 12 | 2021-08-31T07:49:54.000Z | 2021-10-08T01:09:01.000Z |
# Portions copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
# operation flags
OP_ASSIGN = "OP_ASSIGN"
OP_DELETE = "OP_DELETE"
OP_APPLY = "OP_APPLY"
SC_LOCAL = 1
SC_GLOBAL_IMPLICIT = 2
SC_GLOBAL_EXPLICIT = 3
SC_FREE = 4
SC_CELL = 5
SC_UNKNOWN = 6
CO_OPTIMIZED = 0x0001
CO_NEWLOCALS = 0x0002
CO_VARARGS = 0x0004
CO_VARKEYWORDS = 0x0008
CO_NESTED = 0x0010
CO_GENERATOR = 0x0020
CO_NOFREE = 0x0040
CO_COROUTINE = 0x0080
CO_GENERATOR_ALLOWED = 0
CO_ITERABLE_COROUTINE = 0x0100
CO_ASYNC_GENERATOR = 0x0200
PyCF_MASK_OBSOLETE: int = CO_NESTED
PyCF_SOURCE_IS_UTF8 = 0x0100
PyCF_DONT_IMPLY_DEDENT = 0x0200
PyCF_ONLY_AST = 0x0400
PyCF_IGNORE_COOKIE = 0x0800
| 21.83871 | 85 | 0.79616 |
0c155c03d121177e7a3af30a6d1561b8fa064fad | 315 | py | Python | routers.py | v0tti/looking-glass | 3ddfcf1f82491fa09598234d3c981c020c7b0725 | ["MIT"] | null | null | null | routers.py | v0tti/looking-glass | 3ddfcf1f82491fa09598234d3c981c020c7b0725 | ["MIT"] | null | null | null | routers.py | v0tti/looking-glass | 3ddfcf1f82491fa09598234d3c981c020c7b0725 | ["MIT"] | null | null | null |
from dotenv import load_dotenv
import os
load_dotenv()
## Example routers, please edit and add yours
routers_list = [
dict(address=('dn42.v0tti.com', 22),
usern=os.getenv('USERN'),
passw=os.getenv('PASSW'),
type='bird',
asn='4242423929',
location='dn42.v0tti.com',
jumpserver=False
)
] | 19.6875 | 45 | 0.666667 |
05d9d3c04198f3de7a79f1b77cf8d1dd0b2dfe52 | 10,508 | py | Python | models/base_model.py | XLEric/CycleGAN | a75a50fb3cc6a8d5caf584f091fefc40851dc538 | ["MIT"] | null | null | null | models/base_model.py | XLEric/CycleGAN | a75a50fb3cc6a8d5caf584f091fefc40851dc538 | ["MIT"] | null | null | null | models/base_model.py | XLEric/CycleGAN | a75a50fb3cc6a8d5caf584f091fefc40851dc538 | ["MIT"] | null | null | null |
import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks
class BaseModel(ABC):
"""This class is an abstract base class (ABC) for models.
To create a subclass, you need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-- <set_input>: unpack data from dataset and apply preprocessing.
-- <forward>: produce intermediate results.
-- <optimize_parameters>: calculate losses, gradients, and update network weights.
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the BaseModel class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
When creating your custom class, you need to implement your own initialization.
In this function, you should first call <BaseModel.__init__(self, opt)>
Then, you need to define four lists:
-- self.loss_names (str list): specify the training losses that you want to plot and save.
-- self.model_names (str list): define networks used in our training.
-- self.visual_names (str list): specify the images that you want to display and save.
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
"""
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.optimizers = []
self.image_paths = []
self.metric = 0 # used for learning rate policy 'plateau'
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new model-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): includes the data itself and its metadata information.
"""
pass
@abstractmethod
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
pass
@abstractmethod
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
pass
def setup(self, opt):
"""Load and print networks; create schedulers
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
if self.isTrain:
self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.continue_train:
load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
try:
self.load_networks(load_suffix)
            except Exception:
                # If no checkpoint matching this suffix exists yet, keep the
                # freshly initialized weights instead of aborting.
                pass
self.print_networks(opt.verbose)
def eval(self):
"""Make models eval mode during test time"""
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
net.eval()
def test(self):
"""Forward function used in test time.
This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
It also calls <compute_visuals> to produce additional visualization results
"""
with torch.no_grad():
self.forward()
self.compute_visuals()
def compute_visuals(self):
"""Calculate additional output images for visdom and HTML visualization"""
pass
def get_image_paths(self):
""" Return image paths that are used to load current data"""
return self.image_paths
def update_learning_rate(self):
"""Update learning rates for all the networks; called at the end of every epoch"""
old_lr = self.optimizers[0].param_groups[0]['lr']
for scheduler in self.schedulers:
if self.opt.lr_policy == 'plateau':
scheduler.step(self.metric)
else:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate %.7f -> %.7f' % (old_lr, lr))
def get_current_visuals(self):
"""Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
# print('--->>>',name)
visual_ret[name] = getattr(self, name)
return visual_ret
def get_current_losses(self):
"""Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
return errors_ret
def save_networks(self, epoch):
"""Save all the networks to the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
"""Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
def load_networks(self, epoch):
"""Load all the networks from the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (epoch, name)
load_path = os.path.join(self.save_dir, load_filename)
net = getattr(self, 'net' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
print('loading the model from %s' % load_path)
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
def print_networks(self, verbose):
"""Print the total number of parameters in the network and (if verbose) network architecture
Parameters:
verbose (bool) -- if verbose: print the network architecture
"""
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
def set_requires_grad(self, nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
| 44.714894 | 260 | 0.59745 |
052800f83af9c7ea94b0d3a251e484b8ca6d2711 | 3,894 | py | Python | esmvalcore/preprocessor/_derive/co2s.py | markelg/ESMValCore | b2f7ffc3232f174dd5ebc50ad20b4a02d3517c2c | ["Apache-2.0"] | 26 | 2019-06-07T07:50:07.000Z | 2022-03-22T21:04:01.000Z | esmvalcore/preprocessor/_derive/co2s.py | markelg/ESMValCore | b2f7ffc3232f174dd5ebc50ad20b4a02d3517c2c | ["Apache-2.0"] | 1,370 | 2019-06-06T09:03:07.000Z | 2022-03-31T04:37:20.000Z | esmvalcore/preprocessor/_derive/co2s.py | zklaus/ESMValCore | 5656fb8b546eeb4d750a424de7ed56a237edfabb | ["Apache-2.0"] | 26 | 2019-07-03T13:08:48.000Z | 2022-03-02T16:08:47.000Z |
"""Derivation of variable ``co2s``."""
import dask.array as da
import iris
import numpy as np
import stratify
from ._baseclass import DerivedVariableBase
def _get_first_unmasked_data(array, axis):
"""Get first unmasked value of an array along an axis."""
mask = da.ma.getmaskarray(array)
numerical_mask = da.where(mask, -1.0, 1.0)
indices_first_positive = da.argmax(numerical_mask, axis=axis)
indices = da.meshgrid(
*[da.arange(array.shape[i]) for i in range(array.ndim) if i != axis],
indexing='ij')
indices.insert(axis, indices_first_positive)
first_unmasked_data = np.array(array)[tuple(indices)]
return first_unmasked_data
class DerivedVariable(DerivedVariableBase):
"""Derivation of variable ``co2s``.
Use linear interpolation/extrapolation and surface air pressure to
calculate CO2 mole fraction at surface.
Note
----
In some cases, ``co2`` data is masked. In these cases, the masked values
correspond to values where the pressure level is higher than the surface
air pressure (e.g. the 1000 hPa level for grid cells with high elevation).
To obtain an unmasked ``co2s`` field, it is necessary to fill these masked
values accordingly, i.e. with the lowest unmasked value for each grid cell.
"""
@staticmethod
def required(project):
"""Declare the variables needed for derivation."""
required = [{'short_name': 'co2'}, {'short_name': 'ps'}]
return required
@staticmethod
def calculate(cubes):
"""Compute mole fraction of CO2 at surface."""
co2_cube = cubes.extract_cube(
iris.Constraint(name='mole_fraction_of_carbon_dioxide_in_air'))
ps_cube = cubes.extract_cube(
iris.Constraint(name='surface_air_pressure'))
# Fill masked data if necessary (interpolation fails with masked data)
(z_axis,) = co2_cube.coord_dims(co2_cube.coord(axis='Z',
dim_coords=True))
mask = da.ma.getmaskarray(co2_cube.core_data())
if mask.any():
first_unmasked_data = _get_first_unmasked_data(
co2_cube.core_data(), axis=z_axis)
dim_map = [dim for dim in range(co2_cube.ndim) if dim != z_axis]
first_unmasked_data = iris.util.broadcast_to_shape(
first_unmasked_data, co2_cube.shape, dim_map)
co2_cube.data = da.where(mask, first_unmasked_data,
co2_cube.core_data())
# Interpolation (not supported for dask arrays)
air_pressure_coord = co2_cube.coord('air_pressure')
original_levels = iris.util.broadcast_to_shape(
air_pressure_coord.points, co2_cube.shape,
co2_cube.coord_dims(air_pressure_coord))
target_levels = np.expand_dims(ps_cube.data, axis=z_axis)
co2s_data = stratify.interpolate(
target_levels,
original_levels,
co2_cube.data,
axis=z_axis,
interpolation='linear',
extrapolation='linear',
)
co2s_data = np.squeeze(co2s_data, axis=z_axis)
# Construct co2s cube
indices = [slice(None)] * co2_cube.ndim
indices[z_axis] = 0
co2s_cube = co2_cube[tuple(indices)]
co2s_cube.data = co2s_data
if co2s_cube.coords('air_pressure'):
co2s_cube.remove_coord('air_pressure')
ps_coord = iris.coords.AuxCoord(ps_cube.data,
var_name='plev',
standard_name='air_pressure',
long_name='pressure',
units=ps_cube.units)
co2s_cube.add_aux_coord(ps_coord, np.arange(co2s_cube.ndim))
co2s_cube.convert_units('1e-6')
return co2s_cube
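

# Minimal runnable sketch (not part of the original module): show how
# ``_get_first_unmasked_data`` picks the lowest unmasked value along an axis,
# which is the behaviour used above to fill masked ``co2`` levels.
if __name__ == '__main__':
    _example = da.ma.masked_array(
        da.from_array(np.array([[4.0, 1.0], [2.0, 3.0]])),
        mask=np.array([[True, False], [False, False]]))
    # Along axis 0 the first unmasked entries are 2.0 (column 0) and 1.0
    # (column 1), so this prints ``[2. 1.]``.
    print(_get_first_unmasked_data(_example, axis=0))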
| 40.14433 | 79 | 0.625064 |
d81f5dc9cf42fa425b024efedb9919bae073ae6a | 5,000 | py | Python | predefined_functions/algo_doing_simple_tasks.py | g-make-it/IG_Trading_Algo_Scripts_Python | 931f1d568f8e89fbaac56aa5ec58473bbed606f6 | ["BSD-3-Clause"] | 186 | 2021-09-05T10:03:01.000Z | 2022-03-08T05:47:04.000Z | predefined_functions/algo_doing_simple_tasks.py | g-make-it/IG_Trading_Algo_Scripts_Python | 931f1d568f8e89fbaac56aa5ec58473bbed606f6 | ["BSD-3-Clause"] | 1 | 2021-11-19T21:56:55.000Z | 2021-12-29T18:21:29.000Z | predefined_functions/algo_doing_simple_tasks.py | g-make-it/IG_Trading_Algo_Scripts_Python | 931f1d568f8e89fbaac56aa5ec58473bbed606f6 | ["BSD-3-Clause"] | 32 | 2021-09-05T11:46:32.000Z | 2022-03-22T08:23:38.000Z |
from trading_ig.config import config
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# if you need to cache to DB your requests
from datetime import timedelta
import requests_cache
from getting_realtime_data.data_retrieval import Data_Retrieval
from sending_orders.order_management import Order_Management
from management_of_position.position_management import Position_Management
from predefined_functions.initialisation import Initialisation
from get_data.get_market_data import Get_Market_Data
import time
from datetime import datetime, timedelta
from predefined_functions.defined_functionality import Defined_Functionality
import pandas
import traceback
# The newest version: places market orders based on price movements of 5 points or more and tries to catch the trend.
class Algo0:
def __init__(self):
logging.basicConfig(level=logging.INFO)
self.df = Defined_Functionality()
self.list_of_epics = ['IX.D.DOW.DAILY.IP']
# list_of_epics = ['CS.D.EURUSD.TODAY.IP']
self.df.set_epics_to_look_for(epic_list=self.list_of_epics)
self.map_epic_data_minute={}
for epic in self.list_of_epics:
self.map_epic_data_minute[epic] = []
self.first_timestamp = None
self.high = None
self.low = None
def setup(self):
self.df.get_market_data()
# self.df.update_stop_level_bands()
def run(self):
self.df.start_data_from_market_data_socket(self.list_of_epics)
while(True):
try:
# self.setup()
epic=self.list_of_epics[0]
data = self.df.get_quote_data_from_socket(epic)
print(data)
# for epic in self.map_epic_data_minute.keys():
# signals_and_levels = self.signal_generation(epic=epic)
# self.create_orders(epic=epic, signals_levels=signals_and_levels)
except Exception as e:
print(e, " error in the looping for the defined_functionality")
traceback.print_exc()
def create_orders(self, epic, signals_levels):
if signals_levels == None:
return
key = None
if signals_levels["BUY"] != None:
key = "BUY"
elif signals_levels["SELL"] != None:
key = "SELL"
position = self.df.find_open_position_by_epic(epic=epic)
if isinstance(position,pandas.core.series.Series):
print("position already exists", position)
return position
create_position = self.df.create_open_position(epic=epic, direction=key, size=0.5)
return create_position
def signal_generation(self, epic):
signals_levels = None
# minute_10 = 60 * 10
# minute_10 = 60
minute_10 = 6
datetime_now = datetime.now()
data = None
if (self.first_timestamp != None):
difference = (datetime_now - self.first_timestamp)
data = self.df.get_market_data(epic=epic)
# self.finding_lows_highs(data=data)
if (difference.seconds > minute_10):
data = self.df.get_market_data(epic=epic)
self.first_timestamp = datetime_now
self.map_epic_data_minute[epic].append(data)
# self.finding_lows_highs(data=data, reset=True)
else:
data = self.df.get_market_data(epic=epic)
self.first_timestamp = datetime_now
self.map_epic_data_minute[epic].append(data)
# self.finding_lows_highs(data=data)
if len(self.map_epic_data_minute[epic]) > 3:
self.map_epic_data_minute[epic].pop(0)
sell_level = None
buy_level = None
object_epic_data = self.map_epic_data_minute[epic][-1]
bid = object_epic_data["snapshot"]["bid"]
offer = object_epic_data["snapshot"]["offer"]
high = object_epic_data["snapshot"]["high"]
low = object_epic_data["snapshot"]["low"]
object_epic_data = self.map_epic_data_minute[epic][-2]
bid_old = object_epic_data["snapshot"]["bid"]
offer_old = object_epic_data["snapshot"]["offer"]
high_old = object_epic_data["snapshot"]["high"]
low_old = object_epic_data["snapshot"]["low"]
offer_diff = offer - offer_old
bid_diff = bid - bid_old
if offer_diff > 10:
buy_level = 1
elif bid_diff < -10:
sell_level = 1
self.map_epic_data_minute[epic] = []
# instead here we are using bid/offer
if (sell_level == None) and (buy_level == None):
return None
signals_levels = {
"SELL": sell_level,
"BUY": buy_level
}
return signals_levels
| 31.64557 | 105 | 0.6146 |
3708ca56ede14f4cf9d89c7778808bdc32067bd1 | 4,576 | py | Python | TEditor/TEditor/settings.py | josevictorp81/TEditor | d05fd61a730dd2aac63221c9011deac789c3cc68 | ["MIT"] | 1 | 2021-12-30T01:13:41.000Z | 2021-12-30T01:13:41.000Z | TEditor/TEditor/settings.py | josevictorp81/TEditor | d05fd61a730dd2aac63221c9011deac789c3cc68 | ["MIT"] | null | null | null | TEditor/TEditor/settings.py | josevictorp81/TEditor | d05fd61a730dd2aac63221c9011deac789c3cc68 | ["MIT"] | null | null | null |
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w4--^ptkk$k9cjh!191e2=6+g91i2hc3yl-p)r6&-^-7(#z0&c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ckeditor',
'easy_pdf',
'widget_tweaks',
'core',
'accounts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'TEditor.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'TEditor.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
LOGIN_REDIRECT_URL = 'text-list'
LOGOUT_REDIRECT_URL = 'index'
CKEDITOR_CONFIGS = {
'default': {
'toolbar_YourCustomToolbarConfig': [
{'name': 'basicstyles',
'items': ['Bold', 'Italic', 'Underline', 'Strike', 'Subscript', 'Superscript', '-', 'RemoveFormat']},
{'name': 'paragraph',
'items': ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Blockquote', 'CreateDiv', '-',
'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock', '-']},
{'name': 'links', 'items': ['Link', 'Unlink', 'Anchor']},
{'name': 'insert',
'items': ['Image', 'Flash', 'Table', 'HorizontalRule', 'Smiley', 'SpecialChar', 'PageBreak']},
'/',
{'name': 'styles', 'items': ['Format', 'Font', 'FontSize']},
{'name': 'colors', 'items': ['TextColor', 'BGColor']},
{'name': 'tools', 'items': ['Maximize', 'ShowBlocks']},
{'name': 'about', 'items': ['About']},
{'name': 'yourcustomtools', 'items': [
'Preview',
'Maximize',
]},
],
'toolbar': 'YourCustomToolbarConfig',
'height': 300,
'width': '100%',
'tabSpaces': 4,
'extraPlugins': ','.join([
'uploadimage',
'div',
'autolink',
'autoembed',
'embedsemantic',
'autogrow',
'widget',
'lineutils',
'clipboard',
'dialog',
'dialogui',
'elementspath'
]),
}
}
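
# Illustrative sketch (not part of this settings module; assumes the
# django-ckeditor field API): a model can opt into the toolbar configured
# above with
#
#   from ckeditor.fields import RichTextField
#   body = RichTextField(config_name='default')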
| 28.07362 | 117 | 0.600743 |
3455383c4666cd0da34da81a5f7a6b2e329b59d0 | 343 | py | Python | generator/generators/ping.py | ionut-arm/parsec-mock | 9bf0f4b24e046865d30513490ec20c4e94ef0a7f | ["Apache-2.0"] | 3 | 2021-03-19T17:25:00.000Z | 2021-03-24T23:38:28.000Z | generator/generators/ping.py | ionut-arm/parsec-mock | 9bf0f4b24e046865d30513490ec20c4e94ef0a7f | ["Apache-2.0"] | 4 | 2021-04-21T16:23:04.000Z | 2021-12-08T15:24:02.000Z | generator/generators/ping.py | ionut-arm/parsec-mock | 9bf0f4b24e046865d30513490ec20c4e94ef0a7f | ["Apache-2.0"] | 3 | 2021-03-23T20:41:48.000Z | 2022-03-25T10:19:37.000Z |
# Copyright 2021 Contributors to the Parsec project.
# SPDX-License-Identifier: Apache-2.0
from .protobuf import ping_pb2
def gen():
op = ping_pb2.Operation()
result = ping_pb2.Result()
result.wire_protocol_version_maj = 1
result.wire_protocol_version_min = 0
return (op.SerializeToString(), result.SerializeToString())
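

# Minimal runnable sketch (not part of the original module): round-trip the
# serialized result produced by gen() back into a protobuf message.
if __name__ == '__main__':
    _op_bytes, _result_bytes = gen()
    _result = ping_pb2.Result.FromString(_result_bytes)
    print(_result.wire_protocol_version_maj, _result.wire_protocol_version_min)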
| 26.384615 | 63 | 0.74344 |
2e5aa13a921a5d00fafc8bd4a83511bcc9dfc6b6 | 2,959 | py | Python | pyferm/actions/ramp.py | viyh/pyferm | 39becd72e8918a0cfa56942b646288b081bc4300 | ["MIT"] | null | null | null | pyferm/actions/ramp.py | viyh/pyferm | 39becd72e8918a0cfa56942b646288b081bc4300 | ["MIT"] | null | null | null | pyferm/actions/ramp.py | viyh/pyferm | 39becd72e8918a0cfa56942b646288b081bc4300 | ["MIT"] | null | null | null |
import time
import datetime
from pyferm.actions import action
class ramp(action):
def __init__(self, name, parent, **kwargs):
super().__init__(name, parent, **kwargs)
def run(self):
self.start_value = self.params["start_value"]
self.end_value = self.params["end_value"]
self.step_interval = self.params["step_interval"]
self.step_size = self.params["step_size"]
self.low = self.params["controls"]["low"]
self.high = self.params["controls"]["high"]
self.metric = self.params["metric"]
self.current_value = self.start_value
self.action_step_start_time = datetime.datetime.utcnow()
while self._is_running:
self.action_step_elapsed = (
datetime.datetime.utcnow() - self.action_step_start_time
).seconds
self.log(f"elapsed: {self.action_step_elapsed}")
if (
self.action_step_elapsed > self.step_interval
and self.current_value < self.end_value
):
self.action_step_start_time = datetime.datetime.utcnow()
self.current_value += self.step_size
self.log(f"next step, value: {self.current_value}")
else:
self.log(f"set value: {self.current_value}")
sensor = self.parent.get_sensor_by_name(self.metric["sensor"])
metric = sensor.get_metric_by_name(self.metric["metric"])
metric_value = metric.get_value()
if metric_value:
self.log(f"metric value: {metric_value}")
if metric_value < self.current_value - self.low["threshold"]:
self.log(f"low threshold, triggering {self.low['control']}")
self.low_on()
if (
metric_value >= self.current_value - self.low["threshold"]
and metric_value
):
self.log(
f"low threshold met, turning off {self.low['control']}"
)
self.low_off()
if metric_value > self.current_value + self.high["threshold"]:
self.log(f"high threshold, triggering {self.high['control']}")
self.high_on()
if metric_value <= self.current_value - self.high["threshold"]:
self.log(
f"high threshold met, turning off {self.high['control']}"
)
self.high_off()
else:
self.log(
f'no metric value for {self.metric["sensor"]} - '
f'{self.metric["metric"]}'
)
self.log(f"sleeping {self.interval} seconds")
time.sleep(self.interval)
| 42.271429 | 86 | 0.515715 |
f128626436e773e1eb1e89a3c742f9ee2bc992a7 | 7,010 | py | Python | fantasycreator/welcomeWindow.py | peterg7/Fantasy_Creator | ee4d6a3d4559f649bbe2f5d8c3f76dc41c94d414 | ["MIT"] | null | null | null | fantasycreator/welcomeWindow.py | peterg7/Fantasy_Creator | ee4d6a3d4559f649bbe2f5d8c3f76dc41c94d414 | ["MIT"] | null | null | null | fantasycreator/welcomeWindow.py | peterg7/Fantasy_Creator | ee4d6a3d4559f649bbe2f5d8c3f76dc41c94d414 | ["MIT"] | null | null | null |
# PyQt
from PyQt5 import QtWidgets as qtw
from PyQt5 import QtCore as qtc
from PyQt5 import QtGui as qtg
# Built-in Modules
import sys
import time
# External resources
import resources
# Opening window
class WelcomeWindow(qtw.QDialog):
new_book = qtc.pyqtSignal()
open_existing = qtc.pyqtSignal(str)
open_sample = qtc.pyqtSignal()
closed = qtc.pyqtSignal()
INIT_WIDTH = 960
INIT_HEIGHT = 640
def __init__(self, width, height, parent=None):
super(WelcomeWindow, self).__init__(parent)
self.setSizePolicy(
qtw.QSizePolicy.Preferred,
qtw.QSizePolicy.Preferred
)
# self.setModal(True)
self.setWindowTitle('Welcome!')
self.setFixedSize(WelcomeWindow.INIT_WIDTH, WelcomeWindow.INIT_HEIGHT)
# self.background = qtg.QPixmap(':/background-images/welcome_background.png')
self.background_movie = qtg.QMovie(':background-images/welcome_screen.gif')
self.background_movie.setCacheMode(qtg.QMovie.CacheAll)
self.background_movie.jumpToFrame(0)
self.background_movie.setScaledSize(qtc.QSize(WelcomeWindow.INIT_WIDTH, WelcomeWindow.INIT_HEIGHT))
# background_size = background_movie.currentImage().size()
self.background_aspect = WelcomeWindow.INIT_WIDTH / WelcomeWindow.INIT_HEIGHT
# self.background_label = qtw.QLabel()
# self.background_label.setAlignment(qtc.Qt.AlignCenter)
# self.resizeEvent()
# self.background_label.setMovie(background_movie)
self.background_movie.frameChanged.connect(self.paintNewFrame)
self.background_movie.stateChanged.connect(self.loopMovie)
self.background_movie.start()
# self.background = self.background_label.grab()
# Set up layout
layout = qtw.QGridLayout()
heading = qtw.QLabel('Fantasy Creator')
heading.setAttribute(qtc.Qt.WA_TranslucentBackground)
heading_font = qtg.QFont('Apple Chancery', 100, qtg.QFont.ExtraBold)
heading.setFont(heading_font)
heading.setAlignment(qtc.Qt.AlignCenter)
heading.setStyleSheet("QLabel {color : #ebbc00}")
layout.addWidget(heading, 1, 1, 2, 6)
options_font = qtg.QFont('Baskerville', 25)
self.new_book_btn = qtw.QPushButton('New Book')
self.new_book_btn.setFont(options_font)
self.new_book_btn.clicked.connect(self.handleNewBook)
layout.addWidget(self.new_book_btn, 3, 3, 1, 2)
self.open_book_btn = qtw.QPushButton('Open Existing')
self.open_book_btn.setFont(options_font)
self.open_book_btn.clicked.connect(self.handleOpenBook)
layout.addWidget(self.open_book_btn, 4, 3, 1, 2)
self.open_sample_btn = qtw.QPushButton('Sample')
self.open_sample_btn.setFont(options_font)
self.open_sample_btn.clicked.connect(self.handleOpenSample)
layout.addWidget(self.open_sample_btn, 5, 3, 1, 2)
spacer = qtw.QSpacerItem(0, 0)
layout.addItem(spacer, 7, 0, 1, 1)
self.progress_bar = qtw.QProgressBar(self)
self.progress_bar.setOrientation(qtc.Qt.Horizontal)
self.progress_bar.setMinimum(0)
self.progress_bar.setMaximum(8)
self.current_progress = 0
# self.progress_bar.setVisible(False)
layout.addWidget(self.progress_bar, 2, 2, 1, 4)
self.cancel = qtw.QPushButton(
'Exit',
clicked=sys.exit
)
self.cancel.setFont(qtg.QFont('Baskerville', 18))
layout.addWidget(self.cancel, 7, 7, 1, 1)
for col in range(8):
layout.setColumnStretch(col, 1)
# layout.addWidget(self.background_label, 0, 0, 7, 7)
self.setLayout(layout)
self.progress_bar.setVisible(False)
def launchApp(self, signal, args=None):
self.progress_bar.setVisible(True)
self.new_book_btn.setVisible(False)
self.open_book_btn.setVisible(False)
self.open_sample_btn.setVisible(False)
self.cancel.setVisible(False)
app = qtw.QApplication.instance()
app.processEvents()
if args:
signal.emit(args)
else:
signal.emit()
def incrementProgressBar(self):
self.current_progress += 1
self.progress_bar.setValue(self.current_progress)
app = qtw.QApplication.instance()
app.processEvents()
def closeEvent(self, event):
self.closed.emit()
super(WelcomeWindow, self).closeEvent(event)
def handleOpenBook(self):
filename, _ = qtw.QFileDialog.getOpenFileName(
self,
"Select a file to open...",
            qtc.QDir.currentPath(), # static method returning the current working directory
'JSON Files (*.json) ;;Text Files (*.txt) ;;All Files (*)',
'JSON Files (*.json)'
)
if filename:
self.launchApp(self.open_existing, filename)
def handleNewBook(self):
self.launchApp(self.new_book)
def handleOpenSample(self):
self.launchApp(self.open_sample)
# def resizeEvent(self, event):
# bkgnd_img = self.background.scaled(self.size(),
# qtc.Qt.IgnoreAspectRatio, qtc.Qt.SmoothTransformation)
# palette = qtg.QPalette()
# palette.setBrush(qtg.QPalette.Window, qtg.QBrush(bkgnd_img))
# self.setPalette(palette)
# super(WelcomeWindow, self).resizeEvent(event)
# def resizeEvent(self, event=None):
# rect = self.geometry()
# background_movie = self.background_label.movie()
# if background_movie:
# width = rect.height() * self.background_aspect
# if width <= rect.width():
# size = qtc.QSize(width, rect.height())
# else:
# height = rect.width() / self.background_aspect
# size = qtc.QSize(rect.width(), height)
# background_movie.setScaledSize(size)
# palette = qtg.QPalette()
# palette.setBrush(qtg.QPalette.Window, qtg.QBrush(self.background))
# self.setPalette(palette)
# super(WelcomeWindow, self).resizeEvent(event)
def paintEvent(self, event):
current_frame = self.background_movie.currentPixmap()
frame_rect = current_frame.rect()
frame_rect.moveCenter(self.rect().center())
if frame_rect.intersects(event.rect()):
painter = qtg.QPainter(self)
painter.drawPixmap(
frame_rect.left(),
frame_rect.top(),
current_frame)
def paintNewFrame(self, frame_num):
# print(frame_num, self.background_movie.state())
# # if self.background_movie.state() == qtg.QMovie.NotRunning:
# # self.background_movie.start()
self.repaint()
def loopMovie(self, state):
if state == qtg.QMovie.NotRunning:
self.background_movie.start()
| 34.70297 | 107 | 0.637233 |
e9e4db76ff5a2d02343a47b5ec50de06fd40a7bb | 120 | py | Python | recommender/redis_connection.py | mbruty/COMP2003-2020-O | 9bfbc312ec45be288d96e4640f8195444e0c63cc | ["Apache-2.0"] | null | null | null | recommender/redis_connection.py | mbruty/COMP2003-2020-O | 9bfbc312ec45be288d96e4640f8195444e0c63cc | ["Apache-2.0"] | 6 | 2021-09-21T17:27:50.000Z | 2022-02-27T13:01:23.000Z | recommender/redis_connection.py | mbruty/COMP2003-2020-O | 9bfbc312ec45be288d96e4640f8195444e0c63cc | ["Apache-2.0"] | null | null | null |
import redis
def get_connection():
# See teams for the connection details
return redis.Redis(host="", password="") | 24 | 42 | 0.725 |
03b4f7e1e9e3fce55fa610863bf00a70b6344e20 | 77,880 | py | Python | sdks/python/apache_beam/runners/portability/fn_api_runner/translations.py | jzju/beam | d9876ea6bdef22b959ded2c16751057a418468bb | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause"] | 5,279 | 2016-12-29T04:00:44.000Z | 2022-03-31T22:56:45.000Z | sdks/python/apache_beam/runners/portability/fn_api_runner/translations.py | jzju/beam | d9876ea6bdef22b959ded2c16751057a418468bb | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause"] | 14,149 | 2016-12-28T00:43:50.000Z | 2022-03-31T23:50:22.000Z | sdks/python/apache_beam/runners/portability/fn_api_runner/translations.py | damondouglas/beam | 4774ac713f427fefb38114f661516faef26d8207 | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause"] | 3,763 | 2016-12-29T04:06:10.000Z | 2022-03-31T22:25:49.000Z |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Pipeline transformations for the FnApiRunner.
"""
# pytype: skip-file
# mypy: check-untyped-defs
import collections
import functools
import itertools
import logging
import operator
from typing import Callable
from typing import Collection
from typing import Container
from typing import DefaultDict
from typing import Dict
from typing import FrozenSet
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
from typing import TypeVar
from typing import Union
from apache_beam import coders
from apache_beam.internal import pickler
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners.worker import bundle_processor
from apache_beam.transforms import combiners
from apache_beam.transforms import core
from apache_beam.utils import proto_utils
T = TypeVar('T')
# This module is experimental. No backwards-compatibility guarantees.
_LOGGER = logging.getLogger(__name__)
KNOWN_COMPOSITES = frozenset([
common_urns.primitives.GROUP_BY_KEY.urn,
common_urns.composites.COMBINE_PER_KEY.urn,
common_urns.primitives.PAR_DO.urn, # After SDF expansion.
])
COMBINE_URNS = frozenset([
common_urns.composites.COMBINE_PER_KEY.urn,
])
PAR_DO_URNS = frozenset([
common_urns.primitives.PAR_DO.urn,
common_urns.sdf_components.PAIR_WITH_RESTRICTION.urn,
common_urns.sdf_components.SPLIT_AND_SIZE_RESTRICTIONS.urn,
common_urns.sdf_components.PROCESS_SIZED_ELEMENTS_AND_RESTRICTIONS.urn,
common_urns.sdf_components.TRUNCATE_SIZED_RESTRICTION.urn,
])
IMPULSE_BUFFER = b'impulse'
# TimerFamilyId is identified by transform name + timer family
TimerFamilyId = Tuple[str, str]
# SideInputId is identified by a consumer ParDo + tag.
SideInputId = Tuple[str, str]
SideInputAccessPattern = beam_runner_api_pb2.FunctionSpec
DataOutput = Dict[str, bytes]
# A map from a PCollection coder ID to a Safe Coder ID
# A safe coder is a coder that can be used on the runner-side of the FnApi.
# A safe coder receives a byte string, and returns a type that can be
# understood by the runner when deserializing.
SafeCoderMapping = Dict[str, str]
# DataSideInput maps SideInputIds to a tuple of the encoded bytes of the side
# input content, and a payload specification regarding the type of side input
# (MultiMap / Iterable).
DataSideInput = Dict[SideInputId, Tuple[bytes, SideInputAccessPattern]]
class Stage(object):
"""A set of Transforms that can be sent to the worker for processing."""
def __init__(
self,
name, # type: str
transforms, # type: List[beam_runner_api_pb2.PTransform]
downstream_side_inputs=None, # type: Optional[FrozenSet[str]]
must_follow=frozenset(), # type: FrozenSet[Stage]
parent=None, # type: Optional[str]
environment=None, # type: Optional[str]
forced_root=False):
self.name = name
self.transforms = transforms
self.downstream_side_inputs = downstream_side_inputs
self.must_follow = must_follow
self.timers = set() # type: Set[TimerFamilyId]
self.parent = parent
if environment is None:
environment = functools.reduce(
self._merge_environments,
(self._extract_environment(t) for t in transforms))
self.environment = environment
self.forced_root = forced_root
def __repr__(self):
must_follow = ', '.join(prev.name for prev in self.must_follow)
if self.downstream_side_inputs is None:
downstream_side_inputs = '<unknown>'
else:
downstream_side_inputs = ', '.join(
str(si) for si in self.downstream_side_inputs)
return "%s\n %s\n must follow: %s\n downstream_side_inputs: %s" % (
self.name,
'\n'.join([
"%s:%s" % (transform.unique_name, transform.spec.urn)
for transform in self.transforms
]),
must_follow,
downstream_side_inputs)
@staticmethod
def _extract_environment(transform):
# type: (beam_runner_api_pb2.PTransform) -> Optional[str]
environment = transform.environment_id
return environment if environment else None
@staticmethod
def _merge_environments(env1, env2):
# type: (Optional[str], Optional[str]) -> Optional[str]
if env1 is None:
return env2
elif env2 is None:
return env1
else:
if env1 != env2:
raise ValueError(
"Incompatible environments: '%s' != '%s'" %
(str(env1).replace('\n', ' '), str(env2).replace('\n', ' ')))
return env1
def can_fuse(self, consumer, context):
# type: (Stage, TransformContext) -> bool
try:
self._merge_environments(self.environment, consumer.environment)
except ValueError:
return False
def no_overlap(a, b):
return not a or not b or not a.intersection(b)
return (
not consumer.forced_root and not self in consumer.must_follow and
self.is_all_sdk_urns(context) and consumer.is_all_sdk_urns(context) and
no_overlap(self.downstream_side_inputs, consumer.side_inputs()))
def fuse(self, other, context):
# type: (Stage, TransformContext) -> Stage
return Stage(
"(%s)+(%s)" % (self.name, other.name),
self.transforms + other.transforms,
union(self.downstream_side_inputs, other.downstream_side_inputs),
union(self.must_follow, other.must_follow),
environment=self._merge_environments(
self.environment, other.environment),
parent=_parent_for_fused_stages([self, other], context),
forced_root=self.forced_root or other.forced_root)
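
  # Illustrative note (not part of the original class): fuse() is only applied
  # when can_fuse() holds -- the environments are compatible, the consumer is
  # not a forced root, the producer is not in the consumer's must_follow set,
  # both stages contain only SDK-executed transforms, and the producer's
  # downstream side inputs do not overlap the consumer's side inputs.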
def is_runner_urn(self, context):
# type: (TransformContext) -> bool
return any(
transform.spec.urn in context.known_runner_urns
for transform in self.transforms)
def is_all_sdk_urns(self, context):
def is_sdk_transform(transform):
# Execute multi-input flattens in the runner.
if transform.spec.urn == common_urns.primitives.FLATTEN.urn and len(
transform.inputs) > 1:
return False
else:
return transform.spec.urn not in context.runner_only_urns
return all(is_sdk_transform(transform) for transform in self.transforms)
def is_stateful(self):
for transform in self.transforms:
if transform.spec.urn in PAR_DO_URNS:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
if payload.state_specs or payload.timer_family_specs:
return True
return False
def side_inputs(self):
# type: () -> Iterator[str]
for transform in self.transforms:
if transform.spec.urn in PAR_DO_URNS:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for side_input in payload.side_inputs:
yield transform.inputs[side_input]
def has_as_main_input(self, pcoll):
for transform in self.transforms:
if transform.spec.urn in PAR_DO_URNS:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
local_side_inputs = payload.side_inputs
else:
local_side_inputs = {}
for local_id, pipeline_id in transform.inputs.items():
if pcoll == pipeline_id and local_id not in local_side_inputs:
return True
def deduplicate_read(self):
# type: () -> None
seen_pcolls = set() # type: Set[str]
new_transforms = []
for transform in self.transforms:
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
pcoll = only_element(list(transform.outputs.items()))[1]
if pcoll in seen_pcolls:
continue
seen_pcolls.add(pcoll)
new_transforms.append(transform)
self.transforms = new_transforms
def executable_stage_transform(
self,
known_runner_urns, # type: FrozenSet[str]
all_consumers,
components # type: beam_runner_api_pb2.Components
):
# type: (...) -> beam_runner_api_pb2.PTransform
if (len(self.transforms) == 1 and
self.transforms[0].spec.urn in known_runner_urns):
return self.transforms[0]
else:
all_inputs = set(
pcoll for t in self.transforms for pcoll in t.inputs.values())
all_outputs = set(
pcoll for t in self.transforms for pcoll in t.outputs.values())
internal_transforms = set(id(t) for t in self.transforms)
external_outputs = [
pcoll for pcoll in all_outputs
if all_consumers[pcoll] - internal_transforms
]
stage_components = beam_runner_api_pb2.Components()
stage_components.CopyFrom(components)
# Only keep the PCollections referenced in this stage.
stage_components.pcollections.clear()
for pcoll_id in all_inputs.union(all_outputs):
stage_components.pcollections[pcoll_id].CopyFrom(
components.pcollections[pcoll_id])
# Only keep the transforms in this stage.
# Also gather up payload data as we iterate over the transforms.
stage_components.transforms.clear()
main_inputs = set() # type: Set[str]
side_inputs = []
user_states = []
timers = []
for ix, transform in enumerate(self.transforms):
transform_id = 'transform_%d' % ix
if transform.spec.urn == common_urns.primitives.PAR_DO.urn:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag in payload.side_inputs.keys():
side_inputs.append(
beam_runner_api_pb2.ExecutableStagePayload.SideInputId(
transform_id=transform_id, local_name=tag))
for tag in payload.state_specs.keys():
user_states.append(
beam_runner_api_pb2.ExecutableStagePayload.UserStateId(
transform_id=transform_id, local_name=tag))
for tag in payload.timer_family_specs.keys():
timers.append(
beam_runner_api_pb2.ExecutableStagePayload.TimerId(
transform_id=transform_id, local_name=tag))
main_inputs.update(
pcoll_id for tag,
pcoll_id in transform.inputs.items()
if tag not in payload.side_inputs)
else:
main_inputs.update(transform.inputs.values())
stage_components.transforms[transform_id].CopyFrom(transform)
main_input_id = only_element(main_inputs - all_outputs)
named_inputs = dict({
'%s:%s' % (side.transform_id, side.local_name):
stage_components.transforms[side.transform_id].inputs[side.local_name]
for side in side_inputs
},
main_input=main_input_id)
# at this point we should have resolved an environment, as the key of
# components.environments cannot be None.
assert self.environment is not None
exec_payload = beam_runner_api_pb2.ExecutableStagePayload(
environment=components.environments[self.environment],
input=main_input_id,
outputs=external_outputs,
transforms=stage_components.transforms.keys(),
components=stage_components,
side_inputs=side_inputs,
user_states=user_states,
timers=timers)
return beam_runner_api_pb2.PTransform(
unique_name=unique_name(None, self.name),
spec=beam_runner_api_pb2.FunctionSpec(
urn='beam:runner:executable_stage:v1',
payload=exec_payload.SerializeToString()),
inputs=named_inputs,
outputs={
'output_%d' % ix: pcoll
for ix,
pcoll in enumerate(external_outputs)
},
)
def memoize_on_instance(f):
missing = object()
def wrapper(self, *args):
try:
cache = getattr(self, '_cache_%s' % f.__name__)
except AttributeError:
cache = {}
setattr(self, '_cache_%s' % f.__name__, cache)
result = cache.get(args, missing)
if result is missing:
result = cache[args] = f(self, *args)
return result
return wrapper
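

# Illustrative note (not part of the original module): memoize_on_instance
# caches results per (instance, args) pair, so e.g. repeated calls to
# TransformContext.maybe_length_prefixed_coder with the same coder id reuse
# the first computed '<coder_id>_length_prefixed' coder instead of minting a
# new one on every call.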
class TransformContext(object):
_COMMON_CODER_URNS = set(
value.urn for (key, value) in common_urns.coders.__dict__.items()
if not key.startswith('_')
# Length prefix Rows rather than re-coding them.
) - set([common_urns.coders.ROW.urn])
_REQUIRED_CODER_URNS = set([
common_urns.coders.WINDOWED_VALUE.urn,
# For impulse.
common_urns.coders.BYTES.urn,
common_urns.coders.GLOBAL_WINDOW.urn,
# For GBK.
common_urns.coders.KV.urn,
common_urns.coders.ITERABLE.urn,
# For SDF.
common_urns.coders.DOUBLE.urn,
# For timers.
common_urns.coders.TIMER.urn,
# For everything else.
common_urns.coders.LENGTH_PREFIX.urn,
common_urns.coders.CUSTOM_WINDOW.urn,
])
def __init__(
self,
components, # type: beam_runner_api_pb2.Components
known_runner_urns, # type: FrozenSet[str]
use_state_iterables=False,
is_drain=False):
self.components = components
self.known_runner_urns = known_runner_urns
self.runner_only_urns = known_runner_urns - frozenset(
[common_urns.primitives.FLATTEN.urn])
self._known_coder_urns = set.union(
# Those which are required.
self._REQUIRED_CODER_URNS,
# Those common coders which are understood by all environments.
self._COMMON_CODER_URNS.intersection(
*(
set(env.capabilities)
for env in self.components.environments.values())))
self.use_state_iterables = use_state_iterables
self.is_drain = is_drain
# ok to pass None for context because BytesCoder has no components
coder_proto = coders.BytesCoder().to_runner_api(
None) # type: ignore[arg-type]
self.bytes_coder_id = self.add_or_get_coder_id(coder_proto, 'bytes_coder')
self.safe_coders: SafeCoderMapping = {
self.bytes_coder_id: self.bytes_coder_id
}
# A map of PCollection ID to Coder ID.
self.data_channel_coders = {} # type: Dict[str, str]
def add_or_get_coder_id(
self,
coder_proto, # type: beam_runner_api_pb2.Coder
coder_prefix='coder'):
# type: (...) -> str
for coder_id, coder in self.components.coders.items():
if coder == coder_proto:
return coder_id
new_coder_id = unique_name(self.components.coders, coder_prefix)
self.components.coders[new_coder_id].CopyFrom(coder_proto)
return new_coder_id
def add_data_channel_coder(self, pcoll_id):
pcoll = self.components.pcollections[pcoll_id]
proto = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.WINDOWED_VALUE.urn),
component_coder_ids=[
pcoll.coder_id,
self.components.windowing_strategies[
pcoll.windowing_strategy_id].window_coder_id
])
self.data_channel_coders[pcoll_id] = self.maybe_length_prefixed_coder(
self.add_or_get_coder_id(proto, pcoll.coder_id + '_windowed'))
@memoize_on_instance
def with_state_iterables(self, coder_id):
# type: (str) -> str
coder = self.components.coders[coder_id]
if coder.spec.urn == common_urns.coders.ITERABLE.urn:
new_coder_id = unique_name(
self.components.coders, coder_id + '_state_backed')
new_coder = self.components.coders[new_coder_id]
new_coder.CopyFrom(coder)
new_coder.spec.urn = common_urns.coders.STATE_BACKED_ITERABLE.urn
new_coder.spec.payload = b'1'
new_coder.component_coder_ids[0] = self.with_state_iterables(
coder.component_coder_ids[0])
return new_coder_id
else:
new_component_ids = [
self.with_state_iterables(c) for c in coder.component_coder_ids
]
if new_component_ids == coder.component_coder_ids:
return coder_id
else:
new_coder_id = unique_name(
self.components.coders, coder_id + '_state_backed')
self.components.coders[new_coder_id].CopyFrom(
beam_runner_api_pb2.Coder(
spec=coder.spec, component_coder_ids=new_component_ids))
return new_coder_id
@memoize_on_instance
def maybe_length_prefixed_coder(self, coder_id):
# type: (str) -> str
if coder_id in self.safe_coders:
return coder_id
(maybe_length_prefixed_id,
safe_id) = self.maybe_length_prefixed_and_safe_coder(coder_id)
self.safe_coders[maybe_length_prefixed_id] = safe_id
return maybe_length_prefixed_id
@memoize_on_instance
def maybe_length_prefixed_and_safe_coder(self, coder_id):
# type: (str) -> Tuple[str, str]
coder = self.components.coders[coder_id]
if coder.spec.urn == common_urns.coders.LENGTH_PREFIX.urn:
return coder_id, self.bytes_coder_id
elif coder.spec.urn in self._known_coder_urns:
new_component_ids = [
self.maybe_length_prefixed_coder(c) for c in coder.component_coder_ids
]
if new_component_ids == coder.component_coder_ids:
new_coder_id = coder_id
else:
new_coder_id = unique_name(
self.components.coders, coder_id + '_length_prefixed')
self.components.coders[new_coder_id].CopyFrom(
beam_runner_api_pb2.Coder(
spec=coder.spec, component_coder_ids=new_component_ids))
safe_component_ids = [self.safe_coders[c] for c in new_component_ids]
if safe_component_ids == coder.component_coder_ids:
safe_coder_id = coder_id
else:
safe_coder_id = unique_name(self.components.coders, coder_id + '_safe')
self.components.coders[safe_coder_id].CopyFrom(
beam_runner_api_pb2.Coder(
spec=coder.spec, component_coder_ids=safe_component_ids))
return new_coder_id, safe_coder_id
else:
new_coder_id = unique_name(
self.components.coders, coder_id + '_length_prefixed')
self.components.coders[new_coder_id].CopyFrom(
beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.LENGTH_PREFIX.urn),
component_coder_ids=[coder_id]))
return new_coder_id, self.bytes_coder_id
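
  # Illustrative sketch (not part of the original class): for a KV coder whose
  # key coder is understood by every environment but whose value coder has a
  # custom URN, this returns roughly
  #   (KV<key, length_prefix(value)>, KV<key, bytes>)
  # i.e. the SDK keeps the real value coder behind a length prefix while the
  # runner-side "safe" coder only has to handle opaque bytes.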
def length_prefix_pcoll_coders(self, pcoll_id):
# type: (str) -> None
self.components.pcollections[pcoll_id].coder_id = (
self.maybe_length_prefixed_coder(
self.components.pcollections[pcoll_id].coder_id))
@memoize_on_instance
def parents_map(self):
return {
child: parent
for (parent, transform) in self.components.transforms.items()
for child in transform.subtransforms
}
def leaf_transform_stages(
root_ids, # type: Iterable[str]
components, # type: beam_runner_api_pb2.Components
parent=None, # type: Optional[str]
known_composites=KNOWN_COMPOSITES # type: FrozenSet[str]
):
# type: (...) -> Iterator[Stage]
for root_id in root_ids:
root = components.transforms[root_id]
if root.spec.urn in known_composites:
yield Stage(root_id, [root], parent=parent)
elif not root.subtransforms:
# Make sure its outputs are not a subset of its inputs.
if set(root.outputs.values()) - set(root.inputs.values()):
yield Stage(root_id, [root], parent=parent)
else:
for stage in leaf_transform_stages(root.subtransforms,
components,
root_id,
known_composites):
yield stage
def pipeline_from_stages(
pipeline_proto, # type: beam_runner_api_pb2.Pipeline
stages, # type: Iterable[Stage]
known_runner_urns, # type: FrozenSet[str]
partial # type: bool
):
# type: (...) -> beam_runner_api_pb2.Pipeline
# In case it was a generator that mutates components as it
# produces outputs (as is the case with most transformations).
stages = list(stages)
new_proto = beam_runner_api_pb2.Pipeline()
new_proto.CopyFrom(pipeline_proto)
components = new_proto.components
components.transforms.clear()
components.pcollections.clear()
roots = set()
parents = {
child: parent
for parent,
proto in pipeline_proto.components.transforms.items()
for child in proto.subtransforms
}
def copy_output_pcollections(transform):
for pcoll_id in transform.outputs.values():
components.pcollections[pcoll_id].CopyFrom(
pipeline_proto.components.pcollections[pcoll_id])
def add_parent(child, parent):
if parent is None:
roots.add(child)
else:
if (parent not in components.transforms and
parent in pipeline_proto.components.transforms):
components.transforms[parent].CopyFrom(
pipeline_proto.components.transforms[parent])
copy_output_pcollections(components.transforms[parent])
del components.transforms[parent].subtransforms[:]
# Ensure that child is the last item in the parent's subtransforms.
# If the stages were previously sorted into topological order using
# sort_stages, this ensures that the parent transforms are also
# added in topological order.
if child in components.transforms[parent].subtransforms:
components.transforms[parent].subtransforms.remove(child)
components.transforms[parent].subtransforms.append(child)
add_parent(parent, parents.get(parent))
def copy_subtransforms(transform):
for subtransform_id in transform.subtransforms:
if subtransform_id not in pipeline_proto.components.transforms:
raise RuntimeError(
'Could not find subtransform to copy: ' + subtransform_id)
subtransform = pipeline_proto.components.transforms[subtransform_id]
components.transforms[subtransform_id].CopyFrom(subtransform)
copy_output_pcollections(components.transforms[subtransform_id])
copy_subtransforms(subtransform)
all_consumers = collections.defaultdict(
set) # type: DefaultDict[str, Set[int]]
for stage in stages:
for transform in stage.transforms:
for pcoll in transform.inputs.values():
all_consumers[pcoll].add(id(transform))
for stage in stages:
if partial:
transform = only_element(stage.transforms)
copy_subtransforms(transform)
else:
transform = stage.executable_stage_transform(
known_runner_urns, all_consumers, pipeline_proto.components)
transform_id = unique_name(components.transforms, stage.name)
components.transforms[transform_id].CopyFrom(transform)
copy_output_pcollections(transform)
add_parent(transform_id, stage.parent)
del new_proto.root_transform_ids[:]
new_proto.root_transform_ids.extend(roots)
return new_proto
def create_and_optimize_stages(
pipeline_proto, # type: beam_runner_api_pb2.Pipeline
phases,
known_runner_urns, # type: FrozenSet[str]
use_state_iterables=False,
is_drain=False):
# type: (...) -> Tuple[TransformContext, List[Stage]]
"""Create a set of stages given a pipeline proto, and set of optimizations.
Args:
pipeline_proto (beam_runner_api_pb2.Pipeline): A pipeline defined by a user.
phases (callable): Each phase identifies a specific transformation to be
applied to the pipeline graph. Existing phases are defined in this file,
and receive a list of stages, and a pipeline context. Some available
transformations are ``lift_combiners``, ``expand_sdf``, ``expand_gbk``,
etc.
Returns:
A tuple with a pipeline context, and a list of stages (i.e. an optimized
graph).
"""
pipeline_context = TransformContext(
pipeline_proto.components,
known_runner_urns,
use_state_iterables=use_state_iterables,
is_drain=is_drain)
# Initial set of stages are singleton leaf transforms.
stages = list(
leaf_transform_stages(
pipeline_proto.root_transform_ids,
pipeline_proto.components,
union(known_runner_urns, KNOWN_COMPOSITES)))
# Apply each phase in order.
for phase in phases:
_LOGGER.info('%s %s %s', '=' * 20, phase, '=' * 20)
stages = list(phase(stages, pipeline_context))
_LOGGER.debug('%s %s' % (len(stages), [len(s.transforms) for s in stages]))
_LOGGER.debug('Stages: %s', [str(s) for s in stages])
# Return the (possibly mutated) context and ordered set of stages.
return pipeline_context, stages
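

# Minimal sketch of a phase list (illustrative only; ``lift_combiners`` and
# ``expand_gbk`` are the phases mentioned in the docstring above, defined
# elsewhere in this module):
#
#   _, stages = create_and_optimize_stages(
#       pipeline_proto,
#       phases=[
#           annotate_downstream_side_inputs,
#           fix_side_input_pcoll_coders,
#           lift_combiners,
#           expand_gbk,
#       ],
#       known_runner_urns=frozenset(
#           [common_urns.primitives.GROUP_BY_KEY.urn]))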
def optimize_pipeline(
pipeline_proto, # type: beam_runner_api_pb2.Pipeline
phases,
known_runner_urns, # type: FrozenSet[str]
partial=False,
**kwargs):
unused_context, stages = create_and_optimize_stages(
pipeline_proto,
phases,
known_runner_urns,
**kwargs)
return pipeline_from_stages(
pipeline_proto, stages, known_runner_urns, partial)
# Optimization stages.
def annotate_downstream_side_inputs(stages, pipeline_context):
# type: (Iterable[Stage], TransformContext) -> Iterable[Stage]
"""Annotate each stage with fusion-prohibiting information.
Each stage is annotated with the (transitive) set of pcollections that
depend on this stage that are also used later in the pipeline as a
side input.
While theoretically this could result in O(n^2) annotations, the size of
each set is bounded by the number of side inputs (typically much smaller
than the number of total nodes) and the number of *distinct* side-input
sets is also generally small (and shared due to the use of union
defined above).
This representation is also amenable to simple recomputation on fusion.
"""
consumers = collections.defaultdict(
list) # type: DefaultDict[str, List[Stage]]
def get_all_side_inputs():
# type: () -> Set[str]
all_side_inputs = set() # type: Set[str]
for stage in stages:
for transform in stage.transforms:
for input in transform.inputs.values():
consumers[input].append(stage)
for si in stage.side_inputs():
all_side_inputs.add(si)
return all_side_inputs
all_side_inputs = frozenset(get_all_side_inputs())
downstream_side_inputs_by_stage = {} # type: Dict[Stage, FrozenSet[str]]
def compute_downstream_side_inputs(stage):
# type: (Stage) -> FrozenSet[str]
if stage not in downstream_side_inputs_by_stage:
downstream_side_inputs = frozenset() # type: FrozenSet[str]
for transform in stage.transforms:
for output in transform.outputs.values():
if output in all_side_inputs:
downstream_side_inputs = union(
downstream_side_inputs, frozenset([output]))
for consumer in consumers[output]:
downstream_side_inputs = union(
downstream_side_inputs,
compute_downstream_side_inputs(consumer))
downstream_side_inputs_by_stage[stage] = downstream_side_inputs
return downstream_side_inputs_by_stage[stage]
for stage in stages:
stage.downstream_side_inputs = compute_downstream_side_inputs(stage)
return stages
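

# Worked sketch (illustrative, not part of the original module): if stage A
# produces PCollection `p`, stage B maps `p` to `q`, and stage C consumes `q`
# as a side input, then `q` is in all_side_inputs and both A and B end up with
# downstream_side_inputs == {q}. A later fusion phase can then refuse to fuse
# B with C, because C must not start before its side input `q` is fully
# computed.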
def annotate_stateful_dofns_as_roots(stages, pipeline_context):
# type: (Iterable[Stage], TransformContext) -> Iterable[Stage]
for stage in stages:
for transform in stage.transforms:
if transform.spec.urn == common_urns.primitives.PAR_DO.urn:
pardo_payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
if pardo_payload.state_specs or pardo_payload.timer_family_specs:
stage.forced_root = True
yield stage
def fix_side_input_pcoll_coders(stages, pipeline_context):
# type: (Iterable[Stage], TransformContext) -> Iterable[Stage]
"""Length prefix side input PCollection coders.
"""
for stage in stages:
for si in stage.side_inputs():
pipeline_context.length_prefix_pcoll_coders(si)
return stages
def _group_stages_by_key(stages, get_stage_key):
grouped_stages = collections.defaultdict(list)
stages_with_none_key = []
for stage in stages:
stage_key = get_stage_key(stage)
if stage_key is None:
stages_with_none_key.append(stage)
else:
grouped_stages[stage_key].append(stage)
return (grouped_stages, stages_with_none_key)
def _group_stages_with_limit(stages, get_limit):
# type: (Iterable[Stage], Callable[[str], int]) -> Iterable[Collection[Stage]]
stages_with_limit = [(stage, get_limit(stage.name)) for stage in stages]
group: List[Stage] = []
group_limit = 0
for stage, limit in sorted(stages_with_limit, key=operator.itemgetter(1)):
if limit < 1:
raise Exception(
'expected get_limit to return an integer >= 1, '
'instead got: %d for stage: %s' % (limit, stage))
if not group:
group_limit = limit
assert len(group) < group_limit
group.append(stage)
if len(group) >= group_limit:
yield group
group = []
if group:
yield group
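

# Illustrative behaviour (not part of the original module): with five stages
# whose get_limit(...) all return 2, the stages are emitted in groups of sizes
# 2, 2 and 1; a stage whose limit is 1 always ends up in a group by itself,
# because limits are processed in ascending order and each group's size cap is
# taken from its first member.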
def _remap_input_pcolls(transform, pcoll_id_remap):
for input_key in list(transform.inputs.keys()):
if transform.inputs[input_key] in pcoll_id_remap:
transform.inputs[input_key] = pcoll_id_remap[transform.inputs[input_key]]
def _make_pack_name(names):
"""Return the packed Transform or Stage name.
The output name will contain the input names' common prefix, the infix
'/Packed', and the input names' suffixes in square brackets.
For example, if the input names are 'a/b/c1/d1' and 'a/b/c2/d2, then
the output name is 'a/b/Packed[c1_d1, c2_d2]'.
"""
assert names
tokens_in_names = [name.split('/') for name in names]
common_prefix_tokens = []
# Find the longest common prefix of tokens.
while True:
first_token_in_names = set()
for tokens in tokens_in_names:
if not tokens:
break
first_token_in_names.add(tokens[0])
if len(first_token_in_names) != 1:
break
common_prefix_tokens.append(next(iter(first_token_in_names)))
for tokens in tokens_in_names:
tokens.pop(0)
common_prefix_tokens.append('Packed')
common_prefix = '/'.join(common_prefix_tokens)
suffixes = ['_'.join(tokens) for tokens in tokens_in_names]
return '%s[%s]' % (common_prefix, ', '.join(suffixes))
def _eliminate_common_key_with_none(stages, context, can_pack=lambda s: True):
# type: (Iterable[Stage], TransformContext, Callable[[str], Union[bool, int]]) -> Iterable[Stage]
"""Runs common subexpression elimination for sibling KeyWithNone stages.
If multiple KeyWithNone stages share a common input, then all but one stages
will be eliminated along with their output PCollections. Transforms that
originally read input from the output PCollection of the eliminated
KeyWithNone stages will be remapped to read input from the output PCollection
of the remaining KeyWithNone stage.
"""
# Partition stages by whether they are eligible for common KeyWithNone
# elimination, and group eligible KeyWithNone stages by parent and
# environment.
def get_stage_key(stage):
if len(stage.transforms) == 1 and can_pack(stage.name):
transform = only_transform(stage.transforms)
if (transform.spec.urn == common_urns.primitives.PAR_DO.urn and
len(transform.inputs) == 1 and len(transform.outputs) == 1):
pardo_payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
if pardo_payload.do_fn.urn == python_urns.KEY_WITH_NONE_DOFN:
return (only_element(transform.inputs.values()), stage.environment)
return None
grouped_eligible_stages, ineligible_stages = _group_stages_by_key(
stages, get_stage_key)
# Eliminate stages and build the PCollection remapping dictionary.
pcoll_id_remap = {}
remaining_stages = []
for sibling_stages in grouped_eligible_stages.values():
if len(sibling_stages) > 1:
output_pcoll_ids = [
only_element(stage.transforms[0].outputs.values())
for stage in sibling_stages
]
parent = _parent_for_fused_stages(sibling_stages, context)
for to_delete_pcoll_id in output_pcoll_ids[1:]:
pcoll_id_remap[to_delete_pcoll_id] = output_pcoll_ids[0]
del context.components.pcollections[to_delete_pcoll_id]
sibling_stages[0].parent = parent
sibling_stages[0].name = _make_pack_name(
stage.name for stage in sibling_stages)
only_transform(
sibling_stages[0].transforms).unique_name = _make_pack_name(
only_transform(stage.transforms).unique_name
for stage in sibling_stages)
remaining_stages.append(sibling_stages[0])
# Remap all transforms in components.
for transform in context.components.transforms.values():
_remap_input_pcolls(transform, pcoll_id_remap)
# Yield stages while remapping input PCollections if needed.
stages_to_yield = itertools.chain(ineligible_stages, remaining_stages)
for stage in stages_to_yield:
transform = only_transform(stage.transforms)
_remap_input_pcolls(transform, pcoll_id_remap)
yield stage
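

# Illustrative sketch (assumed pipeline shape, not part of the original
# module): if two CombineGlobally branches each apply a KeyWithNone ParDo to
# the same input PCollection, only one KeyWithNone stage is kept; the other
# stages' output PCollections are deleted and every transform that read from
# them is remapped (via pcoll_id_remap) to the surviving stage's output.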
_DEFAULT_PACK_COMBINERS_LIMIT = 128
def pack_per_key_combiners(stages, context, can_pack=lambda s: True):
# type: (Iterable[Stage], TransformContext, Callable[[str], Union[bool, int]]) -> Iterator[Stage]
"""Packs sibling CombinePerKey stages into a single CombinePerKey.
If CombinePerKey stages have a common input, one input each, and one output
each, pack the stages into a single stage that runs all CombinePerKeys and
outputs resulting tuples to a new PCollection. A subsequent stage unpacks
tuples from this PCollection and sends them to the original output
PCollections.
"""
class _UnpackFn(core.DoFn):
"""A DoFn that unpacks a packed to multiple tagged outputs.
Example:
tags = (T1, T2, ...)
input = (K, (V1, V2, ...))
output = TaggedOutput(T1, (K, V1)), TaggedOutput(T2, (K, V1)), ...
"""
def __init__(self, tags):
self._tags = tags
def process(self, element):
key, values = element
return [
core.pvalue.TaggedOutput(tag, (key, value)) for tag,
value in zip(self._tags, values)
]
def _get_fallback_coder_id():
return context.add_or_get_coder_id(
# passing None works here because there are no component coders
coders.registry.get_coder(object).to_runner_api(None)) # type: ignore[arg-type]
def _get_component_coder_id_from_kv_coder(coder, index):
assert index < 2
if coder.spec.urn == common_urns.coders.KV.urn and len(
coder.component_coder_ids) == 2:
return coder.component_coder_ids[index]
return _get_fallback_coder_id()
def _get_key_coder_id_from_kv_coder(coder):
return _get_component_coder_id_from_kv_coder(coder, 0)
def _get_value_coder_id_from_kv_coder(coder):
return _get_component_coder_id_from_kv_coder(coder, 1)
def _try_fuse_stages(a, b):
if a.can_fuse(b, context):
return a.fuse(b, context)
else:
raise ValueError
def _get_limit(stage_name):
result = can_pack(stage_name)
if result is True:
return _DEFAULT_PACK_COMBINERS_LIMIT
else:
return int(result)
# Partition stages by whether they are eligible for CombinePerKey packing
# and group eligible CombinePerKey stages by parent and environment.
def get_stage_key(stage):
if (len(stage.transforms) == 1 and can_pack(stage.name) and
stage.environment is not None and python_urns.PACKED_COMBINE_FN in
context.components.environments[stage.environment].capabilities):
transform = only_transform(stage.transforms)
if (transform.spec.urn == common_urns.composites.COMBINE_PER_KEY.urn and
len(transform.inputs) == 1 and len(transform.outputs) == 1):
combine_payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.CombinePayload)
if combine_payload.combine_fn.urn == python_urns.PICKLED_COMBINE_FN:
return (only_element(transform.inputs.values()), stage.environment)
return None
grouped_eligible_stages, ineligible_stages = _group_stages_by_key(
stages, get_stage_key)
for stage in ineligible_stages:
yield stage
grouped_packable_stages = [(stage_key, subgrouped_stages) for stage_key,
grouped_stages in grouped_eligible_stages.items()
for subgrouped_stages in _group_stages_with_limit(
grouped_stages, _get_limit)]
for stage_key, packable_stages in grouped_packable_stages:
input_pcoll_id, _ = stage_key
try:
if not len(packable_stages) > 1:
raise ValueError('Only one stage in this group: Skipping stage packing')
# Fused stage is used as template and is not yielded.
fused_stage = functools.reduce(_try_fuse_stages, packable_stages)
except ValueError:
# Skip packing stages in this group.
# Yield the stages unmodified, and then continue to the next group.
for stage in packable_stages:
yield stage
continue
transforms = [only_transform(stage.transforms) for stage in packable_stages]
combine_payloads = [
proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.CombinePayload)
for transform in transforms
]
output_pcoll_ids = [
only_element(transform.outputs.values()) for transform in transforms
]
# Build accumulator coder for (acc1, acc2, ...)
accumulator_coder_ids = [
combine_payload.accumulator_coder_id
for combine_payload in combine_payloads
]
tuple_accumulator_coder_id = context.add_or_get_coder_id(
beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.FunctionSpec(urn=python_urns.TUPLE_CODER),
component_coder_ids=accumulator_coder_ids))
# Build packed output coder for (key, (out1, out2, ...))
input_kv_coder_id = context.components.pcollections[input_pcoll_id].coder_id
key_coder_id = _get_key_coder_id_from_kv_coder(
context.components.coders[input_kv_coder_id])
output_kv_coder_ids = [
context.components.pcollections[output_pcoll_id].coder_id
for output_pcoll_id in output_pcoll_ids
]
output_value_coder_ids = [
_get_value_coder_id_from_kv_coder(
context.components.coders[output_kv_coder_id])
for output_kv_coder_id in output_kv_coder_ids
]
pack_output_value_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.FunctionSpec(urn=python_urns.TUPLE_CODER),
component_coder_ids=output_value_coder_ids)
pack_output_value_coder_id = context.add_or_get_coder_id(
pack_output_value_coder)
pack_output_kv_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.FunctionSpec(urn=common_urns.coders.KV.urn),
component_coder_ids=[key_coder_id, pack_output_value_coder_id])
pack_output_kv_coder_id = context.add_or_get_coder_id(pack_output_kv_coder)
pack_stage_name = _make_pack_name([stage.name for stage in packable_stages])
pack_transform_name = _make_pack_name([
only_transform(stage.transforms).unique_name
for stage in packable_stages
])
pack_pcoll_id = unique_name(context.components.pcollections, 'pcollection')
input_pcoll = context.components.pcollections[input_pcoll_id]
context.components.pcollections[pack_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=pack_transform_name + '/Pack.out',
coder_id=pack_output_kv_coder_id,
windowing_strategy_id=input_pcoll.windowing_strategy_id,
is_bounded=input_pcoll.is_bounded))
# Set up Pack stage.
# TODO(BEAM-7746): classes that inherit from RunnerApiFn are expected to
# accept a PipelineContext for from_runner_api/to_runner_api. Determine
# how to accommodate this.
pack_combine_fn = combiners.SingleInputTupleCombineFn(
*[
core.CombineFn.from_runner_api(combine_payload.combine_fn, context) # type: ignore[arg-type]
for combine_payload in combine_payloads
]).to_runner_api(context) # type: ignore[arg-type]
pack_transform = beam_runner_api_pb2.PTransform(
unique_name=pack_transform_name + '/Pack',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.composites.COMBINE_PER_KEY.urn,
payload=beam_runner_api_pb2.CombinePayload(
combine_fn=pack_combine_fn,
accumulator_coder_id=tuple_accumulator_coder_id).
SerializeToString()),
inputs={'in': input_pcoll_id},
# 'None' single output key follows convention for CombinePerKey.
outputs={'None': pack_pcoll_id},
environment_id=fused_stage.environment)
pack_stage = Stage(
pack_stage_name + '/Pack', [pack_transform],
downstream_side_inputs=fused_stage.downstream_side_inputs,
must_follow=fused_stage.must_follow,
parent=fused_stage.parent,
environment=fused_stage.environment)
yield pack_stage
# Set up Unpack stage
tags = [str(i) for i in range(len(output_pcoll_ids))]
pickled_do_fn_data = pickler.dumps((_UnpackFn(tags), (), {}, [], None))
unpack_transform = beam_runner_api_pb2.PTransform(
unique_name=pack_transform_name + '/Unpack',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.primitives.PAR_DO.urn,
payload=beam_runner_api_pb2.ParDoPayload(
do_fn=beam_runner_api_pb2.FunctionSpec(
urn=python_urns.PICKLED_DOFN_INFO,
payload=pickled_do_fn_data)).SerializeToString()),
inputs={'in': pack_pcoll_id},
outputs=dict(zip(tags, output_pcoll_ids)),
environment_id=fused_stage.environment)
unpack_stage = Stage(
pack_stage_name + '/Unpack', [unpack_transform],
downstream_side_inputs=fused_stage.downstream_side_inputs,
must_follow=fused_stage.must_follow,
parent=fused_stage.parent,
environment=fused_stage.environment)
yield unpack_stage
def pack_combiners(stages, context, can_pack=None):
# type: (Iterable[Stage], TransformContext, Optional[Callable[[str], Union[bool, int]]]) -> Iterator[Stage]
if can_pack is None:
can_pack_names = {} # type: Dict[str, Union[bool, int]]
parents = context.parents_map()
def can_pack_fn(name: str) -> Union[bool, int]:
if name in can_pack_names:
return can_pack_names[name]
else:
transform = context.components.transforms[name]
if python_urns.APPLY_COMBINER_PACKING in transform.annotations:
try:
result = int(
transform.annotations[python_urns.APPLY_COMBINER_PACKING])
except ValueError:
result = True
elif name in parents:
result = can_pack_fn(parents[name])
else:
result = False
can_pack_names[name] = result
return result
can_pack = can_pack_fn
yield from pack_per_key_combiners(
_eliminate_common_key_with_none(stages, context, can_pack),
context,
can_pack)
def lift_combiners(stages, context):
# type: (List[Stage], TransformContext) -> Iterator[Stage]
"""Expands CombinePerKey into pre- and post-grouping stages.
... -> CombinePerKey -> ...
becomes
... -> PreCombine -> GBK -> MergeAccumulators -> ExtractOutput -> ...
"""
def is_compatible_with_combiner_lifting(trigger):
'''Returns whether this trigger is compatible with combiner lifting.
Certain triggers, such as those that fire after a certain number of
elements, need to observe every element, and as such are incompatible
with combiner lifting (which may aggregate several elements into one
before they reach the triggering code after shuffle).
'''
if trigger is None:
return True
elif trigger.WhichOneof('trigger') in (
'default',
'always',
'never',
'after_processing_time',
'after_synchronized_processing_time'):
return True
elif trigger.HasField('element_count'):
return trigger.element_count.element_count == 1
elif trigger.HasField('after_end_of_window'):
return is_compatible_with_combiner_lifting(
trigger.after_end_of_window.early_firings
) and is_compatible_with_combiner_lifting(
trigger.after_end_of_window.late_firings)
elif trigger.HasField('after_any'):
return all(
is_compatible_with_combiner_lifting(t)
for t in trigger.after_any.subtriggers)
elif trigger.HasField('repeat'):
return is_compatible_with_combiner_lifting(trigger.repeat.subtrigger)
else:
return False
def can_lift(combine_per_key_transform):
windowing = context.components.windowing_strategies[
context.components.pcollections[only_element(
list(combine_per_key_transform.inputs.values())
)].windowing_strategy_id]
return is_compatible_with_combiner_lifting(windowing.trigger)
def make_stage(base_stage, transform):
# type: (Stage, beam_runner_api_pb2.PTransform) -> Stage
return Stage(
transform.unique_name, [transform],
downstream_side_inputs=base_stage.downstream_side_inputs,
must_follow=base_stage.must_follow,
parent=base_stage.name,
environment=base_stage.environment)
def lifted_stages(stage):
transform = stage.transforms[0]
combine_payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.CombinePayload)
input_pcoll = context.components.pcollections[only_element(
list(transform.inputs.values()))]
output_pcoll = context.components.pcollections[only_element(
list(transform.outputs.values()))]
element_coder_id = input_pcoll.coder_id
element_coder = context.components.coders[element_coder_id]
key_coder_id, _ = element_coder.component_coder_ids
accumulator_coder_id = combine_payload.accumulator_coder_id
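# Build the intermediate coders used between the lifted stages:
# KV<key, accumulator> for the Precombine and Merge outputs,
# iterable<accumulator> and KV<key, iterable<accumulator>> for the Group output.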
key_accumulator_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.FunctionSpec(urn=common_urns.coders.KV.urn),
component_coder_ids=[key_coder_id, accumulator_coder_id])
key_accumulator_coder_id = context.add_or_get_coder_id(
key_accumulator_coder)
accumulator_iter_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.ITERABLE.urn),
component_coder_ids=[accumulator_coder_id])
accumulator_iter_coder_id = context.add_or_get_coder_id(
accumulator_iter_coder)
key_accumulator_iter_coder = beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.FunctionSpec(urn=common_urns.coders.KV.urn),
component_coder_ids=[key_coder_id, accumulator_iter_coder_id])
key_accumulator_iter_coder_id = context.add_or_get_coder_id(
key_accumulator_iter_coder)
precombined_pcoll_id = unique_name(
context.components.pcollections, 'pcollection')
context.components.pcollections[precombined_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=transform.unique_name + '/Precombine.out',
coder_id=key_accumulator_coder_id,
windowing_strategy_id=input_pcoll.windowing_strategy_id,
is_bounded=input_pcoll.is_bounded))
grouped_pcoll_id = unique_name(
context.components.pcollections, 'pcollection')
context.components.pcollections[grouped_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=transform.unique_name + '/Group.out',
coder_id=key_accumulator_iter_coder_id,
windowing_strategy_id=output_pcoll.windowing_strategy_id,
is_bounded=output_pcoll.is_bounded))
merged_pcoll_id = unique_name(
context.components.pcollections, 'pcollection')
context.components.pcollections[merged_pcoll_id].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=transform.unique_name + '/Merge.out',
coder_id=key_accumulator_coder_id,
windowing_strategy_id=output_pcoll.windowing_strategy_id,
is_bounded=output_pcoll.is_bounded))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Precombine',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.combine_components.COMBINE_PER_KEY_PRECOMBINE.
urn,
payload=transform.spec.payload),
inputs=transform.inputs,
outputs={'out': precombined_pcoll_id},
environment_id=transform.environment_id))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Group',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.primitives.GROUP_BY_KEY.urn),
inputs={'in': precombined_pcoll_id},
outputs={'out': grouped_pcoll_id}))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Merge',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.combine_components.
COMBINE_PER_KEY_MERGE_ACCUMULATORS.urn,
payload=transform.spec.payload),
inputs={'in': grouped_pcoll_id},
outputs={'out': merged_pcoll_id},
environment_id=transform.environment_id))
yield make_stage(
stage,
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/ExtractOutputs',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.combine_components.
COMBINE_PER_KEY_EXTRACT_OUTPUTS.urn,
payload=transform.spec.payload),
inputs={'in': merged_pcoll_id},
outputs=transform.outputs,
environment_id=transform.environment_id))
def unlifted_stages(stage):
transform = stage.transforms[0]
for sub in transform.subtransforms:
yield make_stage(stage, context.components.transforms[sub])
for stage in stages:
transform = only_transform(stage.transforms)
if transform.spec.urn == common_urns.composites.COMBINE_PER_KEY.urn:
expansion = lifted_stages if can_lift(transform) else unlifted_stages
for substage in expansion(stage):
yield substage
else:
yield stage
def _lowest_common_ancestor(a, b, parents):
# type: (str, str, Dict[str, str]) -> Optional[str]
'''Returns the name of the lowest common ancestor of the two named stages.
The map of stage names to their parents' stage names should be provided
in parents. Note that stages are considered to be ancestors of themselves.
'''
assert a != b
def get_ancestors(name):
ancestor = name
while ancestor is not None:
yield ancestor
ancestor = parents.get(ancestor)
a_ancestors = set(get_ancestors(a))
for b_ancestor in get_ancestors(b):
if b_ancestor in a_ancestors:
return b_ancestor
return None
def _parent_for_fused_stages(stages, context):
# type: (Iterable[Stage], TransformContext) -> Optional[str]
'''Returns the name of the new parent for the fused stages.
The new parent is the lowest common ancestor of the fused stages that is not
contained in the set of stages to be fused. The provided context is used to
compute ancestors of stages.
'''
parents = context.parents_map()
# If any of the input stages were produced by fusion or an optimizer phase,
# or had its parent modified by an optimizer phase, its parent will not be
# reflected in the PipelineContext yet, so we need to add it to the
# parents map.
for stage in stages:
parents[stage.name] = stage.parent
def reduce_fn(a, b):
# type: (Optional[str], Optional[str]) -> Optional[str]
if a is None or b is None:
return None
return _lowest_common_ancestor(a, b, parents)
stage_names = [stage.name for stage in stages] # type: List[Optional[str]]
result = functools.reduce(reduce_fn, stage_names)
if result in stage_names:
result = parents.get(result)
return result
def expand_sdf(stages, context):
# type: (Iterable[Stage], TransformContext) -> Iterator[Stage]
"""Transforms splitable DoFns into pair+split+read."""
for stage in stages:
transform = only_transform(stage.transforms)
if transform.spec.urn == common_urns.primitives.PAR_DO.urn:
pardo_payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
if pardo_payload.restriction_coder_id:
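# copy_like clones an existing PCollection/PTransform proto under a fresh
# unique id and name, then applies the supplied field overrides
# (inputs, outputs, urn, payload, coder_id, ...).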
def copy_like(protos, original, suffix='_copy', **kwargs):
if isinstance(original, str):
key = original
original = protos[original]
else:
key = 'component'
new_id = unique_name(protos, key + suffix)
protos[new_id].CopyFrom(original)
proto = protos[new_id]
for name, value in kwargs.items():
if isinstance(value, dict):
getattr(proto, name).clear()
getattr(proto, name).update(value)
elif isinstance(value, list):
del getattr(proto, name)[:]
getattr(proto, name).extend(value)
elif name == 'urn':
proto.spec.urn = value
elif name == 'payload':
proto.spec.payload = value
else:
setattr(proto, name, value)
if 'unique_name' not in kwargs and hasattr(proto, 'unique_name'):
proto.unique_name = unique_name(
{p.unique_name
for p in protos.values()},
original.unique_name + suffix)
return new_id
def make_stage(base_stage, transform_id, extra_must_follow=()):
# type: (Stage, str, Iterable[Stage]) -> Stage
transform = context.components.transforms[transform_id]
return Stage(
transform.unique_name, [transform],
base_stage.downstream_side_inputs,
union(base_stage.must_follow, frozenset(extra_must_follow)),
parent=base_stage.name,
environment=base_stage.environment)
main_input_tag = only_element(
tag for tag in transform.inputs.keys()
if tag not in pardo_payload.side_inputs)
main_input_id = transform.inputs[main_input_tag]
element_coder_id = context.components.pcollections[
main_input_id].coder_id
# Tuple[element, restriction]
paired_coder_id = context.add_or_get_coder_id(
beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.KV.urn),
component_coder_ids=[
element_coder_id, pardo_payload.restriction_coder_id
]))
# Tuple[Tuple[element, restriction], double]
sized_coder_id = context.add_or_get_coder_id(
beam_runner_api_pb2.Coder(
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.coders.KV.urn),
component_coder_ids=[
paired_coder_id,
context.add_or_get_coder_id(
# context can be None here only because FloatCoder does
# not have components
coders.FloatCoder().to_runner_api(None), # type: ignore
'doubles_coder')
]))
paired_pcoll_id = copy_like(
context.components.pcollections,
main_input_id,
'_paired',
coder_id=paired_coder_id)
pair_transform_id = copy_like(
context.components.transforms,
transform,
unique_name=transform.unique_name + '/PairWithRestriction',
urn=common_urns.sdf_components.PAIR_WITH_RESTRICTION.urn,
outputs={'out': paired_pcoll_id})
split_pcoll_id = copy_like(
context.components.pcollections,
main_input_id,
'_split',
coder_id=sized_coder_id)
split_transform_id = copy_like(
context.components.transforms,
transform,
unique_name=transform.unique_name + '/SplitAndSizeRestriction',
urn=common_urns.sdf_components.SPLIT_AND_SIZE_RESTRICTIONS.urn,
inputs=dict(transform.inputs, **{main_input_tag: paired_pcoll_id}),
outputs={'out': split_pcoll_id})
reshuffle_stage = None
if common_urns.composites.RESHUFFLE.urn in context.known_runner_urns:
reshuffle_pcoll_id = copy_like(
context.components.pcollections,
main_input_id,
'_reshuffle',
coder_id=sized_coder_id)
reshuffle_transform_id = copy_like(
context.components.transforms,
transform,
unique_name=transform.unique_name + '/Reshuffle',
urn=common_urns.composites.RESHUFFLE.urn,
payload=b'',
inputs=dict(transform.inputs, **{main_input_tag: split_pcoll_id}),
outputs={'out': reshuffle_pcoll_id})
reshuffle_stage = make_stage(stage, reshuffle_transform_id)
else:
reshuffle_pcoll_id = split_pcoll_id
reshuffle_transform_id = None
if context.is_drain:
truncate_pcoll_id = copy_like(
context.components.pcollections,
main_input_id,
'_truncate_restriction',
coder_id=sized_coder_id)
# Length-prefix the truncate output.
context.length_prefix_pcoll_coders(truncate_pcoll_id)
truncate_transform_id = copy_like(
context.components.transforms,
transform,
unique_name=transform.unique_name + '/TruncateAndSizeRestriction',
urn=common_urns.sdf_components.TRUNCATE_SIZED_RESTRICTION.urn,
inputs=dict(
transform.inputs, **{main_input_tag: reshuffle_pcoll_id}),
outputs={'out': truncate_pcoll_id})
process_transform_id = copy_like(
context.components.transforms,
transform,
unique_name=transform.unique_name + '/Process',
urn=common_urns.sdf_components.
PROCESS_SIZED_ELEMENTS_AND_RESTRICTIONS.urn,
inputs=dict(
transform.inputs, **{main_input_tag: truncate_pcoll_id}))
else:
process_transform_id = copy_like(
context.components.transforms,
transform,
unique_name=transform.unique_name + '/Process',
urn=common_urns.sdf_components.
PROCESS_SIZED_ELEMENTS_AND_RESTRICTIONS.urn,
inputs=dict(
transform.inputs, **{main_input_tag: reshuffle_pcoll_id}))
yield make_stage(stage, pair_transform_id)
split_stage = make_stage(stage, split_transform_id)
yield split_stage
if reshuffle_stage:
yield reshuffle_stage
if context.is_drain:
yield make_stage(
stage, truncate_transform_id, extra_must_follow=[split_stage])
yield make_stage(stage, process_transform_id)
else:
yield make_stage(
stage, process_transform_id, extra_must_follow=[split_stage])
else:
yield stage
else:
yield stage
def expand_gbk(stages, pipeline_context):
# type: (Iterable[Stage], TransformContext) -> Iterator[Stage]
"""Transforms each GBK into a write followed by a read."""
for stage in stages:
transform = only_transform(stage.transforms)
if transform.spec.urn == common_urns.primitives.GROUP_BY_KEY.urn:
for pcoll_id in transform.inputs.values():
pipeline_context.length_prefix_pcoll_coders(pcoll_id)
for pcoll_id in transform.outputs.values():
if pipeline_context.use_state_iterables:
pipeline_context.components.pcollections[
pcoll_id].coder_id = pipeline_context.with_state_iterables(
pipeline_context.components.pcollections[pcoll_id].coder_id)
pipeline_context.length_prefix_pcoll_coders(pcoll_id)
# This is used later to correlate the read and write.
transform_id = stage.name
if transform != pipeline_context.components.transforms.get(transform_id):
transform_id = unique_name(
pipeline_context.components.transforms, stage.name)
pipeline_context.components.transforms[transform_id].CopyFrom(transform)
grouping_buffer = create_buffer_id(transform_id, kind='group')
gbk_write = Stage(
transform.unique_name + '/Write',
[
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Write',
inputs=transform.inputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=grouping_buffer))
],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
yield gbk_write
yield Stage(
transform.unique_name + '/Read',
[
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Read',
outputs=transform.outputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=grouping_buffer))
],
downstream_side_inputs=stage.downstream_side_inputs,
must_follow=union(frozenset([gbk_write]), stage.must_follow))
else:
yield stage
def fix_flatten_coders(
stages, pipeline_context, identity_urn=bundle_processor.IDENTITY_DOFN_URN):
# type: (Iterable[Stage], TransformContext, str) -> Iterator[Stage]
"""Ensures that the inputs of Flatten have the same coders as the output.
"""
pcollections = pipeline_context.components.pcollections
for stage in stages:
transform = only_element(stage.transforms)
if transform.spec.urn == common_urns.primitives.FLATTEN.urn:
output_pcoll_id = only_element(transform.outputs.values())
output_coder_id = pcollections[output_pcoll_id].coder_id
for local_in, pcoll_in in list(transform.inputs.items()):
if pcollections[pcoll_in].coder_id != output_coder_id:
# Flatten requires that all its inputs be materialized with the
# same coder as its output. Add stages to transcode flatten
# inputs that use different coders.
transcoded_pcollection = unique_name(
pcollections,
transform.unique_name + '/Transcode/' + local_in + '/out')
transcode_name = unique_name(
pipeline_context.components.transforms,
transform.unique_name + '/Transcode/' + local_in)
yield Stage(
transcode_name,
[
beam_runner_api_pb2.PTransform(
unique_name=transcode_name,
inputs={local_in: pcoll_in},
outputs={'out': transcoded_pcollection},
spec=beam_runner_api_pb2.FunctionSpec(urn=identity_urn),
environment_id=transform.environment_id)
],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
pcollections[transcoded_pcollection].CopyFrom(pcollections[pcoll_in])
pcollections[transcoded_pcollection].unique_name = (
transcoded_pcollection)
pcollections[transcoded_pcollection].coder_id = output_coder_id
transform.inputs[local_in] = transcoded_pcollection
yield stage
def sink_flattens(stages, pipeline_context):
# type: (Iterable[Stage], TransformContext) -> Iterator[Stage]
"""Sink flattens and remove them from the graph.
A flatten that cannot be sunk/fused away becomes multiple writes (to the
same logical sink) followed by a read.
"""
# TODO(robertwb): Actually attempt to sink rather than always materialize.
# TODO(robertwb): Possibly fuse multi-input flattens into one of the stages.
for stage in fix_flatten_coders(stages,
pipeline_context,
common_urns.primitives.FLATTEN.urn):
transform = only_element(stage.transforms)
if (transform.spec.urn == common_urns.primitives.FLATTEN.urn and
len(transform.inputs) > 1):
# This is used later to correlate the read and writes.
buffer_id = create_buffer_id(transform.unique_name)
flatten_writes = [] # type: List[Stage]
for local_in, pcoll_in in transform.inputs.items():
flatten_write = Stage(
transform.unique_name + '/Write/' + local_in,
[
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Write/' + local_in,
inputs={local_in: pcoll_in},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=buffer_id),
environment_id=transform.environment_id)
],
downstream_side_inputs=frozenset(),
must_follow=stage.must_follow)
flatten_writes.append(flatten_write)
yield flatten_write
yield Stage(
transform.unique_name + '/Read',
[
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Read',
outputs=transform.outputs,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN, payload=buffer_id),
environment_id=transform.environment_id)
],
downstream_side_inputs=stage.downstream_side_inputs,
must_follow=union(frozenset(flatten_writes), stage.must_follow))
else:
yield stage
def greedily_fuse(stages, pipeline_context):
# type: (Iterable[Stage], TransformContext) -> FrozenSet[Stage]
"""Places transforms sharing an edge in the same stage, whenever possible.
"""
producers_by_pcoll = {} # type: Dict[str, Stage]
consumers_by_pcoll = collections.defaultdict(
list) # type: DefaultDict[str, List[Stage]]
# Used to always reference the correct stage as the producer and
# consumer maps are not updated when stages are fused away.
replacements = {} # type: Dict[Stage, Stage]
def replacement(s):
old_ss = []
while s in replacements:
old_ss.append(s)
s = replacements[s]
for old_s in old_ss[:-1]:
replacements[old_s] = s
return s
def fuse(producer, consumer):
fused = producer.fuse(consumer, pipeline_context)
replacements[producer] = fused
replacements[consumer] = fused
# First record the producers and consumers of each PCollection.
for stage in stages:
for transform in stage.transforms:
for input in transform.inputs.values():
consumers_by_pcoll[input].append(stage)
for output in transform.outputs.values():
producers_by_pcoll[output] = stage
# Now try to fuse away all pcollections.
for pcoll, producer in producers_by_pcoll.items():
write_pcoll = None
for consumer in consumers_by_pcoll[pcoll]:
producer = replacement(producer)
consumer = replacement(consumer)
# Update consumer.must_follow set, as it's used in can_fuse.
consumer.must_follow = frozenset(
replacement(s) for s in consumer.must_follow)
if producer.can_fuse(consumer, pipeline_context):
fuse(producer, consumer)
else:
# If we can't fuse, do a read + write.
pipeline_context.length_prefix_pcoll_coders(pcoll)
buffer_id = create_buffer_id(pcoll)
if write_pcoll is None:
write_pcoll = Stage(
pcoll + '/Write',
[
beam_runner_api_pb2.PTransform(
unique_name=pcoll + '/Write',
inputs={'in': pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_OUTPUT_URN,
payload=buffer_id))
],
downstream_side_inputs=producer.downstream_side_inputs)
fuse(producer, write_pcoll)
if consumer.has_as_main_input(pcoll):
read_pcoll = Stage(
pcoll + '/Read',
[
beam_runner_api_pb2.PTransform(
unique_name=pcoll + '/Read',
outputs={'out': pcoll},
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=buffer_id))
],
downstream_side_inputs=consumer.downstream_side_inputs,
must_follow=frozenset([write_pcoll]))
fuse(read_pcoll, consumer)
else:
consumer.must_follow = union(
consumer.must_follow, frozenset([write_pcoll]))
# Everything that was originally a stage or a replacement, but wasn't
# replaced, should be in the final graph.
final_stages = frozenset(stages).union(list(replacements.values()))\
.difference(list(replacements))
for stage in final_stages:
# Update all references to their final values before throwing
# the replacement data away.
stage.must_follow = frozenset(replacement(s) for s in stage.must_follow)
# Two reads of the same stage may have been fused. This is unneeded.
stage.deduplicate_read()
return final_stages
def read_to_impulse(stages, pipeline_context):
# type: (Iterable[Stage], TransformContext) -> Iterator[Stage]
"""Translates Read operations into Impulse operations."""
for stage in stages:
# First map Reads, if any, to Impulse + triggered read op.
for transform in list(stage.transforms):
if transform.spec.urn == common_urns.deprecated_primitives.READ.urn:
read_pc = only_element(transform.outputs.values())
read_pc_proto = pipeline_context.components.pcollections[read_pc]
impulse_pc = unique_name(
pipeline_context.components.pcollections, 'Impulse')
pipeline_context.components.pcollections[impulse_pc].CopyFrom(
beam_runner_api_pb2.PCollection(
unique_name=impulse_pc,
coder_id=pipeline_context.bytes_coder_id,
windowing_strategy_id=read_pc_proto.windowing_strategy_id,
is_bounded=read_pc_proto.is_bounded))
stage.transforms.remove(transform)
# TODO(robertwb): If this goes multi-process before fn-api
# read is default, expand into split + reshuffle + read.
stage.transforms.append(
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name + '/Impulse',
spec=beam_runner_api_pb2.FunctionSpec(
urn=common_urns.primitives.IMPULSE.urn),
outputs={'out': impulse_pc}))
stage.transforms.append(
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name,
spec=beam_runner_api_pb2.FunctionSpec(
urn=python_urns.IMPULSE_READ_TRANSFORM,
payload=transform.spec.payload),
inputs={'in': impulse_pc},
outputs={'out': read_pc}))
yield stage
def impulse_to_input(stages, pipeline_context):
# type: (Iterable[Stage], TransformContext) -> Iterator[Stage]
"""Translates Impulse operations into GRPC reads."""
for stage in stages:
for transform in list(stage.transforms):
if transform.spec.urn == common_urns.primitives.IMPULSE.urn:
stage.transforms.remove(transform)
stage.transforms.append(
beam_runner_api_pb2.PTransform(
unique_name=transform.unique_name,
spec=beam_runner_api_pb2.FunctionSpec(
urn=bundle_processor.DATA_INPUT_URN,
payload=IMPULSE_BUFFER),
outputs=transform.outputs))
yield stage
def extract_impulse_stages(stages, pipeline_context):
# type: (Iterable[Stage], TransformContext) -> Iterator[Stage]
"""Splits fused Impulse operations into their own stage."""
for stage in stages:
for transform in list(stage.transforms):
if transform.spec.urn == common_urns.primitives.IMPULSE.urn:
stage.transforms.remove(transform)
yield Stage(
transform.unique_name,
transforms=[transform],
downstream_side_inputs=stage.downstream_side_inputs,
must_follow=stage.must_follow,
parent=stage.parent)
if stage.transforms:
yield stage
def remove_data_plane_ops(stages, pipeline_context):
# type: (Iterable[Stage], TransformContext) -> Iterator[Stage]
for stage in stages:
for transform in list(stage.transforms):
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
stage.transforms.remove(transform)
if stage.transforms:
yield stage
def setup_timer_mapping(stages, pipeline_context):
# type: (Iterable[Stage], TransformContext) -> Iterator[Stage]
"""Set up a mapping of {transform_id: [timer_ids]} for each stage.
"""
for stage in stages:
for transform in stage.transforms:
if transform.spec.urn in PAR_DO_URNS:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for timer_family_id in payload.timer_family_specs.keys():
stage.timers.add((transform.unique_name, timer_family_id))
yield stage
def sort_stages(stages, pipeline_context):
# type: (Iterable[Stage], TransformContext) -> List[Stage]
"""Order stages suitable for sequential execution.
"""
all_stages = set(stages)
seen = set() # type: Set[Stage]
ordered = []
producers = {
pcoll: stage
for stage in all_stages for t in stage.transforms
for pcoll in t.outputs.values()
}
def process(stage):
if stage not in seen:
seen.add(stage)
if stage not in all_stages:
return
for prev in stage.must_follow:
process(prev)
stage_outputs = set(
pcoll for transform in stage.transforms
for pcoll in transform.outputs.values())
for transform in stage.transforms:
for pcoll in transform.inputs.values():
if pcoll not in stage_outputs:
process(producers[pcoll])
ordered.append(stage)
for stage in stages:
process(stage)
return ordered
def populate_data_channel_coders(stages, pipeline_context):
# type: (Iterable[Stage], TransformContext) -> Iterable[Stage]
"""Populate coders for GRPC input and output ports."""
for stage in stages:
for transform in stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
sdk_pcoll_id = only_element(transform.outputs.values())
else:
sdk_pcoll_id = only_element(transform.inputs.values())
pipeline_context.add_data_channel_coder(sdk_pcoll_id)
return stages
def union(a, b):
# Minimize the number of distinct sets.
if not a or a == b:
return b
elif not b:
return a
else:
return frozenset.union(a, b)
_global_counter = 0
def unique_name(existing, prefix):
# type: (Optional[Container[str]], str) -> str
if existing is None:
global _global_counter
_global_counter += 1
return '%s_%d' % (prefix, _global_counter)
elif prefix in existing:
counter = 0
while True:
counter += 1
prefix_counter = prefix + "_%s" % counter
if prefix_counter not in existing:
return prefix_counter
else:
return prefix
def only_element(iterable):
# type: (Iterable[T]) -> T
element, = iterable
return element
def only_transform(transforms):
# type: (List[beam_runner_api_pb2.PTransform]) -> beam_runner_api_pb2.PTransform
assert len(transforms) == 1
return transforms[0]
def create_buffer_id(name, kind='materialize'):
# type: (str, str) -> bytes
return ('%s:%s' % (kind, name)).encode('utf-8')
def split_buffer_id(buffer_id):
# type: (bytes) -> Tuple[str, str]
"""A buffer id is "kind:pcollection_id". Split into (kind, pcoll_id). """
kind, pcoll_id = buffer_id.decode('utf-8').split(':', 1)
return kind, pcoll_id
| 38.38344 | 109 | 0.681844 |
1528d431cb7141581dbfd850ea6e2caf695338ae | 1,700 | py | Python | profiles/models.py | Thames1990/BadBatBets | 8dffb69561668b8991bf4103919e4b254d4ca56a | ["MIT"] | null | null | null | profiles/models.py | Thames1990/BadBatBets | 8dffb69561668b8991bf4103919e4b254d4ca56a | ["MIT"] | null | null | null | profiles/models.py | Thames1990/BadBatBets | 8dffb69561668b8991bf4103919e4b254d4ca56a | ["MIT"] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from ledger.models import Account
class Profile(models.Model):
# The user that this profile is associated with
# Since each profile is associated with exactly one user, the user is the profiles primary key
user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
# Whether or not the users true identity has been verified
verified = models.BooleanField(default=False)
# Each user has an account
account = models.OneToOneField(Account, on_delete=models.CASCADE)
# Has the user accepted the general terms and conditions?
accepted_general_terms_and_conditions = models.BooleanField(default=False)
# Has the user accepted the privacy policy?
accepted_privacy_policy = models.BooleanField(default=False)
def __str__(self):
return self.user.__str__()
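# Illustrative ORM usage (hypothetical, not part of this module):
# Profile.objects.filter(verified=True) selects profiles whose real-world
# identity has been verified.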
class ForbiddenUser(models.Model):
# The name that will be shown to the user making the selection
name = models.CharField(max_length=64)
# Whether this person actually has an account
has_account = models.BooleanField()
# If the person has an account, it will be linked here.
account = models.OneToOneField(Profile, on_delete=models.CASCADE, blank=True, null=True)
def __str__(self):
return self.name
class Feedback(models.Model):
# User that provided the feedback
provided_by = models.ForeignKey(User, on_delete=models.CASCADE)
# The actual feedback
feedback = models.TextField()
# Is the feedback already resolved by the administrators?
resolved = models.BooleanField(default=False)
def __str__(self):
return str(self.id)
| 36.956522 | 98 | 0.741176 |
af6bd9e285cce4290cbba8dde349ae74c6a5c5ce | 12,026 | py | Python | official/nlp/bert/input_pipeline.py | hjkim-haga/TF-OD-API | 22ac477ff4dfb93fe7a32c94b5f0b1e74330902b | ["Apache-2.0"] | 1 | 2021-05-22T12:50:50.000Z | 2021-05-22T12:50:50.000Z | official/nlp/bert/input_pipeline.py | DemonDamon/mask-detection-based-on-tf2odapi | 192ae544169c1230c21141c033800aa1bd94e9b6 | ["MIT"] | null | null | null | official/nlp/bert/input_pipeline.py | DemonDamon/mask-detection-based-on-tf2odapi | 192ae544169c1230c21141c033800aa1bd94e9b6 | ["MIT"] | null | null | null |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT model input pipelines."""
import tensorflow as tf
def decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
def single_file_dataset(input_file, name_to_features, num_samples=None):
"""Creates a single-file dataset to be passed for BERT custom training."""
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if num_samples:
d = d.take(num_samples)
d = d.map(
lambda record: decode_record(record, name_to_features),
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# When `input_file` is a path to a single file or a list
# containing a single path, disable auto sharding so that
# same input file is sent to all workers.
if isinstance(input_file, str) or len(input_file) == 1:
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
tf.data.experimental.AutoShardPolicy.OFF)
d = d.with_options(options)
return d
def create_pretrain_dataset(input_patterns,
seq_length,
max_predictions_per_seq,
batch_size,
is_training=True,
input_pipeline_context=None,
use_next_sentence_label=True,
use_position_id=False,
output_fake_labels=True):
"""Creates input dataset from (tf)records files for pretraining."""
name_to_features = {
'input_ids':
tf.io.FixedLenFeature([seq_length], tf.int64),
'input_mask':
tf.io.FixedLenFeature([seq_length], tf.int64),
'segment_ids':
tf.io.FixedLenFeature([seq_length], tf.int64),
'masked_lm_positions':
tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
'masked_lm_ids':
tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64),
'masked_lm_weights':
tf.io.FixedLenFeature([max_predictions_per_seq], tf.float32),
}
if use_next_sentence_label:
name_to_features['next_sentence_labels'] = tf.io.FixedLenFeature([1],
tf.int64)
if use_position_id:
name_to_features['position_ids'] = tf.io.FixedLenFeature([seq_length],
tf.int64)
for input_pattern in input_patterns:
if not tf.io.gfile.glob(input_pattern):
raise ValueError('%s does not match any files.' % input_pattern)
dataset = tf.data.Dataset.list_files(input_patterns, shuffle=is_training)
if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:
dataset = dataset.shard(input_pipeline_context.num_input_pipelines,
input_pipeline_context.input_pipeline_id)
if is_training:
dataset = dataset.repeat()
# We set shuffle buffer to exactly match total number of
# training files to ensure that training data is well shuffled.
input_files = []
for input_pattern in input_patterns:
input_files.extend(tf.io.gfile.glob(input_pattern))
dataset = dataset.shuffle(len(input_files))
# In parallel, create tf record dataset for each train files.
# cycle_length = 8 means that up to 8 files will be read and deserialized in
# parallel. You may want to increase this number if you have a large number of
# CPU cores.
dataset = dataset.interleave(
tf.data.TFRecordDataset,
cycle_length=8,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
if is_training:
dataset = dataset.shuffle(100)
decode_fn = lambda record: decode_record(record, name_to_features)
dataset = dataset.map(
decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def _select_data_from_record(record):
"""Filter out features to use for pretraining."""
x = {
'input_word_ids': record['input_ids'],
'input_mask': record['input_mask'],
'input_type_ids': record['segment_ids'],
'masked_lm_positions': record['masked_lm_positions'],
'masked_lm_ids': record['masked_lm_ids'],
'masked_lm_weights': record['masked_lm_weights'],
}
if use_next_sentence_label:
x['next_sentence_labels'] = record['next_sentence_labels']
if use_position_id:
x['position_ids'] = record['position_ids']
# TODO(hongkuny): Remove the fake labels after migrating bert pretraining.
if output_fake_labels:
return (x, record['masked_lm_weights'])
else:
return x
dataset = dataset.map(
_select_data_from_record,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=is_training)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def create_classifier_dataset(file_path,
seq_length,
batch_size,
is_training=True,
input_pipeline_context=None,
label_type=tf.int64,
include_sample_weights=False,
num_samples=None):
"""Creates input dataset from (tf)records files for train/eval."""
name_to_features = {
'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64),
'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64),
'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64),
'label_ids': tf.io.FixedLenFeature([], label_type),
}
if include_sample_weights:
name_to_features['weight'] = tf.io.FixedLenFeature([], tf.float32)
dataset = single_file_dataset(file_path, name_to_features,
num_samples=num_samples)
# The dataset is always sharded by number of hosts.
# num_input_pipelines is the number of hosts rather than number of cores.
if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:
dataset = dataset.shard(input_pipeline_context.num_input_pipelines,
input_pipeline_context.input_pipeline_id)
def _select_data_from_record(record):
x = {
'input_word_ids': record['input_ids'],
'input_mask': record['input_mask'],
'input_type_ids': record['segment_ids']
}
y = record['label_ids']
if include_sample_weights:
w = record['weight']
return (x, y, w)
return (x, y)
if is_training:
dataset = dataset.shuffle(100)
dataset = dataset.repeat()
dataset = dataset.map(
_select_data_from_record,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=is_training)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def create_squad_dataset(file_path,
seq_length,
batch_size,
is_training=True,
input_pipeline_context=None):
"""Creates input dataset from (tf)records files for train/eval."""
name_to_features = {
'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64),
'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64),
'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64),
}
if is_training:
name_to_features['start_positions'] = tf.io.FixedLenFeature([], tf.int64)
name_to_features['end_positions'] = tf.io.FixedLenFeature([], tf.int64)
else:
name_to_features['unique_ids'] = tf.io.FixedLenFeature([], tf.int64)
dataset = single_file_dataset(file_path, name_to_features)
# The dataset is always sharded by number of hosts.
# num_input_pipelines is the number of hosts rather than number of cores.
if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:
dataset = dataset.shard(input_pipeline_context.num_input_pipelines,
input_pipeline_context.input_pipeline_id)
def _select_data_from_record(record):
"""Dispatches record to features and labels."""
x, y = {}, {}
for name, tensor in record.items():
if name in ('start_positions', 'end_positions'):
y[name] = tensor
elif name == 'input_ids':
x['input_word_ids'] = tensor
elif name == 'segment_ids':
x['input_type_ids'] = tensor
else:
x[name] = tensor
return (x, y)
if is_training:
dataset = dataset.shuffle(100)
dataset = dataset.repeat()
dataset = dataset.map(
_select_data_from_record,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def create_retrieval_dataset(file_path,
seq_length,
batch_size,
input_pipeline_context=None):
"""Creates input dataset from (tf)records files for scoring."""
name_to_features = {
'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64),
'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64),
'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64),
'example_id': tf.io.FixedLenFeature([1], tf.int64),
}
dataset = single_file_dataset(file_path, name_to_features)
# The dataset is always sharded by number of hosts.
# num_input_pipelines is the number of hosts rather than number of cores.
if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1:
dataset = dataset.shard(input_pipeline_context.num_input_pipelines,
input_pipeline_context.input_pipeline_id)
def _select_data_from_record(record):
x = {
'input_word_ids': record['input_ids'],
'input_mask': record['input_mask'],
'input_type_ids': record['segment_ids']
}
y = record['example_id']
return (x, y)
dataset = dataset.map(
_select_data_from_record,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=False)
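# _pad_to_batch pads the final partial batch up to batch_size with zeroed
# features and a label of -1 so downstream code always sees fixed-size batches.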
def _pad_to_batch(x, y):
cur_size = tf.shape(y)[0]
pad_size = batch_size - cur_size
pad_ids = tf.zeros(shape=[pad_size, seq_length], dtype=tf.int32)
for key in ('input_word_ids', 'input_mask', 'input_type_ids'):
x[key] = tf.concat([x[key], pad_ids], axis=0)
pad_labels = -tf.ones(shape=[pad_size, 1], dtype=tf.int32)
y = tf.concat([y, pad_labels], axis=0)
return x, y
dataset = dataset.map(
_pad_to_batch,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
| 39.689769 | 81 | 0.652337 |
695be577288270bc4e8aa88bea40fe63a526ac38 | 13,023 | py | Python | src/spencer_people_tracking/utils/spencer_bagfile_tools/scripts/reconstruct_odometry.py | CodeToPoem/HumanAwareRobotNavigation | d44eb7e5acd73a5a7bf8bf1cd88c23d6a4a3c330 | ["BSD-3-Clause"] | 20 | 2017-10-26T05:58:24.000Z | 2021-06-13T11:18:54.000Z | src/spencer_people_tracking/utils/spencer_bagfile_tools/scripts/reconstruct_odometry.py | dmr-goncalves/HumanAwareRobotNavigation | d44eb7e5acd73a5a7bf8bf1cd88c23d6a4a3c330 | ["BSD-3-Clause"] | null | null | null | src/spencer_people_tracking/utils/spencer_bagfile_tools/scripts/reconstruct_odometry.py | dmr-goncalves/HumanAwareRobotNavigation | d44eb7e5acd73a5a7bf8bf1cd88c23d6a4a3c330 | ["BSD-3-Clause"] | 6 | 2018-02-05T09:31:42.000Z | 2022-02-07T22:05:57.000Z |
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2014, Timm Linder, Social Robotics Lab, University of Freiburg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Allows reconstructing odometry from the /additional_odom_data topic using
# new calibration factors, as well as plotting the overall path that was travelled.
# Requires a bag file to be played back using rosbag play / rqt_bag.
import rospy, math, numpy, tf
from collections import deque
from spencer_bagfile_tools.msg import AdditionalOdometryData
from dynamic_reconfigure.server import Server
from spencer_bagfile_tools.cfg import ReconstructOdometryConfig
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import Point, Quaternion
from std_msgs.msg import ColorRGBA
from nav_msgs.msg import Odometry
class State(object):
def __init__(self):
self.x = self.y = self.theta = 0
self.totalDistance = 0
self.stamp = rospy.Time(0)
class OdometryController(object):
def __init__(self):
self.msgHistory = []
self.stateHistory = self.emptyStateHistory()
self.previousMsg = self.previousState = None
self.rebuildingEntirePath = False
self.zeroPosition()
self.WHEEL_BASE = 0.665
self.TICKS_PER_METER_LEFT = 56263.5
self.TICKS_PER_METER_RIGHT = 57099.7
self.previousTimestampMarkerCount = 0
def zeroPosition(self):
self.stateHistory.append(State())
self.previousState = self.stateHistory[0]
def run(self):
self.markerArrayPublisher = rospy.Publisher("/spencer_bagfile_tools/reconstructed_odom_path", MarkerArray, queue_size=1)
self.odomPublisher = rospy.Publisher("/spencer/sensors/odom", Odometry, queue_size=3)
reconfigureServer = Server(ReconstructOdometryConfig, self.reconfigure)
topicName = "/spencer/sensors/additional_odom_data"
self.subscriber = rospy.Subscriber(topicName, AdditionalOdometryData, self.additionalOdometryDataCallback)
rospy.loginfo("Reconstructing odometry from " + topicName + ", now listening for messages...")
rospy.spin()
def additionalOdometryDataCallback(self, msg):
if not self.rebuildingEntirePath:
self.updateState(msg)
self.msgHistory.append(msg)
self.publishOdom()
self.visualizePath()
def reconfigure(self, config, level):
self.extraCalibOverallMultiplier = config["extra_calib_overall_multiplier"]
self.extraCalibLeftMultiplier = config["extra_calib_left_multiplier"]
self.lineWidth = config["line_width"]
self.arrowLength = config["arrow_length"]
self.showWaypoints = config["show_waypoints"]
self.recalculatePath = config["recalculate_path"]
if level > 0 and self.recalculatePath:
self.rebuildEntirePath()
return config
def emptyStateHistory(self):
# Limit max. state history length to prevent bad performance after driving for a while
# NOTE: msgHistory might still grow unboundedly, but there's no way of avoiding that...
# However, that is mainly a memory issue as the whole history is only processed in rebuildEntirePath()
return deque(maxlen=5000)
def rebuildEntirePath(self):
rospy.loginfo("Odometry parameters have changed! Rebuilding entire path!")
if self.rebuildingEntirePath:
return
self.rebuildingEntirePath = True
self.stateHistory = self.emptyStateHistory()
self.zeroPosition()
self.previousMsg = None
for msg in self.msgHistory:
self.updateState(msg)
self.rebuildingEntirePath = False
self.publishOdom()
self.visualizePath()
def updateState(self, msg):
newState = State()
newState.stamp = msg.header.stamp
previousLeftTicks = self.previousMsg.ticksLeft if self.previousMsg else msg.ticksLeft
previousRightTicks = self.previousMsg.ticksRight if self.previousMsg else msg.ticksRight
leftDiff = msg.ticksLeft - previousLeftTicks
rightDiff = msg.ticksRight - previousRightTicks
# Calculate metric travelled distances of both wheels and the base
metersTravelledLeft = leftDiff * msg.calibOverallMultiplier * self.extraCalibOverallMultiplier * msg.calibLeftEncMultiplier * self.extraCalibLeftMultiplier / self.TICKS_PER_METER_LEFT
metersTravelledRight = rightDiff * msg.calibOverallMultiplier * self.extraCalibOverallMultiplier / self.TICKS_PER_METER_RIGHT
distance = (metersTravelledLeft + metersTravelledRight) / 2.0
# Update position and bearing
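# Differential-drive model: the heading changes by the difference of the wheel
# travel distances divided by the wheel base, and the base advances by the
# mean of the two distances along the new heading.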
newState.theta = self.previousState.theta + (metersTravelledLeft - metersTravelledRight) / self.WHEEL_BASE
newState.theta -= (int((newState.theta/(2*math.pi) ))) * 2*math.pi # wrap heading to within one full revolution (+/- 2*pi)
newState.totalDistance = self.previousState.totalDistance + math.fabs(distance)
newState.x = self.previousState.x + distance * math.sin(newState.theta)
newState.y = self.previousState.y + distance * math.cos(newState.theta)
positionTolerance = 0.1 # in meters
if math.hypot(newState.x - self.stateHistory[-1].x, newState.y - self.stateHistory[-1].y) > positionTolerance:
# Do not cache every single state if the change in position is minimal, otherwise we'll soon run
# out of memory (note we still store previousState, since it is needed by publishOdom() and updateState())
self.stateHistory.append(newState)
self.previousState = newState # FIXME
self.previousMsg = msg
def publishOdom(self):
odom = Odometry()
odom.header.stamp = self.previousMsg.header.stamp if self.previousMsg else rospy.Time.now()
odom.header.frame_id = "odom"
odom.pose.pose.position.x = self.previousState.x
odom.pose.pose.position.y = self.previousState.y
for row in xrange(0, 6):
for col in xrange(0, 6):
odom.pose.covariance[6*row+col] = 0 if row != col else 0.1
odom.twist.covariance[6*row+col] = 0 if row != col else 999999
q = tf.transformations.quaternion_from_euler(0, 0, -self.previousState.theta + math.pi/2)
odom.pose.pose.orientation = Quaternion(x=q[0], y=q[1], z=q[2], w=q[3])
if len(self.stateHistory) >= 2:
odom.twist.twist.linear.x = odom.pose.pose.position.x - self.stateHistory[-2].x
odom.twist.twist.linear.y = odom.pose.pose.position.y - self.stateHistory[-2].y
self.odomPublisher.publish(odom)
def visualizePath(self):
if self.markerArrayPublisher.get_num_connections() <= 0:
return
markerArray = MarkerArray()
pathMarker = Marker()
pathMarker.header.stamp = rospy.Time.now()
pathMarker.header.frame_id = "odom"
pathMarker.ns = "Path"
pathMarker.type = Marker.LINE_STRIP
pathMarker.id = 0
pathMarker.color = ColorRGBA(r=1, g=1, a=1)
pathMarker.scale.x = 0.05 * self.lineWidth
waypointMarker = Marker()
waypointMarker.header = pathMarker.header
waypointMarker.ns = "Waypoints"
waypointMarker.type = Marker.SPHERE_LIST
waypointMarker.id = 1
waypointMarker.color = ColorRGBA(r=1, g=1, a=1)
waypointMarker.scale.x = waypointMarker.scale.y = 0.1 * self.lineWidth
lastWaypointTime = float("-inf")
lastWaypointPos = (float("99999"), float("99999"))
# Generate path and waypoints
for state in self.stateHistory:
pathMarker.points.append(Point(x=state.x, y=state.y))
if state.stamp.to_sec() - lastWaypointTime > 5 and self.showWaypoints:
dx = state.x - lastWaypointPos[0]
dy = state.y - lastWaypointPos[1]
if math.sqrt(dx*dx + dy*dy) > 1:
lastWaypointTime = state.stamp.to_sec()
lastWaypointPos = (state.x, state.y)
waypointMarker.points.append(Point(x=state.x, y=state.y))
timestampMarker = Marker()
timestampMarker.header = waypointMarker.header
timestampMarker.ns = "Timestamps"
timestampMarker.type = Marker.TEXT_VIEW_FACING
timestampMarker.id = 3 + len(markerArray.markers)
timestampMarker.color = ColorRGBA(r=0.6, a=1)
timestampMarker.scale.z = 0.1 * self.lineWidth
timestampMarker.pose.position.x = state.x
timestampMarker.pose.position.y = state.y
timestampMarker.text = "%.1f" % state.stamp.to_sec()
markerArray.markers.append(timestampMarker)
# Delete old markers
currentTimestampMarkerCount = len(markerArray.markers)
for i in xrange(0, self.previousTimestampMarkerCount - currentTimestampMarkerCount):
timestampMarker = Marker()
timestampMarker.header = waypointMarker.header
timestampMarker.ns = "Timestamps"
timestampMarker.action = Marker.DELETE
timestampMarker.id = 3 + currentTimestampMarkerCount + i
markerArray.markers.append(timestampMarker)
self.previousTimestampMarkerCount = currentTimestampMarkerCount
# Velocity arrow
velocitySmoothingNoPoints = 5
if len(pathMarker.points) > velocitySmoothingNoPoints:
arrowHeadMarker = Marker()
arrowHeadMarker.header = pathMarker.header
arrowHeadMarker.ns = "Path-ArrowHead"
arrowHeadMarker.type = Marker.LINE_STRIP
arrowHeadMarker.id = 2
arrowHeadMarker.color = ColorRGBA(r=1, g=1, a=1)
arrowHeadMarker.scale.x = arrowHeadMarker.scale.y = 0.1 * self.lineWidth
pointTip = numpy.array([pathMarker.points[-1].x, pathMarker.points[-1].y])
lastVelocity = numpy.array([pathMarker.points[-1].x - pathMarker.points[-velocitySmoothingNoPoints].x,
pathMarker.points[-1].y - pathMarker.points[-velocitySmoothingNoPoints].y])
speed = numpy.linalg.norm(lastVelocity)
lastVelocity /= speed
lastVelocity *= 0.3 * self.arrowLength
steepnessAngle = numpy.interp(speed, [0.03, 0.3], [0, 75])
pointLeft = pointTip + self.rotateVector(lastVelocity, 90 + steepnessAngle )
pointRight = pointTip + self.rotateVector(lastVelocity, -(90 + steepnessAngle) )
arrowHeadMarker.points.append(Point(x=pointLeft[0], y=pointLeft[1]))
arrowHeadMarker.points.append(Point(x=pointTip[0], y=pointTip[1]))
arrowHeadMarker.points.append(Point(x=pointRight[0], y=pointRight[1]))
markerArray.markers.append(arrowHeadMarker)
markerArray.markers.append(pathMarker)
markerArray.markers.append(waypointMarker)
self.markerArrayPublisher.publish(markerArray)
def rotateVector(self, vector, angleDeg):
theta = (angleDeg/180.) * numpy.pi
rotMatrix = numpy.array([[numpy.cos(theta), -numpy.sin(theta)],
[numpy.sin(theta), numpy.cos(theta)]])
return numpy.dot(rotMatrix, vector)
if __name__ == '__main__':
rospy.init_node("reconstruct_odometry")
odometryController = OdometryController()
odometryController.run()
| 43.701342 | 191 | 0.67496 |
135c0a51e66c50de5ee427243dcfedf9907cee20 | 48,414 | py | Python | rollbar/__init__.py | mike9005/pyrollbar | 487a78c427c8021d46f2d211e34c500593bcb706 | [
"MIT"
] | null | null | null | rollbar/__init__.py | mike9005/pyrollbar | 487a78c427c8021d46f2d211e34c500593bcb706 | [
"MIT"
] | null | null | null | rollbar/__init__.py | mike9005/pyrollbar | 487a78c427c8021d46f2d211e34c500593bcb706 | [
"MIT"
] | null | null | null | """
Plugin for Pyramid apps to submit errors to Rollbar
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import copy
import functools
import inspect
import json
import logging
import os
import socket
import sys
import threading
import time
import traceback
import types
import uuid
import wsgiref.util
import requests
import six
from rollbar.lib import events, filters, dict_merge, parse_qs, text, transport, urljoin, iteritems
__version__ = '0.14.5'
__log_name__ = 'rollbar'
log = logging.getLogger(__log_name__)
try:
# 2.x
import Queue as queue
except ImportError:
# 3.x
import queue
# import request objects from various frameworks, if available
try:
from webob import BaseRequest as WebobBaseRequest
except ImportError:
WebobBaseRequest = None
try:
from django.core.exceptions import ImproperlyConfigured
except ImportError:
DjangoHttpRequest = None
RestFrameworkRequest = None
else:
try:
from django.http import HttpRequest as DjangoHttpRequest
except (ImportError, ImproperlyConfigured):
DjangoHttpRequest = None
try:
from rest_framework.request import Request as RestFrameworkRequest
except (ImportError, ImproperlyConfigured):
RestFrameworkRequest = None
del ImproperlyConfigured
try:
from werkzeug.wrappers import BaseRequest as WerkzeugRequest
except (ImportError, SyntaxError):
WerkzeugRequest = None
try:
from werkzeug.local import LocalProxy as WerkzeugLocalProxy
except (ImportError, SyntaxError):
WerkzeugLocalProxy = None
try:
from tornado.httpserver import HTTPRequest as TornadoRequest
except ImportError:
TornadoRequest = None
try:
from bottle import BaseRequest as BottleRequest
except ImportError:
BottleRequest = None
try:
from sanic.request import Request as SanicRequest
except ImportError:
SanicRequest = None
try:
from google.appengine.api.urlfetch import fetch as AppEngineFetch
except ImportError:
AppEngineFetch = None
def passthrough_decorator(func):
def wrap(*args, **kwargs):
return func(*args, **kwargs)
return wrap
try:
from tornado.httpclient import AsyncHTTPClient as TornadoAsyncHTTPClient
except ImportError:
TornadoAsyncHTTPClient = None
try:
import treq
from twisted.python import log as twisted_log
def log_handler(event):
"""
Default uncaught error handler
"""
try:
if not event.get('isError') or 'failure' not in event:
return
err = event['failure']
# Don't report Rollbar internal errors to ourselves
if issubclass(err.type, ApiException):
log.error('Rollbar internal error: %s', err.value)
else:
report_exc_info((err.type, err.value, err.getTracebackObject()))
except:
log.exception('Error while reporting to Rollbar')
# Add Rollbar as a log handler which will report uncaught errors
twisted_log.addObserver(log_handler)
except ImportError:
treq = None
try:
from falcon import Request as FalconRequest
except ImportError:
FalconRequest = None
def get_request():
"""
Get the current request object. Implementation varies on
library support. Modified below when we know which framework
is being used.
"""
# TODO(cory): add in a generic _get_locals_request() which
# will iterate up through the call stack and look for a variable
# that appears to be valid request object.
for fn in (_get_bottle_request,
_get_flask_request,
_get_pyramid_request,
_get_pylons_request):
try:
req = fn()
if req is not None:
return req
except:
pass
return None
def _get_bottle_request():
if BottleRequest is None:
return None
from bottle import request
return request
def _get_flask_request():
if WerkzeugRequest is None:
return None
from flask import request
return request
def _get_pyramid_request():
if WebobBaseRequest is None:
return None
from pyramid.threadlocal import get_current_request
return get_current_request()
def _get_pylons_request():
if WebobBaseRequest is None:
return None
from pylons import request
return request
BASE_DATA_HOOK = None
agent_log = None
VERSION = __version__
DEFAULT_ENDPOINT = 'https://api.rollbar.com/api/1/'
DEFAULT_TIMEOUT = 3
ANONYMIZE = 'anonymize'
DEFAULT_LOCALS_SIZES = {
'maxlevel': 5,
'maxdict': 10,
'maxlist': 10,
'maxtuple': 10,
'maxset': 10,
'maxfrozenset': 10,
'maxdeque': 10,
'maxarray': 10,
'maxstring': 100,
'maxlong': 40,
'maxother': 100,
}
# configuration settings
# configure by calling init() or overriding directly
SETTINGS = {
'access_token': None,
'enabled': True,
'environment': 'production',
'exception_level_filters': [],
'root': None, # root path to your code
'branch': None, # git branch name
'code_version': None,
'handler': 'thread', # 'blocking', 'thread', 'agent', 'tornado', 'gae' or 'twisted'
'endpoint': DEFAULT_ENDPOINT,
'timeout': DEFAULT_TIMEOUT,
'agent.log_file': 'log.rollbar',
'scrub_fields': [
'pw',
'passwd',
'password',
'secret',
'confirm_password',
'confirmPassword',
'password_confirmation',
'passwordConfirmation',
'access_token',
'accessToken',
'auth',
'authentication',
],
'url_fields': ['url', 'link', 'href'],
'notifier': {
'name': 'pyrollbar',
'version': VERSION
},
'allow_logging_basic_config': True, # set to False to avoid a call to logging.basicConfig()
'locals': {
'enabled': True,
'safe_repr': True,
'scrub_varargs': True,
'sizes': DEFAULT_LOCALS_SIZES,
'whitelisted_types': []
},
'verify_https': True,
'shortener_keys': [],
'suppress_reinit_warning': False,
'capture_email': False,
'capture_username': False,
'capture_ip': True,
'log_all_rate_limited_items': True,
'http_proxy': None,
'http_proxy_user': None,
'http_proxy_password': None,
}
_CURRENT_LAMBDA_CONTEXT = None
_LAST_RESPONSE_STATUS = None
# Set in init()
_transforms = []
_serialize_transform = None
_initialized = False
from rollbar.lib.transforms.scrub_redact import REDACT_REF
from rollbar.lib import transforms
from rollbar.lib.transforms.scrub import ScrubTransform
from rollbar.lib.transforms.scruburl import ScrubUrlTransform
from rollbar.lib.transforms.scrub_redact import ScrubRedactTransform
from rollbar.lib.transforms.serializable import SerializableTransform
from rollbar.lib.transforms.shortener import ShortenerTransform
## public api
def init(access_token, environment='production', scrub_fields=None, url_fields=None, **kw):
"""
Saves configuration variables in this module's SETTINGS.
access_token: project access token. Get this from the Rollbar UI:
- click "Settings" in the top nav
- click "Projects" in the left nav
- copy-paste the appropriate token.
environment: environment name. Can be any string; suggestions: 'production', 'development',
'staging', 'yourname'
**kw: provided keyword arguments will override keys in SETTINGS.
"""
global SETTINGS, agent_log, _initialized, _transforms, _serialize_transform, _threads
if scrub_fields is not None:
SETTINGS['scrub_fields'] = list(scrub_fields)
if url_fields is not None:
SETTINGS['url_fields'] = list(url_fields)
# Merge the extra config settings into SETTINGS
SETTINGS = dict_merge(SETTINGS, kw)
if _initialized:
# NOTE: Temp solution to not being able to re-init.
# New versions of pyrollbar will support re-initialization
# via the (not-yet-implemented) configure() method.
if not SETTINGS.get('suppress_reinit_warning'):
log.warning('Rollbar already initialized. Ignoring re-init.')
return
SETTINGS['access_token'] = access_token
SETTINGS['environment'] = environment
if SETTINGS.get('allow_logging_basic_config'):
logging.basicConfig()
if SETTINGS.get('handler') == 'agent':
agent_log = _create_agent_log()
# We will perform these transforms in order:
# 1. Serialize the payload to be all python built-in objects
# 2. Scrub the payloads based on the key suffixes in SETTINGS['scrub_fields']
# 3. Scrub URLs in the payload for keys that end with 'url'
# 4. Optional - If local variable gathering is enabled, transform the
# trace frame values using the ShortReprTransform.
_serialize_transform = SerializableTransform(safe_repr=SETTINGS['locals']['safe_repr'],
whitelist_types=SETTINGS['locals']['whitelisted_types'])
_transforms = [
ScrubRedactTransform(),
_serialize_transform,
ScrubTransform(suffixes=[(field,) for field in SETTINGS['scrub_fields']], redact_char='*'),
ScrubUrlTransform(suffixes=[(field,) for field in SETTINGS['url_fields']], params_to_scrub=SETTINGS['scrub_fields'])
]
# A list of key prefixes to apply our shortener transform to. The request
# being included in the body key is old behavior and is being retained for
# backwards compatibility.
shortener_keys = [
('request', 'POST'),
('request', 'json'),
('body', 'request', 'POST'),
('body', 'request', 'json'),
]
if SETTINGS['locals']['enabled']:
shortener_keys.append(('body', 'trace', 'frames', '*', 'code'))
shortener_keys.append(('body', 'trace', 'frames', '*', 'args', '*'))
shortener_keys.append(('body', 'trace', 'frames', '*', 'kwargs', '*'))
shortener_keys.append(('body', 'trace', 'frames', '*', 'locals', '*'))
shortener_keys.extend(SETTINGS['shortener_keys'])
shortener = ShortenerTransform(safe_repr=SETTINGS['locals']['safe_repr'],
keys=shortener_keys,
**SETTINGS['locals']['sizes'])
_transforms.append(shortener)
_threads = queue.Queue()
events.reset()
filters.add_builtin_filters(SETTINGS)
_initialized = True
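# Editor's note: a minimal usage sketch for init(), not part of the library
# source. The access token is a placeholder; any SETTINGS key shown above can
# be overridden via keyword arguments.
#
#     import rollbar
#     rollbar.init('YOUR_PROJECT_ACCESS_TOKEN',
#                  environment='staging',
#                  handler='blocking',
#                  root='/srv/myapp')  # hypothetical project root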
def lambda_function(f):
"""
Decorator for making error handling on AWS Lambda easier
"""
@functools.wraps(f)
def wrapper(event, context):
global _CURRENT_LAMBDA_CONTEXT
_CURRENT_LAMBDA_CONTEXT = context
try:
result = f(event, context)
return wait(lambda: result)
except:
cls, exc, trace = sys.exc_info()
report_exc_info((cls, exc, trace.tb_next))
wait()
raise
return wrapper
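# Editor's note: a usage sketch for the decorator above, not part of the
# library. `handler` and `do_work` are hypothetical names.
#
#     import rollbar
#     rollbar.init('YOUR_PROJECT_ACCESS_TOKEN')
#
#     @rollbar.lambda_function
#     def handler(event, context):
#         return do_work(event)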
def report_exc_info(exc_info=None, request=None, extra_data=None, payload_data=None, level=None, **kw):
"""
Reports an exception to Rollbar, using exc_info (from calling sys.exc_info())
exc_info: optional, should be the result of calling sys.exc_info(). If omitted, sys.exc_info() will be called here.
request: optional, a WebOb, Werkzeug-based or Sanic request object.
extra_data: optional, will be included in the 'custom' section of the payload
payload_data: optional, dict that will override values in the final payload
(e.g. 'level' or 'fingerprint')
kw: provided for legacy purposes; unused.
Example usage:
rollbar.init(access_token='YOUR_PROJECT_ACCESS_TOKEN')
try:
do_something()
except:
rollbar.report_exc_info(sys.exc_info(), request, {'foo': 'bar'}, {'level': 'warning'})
"""
if exc_info is None:
exc_info = sys.exc_info()
try:
return _report_exc_info(exc_info, request, extra_data, payload_data, level=level)
except Exception as e:
log.exception("Exception while reporting exc_info to Rollbar. %r", e)
def report_message(message, level='error', request=None, extra_data=None, payload_data=None):
"""
Reports an arbitrary string message to Rollbar.
message: the string body of the message
level: level to report at. One of: 'critical', 'error', 'warning', 'info', 'debug'
request: the request object for the context of the message
extra_data: dictionary of params to include with the message. 'body' is reserved.
payload_data: param names to pass in the 'data' level of the payload; overrides defaults.
"""
try:
return _report_message(message, level, request, extra_data, payload_data)
except Exception as e:
log.exception("Exception while reporting message to Rollbar. %r", e)
def send_payload(payload, access_token):
"""
Sends a payload object, (the result of calling _build_payload() + _serialize_payload()).
Uses the configured handler from SETTINGS['handler']
Available handlers:
- 'blocking': calls _send_payload() (which makes an HTTP request) immediately, blocks on it
- 'thread': starts a single-use thread that will call _send_payload(). returns immediately.
- 'agent': writes to a log file to be processed by rollbar-agent
- 'tornado': calls _send_payload_tornado() (which makes an async HTTP request using tornado's AsyncHTTPClient)
- 'gae': calls _send_payload_appengine() (which makes a blocking call to Google App Engine)
    - 'twisted': calls _send_payload_twisted() (which makes an async HTTP request using Twisted and Treq)
"""
payload = events.on_payload(payload)
if payload is False:
return
payload_str = _serialize_payload(payload)
handler = SETTINGS.get('handler')
if handler == 'blocking':
_send_payload(payload_str, access_token)
elif handler == 'agent':
agent_log.error(payload_str)
elif handler == 'tornado':
if TornadoAsyncHTTPClient is None:
log.error('Unable to find tornado')
return
_send_payload_tornado(payload_str, access_token)
elif handler == 'gae':
if AppEngineFetch is None:
log.error('Unable to find AppEngine URLFetch module')
return
_send_payload_appengine(payload_str, access_token)
elif handler == 'twisted':
if treq is None:
log.error('Unable to find Treq')
return
_send_payload_twisted(payload_str, access_token)
else:
# default to 'thread'
thread = threading.Thread(target=_send_payload, args=(payload_str, access_token))
_threads.put(thread)
thread.start()
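# Editor's note: the handler is selected through configuration rather than per
# call; a sketch of picking one of the handlers listed in the docstring above
# (the token is a placeholder):
#
#     rollbar.init('YOUR_PROJECT_ACCESS_TOKEN', handler='tornado')
#     # subsequent report_exc_info()/report_message() calls are delivered
#     # through the tornado handler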
def search_items(title, return_fields=None, access_token=None, endpoint=None, **search_fields):
"""
Searches a project for items that match the input criteria.
title: all or part of the item's title to search for.
return_fields: the fields that should be returned for each item.
e.g. ['id', 'project_id', 'status'] will return a dict containing
only those fields for each item.
access_token: a project access token. If this is not provided,
the one provided to init() will be used instead.
search_fields: additional fields to include in the search.
currently supported: status, level, environment
"""
if not title:
return []
if return_fields is not None:
return_fields = ','.join(return_fields)
return _get_api('search/',
title=title,
fields=return_fields,
access_token=access_token,
endpoint=endpoint,
**search_fields)
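# Editor's note: a usage sketch for search_items(), not part of the library.
# The title and field names are placeholders; see the docstring above for the
# supported search_fields.
#
#     result = rollbar.search_items('ConnectionError',
#                                   return_fields=['id', 'status'],
#                                   environment='production')
#     print(result.data)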
def wait(f=None):
_threads.join()
if f is not None:
return f()
class ApiException(Exception):
"""
This exception will be raised if there was a problem decoding the
response from an API call.
"""
pass
class ApiError(ApiException):
"""
This exception will be raised if the API response contains an 'err'
field, denoting there was a problem fulfilling the api request.
"""
pass
class Result(object):
"""
This class encapsulates the response from an API call.
Usage:
result = search_items(title='foo', fields=['id'])
print result.data
"""
def __init__(self, access_token, path, params, data):
self.access_token = access_token
self.path = path
self.params = params
self.data = data
def __str__(self):
return str(self.data)
class PagedResult(Result):
"""
This class wraps the response from an API call that responded with
a page of results.
Usage:
result = search_items(title='foo', fields=['id'])
print 'First page: %d, data: %s' % (result.page, result.data)
result = result.next_page()
print 'Second page: %d, data: %s' % (result.page, result.data)
"""
def __init__(self, access_token, path, page_num, params, data, endpoint=None):
super(PagedResult, self).__init__(access_token, path, params, data)
self.page = page_num
self.endpoint = endpoint
def next_page(self):
params = copy.copy(self.params)
params['page'] = self.page + 1
return _get_api(self.path, endpoint=self.endpoint, **params)
def prev_page(self):
if self.page <= 1:
return self
params = copy.copy(self.params)
params['page'] = self.page - 1
return _get_api(self.path, endpoint=self.endpoint, **params)
## internal functions
def _resolve_exception_class(idx, filter):
cls, level = filter
if isinstance(cls, six.string_types):
# Lazily resolve class name
parts = cls.split('.')
module = '.'.join(parts[:-1])
if module in sys.modules and hasattr(sys.modules[module], parts[-1]):
cls = getattr(sys.modules[module], parts[-1])
SETTINGS['exception_level_filters'][idx] = (cls, level)
else:
cls = None
return cls, level
def _filtered_level(exception):
for i, filter in enumerate(SETTINGS['exception_level_filters']):
cls, level = _resolve_exception_class(i, filter)
if cls and isinstance(exception, cls):
return level
return None
def _is_ignored(exception):
return _filtered_level(exception) == 'ignored'
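# Editor's note: a configuration sketch for 'exception_level_filters', not
# part of the library. Each entry is a (class, level) pair; the class may also
# be a dotted-path string resolved lazily by _resolve_exception_class(), and
# the level 'ignored' suppresses reporting via _is_ignored().
#
#     rollbar.init('YOUR_PROJECT_ACCESS_TOKEN', exception_level_filters=[
#         (IOError, 'warning'),
#         ('requests.exceptions.ConnectionError', 'ignored'),
#     ])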
def _create_agent_log():
"""
Creates .rollbar log file for use with rollbar-agent
"""
log_file = SETTINGS['agent.log_file']
if not log_file.endswith('.rollbar'):
log.error("Provided agent log file does not end with .rollbar, which it must. "
"Using default instead.")
        # Fall back to the built-in default; DEFAULTS is not defined in this
        # module, so referencing it here would raise a NameError.
        log_file = 'log.rollbar'
retval = logging.getLogger('rollbar_agent')
handler = logging.FileHandler(log_file, 'a', 'utf-8')
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
retval.addHandler(handler)
retval.setLevel(logging.WARNING)
return retval
def _report_exc_info(exc_info, request, extra_data, payload_data, level=None):
"""
Called by report_exc_info() wrapper
"""
if not _check_config():
return
filtered_level = _filtered_level(exc_info[1])
if level is None:
level = filtered_level
filtered_exc_info = events.on_exception_info(exc_info,
request=request,
extra_data=extra_data,
payload_data=payload_data,
level=level)
if filtered_exc_info is False:
return
cls, exc, trace = filtered_exc_info
data = _build_base_data(request)
if level is not None:
data['level'] = level
# walk the trace chain to collect cause and context exceptions
trace_chain = _walk_trace_chain(cls, exc, trace)
extra_trace_data = None
if len(trace_chain) > 1:
data['body'] = {
'trace_chain': trace_chain
}
if payload_data and ('body' in payload_data) and ('trace' in payload_data['body']):
extra_trace_data = payload_data['body']['trace']
del payload_data['body']['trace']
else:
data['body'] = {
'trace': trace_chain[0]
}
    if extra_data:
if not isinstance(extra_data, dict):
extra_data = {'value': extra_data}
if extra_trace_data:
extra_data = dict_merge(extra_data, extra_trace_data)
data['custom'] = extra_data
if extra_trace_data and not extra_data:
data['custom'] = extra_trace_data
request = _get_actual_request(request)
_add_request_data(data, request)
_add_person_data(data, request)
_add_lambda_context_data(data)
data['server'] = _build_server_data()
if payload_data:
data = dict_merge(data, payload_data)
payload = _build_payload(data)
send_payload(payload, data.get('access_token'))
return data['uuid']
def _walk_trace_chain(cls, exc, trace):
trace_chain = [_trace_data(cls, exc, trace)]
while True:
exc = getattr(exc, '__cause__', None) or getattr(exc, '__context__', None)
if not exc:
break
trace_chain.append(_trace_data(type(exc), exc, getattr(exc, '__traceback__', None)))
return trace_chain
def _trace_data(cls, exc, trace):
# exception info
# most recent call last
raw_frames = traceback.extract_tb(trace)
frames = [{'filename': f[0], 'lineno': f[1], 'method': f[2], 'code': f[3]} for f in raw_frames]
trace_data = {
'frames': frames,
'exception': {
'class': getattr(cls, '__name__', cls.__class__.__name__),
'message': text(exc),
}
}
_add_locals_data(trace_data, (cls, exc, trace))
return trace_data
def _report_message(message, level, request, extra_data, payload_data):
"""
Called by report_message() wrapper
"""
if not _check_config():
return
filtered_message = events.on_message(message,
request=request,
extra_data=extra_data,
payload_data=payload_data,
level=level)
if filtered_message is False:
return
data = _build_base_data(request, level=level)
# message
data['body'] = {
'message': {
'body': filtered_message
}
}
    if extra_data:
data['body']['message'].update(extra_data)
request = _get_actual_request(request)
_add_request_data(data, request)
_add_person_data(data, request)
_add_lambda_context_data(data)
data['server'] = _build_server_data()
if payload_data:
data = dict_merge(data, payload_data)
payload = _build_payload(data)
send_payload(payload, data.get('access_token'))
return data['uuid']
def _check_config():
if not SETTINGS.get('enabled'):
log.info("pyrollbar: Not reporting because rollbar is disabled.")
return False
# skip access token check for the agent handler
if SETTINGS.get('handler') == 'agent':
return True
# make sure we have an access_token
if not SETTINGS.get('access_token'):
log.warning("pyrollbar: No access_token provided. Please configure by calling rollbar.init() with your access token.")
return False
return True
def _build_base_data(request, level='error'):
data = {
'timestamp': int(time.time()),
'environment': SETTINGS['environment'],
'level': level,
'language': 'python %s' % '.'.join(str(x) for x in sys.version_info[:3]),
'notifier': SETTINGS['notifier'],
'uuid': text(uuid.uuid4()),
}
if SETTINGS.get('code_version'):
data['code_version'] = SETTINGS['code_version']
if BASE_DATA_HOOK:
BASE_DATA_HOOK(request, data)
return data
def _add_person_data(data, request):
try:
person_data = _build_person_data(request)
except Exception as e:
log.exception("Exception while building person data for Rollbar payload: %r", e)
else:
if person_data:
if not SETTINGS['capture_username'] and 'username' in person_data:
person_data['username'] = None
if not SETTINGS['capture_email'] and 'email' in person_data:
person_data['email'] = None
data['person'] = person_data
def _build_person_data(request):
"""
    Returns a dictionary describing the logged-in user using data from the request object.
Try request.rollbar_person first, then 'user', then 'user_id'
"""
if hasattr(request, 'rollbar_person'):
rollbar_person_prop = request.rollbar_person
try:
person = rollbar_person_prop()
except TypeError:
person = rollbar_person_prop
if person and isinstance(person, dict):
return person
else:
return None
if hasattr(request, 'user'):
user_prop = request.user
try:
user = user_prop()
except TypeError:
user = user_prop
if not user:
return None
elif isinstance(user, dict):
return user
else:
retval = {}
if getattr(user, 'id', None):
retval['id'] = text(user.id)
elif getattr(user, 'user_id', None):
retval['id'] = text(user.user_id)
# id is required, so only include username/email if we have an id
if retval.get('id'):
username = getattr(user, 'username', None)
email = getattr(user, 'email', None)
retval.update({
'username': username,
'email': email
})
return retval
if hasattr(request, 'user_id'):
user_id_prop = request.user_id
try:
user_id = user_id_prop()
except TypeError:
user_id = user_id_prop
if not user_id:
return None
return {'id': text(user_id)}
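# Editor's note: a sketch of supplying person data, not part of the library.
# _build_person_data() looks for `rollbar_person`, then `user`, then `user_id`
# on the request object; 'id' is required, and username/email are only kept
# when the corresponding capture_* settings are enabled.
#
#     request.rollbar_person = {'id': '42', 'username': 'jdoe',
#                               'email': 'jdoe@example.com'}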
def _get_func_from_frame(frame):
func_name = inspect.getframeinfo(frame).function
caller = frame.f_back
if caller:
func = caller.f_locals.get(func_name,
caller.f_globals.get(func_name))
else:
func = None
return func
def _flatten_nested_lists(l):
ret = []
for x in l:
if isinstance(x, list):
ret.extend(_flatten_nested_lists(x))
else:
ret.append(x)
return ret
def _add_locals_data(trace_data, exc_info):
if not SETTINGS['locals']['enabled']:
return
frames = trace_data['frames']
cur_tb = exc_info[2]
frame_num = 0
num_frames = len(frames)
while cur_tb:
cur_frame = frames[frame_num]
tb_frame = cur_tb.tb_frame
cur_tb = cur_tb.tb_next
if not isinstance(tb_frame, types.FrameType):
# this can happen if the traceback or frame is wrapped in some way,
# for example by `ExceptionInfo` in
# https://github.com/celery/billiard/blob/master/billiard/einfo.py
log.warning('Traceback frame not a types.FrameType. Ignoring.')
frame_num += 1
continue
# Create placeholders for argspec/varargspec/keywordspec/locals
argspec = None
varargspec = None
keywordspec = None
_locals = {}
try:
arginfo = inspect.getargvalues(tb_frame)
# Optionally fill in locals for this frame
if arginfo.locals and _check_add_locals(cur_frame, frame_num, num_frames):
# Get all of the named args
#
# args can be a nested list of args in the case where there
# are anonymous tuple args provided.
# e.g. in Python 2 you can:
# def func((x, (a, b), z)):
# return x + a + b + z
#
# func((1, (1, 2), 3))
argspec = _flatten_nested_lists(arginfo.args)
if arginfo.varargs is not None:
varargspec = arginfo.varargs
if SETTINGS['locals']['scrub_varargs']:
temp_varargs = list(arginfo.locals[varargspec])
for i, arg in enumerate(temp_varargs):
temp_varargs[i] = REDACT_REF
arginfo.locals[varargspec] = tuple(temp_varargs)
if arginfo.keywords is not None:
keywordspec = arginfo.keywords
_locals.update(arginfo.locals.items())
except Exception:
log.exception('Error while extracting arguments from frame. Ignoring.')
# Finally, serialize each arg/kwarg/local separately so that we only report
# CircularReferences for each variable, instead of for the entire payload
# as would be the case if we serialized that payload in one-shot.
if argspec:
cur_frame['argspec'] = argspec
if varargspec:
cur_frame['varargspec'] = varargspec
if keywordspec:
cur_frame['keywordspec'] = keywordspec
if _locals:
try:
cur_frame['locals'] = dict((k, _serialize_frame_data(v)) for k, v in iteritems(_locals))
except Exception:
log.exception('Error while serializing frame data.')
frame_num += 1
def _serialize_frame_data(data):
for transform in (ScrubRedactTransform(), _serialize_transform):
data = transforms.transform(data, transform)
return data
def _add_lambda_context_data(data):
"""
Attempts to add information from the lambda context if it exists
"""
global _CURRENT_LAMBDA_CONTEXT
context = _CURRENT_LAMBDA_CONTEXT
if context is None:
return
try:
lambda_data = {
'lambda': {
'remaining_time_in_millis': context.get_remaining_time_in_millis(),
'function_name': context.function_name,
'function_version': context.function_version,
'arn': context.invoked_function_arn,
'request_id': context.aws_request_id,
}
}
if 'custom' in data:
data['custom'] = dict_merge(data['custom'], lambda_data)
else:
data['custom'] = lambda_data
except Exception as e:
log.exception("Exception while adding lambda context data: %r", e)
finally:
_CURRENT_LAMBDA_CONTEXT = None
def _add_request_data(data, request):
"""
Attempts to build request data; if successful, sets the 'request' key on `data`.
"""
try:
request_data = _build_request_data(request)
except Exception as e:
log.exception("Exception while building request_data for Rollbar payload: %r", e)
else:
if request_data:
_filter_ip(request_data, SETTINGS['capture_ip'])
data['request'] = request_data
def _check_add_locals(frame, frame_num, total_frames):
"""
Returns True if we should record local variables for the given frame.
"""
# Include the last frames locals
# Include any frame locals that came from a file in the project's root
return any(((frame_num == total_frames - 1),
('root' in SETTINGS and (frame.get('filename') or '').lower().startswith((SETTINGS['root'] or '').lower()))))
def _get_actual_request(request):
if WerkzeugLocalProxy and isinstance(request, WerkzeugLocalProxy):
try:
actual_request = request._get_current_object()
except RuntimeError:
return None
return actual_request
return request
def _build_request_data(request):
"""
Returns a dictionary containing data from the request.
Can handle webob or werkzeug-based request objects.
"""
# webob (pyramid)
if WebobBaseRequest and isinstance(request, WebobBaseRequest):
return _build_webob_request_data(request)
# django
if DjangoHttpRequest and isinstance(request, DjangoHttpRequest):
return _build_django_request_data(request)
# django rest framework
if RestFrameworkRequest and isinstance(request, RestFrameworkRequest):
return _build_django_request_data(request)
# werkzeug (flask)
if WerkzeugRequest and isinstance(request, WerkzeugRequest):
return _build_werkzeug_request_data(request)
# tornado
if TornadoRequest and isinstance(request, TornadoRequest):
return _build_tornado_request_data(request)
# bottle
if BottleRequest and isinstance(request, BottleRequest):
return _build_bottle_request_data(request)
# Sanic
if SanicRequest and isinstance(request, SanicRequest):
return _build_sanic_request_data(request)
# falcon
if FalconRequest and isinstance(request, FalconRequest):
return _build_falcon_request_data(request)
# Plain wsgi (should be last)
if isinstance(request, dict) and 'wsgi.version' in request:
return _build_wsgi_request_data(request)
return None
def _build_webob_request_data(request):
request_data = {
'url': request.url,
'GET': dict(request.GET),
'user_ip': _extract_user_ip(request),
'headers': dict(request.headers),
'method': request.method,
}
try:
if request.json:
request_data['json'] = request.json
except:
pass
# pyramid matchdict
if getattr(request, 'matchdict', None):
request_data['params'] = request.matchdict
# workaround for webob bug when the request body contains binary data but has a text
# content-type
try:
request_data['POST'] = dict(request.POST)
except UnicodeDecodeError:
request_data['body'] = request.body
return request_data
def _extract_wsgi_headers(items):
headers = {}
for k, v in items:
if k.startswith('HTTP_'):
header_name = '-'.join(k[len('HTTP_'):].replace('_', ' ').title().split(' '))
headers[header_name] = v
return headers
def _build_django_request_data(request):
request_data = {
'url': request.get_raw_uri(),
'method': request.method,
'GET': dict(request.GET),
'POST': dict(request.POST),
'user_ip': _wsgi_extract_user_ip(request.META),
}
request_data['headers'] = _extract_wsgi_headers(request.META.items())
return request_data
def _build_werkzeug_request_data(request):
request_data = {
'url': request.url,
'GET': dict(request.args),
'POST': dict(request.form),
'user_ip': _extract_user_ip(request),
'headers': dict(request.headers),
'method': request.method,
'files_keys': list(request.files.keys()),
}
try:
if request.json:
request_data['body'] = request.json
except Exception:
pass
return request_data
def _build_tornado_request_data(request):
request_data = {
'url': request.full_url(),
'user_ip': request.remote_ip,
'headers': dict(request.headers),
'method': request.method,
'files_keys': request.files.keys(),
'start_time': getattr(request, '_start_time', None),
}
request_data[request.method] = request.arguments
return request_data
def _build_bottle_request_data(request):
request_data = {
'url': request.url,
'user_ip': request.remote_addr,
'headers': dict(request.headers),
'method': request.method,
'GET': dict(request.query)
}
if request.json:
try:
request_data['body'] = request.body.getvalue()
except:
pass
else:
request_data['POST'] = dict(request.forms)
return request_data
def _build_sanic_request_data(request):
request_data = {
'url': request.url,
'user_ip': request.remote_addr,
'headers': request.headers,
'method': request.method,
'GET': dict(request.args)
}
if request.json:
try:
request_data['body'] = request.json
except:
pass
else:
request_data['POST'] = request.form
return request_data
def _build_falcon_request_data(request):
request_data = {
'url': request.url,
'user_ip': _wsgi_extract_user_ip(request.env),
'headers': dict(request.headers),
'method': request.method,
'GET': dict(request.params),
'context': dict(request.context),
}
return request_data
def _build_wsgi_request_data(request):
request_data = {
'url': wsgiref.util.request_uri(request),
'user_ip': _wsgi_extract_user_ip(request),
'method': request.get('REQUEST_METHOD'),
}
if 'QUERY_STRING' in request:
request_data['GET'] = parse_qs(request['QUERY_STRING'], keep_blank_values=True)
# Collapse single item arrays
request_data['GET'] = dict((k, v[0] if len(v) == 1 else v) for k, v in request_data['GET'].items())
request_data['headers'] = _extract_wsgi_headers(request.items())
try:
length = int(request.get('CONTENT_LENGTH', 0))
except ValueError:
length = 0
input = request.get('wsgi.input')
if length and input and hasattr(input, 'seek') and hasattr(input, 'tell'):
pos = input.tell()
input.seek(0, 0)
request_data['body'] = input.read(length)
input.seek(pos, 0)
return request_data
def _filter_ip(request_data, capture_ip):
if 'user_ip' not in request_data or capture_ip == True:
return
current_ip = request_data['user_ip']
if not current_ip:
return
new_ip = current_ip
if not capture_ip:
new_ip = None
elif capture_ip == ANONYMIZE:
try:
if '.' in current_ip:
new_ip = '.'.join(current_ip.split('.')[0:3]) + '.0'
elif ':' in current_ip:
parts = current_ip.split(':')
if len(parts) > 2:
terminal = '0000:0000:0000:0000:0000'
new_ip = ':'.join(parts[0:3] + [terminal])
else:
new_ip = None
except:
new_ip = None
request_data['user_ip'] = new_ip
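# Editor's note: a configuration sketch for 'capture_ip', not part of the
# library. With capture_ip=rollbar.ANONYMIZE, _filter_ip() keeps only the
# first three IPv4 octets (e.g. 192.168.1.79 becomes 192.168.1.0); with
# capture_ip=False the address is dropped entirely.
#
#     rollbar.init('YOUR_PROJECT_ACCESS_TOKEN', capture_ip=rollbar.ANONYMIZE)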
def _build_server_data():
"""
Returns a dictionary containing information about the server environment.
"""
# server environment
server_data = {
'host': socket.gethostname(),
'pid': os.getpid()
}
# argv does not always exist in embedded python environments
argv = getattr(sys, 'argv', None)
if argv:
server_data['argv'] = argv
for key in ['branch', 'root']:
if SETTINGS.get(key):
server_data[key] = SETTINGS[key]
return server_data
def _transform(obj, key=None):
for transform in _transforms:
obj = transforms.transform(obj, transform, key=key)
return obj
def _build_payload(data):
"""
Returns the full payload as a string.
"""
for k, v in iteritems(data):
data[k] = _transform(v, key=(k,))
payload = {
'access_token': SETTINGS['access_token'],
'data': data
}
return payload
def _serialize_payload(payload):
return json.dumps(payload)
def _send_payload(payload_str, access_token):
try:
_post_api('item/', payload_str, access_token=access_token)
except Exception as e:
log.exception('Exception while posting item %r', e)
try:
_threads.get_nowait()
_threads.task_done()
except queue.Empty:
pass
def _send_payload_appengine(payload_str, access_token):
try:
_post_api_appengine('item/', payload_str, access_token=access_token)
except Exception as e:
log.exception('Exception while posting item %r', e)
def _post_api_appengine(path, payload_str, access_token=None):
headers = {'Content-Type': 'application/json'}
if access_token is not None:
headers['X-Rollbar-Access-Token'] = access_token
url = urljoin(SETTINGS['endpoint'], path)
resp = AppEngineFetch(url,
method="POST",
payload=payload_str,
headers=headers,
allow_truncated=False,
deadline=SETTINGS.get('timeout', DEFAULT_TIMEOUT),
validate_certificate=SETTINGS.get('verify_https', True))
return _parse_response(path, SETTINGS['access_token'], payload_str, resp)
def _post_api(path, payload_str, access_token=None):
headers = {'Content-Type': 'application/json'}
if access_token is not None:
headers['X-Rollbar-Access-Token'] = access_token
url = urljoin(SETTINGS['endpoint'], path)
resp = transport.post(url,
data=payload_str,
headers=headers,
timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT),
verify=SETTINGS.get('verify_https', True),
proxy=SETTINGS.get('http_proxy'),
proxy_user=SETTINGS.get('http_proxy_user'),
proxy_password=SETTINGS.get('http_proxy_password'))
return _parse_response(path, SETTINGS['access_token'], payload_str, resp)
def _get_api(path, access_token=None, endpoint=None, **params):
access_token = access_token or SETTINGS['access_token']
url = urljoin(endpoint or SETTINGS['endpoint'], path)
params['access_token'] = access_token
resp = transport.get(url,
params=params,
verify=SETTINGS.get('verify_https', True),
proxy=SETTINGS.get('http_proxy'),
proxy_user=SETTINGS.get('http_proxy_user'),
proxy_password=SETTINGS.get('http_proxy_password'))
return _parse_response(path, access_token, params, resp, endpoint=endpoint)
def _send_payload_tornado(payload_str, access_token):
try:
_post_api_tornado('item/', payload_str, access_token=access_token)
except Exception as e:
log.exception('Exception while posting item %r', e)
def _post_api_tornado(path, payload_str, access_token=None):
headers = {'Content-Type': 'application/json'}
if access_token is not None:
headers['X-Rollbar-Access-Token'] = access_token
else:
access_token = SETTINGS['access_token']
url = urljoin(SETTINGS['endpoint'], path)
def post_tornado_cb(resp):
r = requests.Response()
r._content = resp.body
r.status_code = resp.code
r.headers.update(resp.headers)
try:
_parse_response(path, access_token, payload_str, r)
except Exception as e:
log.exception('Exception while posting item %r', e)
TornadoAsyncHTTPClient().fetch(url,
callback=post_tornado_cb,
raise_error=False,
body=payload_str,
method='POST',
connect_timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT),
request_timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT))
def _send_payload_twisted(payload_str, access_token):
try:
_post_api_twisted('item/', payload_str, access_token=access_token)
except Exception as e:
log.exception('Exception while posting item %r', e)
def _post_api_twisted(path, payload_str, access_token=None):
def post_data_cb(data, resp):
resp._content = data
_parse_response(path, SETTINGS['access_token'], payload_str, resp)
def post_cb(resp):
r = requests.Response()
r.status_code = resp.code
r.headers.update(resp.headers.getAllRawHeaders())
return treq.content(resp).addCallback(post_data_cb, r)
headers = {'Content-Type': ['application/json']}
if access_token is not None:
headers['X-Rollbar-Access-Token'] = [access_token]
url = urljoin(SETTINGS['endpoint'], path)
d = treq.post(url, payload_str, headers=headers,
timeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT))
d.addCallback(post_cb)
def _send_failsafe(message, uuid, host):
body_message = ('Failsafe from pyrollbar: {0}. Original payload may be found '
'in your server logs by searching for the UUID.').format(message)
data = {
'level': 'error',
'environment': SETTINGS['environment'],
'body': {
'message': {
'body': body_message
}
},
'notifier': SETTINGS['notifier'],
'custom': {
'orig_uuid': uuid,
'orig_host': host
},
'failsafe': True,
'internal': True,
}
payload = _build_payload(data)
try:
send_payload(payload, SETTINGS['access_token'])
except Exception:
log.exception('Rollbar: Error sending failsafe.')
def _parse_response(path, access_token, params, resp, endpoint=None):
if isinstance(resp, requests.Response):
try:
data = resp.text
except Exception:
data = resp.content
log.error('resp.text is undefined, resp.content is %r', resp.content)
else:
data = resp.content
global _LAST_RESPONSE_STATUS
last_response_was_429 = _LAST_RESPONSE_STATUS == 429
_LAST_RESPONSE_STATUS = resp.status_code
if resp.status_code == 429:
if SETTINGS['log_all_rate_limited_items'] or not last_response_was_429:
log.warning("Rollbar: over rate limit, data was dropped. Payload was: %r", params)
return
elif resp.status_code == 502:
log.exception('Rollbar api returned a 502')
return
elif resp.status_code == 413:
uuid = None
host = None
try:
payload = json.loads(params)
uuid = payload['data']['uuid']
host = payload['data']['server']['host']
log.error("Rollbar: request entity too large for UUID %r\n. Payload:\n%r", uuid, payload)
except (TypeError, ValueError):
log.exception('Unable to decode JSON for failsafe.')
except KeyError:
log.exception('Unable to find payload parameters for failsafe.')
_send_failsafe('payload too large', uuid, host)
# TODO: Should we return here?
elif resp.status_code != 200:
log.warning("Got unexpected status code from Rollbar api: %s\nResponse:\n%s",
resp.status_code, data)
# TODO: Should we also return here?
try:
json_data = json.loads(data)
except (TypeError, ValueError):
log.exception('Could not decode Rollbar api response:\n%s', data)
raise ApiException('Request to %s returned invalid JSON response', path)
else:
if json_data.get('err'):
raise ApiError(json_data.get('message') or 'Unknown error')
result = json_data.get('result', {})
if 'page' in result:
return PagedResult(access_token, path, result['page'], params, result, endpoint=endpoint)
else:
return Result(access_token, path, params, result)
def _extract_user_ip(request):
# some common things passed by load balancers... will need more of these.
real_ip = request.headers.get('X-Real-Ip')
if real_ip:
return real_ip
forwarded_for = request.headers.get('X-Forwarded-For')
if forwarded_for:
return forwarded_for
return request.remote_addr
def _wsgi_extract_user_ip(environ):
forwarded_for = environ.get('HTTP_X_FORWARDED_FOR')
if forwarded_for:
return forwarded_for
real_ip = environ.get('HTTP_X_REAL_IP')
if real_ip:
return real_ip
return environ['REMOTE_ADDR']
| 30.758577 | 126 | 0.625955 |
bcc8c5f0cdcc0646686c4ce1b96328c8f07cac26 | 2,910 | py | Python | okonomiyaki/versions/enpkg.py | enthought/okonomiyaki | 51b8b4fa8d17255e13c097402691726545cf5b4c | ["BSD-3-Clause"] | 1 | 2021-06-01T16:35:00.000Z | 2021-06-01T16:35:00.000Z | okonomiyaki/versions/enpkg.py | enthought/okonomiyaki | 51b8b4fa8d17255e13c097402691726545cf5b4c | ["BSD-3-Clause"] | 249 | 2015-02-24T19:06:53.000Z | 2021-07-30T09:01:53.000Z | okonomiyaki/versions/enpkg.py | enthought/okonomiyaki | 51b8b4fa8d17255e13c097402691726545cf5b4c | ["BSD-3-Clause"] | 4 | 2015-02-19T21:29:12.000Z | 2016-01-14T21:02:39.000Z |
from okonomiyaki.errors import InvalidEnpkgVersion
from .pep386_workaround import IrrationalVersionError, PEP386WorkaroundVersion
class EnpkgVersion(object):
@classmethod
def from_upstream_and_build(cls, upstream, build):
""" Creates a new EnpkgVersion from the upstream string and the
build number.
Parameters
----------
upstream : str
The upstream version (e.g. '1.3.0')
build : int
The build number
"""
try:
upstream = PEP386WorkaroundVersion.from_string(upstream)
except IrrationalVersionError:
raise InvalidEnpkgVersion(
"{}-{}".format(upstream, build),
"Invalid PEP386 version string: {0!r}".format(upstream)
)
return cls(upstream, build)
@classmethod
def from_string(cls, version_string):
""" Creates a new EnpkgVersion from its string representation.
Parameters
----------
version_string : str
the version string.
"""
parts = version_string.rsplit("-")
if len(parts) == 1:
build = 0
elif len(parts) == 2:
try:
build = int(parts[1])
except ValueError:
raise InvalidEnpkgVersion(
version_string,
"Invalid build number: {0!r}".format(parts[1])
)
else:
raise InvalidEnpkgVersion(version_string)
return cls.from_upstream_and_build(parts[0], build)
def __init__(self, upstream, build):
""" Creates a new EnpkgVersion instance
Parameters
----------
upstream : PEP386WorkaroundVersion
The upstream version
build : int
The build number
"""
self.upstream = upstream
self.build = build
self._parts = upstream, build
def __str__(self):
return str(self.upstream) + "-" + str(self.build)
def __hash__(self):
return hash(self._parts)
def _cannot_compare(self, other):
msg = "Cannot compare {0!r} and {1!r}"
raise TypeError(msg.format(type(self), type(other)))
def __eq__(self, other):
if not isinstance(other, EnpkgVersion):
self._cannot_compare(other)
return self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if not isinstance(other, EnpkgVersion):
self._cannot_compare(other)
return self._parts < other._parts
def __le__(self, other):
return not self.__gt__(other)
def __gt__(self, other):
if not isinstance(other, EnpkgVersion):
self._cannot_compare(other)
return self._parts > other._parts
def __ge__(self, other):
return not self.__lt__(other)
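# Editor's note: a usage sketch, not part of the library. from_string() splits
# on the last '-' to obtain the build number (defaulting to 0 when no suffix
# is present), and instances compare by (upstream, build):
#
#     v1 = EnpkgVersion.from_string("1.3.0-1")
#     v2 = EnpkgVersion.from_string("1.3.0-2")
#     assert v1 < v2
#     assert EnpkgVersion.from_string("1.3.0").build == 0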
| 29.1 | 78 | 0.579038 |
6e207da8caa3aa42f113363ef0d8d98e40992e91 | 6,604 | py | Python | manim/animation/composition.py | Hari-07/manim | bbe113e7d33636c8901d6c7cee81cb2f4b69cc8b | ["MIT"] | 1 | 2020-04-29T19:55:50.000Z | 2020-04-29T19:55:50.000Z | manim/animation/composition.py | Hari-07/manim | bbe113e7d33636c8901d6c7cee81cb2f4b69cc8b | ["MIT"] | null | null | null | manim/animation/composition.py | Hari-07/manim | bbe113e7d33636c8901d6c7cee81cb2f4b69cc8b | ["MIT"] | null | null | null |
"""Tools for displaying multiple animations at once."""
from typing import TYPE_CHECKING, Callable, Optional, Union
import numpy as np
from .._config import config
from ..animation.animation import Animation, prepare_animation
from ..mobject.mobject import Group, Mobject
from ..mobject.opengl_mobject import OpenGLGroup
from ..scene.scene import Scene
from ..utils.iterables import remove_list_redundancies
from ..utils.rate_functions import linear
if TYPE_CHECKING:
from ..mobject.types.opengl_vectorized_mobject import OpenGLVGroup
from ..mobject.types.vectorized_mobject import VGroup
__all__ = ["AnimationGroup", "Succession", "LaggedStart", "LaggedStartMap"]
DEFAULT_LAGGED_START_LAG_RATIO: float = 0.05
class AnimationGroup(Animation):
def __init__(
self,
*animations: Animation,
group: Union[Group, "VGroup", OpenGLGroup, "OpenGLVGroup"] = None,
run_time: Optional[float] = None,
rate_func: Callable[[float], float] = linear,
lag_ratio: float = 0,
**kwargs
) -> None:
self.animations = [prepare_animation(anim) for anim in animations]
self.group = group
if self.group is None:
mobjects = remove_list_redundancies(
[anim.mobject for anim in self.animations]
)
if config["renderer"] == "opengl":
self.group = OpenGLGroup(*mobjects)
else:
self.group = Group(*mobjects)
super().__init__(self.group, rate_func=rate_func, lag_ratio=lag_ratio, **kwargs)
self.run_time: float = self.init_run_time(run_time)
def get_all_mobjects(self) -> Group:
return self.group
def begin(self) -> None:
for anim in self.animations:
anim.begin()
def finish(self) -> None:
for anim in self.animations:
anim.finish()
def clean_up_from_scene(self, scene: Scene) -> None:
for anim in self.animations:
anim.clean_up_from_scene(scene)
def update_mobjects(self, dt: float) -> None:
for anim in self.animations:
anim.update_mobjects(dt)
def init_run_time(self, run_time) -> float:
self.build_animations_with_timings()
if self.anims_with_timings:
self.max_end_time = np.max([awt[2] for awt in self.anims_with_timings])
else:
self.max_end_time = 0
return self.max_end_time if run_time is None else run_time
def build_animations_with_timings(self) -> None:
"""
Creates a list of triplets of the form
(anim, start_time, end_time)
"""
self.anims_with_timings = []
curr_time: float = 0
for anim in self.animations:
start_time: float = curr_time
end_time: float = start_time + anim.get_run_time()
self.anims_with_timings.append((anim, start_time, end_time))
# Start time of next animation is based on the lag_ratio
curr_time = (1 - self.lag_ratio) * start_time + self.lag_ratio * end_time
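    # Editor's note (not part of the library): a worked example of the timing
    # rule above. With two 1-second animations and lag_ratio=0.5, the first
    # runs over [0, 1] and the second starts at (1 - 0.5) * 0 + 0.5 * 1 = 0.5,
    # ending at 1.5; lag_ratio=0 therefore plays everything simultaneously and
    # lag_ratio=1 plays the animations back to back.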
def interpolate(self, alpha: float) -> None:
# Note, if the run_time of AnimationGroup has been
# set to something other than its default, these
# times might not correspond to actual times,
# e.g. of the surrounding scene. Instead they'd
# be a rescaled version. But that's okay!
time = alpha * self.max_end_time
for anim, start_time, end_time in self.anims_with_timings:
anim_time = end_time - start_time
if anim_time == 0:
sub_alpha = 0
else:
sub_alpha = np.clip((time - start_time) / anim_time, 0, 1)
anim.interpolate(sub_alpha)
class Succession(AnimationGroup):
def __init__(self, *animations: Animation, lag_ratio: float = 1, **kwargs) -> None:
super().__init__(*animations, lag_ratio=lag_ratio, **kwargs)
def begin(self) -> None:
assert len(self.animations) > 0
self.update_active_animation(0)
def finish(self) -> None:
while self.active_animation is not None:
self.next_animation()
def update_mobjects(self, dt: float) -> None:
if self.active_animation:
self.active_animation.update_mobjects(dt)
def update_active_animation(self, index: int) -> None:
self.active_index = index
if index >= len(self.animations):
self.active_animation: Optional[Animation] = None
self.active_start_time: Optional[float] = None
self.active_end_time: Optional[float] = None
else:
self.active_animation = self.animations[index]
self.active_animation.begin()
self.active_start_time = self.anims_with_timings[index][1]
self.active_end_time = self.anims_with_timings[index][2]
def next_animation(self) -> None:
if self.active_animation is not None:
self.active_animation.finish()
self.update_active_animation(self.active_index + 1)
def interpolate(self, alpha: float) -> None:
current_time = alpha * self.run_time
while self.active_end_time is not None and current_time >= self.active_end_time:
self.next_animation()
if self.active_animation is not None and self.active_start_time is not None:
elapsed = current_time - self.active_start_time
active_run_time = self.active_animation.get_run_time()
subalpha = elapsed / active_run_time if active_run_time != 0.0 else 1.0
self.active_animation.interpolate(subalpha)
class LaggedStart(AnimationGroup):
def __init__(
self,
*animations: Animation,
lag_ratio: float = DEFAULT_LAGGED_START_LAG_RATIO,
**kwargs
):
super().__init__(*animations, lag_ratio=lag_ratio, **kwargs)
class LaggedStartMap(LaggedStart):
def __init__(
self,
AnimationClass: Callable[..., Animation],
mobject: Mobject,
arg_creator: Callable[[Mobject], str] = None,
run_time: float = 2,
**kwargs
) -> None:
args_list = []
for submob in mobject:
if arg_creator:
args_list.append(arg_creator(submob))
else:
args_list.append((submob,))
anim_kwargs = dict(kwargs)
if "lag_ratio" in anim_kwargs:
anim_kwargs.pop("lag_ratio")
animations = [AnimationClass(*args, **anim_kwargs) for args in args_list]
super().__init__(*animations, run_time=run_time, **kwargs)
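# Editor's note: a usage sketch, not part of the library. It assumes a Scene's
# construct() method, a VGroup named `squares`, and the FadeIn animation from
# manim; all three are assumptions, not part of this module.
#
#     self.play(LaggedStartMap(FadeIn, squares, lag_ratio=0.1))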
| 36.688889 | 88 | 0.639612 |
88177a019fffd8f080e38e3816cc41d048987b23 | 1,227 | py | Python | test_ServiceGame.py | pauloeduard0/CI-List-L3 | 9263e456d6d5e07e1e1c238042ea64f81b11eb3c | ["MIT"] | null | null | null | test_ServiceGame.py | pauloeduard0/CI-List-L3 | 9263e456d6d5e07e1e1c238042ea64f81b11eb3c | ["MIT"] | null | null | null | test_ServiceGame.py | pauloeduard0/CI-List-L3 | 9263e456d6d5e07e1e1c238042ea64f81b11eb3c | ["MIT"] | null | null | null |
import unittest
import ServiceGame
from model.Platform import platform
from model.Publishers import publisher
class TestServiceGame(unittest.TestCase):
def test_games_Wii(self):
wiigames = ServiceGame.platz(platform('Wii'))
self.assertEqual(15, len(wiigames))
def test_games_PC(self):
pc = ServiceGame.platz(platform('PC'))
self.assertEqual(1, len(pc))
def test_games_SquareSoft(self):
squaresoft = ServiceGame.plubz(publisher('SquareSoft'))
self.assertNotEqual(0, len(squaresoft))
def test_games_ElectronicArts(self):
electronicarts = ServiceGame.plubz(publisher('Electronic Arts'))
self.assertEqual(5, len(electronicarts))
def test_csv_is_create_platform(self):
ServiceGame.escolher('P1', platform('Wii'))
with open('output.csv') as arquivo:
conteudo = arquivo.readlines()
self.assertEqual(15, len(conteudo))
def test_csv_is_create_publisher(self):
ServiceGame.escolher('P2', publisher('Electronic Arts'))
with open('output.csv') as arquivo:
conteudo = arquivo.readlines()
self.assertEqual(5, len(conteudo))
if __name__ == '__main__':
unittest.main()
| 30.675 | 72 | 0.682967 |
0dc9335f1048d0ab2877a860af9f4a47a9a63480 | 16,369 | py | Python | tests/mock_dbus.py | harisokanovic/pyconnman | 8876f3dbf3a25a2a65d2d81e7b0bdd8068839173 | ["Apache-2.0"] | null | null | null | tests/mock_dbus.py | harisokanovic/pyconnman | 8876f3dbf3a25a2a65d2d81e7b0bdd8068839173 | ["Apache-2.0"] | null | null | null | tests/mock_dbus.py | harisokanovic/pyconnman | 8876f3dbf3a25a2a65d2d81e7b0bdd8068839173 | ["Apache-2.0"] | null | null | null |
from __future__ import unicode_literals
import dbus
class MockDBusInterface:
"""Mock dbus.Interface implementation for purpose of testing"""
def __init__(self, obj, addr):
self.addr = addr
self.agent = None
if (self.addr == 'net.connman.Manager'):
self._props = dbus.Dictionary({dbus.String(u'State'): dbus.String(u'online', variant_level=1), dbus.String(u'OfflineMode'): dbus.Boolean(False, variant_level=1), dbus.String(u'SessionMode'): dbus.Boolean(False, variant_level=1)}, signature=dbus.Signature('sv')) # noqa
self._technologies = dbus.Array([dbus.Struct((dbus.ObjectPath('/net/connman/technology/wifi'), dbus.Dictionary({dbus.String(u'Connected'): dbus.Boolean(False, variant_level=1), dbus.String(u'Tethering'): dbus.Boolean(False, variant_level=1), dbus.String(u'Type'): dbus.String(u'wifi', variant_level=1), dbus.String(u'Name'): dbus.String(u'WiFi', variant_level=1), dbus.String(u'Powered'): dbus.Boolean(True, variant_level=1)}, signature=dbus.Signature('sv'))), signature=None), dbus.Struct((dbus.ObjectPath('/net/connman/technology/ethernet'), dbus.Dictionary({dbus.String(u'Connected'): dbus.Boolean(True, variant_level=1), dbus.String(u'Tethering'): dbus.Boolean(False, variant_level=1), dbus.String(u'Type'): dbus.String(u'ethernet', variant_level=1), dbus.String(u'Name'): dbus.String(u'Wired', variant_level=1), dbus.String(u'Powered'): dbus.Boolean(True, variant_level=1)}, signature=dbus.Signature('sv'))), signature=None)], signature=dbus.Signature('(oa{sv})')) # noqa
self._services = dbus.Array([dbus.Struct((dbus.ObjectPath('/net/connman/service/ethernet_b827ebaf24d8_cable'), dbus.Dictionary({dbus.String(u'IPv6.Configuration'): dbus.Dictionary({dbus.String(u'Method'): dbus.String(u'auto', variant_level=1), dbus.String(u'Privacy'): dbus.String(u'disabled', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'AutoConnect'): dbus.Boolean(True, variant_level=1), dbus.String(u'Name'): dbus.String(u'Wired', variant_level=1), dbus.String(u'Nameservers'): dbus.Array([dbus.String(u'192.168.1.254')], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Provider'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Favorite'): dbus.Boolean(True, variant_level=1), dbus.String(u'Domains.Configuration'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Timeservers.Configuration'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'State'): dbus.String(u'online', variant_level=1), dbus.String(u'Proxy'): dbus.Dictionary({dbus.String(u'Method'): dbus.String(u'direct', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Nameservers.Configuration'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'IPv4'): dbus.Dictionary({dbus.String(u'Netmask'): dbus.String(u'255.255.255.0', variant_level=1), dbus.String(u'Gateway'): dbus.String(u'192.168.1.254', variant_level=1), dbus.String(u'Method'): dbus.String(u'dhcp', variant_level=1), dbus.String(u'Address'): dbus.String(u'192.168.1.79', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Timeservers'): dbus.Array([dbus.String(u'192.168.1.254')], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'IPv6'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Domains'): dbus.Array([dbus.String(u'home')], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Ethernet'): dbus.Dictionary({dbus.String(u'Interface'): dbus.String(u'eth0', variant_level=1), dbus.String(u'MTU'): dbus.UInt16(1500, variant_level=1), dbus.String(u'Method'): dbus.String(u'auto', variant_level=1), dbus.String(u'Address'): dbus.String(u'B8:27:EB:AF:24:D8', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Security'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Proxy.Configuration'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Type'): dbus.String(u'ethernet', variant_level=1), dbus.String(u'Immutable'): dbus.Boolean(False, variant_level=1), dbus.String(u'IPv4.Configuration'): dbus.Dictionary({dbus.String(u'Method'): dbus.String(u'dhcp', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1)}, signature=dbus.Signature('sv'))), signature=None), dbus.Struct((dbus.ObjectPath('/net/connman/service/wifi_000f1330203f_4254576946692d776974682d464f4e_managed_none'), dbus.Dictionary({dbus.String(u'Strength'): dbus.Byte(73, variant_level=1), dbus.String(u'Nameservers'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'State'): dbus.String(u'idle', variant_level=1), dbus.String(u'Provider'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Type'): dbus.String(u'wifi', variant_level=1), dbus.String(u'Security'): dbus.Array([dbus.String(u'none')], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'AutoConnect'): 
dbus.Boolean(False, variant_level=1), dbus.String(u'Immutable'): dbus.Boolean(False, variant_level=1), dbus.String(u'Proxy'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'IPv4.Configuration'): dbus.Dictionary({dbus.String(u'Method'): dbus.String(u'dhcp', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'IPv6.Configuration'): dbus.Dictionary({dbus.String(u'Method'): dbus.String(u'auto', variant_level=1), dbus.String(u'Privacy'): dbus.String(u'disabled', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Name'): dbus.String(u'BTWiFi-with-FON', variant_level=1), dbus.String(u'Favorite'): dbus.Boolean(False, variant_level=1), dbus.String(u'Timeservers'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Domains'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Ethernet'): dbus.Dictionary({dbus.String(u'Interface'): dbus.String(u'wlan0', variant_level=1), dbus.String(u'Method'): dbus.String(u'auto', variant_level=1), dbus.String(u'Address'): dbus.String(u'00:0F:13:30:20:3F', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Nameservers.Configuration'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Proxy.Configuration'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Domains.Configuration'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Timeservers.Configuration'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'IPv4'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'IPv6'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1)}, signature=dbus.Signature('sv'))), signature=None), dbus.Struct((dbus.ObjectPath('/net/connman/service/wifi_000f1330203f_4254487562352d4e473657_managed_psk'), dbus.Dictionary({dbus.String(u'Strength'): dbus.Byte(73, variant_level=1), dbus.String(u'Nameservers'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'State'): dbus.String(u'idle', variant_level=1), dbus.String(u'Provider'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Type'): dbus.String(u'wifi', variant_level=1), dbus.String(u'Security'): dbus.Array([dbus.String(u'psk'), dbus.String(u'wps')], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'AutoConnect'): dbus.Boolean(False, variant_level=1), dbus.String(u'Immutable'): dbus.Boolean(False, variant_level=1), dbus.String(u'Proxy'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'IPv4.Configuration'): dbus.Dictionary({dbus.String(u'Method'): dbus.String(u'dhcp', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'IPv6.Configuration'): dbus.Dictionary({dbus.String(u'Method'): dbus.String(u'auto', variant_level=1), dbus.String(u'Privacy'): dbus.String(u'disabled', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Name'): dbus.String(u'BTHub5-NG6W', variant_level=1), dbus.String(u'Favorite'): dbus.Boolean(False, variant_level=1), dbus.String(u'Timeservers'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Domains'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Ethernet'): dbus.Dictionary({dbus.String(u'Interface'): dbus.String(u'wlan0', variant_level=1), 
dbus.String(u'Method'): dbus.String(u'auto', variant_level=1), dbus.String(u'Address'): dbus.String(u'00:0F:13:30:20:3F', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Nameservers.Configuration'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Proxy.Configuration'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Domains.Configuration'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Timeservers.Configuration'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'IPv4'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'IPv6'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1)}, signature=dbus.Signature('sv'))), signature=None), dbus.Struct((dbus.ObjectPath('/net/connman/service/wifi_000f1330203f_544e434150374345434535_managed_psk'), dbus.Dictionary({dbus.String(u'Strength'): dbus.Byte(49, variant_level=1), dbus.String(u'Nameservers'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'State'): dbus.String(u'idle', variant_level=1), dbus.String(u'Provider'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Type'): dbus.String(u'wifi', variant_level=1), dbus.String(u'Security'): dbus.Array([dbus.String(u'psk'), dbus.String(u'wps')], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'AutoConnect'): dbus.Boolean(False, variant_level=1), dbus.String(u'Immutable'): dbus.Boolean(False, variant_level=1), dbus.String(u'Proxy'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'IPv4.Configuration'): dbus.Dictionary({dbus.String(u'Method'): dbus.String(u'dhcp', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'IPv6.Configuration'): dbus.Dictionary({dbus.String(u'Method'): dbus.String(u'auto', variant_level=1), dbus.String(u'Privacy'): dbus.String(u'disabled', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Name'): dbus.String(u'TNCAP7CECE5', variant_level=1), dbus.String(u'Favorite'): dbus.Boolean(False, variant_level=1), dbus.String(u'Timeservers'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Domains'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Ethernet'): dbus.Dictionary({dbus.String(u'Interface'): dbus.String(u'wlan0', variant_level=1), dbus.String(u'Method'): dbus.String(u'auto', variant_level=1), dbus.String(u'Address'): dbus.String(u'00:0F:13:30:20:3F', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Nameservers.Configuration'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Proxy.Configuration'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Domains.Configuration'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Timeservers.Configuration'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'IPv4'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'IPv6'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1)}, signature=dbus.Signature('sv'))), signature=None)], signature=dbus.Signature('(oa{sv})')) # noqa
elif (self.addr == 'net.connman.Technology'):
self._props = dbus.Dictionary({dbus.String(u'Connected'): dbus.Boolean(False, variant_level=1), dbus.String(u'Tethering'): dbus.Boolean(False, variant_level=1), dbus.String(u'Type'): dbus.String(u'wifi', variant_level=1), dbus.String(u'Name'): dbus.String(u'WiFi', variant_level=1), dbus.String(u'Powered'): dbus.Boolean(True, variant_level=1)}, signature=dbus.Signature('sv')) # noqa
elif (self.addr == 'net.connman.Service'):
self._props = dbus.Dictionary({dbus.String(u'IPv6.Configuration'): dbus.Dictionary({dbus.String(u'Method'): dbus.String(u'auto', variant_level=1), dbus.String(u'Privacy'): dbus.String(u'disabled', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'AutoConnect'): dbus.Boolean(True, variant_level=1), dbus.String(u'Name'): dbus.String(u'Wired', variant_level=1), dbus.String(u'Nameservers'): dbus.Array([dbus.String(u'192.168.1.254')], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Provider'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Favorite'): dbus.Boolean(True, variant_level=1), dbus.String(u'Domains.Configuration'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Timeservers.Configuration'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'State'): dbus.String(u'online', variant_level=1), dbus.String(u'Proxy'): dbus.Dictionary({dbus.String(u'Method'): dbus.String(u'direct', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Nameservers.Configuration'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'IPv4'): dbus.Dictionary({dbus.String(u'Netmask'): dbus.String(u'255.255.255.0', variant_level=1), dbus.String(u'Gateway'): dbus.String(u'192.168.1.254', variant_level=1), dbus.String(u'Method'): dbus.String(u'dhcp', variant_level=1), dbus.String(u'Address'): dbus.String(u'192.168.1.79', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Timeservers'): dbus.Array([dbus.String(u'192.168.1.254')], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'IPv6'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Domains'): dbus.Array([dbus.String(u'home')], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Ethernet'): dbus.Dictionary({dbus.String(u'Interface'): dbus.String(u'eth0', variant_level=1), dbus.String(u'MTU'): dbus.UInt16(1500, variant_level=1), dbus.String(u'Method'): dbus.String(u'auto', variant_level=1), dbus.String(u'Address'): dbus.String(u'B8:27:EB:AF:24:D8', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Security'): dbus.Array([], signature=dbus.Signature('s'), variant_level=1), dbus.String(u'Proxy.Configuration'): dbus.Dictionary({}, signature=dbus.Signature('sv'), variant_level=1), dbus.String(u'Type'): dbus.String(u'ethernet', variant_level=1), dbus.String(u'Immutable'): dbus.Boolean(False, variant_level=1), dbus.String(u'IPv4.Configuration'): dbus.Dictionary({dbus.String(u'Method'): dbus.String(u'dhcp', variant_level=1)}, signature=dbus.Signature('sv'), variant_level=1)}, signature=dbus.Signature('sv')) # noqa
def SetProperty(self, name, value):
self._props[name] = value
def GetProperties(self):
return self._props
def GetTechnologies(self):
return self._technologies
def GetServices(self):
return self._services
def RegisterAgent(self, obj_path):
if (not self.agent):
self.agent = obj_path
else:
raise dbus.DBusException('Agent already registered')
def UnregisterAgent(self, obj_path):
if (self.agent == obj_path):
self.agent = None
else:
raise dbus.DBusException('Agent path not registered')
def Scan(self):
pass
def Connect(self):
pass
def Disconnect(self):
pass
def MoveBefore(self, obj_path):
pass
def MoveAfter(self, obj_path):
pass
def ResetCounters(self):
pass
def Remove(self):
pass
def ClearProperty(self, name):
pass
| 244.313433 | 10,535 | 0.731077 |
ff7d1faee41844c4a031aeb65e3bb642410913ab | 546 | py | Python | ExpenseProject/manage.py | cs-fullstack-fall-2018/project3-django-psanon19 | 4df4459265c5d0b560dc4fa26b7da2c1b3bf9cff | [
"Apache-2.0"
] | null | null | null | ExpenseProject/manage.py | cs-fullstack-fall-2018/project3-django-psanon19 | 4df4459265c5d0b560dc4fa26b7da2c1b3bf9cff | [
"Apache-2.0"
] | null | null | null | ExpenseProject/manage.py | cs-fullstack-fall-2018/project3-django-psanon19 | 4df4459265c5d0b560dc4fa26b7da2c1b3bf9cff | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ExpenseProject.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 34.125 | 78 | 0.690476 |
171d6f674cb892de4ba3579a86563297bbfed6cc | 1,202 | py | Python | ggplot/scales/scale_y_continuous.py | ouseful-backup/ggplot | b5872f04f4e6ee47874eb66676b1d44375d005ef | [
"BSD-2-Clause"
] | 6 | 2016-10-06T08:37:45.000Z | 2019-12-09T06:52:28.000Z | ggplot/scales/scale_y_continuous.py | vannem/ggplot | 0f3774e6a645796b843d3ce77fb388958773338e | [
"BSD-2-Clause"
] | null | null | null | ggplot/scales/scale_y_continuous.py | vannem/ggplot | 0f3774e6a645796b843d3ce77fb388958773338e | [
"BSD-2-Clause"
] | 15 | 2015-12-15T05:49:39.000Z | 2021-04-17T09:47:48.000Z | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
from matplotlib.pyplot import FuncFormatter
dollar = lambda x, pos: '$%1.2f' % x
currency = dollar
comma = lambda x, pos: '{:0,d}'.format(int(x))
millions = lambda x, pos: '$%1.1fM' % (x*1e-6)
percent = lambda x, pos: '{0:.0f}%'.format(x*100)
LABEL_FORMATS = {
'comma': comma,
'dollar': dollar,
'currency': currency,
'millions': millions,
'percent': percent
}
class scale_y_continuous(scale):
VALID_SCALES = ['name', 'labels', 'limits', 'breaks', 'trans']
def __radd__(self, gg):
gg = deepcopy(gg)
if self.name:
gg.ylab = self.name.title()
if not (self.labels is None):
if self.labels in LABEL_FORMATS:
format_func = LABEL_FORMATS[self.labels]
gg.ytick_formatter = FuncFormatter(format_func)
else:
gg.ytick_labels = self.labels
if not (self.limits is None):
gg.ylimits = self.limits
if not (self.breaks is None):
gg.ybreaks = self.breaks
return gg
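# A hedged usage sketch (assuming the yhat-style ggplot API this package exposes,
# with `aes`, `ggplot`, `geom_point` and the bundled `mtcars` data; those names are
# assumptions, not defined in this module). `__radd__` above is what makes the `+`
# syntax work: it deep-copies the plot and sets the label, tick formatter, limits
# and breaks on the copy.
#
#   from ggplot import ggplot, aes, geom_point, mtcars
#   p = ggplot(aes(x='wt', y='mpg'), data=mtcars) + geom_point()
#   p + scale_y_continuous(name='miles per gallon', labels='comma', limits=(0, 40))
#
# A string label such as 'comma' or 'dollar' is looked up in LABEL_FORMATS and
# wrapped in a matplotlib FuncFormatter; a list is used as literal tick labels.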
| 30.820513 | 66 | 0.595674 |
0840b817e6f37617897789199c37b59ec75db6f2 | 2,849 | py | Python | Object_Classification/DFAENT/DFAENT_Home/eval.py | lindagaw/Emotion-Detection | 7431984fb5ab45abe3b793e314c3c15b747d2226 | [
"MIT"
] | 121 | 2020-06-25T02:50:03.000Z | 2022-03-17T07:19:21.000Z | Object_Classification/DFAENT/DFAENT_Home/eval.py | lindagaw/Emotion-Detection | 7431984fb5ab45abe3b793e314c3c15b747d2226 | [
"MIT"
] | 2 | 2020-07-03T04:37:32.000Z | 2020-12-25T04:31:38.000Z | Object_Classification/DFAENT/DFAENT_Home/eval.py | lindagaw/Emotion-Detection | 7431984fb5ab45abe3b793e314c3c15b747d2226 | [
"MIT"
] | 9 | 2020-06-25T02:50:04.000Z | 2022-03-17T07:19:22.000Z | # run: python eval.py --task CP --target Product
import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from torch.autograd import Variable
from utils import CLEFImage, print_args
from model.net import ResNet50_mod_name, ResClassifier
parser = argparse.ArgumentParser()
parser.add_argument("--data_root", default='data/OfficeHome/list')
parser.add_argument("--target", default='RealWorld')
parser.add_argument("--batch_size", default=64, type=int)
parser.add_argument("--shuffle", default=False)
parser.add_argument("--num_workers", default=0)
parser.add_argument("--snapshot", default="")
parser.add_argument("--epoch", default=200, type=int)
parser.add_argument("--result", default='record')
parser.add_argument("--class_num", default=65)
parser.add_argument("--task", default='None', type=str)
parser.add_argument("--post", default='-1', type=str)
parser.add_argument("--repeat", default='-1', type=str)
args = parser.parse_args()
print_args(args)
result = open(os.path.join(args.result, "ImageCLEF_IAFN_" + args.task + '_' + args.post + '.' + args.repeat +"_score.txt"), "a")
target_root = 'data/OfficeHome/'+args.target
target_label = os.path.join(args.data_root, args.target+'.txt')
data_transform = transforms.Compose([
transforms.Scale((256, 256)),
transforms.CenterCrop((221, 221)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
t_set = CLEFImage(target_root, target_label, data_transform)
t_loader = torch.utils.data.DataLoader(t_set, batch_size=args.batch_size, shuffle=args.shuffle, num_workers=args.num_workers)
netG = ResNet50_mod_name().cuda()
netF = ResClassifier(class_num=args.class_num, extract=False).cuda()
netG.eval()
netF.eval()
maxCorrect = 0
for epoch in range(1, args.epoch + 1):
netG.load_state_dict(torch.load(os.path.join(args.snapshot, "ImageHome_IAFN_" + args.task + "_netG_" + args.post + '.' + args.repeat + '_' + str(epoch) + ".pth")))
netF.load_state_dict(torch.load(os.path.join(args.snapshot, "ImageHome_IAFN_" + args.task + "_netF_" + args.post + '.' + args.repeat + '_' + str(epoch) + ".pth")))
correct = 0
tick = 0
for (imgs, labels) in t_loader:
tick += 1
imgs = Variable(imgs.cuda())
pred = netF(netG(imgs))
        pred = F.softmax(pred, dim=1)  # explicit dim avoids the implicit-dim deprecation warning
pred = pred.data.cpu().numpy()
pred = pred.argmax(axis=1)
labels = labels.numpy()
correct += np.equal(labels, pred).sum()
correct = correct * 1.0 / len(t_set)
if correct > maxCorrect:
maxCorrect = correct
print ("Epoch {0}: {1}".format(epoch, correct))
result.write("Epoch " + str(epoch) + ": " + str(correct) + "\n")
print ("Max: {0}".format(maxCorrect))
result.write("Max: {0}".format(maxCorrect))
result.close()
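# Hedged invocation sketch, matching the argument parser above (the snapshot and
# result directories are assumptions for illustration only):
#
#   python eval.py --task CP --target Product --snapshot snapshots --result record
#
# With the defaults post='-1' and repeat='-1', per-epoch accuracy is appended to
# record/ImageCLEF_IAFN_CP_-1.-1_score.txt and the best epoch is reported at the end.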
| 40.126761 | 168 | 0.693226 |
929641458ccd44318c1d4b321b5b5bcae93d7f29 | 301 | py | Python | Basic Python Theory/Part 4 (Classess & Objects)/main.py | Randula98/Python-For-Beginners | e41a6014be882f01c6ccdcbe2167e2b581646eee | [
"MIT"
] | 6 | 2021-12-14T17:52:11.000Z | 2021-12-19T20:22:44.000Z | Basic Python Theory/Part 4 (Classess & Objects)/main.py | GIHAA/Python-For-Beginners | e41a6014be882f01c6ccdcbe2167e2b581646eee | [
"MIT"
] | null | null | null | Basic Python Theory/Part 4 (Classess & Objects)/main.py | GIHAA/Python-For-Beginners | e41a6014be882f01c6ccdcbe2167e2b581646eee | [
"MIT"
] | 2 | 2021-12-19T18:50:30.000Z | 2022-01-01T23:05:18.000Z | #import file
from student import Student
#create object
student1 = Student("John" , "Software Engineer" , 3.8 , 3 , 1)
student2 = Student("Shyla" , "Graphic Designer" , 3.2 , 2 , 2)
print(student1.name)
print(student1.on_deans_list())
print()
print(student2.name)
print(student2.on_deans_list())
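# For reference, a hypothetical student.py that would satisfy the calls above.
# The real file is not shown here; the field names and the 3.5 GPA cut-off are
# assumptions for illustration only.
#
#   class Student:
#       def __init__(self, name, job, gpa, year, student_id):
#           self.name = name
#           self.job = job
#           self.gpa = gpa
#           self.year = year
#           self.student_id = student_id
#       def on_deans_list(self):
#           return self.gpa >= 3.5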
| 18.8125 | 62 | 0.714286 |
606304bdd6f6de96e11d09bad62adaec10cb76c0 | 3,234 | py | Python | usaspending_api/references/tests/integration/test_load_gtas_mgmt_cmd.py | beboplove/usaspending-api | ee4fb35e2d5bcdc68f6c0d4240871ea298e532d9 | [
"CC0-1.0"
] | null | null | null | usaspending_api/references/tests/integration/test_load_gtas_mgmt_cmd.py | beboplove/usaspending-api | ee4fb35e2d5bcdc68f6c0d4240871ea298e532d9 | [
"CC0-1.0"
] | null | null | null | usaspending_api/references/tests/integration/test_load_gtas_mgmt_cmd.py | beboplove/usaspending-api | ee4fb35e2d5bcdc68f6c0d4240871ea298e532d9 | [
"CC0-1.0"
] | null | null | null | import pytest
from django.core.management import call_command
from django.db import DEFAULT_DB_ALIAS
from unittest.mock import MagicMock
from usaspending_api.etl.broker_etl_helpers import PhonyCursor
from usaspending_api.references.models import GTASSF133Balances
@pytest.mark.django_db
def test_program_activity_fresh_load(monkeypatch):
"""
Test the gtas totals load to ensure data is loaded with the correct totals.
"""
data_broker_mock = MagicMock()
data_broker_mock.cursor.return_value = PhonyCursor("usaspending_api/references/tests/data/broker_gtas.json")
mock_connections = {
DEFAULT_DB_ALIAS: MagicMock(),
"data_broker": data_broker_mock,
}
monkeypatch.setattr("usaspending_api.references.management.commands.load_gtas.connections", mock_connections)
call_command("load_gtas")
expected_results = {
"count": 3,
"row_tuples": [
(
1600,
-1,
-11.00,
-11.00,
-10.00,
-11.00,
-11.00,
-11.00,
-11.00,
-11.00,
-11.00,
-11.00,
-11.00,
11,
-111,
-110,
),
(
1600,
-2,
-12.00,
-12.00,
-9.00,
-12.00,
-12.00,
-12.00,
-12.00,
-12.00,
-12.00,
-12.00,
-12.00,
12,
-121,
-120,
),
(
1601,
-1,
-13.00,
-13.00,
-8.00,
-13.00,
-13.00,
-13.00,
-13.00,
-13.00,
-13.00,
-13.00,
-13.00,
13,
-131,
-130,
),
],
}
actual_results = {
"count": GTASSF133Balances.objects.count(),
"row_tuples": list(
GTASSF133Balances.objects.values_list(
"fiscal_year",
"fiscal_period",
"budget_authority_unobligated_balance_brought_forward_cpe",
"adjustments_to_unobligated_balance_brought_forward_cpe",
"obligations_incurred_total_cpe",
"budget_authority_appropriation_amount_cpe",
"borrowing_authority_amount",
"contract_authority_amount",
"spending_authority_from_offsetting_collections_amount",
"other_budgetary_resources_amount_cpe",
"obligations_incurred",
"deobligations_or_recoveries_or_refunds_from_prior_year_cpe",
"unobligated_balance_cpe",
"total_budgetary_resources_cpe",
"anticipated_prior_year_obligation_recoveries",
"prior_year_paid_obligation_recoveries",
)
),
}
assert expected_results == actual_results
| 28.875 | 113 | 0.480829 |
25ab24da59d0d6e2e4e9cde9eff54210fc486bab | 2,983 | py | Python | services/contrib/KafkaAgent/setup.py | gnmerritt/volttron | ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3 | [
"Apache-2.0"
] | 1 | 2020-05-26T01:29:50.000Z | 2020-05-26T01:29:50.000Z | services/contrib/KafkaAgent/setup.py | gnmerritt/volttron | ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3 | [
"Apache-2.0"
] | null | null | null | services/contrib/KafkaAgent/setup.py | gnmerritt/volttron | ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2019, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
from os import path
from setuptools import setup, find_packages
MAIN_MODULE = 'agent'
# Find the agent package that contains the main module
packages = find_packages('.')
agent_package = ''
for package in find_packages():
# Because there could be other packages such as tests
if path.isfile(package + '/' + MAIN_MODULE + '.py') is True:
agent_package = package
if not agent_package:
raise RuntimeError('None of the packages under {dir} contain the file '
'{main_module}'.format(main_module=MAIN_MODULE + '.py',
dir=path.abspath('.')))
# Find the version number from the main module
agent_module = agent_package + '.' + MAIN_MODULE
_temp = __import__(agent_module, globals(), locals(), ['__version__'], 0)
__version__ = _temp.__version__
# Setup
setup(
name=agent_package,
version=__version__,
install_requires=['volttron'],
packages=packages,
entry_points={
'setuptools.installation': [
'eggsecutable = ' + agent_module + ':main',
]
}
)
| 40.863014 | 79 | 0.732149 |
947bb5005c7fa9ef33b1c9c994568207b0c69cc6 | 961 | py | Python | backend/authentication/admin.py | gorkemarslan/react-django-ecommerce | 86ca776f0f45f0b959b910116803686523632810 | [
"MIT"
] | null | null | null | backend/authentication/admin.py | gorkemarslan/react-django-ecommerce | 86ca776f0f45f0b959b910116803686523632810 | [
"MIT"
] | null | null | null | backend/authentication/admin.py | gorkemarslan/react-django-ecommerce | 86ca776f0f45f0b959b910116803686523632810 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext_lazy as _
from .forms import CustomUserChangeForm, CustomUserCreationForm
from .models import CustomUser
class CustomUserAdmin(UserAdmin):
add_form = CustomUserCreationForm
form = CustomUserChangeForm
model = CustomUser
list_display = ('email', 'is_staff', 'is_active',)
list_filter = ('is_staff', 'is_active',)
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Permissions'), {'fields': ('is_staff', 'is_active')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2', 'is_staff', 'is_active')}
),
)
search_fields = ('email',)
ordering = ('email',)
admin.site.register(CustomUser, CustomUserAdmin)
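# Hedged sketch of the model this admin assumes (defined elsewhere in the app as
# .models.CustomUser). The exact fields below are assumptions for illustration;
# the key point is an email-keyed user with no username field.
#
#   # models.py
#   from django.contrib.auth.models import AbstractUser
#   from django.db import models
#
#   class CustomUser(AbstractUser):
#       username = None
#       email = models.EmailField(unique=True)
#       USERNAME_FIELD = "email"
#       REQUIRED_FIELDS = []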
| 32.033333 | 83 | 0.640999 |
b432fde1b4dd03535e2a5fc7fe15ed1663895bbb | 730 | py | Python | selfdrive/controls/lib/latcontrol_angle.py | jamcar23/openpilot | 8a609b73710f723686c1a944770c5b19f209a0ca | [
"MIT"
] | 5 | 2020-12-23T01:49:04.000Z | 2021-08-28T23:42:52.000Z | selfdrive/controls/lib/latcontrol_angle.py | jamcar23/openpilot | 8a609b73710f723686c1a944770c5b19f209a0ca | [
"MIT"
] | 60 | 2020-09-03T15:23:38.000Z | 2021-12-17T12:39:50.000Z | selfdrive/controls/lib/latcontrol_angle.py | jamcar23/openpilot | 8a609b73710f723686c1a944770c5b19f209a0ca | [
"MIT"
] | 2 | 2020-12-07T20:04:19.000Z | 2022-03-05T08:00:03.000Z | import math
from cereal import log
class LatControlAngle():
def __init__(self, CP):
pass
def reset(self):
pass
def update(self, active, CS, CP, VM, params, desired_curvature, desired_curvature_rate, lat_plan):
angle_log = log.ControlsState.LateralAngleState.new_message()
if CS.vEgo < 0.3 or not active:
angle_log.active = False
angle_steers_des = float(CS.steeringAngleDeg)
else:
angle_log.active = True
angle_steers_des = math.degrees(VM.get_steer_from_curvature(-desired_curvature, CS.vEgo))
angle_steers_des += params.angleOffsetDeg
angle_log.saturated = False
angle_log.steeringAngleDeg = angle_steers_des
return 0, float(angle_steers_des), angle_log
| 28.076923 | 100 | 0.730137 |
4376f050dde173802dab8cadd6b2463b6127ea73 | 495 | py | Python | backend/util/crypto_hash.py | wagnerperin/Python-Blockchain | 5e066ab17839db33bb9cfc841d25680e9c73f535 | [
"MIT"
] | null | null | null | backend/util/crypto_hash.py | wagnerperin/Python-Blockchain | 5e066ab17839db33bb9cfc841d25680e9c73f535 | [
"MIT"
] | null | null | null | backend/util/crypto_hash.py | wagnerperin/Python-Blockchain | 5e066ab17839db33bb9cfc841d25680e9c73f535 | [
"MIT"
] | null | null | null | import hashlib
import json
def crypto_hash(*args):
""""
Return a sha-256 hash of the given arguments.
"""
stringfied_args = sorted(map(lambda data: json.dumps(data), args))
joined_data = ''.join(stringfied_args)
return hashlib.sha256(joined_data.encode('utf-8')).hexdigest()
def main():
print(f"crypto_hash('one', 2, [3]): {crypto_hash('one', 2, [3])}")
print(f"crypto_hash(2, 'one', [3]): {crypto_hash(2, 'one', [3])}")
if __name__ == "__main__":
main(); | 27.5 | 70 | 0.630303 |
a4dc931674291a1b958af7ec11fec1a4ee591c7b | 22,191 | py | Python | evennia/contrib/rplanguage.py | Jaykingamez/evennia | cf7cab1fea99ede3efecb70a65c3eb0fba1d3745 | [
"BSD-3-Clause"
] | 1,544 | 2015-01-01T22:16:31.000Z | 2022-03-31T19:17:45.000Z | evennia/contrib/rplanguage.py | Jaykingamez/evennia | cf7cab1fea99ede3efecb70a65c3eb0fba1d3745 | [
"BSD-3-Clause"
] | 1,686 | 2015-01-02T18:26:31.000Z | 2022-03-31T20:12:03.000Z | evennia/contrib/rplanguage.py | Jaykingamez/evennia | cf7cab1fea99ede3efecb70a65c3eb0fba1d3745 | [
"BSD-3-Clause"
] | 867 | 2015-01-02T21:01:54.000Z | 2022-03-29T00:28:27.000Z | """
Language and whisper obfuscation system
Evennia contrib - Griatch 2015
This module is intended to be used with an emoting system (such as
contrib/rpsystem.py). It offers the ability to obfuscate spoken words
in the game in various ways:
- Language: The language functionality defines a pseudo-language map
to any number of languages. The string will be obfuscated depending
on a scaling that (most likely) will be input as a weighted average of
the language skill of the speaker and listener.
- Whisper: The whisper functionality will gradually "fade out" a
  whisper along a scale 0-1, where the fading is based on gradually
  removing sections of the whisper that are (supposedly) easier to
overhear (for example "s" sounds tend to be audible even when no other
meaning can be determined).
Usage:
```python
from evennia.contrib import rplanguage
# need to be done once, here we create the "default" lang
rplanguage.add_language()
say = "This is me talking."
whisper = "This is me whispering."
print(rplanguage.obfuscate_language(say, level=0.0))
<<< "This is me talking."
print(rplanguage.obfuscate_language(say, level=0.5))
<<< "This is me byngyry."
print(rplanguage.obfuscate_language(say, level=1.0))
<<< "Daly ly sy byngyry."
result = rplanguage.obfuscate_whisper(whisper, level=0.0)
<<< "This is me whispering"
result = rplanguage.obfuscate_whisper(whisper, level=0.2)
<<< "This is m- whisp-ring"
result = rplanguage.obfuscate_whisper(whisper, level=0.5)
<<< "---s -s -- ---s------"
result = rplanguage.obfuscate_whisper(whisper, level=0.7)
<<< "---- -- -- ----------"
result = rplanguage.obfuscate_whisper(whisper, level=1.0)
<<< "..."
```
To set up new languages, import and use the `add_language()`
helper method in this module. This allows you to customize the
"feel" of the semi-random language you are creating. Especially
the `word_length_variance` helps vary the length of translated
words compared to the original and can help change the "feel" for
the language you are creating. You can also add your own
dictionary and "fix" random words for a list of input words.
Below is an example of "elvish", using "rounder" vowels and sounds:
```python
phonemes = "oi oh ee ae aa eh ah ao aw ay er ey ow ia ih iy " \
"oy ua uh uw y p b t d f v t dh s z sh zh ch jh k " \
"ng g m n l r w",
vowels = "eaoiuy"
grammar = "v vv vvc vcc vvcc cvvc vccv vvccv vcvccv vcvcvcc vvccvvcc " \
"vcvvccvvc cvcvvcvvcc vcvcvvccvcvv",
word_length_variance = 1
noun_postfix = "'la"
manual_translations = {"the":"y'e", "we":"uyi", "she":"semi", "he":"emi",
"you": "do", 'me':'mi','i':'me', 'be':"hy'e", 'and':'y'}
rplanguage.add_language(key="elvish", phonemes=phonemes, grammar=grammar,
word_length_variance=word_length_variance,
noun_postfix=noun_postfix, vowels=vowels,
                        manual_translations=manual_translations,
auto_translations="my_word_file.txt")
```
This will produce a decisively more "rounded" and "soft" language
than the default one. The few manual_translations also help make
it look at least superficially "reasonable".
The `auto_translations` keyword is useful: it accepts either a
list or a path to a file of words (one per line) for which fixed
translations are created automatically according to the grammatical rules.
This allows you to quickly build a large corpus of translated words
that never change (if this is desired).
"""
import re
from random import choice, randint
from collections import defaultdict
from evennia import DefaultScript
from evennia.utils import logger
# ------------------------------------------------------------
#
# Obfuscate language
#
# ------------------------------------------------------------
# default language grammar
_PHONEMES = (
"ea oh ae aa eh ah ao aw ai er ey ow ia ih iy oy ua uh uw a e i u y p b t d f v t dh "
"s z sh zh ch jh k ng g m n l r w"
)
_VOWELS = "eaoiuy"
# these must be able to be constructed from phonemes (so for example,
# if you have v here, there must exist at least one single-character
# vowel phoneme defined above)
_GRAMMAR = "v cv vc cvv vcc vcv cvcc vccv cvccv cvcvcc cvccvcv vccvccvc cvcvccvv cvcvcvcvv"
_RE_FLAGS = re.MULTILINE + re.IGNORECASE + re.DOTALL + re.UNICODE
_RE_GRAMMAR = re.compile(r"vv|cc|v|c", _RE_FLAGS)
_RE_WORD = re.compile(r"\w+", _RE_FLAGS)
_RE_EXTRA_CHARS = re.compile(r"\s+(?=\W)|[,.?;](?=[,.?;]|\s+[,.?;])", _RE_FLAGS)
class LanguageError(RuntimeError):
pass
class LanguageExistsError(LanguageError):
pass
class LanguageHandler(DefaultScript):
"""
This is a storage class that should usually not be created on its
own. It's automatically created by a call to `obfuscate_language`
or `add_language` below.
    Languages are implemented as a "logical" pseudo-consistent language
    algorithm here. The idea is that a language is built up from
    phonemes. These are joined together according to a "grammar" of
    possible phoneme combinations and allowed characters. It may
    sound simplistic, but this makes it easy to create
"similar-sounding" languages. One can also custom-define a
dictionary of some common words to give further consistency.
Optionally, the system also allows an input list of common words
to be loaded and given random translations. These will be stored
to disk and will thus not change. This gives a decent "stability"
of the language but if the goal is to obfuscate, this may allow
players to eventually learn to understand the gist of a sentence
even if their characters can not. Any number of languages can be
created this way.
This nonsense language will partially replace the actual spoken
language when so desired (usually because the speaker/listener
don't know the language well enough).
"""
def at_script_creation(self):
"Called when script is first started"
self.key = "language_handler"
self.persistent = True
self.db.language_storage = {}
def add(
self,
key="default",
phonemes=_PHONEMES,
grammar=_GRAMMAR,
word_length_variance=0,
noun_translate=False,
noun_prefix="",
noun_postfix="",
vowels=_VOWELS,
manual_translations=None,
auto_translations=None,
force=False,
):
"""
Add a new language. Note that you generally only need to do
this once per language and that adding an existing language
will re-initialize all the random components to new permanent
values.
Args:
key (str, optional): The name of the language. This
will be used as an identifier for the language so it
should be short and unique.
phonemes (str, optional): Space-separated string of all allowed
phonemes in this language. If either of the base phonemes
(c, v, cc, vv) are present in the grammar, the phoneme list must
at least include one example of each.
grammar (str): All allowed consonant (c) and vowel (v) combinations
allowed to build up words. Grammars are broken into the base phonemes
                (c, v, cc, vv), prioritizing the longer bases. So cvv would be
                c + vv (allowing a word like 'die'), whereas
                cvcvccc would be c+v+c+v+cc+c (a word like 'galosch').
word_length_variance (real): The variation of length of words.
0 means a minimal variance, higher variance may mean words
have wildly varying length; this strongly affects how the
language "looks".
noun_translate (bool, optional): If a proper noun, identified as a
capitalized word, should be translated or not. By default they
will not, allowing for e.g. the names of characters to be understandable.
noun_prefix (str, optional): A prefix to go before every noun
in this language (if any).
            noun_postfix (str, optional): A postfix to go after every noun
in this language (if any, usually best to avoid combining
with `noun_prefix` or language becomes very wordy).
vowels (str, optional): Every vowel allowed in this language.
manual_translations (dict, optional): This allows for custom-setting
certain words in the language to mean the same thing. It is
                of the form `{real_word: fictional_word}`, for example
                `{"the": "y'e"}`.
            auto_translations (str or list, optional): These are lists of
words that should be auto-translated with a random, but
fixed, translation. If a path to a file, this file should
contain a list of words to produce translations for, one
word per line. If a list, the list's elements should be
the words to translate. The `manual_translations` will
always override overlapping translations created
automatically.
force (bool, optional): Unless true, will not allow the addition
of a language that is already created.
Raises:
            LanguageExistsError: Raised if trying to add a language
with a key that already exists, without `force` being set.
Notes:
            The `auto_translations` input is for example a word-frequency list
            of the N most common words in the host language. The
            translations will be random, but will be stored
            persistently to always be the same. This allows for
            building a quick, decently-sounding fictive language that
            tends to produce the same "translation" (mostly) with the
            same input sentence.
"""
if key in self.db.language_storage and not force:
raise LanguageExistsError(
"Language is already created. Re-adding it will re-build"
" its dictionary map. Use 'force=True' keyword if you are sure."
)
# create grammar_component->phoneme mapping
# {"vv": ["ea", "oh", ...], ...}
grammar2phonemes = defaultdict(list)
for phoneme in phonemes.split():
            if re.search(r"\W", phoneme):
raise LanguageError("The phoneme '%s' contains an invalid character" % phoneme)
gram = "".join(["v" if char in vowels else "c" for char in phoneme])
grammar2phonemes[gram].append(phoneme)
# allowed grammar are grouped by length
gramdict = defaultdict(list)
for gram in grammar.split():
            if re.search(r"[^cv]", gram):  # any character other than 'c'/'v' is invalid
raise LanguageError(
"The grammar '%s' is invalid (only 'c' and 'v' are allowed)" % gram
)
gramdict[len(gram)].append(gram)
grammar = dict(gramdict)
# create automatic translation
translation = {}
if auto_translations:
if isinstance(auto_translations, str):
# path to a file rather than a list
with open(auto_translations, "r") as f:
auto_translations = f.readlines()
for word in auto_translations:
word = word.strip()
lword = len(word)
new_word = ""
wlen = max(0, lword + sum(randint(-1, 1) for i in range(word_length_variance)))
if wlen not in grammar:
# always create a translation, use random length
structure = choice(grammar[choice(list(grammar))])
else:
# use the corresponding length
structure = choice(grammar[wlen])
for match in _RE_GRAMMAR.finditer(structure):
new_word += choice(grammar2phonemes[match.group()])
translation[word.lower()] = new_word.lower()
if manual_translations:
# update with manual translations
translation.update(
dict((key.lower(), value.lower()) for key, value in manual_translations.items())
)
# store data
storage = {
"translation": translation,
"grammar": grammar,
"grammar2phonemes": dict(grammar2phonemes),
"word_length_variance": word_length_variance,
"noun_translate": noun_translate,
"noun_prefix": noun_prefix,
"noun_postfix": noun_postfix,
}
self.db.language_storage[key] = storage
def _translate_sub(self, match):
"""
Replacer method called by re.sub when
traversing the language string.
Args:
match (re.matchobj): Match object from regex.
Returns:
converted word.
Notes:
            Assumes `self.language` and `self.level` are available
            on the object.
"""
word = match.group()
lword = len(word)
if len(word) <= self.level:
# below level. Don't translate
new_word = word
else:
# try to translate the word from dictionary
new_word = self.language["translation"].get(word.lower(), "")
if not new_word:
# no dictionary translation. Generate one
                # find out what preceded this word
                wpos = match.start()
                preceding = match.string[:wpos].strip()
                start_sentence = preceding.endswith((".", "!", "?")) or not preceding
# make up translation on the fly. Length can
# vary from un-translated word.
wlen = max(
0,
lword
+ sum(randint(-1, 1) for i in range(self.language["word_length_variance"])),
)
grammar = self.language["grammar"]
if wlen not in grammar:
if randint(0, 1) == 0:
# this word has no direct translation!
wlen = 0
new_word = ""
else:
# use random word length
wlen = choice(list(grammar.keys()))
if wlen:
structure = choice(grammar[wlen])
grammar2phonemes = self.language["grammar2phonemes"]
for match in _RE_GRAMMAR.finditer(structure):
# there are only four combinations: vv,cc,c,v
try:
new_word += choice(grammar2phonemes[match.group()])
except KeyError:
logger.log_trace(
"You need to supply at least one example of each of "
"the four base phonemes (c, v, cc, vv)"
)
# abort translation here
new_word = ""
break
if word.istitle():
title_word = ""
if not start_sentence and not self.language.get("noun_translate", False):
# don't translate what we identify as proper nouns (names)
title_word = word
elif new_word:
title_word = new_word
if title_word:
# Regardless of if we translate or not, we will add the custom prefix/postfixes
new_word = "%s%s%s" % (
self.language["noun_prefix"],
title_word.capitalize(),
self.language["noun_postfix"],
)
if len(word) > 1 and word.isupper():
# keep LOUD words loud also when translated
new_word = new_word.upper()
return new_word
def translate(self, text, level=0.0, language="default"):
"""
Translate the text according to the given level.
Args:
text (str): The text to translate
level (real): Value between 0.0 and 1.0, where
0.0 means no obfuscation (text returned unchanged) and
1.0 means full conversion of every word. The closer to
1, the shorter words will be translated.
language (str): The language key identifier.
Returns:
text (str): A translated string.
"""
if level == 0.0:
# no translation
return text
language = self.db.language_storage.get(language, None)
if not language:
return text
self.language = language
# configuring the translation
self.level = int(10 * (1.0 - max(0, min(level, 1.0))))
translation = _RE_WORD.sub(self._translate_sub, text)
# the substitution may create too long empty spaces, remove those
return _RE_EXTRA_CHARS.sub("", translation)
# Language access functions
_LANGUAGE_HANDLER = None
def obfuscate_language(text, level=0.0, language="default"):
"""
Main access method for the language parser.
Args:
text (str): Text to obfuscate.
level (real, optional): A value from 0.0-1.0 determining
            the level of obfuscation where 0 means no obfuscation
(string returned unchanged) and 1.0 means the entire
string is obfuscated.
language (str, optional): The identifier of a language
the system understands.
Returns:
translated (str): The translated text.
"""
# initialize the language handler and cache it
global _LANGUAGE_HANDLER
if not _LANGUAGE_HANDLER:
try:
_LANGUAGE_HANDLER = LanguageHandler.objects.get(db_key="language_handler")
except LanguageHandler.DoesNotExist:
if not _LANGUAGE_HANDLER:
from evennia import create_script
_LANGUAGE_HANDLER = create_script(LanguageHandler)
return _LANGUAGE_HANDLER.translate(text, level=level, language=language)
def add_language(**kwargs):
"""
    Access function for creating a new language. See the docstring of
`LanguageHandler.add` for list of keyword arguments.
"""
global _LANGUAGE_HANDLER
if not _LANGUAGE_HANDLER:
try:
_LANGUAGE_HANDLER = LanguageHandler.objects.get(db_key="language_handler")
except LanguageHandler.DoesNotExist:
if not _LANGUAGE_HANDLER:
from evennia import create_script
_LANGUAGE_HANDLER = create_script(LanguageHandler)
_LANGUAGE_HANDLER.add(**kwargs)
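# Hedged usage sketch (the phoneme/grammar strings below are illustrative
# assumptions, not a canonical language definition shipped with this module):
#
#   add_language(
#       key="gnomish",
#       phonemes="o a e i u ea ou p t k s m n l r th sh",
#       vowels="oaeiu",
#       grammar="v cv vc cvv vcc cvcv",
#       word_length_variance=1,
#       auto_translations=["hello", "friend", "stone"],
#   )
#   obfuscate_language("Hello friend!", level=1.0, language="gnomish")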
def available_languages():
"""
Returns all available language keys.
Returns:
languages (list): List of key strings of all available
languages.
"""
global _LANGUAGE_HANDLER
if not _LANGUAGE_HANDLER:
try:
_LANGUAGE_HANDLER = LanguageHandler.objects.get(db_key="language_handler")
except LanguageHandler.DoesNotExist:
if not _LANGUAGE_HANDLER:
from evennia import create_script
_LANGUAGE_HANDLER = create_script(LanguageHandler)
return list(_LANGUAGE_HANDLER.attributes.get("language_storage", {}))
# ------------------------------------------------------------
#
# Whisper obscuration
#
# This obscuration table is designed by obscuring certain
# vowels first, followed by consonants that tend to be
# more audible over long distances, like s. Finally it
# does non-auditory replacements, like exclamation marks
# and capitalized letters (assumed to be spoken louder) that may still
# give a user some idea of the sentence structure. Then the word
# lengths are also obfuscated and finally the whisper length itself.
#
# ------------------------------------------------------------
_RE_WHISPER_OBSCURE = [
re.compile(r"^$", _RE_FLAGS), # This is a Test! #0 full whisper
re.compile(r"[ae]", _RE_FLAGS), # This -s - Test! #1 add uy
re.compile(r"[aeuy]", _RE_FLAGS), # This -s - Test! #2 add oue
re.compile(r"[aeiouy]", _RE_FLAGS), # Th-s -s - T-st! #3 add all consonants
re.compile(r"[aeiouybdhjlmnpqrv]", _RE_FLAGS), # T--s -s - T-st! #4 add hard consonants
re.compile(r"[a-eg-rt-z]", _RE_FLAGS), # T--s -s - T-s-! #5 add all capitals
re.compile(r"[A-EG-RT-Za-eg-rt-z]", _RE_FLAGS), # ---s -s - --s-! #6 add f
re.compile(r"[A-EG-RT-Za-rt-z]", _RE_FLAGS), # ---s -s - --s-! #7 add s
re.compile(r"[A-EG-RT-Za-z]", _RE_FLAGS), # ---- -- - ----! #8 add capital F
re.compile(r"[A-RT-Za-z]", _RE_FLAGS), # ---- -- - ----! #9 add capital S
re.compile(r"[\w]", _RE_FLAGS), # ---- -- - ----! #10 non-alphanumerals
re.compile(r"[\S]", _RE_FLAGS), # ---- -- - ---- #11 words
re.compile(r"[\w\W]", _RE_FLAGS), # -------------- #12 whisper length
re.compile(r".*", _RE_FLAGS),
] # ... #13 (always same length)
def obfuscate_whisper(whisper, level=0.0):
"""
Obfuscate whisper depending on a pre-calculated level
(that may depend on distance, listening skill etc)
Args:
whisper (str): The whisper string to obscure. The
entire string will be considered in the obscuration.
level (real, optional): This is a value 0-1, where 0
means not obscured (whisper returned unchanged) and 1
means fully obscured.
"""
level = min(max(0.0, level), 1.0)
olevel = int(13.0 * level)
if olevel == 13:
return "..."
else:
return _RE_WHISPER_OBSCURE[olevel].sub("-", whisper)
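# Hedged worked example of the level -> table-index mapping above:
#
#   obfuscate_whisper("This is a Test!", level=0.35)
#   # olevel = int(13 * 0.35) = 4 -> pattern #4, yielding "T--s -s - T-st!"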
| 40.642857 | 103 | 0.594836 |
15645d0085d408beedc517051027729a3e5d9f00 | 10,235 | py | Python | python/pyspark/sql/utils.py | amwufiv/spark | b50d4507f52315d5f6d75c617e845248a1c828a9 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2022-02-10T11:30:07.000Z | 2022-02-14T18:26:30.000Z | python/pyspark/sql/utils.py | amwufiv/spark | b50d4507f52315d5f6d75c617e845248a1c828a9 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 10 | 2021-06-23T21:23:30.000Z | 2022-02-16T01:19:37.000Z | python/pyspark/sql/utils.py | amwufiv/spark | b50d4507f52315d5f6d75c617e845248a1c828a9 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 6 | 2022-01-24T20:07:59.000Z | 2022-01-25T16:11:34.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Callable, Optional, Sequence, TYPE_CHECKING, cast
import py4j
from py4j.java_collections import JavaArray # type: ignore[import]
from py4j.java_gateway import ( # type: ignore[import]
JavaClass,
JavaGateway,
JavaObject,
is_instance_of,
)
from py4j.protocol import Py4JJavaError # type: ignore[import]
from pyspark import SparkContext
from pyspark.find_spark_home import _find_spark_home
if TYPE_CHECKING:
from pyspark.sql.context import SQLContext
from pyspark.sql.dataframe import DataFrame
class CapturedException(Exception):
def __init__(
self,
desc: Optional[str] = None,
stackTrace: Optional[str] = None,
cause: Optional[Py4JJavaError] = None,
origin: Optional[Py4JJavaError] = None,
):
# desc & stackTrace vs origin are mutually exclusive.
# cause is optional.
assert (origin is not None and desc is None and stackTrace is None) or (
origin is None and desc is not None and stackTrace is not None
)
self.desc = desc if desc is not None else cast(Py4JJavaError, origin).getMessage()
assert SparkContext._jvm is not None
self.stackTrace = (
stackTrace
if stackTrace is not None
else (SparkContext._jvm.org.apache.spark.util.Utils.exceptionString(origin))
)
self.cause = convert_exception(cause) if cause is not None else None
if self.cause is None and origin is not None and origin.getCause() is not None:
self.cause = convert_exception(origin.getCause())
self._origin = origin
def __str__(self) -> str:
assert SparkContext._jvm is not None # type: ignore[attr-defined]
jvm = SparkContext._jvm # type: ignore[attr-defined]
sql_conf = jvm.org.apache.spark.sql.internal.SQLConf.get()
debug_enabled = sql_conf.pysparkJVMStacktraceEnabled()
desc = self.desc
if debug_enabled:
desc = desc + "\n\nJVM stacktrace:\n%s" % self.stackTrace
return str(desc)
def getErrorClass(self) -> Optional[str]:
assert SparkContext._gateway is not None # type: ignore[attr-defined]
gw = SparkContext._gateway # type: ignore[attr-defined]
if self._origin is not None and is_instance_of(
gw, self._origin, "org.apache.spark.SparkThrowable"
):
return self._origin.getErrorClass()
else:
return None
def getSqlState(self) -> Optional[str]:
assert SparkContext._gateway is not None # type: ignore[attr-defined]
gw = SparkContext._gateway # type: ignore[attr-defined]
if self._origin is not None and is_instance_of(
gw, self._origin, "org.apache.spark.SparkThrowable"
):
return self._origin.getSqlState()
else:
return None
class AnalysisException(CapturedException):
"""
Failed to analyze a SQL query plan.
"""
class ParseException(CapturedException):
"""
Failed to parse a SQL command.
"""
class IllegalArgumentException(CapturedException):
"""
Passed an illegal or inappropriate argument.
"""
class StreamingQueryException(CapturedException):
"""
Exception that stopped a :class:`StreamingQuery`.
"""
class QueryExecutionException(CapturedException):
"""
Failed to execute a query.
"""
class PythonException(CapturedException):
"""
Exceptions thrown from Python workers.
"""
class UnknownException(CapturedException):
"""
None of the above exceptions.
"""
class SparkUpgradeException(CapturedException):
"""
Exception thrown because of Spark upgrade
"""
def convert_exception(e: Py4JJavaError) -> CapturedException:
assert e is not None
assert SparkContext._jvm is not None # type: ignore[attr-defined]
assert SparkContext._gateway is not None # type: ignore[attr-defined]
jvm = SparkContext._jvm # type: ignore[attr-defined]
gw = SparkContext._gateway # type: ignore[attr-defined]
if is_instance_of(gw, e, "org.apache.spark.sql.catalyst.parser.ParseException"):
return ParseException(origin=e)
# Order matters. ParseException inherits AnalysisException.
elif is_instance_of(gw, e, "org.apache.spark.sql.AnalysisException"):
return AnalysisException(origin=e)
elif is_instance_of(gw, e, "org.apache.spark.sql.streaming.StreamingQueryException"):
return StreamingQueryException(origin=e)
elif is_instance_of(gw, e, "org.apache.spark.sql.execution.QueryExecutionException"):
return QueryExecutionException(origin=e)
elif is_instance_of(gw, e, "java.lang.IllegalArgumentException"):
return IllegalArgumentException(origin=e)
elif is_instance_of(gw, e, "org.apache.spark.SparkUpgradeException"):
return SparkUpgradeException(origin=e)
c: Py4JJavaError = e.getCause()
stacktrace: str = jvm.org.apache.spark.util.Utils.exceptionString(e)
if c is not None and (
is_instance_of(gw, c, "org.apache.spark.api.python.PythonException")
# To make sure this only catches Python UDFs.
and any(
map(
lambda v: "org.apache.spark.sql.execution.python" in v.toString(), c.getStackTrace()
)
)
):
msg = (
"\n An exception was thrown from the Python worker. "
"Please see the stack trace below.\n%s" % c.getMessage()
)
return PythonException(msg, stacktrace)
return UnknownException(desc=e.toString(), stackTrace=stacktrace, cause=c)
def capture_sql_exception(f: Callable[..., Any]) -> Callable[..., Any]:
def deco(*a: Any, **kw: Any) -> Any:
try:
return f(*a, **kw)
except Py4JJavaError as e:
converted = convert_exception(e.java_exception)
if not isinstance(converted, UnknownException):
# Hide where the exception came from that shows a non-Pythonic
# JVM exception message.
raise converted from None
else:
raise
return deco
def install_exception_handler() -> None:
"""
Hook an exception handler into Py4j, which could capture some SQL exceptions in Java.
    When calling the Java API, it will call `get_return_value` to parse the returned object.
    If any exception happens in the JVM, the result will be a Java exception object, which
    raises py4j.protocol.Py4JJavaError. We replace the original `get_return_value` with one that
    can capture the Java exception and throw a Python one (with the same error message).
    It's idempotent and can be called multiple times.
"""
original = py4j.protocol.get_return_value
# The original `get_return_value` is not patched, it's idempotent.
patched = capture_sql_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
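# Hedged illustration of the effect of the hook above (assumes an active
# SparkSession bound to the name `spark`; the query only exists to trigger an
# analysis error):
#
#   install_exception_handler()
#   try:
#       spark.sql("SELECT no_such_column FROM range(1)")
#   except AnalysisException as e:
#       print(e.getErrorClass(), e.getSqlState())
#
# Without the hook the same call would surface as a raw py4j Py4JJavaError.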
def toJArray(gateway: JavaGateway, jtype: JavaClass, arr: Sequence[Any]) -> JavaArray:
"""
Convert python list to java type array
Parameters
----------
gateway :
Py4j Gateway
jtype :
java type of element in array
arr :
python type list
"""
jarray: JavaArray = gateway.new_array(jtype, len(arr))
for i in range(0, len(arr)):
jarray[i] = arr[i]
return jarray
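# Minimal hedged sketch of how this helper can be used (assumes an initialized
# SparkContext so that the py4j gateway exists):
#
#   gateway = SparkContext._gateway
#   jnames = toJArray(gateway, gateway.jvm.java.lang.String, ["a", "b", "c"])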
def require_test_compiled() -> None:
"""Raise Exception if test classes are not compiled"""
import os
import glob
test_class_path = os.path.join(_find_spark_home(), "sql", "core", "target", "*", "test-classes")
paths = glob.glob(test_class_path)
if len(paths) == 0:
raise RuntimeError(
"%s doesn't exist. Spark sql test classes are not compiled." % test_class_path
)
class ForeachBatchFunction:
"""
This is the Python implementation of Java interface 'ForeachBatchFunction'. This wraps
the user-defined 'foreachBatch' function such that it can be called from the JVM when
the query is active.
"""
def __init__(self, sql_ctx: "SQLContext", func: Callable[["DataFrame", int], None]):
self.sql_ctx = sql_ctx
self.func = func
def call(self, jdf: JavaObject, batch_id: int) -> None:
from pyspark.sql.dataframe import DataFrame
try:
self.func(DataFrame(jdf, self.sql_ctx), batch_id)
except Exception as e:
self.error = e
raise e
class Java:
implements = ["org.apache.spark.sql.execution.streaming.sources.PythonForeachBatchFunction"]
def to_str(value: Any) -> Optional[str]:
"""
A wrapper over str(), but converts bool values to lower case strings.
If None is given, just returns None, instead of converting it to string "None".
"""
if isinstance(value, bool):
return str(value).lower()
elif value is None:
return value
else:
return str(value)
def is_timestamp_ntz_preferred() -> bool:
"""
    Return True if TimestampNTZType is preferred according to the SQL configuration set.
"""
jvm = SparkContext._jvm # type: ignore[attr-defined]
return (
jvm is not None
and getattr(getattr(jvm.org.apache.spark.sql.internal, "SQLConf$"), "MODULE$")
.get()
.timestampType()
.typeName()
== "timestamp_ntz"
)
| 33.667763 | 100 | 0.668197 |
e12262bdab851c1c662182f5d1041d745812f5e1 | 661 | py | Python | Ago-Dic-2018/Orlando Martinez/practica 2/UnchangedMagicians.py | angelicardz/DAS_Sistemas | e2a69fec358f0fad4fe05c39ea6168c89eed41ac | [
"MIT"
] | 41 | 2017-09-26T09:36:32.000Z | 2022-03-19T18:05:25.000Z | Ago-Dic-2018/Orlando Martinez/practica 2/UnchangedMagicians.py | angelicardz/DAS_Sistemas | e2a69fec358f0fad4fe05c39ea6168c89eed41ac | [
"MIT"
] | 67 | 2017-09-11T05:06:12.000Z | 2022-02-14T04:44:04.000Z | Ago-Dic-2018/Orlando Martinez/practica 2/UnchangedMagicians.py | angelicardz/DAS_Sistemas | e2a69fec358f0fad4fe05c39ea6168c89eed41ac | [
"MIT"
] | 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z | def show_magicians(nombres_magos):
for magician in nombres_magos:
print(magician)
def make_great(nombres_magos):
great_magicians = []
while nombres_magos:
mago = nombres_magos.pop()
great_magician = mago + ' the Great'
great_magicians.append(great_magician)
for great_magician in great_magicians:
nombres_magos.append(great_magician)
return nombres_magos
nombres_magos = ['Luis', 'Pedro', 'Antonio']
show_magicians(nombres_magos)
print("\nGreat Magicians:")
great_magicians = make_great(nombres_magos[:])
show_magicians(great_magicians)
print("\nOriginal Magicians:")
show_magicians(nombres_magos)
| 24.481481 | 46 | 0.732224 |
79c233760e728d70bb06241f8d99ebf8b0088364 | 2,299 | py | Python | bayescache/api/data/loaders.py | jacobhinkle/bayescache | 1728aaaec112a375a7341776ccb5c2d4b67242d6 | [
"MIT"
] | null | null | null | bayescache/api/data/loaders.py | jacobhinkle/bayescache | 1728aaaec112a375a7341776ccb5c2d4b67242d6 | [
"MIT"
] | null | null | null | bayescache/api/data/loaders.py | jacobhinkle/bayescache | 1728aaaec112a375a7341776ccb5c2d4b67242d6 | [
"MIT"
] | null | null | null | import numpy as np
from math import floor
from torch.utils import data
class DataSplitter(data.Dataset):
"""
Subset dataset by index.
    Helper class to be used with `train_valid_split`.
Parameters
----------
data : torch.utils.data.Dataset instance
length : int
Number of samples in subset.
mapping : list
Indices of the original data to be used in subset.
"""
def __init__(self, data, length, mapping):
self.data = data
self.length = length
self.mapping = mapping
def __repr__(self):
return str(self.data)
def __getitem__(self, index):
return self.data[self.mapping[index]]
def __len__(self):
return self.length
def train_valid_split(data, valpercent=.20, random_seed=None):
"""
Split dataset into train and validation sets.
Parameters
----------
    data : torch.utils.data.Dataset instance
Dataset to be split into training and validation sets.
valpercent : float
        Fraction of the dataset to be withheld for validation.
        Note: the validation size is the floor of ``len(data) * valpercent``.
random_seed : int
Random seed for shuffling.
Returns
-------
train : torch.utils.data.Dataset instance
Training set.
valid : torch.utils.data.Dataset instance
Validation set.
"""
    if random_seed is not None:
np.random.seed(random_seed)
datalen = len(data)
valid_size = floor(datalen * valpercent)
train_size = datalen - valid_size
indices = list(range(datalen))
np.random.shuffle(indices)
train_mapping = indices[valid_size:]
valid_mapping = indices[:valid_size]
train = DataSplitter(data, train_size, train_mapping)
valid = DataSplitter(data, valid_size, valid_mapping)
# train.__repr__ = update_repr(train, 'train', len(train))
# valid.__repr__ = update_repr(valid, 'valid', len(valid))
return train, valid
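# Hedged usage sketch (assumes torchvision is installed; any torch Dataset works):
#
#   from torchvision import datasets, transforms
#   mnist = datasets.MNIST('data/', download=True, transform=transforms.ToTensor())
#   train, valid = train_valid_split(mnist, valpercent=0.2, random_seed=42)
#   print(len(train), len(valid))  # 48000 12000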
def update_repr(data, partition, n_samples):
fmt_str = 'Dataset ' + data.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(n_samples)
fmt_str += ' Split: {}\n'.format(partition)
fmt_str += ' Root Location: {}\n'.format(data.data.root)
return fmt_str
| 25.831461 | 79 | 0.652893 |
17ba1de65741e8b57c3e0d3cdb737c5838d5e327 | 1,461 | py | Python | setup.py | remusao/httpsig | 8712e9b026eb6053d42bc52612a3cc42380829df | [
"MIT"
] | 36 | 2015-02-12T02:45:23.000Z | 2021-03-02T21:16:11.000Z | setup.py | remusao/httpsig | 8712e9b026eb6053d42bc52612a3cc42380829df | [
"MIT"
] | 18 | 2015-01-16T15:15:11.000Z | 2022-03-01T09:30:19.000Z | setup.py | remusao/httpsig | 8712e9b026eb6053d42bc52612a3cc42380829df | [
"MIT"
] | 18 | 2016-04-28T22:59:38.000Z | 2021-09-27T19:15:41.000Z | #!/usr/bin/env python
from setuptools import setup, find_packages
# create long description
with open('README.rst') as file:
long_description = file.read()
with open('CHANGELOG.rst') as file:
long_description += '\n\n' + file.read()
setup(
name='httpsig',
description="Secure HTTP request signing using the HTTP Signature draft specification",
long_description=long_description,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='http,authorization,api,web',
author='Adam Knight',
author_email='adam@movq.us',
url='https://github.com/ahknight/httpsig',
license='MIT',
packages=find_packages(),
include_package_data=True,
zip_safe=True,
use_scm_version=True,
setup_requires=['setuptools_scm'],
install_requires=['pycryptodome>=3,<4', 'six'],
test_suite="httpsig.tests",
)
| 34.785714 | 91 | 0.645448 |
45fd8c1ac7696b7b22685fbb82ba5ec1ab24d59c | 4,226 | py | Python | Contents/Code/localmedia.py | littleneko/LocalSubs.bundle | a29a85119d69e12858fc1c605af82c8c29cef21c | [
"MIT"
] | 2 | 2021-09-27T13:54:31.000Z | 2021-12-25T16:58:03.000Z | Contents/Code/localmedia.py | littleneko/LocalSubs.bundle | a29a85119d69e12858fc1c605af82c8c29cef21c | [
"MIT"
] | null | null | null | Contents/Code/localmedia.py | littleneko/LocalSubs.bundle | a29a85119d69e12858fc1c605af82c8c29cef21c | [
"MIT"
] | null | null | null |
import os
import re
import config
import helpers
import subtitlehelpers
def findSubtitles(part):
RE_METAFILES = re.compile(r'^[\.~]')
lang_sub_map = {}
part_filename = helpers.unicodize(part.file)
part_basename = os.path.splitext(os.path.basename(part_filename))[0]
paths = [os.path.dirname(part_filename)]
local_subtitle_folders = [language.strip() for language in Prefs['subs_folder_path'].split(',')]
if local_subtitle_folders is not None:
for f in local_subtitle_folders:
local_subtitle_folder_path = os.path.join(paths[0], f)
if os.path.exists(local_subtitle_folder_path):
paths.append(local_subtitle_folder_path)
# Check for a global subtitle location
global_subtitle_folder = os.path.join(Core.app_support_path, 'Subtitles')
if os.path.exists(global_subtitle_folder):
paths.append(global_subtitle_folder)
# We start by building a dictionary of files to their absolute paths. We also need to know
# the number of media files that are actually present, in case the found local media asset
# is limited to a single instance per media file.
#
file_paths = {}
total_media_files = 0
for path in paths:
path = helpers.unicodize(path)
for file_path_listing in os.listdir(path):
# When using os.listdir with a unicode path, it will always return a string using the
# NFD form. However, we internally are using the form NFC and therefore need to convert
# it to allow correct regex / comparisons to be performed.
#
file_path_listing = helpers.unicodize(file_path_listing)
if os.path.isfile(os.path.join(path, file_path_listing)) and not RE_METAFILES.search(file_path_listing):
file_paths[file_path_listing.lower()] = os.path.join(path, file_path_listing)
# If we've found an actual media file, we should record it.
(root, ext) = os.path.splitext(file_path_listing)
if ext.lower()[1:] in config.VIDEO_EXTS:
total_media_files += 1
Log('Looking for subtitle media in %d paths with %d media files.', len(paths), total_media_files)
Log('Paths: %s', ", ".join([helpers.unicodize(p) for p in paths]))
for file_path in file_paths.values():
local_basename = helpers.unicodize(os.path.splitext(os.path.basename(file_path))[0]) # no language, no flag
local_basename2 = local_basename.rsplit('.', 1)[0] # includes language, no flag
local_basename3 = local_basename2.rsplit('.', 1)[0] # includes language and flag
filename_matches_part = local_basename == part_basename or local_basename2 == part_basename or local_basename3 == part_basename
        # If the file is located within the global subtitle folder and its name doesn't match exactly
# then we should simply ignore it.
#
if file_path.count(global_subtitle_folder) and not filename_matches_part:
continue
# If we have more than one media file within the folder and located filename doesn't match
# exactly then we should simply ignore it.
#
if total_media_files > 1 and not filename_matches_part:
continue
subtitle_helper = subtitlehelpers.SubtitleHelpers(file_path)
if subtitle_helper is not None:
local_lang_map = subtitle_helper.process_subtitles(part)
for new_language, subtitles in local_lang_map.items():
# Add the possible new language along with the located subtitles so that we can validate them
# at the end...
#
if not lang_sub_map.has_key(new_language):
lang_sub_map[new_language] = []
lang_sub_map[new_language] = lang_sub_map[new_language] + subtitles
# Now whack subtitles that don't exist anymore.
for language in lang_sub_map.keys():
part.subtitles[language].validate_keys(lang_sub_map[language])
# Now whack the languages that don't exist anymore.
for language in list(set(part.subtitles.keys()) - set(lang_sub_map.keys())):
part.subtitles[language].validate_keys({})
| 45.934783 | 135 | 0.677473 |
df175d06aa43f0cf2371afcc091ece0d44569936 | 9,919 | py | Python | lib/modeling/mask_rcnn_heads.py | Juggernaut93/Detectron | 727b1f2291edf07cf9d20b6a869d5ad1307f0ffc | [
"Apache-2.0"
] | 3 | 2018-03-22T02:59:02.000Z | 2018-08-12T12:12:01.000Z | lib/modeling/mask_rcnn_heads.py | Juggernaut93/Detectron | 727b1f2291edf07cf9d20b6a869d5ad1307f0ffc | [
"Apache-2.0"
] | null | null | null | lib/modeling/mask_rcnn_heads.py | Juggernaut93/Detectron | 727b1f2291edf07cf9d20b6a869d5ad1307f0ffc | [
"Apache-2.0"
] | 1 | 2019-12-21T01:59:34.000Z | 2019-12-21T01:59:34.000Z |
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Various network "heads" for predicting masks in Mask R-CNN.
The design is as follows:
... -> RoI ----\
-> RoIFeatureXform -> mask head -> mask output -> loss
... -> Feature /
Map
The mask head produces a feature representation of the RoI for the purpose
of mask prediction. The mask output module converts the feature representation
into real-valued (soft) masks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from core.config import cfg
from utils.c2 import const_fill
from utils.c2 import gauss_fill
from utils.net import get_group_gn
import modeling.ResNet as ResNet
import utils.blob as blob_utils
# ---------------------------------------------------------------------------- #
# Mask R-CNN outputs and losses
# ---------------------------------------------------------------------------- #
def add_mask_rcnn_outputs(model, blob_in, dim):
"""Add Mask R-CNN specific outputs: either mask logits or probs."""
num_cls = cfg.MODEL.NUM_CLASSES if cfg.MRCNN.CLS_SPECIFIC_MASK else 1
if cfg.MRCNN.USE_FC_OUTPUT:
# Predict masks with a fully connected layer (ignore 'fcn' in the blob
# name)
blob_out = model.FC(
blob_in,
'mask_fcn_logits',
dim,
num_cls * cfg.MRCNN.RESOLUTION**2,
weight_init=gauss_fill(0.001),
bias_init=const_fill(0.0)
)
else:
# Predict mask using Conv
# Use GaussianFill for class-agnostic mask prediction; fills based on
# fan-in can be too large in this case and cause divergence
fill = (
cfg.MRCNN.CONV_INIT
if cfg.MRCNN.CLS_SPECIFIC_MASK else 'GaussianFill'
)
blob_out = model.Conv(
blob_in,
'mask_fcn_logits',
dim,
num_cls,
kernel=1,
pad=0,
stride=1,
weight_init=(fill, {'std': 0.001}),
bias_init=const_fill(0.0)
)
if cfg.MRCNN.UPSAMPLE_RATIO > 1:
blob_out = model.BilinearInterpolation(
'mask_fcn_logits', 'mask_fcn_logits_up', num_cls, num_cls,
cfg.MRCNN.UPSAMPLE_RATIO
)
if not model.train: # == if test
blob_out = model.net.Sigmoid(blob_out, 'mask_fcn_probs')
return blob_out
def add_mask_rcnn_losses(model, blob_mask):
"""Add Mask R-CNN specific losses."""
loss_mask = model.net.SigmoidCrossEntropyLoss(
[blob_mask, 'masks_int32'],
'loss_mask',
scale=model.GetLossScale() * cfg.MRCNN.WEIGHT_LOSS_MASK
)
loss_gradients = blob_utils.get_loss_gradients(model, [loss_mask])
model.AddLosses('loss_mask')
return loss_gradients
# ---------------------------------------------------------------------------- #
# Mask heads
# ---------------------------------------------------------------------------- #
def mask_rcnn_fcn_head_v1up4convs(model, blob_in, dim_in, spatial_scale):
"""v1up design: 4 * (conv 3x3), convT 2x2."""
return mask_rcnn_fcn_head_v1upXconvs(
model, blob_in, dim_in, spatial_scale, 4
)
def mask_rcnn_fcn_head_v1up4convs_gn(model, blob_in, dim_in, spatial_scale):
"""v1up design: 4 * (conv 3x3), convT 2x2, with GroupNorm"""
return mask_rcnn_fcn_head_v1upXconvs_gn(
model, blob_in, dim_in, spatial_scale, 4
)
def mask_rcnn_fcn_head_v1up(model, blob_in, dim_in, spatial_scale):
"""v1up design: 2 * (conv 3x3), convT 2x2."""
return mask_rcnn_fcn_head_v1upXconvs(
model, blob_in, dim_in, spatial_scale, 2
)
def mask_rcnn_fcn_head_v1upXconvs(
model, blob_in, dim_in, spatial_scale, num_convs
):
"""v1upXconvs design: X * (conv 3x3), convT 2x2."""
current = model.RoIFeatureTransform(
blob_in,
blob_out='_[mask]_roi_feat',
blob_rois='mask_rois',
method=cfg.MRCNN.ROI_XFORM_METHOD,
resolution=cfg.MRCNN.ROI_XFORM_RESOLUTION,
sampling_ratio=cfg.MRCNN.ROI_XFORM_SAMPLING_RATIO,
spatial_scale=spatial_scale
)
dilation = cfg.MRCNN.DILATION
dim_inner = cfg.MRCNN.DIM_REDUCED
for i in range(num_convs):
current = model.Conv(
current,
'_[mask]_fcn' + str(i + 1),
dim_in,
dim_inner,
kernel=3,
pad=1 * dilation,
stride=1,
weight_init=(cfg.MRCNN.CONV_INIT, {'std': 0.001}),
bias_init=('ConstantFill', {'value': 0.})
)
current = model.Relu(current, current)
dim_in = dim_inner
# upsample layer
model.ConvTranspose(
current,
'conv5_mask',
dim_inner,
dim_inner,
kernel=2,
pad=0,
stride=2,
weight_init=(cfg.MRCNN.CONV_INIT, {'std': 0.001}),
bias_init=const_fill(0.0)
)
blob_mask = model.Relu('conv5_mask', 'conv5_mask')
return blob_mask, dim_inner
def mask_rcnn_fcn_head_v1upXconvs_gn(
model, blob_in, dim_in, spatial_scale, num_convs
):
"""v1upXconvs design: X * (conv 3x3), convT 2x2, with GroupNorm"""
current = model.RoIFeatureTransform(
blob_in,
blob_out='_mask_roi_feat',
blob_rois='mask_rois',
method=cfg.MRCNN.ROI_XFORM_METHOD,
resolution=cfg.MRCNN.ROI_XFORM_RESOLUTION,
sampling_ratio=cfg.MRCNN.ROI_XFORM_SAMPLING_RATIO,
spatial_scale=spatial_scale
)
dilation = cfg.MRCNN.DILATION
dim_inner = cfg.MRCNN.DIM_REDUCED
for i in range(num_convs):
current = model.ConvGN(
current,
'_mask_fcn' + str(i + 1),
dim_in,
dim_inner,
group_gn=get_group_gn(dim_inner),
kernel=3,
pad=1 * dilation,
stride=1,
weight_init=(cfg.MRCNN.CONV_INIT, {'std': 0.001}),
bias_init=('ConstantFill', {'value': 0.})
)
current = model.Relu(current, current)
dim_in = dim_inner
# upsample layer
model.ConvTranspose(
current,
'conv5_mask',
dim_inner,
dim_inner,
kernel=2,
pad=0,
stride=2,
weight_init=(cfg.MRCNN.CONV_INIT, {'std': 0.001}),
bias_init=const_fill(0.0)
)
blob_mask = model.Relu('conv5_mask', 'conv5_mask')
return blob_mask, dim_inner
def mask_rcnn_fcn_head_v0upshare(model, blob_in, dim_in, spatial_scale):
"""Use a ResNet "conv5" / "stage5" head for mask prediction. Weights and
computation are shared with the conv5 box head. Computation can only be
shared during training, since inference is cascaded.
v0upshare design: conv5, convT 2x2.
"""
# Since box and mask head are shared, these must match
assert cfg.MRCNN.ROI_XFORM_RESOLUTION == cfg.FAST_RCNN.ROI_XFORM_RESOLUTION
if model.train: # share computation with bbox head at training time
dim_conv5 = 2048
blob_conv5 = model.net.SampleAs(
['res5_2_sum', 'roi_has_mask_int32'],
['_[mask]_res5_2_sum_sliced']
)
else: # re-compute at test time
blob_conv5, dim_conv5 = add_ResNet_roi_conv5_head_for_masks(
model,
blob_in,
dim_in,
spatial_scale
)
dim_reduced = cfg.MRCNN.DIM_REDUCED
blob_mask = model.ConvTranspose(
blob_conv5,
'conv5_mask',
dim_conv5,
dim_reduced,
kernel=2,
pad=0,
stride=2,
weight_init=(cfg.MRCNN.CONV_INIT, {'std': 0.001}), # std only for gauss
bias_init=const_fill(0.0)
)
model.Relu('conv5_mask', 'conv5_mask')
return blob_mask, dim_reduced
def mask_rcnn_fcn_head_v0up(model, blob_in, dim_in, spatial_scale):
"""v0up design: conv5, deconv 2x2 (no weight sharing with the box head)."""
blob_conv5, dim_conv5 = add_ResNet_roi_conv5_head_for_masks(
model,
blob_in,
dim_in,
spatial_scale
)
dim_reduced = cfg.MRCNN.DIM_REDUCED
model.ConvTranspose(
blob_conv5,
'conv5_mask',
dim_conv5,
dim_reduced,
kernel=2,
pad=0,
stride=2,
weight_init=('GaussianFill', {'std': 0.001}),
bias_init=const_fill(0.0)
)
blob_mask = model.Relu('conv5_mask', 'conv5_mask')
return blob_mask, dim_reduced
def add_ResNet_roi_conv5_head_for_masks(model, blob_in, dim_in, spatial_scale):
"""Add a ResNet "conv5" / "stage5" head for predicting masks."""
model.RoIFeatureTransform(
blob_in,
blob_out='_[mask]_pool5',
blob_rois='mask_rois',
method=cfg.MRCNN.ROI_XFORM_METHOD,
resolution=cfg.MRCNN.ROI_XFORM_RESOLUTION,
sampling_ratio=cfg.MRCNN.ROI_XFORM_SAMPLING_RATIO,
spatial_scale=spatial_scale
)
dilation = cfg.MRCNN.DILATION
stride_init = int(cfg.MRCNN.ROI_XFORM_RESOLUTION / 7) # by default: 2
s, dim_in = ResNet.add_stage(
model,
'_[mask]_res5',
'_[mask]_pool5',
3,
dim_in,
2048,
512,
dilation,
stride_init=stride_init
)
return s, 2048
| 30.240854 | 80 | 0.607017 |
03d712c3f7e24c3457c09d28e088b692006ad9c0 | 1,612 | py | Python | scrapy_statsd_extension/__init__.py | edison7500/scrapy-statsd | 8fd4e77fef675ecd3c788dc036840a9759ba9b12 | [
"BSD-3-Clause"
] | null | null | null | scrapy_statsd_extension/__init__.py | edison7500/scrapy-statsd | 8fd4e77fef675ecd3c788dc036840a9759ba9b12 | [
"BSD-3-Clause"
] | null | null | null | scrapy_statsd_extension/__init__.py | edison7500/scrapy-statsd | 8fd4e77fef675ecd3c788dc036840a9759ba9b12 | [
"BSD-3-Clause"
] | null | null | null |
from scrapy import signals
from scrapy.exceptions import NotConfigured
from scrapy.utils.misc import load_object
from twisted.internet.task import LoopingCall
from scrapy_statsd_extension import utils, defaults
class StatsdExtension(object):
def __init__(self, crawler):
if not crawler.settings.getbool("STATSD_ENABLED", defaults.STATSD_ENABLED):
raise NotConfigured
self.log_periodic = crawler.settings.get(
"STATSD_LOG_PERIODIC", defaults.STATSD_LOG_PERIODIC
)
self.callack_timer = crawler.settings.get(
"STATSD_LOG_EVERY", defaults.STATSD_LOG_EVERY
)
self.handler = load_object(defaults.STATSD_HANDLER).from_crawler(crawler)
self.stats = crawler.stats
@classmethod
def from_crawler(cls, crawler):
ext = cls(crawler)
crawler.signals.connect(ext.spider_opened, signal=signals.spider_opened)
crawler.signals.connect(ext.spider_closed, signal=signals.spider_closed)
return ext
def spider_opened(self, spider):
if self.log_periodic:
self.log_task = LoopingCall(self.log_stats, spider)
self.log_task.start(self.callack_timer)
def log_stats(self, spider):
for key, value in self.stats.get_stats().items():
if isinstance(value, int) or isinstance(value, float):
self.handler.increment(utils.create_stat_key(key), value, spider)
def spider_closed(self, spider):
if hasattr(self, "log_task") and self.log_task.running:
self.log_task.stop()
self.log_stats(spider)
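# Illustrative configuration sketch (Scrapy settings.py), assuming this package
# is importable as `scrapy_statsd_extension`; the extension priority 500 is an
# arbitrary example value:
#
# EXTENSIONS = {
#     "scrapy_statsd_extension.StatsdExtension": 500,
# }
# STATSD_ENABLED = True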
| 33.583333 | 83 | 0.691067 |
6b7dbdd19527b4d42da3b48fc2423060847b4831 | 1,549 | py | Python | samples/snippets/import_data_text_classification_single_label_sample_test.py | telpirion/python-aiplatform | 5155dee5edd86fb700a91dfca01bddd4d6393410 | [
"Apache-2.0"
] | null | null | null | samples/snippets/import_data_text_classification_single_label_sample_test.py | telpirion/python-aiplatform | 5155dee5edd86fb700a91dfca01bddd4d6393410 | [
"Apache-2.0"
] | null | null | null | samples/snippets/import_data_text_classification_single_label_sample_test.py | telpirion/python-aiplatform | 5155dee5edd86fb700a91dfca01bddd4d6393410 | [
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
from unittest.mock import patch, mock_open, MagicMock
import import_data_text_classification_single_label_sample
# Test to assert that the import data function was called. We assert that the function was called
# rather than wait for this LRO to complete
def test_ucaip_generated_import_data_text_classification_single_label_sample():
response = MagicMock()
response.next_page_token = b""
rpc = MagicMock(return_value=response)
mock_channel = MagicMock()
mock_channel.unary_unary = MagicMock(return_value=rpc)
with patch(
"google.api_core.grpc_helpers.create_channel", return_value=mock_channel
), patch("time.sleep"), patch("builtins.open", mock_open(read_data=b"")):
import_data_text_classification_single_label_sample.import_data_text_classification_single_label_sample(
gcs_source_uri="GCS_SOURCE_URI", project="PROJECT", dataset_id="DATASET_ID"
)
rpc.assert_called()
| 38.725 | 112 | 0.768883 |
d1b0033a45df29a2f73a5aad0eb2132447ad9992 | 2,734 | py | Python | marshmallow_peewee/fields.py | iBelieve/marshmallow-peewee | d9ff33039592ad1a8b1a95c9e16fd806b694858d | [
"MIT"
] | null | null | null | marshmallow_peewee/fields.py | iBelieve/marshmallow-peewee | d9ff33039592ad1a8b1a95c9e16fd806b694858d | [
"MIT"
] | null | null | null | marshmallow_peewee/fields.py | iBelieve/marshmallow-peewee | d9ff33039592ad1a8b1a95c9e16fd806b694858d | [
"MIT"
] | null | null | null |
import datetime as dt
from marshmallow import fields
from marshmallow.compat import PY2, string_types
class Timestamp(fields.Field):
default_error_messages = {
'invalid': 'Not a valid timestamp.'
}
def _serialize(self, value, attr, obj):
"""Serialize given datetime to timestamp."""
if value is None:
return None
return int(datetime_to_timestamp(value))
def _deserialize(self, value, attr, data):
if not value: # Falsy values, e.g. '', None, [] are not valid
raise self.fail('invalid')
try:
return dt.datetime.utcfromtimestamp(float(value))
except ValueError:
raise self.fail('invalid')
class MSTimestamp(Timestamp):
def _serialize(self, value, *args):
"""Serialize given datetime to timestamp."""
if value is not None:
value = super(MSTimestamp, self)._serialize(value, *args) * 1e3
return value
def _deserialize(self, value, *args):
if value:
value = int(value) / 1e3
return super(MSTimestamp, self)._deserialize(value, *args)
class Related(fields.Nested):
def __init__(self, nested=None, meta=None, **kwargs):
self.meta = meta or {}
super(Related, self).__init__(nested, **kwargs)
def init_model(self, model, name):
from .schema import ModelSchema
field = model._meta.fields.get(name)
if not field:
field = model._meta.reverse_rel.get(name)
if not field:
raise KeyError(name)
self.many = True
rel_model = field.model_class
else:
rel_model = field.rel_model
self.attribute = self.attribute or name
self.meta['model'] = rel_model
meta = type('Meta', (), self.meta)
self.nested = type('Schema', (ModelSchema,), {'Meta': meta})
def _deserialize(self, value, attr, data):
if isinstance(value, (int, string_types)):
return int(value)
return super(Related, self)._deserialize(value, attr, data)
class ForeignKey(fields.Raw):
def get_value(self, attr, obj, *args, **kwargs):
"""Return the value for a given key from an object."""
value = obj._data.get(attr)
if value is not None:
value = str(value)
return value
if PY2:
def datetime_to_timestamp(dt_, epoch=dt.datetime(1970, 1, 1)):
"""Convert given datetime object to timestamp in seconds."""
return (dt_ - epoch).total_seconds()
else:
def datetime_to_timestamp(dt_):
"""Convert given datetime object to timestamp in seconds."""
return dt_.replace(tzinfo=dt.timezone.utc).timestamp()
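if __name__ == '__main__':
    # Quick illustrative check of the helper above: 2019-01-01 00:00:00 UTC
    # falls 1546300800 seconds after the Unix epoch on both branches.
    print(datetime_to_timestamp(dt.datetime(2019, 1, 1)))  # -> 1546300800.0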
| 28.778947 | 75 | 0.61229 |
729409c1df5887ae4070dd3d20323f8c6fa3b527 | 119 | py | Python | bspider/master/__init__.py | littlebai3618/bspider | ff4d003cd0825247db4efe62db95f9245c0a303c | [
"BSD-3-Clause"
] | 3 | 2020-06-19T03:52:29.000Z | 2021-05-21T05:50:46.000Z | bspider/master/__init__.py | littlebai3618/bspider | ff4d003cd0825247db4efe62db95f9245c0a303c | [
"BSD-3-Clause"
] | 2 | 2021-03-31T19:39:03.000Z | 2021-05-12T02:10:26.000Z | bspider/master/__init__.py | littlebai3618/bspider | ff4d003cd0825247db4efe62db95f9245c0a303c | [
"BSD-3-Clause"
] | null | null | null |
from bspider.utils.logger import LoggerPool
log = LoggerPool().get_logger(key='master', fn='master', module='master')
| 29.75 | 73 | 0.756303 |
ad32f157d0e694477e009b5a9e3650701787d79e | 13,330 | py | Python | python/paddle/fluid/parallel_executor.py | ysh329/Paddle | 50ad9046c9a440564d104eaa354eb9df83a35678 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/parallel_executor.py | ysh329/Paddle | 50ad9046c9a440564d104eaa354eb9df83a35678 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/parallel_executor.py | ysh329/Paddle | 50ad9046c9a440564d104eaa354eb9df83a35678 | [
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from . import core
from . import framework
from . import executor
from . import compiler
import sys
__all__ = ['ParallelExecutor']
ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy
BuildStrategy = core.ParallelExecutor.BuildStrategy
class ParallelExecutor(object):
"""
ParallelExecutor is designed for data parallelism, which focuses on distributing
the data across different nodes and every node operates on the data in parallel.
If you use ParallelExecutor to run the current program on GPU, the node means GPU
device, and ParallelExecutor will get the available GPU device automatically on
the current machine. If you use ParallelExecutor to run the current program on CPU,
the node means the CPU device, and you can specify the CPU device number by adding
'CPU_NUM' environment variable, for example 'CPU_NUM=4', if the environment variable
is not found, ParallelExecutor will call `multiprocessing.cpu_count` to get the number
of CPUs in the system.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
import os
use_cuda = True
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
# NOTE: If you use CPU to run the program, you need
# to specify the CPU_NUM, otherwise, fluid will use
# all the number of the logic core as the CPU_NUM,
# in that case, the batch size of the input should be
# greater than CPU_NUM, if not, the process will be
# failed by an exception.
if not use_cuda:
os.environ['CPU_NUM'] = str(2)
exe = fluid.Executor(place)
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
data = fluid.layers.data(name='X', shape=[1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
test_program = fluid.default_main_program().clone(for_test=True)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
startup_program.random_seed=1
exe.run(startup_program)
train_exe = fluid.ParallelExecutor(use_cuda=use_cuda,
main_program=train_program,
loss_name=loss.name)
test_exe = fluid.ParallelExecutor(use_cuda=use_cuda,
main_program=test_program,
share_vars_from=train_exe)
x = numpy.random.random(size=(10, 1)).astype('float32')
loss_data, = train_exe.run(feed={"X": x},
fetch_list=[loss.name])
loss_data, = test_exe.run(feed={"X": x},
fetch_list=[loss.name])
Args:
use_cuda (bool): Whether to use CUDA or not.
loss_name (str): The loss name must set in training. Default None.
main_program (Program): The program that need to run, if not provided,
then default_main_program will be used. Default None.
share_vars_from(ParallelExecutor): If provide, it will share variables
from the specified ParallelExecutor. Default None.
exec_strategy(ExecutionStrategy): exec_strategy is used to control how to run
the program in ParallelExecutor, for example how many threads are used to
execute the program, how many iterations to clean up the temp variables
which is generated during execution. For more information, please refer
to fluid.ExecutionStrategy. Default None.
build_strategy(BuildStrategy): build_strategy is used to control how to
build the SSA Graph in ParallelExecutor by setting the property,
for example reduce_strategy, gradient_scale_strategy. For more information,
please refer to fluid.BuildStrategy. Default None.
num_trainers(int): If greater than 1, NCCL will be initialized with
multiple rank of nodes, each node should have same number of GPUs.
Distributed training will be enabled then. Default 1.
trainer_id(int): Must use together with num_trainers. trainer_id is the
"rank" of current node starts from 0. Default 0.
scope(Scope): scope to run with, default use fluid.global_scope().
Returns:
ParallelExecutor: The initialized ParallelExecutor object.
Raises:
TypeError: If share_vars_from is provided, but not ParallelExecutor object.
"""
def __init__(self,
use_cuda,
loss_name=None,
main_program=None,
share_vars_from=None,
exec_strategy=None,
build_strategy=None,
num_trainers=1,
trainer_id=0,
scope=None):
sys.stderr.write(
'ParallelExecutor is deprecated. '
'Please use CompiledProgram and Executor. CompiledProgram '
'is a central place for optimization and Executor is the '
'unified executor. Example can be found in compiler.py.\n')
if build_strategy is None:
build_strategy = BuildStrategy()
# TODO(paddle-dev): trainer_id and num_trainers should be removed from parameter list.
if num_trainers != 1 and build_strategy.num_trainers != num_trainers:
sys.stderr.write(
'The value of build_strategy.num_trainers[%d] is overwritten '
'by the passed num_trainers[%d].\n' %
(build_strategy.num_trainers, num_trainers))
build_strategy.num_trainers = num_trainers
if trainer_id != 0 and build_strategy.trainer_id != trainer_id:
sys.stderr.write(
'The value of build_strategy.trainer_id[%d] is overwritten '
'by the passed trainer_id[%d].\n' %
(build_strategy.trainer_id, trainer_id))
build_strategy.trainer_id = trainer_id
self._places = framework.cuda_places(
) if use_cuda else framework.cpu_places()
self._scope = scope if scope is not None else executor.global_scope()
if main_program is not None and main_program._enable_dgc:
assert build_strategy.num_trainers > 1, "dgc is not useful when num_trainers <= 1"
assert build_strategy.reduce_strategy == BuildStrategy.ReduceStrategy.AllReduce, "dgc \
only used for allreduce"
assert build_strategy.num_trainers * len(
self._places) > 1, "dgc is not useful for single card training"
assert use_cuda, "dgc only used under cuda"
main_program = main_program if main_program is not None \
else framework.default_main_program()
self._compiled_program = compiler.CompiledProgram(main_program)
if share_vars_from:
assert isinstance(
share_vars_from, ParallelExecutor
), "The share_vars_from should be ParallelExecutor."
self._compiled_program.with_data_parallel(
loss_name=loss_name,
build_strategy=build_strategy,
exec_strategy=exec_strategy,
share_vars_from=share_vars_from._compiled_program
if share_vars_from else None)
self._place = core.CUDAPlace(0) if use_cuda else core.CPUPlace()
self._exe = executor.Executor(self._place)
self._compiled_program._compile(place=self._place, scope=self._scope)
def run(self, fetch_list, feed=None, feed_dict=None, return_numpy=True):
"""
Run a parallel executor with fetch_list.
The feed parameter can be a dict or a list. If feed is a dict, the
feed data will be split into multiple devices. If feed is a list, we
        assume the data has already been split across the devices, and each
        element in the list will be copied directly to the corresponding device.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy
import os
use_cuda = True
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
# NOTE: If you use CPU to run the program, you need
# to specify the CPU_NUM, otherwise, fluid will use
# all the number of the logic core as the CPU_NUM,
# in that case, the batch size of the input should be
# greater than CPU_NUM, if not, the process will be
# failed by an exception.
if not use_cuda:
os.environ['CPU_NUM'] = str(2)
exe = fluid.Executor(place)
train_program = fluid.Program()
startup_program = fluid.Program()
with fluid.program_guard(train_program, startup_program):
data = fluid.layers.data(name='X', shape=[1], dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
loss = fluid.layers.mean(hidden)
fluid.optimizer.SGD(learning_rate=0.01).minimize(loss)
startup_program.random_seed=1
exe.run(startup_program)
train_exe = fluid.ParallelExecutor(use_cuda=use_cuda,
main_program=train_program,
loss_name=loss.name)
# If the feed is a dict:
            # the image will be split across the devices. If there are two devices,
# each device will process an image with shape (5, 1)
x = numpy.random.random(size=(10, 1)).astype('float32')
loss_data, = train_exe.run(feed={"X": x},
fetch_list=[loss.name])
# If the feed is a list:
# each device will process each element in the list.
# the 1st device will process an image with shape (10, 1)
# the 2nd device will process an image with shape (9, 1)
#
# you can use exe.device_count to get the device number.
x2 = numpy.random.random(size=(9, 1)).astype('float32')
loss_data, = train_exe.run(feed=[{"X": x}, {"X": x2}],
fetch_list=[loss.name])
Args:
fetch_list(list): The fetched variable names
feed(list|dict|None): The feed variables. If the feed is a dict,
                tensors in that dict will be split across the devices. If
the feed is a list, each element of the list will be copied
to each device. Default None.
feed_dict: Alias for feed parameter, for backward compatibility.
This parameter has been deprecated. Default None.
return_numpy(bool): Whether converts the fetched tensor to numpy.
Default: True.
Returns:
List: The fetched result list.
Raises:
ValueError: If the feed is a list, but its length is not equal the
length of active places, or its element's is not dict.
NOTES:
1. If the feed's type is dict, the number of data that feeds to
ParallelExecutor must be bigger than active places. Otherwise,
it will throw exception from C++ side. Special attention should be
paid to check whether the last batch of the dataset is bigger
than active places.
2. If active places are more than one, the fetch results for each
variable is a list, and each element of this list is the variable of
respective active place.
Examples:
.. code-block:: python
pe = fluid.ParallelExecutor(use_cuda=use_cuda,
loss_name=avg_cost.name,
main_program=fluid.default_main_program())
loss = pe.run(feed=feeder.feed(cur_batch),
fetch_list=[avg_cost.name]))
"""
return self._exe.run(program=self._compiled_program,
scope=self._scope,
feed=feed,
fetch_list=fetch_list,
return_numpy=return_numpy)
@property
def device_count(self):
return len(self._places)
| 45.80756 | 99 | 0.611028 |
2f8b0339e49cef0c9aa1ead3b927ea369e81898f | 3,641 | py | Python | Wellcheck/Webapps/api.wellcheck/Object/mail.py | SCcagg5/WellcheckV2 | 5c96a577f460e1f0dee8d3b3c3efc8217d1a2f5b | [
"MIT"
] | 3 | 2020-05-16T19:24:42.000Z | 2020-09-26T17:05:19.000Z | Wellcheck/Webapps/api.wellcheck/Object/mail.py | SCcagg5/WellcheckV2 | 5c96a577f460e1f0dee8d3b3c3efc8217d1a2f5b | [
"MIT"
] | 30 | 2020-05-16T16:01:17.000Z | 2020-10-08T23:05:08.000Z | Wellcheck/Webapps/api.wellcheck/Object/mail.py | WellCheck-Co/WellCheck-Web-App | afa076e6451ebbb055d06f938bb2ecc511cf9b6f | [
"MIT"
] | null | null | null |
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
import os
from datetime import datetime
from Source.email.heb_report import *
from Source.email.alerte import *
from Source.email.new_user import *
smtp_user = str(os.getenv('MAILER_USER', None))
smtp_pass = str(os.getenv('MAILER_PASS', None))
smtp_server = str(os.getenv('MAILER_HOST', None))
smtp_port = str(os.getenv('MAILER_PORT', None))
class Mailer():
def __init__(self):
"""Open connection to the mail server"""
self.sender = smtp_user
self.password = smtp_pass
self.server = smtplib.SMTP_SSL(smtp_server, smtp_port)
self.server.login(self.sender, self.password)
self.msg = MIMEMultipart()
def heb_report(self, to_list, point_name, point_id, timestamp_start, timestamp_end, timestamp_origin):
"""Send a message to the recipient"""
point_id = str(point_id)
        start = datetime.fromtimestamp(timestamp_start)
        end = datetime.fromtimestamp(timestamp_end)
        origin = datetime.fromtimestamp(timestamp_origin)
self.html = report_header + report_body.format(point_id = point_id,
report_link = "https://doc.wellcheck.fr/src.php?id=" + point_id + "&from=" + str(timestamp_start) + "&to=" + str(timestamp_end),
date_start = start.strftime("%d/%m/%Y"),
date_end = end.strftime("%d/%m/%Y"),
date_origin = origin.strftime("%H:%M:%S %d/%m/%Y"))
self.to_list = to_list
self.msg['Subject'] = "Rapport hebdomadaire Point " + point_name
self.__send()
return [True, {}, None]
def alerte(self, to_list, point_name, point_id, timestamp, note):
"""Send a message to the recipient"""
point_id = str(point_id)
date = datetime.fromtimestamp(timestamp)
self.html = alerte_header + alerte_body.format(point_id = point_id,
alerte_link = "https://dashboard.wellcheck.fr/stats?bindlocal=true&force=true&selected=" + point_id,
date = date.strftime("%H:%M:%S %d/%m/%Y"),
note_10 = "{:.2f}".format(note))
self.to_list = to_list
self.msg['Subject'] = "Alerte pollution ! Point " + point_name
self.__send()
return [True, {}, None]
def new_user(self, to, key, timestamp):
"""Send a message to the recipient"""
date = datetime.fromtimestamp(timestamp)
self.html = new_header + new_body.format(valid_link = "https://dashboard.wellcheck.fr/valid?bindlocal=true&act_key=" + key.replace("=", "%3D"),
key = str(key))
self.to_list = [to]
self.msg['Subject'] = "Votre inscription Wellcheck"
self.__send()
return [True, {}, None]
def __send(self):
self.message = ""
self.msg['From'] = self.sender
self.msg['To'] = ", ".join(self.to_list)
self.msg.attach(MIMEText(self.html, 'html'))
self.msg.attach(MIMEText(self.message, 'plain'))
self.server.send_message(self.msg, self.msg['From'], self.to_list)
self.__close()
return
def __close(self):
"""Close the server connection"""
self.server.quit()
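# Illustrative usage sketch: the constructor opens an SMTP_SSL connection using
# the MAILER_* environment variables, so the recipient, key, and timestamp below
# are placeholders only.
#
#     mailer = Mailer()
#     mailer.new_user("user@example.com", key="activation-key=", timestamp=1589641200)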
| 47.285714 | 184 | 0.553694 |
3a286d103a3c363185db3241b5ae7f47200ad1ff | 15,915 | py | Python | colour/characterisation/dataset/colour_checkers/chromaticity_coordinates.py | sobotka/colour | aa3fe95fba83ffc0f3ce1eb6aca85e6d8f3bde51 | [
"Cube",
"BSD-3-Clause"
] | 2 | 2020-05-03T20:15:42.000Z | 2021-04-09T18:19:06.000Z | colour/characterisation/dataset/colour_checkers/chromaticity_coordinates.py | sobotka/colour | aa3fe95fba83ffc0f3ce1eb6aca85e6d8f3bde51 | [
"Cube",
"BSD-3-Clause"
] | null | null | null | colour/characterisation/dataset/colour_checkers/chromaticity_coordinates.py | sobotka/colour | aa3fe95fba83ffc0f3ce1eb6aca85e6d8f3bde51 | [
"Cube",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
ColourCheckers Chromaticity Coordinates
=======================================
Defines *ColourCheckers* chromaticity coordinates in *CIE xyY* colourspace.
Each *ColourChecker* data is in the form of a list of an :class:`OrderedDict`
class instance of 24 samples as follows::
{'name': 'xyY', ..., 'name': 'xyY'}
The following *ColourCheckers* data is available:
- :attr:`colour.characterisation.dataset.colour_checkers.\
chromaticity_coordinates.COLORCHECKER_1976`: *ColourChecker* developed by
*McCamy et al.* at Macbeth, a Division of Kollmorgen.
- :attr:`colour.characterisation.dataset.colour_checkers.\
chromaticity_coordinates.COLORCHECKER_2005`: Reference data from
*GretagMacbeth* published in 2005.
- :attr:`colour.characterisation.dataset.colour_checkers.\
chromaticity_coordinates.BABELCOLOR_AVERAGE`: Average data derived from
measurements of 30 *ColourChecker* charts.
- :attr:`colour.characterisation.dataset.colour_checkers.\
chromaticity_coordinates.COLORCHECKER24_BEFORE_NOV2014`: Reference data from
*X-Rite* published in 2015 and matching the data from *GretagMacbeth*
published in 2005.
- :attr:`colour.characterisation.dataset.colour_checkers.\
chromaticity_coordinates.COLORCHECKER24_AFTER_NOV2014`: Reference data from
*X-Rite* published in 2015 and matching the *ColourChecker* edition after
November 2014.
See Also
--------
`Colour Fitting Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/characterisation/fitting.ipynb>`_
References
----------
- :cite:`BabelColor2012b` : BabelColor. (2012). The ColorChecker
(since 1976!). Retrieved September 26, 2014, from
http://www.babelcolor.com/main_level/ColorChecker.htm
- :cite:`BabelColor2012c` : BabelColor. (2012). ColorChecker RGB and spectra.
Retrieved from http://www.babelcolor.com/download/\
ColorChecker_RGB_and_spectra.xls
- :cite:`X-Rite2015` : X-Rite. (2015). New color specifications for
ColorChecker SG and Classic Charts. Retrieved October 29, 2018,
from http://xritephoto.com/ph_product_overview.aspx?\
ID=938&Action=Support&SupportID=5884#
"""
from __future__ import division, unicode_literals
import numpy as np
from collections import OrderedDict, namedtuple
from colour.colorimetry import ILLUMINANTS
from colour.models import Lab_to_XYZ, XYZ_to_xyY
from colour.utilities import CaseInsensitiveMapping
__author__ = 'Colour Developers, Danny Pascale '
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__copyright__ += ', '
__copyright__ += (
'BabelColor ColorChecker data: Copyright (C) 2004-2012 Danny Pascale '
'(www.babelcolor.com); used by permission.')
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = [
'ColourChecker', 'COLORCHECKER_1976_DATA', 'COLORCHECKER_1976_ILLUMINANT',
'COLORCHECKER_1976', 'COLORCHECKER_2005_DATA',
'COLORCHECKER_2005_ILLUMINANT', 'COLORCHECKER_2005',
'BABELCOLOR_AVERAGE_DATA', 'BABELCOLOR_AVERAGE_ILLUMINANT',
'BABELCOLOR_AVERAGE', 'COLORCHECKER24_BEFORE_NOV2014_LAB_DATA',
'COLORCHECKER24_BEFORE_NOV2014_DATA',
'COLORCHECKER24_BEFORE_NOV2014_ILLUMINANT',
'COLORCHECKER24_BEFORE_NOV2014', 'COLORCHECKER24_AFTER_NOV2014_LAB_DATA',
'COLORCHECKER24_AFTER_NOV2014_DATA',
'COLORCHECKER24_AFTER_NOV2014_ILLUMINANT', 'COLORCHECKER24_AFTER_NOV2014',
'COLOURCHECKERS'
]
class ColourChecker(
namedtuple('ColourChecker', ('name', 'data', 'illuminant'))):
"""
*ColourChecker* data.
Parameters
----------
name : unicode
*ColourChecker* name.
data : OrderedDict
chromaticity coordinates in *CIE xyY* colourspace.
illuminant : array_like
*ColourChecker* illuminant chromaticity coordinates.
"""
COLORCHECKER_1976_DATA = OrderedDict((
('dark skin', np.array([0.4002, 0.3504, 0.1005])),
('light skin', np.array([0.3773, 0.3446, 0.3582])),
('blue sky', np.array([0.2470, 0.2514, 0.1933])),
('foliage', np.array([0.3372, 0.4220, 0.1329])),
('blue flower', np.array([0.2651, 0.2400, 0.2427])),
('bluish green', np.array([0.2608, 0.3430, 0.4306])),
('orange', np.array([0.5060, 0.4070, 0.3005])),
('purplish blue', np.array([0.2110, 0.1750, 0.1200])),
('moderate red', np.array([0.4533, 0.3058, 0.1977])),
('purple', np.array([0.2845, 0.2020, 0.0656])),
('yellow green', np.array([0.3800, 0.4887, 0.4429])),
('orange yellow', np.array([0.4729, 0.4375, 0.4306])),
('blue', np.array([0.1866, 0.1285, 0.0611])),
('green', np.array([0.3046, 0.4782, 0.2339])),
('red', np.array([0.5385, 0.3129, 0.1200])),
('yellow', np.array([0.4480, 0.4703, 0.5910])),
('magenta', np.array([0.3635, 0.2325, 0.1977])),
('cyan', np.array([0.1958, 0.2519, 0.1977])),
('white 9.5 (.05 D)', np.array([0.3101, 0.3163, 0.9001])),
('neutral 8 (.23 D)', np.array([0.3101, 0.3163, 0.5910])),
('neutral 6.5 (.44 D)', np.array([0.3101, 0.3163, 0.3620])),
('neutral 5 (.70 D)', np.array([0.3101, 0.3163, 0.1977])),
('neutral 3.5 (1.05 D)', np.array([0.3101, 0.3163, 0.0900])),
('black 2 (1.5 D)', np.array([0.3101, 0.3163, 0.0313])),
))
COLORCHECKER_1976_ILLUMINANT = (
ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['C'])
"""
*ColorChecker 1976* illuminant.
COLORCHECKER_1976_ILLUMINANT : ndarray
"""
COLORCHECKER_1976 = ColourChecker('ColorChecker 1976', COLORCHECKER_1976_DATA,
COLORCHECKER_1976_ILLUMINANT)
"""
*ColourChecker* developed by *McCamy et al.* at Macbeth, a Division of
Kollmorgen.
COLORCHECKER_1976 : ColourChecker
"""
COLORCHECKER_2005_DATA = OrderedDict((
('dark skin', np.array([0.4316, 0.3777, 0.1008])),
('light skin', np.array([0.4197, 0.3744, 0.3495])),
('blue sky', np.array([0.2760, 0.3016, 0.1836])),
('foliage', np.array([0.3703, 0.4499, 0.1325])),
('blue flower', np.array([0.2999, 0.2856, 0.2304])),
('bluish green', np.array([0.2848, 0.3911, 0.4178])),
('orange', np.array([0.5295, 0.4055, 0.3118])),
('purplish blue', np.array([0.2305, 0.2106, 0.1126])),
('moderate red', np.array([0.5012, 0.3273, 0.1938])),
('purple', np.array([0.3319, 0.2482, 0.0637])),
('yellow green', np.array([0.3984, 0.5008, 0.4446])),
('orange yellow', np.array([0.4957, 0.4427, 0.4357])),
('blue', np.array([0.2018, 0.1692, 0.0575])),
('green', np.array([0.3253, 0.5032, 0.2318])),
('red', np.array([0.5686, 0.3303, 0.1257])),
('yellow', np.array([0.4697, 0.4734, 0.5981])),
('magenta', np.array([0.4159, 0.2688, 0.2009])),
('cyan', np.array([0.2131, 0.3023, 0.1930])),
('white 9.5 (.05 D)', np.array([0.3469, 0.3608, 0.9131])),
('neutral 8 (.23 D)', np.array([0.3440, 0.3584, 0.5894])),
('neutral 6.5 (.44 D)', np.array([0.3432, 0.3581, 0.3632])),
('neutral 5 (.70 D)', np.array([0.3446, 0.3579, 0.1915])),
('neutral 3.5 (1.05 D)', np.array([0.3401, 0.3548, 0.0883])),
('black 2 (1.5 D)', np.array([0.3406, 0.3537, 0.0311])),
))
COLORCHECKER_2005_ILLUMINANT = (
ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D50'])
"""
*ColorChecker 2005* illuminant.
COLORCHECKER_2005_ILLUMINANT : ndarray
"""
COLORCHECKER_2005 = ColourChecker('ColorChecker 2005', COLORCHECKER_2005_DATA,
COLORCHECKER_2005_ILLUMINANT)
"""
Reference data from *GretagMacbeth (2005)*.
COLORCHECKER_2005 : ColourChecker
"""
BABELCOLOR_AVERAGE_DATA = OrderedDict((
('dark skin', np.array([0.4325, 0.3788, 0.1034])),
('light skin', np.array([0.4191, 0.3748, 0.3525])),
('blue sky', np.array([0.2761, 0.3004, 0.1847])),
('foliage', np.array([0.3700, 0.4501, 0.1335])),
('blue flower', np.array([0.3020, 0.2877, 0.2324])),
('bluish green', np.array([0.2856, 0.3910, 0.4174])),
('orange', np.array([0.5291, 0.4075, 0.3117])),
('purplish blue', np.array([0.2339, 0.2155, 0.1140])),
('moderate red', np.array([0.5008, 0.3293, 0.1979])),
('purple', np.array([0.3326, 0.2556, 0.0644])),
('yellow green', np.array([0.3989, 0.4998, 0.4435])),
('orange yellow', np.array([0.4962, 0.4428, 0.4358])),
('blue', np.array([0.2040, 0.1696, 0.0579])),
('green', np.array([0.3270, 0.5033, 0.2307])),
('red', np.array([0.5709, 0.3298, 0.1268])),
('yellow', np.array([0.4694, 0.4732, 0.6081])),
('magenta', np.array([0.4177, 0.2704, 0.2007])),
('cyan', np.array([0.2151, 0.3037, 0.1903])),
('white 9.5 (.05 D)', np.array([0.3488, 0.3628, 0.9129])),
('neutral 8 (.23 D)', np.array([0.3451, 0.3596, 0.5885])),
('neutral 6.5 (.44 D)', np.array([0.3446, 0.3590, 0.3595])),
('neutral 5 (.70 D)', np.array([0.3438, 0.3589, 0.1912])),
('neutral 3.5 (1.05 D)', np.array([0.3423, 0.3576, 0.0893])),
('black 2 (1.5 D)', np.array([0.3439, 0.3565, 0.0320])),
))
BABELCOLOR_AVERAGE_ILLUMINANT = (
ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D50'])
"""
*BabelColor Average* illuminant.
BABELCOLOR_AVERAGE_ILLUMINANT : ndarray
"""
BABELCOLOR_AVERAGE = ColourChecker('BabelColor Average',
BABELCOLOR_AVERAGE_DATA,
BABELCOLOR_AVERAGE_ILLUMINANT)
"""
Average data derived from measurements of 30 *ColourChecker* charts.
BABELCOLOR_AVERAGE : ColourChecker
"""
COLORCHECKER24_BEFORE_NOV2014_LAB_DATA = OrderedDict((
('dark skin', np.array([37.986, 13.555, 14.059])),
('light skin', np.array([65.711, 18.13, 17.81])),
('blue sky', np.array([49.927, -4.88, -21.905])),
('foliage', np.array([43.139, -13.095, 21.905])),
('blue flower', np.array([55.112, 8.844, -25.399])),
('bluish green', np.array([70.719, -33.397, -0.199])),
('orange', np.array([62.661, 36.067, 57.096])),
('purplish blue', np.array([40.02, 10.41, -45.964])),
('moderate red', np.array([51.124, 48.239, 16.248])),
('purple', np.array([30.325, 22.976, -21.587])),
('yellow green', np.array([72.532, -23.709, 57.255])),
('orange yellow', np.array([71.941, 19.363, 67.857])),
('blue', np.array([28.778, 14.179, -50.297])),
('green', np.array([55.261, -38.342, 31.37])),
('red', np.array([42.101, 53.378, 28.19])),
('yellow', np.array([81.733, 4.039, 79.819])),
('magenta', np.array([51.935, 49.986, -14.574])),
('cyan', np.array([51.038, -28.631, -28.638])),
('white 9.5 (.05 D)', np.array([96.539, -0.425, 1.186])),
('neutral 8 (.23 D)', np.array([81.257, -0.638, -0.335])),
('neutral 6.5 (.44 D)', np.array([66.766, -0.734, -0.504])),
('neutral 5 (.70 D)', np.array([50.867, -0.153, -0.27])),
('neutral 3.5 (1.05 D)', np.array([35.656, -0.421, -1.231])),
('black 2 (1.5 D)', np.array([20.461, -0.079, -0.973])),
))
"""
*ColorChecker24 - Before November 2014* *CIE L\\*a\\*b\\** colourspace data.
Notes
-----
- *X-Rite* data is given as *CIE L\\*a\\*b\\** colourspace values under
*CIE Illuminant D Series D50* for the
*CIE 1931 2 Degree Standard Observer*.
COLORCHECKER24_BEFORE_NOV2014_LAB_DATA : ndarray
"""
COLORCHECKER24_BEFORE_NOV2014_DATA = OrderedDict(
zip(
COLORCHECKER24_BEFORE_NOV2014_LAB_DATA.keys(),
XYZ_to_xyY(
Lab_to_XYZ(
list(COLORCHECKER24_BEFORE_NOV2014_LAB_DATA.values()),
ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D50']))))
COLORCHECKER24_BEFORE_NOV2014_ILLUMINANT = (
ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D50'])
"""
*ColorChecker24 - Before November 2014* illuminant.
COLORCHECKER24_BEFORE_NOV2014_ILLUMINANT : ndarray
"""
COLORCHECKER24_BEFORE_NOV2014 = ColourChecker(
'ColorChecker24 - Before November 2014',
COLORCHECKER24_BEFORE_NOV2014_DATA,
COLORCHECKER24_BEFORE_NOV2014_ILLUMINANT)
"""
Reference *ColourChecker* data from *X-Rite (2015)*.
Notes
-----
- The rounded *ColorChecker24 - Before November 2014* values should match the
*ColorChecker 2005* values. They are given for reference of the original
*CIE L\\*a\\*b\\** colourspace values.
COLORCHECKER24_BEFORE_NOV2014 : ColourChecker
"""
COLORCHECKER24_AFTER_NOV2014_LAB_DATA = OrderedDict((
('dark skin', np.array([37.54, 14.37, 14.92])),
('light skin', np.array([64.66, 19.27, 17.5])),
('blue sky', np.array([49.32, -3.82, -22.54])),
('foliage', np.array([43.46, -12.74, 22.72])),
('blue flower', np.array([54.94, 9.61, -24.79])),
('bluish green', np.array([70.48, -32.26, -0.37])),
('orange', np.array([62.73, 35.83, 56.5])),
('purplish blue', np.array([39.43, 10.75, -45.17])),
('moderate red', np.array([50.57, 48.64, 16.67])),
('purple', np.array([30.1, 22.54, -20.87])),
('yellow green', np.array([71.77, -24.13, 58.19])),
('orange yellow', np.array([71.51, 18.24, 67.37])),
('blue', np.array([28.37, 15.42, -49.8])),
('green', np.array([54.38, -39.72, 32.27])),
('red', np.array([42.43, 51.05, 28.62])),
('yellow', np.array([81.8, 2.67, 80.41])),
('magenta', np.array([50.63, 51.28, -14.12])),
('cyan', np.array([49.57, -29.71, -28.32])),
('white 9.5 (.05 D)', np.array([95.19, -1.03, 2.93])),
('neutral 8 (.23 D)', np.array([81.29, -0.57, 0.44])),
('neutral 6.5 (.44 D)', np.array([66.89, -0.75, -0.06])),
('neutral 5 (.70 D)', np.array([50.76, -0.13, 0.14])),
('neutral 3.5 (1.05 D)', np.array([35.63, -0.46, -0.48])),
('black 2 (1.5 D)', np.array([20.64, 0.07, -0.46])),
))
"""
*ColorChecker24 - After November 2014* *CIE L\\*a\\*b\\** colourspace data.
Notes
-----
- *X-Rite* data is given as *CIE L\\*a\\*b\\** colourspace values under
*CIE Illuminant D Series D50* for the
*CIE 1931 2 Degree Standard Observer*.
COLORCHECKER24_AFTER_NOV2014_LAB_DATA : ndarray
"""
COLORCHECKER24_AFTER_NOV2014_DATA = OrderedDict(
zip(
COLORCHECKER24_AFTER_NOV2014_LAB_DATA.keys(),
XYZ_to_xyY(
Lab_to_XYZ(
list(COLORCHECKER24_AFTER_NOV2014_LAB_DATA.values()),
ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D50']))))
COLORCHECKER24_AFTER_NOV2014_ILLUMINANT = (
ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D50'])
"""
*ColorChecker24 - After November 2014* illuminant.
COLORCHECKER24_AFTER_NOV2014_ILLUMINANT : ndarray
"""
COLORCHECKER24_AFTER_NOV2014 = ColourChecker(
'ColorChecker24 - After November 2014', COLORCHECKER24_AFTER_NOV2014_DATA,
COLORCHECKER24_AFTER_NOV2014_ILLUMINANT)
"""
Reference *ColourChecker* data from *X-Rite (2015)* and matching the
*ColourChecker* edition after November 2014.
COLORCHECKER24_AFTER_NOV2014 : ColourChecker
"""
COLOURCHECKERS = CaseInsensitiveMapping({
'ColorChecker 1976': COLORCHECKER_1976,
'ColorChecker 2005': COLORCHECKER_2005,
'BabelColor Average': BABELCOLOR_AVERAGE,
'ColorChecker24 - Before November 2014': COLORCHECKER24_BEFORE_NOV2014,
'ColorChecker24 - After November 2014': COLORCHECKER24_AFTER_NOV2014,
})
COLOURCHECKERS.__doc__ = """
Aggregated *ColourCheckers* chromaticity coordinates.
References
----------
:cite:`BabelColor2012b`, :cite:`BabelColor2012c`, :cite:`X-Rite2015`
COLOURCHECKERS : CaseInsensitiveMapping
**{'ColorChecker 1976', 'ColorChecker 2005', 'BabelColor Average',
'ColorChecker24 - Before November 2014',
'ColorChecker24 - After November 2014'}**
Aliases:
- 'babel_average': 'BabelColor Average'
- 'cc2005': 'ColorChecker 2005'
- 'ccb2014': 'ColorChecker24 - Before November 2014'
- 'cca2014': 'ColorChecker24 - After November 2014'
"""
COLOURCHECKERS['babel_average'] = COLOURCHECKERS['BabelColor Average']
COLOURCHECKERS['cc2005'] = COLOURCHECKERS['ColorChecker 2005']
COLOURCHECKERS['ccb2014'] = COLOURCHECKERS[
'ColorChecker24 - Before November 2014']
COLOURCHECKERS['cca2014'] = COLOURCHECKERS[
'ColorChecker24 - After November 2014']
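if __name__ == '__main__':
    # Illustrative lookup: the 'cc2005' alias resolves to the 'ColorChecker 2005'
    # chart, whose samples are keyed by patch name.
    name, data, illuminant = COLOURCHECKERS['cc2005']
    print(name, data['neutral 5 (.70 D)'], illuminant)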
| 39.7875 | 79 | 0.650958 |
c6e8ba5c114096412e99e495ac1f996a6927627c | 6,556 | py | Python | main.py | Starfunk/Climate-Game-Genetic-Algorithm | 89b4f9d4e3b612c589abc3317779a7144fa32c0b | [
"MIT"
] | null | null | null | main.py | Starfunk/Climate-Game-Genetic-Algorithm | 89b4f9d4e3b612c589abc3317779a7144fa32c0b | [
"MIT"
] | null | null | null | main.py | Starfunk/Climate-Game-Genetic-Algorithm | 89b4f9d4e3b612c589abc3317779a7144fa32c0b | [
"MIT"
] | null | null | null |
#I employ a genetic algorithm to try to find the collective optimal outcome of the climate game. The climate game is based on
#the paper: "The collective-risk social dilemma and the prevention of simulated dangerous climate change" written by
#Milinski et al. - http://www.pnas.org/content/105/7/2291. I also drew inspiration from Wang & and Zhang's paper:
#Agent-Based Modeling and Genetic Algorithm Simulation for the Climate Game Problem
#- https://www.hindawi.com/journals/mpe/2012/709473/. I hope to be able to rewrite this program in Rust soon -
#as soon as I get a laptop.
import random
#Generate a random investment strategy
def generateIndividualStrategy():
    number_of_rounds = 10
    individual_investment = []
    for _i in range(number_of_rounds):
        individual_investment.append(investmentAmount())
    total_contribution = calculateContribution(individual_investment)
    individual_investment.append(total_contribution)
    #Amount left in agent's account after investing
    account_balance = 40 - total_contribution
    individual_investment.append(account_balance)
    return individual_investment
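#For illustration, one strategy returned above might look like
#[0, 2, 4, 2, 0, 4, 2, 2, 0, 4, 20, 20]: ten per-round investments,
#followed by the cached total contribution (index 10) and the remaining
#account balance of 40 - total (index 11).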
#Generate a random investment amount: $0, $2, or $4
def investmentAmount():
rand_num = random.randint(0, 2);
if rand_num == 0:
return 0
if rand_num == 1:
return 2
if rand_num == 2:
return 4
#Generate a group of 6 individuals in an array
def generateInvestmentGroup():
population = []
for _i in range(6):
population.append(generateIndividualStrategy())
return population
#Generate the initial population of 10 investment games, each with 6 individual strategies
def generateInitialPopulation():
population = []
for _i in range(10):
population.append(generateInvestmentGroup())
return population
#Sums the total contributions made to the climate account for 1 agent
def calculateContribution(individualInvestment):
    contribution = 0
    #Sum all 10 per-round investments (list indices 0 through 9)
    for i in individualInvestment[0:10]:
        contribution = contribution + i
    return contribution
#Accepts an array of arrays - i.e. an investment round made up of 6 individual investment strategies
def calculateClimateAccountTotal(investment_game):
climate_account = 0
for individual_contribution in investment_game:
climate_account = climate_account + individual_contribution[10]
return climate_account
#Roll a d10; if the number is less than or equal to punish_probability, the agent loses everything
def punish(investment_game, punish_probability):
for individual_investment in investment_game:
punish_roll = random.randint(1, 10)
if punish_roll <= punish_probability:
individual_investment[11] = 0
#This re-calculates the cached total contribution (index 10) and account
#balance (index 11) of every strategy, keeping them in sync after crossover and mutation
def resetScore(investment_game):
    for individual_investment in investment_game:
        total_contribution = calculateContribution(individual_investment)
        account_balance = 40 - total_contribution
        individual_investment[10] = total_contribution
        individual_investment[11] = account_balance
#This function sums each agent's account balance and returns the net gains of all the agents in one full game of investing.
#This also serves as the fitness function for an investment game. I.e. an investment game is considered more fit the higher
#the net balance after punishment is.
def publishNetAccountBalance(investment_game):
net_balance = 0
for individual_investment in investment_game:
net_balance = net_balance + individual_investment[11]
return net_balance
#We use single point crossover to create two children investment games. We then return a tuple of the two lists,
#which we can unpack outside of the function
def crossover(investment_game1, investment_game2):
    crossover_point = random.randint(1, 5)
    #The two slices must cover all 6 players between them
    slice1 = investment_game1[0:crossover_point]
    slice2 = investment_game1[crossover_point:6]
    slice3 = investment_game2[0:crossover_point]
    slice4 = investment_game2[crossover_point:6]
    child1 = slice1 + slice4
    child2 = slice3 + slice2
    return (child1, child2)
def mutation(child1, child2):
    mutation_roll1 = random.randint(1, 10)
    mutation_roll2 = random.randint(1, 10)
    #Mutate a valid player index (0-5) and round index (0-9) so the cached
    #totals at indices 10 and 11 are never overwritten
    if mutation_roll1 == 1:
        child1[random.randint(0, 5)][random.randint(0, 9)] = investmentAmount()
    if mutation_roll2 == 1:
        child2[random.randint(0, 5)][random.randint(0, 9)] = investmentAmount()
    return (child1, child2)
#Replaces the bottom two worst performing investment games with the children of the most fit investment games
def replaceIndividuals(population, child1, child2):
#The population has already been sorted. So the weakest games take positions 0 and 1.
population[0] = child1
population[1] = child2
return population
#Punishment threshold for the d10 roll: an agent loses everything with probability _punish_probability/10 if the climate goal is not reached
_punish_probability = 3
#Main program starts here
initial_population = generateInitialPopulation()
for i in initial_population:
if calculateClimateAccountTotal(i) < 120:
punish(i, _punish_probability)
init_pop_sorted = sorted(initial_population, key = publishNetAccountBalance)
print(calculateClimateAccountTotal(init_pop_sorted[9]))
print(calculateClimateAccountTotal(init_pop_sorted[8]))
print(calculateClimateAccountTotal(init_pop_sorted[7]))
print(calculateClimateAccountTotal(init_pop_sorted[6]))
#Now reporting the values - CHANGE INDEX IF INDEX ERROR IS POPPING UP
for i in init_pop_sorted:
counter = 1
print("Game " + str(counter) + ":")
print(publishNetAccountBalance(i))
for i in init_pop_sorted:
init_pop_start = resetScore(i)
most_fit1 = init_pop_sorted[9]
most_fit2 = init_pop_sorted[8]
children = crossover(most_fit1, most_fit2)
children_mutated = mutation(children[0], children[1])
init_pop_morph = replaceIndividuals(init_pop_sorted, children_mutated[0], children_mutated[1])
for i in init_pop_morph:
if calculateClimateAccountTotal(i) < 120:
punish(i, _punish_probability)
init_pop_sorted = sorted(init_pop_morph, key = publishNetAccountBalance)
for i in init_pop_sorted:
counter = 2
print("Game " + str(counter) + ":")
print(publishNetAccountBalance(i))
counter = 2
#Simulating 1000 rounds of evolution
for i in range(1000):
for i in init_pop_sorted:
init_pop_start = resetScore(i)
most_fit1 = init_pop_sorted[9]
most_fit2 = init_pop_sorted[8]
children = crossover(most_fit1, most_fit2)
    children_mutated = mutation(children[0], children[1])
init_pop_morph = replaceIndividuals(init_pop_sorted, children_mutated[0], children_mutated[1])
for i in init_pop_morph:
if calculateClimateAccountTotal(i) < 120:
punish(i, _punish_probability)
init_pop_sorted = sorted(init_pop_morph, key = publishNetAccountBalance)
for i in init_pop_sorted:
print("Game " + str(counter) + ":")
print(publishNetAccountBalance(i))
counter = counter + 1
| 33.44898 | 126 | 0.78432 |
788e2a246645ea24b4c95c7c190c73a91412bc41 | 19,746 | py | Python | opm/patch_manager.py | joseagraz/OPM | efb680528f3a3dbf570f4fb12571ed0bc78b4fa9 | [
"BSD-3-Clause"
] | null | null | null | opm/patch_manager.py | joseagraz/OPM | efb680528f3a3dbf570f4fb12571ed0bc78b4fa9 | [
"BSD-3-Clause"
] | null | null | null | opm/patch_manager.py | joseagraz/OPM | efb680528f3a3dbf570f4fb12571ed0bc78b4fa9 | [
"BSD-3-Clause"
] | null | null | null | import concurrent.futures
import os
from functools import partial
from .patch import Patch
from .utils import get_patch_class_proportions
import numpy as np
import openslide
from tqdm import tqdm
class PatchManager:
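    """Coordinates patch extraction from a whole-slide image (WSI) and, optionally, a matching label map."""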
def __init__(self, filename):
"""
Initialization for PatchManager
@param filename: name of main WSI.
"""
self.patches = list()
self.path = filename
self.slide_object = openslide.open_slide(filename)
self.slide_dims = openslide.open_slide(self.path).dimensions
self.slide_folder = filename[:filename.rindex(".")].split("/")[-1] + "/"
self.valid_mask = None
self.mined_mask = None
self.valid_mask_scale = (0, 0)
self.valid_patch_checks = []
self.label_map = None
self.label_map_object = None
self.label_map_folder = None
self.label_map_patches = list()
self.subjectID = None
self.save_subjectID = False
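    # Optional subject identifier; when set, it is written as the first column of the output CSV.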
def set_subjectID(self, subjectID):
self.subjectID = str(subjectID)
self.save_subjectID = True
def set_slide_path(self, filename):
self.path = filename
def set_label_map(self, path):
"""
Add associated label map to Patch Manager.
@param path: path to label map.
"""
self.label_map = path
self.label_map_object = openslide.open_slide(path)
assert all(x == y for x, y in zip(self.label_map_object.dimensions, self.slide_object.dimensions)), \
"Label map must have same dimensions as main slide."
self.label_map_folder = path[:path.rindex(".")].split("/")[-1] + "/"
def set_valid_mask(self, mask, scale=(1, 1)):
self.valid_mask = mask
self.mined_mask = np.zeros_like(mask)
self.valid_mask_scale = scale
def add_patch(self, patch, overlap_factor, patch_size):
"""
Add patch to manager and take care of self.mined_mask update so it doesn't pull the same patch twice.
        This method works by first finding the coordinates of the upper-left corner of the patch. It then multiplies the
inverse overlap factor (0 = full overlap -> 1 = no overlap) by the patch dimensions to find the region that
should be excluded from being called again. It does this from the top-left to bottom-right of the coordinate to
include all space that would cause a patch overlap.
TODO: Rework the math so the (x,y) coordinate is the center of the patch-- not the top left.
        :param patch: Patch object to add to the set of patches
        :param overlap_factor: allowed patch overlap (1 = full overlap allowed, 0 = no overlap)
        :param patch_size: (width, height) of the patch in pixels
        :return: True if the patch was added successfully, False otherwise
"""
try:
# Set inverse overlap factor
inverse_overlap_factor = 1 - overlap_factor
# Find the coordinates on the valid mask, make sure to scale
valid_start_x = int(round(
(patch.coordinates[0] - int(round((patch_size[0] + 1) * inverse_overlap_factor))) /
self.valid_mask_scale[0]))
valid_start_y = int(round(
(patch.coordinates[1] - int(round((patch_size[1] + 1) * inverse_overlap_factor))) /
self.valid_mask_scale[1]))
# If the user specifies anything other than 100% overlap being okay, update the valid mask to remove an
# already called region.
if overlap_factor != 1:
# (bounds checking)
valid_end_x = int(round(
(patch.coordinates[0] + int(round(patch_size[0] * inverse_overlap_factor))) / self.valid_mask_scale[
0]))
valid_end_y = int(round(
(patch.coordinates[1] + int(round(patch_size[1] * inverse_overlap_factor))) / self.valid_mask_scale[
1]))
# Set the valid mask values to False so a coordinate that would cause overlap cannot be called later.
self.valid_mask[
max(valid_start_x, 0):self.width_bound_check(valid_end_x),
max(valid_start_y, 0):self.height_bound_check(valid_end_y)
] = False
else:
# If the user is okay with 100% overlap, just remove the single pixel of the coordinate.
self.valid_mask[
valid_start_x, valid_start_y] = False # Change only the starting index
mined_start_x = int(round((patch.coordinates[0]) / self.valid_mask_scale[0]))
mined_start_y = int(round((patch.coordinates[1]) / self.valid_mask_scale[1]))
mined_end_x = int(round((patch.coordinates[0] + patch_size[0]) / self.valid_mask_scale[0]))
mined_end_y = int(round((patch.coordinates[1] + patch_size[1]) / self.valid_mask_scale[1]))
# Update the mined mask
self.mined_mask[
max(mined_start_x, 0):self.width_bound_check(mined_end_x),
max(mined_start_y, 0):self.width_bound_check(mined_end_y)
] = True
# Append this patch to the list of patches to be saved
self.patches.append(patch)
return True
except Exception as e:
# If it fails for any reason, print the exception and return
print("Exception thrown when adding patch:", e)
return False
def find_next_patch(self, patch_size, read_type, overlap_factor):
"""
Select the next patch location.
@return: True if patch is successfully saved, False if not.
"""
# If there is no valid mask, select anywhere on the slide.
if self.valid_mask is None:
# Find indices on filled mask, then multiply by real scale to get actual coordinates
x_value = np.random.choice(self.slide_dims[0], 1)
y_value = np.random.choice(self.slide_dims[1], 1)
coordinates = np.array([x_value, y_value])
patch = Patch(self.path, self.slide_object, self, coordinates, 0, patch_size, "_patch@{}-{}.png")
return self.add_patch(patch, overlap_factor, patch_size)
else:
# Find indices on filled mask, then multiply by real scale to get actual coordinates
try:
indices = np.argwhere(self.valid_mask)
# (X/Y get reversed because OpenSlide and np use reversed height/width indexing)
x_values = np.round(indices[:, 0] * self.valid_mask_scale[0]).astype(int)
y_values = np.round(indices[:, 1] * self.valid_mask_scale[1]).astype(int)
num_indices = len(indices.ravel()) // 2
print("%i indices left " % num_indices, end="\r")
# Find index of coordinates to select for patch
if read_type == 'random':
choice = np.random.choice(num_indices, 1)
elif read_type == 'sequential':
choice = 0
else:
choice = -1
print("Unrecognized read type %s" % read_type)
exit(1)
coordinates = np.array([x_values[choice], y_values[choice]]).ravel()
patch = Patch(slide_path=self.path,
slide_object=self.slide_object,
manager=self,
coordinates=coordinates,
level=0,
size=patch_size,
output_suffix="_patch@{}-{}.png")
return self.add_patch(patch, overlap_factor, patch_size)
except Exception as e:
print("Exception thrown when adding next patch:")
print(e)
return False
def remove_patch(self, patch):
return self.patches.remove(patch)
def height_bound_check(self, num):
return min(num, self.slide_dims[0])
def width_bound_check(self, num):
return min(num, self.slide_dims[1])
def add_patch_criteria(self, patch_validity_check):
"""
Add check for if the patch can be saved.
@param patch_validity_check: A function that takes only an image as an argument and returns True if the patch
passes the check, False if the patch should be rejected.
"""
self.valid_patch_checks.append(patch_validity_check)
def mine_patches(self, output_directory, config, output_csv=None):
"""
Main loop of the program. This generates patch locations and attempts to save them until the slide is either
saturated or the quota has been met.
This is essentially a large loop that takes the following steps:
[LOOP START]
1) Find potential patch coordinates from self.valid_mask
- Adds each patch to self.patches
2) Read and save all patches stored in self.patches
- If patch CANNOT be saved, return [False, Patch, ""]
> this is due to either being rejected by methods added by add_patch_criteria or an error.
- If patch WAS saved, return [False, Patch, patch_processor(patch)]
IF label_map is provided:
3) Pull successfully saved slide patches from corresponding label map locations.
4) Save all pulled label map patches
- this does NOT check patches for validity
IF slide is saturated ==> EXIT LOOP
IF quota is met ==> EXIT LOOP
[REPEAT LOOP]
        @param output_directory: Main directory which contains a folder for slide and label map patches (+ csv).
        @param config: Dictionary of extraction options; the keys used here are 'num_patches' (an int, or -1 to mine
            until exhaustion), 'num_workers' (number of threads to launch), 'save_patches' ('dummy' run if False),
            'value_map' (dictionary for value swapping), 'patch_size', 'read_type' and 'overlap_factor'.
        @param output_csv: The path of the output .csv to write. If none specified, put it in the output folder.
        """
n_patches = config['num_patches']
n_jobs = config['num_workers']
save = config['save_patches']
value_map = config['value_map']
if output_csv is None:
print("Creating output csv")
csv_filename = output_directory + self.path[:self.path.rindex(".")].split("/")[-1] + ".csv"
else:
csv_filename = output_csv
output = open(csv_filename, "a")
if self.save_subjectID:
output.write("SubjectID,")
if self.label_map is not None:
output.write("SlidePatchPath,LabelMapPatchPath,PatchComposition\n")
else:
output.write("Slide Patch path\n")
if n_patches == -1:
n_patches = np.Inf
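        # A quota of -1 means "mine until the slide is saturated".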
_save_patch_partial = partial(_save_patch,
output_directory=output_directory,
save=save,
check_if_valid=True)
n_completed = 0
saturated = False
while n_patches - n_completed > 0 and not saturated:
# If there is a patch quota given...
if n_patches != np.Inf:
# Generate feasible patches until quota is reached or error is raised
for _ in range(n_patches - n_completed):
if not self.find_next_patch(patch_size=config['patch_size'],
read_type=config['read_type'],
overlap_factor=config['overlap_factor']):
print("\nCould not add new patch, breaking.")
break
else:
print("") # Fixes spacing in case it breaks. Inelegant but I'll fix later
# If the quota is not met, assume the slide is saturated
if len(self.patches) != n_patches - n_completed:
print("Slide has reached saturation: No more non-overlapping patches to be found.\n"
"Change SHOW_MINED in config.py to True to see patch locations.\n"
"Alternatively, change READ_TYPE to 'sequential' for greater mining effiency.")
saturated = True
# If there is no patch quota given, add patches until saturation.
else:
while True:
if not self.find_next_patch(patch_size=config['patch_size'],
read_type=config['read_type'],
overlap_factor=config['overlap_factor']):
print("\nCould not add new patch, breaking.")
break
if len(self.patches) != n_patches - n_completed:
print("Slide has reached saturation: No more non-overlapping patches to be found.\n"
"Change SHOW_MINED in config.py to True to see patch locations.\n"
"Alternatively, change READ_TYPE to 'sequential' for greater mining effiency.")
saturated = True
os.makedirs(output_directory + self.slide_folder, exist_ok=True)
print("Saving patches:")
with concurrent.futures.ThreadPoolExecutor(n_jobs) as executor:
np_slide_futures = list(
tqdm(
executor.map(_save_patch_partial, self.patches),
total=len(self.patches),
unit="pchs",
)
)
self.patches = list()
np_slide_futures = np.array(np_slide_futures)
try:
successful_indices = np.argwhere(np_slide_futures[:, 0]).ravel()
except Exception as e:
print(e)
print("Setting successful indices to []")
successful_indices = []
# Find all successfully saved patches, copy and extract from label map.
if self.label_map is not None:
_lm_save_patch_partial = partial(_save_patch,
output_directory=output_directory,
save=save,
check_if_valid=False,
patch_processor=get_patch_class_proportions,
value_map=value_map)
for i in successful_indices:
slide_patch = np_slide_futures[i, 1]
lm_patch = self.pull_from_label_map(slide_patch)
self.label_map_patches.append(lm_patch)
print("Saving label maps:")
os.makedirs(output_directory + self.label_map_folder, exist_ok=True)
with concurrent.futures.ThreadPoolExecutor(config['num_workers']) as executor:
lm_futures = list(
tqdm(
executor.map(_lm_save_patch_partial, self.label_map_patches),
total=len(self.label_map_patches),
unit="pchs",
)
)
np_lm_futures = np.array(lm_futures)
successful = np.count_nonzero(np_slide_futures[:, 0])
print("{}/{} valid patches found in this run.".format(successful, n_patches))
n_completed += successful
for index in successful_indices:
if self.save_subjectID:
output.write(self.subjectID + ",")
if self.label_map is not None:
slide_patch_path = np_slide_futures[index, 1].get_patch_path(output_directory)
lm_patch_path = np_lm_futures[index, 1].get_patch_path(output_directory)
lm_result = np_lm_futures[index, 2]
output.write("{},{},{}\n".format(slide_patch_path, lm_patch_path, lm_result))
else:
path_path = np_slide_futures[index, 1].get_patch_path(output_directory)
output.write("{}\n".format(path_path))
output.close()
print("Done!")
def save_predefined_patches(self, output_directory, patch_coord_csv, config):
"""
@param output_directory:
@param patch_coord_csv:
@param value_map:
@param n_jobs:
@return:
"""
value_map = config['value_map']
patch_size = config['patch_size']
n_jobs = config['num_workers']
# Todo, port to pandas or something more sophisticated?
with open(patch_coord_csv, "r") as input_csv:
for line in input_csv:
try:
x, y = [int(val) for val in line.split(",")]
os.makedirs(output_directory + self.slide_folder, exist_ok=True)
_save_patch_partial = partial(_save_patch, output_directory=output_directory,
save=True,
check_if_valid=False)
patch = Patch(self.path, self.slide_object, self, [x, y], 0, patch_size, "_patch@{}-{}.png")
self.patches.append(patch)
if self.label_map is not None:
lm_patch = self.pull_from_label_map(patch)
self.label_map_patches.append(lm_patch)
except Exception as e:
print(e)
os.makedirs(output_directory + self.slide_folder, exist_ok=True)
print("Saving slide patches:")
with concurrent.futures.ThreadPoolExecutor(n_jobs) as executor:
list(
tqdm(
executor.map(_save_patch_partial, self.patches),
total=len(self.patches),
unit="pchs",
)
)
print("Saving label maps:")
if self.label_map is not None:
os.makedirs(output_directory + self.label_map_folder, exist_ok=True)
_lm_save_patch_partial = partial(_save_patch,
output_directory=output_directory,
save=True,
check_if_valid=False,
patch_processor=get_patch_class_proportions,
value_map=value_map)
with concurrent.futures.ThreadPoolExecutor(n_jobs) as executor:
list(
tqdm(
executor.map(_lm_save_patch_partial, self.label_map_patches),
total=len(self.label_map_patches),
unit="pchs",
)
)
def pull_from_label_map(self, slide_patch):
"""
Copy a patch from the slide and use the coordinates to pull a corresponding patch from the LM.
@param slide_patch:
@return:
"""
lm_patch = slide_patch.copy()
lm_patch.set_slide(self.label_map)
lm_patch.slide_object = self.label_map_object
lm_patch.output_suffix = "_patch@{}-{}_LM.png"
return lm_patch
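# Module-level helper mapped over patches by the thread pool (via functools.partial); it simply forwards to Patch.save().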
def _save_patch(patch, output_directory, save, check_if_valid=True, patch_processor=None, value_map=None):
return patch.save(out_dir=output_directory,
save=save,
check_if_valid=check_if_valid,
process_method=patch_processor,
value_map=value_map)
| 46.461176 | 120 | 0.560822 |
e47f1a28b591d5fb5182f565e761df72734131f8 | 8,408 | py | Python | utils/caffe_layers/proposal_target_layer.py | cgiliberto/RESDOG | 5e2603251d8673a9360211b57a51177af63def17 | [
"MIT"
] | 1 | 2020-04-03T09:14:48.000Z | 2020-04-03T09:14:48.000Z | utils/caffe_layers/proposal_target_layer.py | cgiliberto/RESDOG | 5e2603251d8673a9360211b57a51177af63def17 | [
"MIT"
] | 15 | 2020-01-28T21:50:59.000Z | 2022-03-11T23:19:14.000Z | utils/caffe_layers/proposal_target_layer.py | cgiliberto/RESDOG | 5e2603251d8673a9360211b57a51177af63def17 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
#import caffe
import yaml
import numpy as np
import numpy.random as npr
from utils.caffe_layers.default_config import cfg
from utils.rpn.bbox_transform import bbox_transform
from utils.cython_modules.cython_bbox import bbox_overlaps
DEBUG = False
class ProposalTargetLayer(): #caffe.Layer):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
"""
def set_param_str(self, param_str):
self.param_str_ = param_str
def set_deterministic_mode(self, mode = True):
        self._deterministic_mode = mode
def setup(self, bottom, top):
layer_params = yaml.load(self.param_str_)
self._num_classes = layer_params['num_classes']
        self._deterministic_mode = False
# sampled rois (0, x1, y1, x2, y2)
#top[0].reshape(1, 5)
# labels
#top[1].reshape(1, 1)
# bbox_targets
#top[2].reshape(1, self._num_classes * 4)
# bbox_inside_weights
#top[3].reshape(1, self._num_classes * 4)
# bbox_outside_weights
#top[4].reshape(1, self._num_classes * 4)
def forward(self, bottom, top):
# Proposal ROIs (0, x1, y1, x2, y2) coming from RPN
# (i.e., rpn.proposal_layer.ProposalLayer), or any other source
all_rois = bottom[0] #.data
# GT boxes (x1, y1, x2, y2, label)
# TODO(rbg): it's annoying that sometimes I have extra info before
# and other times after box coordinates -- normalize to one format
gt_boxes = bottom[1] #.data
# Include ground-truth boxes in the set of candidate rois
zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)
all_rois = np.vstack(
(all_rois, np.hstack((zeros, gt_boxes[:, :-1])))
)
# Sanity check: single batch only
assert np.all(all_rois[:, 0] == 0), \
'Only single item batches are supported'
#num_images = 1
#rois_per_image = int(cfg.TRAIN.BATCH_SIZE / num_images)
rois_per_image = cfg.TRAIN.BATCH_SIZE
fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image).astype(int)
# Sample rois with classification labels and bounding box regression
# targets
labels, rois, bbox_targets, bbox_inside_weights = _sample_rois(
all_rois, gt_boxes, fg_rois_per_image,
rois_per_image, self._num_classes,
            deterministic=self._deterministic_mode)
if DEBUG:
print('num fg: {}'.format((labels > 0).sum()))
print('num bg: {}'.format((labels == 0).sum()))
self._count += 1
self._fg_num += (labels > 0).sum()
self._bg_num += (labels == 0).sum()
print('num fg avg: {}'.format(self._fg_num / self._count))
print('num bg avg: {}'.format(self._bg_num / self._count))
print('ratio: {:.3f}'.format(float(self._fg_num) / float(self._bg_num)))
return rois, labels, bbox_targets, bbox_inside_weights
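        # The commented-out assignments below preserve the original Caffe top-blob reshaping for reference; they are unreachable after the return above.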
# sampled rois
#top[0].reshape(*rois.shape)
#top[0].data[...] = rois
# classification labels
#top[1].reshape(*labels.shape)
#top[1].data[...] = labels
# bbox_targets
#top[2].reshape(*bbox_targets.shape)
#top[2].data[...] = bbox_targets
# bbox_inside_weights
#top[3].reshape(*bbox_inside_weights.shape)
#top[3].data[...] = bbox_inside_weights
# bbox_outside_weights
#top[4].reshape(*bbox_inside_weights.shape)
#top[4].data[...] = np.array(bbox_inside_weights > 0).astype(np.float32)
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def _get_bbox_regression_labels(bbox_target_data, num_classes):
"""Bounding-box regression targets (bbox_target_data) are stored in a
compact form N x (class, tx, ty, tw, th)
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets).
Returns:
bbox_target (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
clss = bbox_target_data[:, 0].astype(int)
bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
def _compute_targets(ex_rois, gt_rois, labels):
"""Compute bounding-box regression targets for an image."""
assert ex_rois.shape[0] == gt_rois.shape[0]
assert ex_rois.shape[1] == 4
assert gt_rois.shape[1] == 4
targets = bbox_transform(ex_rois, gt_rois)
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS))
/ np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))
return np.hstack(
(labels[:, np.newaxis], targets)).astype(np.float32, copy=False)
def _sample_rois(all_rois, gt_boxes, fg_rois_per_image, rois_per_image, num_classes, deterministic=False):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
# overlaps: (rois x gt_boxes)
overlaps = bbox_overlaps(
np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),
np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))
gt_assignment = overlaps.argmax(axis=1)
max_overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
if deterministic:
fg_inds = fg_inds[:fg_rois_per_this_image]
else:
fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((max_overlaps < cfg.TRAIN.BG_THRESH_HI) &
(max_overlaps >= cfg.TRAIN.BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)
# Sample background regions without replacement
if bg_inds.size > 0:
if deterministic:
bg_inds = bg_inds[:bg_rois_per_this_image]
else:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_this_image:] = 0
rois = all_rois[keep_inds]
bbox_target_data = _compute_targets(
rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], labels)
bbox_targets, bbox_inside_weights = \
_get_bbox_regression_labels(bbox_target_data, num_classes)
return labels, rois, bbox_targets, bbox_inside_weights
| 39.660377 | 107 | 0.633801 |
727cafedc80ae8e7e6968b8bdb44de73247b7015 | 11,032 | py | Python | tests/validation/test_known_directives.py | patrys/graphql-core-next | f6b078bddae4dd8a48ebf878a6fd5fbbac52bd1b | [
"MIT"
] | null | null | null | tests/validation/test_known_directives.py | patrys/graphql-core-next | f6b078bddae4dd8a48ebf878a6fd5fbbac52bd1b | [
"MIT"
] | 3 | 2021-03-20T05:19:34.000Z | 2021-06-02T03:27:28.000Z | tests/validation/test_known_directives.py | patrys/graphql-core-next | f6b078bddae4dd8a48ebf878a6fd5fbbac52bd1b | [
"MIT"
] | null | null | null | from functools import partial
from graphql.utilities import build_schema
from graphql.validation import KnownDirectivesRule
from graphql.validation.rules.known_directives import (
unknown_directive_message,
misplaced_directive_message,
)
from .harness import expect_fails_rule, expect_passes_rule, expect_sdl_errors_from_rule
expect_sdl_errors = partial(expect_sdl_errors_from_rule, KnownDirectivesRule)
def unknown_directive(directive_name, line, column):
return {
"message": unknown_directive_message(directive_name),
"locations": [(line, column)],
}
def misplaced_directive(directive_name, placement, line, column):
return {
"message": misplaced_directive_message(directive_name, placement),
"locations": [(line, column)],
}
schema_with_sdl_directives = build_schema(
"""
directive @onSchema on SCHEMA
directive @onScalar on SCALAR
directive @onObject on OBJECT
directive @onFieldDefinition on FIELD_DEFINITION
directive @onArgumentDefinition on ARGUMENT_DEFINITION
directive @onInterface on INTERFACE
directive @onUnion on UNION
directive @onEnum on ENUM
directive @onEnumValue on ENUM_VALUE
directive @onInputObject on INPUT_OBJECT
directive @onInputFieldDefinition on INPUT_FIELD_DEFINITION
"""
)
def describe_known_directives():
def with_no_directives():
expect_passes_rule(
KnownDirectivesRule,
"""
query Foo {
name
...Frag
}
fragment Frag on Dog {
name
}
""",
)
def with_known_directives():
expect_passes_rule(
KnownDirectivesRule,
"""
{
dog @include(if: true) {
name
}
human @skip(if: false) {
name
}
}
""",
)
def with_unknown_directive():
expect_fails_rule(
KnownDirectivesRule,
"""
{
dog @unknown(directive: "value") {
name
}
}
""",
[unknown_directive("unknown", 3, 19)],
)
def with_many_unknown_directives():
expect_fails_rule(
KnownDirectivesRule,
"""
{
dog @unknown(directive: "value") {
name
}
human @unknown(directive: "value") {
name
pets @unknown(directive: "value") {
name
}
}
}
""",
[
unknown_directive("unknown", 3, 19),
unknown_directive("unknown", 6, 21),
unknown_directive("unknown", 8, 22),
],
)
def with_well_placed_directives():
expect_passes_rule(
KnownDirectivesRule,
"""
query Foo($var: Boolean) @onQuery {
name @include(if: $var)
...Frag @include(if: true)
skippedField @skip(if: true)
...SkippedFrag @skip(if: true)
}
mutation Bar @onMutation {
someField
}
""",
)
def with_well_placed_variable_definition_directive():
expect_passes_rule(
KnownDirectivesRule,
"""
query Foo($var: Boolean @onVariableDefinition) {
name
}
""",
)
def with_misplaced_directives():
expect_fails_rule(
KnownDirectivesRule,
"""
query Foo($var: Boolean) @include(if: true) {
name @onQuery @include(if: $var)
...Frag @onQuery
}
mutation Bar @onQuery {
someField
}
""",
[
misplaced_directive("include", "query", 2, 38),
misplaced_directive("onQuery", "field", 3, 20),
misplaced_directive("onQuery", "fragment spread", 4, 23),
misplaced_directive("onQuery", "mutation", 7, 26),
],
)
def with_misplaced_variable_definition_directive():
expect_fails_rule(
KnownDirectivesRule,
"""
query Foo($var: Boolean @onField) {
name
}
""",
[misplaced_directive("onField", "variable definition", 2, 37)],
)
def describe_within_sdl():
def with_directive_defined_inside_sdl():
assert (
expect_sdl_errors(
"""
type Query {
foo: String @test
}
directive @test on FIELD_DEFINITION
"""
)
== []
)
def with_standard_directive():
assert (
expect_sdl_errors(
"""
type Query {
foo: String @deprecated
}
"""
)
== []
)
def with_overridden_standard_directive():
assert (
expect_sdl_errors(
"""
schema @deprecated {
query: Query
}
directive @deprecated on SCHEMA
"""
)
== []
)
def with_directive_defined_in_schema_extension():
schema = build_schema(
"""
type Query {
foo: String
}
"""
)
assert (
expect_sdl_errors(
"""
directive @test on OBJECT
extend type Query @test
""",
schema,
)
== []
)
def with_directive_used_in_schema_extension():
schema = build_schema(
"""
directive @test on OBJECT
type Query {
foo: String
}
"""
)
assert (
expect_sdl_errors(
"""
extend type Query @test
""",
schema,
)
== []
)
def with_unknown_directive_in_schema_extension():
schema = build_schema(
"""
type Query {
foo: String
}
"""
)
assert (
expect_sdl_errors(
"""
extend type Query @unknown
""",
schema,
)
== [unknown_directive("unknown", 2, 39)]
)
def with_well_placed_directives():
assert (
expect_sdl_errors(
"""
type MyObj implements MyInterface @onObject {
myField(myArg: Int @onArgumentDefinition): String @onFieldDefinition
}
extend type MyObj @onObject
scalar MyScalar @onScalar
extend scalar MyScalar @onScalar
interface MyInterface @onInterface {
myField(myArg: Int @onArgumentDefinition): String @onFieldDefinition
}
extend interface MyInterface @onInterface
union MyUnion @onUnion = MyObj | Other
extend union MyUnion @onUnion
enum MyEnum @onEnum {
MY_VALUE @onEnumValue
}
extend enum MyEnum @onEnum
input MyInput @onInputObject {
myField: Int @onInputFieldDefinition
}
extend input MyInput @onInputObject
schema @onSchema {
query: MyQuery
}
extend schema @onSchema
""", # noqa: E501
schema_with_sdl_directives,
)
== []
)
def with_misplaced_directives():
assert (
expect_sdl_errors(
"""
type MyObj implements MyInterface @onInterface {
myField(myArg: Int @onInputFieldDefinition): String @onInputFieldDefinition
}
scalar MyScalar @onEnum
interface MyInterface @onObject {
myField(myArg: Int @onInputFieldDefinition): String @onInputFieldDefinition
}
union MyUnion @onEnumValue = MyObj | Other
enum MyEnum @onScalar {
MY_VALUE @onUnion
}
input MyInput @onEnum {
myField: Int @onArgumentDefinition
}
schema @onObject {
query: MyQuery
}
extend schema @onObject
""", # noqa: E501
schema_with_sdl_directives,
)
== [
misplaced_directive("onInterface", "object", 2, 55),
misplaced_directive(
"onInputFieldDefinition", "argument definition", 3, 42
),
misplaced_directive(
"onInputFieldDefinition", "field definition", 3, 75
),
misplaced_directive("onEnum", "scalar", 6, 37),
misplaced_directive("onObject", "interface", 8, 43),
misplaced_directive(
"onInputFieldDefinition", "argument definition", 9, 42
),
misplaced_directive(
"onInputFieldDefinition", "field definition", 9, 75
),
misplaced_directive("onEnumValue", "union", 12, 35),
misplaced_directive("onScalar", "enum", 14, 33),
misplaced_directive("onUnion", "enum value", 15, 32),
misplaced_directive("onEnum", "input object", 18, 35),
misplaced_directive(
"onArgumentDefinition", "input field definition", 19, 36
),
misplaced_directive("onObject", "schema", 22, 28),
misplaced_directive("onObject", "schema", 26, 35),
]
)
| 29.340426 | 97 | 0.443709 |
1bdc1d9af32c53f78e07571fd585d68115870d8d | 2,226 | py | Python | venv/lib/python3.6/site-packages/bioblend/_tests/TestGalaxyQuotas.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 51 | 2015-01-23T20:45:01.000Z | 2022-01-31T10:46:31.000Z | bioblend/_tests/TestGalaxyQuotas.py | violethaze74/galaxy | 18ac5bdba333810fb10972daf60c9543e26aa236 | [
"MIT"
] | 288 | 2015-01-22T21:01:31.000Z | 2022-03-14T09:09:25.000Z | bioblend/_tests/TestGalaxyQuotas.py | violethaze74/galaxy | 18ac5bdba333810fb10972daf60c9543e26aa236 | [
"MIT"
] | 87 | 2015-02-02T06:31:54.000Z | 2022-03-31T02:39:31.000Z | import uuid
from . import GalaxyTestBase
class TestGalaxyQuotas(GalaxyTestBase.GalaxyTestBase):
def setUp(self):
super().setUp()
# Quota names must be unique, and they're impossible to delete
# without accessing the database.
self.quota_name = f"BioBlend-Test-Quota-{uuid.uuid4().hex}"
self.quota = self.gi.quotas.create_quota(
self.quota_name, 'testing', '100 GB', '=',
default='registered'
)
def tearDown(self):
self.gi.quotas.update_quota(self.quota['id'], default='registered')
self.gi.quotas.update_quota(self.quota['id'], default='no')
self.gi.quotas.delete_quota(self.quota['id'])
def test_create_quota(self):
quota = self.gi.quotas.show_quota(self.quota['id'])
self.assertEqual(quota['name'], self.quota_name)
self.assertEqual(quota['bytes'], 107374182400)
self.assertEqual(quota['operation'], '=')
self.assertEqual(quota['description'], 'testing')
def test_get_quotas(self):
quotas = self.gi.quotas.get_quotas()
self.assertIn(self.quota['id'], [quota['id'] for quota in quotas])
def test_update_quota(self):
response = self.gi.quotas.update_quota(
self.quota['id'], name=self.quota_name + '-new',
description='asdf', default='registered', operation='-',
amount='.01 TB'
)
self.assertIn(f"""Quota '{self.quota_name}' has been renamed to '{self.quota_name}-new'""", response)
quota = self.gi.quotas.show_quota(self.quota['id'])
self.assertEqual(quota['name'], self.quota_name + '-new')
self.assertEqual(quota['bytes'], 10995116277)
self.assertEqual(quota['operation'], '-')
self.assertEqual(quota['description'], 'asdf')
def test_delete_undelete_quota(self):
self.gi.quotas.update_quota(
self.quota['id'], default='no'
)
response = self.gi.quotas.delete_quota(self.quota['id'])
self.assertEqual(response, 'Deleted 1 quotas: ' + self.quota_name)
response = self.gi.quotas.undelete_quota(self.quota['id'])
self.assertEqual(response, 'Undeleted 1 quotas: ' + self.quota_name)
| 39.75 | 109 | 0.632075 |
3348223f02520d1576aa534204cbe038ba6570fa | 221 | py | Python | src/dftinputgen/qe/settings/__init__.py | aced-differentiate/dft-input-gen | 14bee323517714c433682bad2dcb897b223dd5ec | [
"Apache-2.0"
] | 1 | 2021-04-15T09:54:52.000Z | 2021-04-15T09:54:52.000Z | src/dftinputgen/qe/settings/__init__.py | CitrineInformatics/dft-input-gen | 14bee323517714c433682bad2dcb897b223dd5ec | [
"Apache-2.0"
] | 1 | 2021-01-28T22:12:07.000Z | 2021-01-28T22:12:07.000Z | src/dftinputgen/qe/settings/__init__.py | aced-differentiate/dft-input-gen | 14bee323517714c433682bad2dcb897b223dd5ec | [
"Apache-2.0"
] | 2 | 2020-12-08T18:14:13.000Z | 2020-12-18T19:01:11.000Z | import json
import pkg_resources
__all__ = ["QE_TAGS"]
tags_file = pkg_resources.resource_filename(
"dftinputgen.qe.settings", "tags_and_groups.json"
)
with open(tags_file, "r") as fr:
QE_TAGS = json.load(fr)
| 17 | 53 | 0.728507 |
1fc28604e4b5c97c362311883c99e39f2c8f4b5e | 248 | py | Python | prog 47.py | shailaja0221/gethub | 883a1c96ee5d5d09224b4736fb2e1378c80c5dae | [
"MIT"
] | null | null | null | prog 47.py | shailaja0221/gethub | 883a1c96ee5d5d09224b4736fb2e1378c80c5dae | [
"MIT"
] | null | null | null | prog 47.py | shailaja0221/gethub | 883a1c96ee5d5d09224b4736fb2e1378c80c5dae | [
"MIT"
] | null | null | null |
# Read the element count and the list of integers, then report the smallest and largest values.
m=int(input())
l=list(map(int,input().split()))
# b tracks the largest value seen so far
b=l[0]
for i in range(m):
    for j in range(i+1,m):
        if(b<l[j]):
            b=l[j]
# c tracks the smallest value seen so far
c=l[0]
for i in range(m):
    for j in range(i+1,m):
        if(c>l[j]):
            c=l[j]
print(c,b)
| 16.533333 | 32 | 0.447581 |
46a7c6488427e0c8475c76f58b5db2846948ea79 | 650 | py | Python | photos/admin.py | kmollee/gallery | cb54d8bddb184b35f6d62c17b44b311617b76b4f | [
"MIT"
] | 2 | 2018-05-04T06:18:37.000Z | 2021-01-12T13:16:08.000Z | photos/admin.py | kmollee/gallery | cb54d8bddb184b35f6d62c17b44b311617b76b4f | [
"MIT"
] | null | null | null | photos/admin.py | kmollee/gallery | cb54d8bddb184b35f6d62c17b44b311617b76b4f | [
"MIT"
] | null | null | null | from django.contrib import admin
from photos.models import Person, Location, Album, Photo, Thumbnail
class NameOnlyAdmin(admin.ModelAdmin):
list_display = ['name', ]
class AlbumAdmin(admin.ModelAdmin):
list_display = ['name', 'month', 'year', 'location', ]
class ThumbnailInline(admin.TabularInline):
model = Thumbnail
readonly_fields = ['file', ]
class PhotoAdmin(admin.ModelAdmin):
list_display = ['name', 'album', ]
inlines = [ThumbnailInline, ]
admin.site.register(Person, NameOnlyAdmin)
admin.site.register(Location, NameOnlyAdmin)
admin.site.register(Album, AlbumAdmin)
admin.site.register(Photo, PhotoAdmin)
| 23.214286 | 67 | 0.733846 |