hexsha
stringlengths 40
40
| size
int64 7
1.04M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
247
| max_stars_repo_name
stringlengths 4
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
sequencelengths 1
10
| max_stars_count
int64 1
368k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
247
| max_issues_repo_name
stringlengths 4
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
sequencelengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
247
| max_forks_repo_name
stringlengths 4
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
sequencelengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 5
1.04M
| avg_line_length
float64 1.77
618k
| max_line_length
int64 1
970k
| alphanum_fraction
float64 0
1
| original_content
stringlengths 7
1.04M
| filtered:remove_non_ascii
int64 0
514k
| filtered:remove_delete_markers
int64 0
0
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c04249abf3a5ebc265209326af85d9f62c50c23b | 3,052 | py | Python | testing/python_lib/test_faucet_state_collector.py | pbatta/forch | df033bc5b7cbac06e1c406257193cb0cb62f2742 | [
"Apache-2.0"
] | 1 | 2019-12-12T23:13:24.000Z | 2019-12-12T23:13:24.000Z | testing/python_lib/test_faucet_state_collector.py | pbatta/forch | df033bc5b7cbac06e1c406257193cb0cb62f2742 | [
"Apache-2.0"
] | 92 | 2019-12-13T03:30:35.000Z | 2021-11-11T16:16:13.000Z | testing/python_lib/test_faucet_state_collector.py | pbatta/forch | df033bc5b7cbac06e1c406257193cb0cb62f2742 | [
"Apache-2.0"
] | 7 | 2020-01-11T14:12:46.000Z | 2021-01-25T17:30:55.000Z | """Unit tests for Faucet State Collector"""
import unittest
from unit_base import FaucetStateCollectorTestBase
from forch.proto.faucet_event_pb2 import StackTopoChange
from forch.utils import dict_proto
class DataplaneStateTestCase(FaucetStateCollectorTestBase):
    """Test cases for dataplane state"""

    def _build_link(self, src_dp, src_port, dst_dp, dst_port):
        """Describe a stack link between two switch ports as a plain dict."""
        return {
            'key': '%s:%s-%s:%s' % (src_dp, src_port, dst_dp, dst_port),
            'source': src_dp,
            'target': dst_dp,
            'port_map': {
                'dp_a': src_dp,
                'port_a': 'Port ' + src_port,
                'dp_z': dst_dp,
                'port_z': 'Port ' + dst_port
            }
        }

    def _build_loop_topo_obj(self):
        """Build a topology where every switch claims a root hop port,
        so no switch is the root and the egress search cannot terminate."""
        dps = {
            switch: StackTopoChange.StackDp(root_hop_port=1)
            for switch in ('sw1', 'sw2', 'sw3')
        }
        link_specs = [
            ('sw1', '1', 'sw2', '2'),
            ('sw2', '1', 'sw3', '2'),
            ('sw3', '1', 'sw1', '2'),
        ]
        links_graph = [
            dict_proto(self._build_link(*spec), StackTopoChange.StackLink)
            for spec in link_specs
        ]
        return {
            'dps': dps,
            'links_graph': links_graph
        }

    def _build_topo_obj(self):
        """Build a valid topology rooted at sw1 (sw1 has no root hop port)."""
        dps = {
            'sw1': StackTopoChange.StackDp(),
            'sw2': StackTopoChange.StackDp(root_hop_port=1),
            'sw3': StackTopoChange.StackDp(root_hop_port=1),
        }
        link_specs = [
            ('sw1', '1', 'sw2', '1'),
            ('sw2', '2', 'sw3', '2'),
            ('sw3', '1', 'sw1', '2'),
        ]
        links_graph = [
            dict_proto(self._build_link(*spec), StackTopoChange.StackLink)
            for spec in link_specs
        ]
        return {
            'active_root': 'sw1',
            'dps': dps,
            'links_graph': links_graph
        }

    def test_topology_loop(self):
        """test faucet_state_collector behavior when faucet sends loop in path to egress topology"""
        self._faucet_state_collector.topo_state = self._build_loop_topo_obj()
        egress_path = self._faucet_state_collector.get_switch_egress_path('sw1')
        self.assertEqual(egress_path['path_state'], 1)
        self.assertEqual(
            egress_path['path_state_detail'],
            'No path to root found. Loop in topology.')

    def test_egress_path(self):
        """test faucet_state_collector computes the egress path for a valid topology"""
        self._faucet_state_collector.topo_state = self._build_topo_obj()
        # pylint: disable=protected-access
        self._faucet_state_collector._get_egress_port = lambda port: 28
        egress_path = self._faucet_state_collector.get_switch_egress_path('sw3')
        self.assertEqual(egress_path['path_state'], 5)
        self.assertEqual(
            egress_path['path'],
            [{'switch': 'sw3', 'out': 1}, {'switch': 'sw1', 'in': 2, 'out': 28}])


if __name__ == '__main__':
    unittest.main()
| 37.219512 | 100 | 0.582241 | """Unit tests for Faucet State Collector"""
import unittest
from unit_base import FaucetStateCollectorTestBase
from forch.proto.faucet_event_pb2 import StackTopoChange
from forch.utils import dict_proto
class DataplaneStateTestCase(FaucetStateCollectorTestBase):
    """Test cases for dataplane state"""

    def _build_link(self, src_dp, src_port, dst_dp, dst_port):
        """Describe a stack link between two switch ports as a plain dict."""
        return {
            'key': '%s:%s-%s:%s' % (src_dp, src_port, dst_dp, dst_port),
            'source': src_dp,
            'target': dst_dp,
            'port_map': {
                'dp_a': src_dp,
                'port_a': 'Port ' + src_port,
                'dp_z': dst_dp,
                'port_z': 'Port ' + dst_port
            }
        }

    def _build_loop_topo_obj(self):
        """Build a topology where every switch claims a root hop port,
        so no switch is the root and the egress search cannot terminate."""
        dps = {
            switch: StackTopoChange.StackDp(root_hop_port=1)
            for switch in ('sw1', 'sw2', 'sw3')
        }
        link_specs = [
            ('sw1', '1', 'sw2', '2'),
            ('sw2', '1', 'sw3', '2'),
            ('sw3', '1', 'sw1', '2'),
        ]
        links_graph = [
            dict_proto(self._build_link(*spec), StackTopoChange.StackLink)
            for spec in link_specs
        ]
        return {
            'dps': dps,
            'links_graph': links_graph
        }

    def _build_topo_obj(self):
        """Build a valid topology rooted at sw1 (sw1 has no root hop port)."""
        dps = {
            'sw1': StackTopoChange.StackDp(),
            'sw2': StackTopoChange.StackDp(root_hop_port=1),
            'sw3': StackTopoChange.StackDp(root_hop_port=1),
        }
        link_specs = [
            ('sw1', '1', 'sw2', '1'),
            ('sw2', '2', 'sw3', '2'),
            ('sw3', '1', 'sw1', '2'),
        ]
        links_graph = [
            dict_proto(self._build_link(*spec), StackTopoChange.StackLink)
            for spec in link_specs
        ]
        return {
            'active_root': 'sw1',
            'dps': dps,
            'links_graph': links_graph
        }

    def test_topology_loop(self):
        """test faucet_state_collector behavior when faucet sends loop in path to egress topology"""
        self._faucet_state_collector.topo_state = self._build_loop_topo_obj()
        egress_path = self._faucet_state_collector.get_switch_egress_path('sw1')
        self.assertEqual(egress_path['path_state'], 1)
        self.assertEqual(
            egress_path['path_state_detail'],
            'No path to root found. Loop in topology.')

    def test_egress_path(self):
        """test faucet_state_collector computes the egress path for a valid topology"""
        self._faucet_state_collector.topo_state = self._build_topo_obj()
        # pylint: disable=protected-access
        self._faucet_state_collector._get_egress_port = lambda port: 28
        egress_path = self._faucet_state_collector.get_switch_egress_path('sw3')
        self.assertEqual(egress_path['path_state'], 5)
        self.assertEqual(
            egress_path['path'],
            [{'switch': 'sw3', 'out': 1}, {'switch': 'sw1', 'in': 2, 'out': 28}])


if __name__ == '__main__':
    unittest.main()
| 0 | 0 |
3c98d96e351e9f0cf0c5d2fb68fa0eae5f624451 | 2,344 | py | Python | tests/python/gaia-ui-tests/gaiatest/apps/persona/app.py | pdehaan/gaia | 0ea959d81cefa0128157ec3ff0e2b7bdd29afacf | [
"Apache-2.0"
] | 1 | 2015-03-02T04:03:00.000Z | 2015-03-02T04:03:00.000Z | tests/python/gaia-ui-tests/gaiatest/apps/persona/app.py | caseyyee/gaia | fa82433dda06e9ae7d35a1f74cc16f4dd72cc514 | [
"Apache-2.0"
] | null | null | null | tests/python/gaia-ui-tests/gaiatest/apps/persona/app.py | caseyyee/gaia | fa82433dda06e9ae7d35a1f74cc16f4dd72cc514 | [
"Apache-2.0"
] | null | null | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette.by import By
from gaiatest.apps.base import Base
class Persona(Base):
    """Page object for the Persona (BrowserID) sign-in dialog.

    The dialog is rendered inside a system iframe; interactions have to
    re-focus that iframe after the on-screen keyboard is dismissed.
    """

    # iframes
    _persona_frame_locator = (By.CSS_SELECTOR, "iframe.screen[data-url*='persona.org/sign_in#NATIVE']")

    # persona login
    _body_loading_locator = (By.CSS_SELECTOR, 'body.loading')
    _email_input_locator = (By.ID, 'authentication_email')
    _password_input_locator = (By.ID, 'authentication_password')
    _continue_button_locator = (By.CSS_SELECTOR, '.continue.right')
    _returning_button_locator = (By.CSS_SELECTOR, 'button.isReturning')

    def __init__(self, marionette):
        Base.__init__(self, marionette)

    def login(self, email, password):
        """Run the full login flow, then return focus to the calling app."""
        # This only supports logging in with a known user and no existing session
        self.type_email(email)
        self.tap_continue()
        self.type_password(password)
        self.tap_returning()
        # Wait until the Persona iframe has been torn down before switching
        # back to the app that triggered the login.
        self.marionette.switch_to_frame()
        self.wait_for_element_not_present(*self._persona_frame_locator)
        self.apps.switch_to_displayed_app()

    def wait_for_persona_to_load(self):
        """Block until the page's loading indicator disappears."""
        self.wait_for_element_not_displayed(*self._body_loading_locator)

    def switch_to_persona_frame(self):
        """Focus marionette on the Persona iframe and wait for it to load."""
        self.marionette.switch_to_frame()
        self.frame = self.wait_for_element_present(*self._persona_frame_locator)
        self.marionette.switch_to_frame(self.frame)
        self.wait_for_persona_to_load()

    def type_email(self, value):
        """Type the email address, dismiss the keyboard, re-focus the frame."""
        self.marionette.find_element(*self._email_input_locator).send_keys(value)
        self.keyboard.dismiss()
        # Re-focus the iframe (frame focus appears to be lost after the
        # keyboard is dismissed -- confirm against the marionette driver).
        self.switch_to_persona_frame()

    def type_password(self, value):
        """Type the password, dismiss the keyboard, re-focus the frame."""
        self.marionette.find_element(*self._password_input_locator).send_keys(value)
        self.keyboard.dismiss()
        self.switch_to_persona_frame()

    def tap_continue(self):
        """Tap 'continue' and wait for the password step to appear."""
        self.marionette.find_element(*self._continue_button_locator).tap()
        self.wait_for_element_not_displayed(*self._continue_button_locator)
        self.wait_for_element_displayed(*self._password_input_locator)

    def tap_returning(self):
        """Tap the 'returning user' sign-in button."""
        self.marionette.find_element(*self._returning_button_locator).tap()
| 37.206349 | 103 | 0.729522 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette.by import By
from gaiatest.apps.base import Base
class Persona(Base):
    """Page object for the Persona (BrowserID) sign-in dialog.

    The dialog is rendered inside a system iframe; interactions have to
    re-focus that iframe after the on-screen keyboard is dismissed.
    """

    # iframes
    _persona_frame_locator = (By.CSS_SELECTOR, "iframe.screen[data-url*='persona.org/sign_in#NATIVE']")

    # persona login
    _body_loading_locator = (By.CSS_SELECTOR, 'body.loading')
    _email_input_locator = (By.ID, 'authentication_email')
    _password_input_locator = (By.ID, 'authentication_password')
    _continue_button_locator = (By.CSS_SELECTOR, '.continue.right')
    _returning_button_locator = (By.CSS_SELECTOR, 'button.isReturning')

    def __init__(self, marionette):
        Base.__init__(self, marionette)

    def login(self, email, password):
        """Run the full login flow, then return focus to the calling app."""
        # This only supports logging in with a known user and no existing session
        self.type_email(email)
        self.tap_continue()
        self.type_password(password)
        self.tap_returning()
        # Wait until the Persona iframe has been torn down before switching
        # back to the app that triggered the login.
        self.marionette.switch_to_frame()
        self.wait_for_element_not_present(*self._persona_frame_locator)
        self.apps.switch_to_displayed_app()

    def wait_for_persona_to_load(self):
        """Block until the page's loading indicator disappears."""
        self.wait_for_element_not_displayed(*self._body_loading_locator)

    def switch_to_persona_frame(self):
        """Focus marionette on the Persona iframe and wait for it to load."""
        self.marionette.switch_to_frame()
        self.frame = self.wait_for_element_present(*self._persona_frame_locator)
        self.marionette.switch_to_frame(self.frame)
        self.wait_for_persona_to_load()

    def type_email(self, value):
        """Type the email address, dismiss the keyboard, re-focus the frame."""
        self.marionette.find_element(*self._email_input_locator).send_keys(value)
        self.keyboard.dismiss()
        # Re-focus the iframe (frame focus appears to be lost after the
        # keyboard is dismissed -- confirm against the marionette driver).
        self.switch_to_persona_frame()

    def type_password(self, value):
        """Type the password, dismiss the keyboard, re-focus the frame."""
        self.marionette.find_element(*self._password_input_locator).send_keys(value)
        self.keyboard.dismiss()
        self.switch_to_persona_frame()

    def tap_continue(self):
        """Tap 'continue' and wait for the password step to appear."""
        self.marionette.find_element(*self._continue_button_locator).tap()
        self.wait_for_element_not_displayed(*self._continue_button_locator)
        self.wait_for_element_displayed(*self._password_input_locator)

    def tap_returning(self):
        """Tap the 'returning user' sign-in button."""
        self.marionette.find_element(*self._returning_button_locator).tap()
| 0 | 0 |
46007c370fa322eb1ca7e4346385565ed9dfbbd8 | 4,826 | py | Python | ethevents/client/connection.py | ezdac/ethevents | 9f4b0ff1ba0d303180abe3b5336805335bc0765b | [
"MIT"
] | 2 | 2018-08-21T01:06:30.000Z | 2019-03-05T08:15:55.000Z | ethevents/client/connection.py | ezdac/ethevents | 9f4b0ff1ba0d303180abe3b5336805335bc0765b | [
"MIT"
] | 1 | 2018-04-23T14:01:51.000Z | 2018-04-23T14:09:51.000Z | ethevents/client/connection.py | ezdac/ethevents | 9f4b0ff1ba0d303180abe3b5336805335bc0765b | [
"MIT"
] | 1 | 2022-03-22T04:57:16.000Z | 2022-03-22T04:57:16.000Z | import time
import click
import requests
from elasticsearch.connection import Connection
from elasticsearch.connection_pool import DummyConnectionPool
from elasticsearch.transport import Transport
from elasticsearch.exceptions import (
ConnectionError,
ConnectionTimeout,
SSLError
)
from elasticsearch.compat import urlencode
from requests import Session
from ethevents.client.app import App
import logging
log = logging.getLogger(__name__)
class MicroRaidenConnection(Connection):
    """Elasticsearch HTTP connection that performs requests through a
    microraiden payment-enabled ``requests.Session``.

    The session (supplied by the client app) handles the payment flow;
    this class only adapts it to elasticsearch's ``Connection`` interface.
    """

    def __init__(
        self,
        host,
        port,
        session: Session,
        use_ssl=False,
        headers=None,
        **kwargs
    ):
        super(MicroRaidenConnection, self).__init__(
            host=host,
            port=port,
            use_ssl=use_ssl,
            **kwargs
        )
        self.base_url = 'http%s://%s:%d%s' % (
            's' if self.use_ssl else '',
            host, port, self.url_prefix
        )
        self.session = session
        # NOTE(review): this overwrites any headers already configured on
        # the shared session instead of merging with them.
        self.session.headers = headers or {}
        self.session.headers.setdefault('content-type', 'application/json')

    def perform_request(
        self,
        method,
        url,
        params=None,
        body=None,
        timeout=None,
        ignore=(),
        headers=None
    ):
        """Send one HTTP request and return ``(status, headers, body_text)``.

        Mirrors elasticsearch's ``Connection.perform_request`` contract:
        transport failures are re-raised as elasticsearch exception types,
        and non-2xx statuses not listed in *ignore* are raised through
        ``self._raise_error``.
        """
        url = self.base_url + url
        if params:
            url = '%s?%s' % (url, urlencode(params or {}))
        start = time.time()
        # Prepare the request explicitly so that environment settings
        # (proxies, TLS verification, ...) can be merged in before sending.
        request = requests.Request(method=method, headers=headers, url=url, data=body)
        prepared_request = self.session.prepare_request(request)
        settings = self.session.merge_environment_settings(
            prepared_request.url,
            {},
            None,
            None,
            None
        )
        send_kwargs = {'timeout': timeout or self.timeout}
        send_kwargs.update(settings)
        try:
            response = self.session.request(
                prepared_request.method,
                prepared_request.url,
                data=prepared_request.body,
                headers=prepared_request.headers,
                **send_kwargs
            )
            duration = time.time() - start
            raw_data = response.text
        except Exception as e:
            self.log_request_fail(
                method,
                url,
                prepared_request.path_url,
                body,
                time.time() - start,
                exception=e
            )
            # Translate requests exceptions into elasticsearch exceptions.
            if isinstance(e, requests.exceptions.SSLError):
                raise SSLError('N/A', str(e), e)
            if isinstance(e, requests.Timeout):
                raise ConnectionTimeout('TIMEOUT', str(e), e)
            raise ConnectionError('N/A', str(e), e)
        # raise errors based on http status codes, let the client handle those if needed
        if not (200 <= response.status_code < 300) and response.status_code not in ignore:
            self.log_request_fail(
                method,
                url,
                response.request.path_url,
                body,
                duration,
                response.status_code,
                raw_data
            )
            self._raise_error(response.status_code, raw_data)
        self.log_request_success(
            method,
            url,
            response.request.path_url,
            body,
            response.status_code,
            raw_data,
            duration
        )
        return response.status_code, response.headers, raw_data
class MicroRaidenTransport(Transport):
    """Elasticsearch transport whose connections share one paying session."""

    def __init__(self, hosts, *args, session: Session,
                 connection_class=MicroRaidenConnection,
                 connection_pool_class=DummyConnectionPool, **kwargs):
        self.hosts = hosts
        log.debug('initializing transport')
        # Hand the payment session down so every connection uses it.
        super().__init__(
            hosts,
            *args,
            session=session,
            connection_class=connection_class,
            connection_pool_class=connection_pool_class,
            **kwargs
        )
@click.option(
    '--limits/--no-limits',
    default=True
)
@click.command()
def main(limits: bool):
    # Manual smoke test: unlock the local account and run one match_all
    # query against api.eth.events through the paying transport.
    # --no-limits disables the client-side spending safety checks.
    logging.basicConfig(level=logging.DEBUG)
    log.debug('in main')
    app = App()
    app.start(ignore_security_limits=not limits, endpoint_url='https://api.eth.events')
    log.debug('session started')
    if app.account.unlocked:
        # elasticsearch is imported lazily here (presumably to keep it an
        # optional dependency of this module -- confirm).
        import elasticsearch
        es = elasticsearch.Elasticsearch(
            transport_class=MicroRaidenTransport,
            hosts=['api.eth.events:443'],
            use_ssl=True,
            session=app.session
        )
        response = es.search('ethereum', 'block', body=dict(query=dict(match_all=dict())))
        print(response)


if __name__ == '__main__':
    main()
| 28.05814 | 90 | 0.564857 | import time
import click
import requests
from elasticsearch.connection import Connection
from elasticsearch.connection_pool import DummyConnectionPool
from elasticsearch.transport import Transport
from elasticsearch.exceptions import (
ConnectionError,
ConnectionTimeout,
SSLError
)
from elasticsearch.compat import urlencode
from requests import Session
from ethevents.client.app import App
import logging
log = logging.getLogger(__name__)
class MicroRaidenConnection(Connection):
    """Elasticsearch HTTP connection that performs requests through a
    microraiden payment-enabled ``requests.Session``.

    The session (supplied by the client app) handles the payment flow;
    this class only adapts it to elasticsearch's ``Connection`` interface.
    """

    def __init__(
        self,
        host,
        port,
        session: Session,
        use_ssl=False,
        headers=None,
        **kwargs
    ):
        super(MicroRaidenConnection, self).__init__(
            host=host,
            port=port,
            use_ssl=use_ssl,
            **kwargs
        )
        self.base_url = 'http%s://%s:%d%s' % (
            's' if self.use_ssl else '',
            host, port, self.url_prefix
        )
        self.session = session
        # NOTE(review): this overwrites any headers already configured on
        # the shared session instead of merging with them.
        self.session.headers = headers or {}
        self.session.headers.setdefault('content-type', 'application/json')

    def perform_request(
        self,
        method,
        url,
        params=None,
        body=None,
        timeout=None,
        ignore=(),
        headers=None
    ):
        """Send one HTTP request and return ``(status, headers, body_text)``.

        Mirrors elasticsearch's ``Connection.perform_request`` contract:
        transport failures are re-raised as elasticsearch exception types,
        and non-2xx statuses not listed in *ignore* are raised through
        ``self._raise_error``.
        """
        url = self.base_url + url
        if params:
            url = '%s?%s' % (url, urlencode(params or {}))
        start = time.time()
        # Prepare the request explicitly so that environment settings
        # (proxies, TLS verification, ...) can be merged in before sending.
        request = requests.Request(method=method, headers=headers, url=url, data=body)
        prepared_request = self.session.prepare_request(request)
        settings = self.session.merge_environment_settings(
            prepared_request.url,
            {},
            None,
            None,
            None
        )
        send_kwargs = {'timeout': timeout or self.timeout}
        send_kwargs.update(settings)
        try:
            response = self.session.request(
                prepared_request.method,
                prepared_request.url,
                data=prepared_request.body,
                headers=prepared_request.headers,
                **send_kwargs
            )
            duration = time.time() - start
            raw_data = response.text
        except Exception as e:
            self.log_request_fail(
                method,
                url,
                prepared_request.path_url,
                body,
                time.time() - start,
                exception=e
            )
            # Translate requests exceptions into elasticsearch exceptions.
            if isinstance(e, requests.exceptions.SSLError):
                raise SSLError('N/A', str(e), e)
            if isinstance(e, requests.Timeout):
                raise ConnectionTimeout('TIMEOUT', str(e), e)
            raise ConnectionError('N/A', str(e), e)
        # raise errors based on http status codes, let the client handle those if needed
        if not (200 <= response.status_code < 300) and response.status_code not in ignore:
            self.log_request_fail(
                method,
                url,
                response.request.path_url,
                body,
                duration,
                response.status_code,
                raw_data
            )
            self._raise_error(response.status_code, raw_data)
        self.log_request_success(
            method,
            url,
            response.request.path_url,
            body,
            response.status_code,
            raw_data,
            duration
        )
        return response.status_code, response.headers, raw_data
class MicroRaidenTransport(Transport):
    """Elasticsearch transport whose connections share one paying session."""

    def __init__(self, hosts, *args, session: Session,
                 connection_class=MicroRaidenConnection,
                 connection_pool_class=DummyConnectionPool, **kwargs):
        self.hosts = hosts
        log.debug('initializing transport')
        # Hand the payment session down so every connection uses it.
        super().__init__(
            hosts,
            *args,
            session=session,
            connection_class=connection_class,
            connection_pool_class=connection_pool_class,
            **kwargs
        )
@click.option(
    '--limits/--no-limits',
    default=True
)
@click.command()
def main(limits: bool):
    # Manual smoke test: unlock the local account and run one match_all
    # query against api.eth.events through the paying transport.
    # --no-limits disables the client-side spending safety checks.
    logging.basicConfig(level=logging.DEBUG)
    log.debug('in main')
    app = App()
    app.start(ignore_security_limits=not limits, endpoint_url='https://api.eth.events')
    log.debug('session started')
    if app.account.unlocked:
        # elasticsearch is imported lazily here (presumably to keep it an
        # optional dependency of this module -- confirm).
        import elasticsearch
        es = elasticsearch.Elasticsearch(
            transport_class=MicroRaidenTransport,
            hosts=['api.eth.events:443'],
            use_ssl=True,
            session=app.session
        )
        response = es.search('ethereum', 'block', body=dict(query=dict(match_all=dict())))
        print(response)


if __name__ == '__main__':
    main()
| 0 | 0 |
df6d818cc916d501a3e8707af88d5a3730dc6550 | 615 | py | Python | source/client/pages/SpeciesElement.py | RobinSatterthwaite/BirdTable | ef35c0135ee54910e535281e7f690643c3b4f6c4 | [
"MIT"
] | null | null | null | source/client/pages/SpeciesElement.py | RobinSatterthwaite/BirdTable | ef35c0135ee54910e535281e7f690643c3b4f6c4 | [
"MIT"
] | null | null | null | source/client/pages/SpeciesElement.py | RobinSatterthwaite/BirdTable | ef35c0135ee54910e535281e7f690643c3b4f6c4 | [
"MIT"
] | null | null | null |
from pystache import TemplateSpec
class SpeciesElement(TemplateSpec):
    """Pystache view model for a single species row."""

    template_name = "SpeciesElement"

    def __init__(self,
                 name,
                 binomial_name,
                 count,
                 times_seen,
                 include_times_seen,
                 seen,
                 heard):
        self.name = name
        self.binomialName = binomial_name
        self.count = count
        # The "times seen" suffix is rendered only when requested.
        self.timesSeen = " / {0}".format(times_seen) if include_times_seen else ""
        # Seen/heard flags render as a filled circle or nothing at all.
        marker = "\u26ab"
        self.seen = marker if seen else ""
        self.heard = marker if heard else ""
| 18.088235 | 57 | 0.580488 |
from pystache import TemplateSpec
class SpeciesElement(TemplateSpec):
    """Pystache view model for a single species row."""

    template_name = "SpeciesElement"

    def __init__(self,
                 name,
                 binomial_name,
                 count,
                 times_seen,
                 include_times_seen,
                 seen,
                 heard):
        self.name = name
        self.binomialName = binomial_name
        self.count = count
        # The "times seen" suffix is rendered only when requested.
        self.timesSeen = " / {0}".format(times_seen) if include_times_seen else ""
        # Seen/heard flags render as a filled circle or nothing at all.
        marker = "\u26ab"
        self.seen = marker if seen else ""
        self.heard = marker if heard else ""
| 0 | 0 |
2abdd1a8347e55f710b0bd9bf098d6715d1155a9 | 561 | py | Python | Number Guessing.py | GamePlayer-7/Gaming | 4466f2e693f0c10d3bc041b388526484713dc2e1 | [
"MIT"
] | null | null | null | Number Guessing.py | GamePlayer-7/Gaming | 4466f2e693f0c10d3bc041b388526484713dc2e1 | [
"MIT"
] | null | null | null | Number Guessing.py | GamePlayer-7/Gaming | 4466f2e693f0c10d3bc041b388526484713dc2e1 | [
"MIT"
] | null | null | null | import random # imports the random module, which contains a variety of things to do with random number generation.
# Pick the secret number, a random integer in the inclusive range [1, 10].
number = random.randint(1, 10)
for attempt in range(3):  # the player gets three attempts
    user = int(input("guess the number"))
    if user == number:
        print("Hurray!!")
        print(f"you guessed the number right it's {number}")
        break
    # Fix: the original printed the secret number after every wrong guess,
    # which made the remaining attempts pointless.
    print("Your guess is incorrect")
else:
    # All attempts used without a correct guess: only now reveal the answer.
    print(f"Sorry, the number was {number}")
| 51 | 161 | 0.672014 | import random # imports the random module, which contains a variety of things to do with random number generation.
# Pick the secret number, a random integer in the inclusive range [1, 10].
number = random.randint(1, 10)
for attempt in range(3):  # the player gets three attempts
    user = int(input("guess the number"))
    if user == number:
        print("Hurray!!")
        print(f"you guessed the number right it's {number}")
        break
    # Fix: the original printed the secret number after every wrong guess,
    # which made the remaining attempts pointless.
    print("Your guess is incorrect")
else:
    # All attempts used without a correct guess: only now reveal the answer.
    print(f"Sorry, the number was {number}")
| 0 | 0 |
1afcc354de4e4e1ba67d59086c2b25d41157da44 | 2,681 | py | Python | src/waldur_auth_saml2/utils.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 26 | 2017-10-18T13:49:58.000Z | 2021-09-19T04:44:09.000Z | src/waldur_auth_saml2/utils.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 14 | 2018-12-10T14:14:51.000Z | 2021-06-07T10:33:39.000Z | src/waldur_auth_saml2/utils.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 32 | 2017-09-24T03:10:45.000Z | 2021-10-16T16:41:09.000Z | from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from djangosaml2.conf import get_config
from djangosaml2.utils import available_idps
from saml2.attribute_converter import ac_factory
from saml2.mdstore import InMemoryMetaData, MetaDataFile
from saml2.mdstore import name as get_idp_name
from saml2.s_utils import UnknownSystemEntity
from . import models
def load_providers():
    """Parse every configured local IdP metadata file into one mapping."""
    providers = {}
    for metadata_file in settings.WALDUR_AUTH_SAML2['IDP_METADATA_LOCAL']:
        loader = MetaDataFile(ac_factory(), metadata_file)
        loader.load()
        providers.update(loader.items())
    return providers
def sync_providers():
    """Synchronize IdentityProvider database rows with local IdP metadata.

    Deletes rows whose URL no longer appears in the configured metadata
    files, creates rows for newly configured providers, and refreshes the
    stored metadata when it changed on disk.
    """
    providers = load_providers()
    current_idps = list(models.IdentityProvider.objects.all().only('url', 'pk'))
    backend_urls = set(providers.keys())
    # Delete providers that disappeared from the local metadata files.
    stale_idps = set(idp.pk for idp in current_idps if idp.url not in backend_urls)
    models.IdentityProvider.objects.filter(pk__in=stale_idps).delete()
    existing_urls = set(idp.url for idp in current_idps)
    for url, metadata in providers.items():
        name = get_idp_name(metadata)
        if not name:
            # It is expected that every provider has name. For corner cases check entity_id
            name = metadata.get('entity_id')
        if not name:
            # Skip invalid identity provider
            continue
        if url in existing_urls:
            # Skip identity provider if its url is already in the database
            continue
        models.IdentityProvider.objects.create(url=url, name=name, metadata=metadata)
    # Refresh stored metadata for providers whose files changed.
    for provider in models.IdentityProvider.objects.all().iterator():
        backend_metadata = providers.get(provider.url)
        if backend_metadata and provider.metadata != backend_metadata:
            provider.metadata = backend_metadata
            provider.save()
def is_valid_idp(value):
    """Return True when *value* matches a remotely or locally known IdP."""
    if value in available_idps(get_config()):
        return True
    return models.IdentityProvider.objects.filter(url=value).exists()
def get_idp_sso_supported_bindings(idp_entity_id, config):
    """Return the SSO binding names the given IdP advertises.

    Returns an empty list when the entity is unknown or the metadata has
    no IdP SSO descriptor.
    """
    try:
        return config.metadata.service(
            idp_entity_id, 'idpsso_descriptor', 'single_sign_on_service'
        ).keys()
    except (UnknownSystemEntity, AttributeError):
        return []
class DatabaseMetadataLoader(InMemoryMetaData):
    """Metadata store backed by IdentityProvider rows instead of files."""

    def load(self, *args, **kwargs):
        """No-op: metadata lives in the database, not in a file to parse."""
        pass

    def __getitem__(self, item):
        """Look up stored metadata by IdP URL, raising KeyError when absent."""
        try:
            provider = models.IdentityProvider.objects.get(url=item)
        except ObjectDoesNotExist:
            raise KeyError
        return provider.metadata
| 33.5125 | 91 | 0.697128 | from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from djangosaml2.conf import get_config
from djangosaml2.utils import available_idps
from saml2.attribute_converter import ac_factory
from saml2.mdstore import InMemoryMetaData, MetaDataFile
from saml2.mdstore import name as get_idp_name
from saml2.s_utils import UnknownSystemEntity
from . import models
def load_providers():
    """Parse every configured local IdP metadata file into one mapping."""
    providers = {}
    for metadata_file in settings.WALDUR_AUTH_SAML2['IDP_METADATA_LOCAL']:
        loader = MetaDataFile(ac_factory(), metadata_file)
        loader.load()
        providers.update(loader.items())
    return providers
def sync_providers():
    """Synchronize IdentityProvider database rows with local IdP metadata.

    Deletes rows whose URL no longer appears in the configured metadata
    files, creates rows for newly configured providers, and refreshes the
    stored metadata when it changed on disk.
    """
    providers = load_providers()
    current_idps = list(models.IdentityProvider.objects.all().only('url', 'pk'))
    backend_urls = set(providers.keys())
    # Delete providers that disappeared from the local metadata files.
    stale_idps = set(idp.pk for idp in current_idps if idp.url not in backend_urls)
    models.IdentityProvider.objects.filter(pk__in=stale_idps).delete()
    existing_urls = set(idp.url for idp in current_idps)
    for url, metadata in providers.items():
        name = get_idp_name(metadata)
        if not name:
            # It is expected that every provider has name. For corner cases check entity_id
            name = metadata.get('entity_id')
        if not name:
            # Skip invalid identity provider
            continue
        if url in existing_urls:
            # Skip identity provider if its url is already in the database
            continue
        models.IdentityProvider.objects.create(url=url, name=name, metadata=metadata)
    # Refresh stored metadata for providers whose files changed.
    for provider in models.IdentityProvider.objects.all().iterator():
        backend_metadata = providers.get(provider.url)
        if backend_metadata and provider.metadata != backend_metadata:
            provider.metadata = backend_metadata
            provider.save()
def is_valid_idp(value):
    """Return True when *value* matches a remotely or locally known IdP."""
    if value in available_idps(get_config()):
        return True
    return models.IdentityProvider.objects.filter(url=value).exists()
def get_idp_sso_supported_bindings(idp_entity_id, config):
    """Return the SSO binding names the given IdP advertises.

    Returns an empty list when the entity is unknown or the metadata has
    no IdP SSO descriptor.
    """
    try:
        return config.metadata.service(
            idp_entity_id, 'idpsso_descriptor', 'single_sign_on_service'
        ).keys()
    except (UnknownSystemEntity, AttributeError):
        return []
class DatabaseMetadataLoader(InMemoryMetaData):
    """Metadata store backed by IdentityProvider rows instead of files."""

    def load(self, *args, **kwargs):
        """No-op: metadata lives in the database, not in a file to parse."""
        pass

    def __getitem__(self, item):
        """Look up stored metadata by IdP URL, raising KeyError when absent."""
        try:
            provider = models.IdentityProvider.objects.get(url=item)
        except ObjectDoesNotExist:
            raise KeyError
        return provider.metadata
| 0 | 0 |
a8f717c691e08e576daf5d6b539ccd45bbb8b08f | 2,114 | py | Python | src/blockdiag/imagedraw/utils/__init__.py | Dridi/blockdiag | bbb16f8a731cdf79a675a63c1ff847e70fdc4a5b | [
"Apache-2.0"
] | null | null | null | src/blockdiag/imagedraw/utils/__init__.py | Dridi/blockdiag | bbb16f8a731cdf79a675a63c1ff847e70fdc4a5b | [
"Apache-2.0"
] | null | null | null | src/blockdiag/imagedraw/utils/__init__.py | Dridi/blockdiag | bbb16f8a731cdf79a675a63c1ff847e70fdc4a5b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2011 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import unicodedata
from functools import wraps
from blockdiag.utils import Size
from blockdiag.utils.compat import u
def is_zenkaku(char):
    """Detect given character is Japanese ZENKAKU (full-width) character.

    Full-width characters have an east-asian width of Wide ('W'),
    Fullwidth ('F') or Ambiguous ('A').
    """
    char_width = unicodedata.east_asian_width(char)
    # Plain str literal: the py2-era u() compat wrapper is a no-op for
    # this ASCII string on both Python 2 and 3.
    return char_width in "WFA"
def zenkaku_len(string):
    """Count Japanese ZENKAKU characters from string"""
    return sum(1 for char in string if is_zenkaku(char))
def hankaku_len(string):
    """Count non Japanese ZENKAKU characters from string"""
    return sum(1 for char in string if not is_zenkaku(char))
def string_width(string):
    """Measure rendering width of string.

    ZENKAKU (wide/fullwidth/ambiguous) characters count as 2 points and
    every other character as 1 point.
    """
    width = 0
    for char in string:
        if unicodedata.east_asian_width(char) in ('W', 'F', 'A'):
            width += 2
        else:
            width += 1
    return width
def textsize(string, font):
    """Measure rendering size (width and height) of line.
    Returned size will not be exactly as rendered text size,
    Because this method does not use fonts to measure size.
    """
    # Approximation: a ZENKAKU character spans one em (font.size points),
    # any other character 0.55 em.
    width = (zenkaku_len(string) * font.size +
             hankaku_len(string) * font.size * 0.55)
    # A single line is exactly one font-size tall.
    return Size(int(math.ceil(width)), font.size)
def memoize(fn):
    """Cache fn's results on the function itself, keyed by stringified args."""
    fn.cache = {}

    @wraps(fn)
    def wrapper(*args, **kwargs):
        cache_key = str(args) + str(kwargs)
        try:
            return fn.cache[cache_key]
        except KeyError:
            result = fn.cache[cache_key] = fn(*args, **kwargs)
            return result
    return wrapper
| 30.2 | 78 | 0.682119 | # -*- coding: utf-8 -*-
# Copyright 2011 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import unicodedata
from functools import wraps
from blockdiag.utils import Size
from blockdiag.utils.compat import u
def is_zenkaku(char):
    """Detect given character is Japanese ZENKAKU (full-width) character.

    Full-width characters have an east-asian width of Wide ('W'),
    Fullwidth ('F') or Ambiguous ('A').
    """
    char_width = unicodedata.east_asian_width(char)
    # Plain str literal: the py2-era u() compat wrapper is a no-op for
    # this ASCII string on both Python 2 and 3.
    return char_width in "WFA"
def zenkaku_len(string):
    """Count Japanese ZENKAKU characters from string"""
    return sum(1 for char in string if is_zenkaku(char))
def hankaku_len(string):
    """Count non Japanese ZENKAKU characters from string"""
    return sum(1 for char in string if not is_zenkaku(char))
def string_width(string):
    """Measure rendering width of string.

    ZENKAKU (wide/fullwidth/ambiguous) characters count as 2 points and
    every other character as 1 point.
    """
    width = 0
    for char in string:
        if unicodedata.east_asian_width(char) in ('W', 'F', 'A'):
            width += 2
        else:
            width += 1
    return width
def textsize(string, font):
    """Measure rendering size (width and height) of line.
    Returned size will not be exactly as rendered text size,
    Because this method does not use fonts to measure size.
    """
    # Approximation: a ZENKAKU character spans one em (font.size points),
    # any other character 0.55 em.
    width = (zenkaku_len(string) * font.size +
             hankaku_len(string) * font.size * 0.55)
    # A single line is exactly one font-size tall.
    return Size(int(math.ceil(width)), font.size)
def memoize(fn):
    """Cache fn's results on the function itself, keyed by stringified args."""
    fn.cache = {}

    @wraps(fn)
    def wrapper(*args, **kwargs):
        cache_key = str(args) + str(kwargs)
        try:
            return fn.cache[cache_key]
        except KeyError:
            result = fn.cache[cache_key] = fn(*args, **kwargs)
            return result
    return wrapper
| 0 | 0 |
4b72d9fce27cf73da17b49970b55c087c4464e31 | 362 | py | Python | telefones/models.py | projeto-agro-tcc/osvaldo-backend | 7e8b6b2ed849cd54f0bb5b855c4016fa062d3c33 | [
"MIT"
] | null | null | null | telefones/models.py | projeto-agro-tcc/osvaldo-backend | 7e8b6b2ed849cd54f0bb5b855c4016fa062d3c33 | [
"MIT"
] | null | null | null | telefones/models.py | projeto-agro-tcc/osvaldo-backend | 7e8b6b2ed849cd54f0bb5b855c4016fa062d3c33 | [
"MIT"
] | null | null | null | from django.db import models
class Telefone(models.Model):
residencial = models.CharField(max_length=14, null=True, blank=True)
celular = models.CharField(max_length=12, null=False)
outro = models.CharField(max_length=14, null=True, blank=True)
class Meta:
db_table = "en_telefones"
def __str__(self):
return self.celular
| 25.857143 | 72 | 0.70442 | from django.db import models
class Telefone(models.Model):
residencial = models.CharField(max_length=14, null=True, blank=True)
celular = models.CharField(max_length=12, null=False)
outro = models.CharField(max_length=14, null=True, blank=True)
class Meta:
db_table = "en_telefones"
def __str__(self):
return self.celular
| 0 | 0 |
5a4d29d31fc8b9261b5b5f65d7bb0b5cb3b90e4d | 4,639 | py | Python | xt/framework/comm/comm_conf.py | ZZHsunsky/xingtian | 0484e2c968d9e6b2e5f43a3b86c0213a095ba309 | [
"MIT"
] | null | null | null | xt/framework/comm/comm_conf.py | ZZHsunsky/xingtian | 0484e2c968d9e6b2e5f43a3b86c0213a095ba309 | [
"MIT"
] | null | null | null | xt/framework/comm/comm_conf.py | ZZHsunsky/xingtian | 0484e2c968d9e6b2e5f43a3b86c0213a095ba309 | [
"MIT"
] | null | null | null | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import socket
import time
from subprocess import Popen
import redis
MAX_ACTOR_NUM = 40
MAX_LEARNER_NUM = 10
START_PORT = 20000
PORTNUM_PERLEARNER = MAX_ACTOR_NUM + 1
# redisredis,
class CommConf(object):
def __init__(self):
try:
redis.Redis(host="127.0.0.1", port=6379, db=0).ping()
except redis.ConnectionError:
Popen("echo save '' | setsid redis-server -", shell=True)
time.sleep(0.3)
self.redis = redis.Redis(host="127.0.0.1", port=6379, db=0)
self.pool_name = "port_pool"
if not self.redis.exists(self.pool_name):
self.init_portpool()
def init_portpool(self):
''' init port pool '''
start_port = START_PORT
try_num = 10
for _ in range(MAX_LEARNER_NUM):
for _ in range(try_num):
check_flag, next_port = self.check_learner_port(start_port)
if not check_flag:
break
else:
start_port = next_port
self.redis.lpush(self.pool_name, start_port)
self.redis.incr('port_num', amount=1)
self.redis.incr('max_port_num', amount=1)
start_port = next_port
def get_start_port(self):
''' get start port '''
if int(self.redis.get('port_num')) == 0:
raise Exception("Dont have available port")
start_port = self.redis.lpop(self.pool_name)
self.redis.decr('port_num', amount=1)
return int(start_port)
def release_start_port(self, start_port):
''' release start port '''
self.redis.lpush(self.pool_name, start_port)
self.redis.incr('port_num', amount=1)
if self.redis.get('port_num') == self.redis.get('max_port_num'):
self.redis.delete('port_num')
self.redis.delete('max_port_num')
self.redis.delete('port_pool')
print("shutdown redis")
self.redis.shutdown(nosave=True)
return
def check_learner_port(self, start_port):
''' check if multi-port is in use '''
ip = "localhost"
for i in range(PORTNUM_PERLEARNER):
if self.check_port(ip, start_port + i):
return True, start_port + i + 1
return False, start_port + PORTNUM_PERLEARNER
def check_port(self, ip, port):
''' check if port is in use '''
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip, int(port)))
s.shutdown(2)
print("port is used", int(port))
return True
except BaseException:
return False
def get_port(start_port):
''' get port used by module '''
predict_port = start_port + 1
if (predict_port + MAX_ACTOR_NUM - start_port) > PORTNUM_PERLEARNER:
raise Exception("port num is not enough")
return start_port, predict_port
def test():
''' test interface'''
test_comm_conf = CommConf()
redis_key = 'port_pool'
print("{} len: {}".format(redis_key, test_comm_conf.redis.llen(redis_key)))
for _ in range(test_comm_conf.redis.llen(redis_key)):
pop_val = test_comm_conf.redis.lpop(redis_key)
print("pop val: {} from '{}'".format(pop_val, redis_key))
start = time.time()
test_comm_conf.init_portpool()
print("use time", time.time() - start)
train_port = get_port(20000)
print(train_port)
if __name__ == "__main__":
test()
| 34.362963 | 79 | 0.64432 | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import socket
import time
from subprocess import Popen
import redis
MAX_ACTOR_NUM = 40
MAX_LEARNER_NUM = 10
START_PORT = 20000
PORTNUM_PERLEARNER = MAX_ACTOR_NUM + 1
# 初始化,查看redis,连接redis, 生成端口池,即检测端口号哪些可用
class CommConf(object):
def __init__(self):
try:
redis.Redis(host="127.0.0.1", port=6379, db=0).ping()
except redis.ConnectionError:
Popen("echo save '' | setsid redis-server -", shell=True)
time.sleep(0.3)
self.redis = redis.Redis(host="127.0.0.1", port=6379, db=0)
self.pool_name = "port_pool"
if not self.redis.exists(self.pool_name):
self.init_portpool()
def init_portpool(self):
''' init port pool '''
start_port = START_PORT
try_num = 10
for _ in range(MAX_LEARNER_NUM):
for _ in range(try_num):
check_flag, next_port = self.check_learner_port(start_port)
if not check_flag:
break
else:
start_port = next_port
self.redis.lpush(self.pool_name, start_port)
self.redis.incr('port_num', amount=1)
self.redis.incr('max_port_num', amount=1)
start_port = next_port
def get_start_port(self):
''' get start port '''
if int(self.redis.get('port_num')) == 0:
raise Exception("Dont have available port")
start_port = self.redis.lpop(self.pool_name)
self.redis.decr('port_num', amount=1)
return int(start_port)
def release_start_port(self, start_port):
''' release start port '''
self.redis.lpush(self.pool_name, start_port)
self.redis.incr('port_num', amount=1)
if self.redis.get('port_num') == self.redis.get('max_port_num'):
self.redis.delete('port_num')
self.redis.delete('max_port_num')
self.redis.delete('port_pool')
print("shutdown redis")
self.redis.shutdown(nosave=True)
return
def check_learner_port(self, start_port):
''' check if multi-port is in use '''
ip = "localhost"
for i in range(PORTNUM_PERLEARNER):
if self.check_port(ip, start_port + i):
return True, start_port + i + 1
return False, start_port + PORTNUM_PERLEARNER
def check_port(self, ip, port):
''' check if port is in use '''
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((ip, int(port)))
s.shutdown(2)
print("port is used", int(port))
return True
except BaseException:
return False
def get_port(start_port):
''' get port used by module '''
predict_port = start_port + 1
if (predict_port + MAX_ACTOR_NUM - start_port) > PORTNUM_PERLEARNER:
raise Exception("port num is not enough")
return start_port, predict_port
def test():
''' test interface'''
test_comm_conf = CommConf()
redis_key = 'port_pool'
print("{} len: {}".format(redis_key, test_comm_conf.redis.llen(redis_key)))
for _ in range(test_comm_conf.redis.llen(redis_key)):
pop_val = test_comm_conf.redis.lpop(redis_key)
print("pop val: {} from '{}'".format(pop_val, redis_key))
start = time.time()
test_comm_conf.init_portpool()
print("use time", time.time() - start)
train_port = get_port(20000)
print(train_port)
if __name__ == "__main__":
test()
| 75 | 0 |
3e25d2bb70c9499de1f4cb505fe2880342dc5c50 | 2,572 | py | Python | python/nbdb/anomaly/static.py | rubrikinc/nbdb2 | 359db63a39e016e3eb197b8ea511d6e8cffa1853 | [
"Apache-2.0"
] | 2 | 2022-03-21T15:48:33.000Z | 2022-03-27T00:43:12.000Z | python/nbdb/anomaly/static.py | rubrikinc/nbdb2 | 359db63a39e016e3eb197b8ea511d6e8cffa1853 | [
"Apache-2.0"
] | null | null | null | python/nbdb/anomaly/static.py | rubrikinc/nbdb2 | 359db63a39e016e3eb197b8ea511d6e8cffa1853 | [
"Apache-2.0"
] | 1 | 2022-03-27T00:43:31.000Z | 2022-03-27T00:43:31.000Z | """
Static threshold based anomaly detection
"""
from typing import List, Tuple
import logging
import numpy as np
import pandas as pd
from nbdb.anomaly.anomaly_interface import AnomalyInterface
from nbdb.readapi.graphite_response import Anomaly
from nbdb.readapi.time_series_response import TimeRange
logger = logging.getLogger(__name__)
class Static(AnomalyInterface): # pylint: disable=too-few-public-methods
"""
Simple algorithm to do threshold based anomaly detection.
Currently supports two functions (lt, gt).
"""
def find_anomalies(self,
baseline: np.ndarray,
raw_data: pd.Series) -> List[Tuple]:
"""
Use static threshold to determine anomalies in the
raw data. Supports the lt, gt functions to compare
against the threshold
:param baseline:
:param raw_data:
:return:
"""
comparator_fn = self.config.get('comparator_fn', 'gt')
threshold = self.config.get('threshold')
raw_data.dropna(inplace=True)
if comparator_fn == 'gt':
anomalous_points = raw_data[raw_data > threshold]
elif comparator_fn == 'lt':
anomalous_points = raw_data[raw_data < threshold]
else:
raise NotImplementedError('Unknown comparator fn: {}'.format(
comparator_fn))
anomalies = []
# No anomalous points found. Return early
if len(anomalous_points) == 0:
return anomalies
previous_epoch = anomalous_points.index[0]
anomaly_start = anomalous_points.index[0]
sampling_interval = np.diff(raw_data.index).min()
anomaly_score = 1.0
epoch = None
for epoch, _ in anomalous_points.iteritems():
if (epoch - previous_epoch) / sampling_interval > 1:
# Mark the current anomaly as ended and start a new one
anomaly_window = TimeRange(anomaly_start, previous_epoch,
sampling_interval)
anomalies.append(Anomaly(anomaly_window, anomaly_score))
anomaly_score = 1.0
anomaly_start = epoch
else:
previous_epoch = epoch
anomaly_score += 1
# append the final anomaly
if epoch is not None:
anomaly_window = TimeRange(anomaly_start, epoch,
sampling_interval)
anomalies.append(Anomaly(anomaly_window, anomaly_score))
return anomalies
| 35.232877 | 73 | 0.613919 | """
Static threshold based anomaly detection
"""
from typing import List, Tuple
import logging
import numpy as np
import pandas as pd
from nbdb.anomaly.anomaly_interface import AnomalyInterface
from nbdb.readapi.graphite_response import Anomaly
from nbdb.readapi.time_series_response import TimeRange
logger = logging.getLogger(__name__)
class Static(AnomalyInterface): # pylint: disable=too-few-public-methods
"""
Simple algorithm to do threshold based anomaly detection.
Currently supports two functions (lt, gt).
"""
def find_anomalies(self,
baseline: np.ndarray,
raw_data: pd.Series) -> List[Tuple]:
"""
Use static threshold to determine anomalies in the
raw data. Supports the lt, gt functions to compare
against the threshold
:param baseline:
:param raw_data:
:return:
"""
comparator_fn = self.config.get('comparator_fn', 'gt')
threshold = self.config.get('threshold')
raw_data.dropna(inplace=True)
if comparator_fn == 'gt':
anomalous_points = raw_data[raw_data > threshold]
elif comparator_fn == 'lt':
anomalous_points = raw_data[raw_data < threshold]
else:
raise NotImplementedError('Unknown comparator fn: {}'.format(
comparator_fn))
anomalies = []
# No anomalous points found. Return early
if len(anomalous_points) == 0:
return anomalies
previous_epoch = anomalous_points.index[0]
anomaly_start = anomalous_points.index[0]
sampling_interval = np.diff(raw_data.index).min()
anomaly_score = 1.0
epoch = None
for epoch, _ in anomalous_points.iteritems():
if (epoch - previous_epoch) / sampling_interval > 1:
# Mark the current anomaly as ended and start a new one
anomaly_window = TimeRange(anomaly_start, previous_epoch,
sampling_interval)
anomalies.append(Anomaly(anomaly_window, anomaly_score))
anomaly_score = 1.0
anomaly_start = epoch
else:
previous_epoch = epoch
anomaly_score += 1
# append the final anomaly
if epoch is not None:
anomaly_window = TimeRange(anomaly_start, epoch,
sampling_interval)
anomalies.append(Anomaly(anomaly_window, anomaly_score))
return anomalies
| 0 | 0 |
9288bc7c0b122d032f93019718b7a23eb2c872b0 | 1,598 | py | Python | tests/test_help.py | thomasvolk/R_ev3dev | 53b8c83af49e88eb4766deea0a690c55d1304d6a | [
"Apache-2.0"
] | null | null | null | tests/test_help.py | thomasvolk/R_ev3dev | 53b8c83af49e88eb4766deea0a690c55d1304d6a | [
"Apache-2.0"
] | null | null | null | tests/test_help.py | thomasvolk/R_ev3dev | 53b8c83af49e88eb4766deea0a690c55d1304d6a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import unittest
from R_ev3dev.interpreter import Interpreter, Command
from R_ev3dev.help import Help, Version
class TestCommand01(Command):
""" this is the test command 01
usage:
c01
"""
def invoke(self, interpreter_context, args):
return 1
class TestCommand02(Command):
""" this is the test command 02
"""
def invoke(self, interpreter_context, args):
return 2
class TestCommand03(Command):
def invoke(self, interpreter_context, args):
return 3
class TestHelp(unittest.TestCase):
def setUp(self):
self.interpreter = Interpreter([
TestCommand01('c01'),
TestCommand02('c02'),
TestCommand03('c03'),
Help('help'),
Version('version')
])
def test_overview(self):
self.assertEqual("""---
R_ev3 protocol language version 0.0.1
author: Thomas Volk
license: Apache License Version 2.0
source: https://github.com/thomasvolk/R_ev3dev
possible commands:
c01 - this is the test command 01
c02 - this is the test command 02
c03 -
help - show help
version - show version
use help <command> for details
---""", self.interpreter.evaluate_internal("help").value)
def test_help(self):
self.assertEqual("""---
c01
this is the test command 01
usage:
c01
---""", self.interpreter.evaluate_internal("help c01").value)
def test_version(self):
self.assertEqual('0.0.1', self.interpreter.evaluate_internal("version").value) | 21.594595 | 86 | 0.628911 | #!/usr/bin/env python3
import unittest
from R_ev3dev.interpreter import Interpreter, Command
from R_ev3dev.help import Help, Version
class TestCommand01(Command):
""" this is the test command 01
usage:
c01
"""
def invoke(self, interpreter_context, args):
return 1
class TestCommand02(Command):
""" this is the test command 02
"""
def invoke(self, interpreter_context, args):
return 2
class TestCommand03(Command):
def invoke(self, interpreter_context, args):
return 3
class TestHelp(unittest.TestCase):
def setUp(self):
self.interpreter = Interpreter([
TestCommand01('c01'),
TestCommand02('c02'),
TestCommand03('c03'),
Help('help'),
Version('version')
])
def test_overview(self):
self.assertEqual("""---
R_ev3 protocol language version 0.0.1
author: Thomas Volk
license: Apache License Version 2.0
source: https://github.com/thomasvolk/R_ev3dev
possible commands:
c01 - this is the test command 01
c02 - this is the test command 02
c03 -
help - show help
version - show version
use help <command> for details
---""", self.interpreter.evaluate_internal("help").value)
def test_help(self):
self.assertEqual("""---
c01
this is the test command 01
usage:
c01
---""", self.interpreter.evaluate_internal("help c01").value)
def test_version(self):
self.assertEqual('0.0.1', self.interpreter.evaluate_internal("version").value) | 0 | 0 |
0cb3d0dd6f38e1ffc07fd4e85e3458786f9cf6d8 | 420 | py | Python | news/urls.py | vigen-b/FakeNews | fc19f623529d1661c9f3d475adc9db98ee95a38a | [
"Apache-2.0"
] | null | null | null | news/urls.py | vigen-b/FakeNews | fc19f623529d1661c9f3d475adc9db98ee95a38a | [
"Apache-2.0"
] | null | null | null | news/urls.py | vigen-b/FakeNews | fc19f623529d1661c9f3d475adc9db98ee95a38a | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from news import views
app_name = "news"
urlpatterns = [
path("news/", views.NewsList.as_view()),
path("news/<int:pk>/", views.NewsDetail.as_view()),
path("category/", views.CategoryList.as_view()),
path("category/<str:pk>/", views.CategoryDetail.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| 30 | 63 | 0.735714 | from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from news import views
app_name = "news"
urlpatterns = [
path("news/", views.NewsList.as_view()),
path("news/<int:pk>/", views.NewsDetail.as_view()),
path("category/", views.CategoryList.as_view()),
path("category/<str:pk>/", views.CategoryDetail.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| 0 | 0 |
8a0003c5108f33e3f1329656eaa782586e2568a7 | 9,993 | py | Python | realtime_hand_3d/segmentation/models/csm.py | NeelayS/realtime_hand | 219c772b9b7df60c390edac7da23f9cdddebca4d | [
"MIT"
] | null | null | null | realtime_hand_3d/segmentation/models/csm.py | NeelayS/realtime_hand | 219c772b9b7df60c390edac7da23f9cdddebca4d | [
"MIT"
] | null | null | null | realtime_hand_3d/segmentation/models/csm.py | NeelayS/realtime_hand | 219c772b9b7df60c390edac7da23f9cdddebca4d | [
"MIT"
] | null | null | null | import torch
from torch import nn
from .retrieve import SEG_MODELS_REGISTRY
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_channels, out_channels, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=stride,
bias=False,
padding=1,
)
self.bn2 = nn.BatchNorm2d(out_channels)
self.conv3 = nn.Conv2d(
out_channels, out_channels * self.expansion, kernel_size=1, bias=False
)
self.bn3 = nn.BatchNorm2d(out_channels * self.expansion)
self.relu = nn.ReLU()
self.downsample = downsample
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.relu(out)
if self.downsample is not None:
shortcut = self.downsample(x)
out += shortcut
out = self.relu(out)
return out
class DeconvBottleneck(nn.Module):
def __init__(self, in_channels, out_channels, expansion=2, stride=1, upsample=None):
super(DeconvBottleneck, self).__init__()
self.expansion = expansion
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
if stride == 1:
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=stride,
bias=False,
padding=1,
)
else:
self.conv2 = nn.ConvTranspose2d(
out_channels,
out_channels,
kernel_size=3,
stride=stride,
bias=False,
padding=1,
output_padding=1,
)
self.bn2 = nn.BatchNorm2d(out_channels)
self.conv3 = nn.Conv2d(
out_channels, out_channels * self.expansion, kernel_size=1, bias=False
)
self.bn3 = nn.BatchNorm2d(out_channels * self.expansion)
self.relu = nn.ReLU()
self.upsample = upsample
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.relu(out)
if self.upsample is not None:
shortcut = self.upsample(x)
out += shortcut
out = self.relu(out)
return out
class CSM_model(nn.Module):
def __init__(
self, downblock, upblock, in_channels, n_classes, with_energy=True, n_stages=2
):
super(CSM_model, self).__init__()
self.start_channels = 32
self.n_classes = n_classes
self.n_stages = n_stages
self.with_energy = with_energy
down_layer_size = 3
up_layer_size = 3
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
self.conv1 = nn.Conv2d(
in_channels, 32, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = nn.BatchNorm2d(32)
self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
self.dlayer1 = self._make_downlayer(downblock, 32, down_layer_size)
self.dlayer2 = self._make_downlayer(downblock, 64, down_layer_size, stride=2)
self.dlayer3 = self._make_downlayer(downblock, 128, down_layer_size, stride=2)
self.dlayer4 = self._make_downlayer(downblock, 256, down_layer_size, stride=2)
# stage1
if self.n_stages >= 1 or self.n_stages == -1:
self.uplayer1_1 = self._make_up_block(upblock, 256, up_layer_size, stride=2)
self.uplayer2_1 = self._make_up_block(upblock, 128, up_layer_size, stride=2)
upsample_1 = nn.Sequential(
nn.ConvTranspose2d(
self.start_channels,
32,
kernel_size=1,
stride=2,
bias=False,
output_padding=1,
),
nn.BatchNorm2d(32),
)
self.uplayer_stage_1 = DeconvBottleneck(
self.start_channels, 32, 1, 2, upsample_1
)
self.conv_seg_out_1 = nn.Conv2d(
32, n_classes, kernel_size=1, stride=1, bias=False
)
if self.with_energy:
self.conv_e_out_1 = nn.Conv2d(
32, n_classes, kernel_size=1, stride=1, bias=False
)
# stage2
if self.n_stages >= 2 or self.n_stages == -1:
self.uplayer1_2 = self._make_up_block(upblock, 64, up_layer_size, stride=2)
if self.with_energy:
self.post_cat_2 = nn.Conv2d(
134, 128, kernel_size=1, stride=1, bias=False
)
else:
self.post_cat_2 = nn.Conv2d(
131, 128, kernel_size=1, stride=1, bias=False
)
self.bn_2 = nn.BatchNorm2d(128)
self.uplayer2_2 = self._make_up_block(upblock, 32, up_layer_size)
upsample_2 = nn.Sequential(
nn.ConvTranspose2d(
self.start_channels,
32,
kernel_size=1,
stride=2,
bias=False,
output_padding=1,
),
nn.BatchNorm2d(32),
)
self.uplayer_stage_2 = DeconvBottleneck(64, 32, 1, 2, upsample_2)
self.conv_seg_out_2 = nn.Conv2d(
32, n_classes, kernel_size=1, stride=1, bias=False
)
if self.with_energy:
self.conv_e_out_2 = nn.Conv2d(
32, n_classes, kernel_size=1, stride=1, bias=False
)
def _make_downlayer(self, block, init_channels, num_layer, stride=1):
downsample = None
if stride != 1 or self.start_channels != init_channels * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.start_channels,
init_channels * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(init_channels * block.expansion),
)
layers = []
layers.append(block(self.start_channels, init_channels, stride, downsample))
self.start_channels = init_channels * block.expansion
for i in range(1, num_layer):
layers.append(block(self.start_channels, init_channels))
return nn.Sequential(*layers)
def _make_up_block(self, block, init_channels, num_layer, stride=1):
upsample = None
if stride != 1 or self.start_channels != init_channels * 2:
if stride == 1:
output_padding = 0
else:
output_padding = 1
upsample = nn.Sequential(
nn.ConvTranspose2d(
self.start_channels,
init_channels * 2,
kernel_size=1,
stride=stride,
bias=False,
output_padding=output_padding,
), # 1),
nn.BatchNorm2d(init_channels * 2),
)
layers = []
for i in range(1, num_layer):
layers.append(block(self.start_channels, init_channels, 4))
layers.append(block(self.start_channels, init_channels, 2, stride, upsample))
self.start_channels = init_channels * 2
return nn.Sequential(*layers)
def forward(self, x):
img = x
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.avgpool(x)
x = self.dlayer1(x)
x = self.dlayer2(x)
x = self.dlayer3(x)
x = self.dlayer4(x)
# Mid
x = self.uplayer1_1(x)
x_mid = self.uplayer2_1(x)
# Stage 1
x_stage1 = self.uplayer_stage_1(x_mid)
x_seg_out1 = self.conv_seg_out_1(x_stage1)
x_hands1 = x_seg_out1
if self.with_energy:
x_e_out1 = self.sigmoid(self.conv_e_out_1(x_stage1))
if self.n_stages == 1:
if self.with_energy:
return x_hands1, x_e_out1
else:
return x_hands1
# stage2
x_mid2 = self.uplayer1_2(x_mid)
if self.with_energy:
x = torch.cat([x_mid2, x_seg_out1, x_e_out1], dim=1)
else:
x = torch.cat([x_mid2, x_seg_out1], dim=1)
x = self.post_cat_2(x)
x = self.bn_2(x)
x = self.relu(x)
x = self.uplayer2_2(x)
x = self.uplayer_stage_2(x)
x_seg_out2 = self.conv_seg_out_2(x)
x_hands2 = x_seg_out2
if self.with_energy:
x_e_out2 = self.sigmoid(self.conv_e_out_2(x))
if self.n_stages == 2:
if self.with_energy:
return x_e_out2, x_hands2
else:
return x_hands2
else:
if self.with_energy:
return x_hands1, x_e_out1, x_hands2, x_e_out2
else:
return x_hands1, x_hands2
@SEG_MODELS_REGISTRY.register()
def CSM(in_channels=3, n_classes=3, **kwargs):
return CSM_model(
Bottleneck,
DeconvBottleneck,
in_channels=in_channels,
n_classes=n_classes,
**kwargs
)
| 32.339806 | 88 | 0.536976 | import torch
from torch import nn
from .retrieve import SEG_MODELS_REGISTRY
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_channels, out_channels, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=stride,
bias=False,
padding=1,
)
self.bn2 = nn.BatchNorm2d(out_channels)
self.conv3 = nn.Conv2d(
out_channels, out_channels * self.expansion, kernel_size=1, bias=False
)
self.bn3 = nn.BatchNorm2d(out_channels * self.expansion)
self.relu = nn.ReLU()
self.downsample = downsample
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.relu(out)
if self.downsample is not None:
shortcut = self.downsample(x)
out += shortcut
out = self.relu(out)
return out
class DeconvBottleneck(nn.Module):
def __init__(self, in_channels, out_channels, expansion=2, stride=1, upsample=None):
super(DeconvBottleneck, self).__init__()
self.expansion = expansion
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
if stride == 1:
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=stride,
bias=False,
padding=1,
)
else:
self.conv2 = nn.ConvTranspose2d(
out_channels,
out_channels,
kernel_size=3,
stride=stride,
bias=False,
padding=1,
output_padding=1,
)
self.bn2 = nn.BatchNorm2d(out_channels)
self.conv3 = nn.Conv2d(
out_channels, out_channels * self.expansion, kernel_size=1, bias=False
)
self.bn3 = nn.BatchNorm2d(out_channels * self.expansion)
self.relu = nn.ReLU()
self.upsample = upsample
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.relu(out)
if self.upsample is not None:
shortcut = self.upsample(x)
out += shortcut
out = self.relu(out)
return out
class CSM_model(nn.Module):
def __init__(
self, downblock, upblock, in_channels, n_classes, with_energy=True, n_stages=2
):
super(CSM_model, self).__init__()
self.start_channels = 32
self.n_classes = n_classes
self.n_stages = n_stages
self.with_energy = with_energy
down_layer_size = 3
up_layer_size = 3
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
self.conv1 = nn.Conv2d(
in_channels, 32, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = nn.BatchNorm2d(32)
self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
self.dlayer1 = self._make_downlayer(downblock, 32, down_layer_size)
self.dlayer2 = self._make_downlayer(downblock, 64, down_layer_size, stride=2)
self.dlayer3 = self._make_downlayer(downblock, 128, down_layer_size, stride=2)
self.dlayer4 = self._make_downlayer(downblock, 256, down_layer_size, stride=2)
# stage1
if self.n_stages >= 1 or self.n_stages == -1:
self.uplayer1_1 = self._make_up_block(upblock, 256, up_layer_size, stride=2)
self.uplayer2_1 = self._make_up_block(upblock, 128, up_layer_size, stride=2)
upsample_1 = nn.Sequential(
nn.ConvTranspose2d(
self.start_channels,
32,
kernel_size=1,
stride=2,
bias=False,
output_padding=1,
),
nn.BatchNorm2d(32),
)
self.uplayer_stage_1 = DeconvBottleneck(
self.start_channels, 32, 1, 2, upsample_1
)
self.conv_seg_out_1 = nn.Conv2d(
32, n_classes, kernel_size=1, stride=1, bias=False
)
if self.with_energy:
self.conv_e_out_1 = nn.Conv2d(
32, n_classes, kernel_size=1, stride=1, bias=False
)
# stage2
if self.n_stages >= 2 or self.n_stages == -1:
self.uplayer1_2 = self._make_up_block(upblock, 64, up_layer_size, stride=2)
if self.with_energy:
self.post_cat_2 = nn.Conv2d(
134, 128, kernel_size=1, stride=1, bias=False
)
else:
self.post_cat_2 = nn.Conv2d(
131, 128, kernel_size=1, stride=1, bias=False
)
self.bn_2 = nn.BatchNorm2d(128)
self.uplayer2_2 = self._make_up_block(upblock, 32, up_layer_size)
upsample_2 = nn.Sequential(
nn.ConvTranspose2d(
self.start_channels,
32,
kernel_size=1,
stride=2,
bias=False,
output_padding=1,
),
nn.BatchNorm2d(32),
)
self.uplayer_stage_2 = DeconvBottleneck(64, 32, 1, 2, upsample_2)
self.conv_seg_out_2 = nn.Conv2d(
32, n_classes, kernel_size=1, stride=1, bias=False
)
if self.with_energy:
self.conv_e_out_2 = nn.Conv2d(
32, n_classes, kernel_size=1, stride=1, bias=False
)
def _make_downlayer(self, block, init_channels, num_layer, stride=1):
downsample = None
if stride != 1 or self.start_channels != init_channels * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.start_channels,
init_channels * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(init_channels * block.expansion),
)
layers = []
layers.append(block(self.start_channels, init_channels, stride, downsample))
self.start_channels = init_channels * block.expansion
for i in range(1, num_layer):
layers.append(block(self.start_channels, init_channels))
return nn.Sequential(*layers)
def _make_up_block(self, block, init_channels, num_layer, stride=1):
upsample = None
if stride != 1 or self.start_channels != init_channels * 2:
if stride == 1:
output_padding = 0
else:
output_padding = 1
upsample = nn.Sequential(
nn.ConvTranspose2d(
self.start_channels,
init_channels * 2,
kernel_size=1,
stride=stride,
bias=False,
output_padding=output_padding,
), # 1),
nn.BatchNorm2d(init_channels * 2),
)
layers = []
for i in range(1, num_layer):
layers.append(block(self.start_channels, init_channels, 4))
layers.append(block(self.start_channels, init_channels, 2, stride, upsample))
self.start_channels = init_channels * 2
return nn.Sequential(*layers)
def forward(self, x):
img = x
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.avgpool(x)
x = self.dlayer1(x)
x = self.dlayer2(x)
x = self.dlayer3(x)
x = self.dlayer4(x)
# Mid
x = self.uplayer1_1(x)
x_mid = self.uplayer2_1(x)
# Stage 1
x_stage1 = self.uplayer_stage_1(x_mid)
x_seg_out1 = self.conv_seg_out_1(x_stage1)
x_hands1 = x_seg_out1
if self.with_energy:
x_e_out1 = self.sigmoid(self.conv_e_out_1(x_stage1))
if self.n_stages == 1:
if self.with_energy:
return x_hands1, x_e_out1
else:
return x_hands1
# stage2
x_mid2 = self.uplayer1_2(x_mid)
if self.with_energy:
x = torch.cat([x_mid2, x_seg_out1, x_e_out1], dim=1)
else:
x = torch.cat([x_mid2, x_seg_out1], dim=1)
x = self.post_cat_2(x)
x = self.bn_2(x)
x = self.relu(x)
x = self.uplayer2_2(x)
x = self.uplayer_stage_2(x)
x_seg_out2 = self.conv_seg_out_2(x)
x_hands2 = x_seg_out2
if self.with_energy:
x_e_out2 = self.sigmoid(self.conv_e_out_2(x))
if self.n_stages == 2:
if self.with_energy:
return x_e_out2, x_hands2
else:
return x_hands2
else:
if self.with_energy:
return x_hands1, x_e_out1, x_hands2, x_e_out2
else:
return x_hands1, x_hands2
@SEG_MODELS_REGISTRY.register()
def CSM(in_channels=3, n_classes=3, **kwargs):
    """Registry factory for the CSM segmentation model.

    Builds a ``CSM_model`` with the standard ``Bottleneck`` encoder and
    ``DeconvBottleneck`` decoder blocks; extra keyword arguments (e.g.
    ``n_stages``, ``with_energy``) are forwarded to the model constructor.
    """
    return CSM_model(
        Bottleneck,
        DeconvBottleneck,
        in_channels=in_channels,
        n_classes=n_classes,
        **kwargs
    )
| 0 | 0 |
9967baa443818e97fb20549f70a4bd20685b2cd4 | 5,239 | py | Python | bobstack/sipmessaging/sipMessage.py | bobjects/BobStack | c177b286075044832f44baf9ace201780c8b4320 | [
"Apache-2.0"
] | null | null | null | bobstack/sipmessaging/sipMessage.py | bobjects/BobStack | c177b286075044832f44baf9ace201780c8b4320 | [
"Apache-2.0"
] | null | null | null | bobstack/sipmessaging/sipMessage.py | bobjects/BobStack | c177b286075044832f44baf9ace201780c8b4320 | [
"Apache-2.0"
] | null | null | null | try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from sipHeader import SIPHeader
from sipStartLineFactory import SIPStartLineFactory
class SIPMessage(object):
    """Lazily-parsed representation of a SIP message.

    The message maintains two mutually-derived representations:

    - ``raw_string``: the complete on-the-wire text, and
    - the attributes ``start_line``, ``header`` and ``content``.

    Whichever representation was set most recently is authoritative; the
    other is (re)built on demand and cached until the next mutation.
    Setting ``raw_string`` invalidates the attributes and vice versa.
    """

    @classmethod
    def new_parsed_from(cls, a_string):
        """Answer a new message whose attributes are lazily parsed from a_string."""
        answer = cls()
        answer.raw_string = a_string
        return answer

    @classmethod
    def _new_for_attributes(cls, start_line=None, header=None, content=""):
        """Answer a new message built from attributes; raw_string is rendered lazily."""
        answer = cls()
        answer.start_line = start_line
        if header:
            answer.header = header
        else:
            # Default to an empty header so rendering always has something to emit.
            answer.header = SIPHeader.new_for_attributes(header_fields=None)
        answer.content = content
        return answer

    def __init__(self):
        # All caches start empty; they are populated on first access.
        self._content = None
        self._startLine = None
        self._header = None
        self._rawString = None

    @property
    def deep_copy(self):
        """Answer an independent copy re-parsed from this message's raw string."""
        return self.__class__.new_parsed_from(self.raw_string)

    @property
    def raw_string(self):
        """The full message text; rendered from attributes on demand."""
        if self._rawString is None:
            self.render_raw_string_from_attributes()
        return self._rawString

    @raw_string.setter
    def raw_string(self, a_string):
        # A new raw string invalidates any previously parsed attributes.
        self._rawString = a_string
        self.clear_attributes()

    @property
    def body(self):
        """Alias for content (RFC 3261 terminology: the message body)."""
        return self.content

    def clear_raw_string(self):
        """Drop the cached raw string (attributes become authoritative)."""
        self._rawString = None

    def clear_attributes(self):
        """Drop the cached attributes (raw string becomes authoritative)."""
        self._content = None
        self._startLine = None
        self._header = None

    def parse_attributes_from_raw_string(self):
        """Parse start line, header, and content out of the cached raw string."""
        self._content = ""
        string_io = StringIO(self._rawString)
        self._startLine = SIPStartLineFactory().next_for_stringio(string_io)
        self._header = SIPHeader.new_parsed_from(string_io)
        # Everything after the header is the body.
        self._content = string_io.read()
        string_io.close()

    def render_raw_string_from_attributes(self):
        """Render the raw string from start line, header, and content."""
        string_io = StringIO()
        string_io.write(self._startLine.raw_string)
        string_io.write("\r\n")
        self._header.render_raw_string_from_attributes(string_io)
        string_io.write(self._content)
        self._rawString = string_io.getvalue()
        string_io.close()

    @property
    def start_line(self):
        """The request/status line object, parsed on demand."""
        if self._startLine is None:
            self.parse_attributes_from_raw_string()
        return self._startLine

    @start_line.setter
    def start_line(self, a_sip_start_line):
        self._startLine = a_sip_start_line
        self.clear_raw_string()

    @property
    def header(self):
        """The SIPHeader object, parsed on demand."""
        if self._header is None:
            self.parse_attributes_from_raw_string()
        return self._header

    @header.setter
    def header(self, a_sip_header):
        self._header = a_sip_header
        self.clear_raw_string()

    @property
    def content(self):
        """The message body string, parsed on demand."""
        if self._content is None:
            self.parse_attributes_from_raw_string()
        return self._content

    @content.setter
    def content(self, a_string):
        self._content = a_string
        self.clear_raw_string()

    # Header delegation - convenience accessors for common header data.
    @property
    def vias(self):
        return self.header.vias

    @property
    def via_header_fields(self):
        return self.header.via_header_fields

    @property
    def route_uris(self):
        return self.header.route_uris

    @property
    def record_route_uris(self):
        return self.header.record_route_uris

    @property
    def transaction_hash(self):
        return self.header.transaction_hash

    @property
    def dialog_hash(self):
        return self.header.dialog_hash

    # TODO: This is a hot method.  Should we cache?
    @property
    def is_valid(self):
        """True when the message, its header, and Content-Length are consistent."""
        if self.is_malformed:
            return False
        if not self.header.is_valid:
            return False
        if self.header.content_length is not None:
            # A declared Content-Length must match the actual body length.
            if self.header.content_length != len(self.content):
                return False
        return True

    @property
    def is_invalid(self):
        return not self.is_valid

    @property
    def is_unknown(self):
        return not self.is_known

    # Classification predicates.  Subclasses for concrete methods/responses
    # override the predicate(s) that apply to them; the base answers False.
    @property
    def is_known(self):
        return False

    @property
    def is_malformed(self):
        return False

    @property
    def is_request(self):
        return False

    @property
    def is_response(self):
        return False

    @property
    def is_ack_request(self):
        return False

    @property
    def is_bye_request(self):
        return False

    @property
    def is_cancel_request(self):
        return False

    @property
    def is_info_request(self):
        return False

    @property
    def is_invite_request(self):
        return False

    @property
    def is_message_request(self):
        return False

    @property
    def is_notify_request(self):
        return False

    @property
    def is_options_request(self):
        return False

    @property
    def is_publish_request(self):
        return False

    @property
    def is_prack_request(self):
        return False

    @property
    def is_refer_request(self):
        return False

    @property
    def is_register_request(self):
        return False

    @property
    def is_subscribe_request(self):
        return False

    @property
    def is_update_request(self):
        return False
| 23.181416 | 76 | 0.640389 | try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from sipHeader import SIPHeader
from sipStartLineFactory import SIPStartLineFactory
class SIPMessage(object):
@classmethod
def new_parsed_from(cls, a_string):
answer = cls()
answer.raw_string = a_string
return answer
@classmethod
def _new_for_attributes(cls, start_line=None, header=None, content=""):
answer = cls()
answer.start_line = start_line
if header:
answer.header = header
else:
answer.header = SIPHeader.new_for_attributes(header_fields=None)
answer.content = content
return answer
def __init__(self):
self._content = None
self._startLine = None
self._header = None
self._rawString = None
@property
def deep_copy(self):
return self.__class__.new_parsed_from(self.raw_string)
@property
def raw_string(self):
if self._rawString is None:
self.render_raw_string_from_attributes()
return self._rawString
@raw_string.setter
def raw_string(self, a_string):
self._rawString = a_string
self.clear_attributes()
@property
def body(self):
return self.content
def clear_raw_string(self):
self._rawString = None
def clear_attributes(self):
self._content = None
self._startLine = None
self._header = None
def parse_attributes_from_raw_string(self):
self._content = ""
string_io = StringIO(self._rawString)
self._startLine = SIPStartLineFactory().next_for_stringio(string_io)
self._header = SIPHeader.new_parsed_from(string_io)
self._content = string_io.read()
string_io.close()
def render_raw_string_from_attributes(self):
stringio = StringIO()
stringio.write(self._startLine.raw_string)
stringio.write("\r\n")
self._header.render_raw_string_from_attributes(stringio)
stringio.write(self._content)
self._rawString = stringio.getvalue()
stringio.close()
@property
def start_line(self):
if self._startLine is None:
self.parse_attributes_from_raw_string()
return self._startLine
@start_line.setter
def start_line(self, a_sip_start_line):
self._startLine = a_sip_start_line
self.clear_raw_string()
@property
def header(self):
if self._header is None:
self.parse_attributes_from_raw_string()
return self._header
@header.setter
def header(self, a_sip_header):
self._header = a_sip_header
self.clear_raw_string()
@property
def content(self):
if self._content is None:
self.parse_attributes_from_raw_string()
return self._content
@content.setter
def content(self, a_string):
self._content = a_string
self.clear_raw_string()
@property
def vias(self):
return self.header.vias
@property
def via_header_fields(self):
return self.header.via_header_fields
@property
def route_uris(self):
return self.header.route_uris
@property
def record_route_uris(self):
return self.header.record_route_uris
@property
def transaction_hash(self):
return self.header.transaction_hash
@property
def dialog_hash(self):
return self.header.dialog_hash
# TODO: This is a hot method. Should we cache?
@property
def is_valid(self):
if self.is_malformed:
return False
if not self.header.is_valid:
return False
if self.header.content_length is not None:
if self.header.content_length != self.content.__len__():
return False
return True
@property
def is_invalid(self):
return not self.is_valid
@property
def is_unknown(self):
return not self.is_known
@property
def is_known(self):
return False
@property
def is_malformed(self):
return False
@property
def is_request(self):
return False
@property
def is_response(self):
return False
@property
def is_ack_request(self):
return False
@property
def is_bye_request(self):
return False
@property
def is_cancel_request(self):
return False
@property
def is_info_request(self):
return False
@property
def is_invite_request(self):
return False
@property
def is_message_request(self):
return False
@property
def is_notify_request(self):
return False
@property
def is_options_request(self):
return False
@property
def is_publish_request(self):
return False
@property
def is_prack_request(self):
return False
@property
def is_refer_request(self):
return False
@property
def is_register_request(self):
return False
@property
def is_subscribe_request(self):
return False
@property
def is_update_request(self):
return False
| 0 | 0 |
943c8ed1cd17178b2e7dd6ef67854da8a007f148 | 98 | py | Python | codes_auto/1635.number-of-good-pairs.py | smartmark-pro/leetcode_record | 6504b733d892a705571eb4eac836fb10e94e56db | [
"MIT"
] | null | null | null | codes_auto/1635.number-of-good-pairs.py | smartmark-pro/leetcode_record | 6504b733d892a705571eb4eac836fb10e94e56db | [
"MIT"
] | null | null | null | codes_auto/1635.number-of-good-pairs.py | smartmark-pro/leetcode_record | 6504b733d892a705571eb4eac836fb10e94e56db | [
"MIT"
] | null | null | null | #
# @lc app=leetcode.cn id=1635 lang=python3
#
# [1635] number-of-good-pairs
#
None
# @lc code=end | 14 | 42 | 0.673469 | #
# @lc app=leetcode.cn id=1635 lang=python3
#
# [1635] number-of-good-pairs
#
None
# @lc code=end | 0 | 0 |
3d707e5f1c279e06637838e9d88dd40ec499c8ba | 1,110 | py | Python | python-the-hard-way/12-prompting-people.py | Valka7a/python-playground | f08d4374f2cec2e8b1afec3753854b1ec10ff480 | [
"MIT"
] | null | null | null | python-the-hard-way/12-prompting-people.py | Valka7a/python-playground | f08d4374f2cec2e8b1afec3753854b1ec10ff480 | [
"MIT"
] | null | null | null | python-the-hard-way/12-prompting-people.py | Valka7a/python-playground | f08d4374f2cec2e8b1afec3753854b1ec10ff480 | [
"MIT"
] | null | null | null | # Exercise 12: Prompting People
# Variables
age = raw_input("How old are you? ")
height = raw_input("How tall are you? ")
weight = raw_input("How much do you weigh? ")
# Print
print "So, you're %r old, %r tall and %r heavy." % (age, height, weight)
# Study Drills
# 1. In Terminal where you normally run python to run your scripts,
# type pydoc raw_input. Read what it says. If you're on Windows
# try python -m pydoc raw_input instead.
# 2. Get out of pydoc by typing q to quit.
# 3. Look onine for what the pydoc command does.
# 4. Use pydoc to also read about open, file, os and sys. It's
# alright if you do not understand thosel just read through
# and take notes about interesting things.
# Drill 1
# Help on built-in function raw_input in module __builtin__:
# raw_input(...)
# raw_input([prompt]) -> string
#
# Read a string from standard input. The trailing newline is stripped.
# If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError.
# On Unix, GNU readline is used if enabled. The prompt string, if given,
# is printed without a trailing newline before reading.
| 34.6875 | 76 | 0.715315 | # Exercise 12: Prompting People
# Variables
age = raw_input("How old are you? ")
height = raw_input("How tall are you? ")
weight = raw_input("How much do you weigh? ")
# Print
print "So, you're %r old, %r tall and %r heavy." % (age, height, weight)
# Study Drills
# 1. In Terminal where you normally run python to run your scripts,
# type pydoc raw_input. Read what it says. If you're on Windows
# try python -m pydoc raw_input instead.
# 2. Get out of pydoc by typing q to quit.
# 3. Look onine for what the pydoc command does.
# 4. Use pydoc to also read about open, file, os and sys. It's
# alright if you do not understand thosel just read through
# and take notes about interesting things.
# Drill 1
# Help on built-in function raw_input in module __builtin__:
# raw_input(...)
# raw_input([prompt]) -> string
#
# Read a string from standard input. The trailing newline is stripped.
# If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError.
# On Unix, GNU readline is used if enabled. The prompt string, if given,
# is printed without a trailing newline before reading.
| 0 | 0 |
17884ad7e858a3c341d64db09625d9ca52b143f6 | 1,730 | py | Python | alipay/aop/api/domain/InvestorMaterialInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/InvestorMaterialInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/InvestorMaterialInfo.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class InvestorMaterialInfo(object):
    """Value object for one investor material (file attachment) in the Alipay API.

    Follows the generated-SDK model convention: private storage behind plain
    properties, plus dict (de)serialization helpers used by the gateway layer.
    """

    # Field names shared by serialization and deserialization.
    _FIELDS = ('file_id', 'file_url', 'type')

    def __init__(self):
        self._file_id = None
        self._file_url = None
        self._type = None

    @property
    def file_id(self):
        """Identifier of the uploaded material file."""
        return self._file_id

    @file_id.setter
    def file_id(self, value):
        self._file_id = value

    @property
    def file_url(self):
        """Download URL of the material file."""
        return self._file_url

    @file_url.setter
    def file_url(self, value):
        self._file_url = value

    @property
    def type(self):
        """Material type discriminator."""
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def to_alipay_dict(self):
        """Serialize to a plain dict, omitting unset (falsy) fields.

        Nested model objects are recursed into via their own to_alipay_dict.
        """
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; None for an empty/missing dict."""
        if not d:
            return None
        instance = InvestorMaterialInfo()
        for field in InvestorMaterialInfo._FIELDS:
            if field in d:
                setattr(instance, field, d[field])
        return instance
| 24.366197 | 67 | 0.550289 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class InvestorMaterialInfo(object):
def __init__(self):
self._file_id = None
self._file_url = None
self._type = None
@property
def file_id(self):
return self._file_id
@file_id.setter
def file_id(self, value):
self._file_id = value
@property
def file_url(self):
return self._file_url
@file_url.setter
def file_url(self, value):
self._file_url = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
def to_alipay_dict(self):
params = dict()
if self.file_id:
if hasattr(self.file_id, 'to_alipay_dict'):
params['file_id'] = self.file_id.to_alipay_dict()
else:
params['file_id'] = self.file_id
if self.file_url:
if hasattr(self.file_url, 'to_alipay_dict'):
params['file_url'] = self.file_url.to_alipay_dict()
else:
params['file_url'] = self.file_url
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = InvestorMaterialInfo()
if 'file_id' in d:
o.file_id = d['file_id']
if 'file_url' in d:
o.file_url = d['file_url']
if 'type' in d:
o.type = d['type']
return o
| 0 | 0 |
ab62cfd57062bdbe06cdc777b7148d1f94c2278d | 476 | py | Python | lens/exceptions.py | port-zero/lens | 06b52dd8ac3dde2f7f162a567f8e5e961e7209b1 | [
"MIT"
] | 11 | 2016-11-25T20:07:15.000Z | 2021-04-03T07:39:38.000Z | lens/exceptions.py | port-zero/lens | 06b52dd8ac3dde2f7f162a567f8e5e961e7209b1 | [
"MIT"
] | null | null | null | lens/exceptions.py | port-zero/lens | 06b52dd8ac3dde2f7f162a567f8e5e961e7209b1 | [
"MIT"
] | 2 | 2016-11-30T11:49:42.000Z | 2017-09-18T04:57:03.000Z | class LensError(ValueError):
def __init__(self, name):
self.name = name
class NotFound(LensError):
def __str__(self):
return "Did not find parser for {}".format(self.name)
class WrongFormat(LensError):
def __str__(self):
return "Parser {} does not have the correct format".format(self.name)
class LensNotConfigured(LensError):
def __str__(self):
return "Parser {} does not implement lens specifications".format(self.name)
| 25.052632 | 83 | 0.686975 | class LensError(ValueError):
def __init__(self, name):
self.name = name
class NotFound(LensError):
def __str__(self):
return "Did not find parser for {}".format(self.name)
class WrongFormat(LensError):
def __str__(self):
return "Parser {} does not have the correct format".format(self.name)
class LensNotConfigured(LensError):
def __str__(self):
return "Parser {} does not implement lens specifications".format(self.name)
| 0 | 0 |
715eab9e05e4e3e6f81c12646f271a7236441291 | 12,764 | py | Python | msticpy/nbtools/azure_ml_tools.py | ekmixon/msticpy | 8676a648ba9bfb4d848a8dda964820d4942a32ca | [
"MIT"
] | null | null | null | msticpy/nbtools/azure_ml_tools.py | ekmixon/msticpy | 8676a648ba9bfb4d848a8dda964820d4942a32ca | [
"MIT"
] | 3 | 2021-05-15T02:16:39.000Z | 2022-01-19T13:13:25.000Z | msticpy/nbtools/azure_ml_tools.py | ekmixon/msticpy | 8676a648ba9bfb4d848a8dda964820d4942a32ca | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Checker functions for Azure ML notebooks."""
import json
import os
import socket
import sys
import urllib
import urllib.request
from pathlib import Path
from typing import Any, List, Mapping, Optional, Tuple, Union

from IPython import get_ipython
from IPython.display import HTML, display
from pkg_resources import parse_version

from .._version import VERSION
from ..common.pkg_config import refresh_config
__version__ = VERSION
# Notebook URIs surfaced in user-facing guidance messages.
AZ_GET_STARTED = (
    "https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/A%20Getting"
    "%20Started%20Guide%20For%20Azure%20Sentinel%20ML%20Notebooks.ipynb"
)
TROUBLE_SHOOTING = (
    "https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/"
    "TroubleShootingNotebooks.ipynb"
)
# HTML fragment shown when a required package is missing or too old
# (format keys: package, inst_ver, req_ver).
MISSING_PKG_ERR = """
<h4><font color='orange'>The package '<b>{package}</b>' is not
installed or has an unsupported version (installed version = '{inst_ver}')</font></h4>
Please install or upgrade before continuing: required version is {package}>={req_ver}
"""
# HTML fragment shown when the msticpy install/upgrade failed
# (format keys: pkg, ver, nbk_uri).
MP_INSTALL_FAILED = """
<h4><font color='red'>The notebook may not run correctly without
the correct version of '<b>{pkg}</b>' ({ver} or later).</font></h4>
Please see the <a href="{nbk_uri}">
Getting Started Guide For Azure Sentinel ML Notebooks</a></b>
for more information<br><hr>
"""
# HTML fragment shown when a kernel restart is needed after upgrading.
RELOAD_MP = """
<h4><font color='orange'>Kernel restart needed</h4>
An error was detected trying to load the updated version of MSTICPy.<br>
Please restart the notebook kernel and re-run this cell - it should
run without error.
"""
# Default minimum versions; MSTICPY_REQ_VERSION defaults to this module's own.
MIN_PYTHON_VER_DEF = "3.6"
MSTICPY_REQ_VERSION = __version__
# Regex for splitting a semantic version string into its components.
VER_RGX = r"(?P<maj>\d+)\.(?P<min>\d+).(?P<pnt>\d+)(?P<suff>.*)"
# Name of the config env var and the config file searched for on disk.
MP_ENV_VAR = "MSTICPYCONFIG"
MP_FILE = "msticpyconfig.yaml"
NB_CHECK_URI = (
    "https://raw.githubusercontent.com/Azure/Azure-Sentinel-"
    "Notebooks/master/utils/nb_check.py"
)
def is_in_aml():
    """Return True if running in Azure Machine Learning.

    AML compute instances set APPSETTING_WEBSITE_SITE_NAME in the environment.
    """
    site_name = os.environ.get("APPSETTING_WEBSITE_SITE_NAME")
    return site_name == "AMLComputeInstance"
def check_versions(
    min_py_ver: Union[str, Tuple] = MIN_PYTHON_VER_DEF,
    min_mp_ver: Union[str, Tuple] = MSTICPY_REQ_VERSION,
    extras: Optional[List[str]] = None,
    mp_release: Optional[str] = None,
    **kwargs,
):
    """
    Check the current versions of the Python kernel and MSTICPy.

    Parameters
    ----------
    min_py_ver : Union[Tuple[int, int], str]
        Minimum Python version
    min_mp_ver : Union[Tuple[int, int], str]
        Minimum MSTICPy version
    extras : Optional[List[str]], optional
        A list of extras required for MSTICPy
    mp_release : Optional[str], optional
        Override the MSTICPy release version. This
        can also be specified in the environment variable 'MP_TEST_VER'

    Raises
    ------
    RuntimeError
        If the Python version does not support the notebook.
        If the MSTICPy version does not support the notebook
        and the user chose not to upgrade

    """
    # kwargs accepted only for backward compatibility with older callers.
    del kwargs
    _disp_html("<h4>Starting notebook pre-checks...</h4>")
    if isinstance(min_py_ver, str):
        # Normalize a "3.6"-style string into a (major, minor, ...) tuple.
        min_py_ver = _get_pkg_version(min_py_ver).release
    check_python_ver(min_py_ver=min_py_ver)
    # Verify/guide the msticpy install, then set up the notebook environment.
    _check_mp_install(min_mp_ver, mp_release, extras)
    _check_kql_prereqs()
    _set_kql_env_vars(extras)
    _run_user_settings()
    _set_mpconfig_var()
    _disp_html("<h4>Notebook pre-checks complete.</h4>")
def check_python_ver(min_py_ver: Union[str, Tuple] = MIN_PYTHON_VER_DEF):
    """
    Check the current version of the Python kernel.

    Parameters
    ----------
    min_py_ver : Union[str, Tuple[int, int]]
        Minimum required Python version (string or version tuple).

    Raises
    ------
    RuntimeError
        If the Python version does not support the notebook.

    """
    min_py_ver = _get_pkg_version(min_py_ver)
    sys_ver = _get_pkg_version(sys.version_info[:3])
    _disp_html("Checking Python kernel version...")
    if sys_ver < min_py_ver:
        # Bandit SQL inject error found here
        _disp_html(
            f"""
            <h4><font color='red'>This notebook requires a later
            (Python) kernel version.</h4></font>
            Select a kernel from the notebook toolbar (above), that is Python
            {min_py_ver} or later (Python 3.8 recommended)<br>
            """ # nosec
        )
        _disp_html(
            f"""
            Please see the <a href="{TROUBLE_SHOOTING}">TroubleShootingNotebooks</a>
            for more information<br><br><hr>
            """
        )
        # Bandit SQL inject error found here
        raise RuntimeError(f"Python {min_py_ver} or later kernel is required.") # nosec
    # Soft recommendation only - 3.6/3.7 still work but 3.8 is preferred.
    if sys_ver < _get_pkg_version("3.8"):
        _disp_html(
            "Recommended: switch to using the 'Python 3.8 - AzureML' notebook kernel"
            " if this is available."
        )
    _disp_html(f"Info: Python kernel version {sys_ver} - OK<br>")
def _check_mp_install(
    min_mp_ver: Union[str, Tuple],
    mp_release: Optional[str],
    extras: Optional[List[str]],
):
    """Check for and try to install required MSTICPy version.

    The target release is resolved in priority order: explicit `mp_release`
    parameter, then the MP_TEST_VER environment variable, then the notebook
    default derived from `min_mp_ver`.
    """
    pkg_version = _get_pkg_version(min_mp_ver)
    env_override = os.environ.get("MP_TEST_VER")
    target_version = mp_release or env_override or str(pkg_version)
    check_mp_ver(min_msticpy_ver=target_version, extras=extras)
def check_mp_ver(min_msticpy_ver: Union[str, Tuple], extras: Optional[List[str]]):
    """
    Check the current version of msticpy and guide the user to upgrade.

    Parameters
    ----------
    min_msticpy_ver : Union[str, Tuple]
        Minimum MSTICPy version
    extras : Optional[List[str]], optional
        A list of extras required for MSTICPy

    Raises
    ------
    ImportError
        If the installed MSTICPy version is below the minimum.

    """
    mp_min_pkg_ver = _get_pkg_version(min_msticpy_ver)
    _disp_html("Checking msticpy version...<br>")
    inst_version = _get_pkg_version(__version__)
    if inst_version < mp_min_pkg_ver:
        _disp_html(
            MISSING_PKG_ERR.format(
                package="msticpy",
                inst_ver=inst_version,
                req_ver=mp_min_pkg_ver,
            )
        )
        # Build the pip requirement spec, including any required extras.
        mp_pkg_spec = f"msticpy[{','.join(extras)}]" if extras else "msticpy"
        mp_pkg_spec = f"{mp_pkg_spec}>={min_msticpy_ver}"
        # BUGFIX: previously this displayed only "!<spec>", which is not a
        # runnable command - show the full pip install invocation.
        _disp_html(
            f"Please run the following command to upgrade MSTICPy<br>"
            f"<pre>!pip install --upgrade {mp_pkg_spec}</pre><br>"
        )
        raise ImportError(
            "Unsupported version of MSTICPy installed",
            f"Installed version: {inst_version}",
            f"Required version: {mp_min_pkg_ver}",
        )
    _disp_html(f"Info: msticpy version {inst_version} (>= {mp_min_pkg_ver}) - OK<br>")
def _set_kql_env_vars(extras: Optional[List[str]]):
    """Set environment variables for Kqlmagic based on MP extras.

    Requests the 'jupyter-extended' Kqlmagic feature set when any
    Sentinel/KQL-related extra was requested; otherwise the basic set.
    """
    extended_markers = ("azsentinel", "azuresentinel", "kql")
    wants_extended = bool(extras) and any(
        extra in extended_markers for extra in extras
    )
    os.environ["KQLMAGIC_EXTRAS_REQUIRE"] = (
        "jupyter-extended" if wants_extended else "jupyter-basic"
    )
    if is_in_aml():
        # Kqlmagic needs the compute FQDN for auth redirection on AML.
        os.environ["KQLMAGIC_AZUREML_COMPUTE"] = _get_vm_fqdn()
def _get_pkg_version(version: Union[str, Tuple]) -> Any:
    """Return a pkg_resources-parsed version from a string or tuple.

    A tuple such as (3, 8, 0) is first joined into "3.8.0".
    """
    if isinstance(version, tuple):
        version = ".".join(str(part) for part in version)
    if isinstance(version, str):
        return parse_version(version)
    raise TypeError(f"Unparseable type version {version}")
def _disp_html(text: str):
    """Render *text* as HTML in the notebook output area."""
    html_fragment = HTML(text)
    display(html_fragment)
def get_aml_user_folder() -> Optional[Path]:
    """Return the root of the user folder.

    Looks for the LAST "Users" component in the absolute current working
    directory and returns the path down to (and including) the component
    immediately below it.  Returns None when the cwd is not under a
    "Users" tree or "Users" is the final component.
    """
    parts = Path(".").absolute().parts
    if "Users" not in parts:
        return None
    # Index of the last occurrence of "Users" in the path.
    last_users = max(idx for idx, part in enumerate(parts) if part == "Users")
    if last_users + 1 >= len(parts):
        # "Users" is the final component - there is no user folder below it.
        return None
    return Path("/".join(parts[: last_users + 2]))
# pylint: disable=import-outside-toplevel, unused-import, import-error
def _run_user_settings():
    """Import the user's nbuser_settings.py, if one exists in the AML user folder.

    Side effect: appends the user folder to sys.path so the module resolves.
    """
    user_folder = get_aml_user_folder()
    if not user_folder:
        # BUGFIX: get_aml_user_folder() returns None when the cwd is not under
        # a "Users" tree; previously this raised AttributeError on .joinpath.
        return
    if user_folder.joinpath("nbuser_settings.py").is_file():
        sys.path.append(str(user_folder))
        import nbuser_settings  # noqa: F401
# pylint: enable=import-outside-toplevel, unused-import, import-error
def _set_mpconfig_var():
    """Set MSTICPYCONFIG to a file in the user directory if no other is found.

    Resolution order: a valid existing MSTICPYCONFIG env value wins; next a
    msticpyconfig.yaml in the current folder; finally one in the AML user root.
    """
    mp_path_val = os.environ.get(MP_ENV_VAR)
    if (
        # If a valid MSTICPYCONFIG value is found - return
        (mp_path_val and Path(mp_path_val).is_file())
        # Or if there is a msticpyconfig in the current folder.
        or Path(".").joinpath(MP_FILE).is_file()
    ):
        return
    # Otherwise check the user's root folder
    user_dir = get_aml_user_folder()
    if not user_dir:
        # BUGFIX: outside a "Users" tree get_aml_user_folder() returns None;
        # previously Path(None) raised a TypeError here.
        return
    mp_path = Path(user_dir).joinpath(MP_FILE)
    if mp_path.is_file():
        # If there's a file there, set the env variable to that.
        os.environ[MP_ENV_VAR] = str(mp_path)
        # Since we have already imported msticpy to check the version
        # it will have already configured settings so we need to refresh.
        refresh_config()
        _disp_html(
            f"<br>No {MP_FILE} found. Will use {MP_FILE} in user folder {user_dir}<br>"
        )
def _get_vm_metadata() -> Mapping[str, Any]:
    """Use local request to get VM metadata.

    Queries the Azure Instance Metadata Service (IMDS) at its fixed
    link-local address; only reachable from inside an Azure VM.
    Returns an empty dict if the response is not a JSON object.
    NOTE(review): relies on the `urllib.request` submodule being importable
    via the bare `import urllib` at module top - confirm it is imported
    (directly or transitively) before this is called.
    """
    vm_uri = "http://169.254.169.254/metadata/instance?api-version=2017-08-01"
    req = urllib.request.Request(vm_uri) # type: ignore
    # IMDS requires this header on every request.
    req.add_header("Metadata", "true")
    # Bandit warning on urlopen - Fixed private URL
    with urllib.request.urlopen(req) as resp: # type: ignore # nosec
        metadata = json.loads(resp.read())
    return metadata if isinstance(metadata, dict) else {}
def _get_vm_fqdn() -> str:
    """Return the FQDN of the host, or an empty string if the region is unknown."""
    region = _get_vm_metadata().get("compute", {}).get("location")
    if not region:
        return ""
    return ".".join([socket.gethostname(), region, "instances.azureml.ms"])
def _check_kql_prereqs():
    """
    Check and warn about missing packages for Kqlmagic/msal_extensions.

    Only runs on AML compute.  If the PyGObject 'gi' module is importable
    nothing further is needed; otherwise the installed apt packages are
    inspected and guidance is displayed for any missing system libraries.

    Notes
    -----
    Kqlmagic may trigger warnings about a missing PyGObject package
    and some system library dependencies. To fix this do the
    following:<br>
    From a notebook run:

        %pip uninstall enum34
        !sudo apt-get --yes install libgirepository1.0-dev
        !sudo apt-get --yes install gir1.2-secret-1
        %pip install pygobject

    You can also do this from a terminal - but ensure that you've
    activated the environment corresponding to the kernel you are
    using prior to running the pip commands.

        # Install the libgi dependency
        sudo apt install libgirepository1.0-dev
        sudo apt install gir1.2-secret-1
        # activate the environment
        # conda activate azureml_py38
        # source ./env_path/scripts/activate
        # Uninstall enum34
        python -m pip uninstall enum34
        # Install pygobject
        python -m install pygobject

    """
    if not is_in_aml():
        return
    try:
        # If this successfully imports, we are ok
        # pylint: disable=import-outside-toplevel
        import gi
        # pylint: enable=import-outside-toplevel
        del gi
    except ImportError:
        # Check for system packages
        ip_shell = get_ipython()
        if not ip_shell:
            # Not running under IPython - cannot run shell magics.
            return
        # "sx" magic runs a shell command and captures output lines;
        # apt list lines look like "name/suite version arch [...]".
        apt_list = ip_shell.run_line_magic("sx", "apt list")
        apt_list = [apt.split("/", maxsplit=1)[0] for apt in apt_list]
        missing_lx_pkg = [
            apt_pkg
            for apt_pkg in ("libgirepository1.0-dev", "gir1.2-secret-1")
            if apt_pkg not in apt_list
        ]
        if missing_lx_pkg:
            _disp_html(
                "Kqlmagic/msal-extensions pre-requisite PyGObject not installed."
            )
            _disp_html(
                "To prevent warnings when loading the Kqlmagic data provider,"
                " Please run the following command:<br>"
                "!conda install --yes -c conda-forge pygobject<br>"
            )
| 33.413613 | 90 | 0.645096 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Checker functions for Azure ML notebooks."""
import json
import os
import socket
import sys
import urllib
from pathlib import Path
from typing import Any, List, Mapping, Optional, Tuple, Union
from IPython import get_ipython
from IPython.display import HTML, display
from pkg_resources import parse_version
from .._version import VERSION
from ..common.pkg_config import refresh_config
__version__ = VERSION
AZ_GET_STARTED = (
"https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/A%20Getting"
"%20Started%20Guide%20For%20Azure%20Sentinel%20ML%20Notebooks.ipynb"
)
TROUBLE_SHOOTING = (
"https://github.com/Azure/Azure-Sentinel-Notebooks/blob/master/"
"TroubleShootingNotebooks.ipynb"
)
MISSING_PKG_ERR = """
<h4><font color='orange'>The package '<b>{package}</b>' is not
installed or has an unsupported version (installed version = '{inst_ver}')</font></h4>
Please install or upgrade before continuing: required version is {package}>={req_ver}
"""
MP_INSTALL_FAILED = """
<h4><font color='red'>The notebook may not run correctly without
the correct version of '<b>{pkg}</b>' ({ver} or later).</font></h4>
Please see the <a href="{nbk_uri}">
Getting Started Guide For Azure Sentinel ML Notebooks</a></b>
for more information<br><hr>
"""
RELOAD_MP = """
<h4><font color='orange'>Kernel restart needed</h4>
An error was detected trying to load the updated version of MSTICPy.<br>
Please restart the notebook kernel and re-run this cell - it should
run without error.
"""
MIN_PYTHON_VER_DEF = "3.6"
MSTICPY_REQ_VERSION = __version__
VER_RGX = r"(?P<maj>\d+)\.(?P<min>\d+).(?P<pnt>\d+)(?P<suff>.*)"
MP_ENV_VAR = "MSTICPYCONFIG"
MP_FILE = "msticpyconfig.yaml"
NB_CHECK_URI = (
"https://raw.githubusercontent.com/Azure/Azure-Sentinel-"
"Notebooks/master/utils/nb_check.py"
)
def is_in_aml():
"""Return True if running in Azure Machine Learning."""
return os.environ.get("APPSETTING_WEBSITE_SITE_NAME") == "AMLComputeInstance"
def check_versions(
min_py_ver: Union[str, Tuple] = MIN_PYTHON_VER_DEF,
min_mp_ver: Union[str, Tuple] = MSTICPY_REQ_VERSION,
extras: Optional[List[str]] = None,
mp_release: Optional[str] = None,
**kwargs,
):
"""
Check the current versions of the Python kernel and MSTICPy.
Parameters
----------
min_py_ver : Union[Tuple[int, int], str]
Minimum Python version
min_mp_ver : Union[Tuple[int, int], str]
Minimum MSTICPy version
extras : Optional[List[str]], optional
A list of extras required for MSTICPy
mp_release : Optional[str], optional
Override the MSTICPy release version. This
can also be specified in the environment variable 'MP_TEST_VER'
Raises
------
RuntimeError
If the Python version does not support the notebook.
If the MSTICPy version does not support the notebook
and the user chose not to upgrade
"""
del kwargs
_disp_html("<h4>Starting notebook pre-checks...</h4>")
if isinstance(min_py_ver, str):
min_py_ver = _get_pkg_version(min_py_ver).release
check_python_ver(min_py_ver=min_py_ver)
_check_mp_install(min_mp_ver, mp_release, extras)
_check_kql_prereqs()
_set_kql_env_vars(extras)
_run_user_settings()
_set_mpconfig_var()
_disp_html("<h4>Notebook pre-checks complete.</h4>")
def check_python_ver(min_py_ver: Union[str, Tuple] = MIN_PYTHON_VER_DEF):
    """
    Check the current version of the Python kernel.

    Parameters
    ----------
    min_py_ver : Tuple[int, int]
        Minimum Python version

    Raises
    ------
    RuntimeError
        If the Python version does not support the notebook.

    """
    min_py_ver = _get_pkg_version(min_py_ver)
    sys_ver = _get_pkg_version(sys.version_info[:3])
    _disp_html("Checking Python kernel version...")
    if sys_ver < min_py_ver:
        # Bandit flags the f-string as a possible injection; it is
        # display-only HTML built from trusted values - hence # nosec.
        _disp_html(
            f"""
            <h4><font color='red'>This notebook requires a later
            (Python) kernel version.</h4></font>
            Select a kernel from the notebook toolbar (above), that is Python
            {min_py_ver} or later (Python 3.8 recommended)<br>
            """  # nosec
        )
        _disp_html(
            f"""
            Please see the <a href="{TROUBLE_SHOOTING}">TroubleShootingNotebooks</a>
            for more information<br><br><hr>
            """
        )
        # Abort the cell - the notebook cannot run on this kernel.
        raise RuntimeError(f"Python {min_py_ver} or later kernel is required.")  # nosec

    if sys_ver < _get_pkg_version("3.8"):
        _disp_html(
            "Recommended: switch to using the 'Python 3.8 - AzureML' notebook kernel"
            " if this is available."
        )
    _disp_html(f"Info: Python kernel version {sys_ver} - OK<br>")
def _check_mp_install(
    min_mp_ver: Union[str, Tuple],
    mp_release: Optional[str],
    extras: Optional[List[str]],
):
    """Check for and try to install required MSTICPy version."""
    # Preference order for the target release: explicit parameter,
    # then the MP_TEST_VER environment variable, then the default minimum.
    fallback_ver = str(_get_pkg_version(min_mp_ver))
    target_ver = mp_release or os.environ.get("MP_TEST_VER") or fallback_ver
    check_mp_ver(min_msticpy_ver=target_ver, extras=extras)
def check_mp_ver(min_msticpy_ver: Union[str, Tuple], extras: Optional[List[str]]):
    """
    Check and optionally update the current version of msticpy.

    Parameters
    ----------
    min_msticpy_ver : Tuple[int, int]
        Minimum MSTICPy version
    extras : Optional[List[str]], optional
        A list of extras required for MSTICPy

    Raises
    ------
    ImportError
        If MSTICPy version is insufficient and we need to upgrade

    """
    mp_min_pkg_ver = _get_pkg_version(min_msticpy_ver)

    _disp_html("Checking msticpy version...<br>")

    inst_version = _get_pkg_version(__version__)
    if inst_version < mp_min_pkg_ver:
        _disp_html(
            MISSING_PKG_ERR.format(
                package="msticpy",
                inst_ver=inst_version,
                req_ver=mp_min_pkg_ver,
            )
        )
        # Build a pip requirement spec including any requested extras,
        # e.g. "msticpy[azsentinel]>=1.0.0".
        mp_pkg_spec = f"msticpy[{','.join(extras)}]" if extras else "msticpy"
        mp_pkg_spec = f"{mp_pkg_spec}>={min_msticpy_ver}"

        # Bug fix: the message previously showed "!{mp_pkg_spec}", which is
        # not a runnable command - include the pip install invocation.
        _disp_html(
            f"Please run the following command to upgrade MSTICPy<br>"
            f"<pre>%pip install --upgrade {mp_pkg_spec}</pre><br>"
        )
        raise ImportError(
            "Unsupported version of MSTICPy installed",
            f"Installed version: {inst_version}",
            f"Required version: {mp_min_pkg_ver}",
        )

    _disp_html(f"Info: msticpy version {inst_version} (>= {mp_min_pkg_ver}) - OK<br>")
def _set_kql_env_vars(extras: Optional[List[str]]):
    """Set environment variables for Kqlmagic based on MP extras."""
    # Extras that imply the extended Jupyter feature set of Kqlmagic.
    sentinel_extras = ("azsentinel", "azuresentinel", "kql")
    extras_req = "jupyter-basic"
    if extras and set(extras).intersection(sentinel_extras):
        extras_req = "jupyter-extended"
    os.environ["KQLMAGIC_EXTRAS_REQUIRE"] = extras_req
    if is_in_aml():
        os.environ["KQLMAGIC_AZUREML_COMPUTE"] = _get_vm_fqdn()
def _get_pkg_version(version: Union[str, Tuple]) -> Any:
    """Return pkg_resources parsed version from string or tuple."""
    if isinstance(version, tuple):
        # Convert e.g. (3, 8, 0) to "3.8.0" before parsing.
        version = ".".join(str(part) for part in version)
    if isinstance(version, str):
        return parse_version(version)
    raise TypeError(f"Unparseable type version {version}")
def _disp_html(text: str):
    """Display the HTML text."""
    # Render the supplied string as rich HTML in the notebook output cell.
    display(HTML(text))
def get_aml_user_folder() -> Optional[Path]:
    """Return the root of the user folder."""
    cwd_parts = Path(".").absolute().parts
    try:
        # Offset of the *last* "Users" component, counted from the end.
        rev_offset = cwd_parts[::-1].index("Users")
    except ValueError:
        # Not inside an AML "Users" tree.
        return None
    users_end = len(cwd_parts) - rev_offset
    if len(cwd_parts) < users_end + 1:
        # "Users" is the final path component - no user folder below it.
        return None
    return Path("/".join(cwd_parts[: users_end + 1]))
# pylint: disable=import-outside-toplevel, unused-import, import-error
def _run_user_settings():
    """Import nbuser_settings.py from the user folder, if it exists."""
    user_folder = get_aml_user_folder()
    # Bug fix: get_aml_user_folder() returns None when the current working
    # directory is not under a "Users" tree - guard against that before
    # calling .joinpath (previously raised AttributeError).
    if user_folder and user_folder.joinpath("nbuser_settings.py").is_file():
        sys.path.append(str(user_folder))
        import nbuser_settings  # noqa: F401


# pylint: enable=import-outside-toplevel, unused-import, import-error
def _set_mpconfig_var():
    """Set MSTICPYCONFIG to file in user directory if no other found."""
    mp_path_val = os.environ.get(MP_ENV_VAR)
    if (
        # If a valid MSTICPYCONFIG value is found - return
        (mp_path_val and Path(mp_path_val).is_file())
        # Or if there is a msticpconfig in the current folder.
        or Path(".").joinpath(MP_FILE).is_file()
    ):
        return
    # Otherwise check the user's root folder
    user_dir = get_aml_user_folder()
    # Bug fix: get_aml_user_folder() returns None outside an AML "Users"
    # tree; Path(None) would raise TypeError.
    if user_dir is None:
        return
    mp_path = Path(user_dir).joinpath(MP_FILE)
    if mp_path.is_file():
        # If there's a file there, set the env variable to that.
        os.environ[MP_ENV_VAR] = str(mp_path)
        # Since we have already imported msticpy to check the version
        # it will have already configured settings so we need to refresh.
        refresh_config()
        _disp_html(
            f"<br>No {MP_FILE} found. Will use {MP_FILE} in user folder {user_dir}<br>"
        )
def _get_vm_metadata() -> Mapping[str, Any]:
    """Use local request to get VM metadata."""
    vm_uri = "http://169.254.169.254/metadata/instance?api-version=2017-08-01"
    req = urllib.request.Request(vm_uri)  # type: ignore
    req.add_header("Metadata", "true")
    # Bandit warning on urlopen - Fixed private URL
    # Robustness fix: a timeout prevents an indefinite hang when the IMDS
    # endpoint is unreachable (e.g. not actually running on an Azure VM).
    with urllib.request.urlopen(req, timeout=10) as resp:  # type: ignore # nosec
        metadata = json.loads(resp.read())
    return metadata if isinstance(metadata, dict) else {}
def _get_vm_fqdn() -> str:
    """Get the FQDN of the host."""
    az_region = _get_vm_metadata().get("compute", {}).get("location")
    if not az_region:
        # No region in the VM metadata - cannot build an FQDN.
        return ""
    return ".".join([socket.gethostname(), az_region, "instances.azureml.ms"])
def _check_kql_prereqs():
    """
    Check and install packages for Kqlmagic/msal_extensions.

    Notes
    -----
    Kqlmagic may trigger warnings about a missing PyGObject package
    and some system library dependencies. To fix this do the
    following:<br>
    From a notebook run:

        %pip uninstall enum34
        !sudo apt-get --yes install libgirepository1.0-dev
        !sudo apt-get --yes install gir1.2-secret-1
        %pip install pygobject

    You can also do this from a terminal - but ensure that you've
    activated the environment corresponding to the kernel you are
    using prior to running the pip commands.

        # Install the libgi dependency
        sudo apt install libgirepository1.0-dev
        sudo apt install gir1.2-secret-1

        # activate the environment
        # conda activate azureml_py38
        # source ./env_path/scripts/activate

        # Uninstall enum34
        python -m pip uninstall enum34

        # Install pygobject
        python -m pip install pygobject

    """
    if not is_in_aml():
        # The system-package workaround only applies to AML compute.
        return
    try:
        # If this successfully imports, we are ok
        # pylint: disable=import-outside-toplevel
        import gi

        # pylint: enable=import-outside-toplevel
        del gi
    except ImportError:
        # Check for system packages
        ip_shell = get_ipython()
        if not ip_shell:
            return
        # "sx" runs the shell command and captures stdout as a list of lines.
        apt_list = ip_shell.run_line_magic("sx", "apt list")
        apt_list = [apt.split("/", maxsplit=1)[0] for apt in apt_list]
        missing_lx_pkg = [
            apt_pkg
            for apt_pkg in ("libgirepository1.0-dev", "gir1.2-secret-1")
            if apt_pkg not in apt_list
        ]
        if missing_lx_pkg:
            _disp_html(
                "Kqlmagic/msal-extensions pre-requisite PyGObject not installed."
            )
            _disp_html(
                "To prevent warnings when loading the Kqlmagic data provider,"
                " Please run the following command:<br>"
                "!conda install --yes -c conda-forge pygobject<br>"
            )
| 0 | 0 |
b65206728e5f3f6cbab0f87066d7ed1dc8784f63 | 4,423 | py | Python | konfuzio_sdk/urls.py | atraining/document-ai-python-sdk | ea2df68af0254053da7e6f4c6e2c2df6d7911233 | [
"MIT"
] | null | null | null | konfuzio_sdk/urls.py | atraining/document-ai-python-sdk | ea2df68af0254053da7e6f4c6e2c2df6d7911233 | [
"MIT"
] | null | null | null | konfuzio_sdk/urls.py | atraining/document-ai-python-sdk | ea2df68af0254053da7e6f4c6e2c2df6d7911233 | [
"MIT"
] | null | null | null | """Endpoints of the Konfuzio Host."""
import logging
from konfuzio_sdk import KONFUZIO_HOST, KONFUZIO_PROJECT_ID
logger = logging.getLogger(__name__)
def get_auth_token_url() -> str:
    """
    Generate URL that creates an authentication token for the user.

    :return: URL to generate the token.
    """
    endpoint = "/api/token-auth/"
    return KONFUZIO_HOST + endpoint
def get_project_list_url() -> str:
    """
    Generate URL to load all the projects available for the user.

    :return: URL to get all the projects for the user.
    """
    endpoint = "/api/projects/"
    return KONFUZIO_HOST + endpoint
def create_new_project_url() -> str:
    """
    Generate URL to create a new project.

    :return: URL to create a new project.
    """
    # Same endpoint as listing projects; the POST verb creates.
    endpoint = "/api/projects/"
    return KONFUZIO_HOST + endpoint
def get_documents_meta_url() -> str:
    """
    Generate URL to load meta information about documents.

    :return: URL to get all the documents details.
    """
    return "{}/api/projects/{}/docs/".format(KONFUZIO_HOST, KONFUZIO_PROJECT_ID)
def get_upload_document_url() -> str:
    """
    Generate URL to upload a document.

    :return: URL to upload a document
    """
    endpoint = "/api/v2/docs/"
    return KONFUZIO_HOST + endpoint
def get_create_label_url() -> str:
    """
    Generate URL to create a label.

    :return: URL to create a label.
    """
    endpoint = "/api/v2/labels/"
    return KONFUZIO_HOST + endpoint
def get_document_ocr_file_url(document_id: int) -> str:
    """
    Generate URL to access OCR version of document.

    :param document_id: ID of the document as integer
    :return: URL to get OCR document file.
    """
    return '{}/doc/show/{}/'.format(KONFUZIO_HOST, document_id)
def get_document_original_file_url(document_id: int) -> str:
    """
    Generate URL to access original version of the document.

    :param document_id: ID of the document as integer
    :return: URL to get the original document
    """
    return '{}/doc/show-original/{}/'.format(KONFUZIO_HOST, document_id)
def get_document_api_details_url(document_id: int, include_extractions: bool = False, extra_fields='bbox') -> str:
    """
    Generate URL to access document details of one document in a project.

    :param document_id: ID of the document as integer
    :param include_extractions: Bool to include extractions
    :param extra_fields: Extra information to include in the response
    :return: URL to get document details
    """
    base = f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/'
    query = f'?include_extractions={include_extractions}&extra_fields={extra_fields}'
    return base + query
def get_project_url(project_id=None) -> str:
    """
    Generate URL to get project details.

    :param project_id: ID of the project
    :return: URL to get project details.
    """
    # Fall back to the configured project for any falsy id (None, 0, '').
    if not project_id:
        project_id = KONFUZIO_PROJECT_ID
    return f'{KONFUZIO_HOST}/api/projects/{project_id}/'
def post_project_api_document_annotations_url(document_id: int) -> str:
    """
    Add new annotations to a document.

    :param document_id: ID of the document as integer
    :return: URL for adding annotations to a document
    """
    return (
        f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}'
        f'/docs/{document_id}/annotations/'
    )
def delete_project_api_document_annotations_url(document_id: int, annotation_id: int) -> str:
    """
    Delete the annotation of a document.

    :param document_id: ID of the document as integer
    :param annotation_id: ID of the annotation as integer
    :return: URL to delete annotation of a document
    """
    return (
        f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}'
        f'/docs/{document_id}/annotations/{annotation_id}/'
    )
def get_document_result_v1(document_id: int) -> str:
    """
    Generate URL to access web interface for labeling of this project.

    :param document_id: ID of the document as integer
    :return: URL for labeling of the project.
    """
    return '{}/api/v1/docs/{}/'.format(KONFUZIO_HOST, document_id)
def get_document_segmentation_details_url(document_id: int, project_id, action='segmentation') -> str:
    """
    Generate URL to get the segmentation results of a document.

    :param document_id: ID of the document as integer
    :param project_id: ID of the project
    :param action: Action from where to get the results
    :return: URL to access the segmentation results of a document
    """
    # Consistency fix: use the configured KONFUZIO_HOST like every other
    # endpoint in this module, instead of a hard-coded
    # 'https://app.konfuzio.com' that silently ignored custom host settings.
    return f'{KONFUZIO_HOST}/api/projects/{project_id}/docs/{document_id}/{action}/'
| 29.291391 | 116 | 0.703821 | """Endpoints of the Konfuzio Host."""
import logging
from konfuzio_sdk import KONFUZIO_HOST, KONFUZIO_PROJECT_ID
logger = logging.getLogger(__name__)
def get_auth_token_url() -> str:
"""
Generate URL that creates an authentication token for the user.
:return: URL to generate the token.
"""
return f"{KONFUZIO_HOST}/api/token-auth/"
def get_project_list_url() -> str:
"""
Generate URL to load all the projects available for the user.
:return: URL to get all the projects for the user.
"""
return f"{KONFUZIO_HOST}/api/projects/"
def create_new_project_url() -> str:
"""
Generate URL to create a new project.
:return: URL to create a new project.
"""
return f"{KONFUZIO_HOST}/api/projects/"
def get_documents_meta_url() -> str:
"""
Generate URL to load meta information about documents.
:return: URL to get all the documents details.
"""
return f"{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/"
def get_upload_document_url() -> str:
"""
Generate URL to upload a document.
:return: URL to upload a document
"""
return f"{KONFUZIO_HOST}/api/v2/docs/"
def get_create_label_url() -> str:
"""
Generate URL to create a label.
:return: URL to create a label.
"""
return f"{KONFUZIO_HOST}/api/v2/labels/"
def get_document_ocr_file_url(document_id: int) -> str:
"""
Generate URL to access OCR version of document.
:param document_id: ID of the document as integer
:return: URL to get OCR document file.
"""
return f'{KONFUZIO_HOST}/doc/show/{document_id}/'
def get_document_original_file_url(document_id: int) -> str:
"""
Generate URL to access original version of the document.
:param document_id: ID of the document as integer
:return: URL to get the original document
"""
return f'{KONFUZIO_HOST}/doc/show-original/{document_id}/'
def get_document_api_details_url(document_id: int, include_extractions: bool = False, extra_fields='bbox') -> str:
"""
Generate URL to access document details of one document in a project.
:param document_id: ID of the document as integer
:param include_extractions: Bool to include extractions
:param extra_fields: Extra information to include in the response
:return: URL to get document details
"""
return (
f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/'
f'?include_extractions={include_extractions}&extra_fields={extra_fields}'
)
def get_project_url(project_id=None) -> str:
"""
Generate URL to get project details.
:param project_id: ID of the project
:return: URL to get project details.
"""
project_id = project_id if project_id else KONFUZIO_PROJECT_ID
return f'{KONFUZIO_HOST}/api/projects/{project_id}/'
def post_project_api_document_annotations_url(document_id: int) -> str:
"""
Add new annotations to a document.
:param document_id: ID of the document as integer
:return: URL for adding annotations to a document
"""
return f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/annotations/'
def delete_project_api_document_annotations_url(document_id: int, annotation_id: int) -> str:
"""
Delete the annotation of a document.
:param document_id: ID of the document as integer
:param annotation_id: ID of the annotation as integer
:return: URL to delete annotation of a document
"""
return f'{KONFUZIO_HOST}/api/projects/{KONFUZIO_PROJECT_ID}/docs/{document_id}/' f'annotations/{annotation_id}/'
def get_document_result_v1(document_id: int) -> str:
"""
Generate URL to access web interface for labeling of this project.
:param document_id: ID of the document as integer
:return: URL for labeling of the project.
"""
return f'{KONFUZIO_HOST}/api/v1/docs/{document_id}/'
def get_document_segmentation_details_url(document_id: int, project_id, action='segmentation') -> str:
"""
Generate URL to get the segmentation results of a document.
:param document_id: ID of the document as integer
:param project_id: ID of the project
:param action: Action from where to get the results
:return: URL to access the segmentation results of a document
"""
return f'https://app.konfuzio.com/api/projects/{project_id}/docs/{document_id}/{action}/'
| 0 | 0 |
5a5179184e11bd69c115e048377711a912dc3761 | 440 | py | Python | Web/user/models.py | Pancras-Zheng/Graduation-Project | 5d1ae78d5e890fa7ecc2456d0d3d22bdea7c29f0 | [
"MIT"
] | 37 | 2018-01-25T03:14:24.000Z | 2021-12-15T10:02:37.000Z | Web/user/models.py | Pancras-Zheng/Graduation-Project | 5d1ae78d5e890fa7ecc2456d0d3d22bdea7c29f0 | [
"MIT"
] | null | null | null | Web/user/models.py | Pancras-Zheng/Graduation-Project | 5d1ae78d5e890fa7ecc2456d0d3d22bdea7c29f0 | [
"MIT"
] | 10 | 2019-04-11T07:27:10.000Z | 2021-11-24T11:16:14.000Z | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.base_user import BaseUserManager
# Create your models here.
class User(AbstractUser):
    """Custom user model extending Django's AbstractUser with profile fields."""

    # Bug fix: verbose_name was the empty string _(''), which renders blank
    # field labels in the admin and in forms - give the fields real,
    # translatable labels.
    nickname = models.CharField(_('nickname'), max_length=50, blank=True)
    info = models.CharField(_('info'), max_length=200, blank=True)

    class Meta(AbstractUser.Meta):
        pass
| 27.5 | 69 | 0.747727 | from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.base_user import BaseUserManager
# Create your models here.
class User(AbstractUser):
    """Custom user model extending Django's AbstractUser with profile fields."""

    # Extra profile fields; verbose names are Chinese:
    # '昵称' = "nickname", '备注' = "remark/note".
    nickname = models.CharField(_('昵称'),max_length=50,blank=True)
    info = models.CharField(_('备注'),max_length=200,blank=True)
    class Meta(AbstractUser.Meta):
        pass
| 12 | 0 |
db4b3216850356cdd188fbda35706bb2acbe536c | 14,096 | py | Python | src/huggingface_hub/commands/user.py | FrancescoSaverioZuppichini/huggingface_hub | 9e7ffda07ddcd668302a61156bcae0d9ec97a26e | [
"Apache-2.0"
] | 1 | 2022-03-28T14:15:24.000Z | 2022-03-28T14:15:24.000Z | src/huggingface_hub/commands/user.py | osanseviero/huggingface_hub | b1cf2d8f47088d3fce2244058d222a4d8234b3ab | [
"Apache-2.0"
] | null | null | null | src/huggingface_hub/commands/user.py | osanseviero/huggingface_hub | b1cf2d8f47088d3fce2244058d222a4d8234b3ab | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from argparse import ArgumentParser
from getpass import getpass
from typing import List, Union
from huggingface_hub.commands import BaseHuggingfaceCLICommand
from huggingface_hub.constants import (
REPO_TYPES,
REPO_TYPES_URL_PREFIXES,
SPACES_SDK_TYPES,
)
from huggingface_hub.hf_api import HfApi, HfFolder
from requests.exceptions import HTTPError
class UserCommands(BaseHuggingfaceCLICommand):
    """Registers the login/whoami/logout/repo sub-commands on the CLI parser."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # Authentication-related sub-commands.
        login_parser = parser.add_parser(
            "login", help="Log in using the same credentials as on huggingface.co"
        )
        login_parser.set_defaults(func=lambda args: LoginCommand(args))
        whoami_parser = parser.add_parser(
            "whoami", help="Find out which huggingface.co account you are logged in as."
        )
        whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))
        logout_parser = parser.add_parser("logout", help="Log out")
        logout_parser.set_defaults(func=lambda args: LogoutCommand(args))

        # new system: git-based repo system
        repo_parser = parser.add_parser(
            "repo",
            help="{create, ls-files} Commands to interact with your huggingface.co repos.",
        )
        repo_subparsers = repo_parser.add_subparsers(
            help="huggingface.co repos related commands"
        )
        repo_create_parser = repo_subparsers.add_parser(
            "create", help="Create a new repo on huggingface.co"
        )
        repo_create_parser.add_argument(
            "name",
            type=str,
            help="Name for your repo. Will be namespaced under your username to build the repo id.",
        )
        repo_create_parser.add_argument(
            "--type",
            type=str,
            help='Optional: repo_type: set to "dataset" or "space" if creating a dataset or space, default is model.',
        )
        repo_create_parser.add_argument(
            "--organization", type=str, help="Optional: organization namespace."
        )
        repo_create_parser.add_argument(
            "--space_sdk",
            type=str,
            help='Optional: Hugging Face Spaces SDK type. Required when --type is set to "space".',
            choices=SPACES_SDK_TYPES,
        )
        repo_create_parser.add_argument(
            "-y",
            "--yes",
            action="store_true",
            help="Optional: answer Yes to the prompt",
        )
        repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))
class ANSI:
    """
    Helper for en.wikipedia.org/wiki/ANSI_escape_code
    """

    _bold = "\u001b[1m"
    _red = "\u001b[31m"
    _gray = "\u001b[90m"
    _reset = "\u001b[0m"

    @classmethod
    def bold(cls, s):
        """Wrap *s* in bold escape codes."""
        return "{}{}{}".format(cls._bold, s, cls._reset)

    @classmethod
    def red(cls, s):
        """Wrap *s* in bold red escape codes."""
        return "{}{}{}".format(cls._bold + cls._red, s, cls._reset)

    @classmethod
    def gray(cls, s):
        """Wrap *s* in gray escape codes."""
        return "{}{}{}".format(cls._gray, s, cls._reset)
def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:
"""
Inspired by:
- stackoverflow.com/a/8356620/593036
- stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
"""
col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
lines = []
lines.append(row_format.format(*headers))
lines.append(row_format.format(*["-" * w for w in col_widths]))
for row in rows:
lines.append(row_format.format(*row))
return "\n".join(lines)
def currently_setup_credential_helpers(directory=None) -> List[str]:
    """Return the git credential helpers configured in *directory*."""
    try:
        process = subprocess.run(
            ["git", "config", "--list"],
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            encoding="utf-8",
            check=True,
            cwd=directory,
        )
    except subprocess.CalledProcessError as exc:
        # Surface git's stderr to the caller.
        raise EnvironmentError(exc.stderr)
    # Each matching config line looks like "credential.helper=<name>".
    return [
        line.split("=")[-1]
        for line in process.stdout.split("\n")
        if "credential.helper" in line
    ]
class BaseUserCommand:
    """Base class for user CLI commands: stores parsed args and an API client."""

    def __init__(self, args):
        self.args = args
        self._api = HfApi()
class LoginCommand(BaseUserCommand):
    """CLI command that authenticates the user and stores the token locally."""

    def run(self):
        # Banner is display text passed to print verbatim.
        print(  # docstyle-ignore
            """
        _|    _|  _|    _|    _|_|_|    _|_|_|  _|_|_|  _|      _|    _|_|_|      _|_|_|_|    _|_|      _|_|_|  _|_|_|_|
        _|    _|  _|    _|  _|        _|          _|    _|_|    _|  _|            _|        _|    _|  _|        _|
        _|_|_|_|  _|    _|  _|  _|_|  _|  _|_|    _|    _|  _|  _|  _|  _|_|      _|_|_|    _|_|_|_|  _|        _|_|_|
        _|    _|  _|    _|  _|    _|  _|    _|    _|    _|    _|_|  _|    _|      _|        _|    _|  _|        _|
        _|    _|    _|_|      _|_|_|    _|_|_|  _|_|_|  _|      _|    _|_|_|      _|        _|    _|    _|_|_|  _|_|_|_|

        To login, `huggingface_hub` now requires a token generated from https://huggingface.co/settings/tokens.
        (Deprecated, will be removed in v0.3.0) To login with username and password instead, interrupt with Ctrl+C.
        """
        )
        # Token flow by default; Ctrl+C falls back to the deprecated
        # username/password flow.
        try:
            token = getpass("Token: ")
            _login(self._api, token=token)
        except KeyboardInterrupt:
            username = input("\rUsername: ")
            password = getpass()
            _login(self._api, username, password)
class WhoamiCommand(BaseUserCommand):
    """CLI command that prints the logged-in user and their organizations."""

    def run(self):
        token = HfFolder.get_token()
        if token is None:
            print("Not logged in")
            exit()
        try:
            user_info = self._api.whoami(token)
            print(user_info["name"])
            org_names = [org["name"] for org in user_info["orgs"]]
            if org_names:
                print(ANSI.bold("orgs: "), ",".join(org_names))
        except HTTPError as err:
            print(err)
            print(ANSI.red(err.response.text))
            exit(1)
class LogoutCommand(BaseUserCommand):
    """CLI command that clears the stored token and logs out server-side."""

    def run(self):
        token = HfFolder.get_token()
        if token is None:
            print("Not logged in")
            exit()
        # Clear local state first so the user is logged out locally even if
        # the server-side call below fails.
        HfFolder.delete_token()
        HfApi.unset_access_token()
        try:
            self._api.logout(token)
        except HTTPError as e:
            # Logging out with an access token will return a client error.
            if not e.response.status_code == 400:
                raise e
        print("Successfully logged out.")
class RepoCreateCommand(BaseUserCommand):
    """CLI command that creates a new model/dataset/space repo on the Hub."""

    def run(self):
        token = HfFolder.get_token()
        if token is None:
            print("Not logged in")
            exit(1)
        # Warn (but do not abort) if git / git-lfs are missing locally -
        # they are only needed for the suggested clone step at the end.
        try:
            stdout = subprocess.check_output(["git", "--version"]).decode("utf-8")
            print(ANSI.gray(stdout.strip()))
        except FileNotFoundError:
            print("Looks like you do not have git installed, please install.")

        try:
            stdout = subprocess.check_output(["git-lfs", "--version"]).decode("utf-8")
            print(ANSI.gray(stdout.strip()))
        except FileNotFoundError:
            print(
                ANSI.red(
                    "Looks like you do not have git-lfs installed, please install."
                    " You can install from https://git-lfs.github.com/."
                    " Then run `git lfs install` (you only have to do this once)."
                )
            )
        print("")

        user = self._api.whoami(token)["name"]
        # Repo lives under the user namespace unless --organization is given.
        namespace = (
            self.args.organization if self.args.organization is not None else user
        )

        repo_id = f"{namespace}/{self.args.name}"

        if self.args.type not in REPO_TYPES:
            print("Invalid repo --type")
            exit(1)

        # Datasets/spaces get a URL prefix in the display name; models do not.
        if self.args.type in REPO_TYPES_URL_PREFIXES:
            prefixed_repo_id = REPO_TYPES_URL_PREFIXES[self.args.type] + repo_id
        else:
            prefixed_repo_id = repo_id

        print(f"You are about to create {ANSI.bold(prefixed_repo_id)}")

        # Interactive confirmation unless -y/--yes was passed.
        if not self.args.yes:
            choice = input("Proceed? [Y/n] ").lower()
            if not (choice == "" or choice == "y" or choice == "yes"):
                print("Abort")
                exit()
        try:
            url = self._api.create_repo(
                repo_id=repo_id,
                token=token,
                repo_type=self.args.type,
                space_sdk=self.args.space_sdk,
            )
        except HTTPError as e:
            print(e)
            print(ANSI.red(e.response.text))
            exit(1)
        print("\nYour repo now lives at:")
        print(f"  {ANSI.bold(url)}")
        print(
            "\nYou can clone it locally with the command below,"
            " and commit/push as usual."
        )
        print(f"\n  git clone {url}")
        print("")
# HTML fragments rendered inside the `notebook_login` widgets below.
NOTEBOOK_LOGIN_PASSWORD_HTML = """<center> <img
src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg
alt='Hugging Face'> <br> Immediately click login after typing your password or
it might be stored in plain text in this notebook file. </center>"""


NOTEBOOK_LOGIN_TOKEN_HTML_START = """<center> <img
src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg
alt='Hugging Face'> <br> Copy a token from <a
href="https://huggingface.co/settings/tokens" target="_blank">your Hugging Face
tokens page</a> and paste it below. <br> Immediately click login after copying
your token or it might be stored in plain text in this notebook file. </center>"""


NOTEBOOK_LOGIN_TOKEN_HTML_END = """
<b>Pro Tip:</b> If you don't already have one, you can create a dedicated
'notebooks' token with 'write' access, that you can then easily reuse for all
notebooks. <br> <i>Logging in with your username and password is deprecated and
won't be possible anymore in the near future. You can still use them for now by
clicking below.</i> </center>"""
def notebook_login():
    """
    Displays a widget to login to the HF website and store the token.
    """
    try:
        import ipywidgets.widgets as widgets
        from IPython.display import clear_output, display
    except ImportError:
        raise ImportError(
            "The `notebook_login` function can only be used in a notebook (Jupyter or Colab) and you need the "
            "`ipywdidgets` module: `pip install ipywidgets`."
        )

    box_layout = widgets.Layout(
        display="flex", flex_flow="column", align_items="center", width="50%"
    )

    # Token-based login page (default).
    token_widget = widgets.Password(description="Token:")
    token_finish_button = widgets.Button(description="Login")
    switch_button = widgets.Button(description="Use password")

    login_token_widget = widgets.VBox(
        [
            widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_START),
            token_widget,
            token_finish_button,
            widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_END),
            switch_button,
        ],
        layout=box_layout,
    )
    display(login_token_widget)

    # Deprecated page for login
    input_widget = widgets.Text(description="Username:")
    password_widget = widgets.Password(description="Password:")
    password_finish_button = widgets.Button(description="Login")

    login_password_widget = widgets.VBox(
        [
            widgets.HTML(value=NOTEBOOK_LOGIN_PASSWORD_HTML),
            widgets.HBox([input_widget, password_widget]),
            password_finish_button,
        ],
        layout=box_layout,
    )

    # On click events
    def login_token_event(t):
        """Handle click on the token login button."""
        token = token_widget.value
        # Erase token and clear value to make sure it's not saved in the notebook.
        token_widget.value = ""
        clear_output()
        _login(HfApi(), token=token)

    token_finish_button.on_click(login_token_event)

    def login_password_event(t):
        """Handle click on the (deprecated) password login button."""
        username = input_widget.value
        password = password_widget.value
        # Erase password and clear value to make sure it's not saved in the notebook.
        password_widget.value = ""
        clear_output()
        _login(HfApi(), username=username, password=password)

    password_finish_button.on_click(login_password_event)

    def switch_event(t):
        """Swap the token page for the deprecated username/password page."""
        clear_output()
        display(login_password_widget)

    switch_button.on_click(switch_event)
def _login(hf_api, username=None, password=None, token=None):
    """Log in with a token or username/password and persist the token locally."""
    if token is None:
        try:
            token = hf_api.login(username, password)
        except HTTPError as e:
            # probably invalid credentials, display error message.
            print(e)
            print(ANSI.red(e.response.text))
            exit(1)
    else:
        # NOTE(review): `name` is unused after validation.
        token, name = hf_api._validate_or_retrieve_token(token)
    hf_api.set_access_token(token)
    HfFolder.save_token(token)
    print("Login successful")
    print("Your token has been saved to", HfFolder.path_token)
    helpers = currently_setup_credential_helpers()

    if "store" not in helpers:
        # Warn when the "store" git credential helper is not configured,
        # since pushes to the Hub may then require re-authentication.
        print(
            ANSI.red(
                "Authenticated through git-credential store but this isn't the helper defined on your machine.\nYou "
                "might have to re-authenticate when pushing to the Hugging Face Hub. Run the following command in your "
                "terminal in case you want to set this credential helper as the default\n\ngit config --global credential.helper store"
            )
        )
| 35.064677 | 135 | 0.605065 | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from argparse import ArgumentParser
from getpass import getpass
from typing import List, Union
from huggingface_hub.commands import BaseHuggingfaceCLICommand
from huggingface_hub.constants import (
REPO_TYPES,
REPO_TYPES_URL_PREFIXES,
SPACES_SDK_TYPES,
)
from huggingface_hub.hf_api import HfApi, HfFolder
from requests.exceptions import HTTPError
class UserCommands(BaseHuggingfaceCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
login_parser = parser.add_parser(
"login", help="Log in using the same credentials as on huggingface.co"
)
login_parser.set_defaults(func=lambda args: LoginCommand(args))
whoami_parser = parser.add_parser(
"whoami", help="Find out which huggingface.co account you are logged in as."
)
whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))
logout_parser = parser.add_parser("logout", help="Log out")
logout_parser.set_defaults(func=lambda args: LogoutCommand(args))
# new system: git-based repo system
repo_parser = parser.add_parser(
"repo",
help="{create, ls-files} Commands to interact with your huggingface.co repos.",
)
repo_subparsers = repo_parser.add_subparsers(
help="huggingface.co repos related commands"
)
repo_create_parser = repo_subparsers.add_parser(
"create", help="Create a new repo on huggingface.co"
)
repo_create_parser.add_argument(
"name",
type=str,
help="Name for your repo. Will be namespaced under your username to build the repo id.",
)
repo_create_parser.add_argument(
"--type",
type=str,
help='Optional: repo_type: set to "dataset" or "space" if creating a dataset or space, default is model.',
)
repo_create_parser.add_argument(
"--organization", type=str, help="Optional: organization namespace."
)
repo_create_parser.add_argument(
"--space_sdk",
type=str,
help='Optional: Hugging Face Spaces SDK type. Required when --type is set to "space".',
choices=SPACES_SDK_TYPES,
)
repo_create_parser.add_argument(
"-y",
"--yes",
action="store_true",
help="Optional: answer Yes to the prompt",
)
repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args))
class ANSI:
    """
    Helpers for wrapping text in terminal escape sequences.
    See en.wikipedia.org/wiki/ANSI_escape_code
    """

    _bold = "\u001b[1m"
    _red = "\u001b[31m"
    _gray = "\u001b[90m"
    _reset = "\u001b[0m"

    @classmethod
    def _wrap(cls, prefix, text):
        # Surround *text* with the style prefix and a trailing reset code.
        return f"{prefix}{text}{cls._reset}"

    @classmethod
    def bold(cls, s):
        """Return *s* rendered bold."""
        return cls._wrap(cls._bold, s)

    @classmethod
    def red(cls, s):
        """Return *s* rendered bold red."""
        return cls._wrap(cls._bold + cls._red, s)

    @classmethod
    def gray(cls, s):
        """Return *s* rendered gray."""
        return cls._wrap(cls._gray, s)
def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str:
    """Render *rows* under *headers* as a plain-text table.

    Column widths fit the widest cell (header included); numbers right-align
    and strings left-align per the default str.format alignment rules.

    Inspired by:
    - stackoverflow.com/a/8356620/593036
    - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
    """
    widths = []
    for column in zip(*rows, headers):
        widths.append(max(len(str(cell)) for cell in column))
    # One "{:w} " slot per column, identical to the original format string.
    row_template = " ".join("{{:{}}}".format(w) for w in widths) + " "
    rendered = [
        row_template.format(*headers),
        row_template.format(*("-" * w for w in widths)),
    ]
    rendered.extend(row_template.format(*row) for row in rows)
    return "\n".join(rendered)
def currently_setup_credential_helpers(directory=None) -> List[str]:
    """Return the git ``credential.helper`` values configured for *directory*.

    Runs ``git config --list`` (in *directory* when given, else the current
    working directory) and collects the value of every ``credential.helper``
    entry, so callers can tell which credential helpers git will consult.

    :raises EnvironmentError: if the git invocation fails; the subprocess
        stderr is forwarded as the error message.
    """
    try:
        output = subprocess.run(
            "git config --list".split(),
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            encoding="utf-8",
            check=True,
            cwd=directory,
        ).stdout.split("\n")
        current_credential_helpers = []
        for line in output:
            if "credential.helper" in line:
                # Lines look like "credential.helper=store"; keep the value.
                current_credential_helpers.append(line.split("=")[-1])
    except subprocess.CalledProcessError as exc:
        raise EnvironmentError(exc.stderr)
    return current_credential_helpers
class BaseUserCommand:
    """Common base for user CLI commands: stores args and an HfApi client."""

    def __init__(self, args):
        # Parsed argparse namespace for this invocation.
        self.args = args
        # Shared client used by subclasses to talk to the Hub.
        self._api = HfApi()
class LoginCommand(BaseUserCommand):
    """Prompt for a token (or, deprecated, username/password) and store it."""

    def run(self):
        """Show the banner, read a token, and delegate to ``_login``.

        Interrupting the token prompt with Ctrl+C falls back to the
        deprecated username/password flow.
        """
        print(  # docstyle-ignore
            """
        _|    _|  _|    _|   _|_|_|   _|_|_|  _|_|_|  _|      _|   _|_|_|      _|_|_|_|    _|_|      _|_|_|  _|_|_|_|
        _|    _|  _|    _|  _|       _|          _|   _|_|    _|  _|            _|        _|    _|  _|       _|
        _|_|_|_|  _|    _|  _|  _|_|  _|_|       _|   _|  _|  _|  _|  _|_|      _|_|_|    _|_|_|_|  _|       _|_|_|
        _|    _|  _|    _|  _|    _|      _|     _|   _|    _|_|  _|    _|      _|        _|    _|  _|       _|
        _|    _|    _|_|      _|_|_|  _|_|_|   _|_|_|  _|      _|    _|_|_|      _|        _|    _|    _|_|_|  _|_|_|_|

        To login, `huggingface_hub` now requires a token generated from https://huggingface.co/settings/tokens.
        (Deprecated, will be removed in v0.3.0) To login with username and password instead, interrupt with Ctrl+C.
        """
        )
        try:
            # getpass keeps the token out of terminal echo/history.
            token = getpass("Token: ")
            _login(self._api, token=token)
        except KeyboardInterrupt:
            # Deprecated interactive fallback.
            username = input("\rUsername: ")
            password = getpass()
            _login(self._api, username, password)
class WhoamiCommand(BaseUserCommand):
    """Print the account name (and orgs) associated with the stored token."""

    def run(self):
        token = HfFolder.get_token()
        if token is None:
            print("Not logged in")
            exit()
        try:
            info = self._api.whoami(token)
        except HTTPError as e:
            print(e)
            print(ANSI.red(e.response.text))
            exit(1)
        print(info["name"])
        org_names = [org["name"] for org in info["orgs"]]
        if org_names:
            print(ANSI.bold("orgs: "), ",".join(org_names))
class LogoutCommand(BaseUserCommand):
    """Delete the stored token locally and revoke it on the Hub."""

    def run(self):
        token = HfFolder.get_token()
        if token is None:
            print("Not logged in")
            exit()
        # Remove local credentials first so the user is logged out even if
        # the server-side revocation below fails.
        HfFolder.delete_token()
        HfApi.unset_access_token()
        try:
            self._api.logout(token)
        except HTTPError as e:
            # Logging out with an access token will return a client error.
            if not e.response.status_code == 400:
                raise e
        print("Successfully logged out.")
class RepoCreateCommand(BaseUserCommand):
    """Interactively create a new repo (model/dataset/space) on the Hub."""

    def run(self):
        """Validate prerequisites, confirm with the user, and create the repo.

        Checks for a stored token and for git/git-lfs (warning only), builds
        the namespaced repo id, asks for confirmation unless ``--yes`` was
        passed, then calls the Hub API and prints the clone URL.
        """
        token = HfFolder.get_token()
        if token is None:
            print("Not logged in")
            exit(1)
        # git / git-lfs presence checks are advisory: a missing tool only
        # prints a warning, it does not abort repo creation.
        try:
            stdout = subprocess.check_output(["git", "--version"]).decode("utf-8")
            print(ANSI.gray(stdout.strip()))
        except FileNotFoundError:
            print("Looks like you do not have git installed, please install.")

        try:
            stdout = subprocess.check_output(["git-lfs", "--version"]).decode("utf-8")
            print(ANSI.gray(stdout.strip()))
        except FileNotFoundError:
            print(
                ANSI.red(
                    "Looks like you do not have git-lfs installed, please install."
                    " You can install from https://git-lfs.github.com/."
                    " Then run `git lfs install` (you only have to do this once)."
                )
            )
        print("")

        user = self._api.whoami(token)["name"]
        # Namespace under --organization when given, else the user account.
        namespace = (
            self.args.organization if self.args.organization is not None else user
        )
        repo_id = f"{namespace}/{self.args.name}"
        if self.args.type not in REPO_TYPES:
            print("Invalid repo --type")
            exit(1)
        # Datasets/spaces are displayed with a URL prefix; models are not.
        if self.args.type in REPO_TYPES_URL_PREFIXES:
            prefixed_repo_id = REPO_TYPES_URL_PREFIXES[self.args.type] + repo_id
        else:
            prefixed_repo_id = repo_id
        print(f"You are about to create {ANSI.bold(prefixed_repo_id)}")
        if not self.args.yes:
            choice = input("Proceed? [Y/n] ").lower()
            if not (choice == "" or choice == "y" or choice == "yes"):
                print("Abort")
                exit()
        try:
            url = self._api.create_repo(
                repo_id=repo_id,
                token=token,
                repo_type=self.args.type,
                space_sdk=self.args.space_sdk,
            )
        except HTTPError as e:
            print(e)
            print(ANSI.red(e.response.text))
            exit(1)
        print("\nYour repo now lives at:")
        print(f"  {ANSI.bold(url)}")
        print(
            "\nYou can clone it locally with the command below,"
            " and commit/push as usual."
        )
        print(f"\n  git clone {url}")
        print("")
# HTML shown above the deprecated username/password form in notebook_login().
NOTEBOOK_LOGIN_PASSWORD_HTML = """<center> <img
src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg
alt='Hugging Face'> <br> Immediately click login after typing your password or
it might be stored in plain text in this notebook file. </center>"""

# HTML shown above the token field of the notebook_login() widget.
NOTEBOOK_LOGIN_TOKEN_HTML_START = """<center> <img
src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg
alt='Hugging Face'> <br> Copy a token from <a
href="https://huggingface.co/settings/tokens" target="_blank">your Hugging Face
tokens page</a> and paste it below. <br> Immediately click login after copying
your token or it might be stored in plain text in this notebook file. </center>"""

# HTML shown below the token field (tip + deprecated-login notice).
NOTEBOOK_LOGIN_TOKEN_HTML_END = """
<b>Pro Tip:</b> If you don't already have one, you can create a dedicated
'notebooks' token with 'write' access, that you can then easily reuse for all
notebooks. <br> <i>Logging in with your username and password is deprecated and
won't be possible anymore in the near future. You can still use them for now by
clicking below.</i> </center>"""
def notebook_login():
    """
    Displays a widget to login to the HF website and store the token.

    Builds two ipywidgets panes: the token-based login (shown first) and the
    deprecated username/password form (shown when "Use password" is clicked).
    Button callbacks clear the widget values and output before calling
    ``_login`` so credentials are not persisted in the notebook file.
    """
    try:
        # Imported lazily: these are only available inside notebook kernels.
        import ipywidgets.widgets as widgets
        from IPython.display import clear_output, display
    except ImportError:
        raise ImportError(
            "The `notebook_login` function can only be used in a notebook (Jupyter or Colab) and you need the "
            "`ipywdidgets` module: `pip install ipywidgets`."
        )
    box_layout = widgets.Layout(
        display="flex", flex_flow="column", align_items="center", width="50%"
    )

    # --- Token-based login pane (default) ---
    token_widget = widgets.Password(description="Token:")
    token_finish_button = widgets.Button(description="Login")
    switch_button = widgets.Button(description="Use password")

    login_token_widget = widgets.VBox(
        [
            widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_START),
            token_widget,
            token_finish_button,
            widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_END),
            switch_button,
        ],
        layout=box_layout,
    )
    display(login_token_widget)

    # Deprecated page for login
    input_widget = widgets.Text(description="Username:")
    password_widget = widgets.Password(description="Password:")
    password_finish_button = widgets.Button(description="Login")

    login_password_widget = widgets.VBox(
        [
            widgets.HTML(value=NOTEBOOK_LOGIN_PASSWORD_HTML),
            widgets.HBox([input_widget, password_widget]),
            password_finish_button,
        ],
        layout=box_layout,
    )

    # On click events
    def login_token_event(t):
        # Runs when the token pane's Login button is pressed.
        token = token_widget.value
        # Erase token and clear value to make sure it's not saved in the notebook.
        token_widget.value = ""
        clear_output()
        _login(HfApi(), token=token)

    token_finish_button.on_click(login_token_event)

    def login_password_event(t):
        # Runs when the password pane's Login button is pressed.
        username = input_widget.value
        password = password_widget.value
        # Erase password and clear value to make sure it's not saved in the notebook.
        password_widget.value = ""
        clear_output()
        _login(HfApi(), username=username, password=password)

    password_finish_button.on_click(login_password_event)

    def switch_event(t):
        # Swap the token pane for the deprecated username/password pane.
        clear_output()
        display(login_password_widget)

    switch_button.on_click(switch_event)
def _login(hf_api, username=None, password=None, token=None):
    """Authenticate against the Hub and persist the resulting token.

    When *token* is given it is validated; otherwise *username*/*password*
    are exchanged for a token via ``hf_api.login``. On success the token is
    stored both as the git-credential access token and in the HfFolder
    cache, and a warning is printed if git's credential.helper is not set
    to "store".
    """
    if token is None:
        try:
            token = hf_api.login(username, password)
        except HTTPError as e:
            # probably invalid credentials, display error message.
            print(e)
            print(ANSI.red(e.response.text))
            exit(1)
    else:
        # NOTE(review): relies on a private HfApi helper; returns the
        # (possibly normalized) token plus the account name.
        token, name = hf_api._validate_or_retrieve_token(token)
    hf_api.set_access_token(token)
    HfFolder.save_token(token)
    print("Login successful")
    print("Your token has been saved to", HfFolder.path_token)
    helpers = currently_setup_credential_helpers()
    if "store" not in helpers:
        print(
            ANSI.red(
                "Authenticated through git-credential store but this isn't the helper defined on your machine.\nYou "
                "might have to re-authenticate when pushing to the Hugging Face Hub. Run the following command in your "
                "terminal in case you want to set this credential helper as the default\n\ngit config --global credential.helper store"
            )
        )
| 0 | 0 |
293ccee023fc91f0aa073e2ba442d3ed89f6b0d4 | 8,365 | py | Python | src/ramstk/logger.py | TahaEntezari/ramstk | f82e5b31ef5c4e33cc02252263247b99a9abe129 | [
"BSD-3-Clause"
] | 26 | 2019-05-15T02:03:47.000Z | 2022-02-21T07:28:11.000Z | src/ramstk/logger.py | TahaEntezari/ramstk | f82e5b31ef5c4e33cc02252263247b99a9abe129 | [
"BSD-3-Clause"
] | 815 | 2019-05-10T12:31:52.000Z | 2022-03-31T12:56:26.000Z | src/ramstk/logger.py | TahaEntezari/ramstk | f82e5b31ef5c4e33cc02252263247b99a9abe129 | [
"BSD-3-Clause"
] | 9 | 2019-04-20T23:06:29.000Z | 2022-01-24T21:21:04.000Z | # -*- coding: utf-8 -*-
#
# ramstk.logger.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright 2019 Doyle Rowland doyle.rowland <AT> reliaqual <DOT> com
"""RAMSTK Logger Module."""
# Standard Library Imports
import logging
import sys
from typing import Dict
# Third Party Imports
from pubsub import pub
LOGFORMAT = logging.Formatter("%(asctime)s - %(name)s - %(lineno)s : %(message)s")
class RAMSTKLogManager:
    """Class to manage logging of RAMSTK messages.

    All loggers created through :meth:`do_create_logger` are stored in the
    class-level :attr:`loggers` registry, so PyPubSub callbacks and every
    manager instance share the same logger objects.
    """

    # Registry of configured loggers, keyed by logger name.  Class-level on
    # purpose: shared across all instances of the manager.
    loggers: Dict[str, logging.Logger] = {}

    # PyPubSub "fail_*" topics that are recorded as warnings via
    # _do_log_fail_message().  Kept in one tuple so the subscription list is
    # easy to audit and extend without duplicating pub.subscribe() calls.
    _FAIL_TOPICS = (
        "fail_connect_program_database",
        "fail_delete_environment",
        "fail_delete_failure_definition",
        "fail_delete_fmea",
        "fail_delete_function",
        "fail_delete_hazard",
        "fail_delete_mission",
        "fail_delete_mission_phase",
        "fail_delete_revision",
        "fail_import_module",
        "fail_insert_action",
        "fail_insert_cause",
        "fail_insert_control",
        "fail_insert_environment",
        "fail_insert_failure_definition",
        "fail_insert_mechanism",
        "fail_insert_mission",
        "fail_insert_mission_phase",
        "fail_insert_mode",
        "fail_insert_function",
        "fail_insert_hazard",
        "fail_insert_hardware",
        "fail_insert_validation",
        "fail_insert_stakeholder",
        "fail_insert_revision",
        "fail_insert_requirement",
        "fail_insert_opload",
        "fail_insert_opstress",
        "fail_insert_record",
        "fail_insert_test_method",
        "fail_update_fmea",
        "fail_update_function",
        "fail_update_hardware",
        "fail_update_record",
        "fail_update_requirement",
        "fail_update_revision",
    )

    def __init__(self, log_file: str) -> None:
        """Initialize an instance of the LogManager.

        :param log_file: the absolute path to the log file to use with this
            log manager.
        """
        self.log_file = log_file

        # Subscribe to PyPubSub messages.  Every fail_* topic funnels into
        # the same warning-level handler.
        for _topic in self._FAIL_TOPICS:
            pub.subscribe(self._do_log_fail_message, _topic)

        pub.subscribe(self.do_log_debug, "do_log_debug_msg")
        pub.subscribe(self.do_log_info, "do_log_info_msg")
        pub.subscribe(self.do_log_warning, "do_log_warning_msg")
        pub.subscribe(self.do_log_error, "do_log_error_msg")
        pub.subscribe(self.do_log_critical, "do_log_critical_msg")

        # Create a logger for the pypubsub fail_* messages.
        self.do_create_logger(__name__, "WARN")

    def _do_log_fail_message(self, error_message: str) -> None:
        """Log PyPubSub broadcast fail messages at WARNING level.

        :param error_message: the error message that was part of the
            broadcast package.
        :return: None
        :rtype: None
        """
        self.loggers[__name__].warning(error_message)

    @staticmethod
    def _get_console_handler(log_level: str) -> logging.Handler:
        """Create the log handler for console output.

        :param log_level: the minimum level this handler will emit.
        :return: _c_handler
        :rtype: :class:`logging.Handler`
        """
        _c_handler = logging.StreamHandler(sys.stdout)
        _c_handler.setLevel(log_level)
        _c_handler.setFormatter(LOGFORMAT)
        return _c_handler

    def _get_file_handler(self, log_level: str) -> logging.Handler:
        """Create the log handler for file output.

        :param log_level: the minimum level this handler will emit.
        :return: _f_handler
        :rtype: :class:`logging.Handler`
        """
        _f_handler = logging.FileHandler(self.log_file)
        _f_handler.setLevel(log_level)
        _f_handler.setFormatter(LOGFORMAT)
        return _f_handler

    def do_create_logger(
        self, logger_name: str, log_level: str, to_tty: bool = False
    ) -> None:
        """Create a logger instance and register it in :attr:`loggers`.

        :param logger_name: the name of the logger used in the application.
        :param log_level: the level of messages to log.
        :param to_tty: boolean indicating whether this logger will
            also dump messages to the terminal.
        :return: None
        :rtype: None
        """
        # NOTE(review): calling this twice with the same name attaches
        # duplicate handlers to the same underlying logger -- confirm
        # callers only create each logger once.
        _logger = logging.getLogger(logger_name)
        _logger.setLevel(log_level)
        _logger.addHandler(self._get_file_handler(log_level))

        if to_tty:
            _logger.addHandler(self._get_console_handler(log_level))

        self.loggers[logger_name] = _logger

    def do_log_debug(self, logger_name: str, message: str) -> None:
        """Log DEBUG level messages.

        :param logger_name: the name of the logger used in the application.
        :param message: the message to log.
        :return: None
        :rtype: None
        """
        if self.loggers[logger_name].isEnabledFor(logging.DEBUG):
            self.loggers[logger_name].debug(message)

    def do_log_exception(self, logger_name: str, exception: object) -> None:
        """Log EXCEPTIONS.

        :param logger_name: the name of the logger used in the application.
        :param exception: the exception to log.
        :return: None
        :rtype: None
        """
        # BUG FIX: Logger.exception() records at ERROR level, so the guard
        # must test ERROR.  The old isEnabledFor(logging.WARNING) guard
        # suppressed exception logging whenever the logger level was set
        # above WARNING (e.g. ERROR), even though ERROR records would have
        # been emitted.
        if self.loggers[logger_name].isEnabledFor(logging.ERROR):
            self.loggers[logger_name].exception(exception)

    def do_log_info(self, logger_name: str, message: str) -> None:
        """Log INFO level messages.

        :param logger_name: the name of the logger used in the application.
        :param message: the message to log.
        :return: None
        :rtype: None
        """
        if self.loggers[logger_name].isEnabledFor(logging.INFO):
            self.loggers[logger_name].info(message)

    def do_log_warning(self, logger_name: str, message: str) -> None:
        """Log WARN level messages.

        :param logger_name: the name of the logger used in the application.
        :param message: the message to log.
        :return: None
        :rtype: None
        """
        if self.loggers[logger_name].isEnabledFor(logging.WARNING):
            self.loggers[logger_name].warning(message)

    def do_log_error(self, logger_name: str, message: str) -> None:
        """Log ERROR level messages.

        :param logger_name: the name of the logger used in the application.
        :param message: the message to log.
        :return: None
        :rtype: None
        """
        if self.loggers[logger_name].isEnabledFor(logging.ERROR):
            self.loggers[logger_name].error(message)

    def do_log_critical(self, logger_name: str, message: str) -> None:
        """Log CRITICAL level messages.

        :param logger_name: the name of the logger used in the application.
        :param message: the message to log.
        :return: None
        :rtype: None
        """
        self.loggers[logger_name].critical(message)
| 39.64455 | 82 | 0.68416 | # -*- coding: utf-8 -*-
#
# ramstk.logger.py is part of The RAMSTK Project
#
# All rights reserved.
# Copyright 2019 Doyle Rowland doyle.rowland <AT> reliaqual <DOT> com
"""RAMSTK Logger Module."""
# Standard Library Imports
import logging
import sys
from typing import Dict
# Third Party Imports
from pubsub import pub
LOGFORMAT = logging.Formatter("%(asctime)s - %(name)s - %(lineno)s : %(message)s")
class RAMSTKLogManager:
    """Class to manage logging of RAMSTK messages.

    All loggers created through :meth:`do_create_logger` are stored in the
    class-level :attr:`loggers` registry, so PyPubSub callbacks and every
    manager instance share the same logger objects.
    """

    # Registry of configured loggers, keyed by logger name.  Class-level on
    # purpose: shared across all instances of the manager.
    loggers: Dict[str, logging.Logger] = {}

    # PyPubSub "fail_*" topics that are recorded as warnings via
    # _do_log_fail_message().  Kept in one tuple so the subscription list is
    # easy to audit and extend without duplicating pub.subscribe() calls.
    _FAIL_TOPICS = (
        "fail_connect_program_database",
        "fail_delete_environment",
        "fail_delete_failure_definition",
        "fail_delete_fmea",
        "fail_delete_function",
        "fail_delete_hazard",
        "fail_delete_mission",
        "fail_delete_mission_phase",
        "fail_delete_revision",
        "fail_import_module",
        "fail_insert_action",
        "fail_insert_cause",
        "fail_insert_control",
        "fail_insert_environment",
        "fail_insert_failure_definition",
        "fail_insert_mechanism",
        "fail_insert_mission",
        "fail_insert_mission_phase",
        "fail_insert_mode",
        "fail_insert_function",
        "fail_insert_hazard",
        "fail_insert_hardware",
        "fail_insert_validation",
        "fail_insert_stakeholder",
        "fail_insert_revision",
        "fail_insert_requirement",
        "fail_insert_opload",
        "fail_insert_opstress",
        "fail_insert_record",
        "fail_insert_test_method",
        "fail_update_fmea",
        "fail_update_function",
        "fail_update_hardware",
        "fail_update_record",
        "fail_update_requirement",
        "fail_update_revision",
    )

    def __init__(self, log_file: str) -> None:
        """Initialize an instance of the LogManager.

        :param log_file: the absolute path to the log file to use with this
            log manager.
        """
        self.log_file = log_file

        # Subscribe to PyPubSub messages.  Every fail_* topic funnels into
        # the same warning-level handler.
        for _topic in self._FAIL_TOPICS:
            pub.subscribe(self._do_log_fail_message, _topic)

        pub.subscribe(self.do_log_debug, "do_log_debug_msg")
        pub.subscribe(self.do_log_info, "do_log_info_msg")
        pub.subscribe(self.do_log_warning, "do_log_warning_msg")
        pub.subscribe(self.do_log_error, "do_log_error_msg")
        pub.subscribe(self.do_log_critical, "do_log_critical_msg")

        # Create a logger for the pypubsub fail_* messages.
        self.do_create_logger(__name__, "WARN")

    def _do_log_fail_message(self, error_message: str) -> None:
        """Log PyPubSub broadcast fail messages at WARNING level.

        :param error_message: the error message that was part of the
            broadcast package.
        :return: None
        :rtype: None
        """
        self.loggers[__name__].warning(error_message)

    @staticmethod
    def _get_console_handler(log_level: str) -> logging.Handler:
        """Create the log handler for console output.

        :param log_level: the minimum level this handler will emit.
        :return: _c_handler
        :rtype: :class:`logging.Handler`
        """
        _c_handler = logging.StreamHandler(sys.stdout)
        _c_handler.setLevel(log_level)
        _c_handler.setFormatter(LOGFORMAT)
        return _c_handler

    def _get_file_handler(self, log_level: str) -> logging.Handler:
        """Create the log handler for file output.

        :param log_level: the minimum level this handler will emit.
        :return: _f_handler
        :rtype: :class:`logging.Handler`
        """
        _f_handler = logging.FileHandler(self.log_file)
        _f_handler.setLevel(log_level)
        _f_handler.setFormatter(LOGFORMAT)
        return _f_handler

    def do_create_logger(
        self, logger_name: str, log_level: str, to_tty: bool = False
    ) -> None:
        """Create a logger instance and register it in :attr:`loggers`.

        :param logger_name: the name of the logger used in the application.
        :param log_level: the level of messages to log.
        :param to_tty: boolean indicating whether this logger will
            also dump messages to the terminal.
        :return: None
        :rtype: None
        """
        # NOTE(review): calling this twice with the same name attaches
        # duplicate handlers to the same underlying logger -- confirm
        # callers only create each logger once.
        _logger = logging.getLogger(logger_name)
        _logger.setLevel(log_level)
        _logger.addHandler(self._get_file_handler(log_level))

        if to_tty:
            _logger.addHandler(self._get_console_handler(log_level))

        self.loggers[logger_name] = _logger

    def do_log_debug(self, logger_name: str, message: str) -> None:
        """Log DEBUG level messages.

        :param logger_name: the name of the logger used in the application.
        :param message: the message to log.
        :return: None
        :rtype: None
        """
        if self.loggers[logger_name].isEnabledFor(logging.DEBUG):
            self.loggers[logger_name].debug(message)

    def do_log_exception(self, logger_name: str, exception: object) -> None:
        """Log EXCEPTIONS.

        :param logger_name: the name of the logger used in the application.
        :param exception: the exception to log.
        :return: None
        :rtype: None
        """
        # BUG FIX: Logger.exception() records at ERROR level, so the guard
        # must test ERROR.  The old isEnabledFor(logging.WARNING) guard
        # suppressed exception logging whenever the logger level was set
        # above WARNING (e.g. ERROR), even though ERROR records would have
        # been emitted.
        if self.loggers[logger_name].isEnabledFor(logging.ERROR):
            self.loggers[logger_name].exception(exception)

    def do_log_info(self, logger_name: str, message: str) -> None:
        """Log INFO level messages.

        :param logger_name: the name of the logger used in the application.
        :param message: the message to log.
        :return: None
        :rtype: None
        """
        if self.loggers[logger_name].isEnabledFor(logging.INFO):
            self.loggers[logger_name].info(message)

    def do_log_warning(self, logger_name: str, message: str) -> None:
        """Log WARN level messages.

        :param logger_name: the name of the logger used in the application.
        :param message: the message to log.
        :return: None
        :rtype: None
        """
        if self.loggers[logger_name].isEnabledFor(logging.WARNING):
            self.loggers[logger_name].warning(message)

    def do_log_error(self, logger_name: str, message: str) -> None:
        """Log ERROR level messages.

        :param logger_name: the name of the logger used in the application.
        :param message: the message to log.
        :return: None
        :rtype: None
        """
        if self.loggers[logger_name].isEnabledFor(logging.ERROR):
            self.loggers[logger_name].error(message)

    def do_log_critical(self, logger_name: str, message: str) -> None:
        """Log CRITICAL level messages.

        :param logger_name: the name of the logger used in the application.
        :param message: the message to log.
        :return: None
        :rtype: None
        """
        self.loggers[logger_name].critical(message)
| 0 | 0 |
f4bbd3c26bf1e8d647337c4dd66784c1c9d86a9f | 2,458 | py | Python | examples/demo/status_overlay.py | martinRenou/chaco | 1888da3ecee89f9b2d11900cda9333b32fc5e89a | [
"BSD-3-Clause"
] | 3 | 2017-09-17T17:32:06.000Z | 2022-03-15T13:04:43.000Z | examples/demo/status_overlay.py | martinRenou/chaco | 1888da3ecee89f9b2d11900cda9333b32fc5e89a | [
"BSD-3-Clause"
] | null | null | null | examples/demo/status_overlay.py | martinRenou/chaco | 1888da3ecee89f9b2d11900cda9333b32fc5e89a | [
"BSD-3-Clause"
] | 5 | 2015-05-17T16:08:11.000Z | 2021-02-23T09:23:42.000Z |
import numpy
from chaco.api import Plot, ArrayPlotData
from chaco.layers.api import ErrorLayer, WarningLayer, StatusLayer
from enable.component_editor import ComponentEditor
from traits.api import HasTraits, Instance, Button
from traitsui.api import UItem, View, HGroup
class MyPlot(HasTraits):
    """ Displays a plot with a few buttons to control which overlay
    to display
    """
    # The Chaco plot and the status overlay currently layered on it
    # (unset until one of the buttons is pressed).
    plot = Instance(Plot)
    status_overlay = Instance(StatusLayer)

    # Buttons switching between error / warning / no-overlay states.
    error_button = Button('Error')
    warn_button = Button('Warning')
    no_problem_button = Button('No problem')

    # Button row above the plot canvas in a resizable window.
    traits_view = View( HGroup(UItem('error_button'),
                               UItem('warn_button'),
                               UItem('no_problem_button')),
                        UItem('plot', editor=ComponentEditor()),
                        width=700, height=600, resizable=True,
                        )

    def __init__(self, index, data_series, **kw):
        """Build a line plot of *data_series* against *index*."""
        super(MyPlot, self).__init__(**kw)
        plot_data = ArrayPlotData(index=index)
        plot_data.set_data('data_series', data_series)
        self.plot = Plot(plot_data)
        self.plot.plot(('index', 'data_series'))

    def _error_button_fired(self, event):
        """ removes the old overlay and replaces it with
        an error overlay
        """
        self.clear_status()
        # Anchor upper-left, scaled to a quarter of its natural size.
        self.status_overlay = ErrorLayer(component=self.plot,
                                        align='ul', scale_factor=0.25)
        self.plot.overlays.append(self.status_overlay)
        self.plot.request_redraw()

    def _warn_button_fired(self, event):
        """ removes the old overlay and replaces it with
        an warning overlay
        """
        self.clear_status()
        # Anchor upper-right, scaled to a quarter of its natural size.
        self.status_overlay = WarningLayer(component=self.plot,
                                        align='ur', scale_factor=0.25)
        self.plot.overlays.append(self.status_overlay)
        self.plot.request_redraw()

    def _no_problem_button_fired(self, event):
        """ removes the old overlay
        """
        self.clear_status()
        self.plot.request_redraw()

    def clear_status(self):
        """Fade out (and thereby remove) the current overlay, if any."""
        if self.status_overlay in self.plot.overlays:
            # fade_out will remove the overlay when its done
            self.status_overlay.fade_out()
# Demo data: a simple quadratic series over a five-point index.
index = numpy.array([1,2,3,4,5])
data_series = index**2

# Build the demo window; configure_traits() blocks until it is closed.
my_plot = MyPlot(index, data_series)
my_plot.configure_traits()
| 32.342105 | 74 | 0.614321 |
import numpy
from chaco.api import Plot, ArrayPlotData
from chaco.layers.api import ErrorLayer, WarningLayer, StatusLayer
from enable.component_editor import ComponentEditor
from traits.api import HasTraits, Instance, Button
from traitsui.api import UItem, View, HGroup
class MyPlot(HasTraits):
    """ Displays a plot with a few buttons to control which overlay
    to display
    """
    # The Chaco plot and the status overlay currently layered on it
    # (unset until one of the buttons is pressed).
    plot = Instance(Plot)
    status_overlay = Instance(StatusLayer)

    # Buttons switching between error / warning / no-overlay states.
    error_button = Button('Error')
    warn_button = Button('Warning')
    no_problem_button = Button('No problem')

    # Button row above the plot canvas in a resizable window.
    traits_view = View( HGroup(UItem('error_button'),
                               UItem('warn_button'),
                               UItem('no_problem_button')),
                        UItem('plot', editor=ComponentEditor()),
                        width=700, height=600, resizable=True,
                        )

    def __init__(self, index, data_series, **kw):
        """Build a line plot of *data_series* against *index*."""
        super(MyPlot, self).__init__(**kw)
        plot_data = ArrayPlotData(index=index)
        plot_data.set_data('data_series', data_series)
        self.plot = Plot(plot_data)
        self.plot.plot(('index', 'data_series'))

    def _error_button_fired(self, event):
        """ removes the old overlay and replaces it with
        an error overlay
        """
        self.clear_status()
        # Anchor upper-left, scaled to a quarter of its natural size.
        self.status_overlay = ErrorLayer(component=self.plot,
                                        align='ul', scale_factor=0.25)
        self.plot.overlays.append(self.status_overlay)
        self.plot.request_redraw()

    def _warn_button_fired(self, event):
        """ removes the old overlay and replaces it with
        an warning overlay
        """
        self.clear_status()
        # Anchor upper-right, scaled to a quarter of its natural size.
        self.status_overlay = WarningLayer(component=self.plot,
                                        align='ur', scale_factor=0.25)
        self.plot.overlays.append(self.status_overlay)
        self.plot.request_redraw()

    def _no_problem_button_fired(self, event):
        """ removes the old overlay
        """
        self.clear_status()
        self.plot.request_redraw()

    def clear_status(self):
        """Fade out (and thereby remove) the current overlay, if any."""
        if self.status_overlay in self.plot.overlays:
            # fade_out will remove the overlay when its done
            self.status_overlay.fade_out()
# Demo data: a simple quadratic series over a five-point index.
index = numpy.array([1,2,3,4,5])
data_series = index**2

# Build the demo window; configure_traits() blocks until it is closed.
my_plot = MyPlot(index, data_series)
my_plot.configure_traits()
| 0 | 0 |
08cdc43106ee16eac03626a91a328ff78df10a22 | 681 | py | Python | multi_threadpool_executor.py | Dev-Bobbie/multi_spider | 8fd19ab70de04b6cac021d354850b07ffcf360f2 | [
"Apache-2.0"
] | null | null | null | multi_threadpool_executor.py | Dev-Bobbie/multi_spider | 8fd19ab70de04b6cac021d354850b07ffcf360f2 | [
"Apache-2.0"
] | null | null | null | multi_threadpool_executor.py | Dev-Bobbie/multi_spider | 8fd19ab70de04b6cac021d354850b07ffcf360f2 | [
"Apache-2.0"
] | null | null | null | from concurrent.futures import ThreadPoolExecutor
import time
def sayhello(a):
    """Print a greeting for *a*, then sleep 2 s to simulate slow work."""
    print("hello: "+a)
    time.sleep(2)
def main():
    """Time the same workload run serially, via submit(), and via map()."""
    inputs = ["a", "b", "c"]

    # 1) Serial baseline: the three 2-second tasks run back to back.
    started = time.time()
    for item in inputs:
        sayhello(item)
    elapsed = time.time() - started
    print("time1: " + str(elapsed))

    # 2) Three workers, one submit() per item; leaving the with-block
    #    shuts the pool down and waits for every submitted task.
    started = time.time()
    with ThreadPoolExecutor(3) as pool:
        for item in inputs:
            pool.submit(sayhello, item)
    elapsed = time.time() - started
    print("time2: " + str(elapsed))

    # 3) map() schedules the whole iterable at once; shutdown again waits.
    started = time.time()
    with ThreadPoolExecutor(3) as pool:
        pool.map(sayhello, inputs)
    elapsed = time.time() - started
    print("time3: " + str(elapsed))
if __name__ == '__main__':
main() | 24.321429 | 49 | 0.625551 | from concurrent.futures import ThreadPoolExecutor
import time
def sayhello(a):
    """Print a greeting for *a*, then sleep 2 s to simulate slow work."""
    print("hello: "+a)
    time.sleep(2)
def main():
    """Time the same workload run serially, via submit(), and via map()."""
    seed=["a","b","c"]
    # 1) Serial baseline: three 2-second tasks back to back.
    start1=time.time()
    for each in seed:
        sayhello(each)
    end1=time.time()
    print("time1: "+str(end1-start1))
    # 2) Three workers, one submit() per item; leaving the with-block
    #    shuts the pool down and waits for every submitted task.
    start2=time.time()
    with ThreadPoolExecutor(3) as executor:
        for each in seed:
            executor.submit(sayhello,each)
    end2=time.time()
    print("time2: "+str(end2-start2))
    # 3) map() schedules the whole iterable; shutdown again waits.
    start3=time.time()
    with ThreadPoolExecutor(3) as executor1:
        executor1.map(sayhello,seed)
    end3=time.time()
    print("time3: "+str(end3-start3))
if __name__ == '__main__':
main() | 0 | 0 |
61b5793ee25a599b5e5738633cc2cd220b7bf9e9 | 7,541 | py | Python | boss2.py | Jamhacks2018/TheJamExpansion | 1acec353e666fef6608e06b57e82683053e7f060 | [
"MIT"
] | null | null | null | boss2.py | Jamhacks2018/TheJamExpansion | 1acec353e666fef6608e06b57e82683053e7f060 | [
"MIT"
] | null | null | null | boss2.py | Jamhacks2018/TheJamExpansion | 1acec353e666fef6608e06b57e82683053e7f060 | [
"MIT"
] | 3 | 2018-05-05T19:59:56.000Z | 2020-11-15T21:06:27.000Z | from pygame import *
from enemies import *
import random
init()  # initialize all pygame modules before any font/image calls below
# UI fonts: general HUD text (30 pt) and the boss health readout (15 pt).
fontGeneral = font.Font('resources/fonts/Calibri.ttf', 30)
fontHealth = font.Font('resources/fonts/Calibri Bold.ttf', 15)
class Cap():
    """The jam-jar cap sprite that shields the boss until destroyed."""

    def __init__(self):
        #initialize the image and pos of cap:
        self.img = image.load('resources/jam/boss/cap.png')
        self.x = 0
        self.y = -150
        self.rect = Rect(self.x, self.y, 722, 149)
        # BUG FIX: check() decrements self.health, but it was never
        # initialized, so the first bullet hit raised AttributeError.
        # NOTE(review): 1000 is a placeholder -- confirm intended value.
        self.health = 1000

    def draw(self, screen):
        """Blit the cap and refresh its bounding rect from the sprite size."""
        # BUG FIX: original referenced self.image[self.phase] and
        # self.Rect(), neither of which exists on Cap (AttributeError);
        # Cap holds a single surface in self.img and a Rect in self.rect.
        screen.blit(self.img, self.rect)
        self.rect = Rect(self.x, self.y, self.img.get_width(), self.img.get_height())

    def check(self):
        """Apply damage from every bullet currently overlapping the cap.

        NOTE(review): relies on a module-level ``bullets`` sequence
        (presumably from the star imports) -- confirm it exists at runtime.
        """
        for b in bullets:
            # BUG FIX: pygame's Rect method is colliderect(); the original
            # called the nonexistent colliderrect() (AttributeError).
            if b.rect.colliderect(self.rect):
                self.health -= b.dmg
        #check if it is supposed to die, if dead start boss phase 2:
class Boss():
    """Three-phase bullet-hell boss.

    Alternates short movement bursts with two attack patterns (a 360-degree
    bullet ring and mirrored sweeping arcs); pacing tightens as the phase
    (0..2) advances with lost health.
    """
    def __init__(self):
        #initialize the image and pos:
        self.image = image.load('resources/jam/boss/uncapped.png').convert_alpha()
        self.w = self.image.get_width() // 5
        # NOTE(review): height is also derived from get_width(); looks like a
        # typo for get_height(), but the art may be square -- confirm before
        # changing, since it affects the sprite scale below.
        self.h = self.image.get_width() // 5
        self.x = 300
        self.y = 25
        self.rect = Rect(self.x, self.y, self.w, self.h)
        self.image = transform.scale(self.image, (self.w, self.h))
        # Muzzle positions. BUGFIX: gun1 now matches update(), which places it
        # half a sprite-width left of the bottom-right corner (__init__ used
        # '+' and put it outside the sprite).
        self.gun3 = (self.rect.bottomleft[0]+10, self.rect.bottomleft[1]-10)
        self.gun2 = (self.rect.bottomright[0]+10, self.rect.bottomright[1]-10)
        self.gun1 = (self.rect.bottomright[0] - self.w // 2, self.rect.bottomright[1]-10)
        self.guns = [self.gun1, self.gun2]
        # Per-phase pacing tables, indexed by self.phase (0..2).
        # (firing_speed appears unused in this class -- kept for callers.)
        self.firing_speed = [25, 20, 15]
        self.firing_time = 0
        # grace time is reset from grace_timers when an attack starts
        self.grace_timers = [120, 90, 65]
        self.grace_time = 180
        #initialize boss properties
        self.phase = 0
        self.max_health = 12000
        self.health = self.max_health
        self.vulnerable = True
        self.attacks = [False, False]  # [ring active, sweeper active]
        self.directions = 0
        #counter of how much boss moved since the last attack started
        self.frames_spent_moving = 0
    #draws itself and its health
    def draw(self, screen):
        """Draw the sprite plus a bottom-of-screen health bar and read-out."""
        screen.blit(self.image, self.rect)
        draw.rect(screen, (255, 0, 255),
                  (15, 700 - 85, int(985 * self.health / self.max_health), 75))
        # BUGFIX: the label was rendered with fontGeneral but centred using
        # fontHealth's metrics, so it sat off-centre; use one font for both.
        label = "Boss health: %i/%i" % (self.health, self.max_health)
        size = fontHealth.size(label)
        screen.blit(fontHealth.render(label, 1, (0, 255, 0)),
                    (467 - size[0] // 2, 700 - 55 - size[1] // 2))
    def update(self, pl, eb):
        """Advance one frame: start a random attack when grace expires,
        otherwise drift; then refresh muzzle positions and run attacks."""
        if self.grace_time == 0:
            #handles attack timings with some randomness
            self.attacks[random.randint(0,1)] = True
            self.directions = random.randint(0,3)
            #resets movement during attacks
            self.frames_spent_moving = 0
            #handles in between attack grace timers
            self.grace_time = self.grace_timers[self.phase]
        else:
            #handles movement between attacks (at most 30 frames of drift)
            if self.frames_spent_moving <= 30:
                self.move()
                self.frames_spent_moving += 1
            self.grace_time -= 1
        self.rect = Rect(self.x, self.y, self.w, self.h)
        self.gun3 = (self.rect.bottomleft[0]+10, self.rect.bottomleft[1]-10)
        self.gun2 = (self.rect.bottomright[0]+10, self.rect.bottomright[1]-10)
        self.gun1 = (self.rect.bottomright[0] - self.w // 2, self.rect.bottomright[1]-10)
        self.guns = [self.gun1, self.gun2]
        #tries to fire each attack
        self.sweeper(eb)
        self.ring(eb)
    def check(self, bullets, pickups, pl):
        """Apply bullet damage, spawn pick-ups, and manage phase changes.

        Returns False exactly when the boss dies (health is also reset so
        the object can be reused); True otherwise.
        """
        for b in bullets:
            if b.rect.colliderect(self.rect):
                self.health -= b.dmg + pl.dmg_upG
        #if health permits, spawns a randomly placed heart
        if 0 <= self.health%500 <= 10 and self.health != self.max_health:
            pickups.append(Heart(random.randint(300, 700), random.randint(200, 500), random.randint(250, 500)))
        if 0 <= self.health%250 <= 10 and self.health != self.max_health:
            self.weakpoint = random.randint(0, 4)
            self.health -= 11  # nudge health out of the trigger band
        # checks if it is supposed to die
        if self.health <= 0:
            self.health = self.max_health
            return False
        # BUGFIX: phases were set to 2/3 (3 would overflow the 3-entry
        # pacing tables in update()) and the <4000 branch was unreachable
        # because <8000 matched first. Test the lower threshold first and
        # keep phase in 0..2.
        elif self.health < 4000:
            self.phase = 2
        elif self.health < 8000:
            self.phase = 1
        return True
    def move(self):
        """Step in self.directions if the destination stays on screen.
        (Leftover per-frame debug prints removed.)"""
        if self.directions == 0:
            if self.y < 100:
                self.y += 3
        elif self.directions == 1:
            if 0 < self.y:
                self.y -= 3
        elif self.directions == 2:
            if 0 < self.x:
                self.x -= 10
        elif self.directions == 3:
            if self.x + 800 < 1000:
                self.x += 10
    def sweeper(self, enemyBullets):
        """Sweep two mirrored bullet streams across 10..170 degrees."""
        if self.attacks[1]:
            for angle in range(10, 170, 5):
                #fire only when the timer lines up with this angle
                if self.firing_time + 10 == angle:
                    self.target_angle = (self.gun2[0] + 50 * cos(radians(angle)),
                                         self.gun2[1] + 50 * sin(radians(angle)))
                    enemyBullets.append(JamBullet(self.gun2[0], self.gun2[1], self.target_angle[0], self.target_angle[1], 15 * (self.phase + 1)))
                    self.target_angle = (self.gun3[0] + 50 * cos(radians(180 - angle)),
                                         self.gun3[1] + 50 * sin(radians(180 -angle)))
                    enemyBullets.append(JamBullet(self.gun3[0], self.gun3[1], self.target_angle[0], self.target_angle[1], 15 * (self.phase + 1)))
                #ends attack
                if self.firing_time + 10 >= 170:
                    self.attacks[1] = False
                    self.firing_time = 0
                    break
            else: self.firing_time += 2  # for-else: advance timer when no break
    def ring(self, enemyBullets):
        """Emit a full 360-degree ring of bullets from the sprite centre.

        NOTE(review): the last JamBullet argument is 15 * self.phase, which
        is 0 in phase 0 (sweeper uses phase + 1) -- possibly intentional
        (no ring before phase 1); confirm JamBullet's argument semantics.
        """
        if self.attacks[0]:
            for angle in range(0, 360, 10):
                if self.firing_time == angle:
                    self.target_angle = (self.rect.centerx + 50 * cos(radians(angle)),
                                         self.rect.centery + 50 * sin(radians(angle)))
                    enemyBullets.append(JamBullet(self.rect.centerx, self.rect.centery, self.target_angle[0], self.target_angle[1], 15 * self.phase))
                if self.firing_time >= 360:
                    self.attacks[0] = False
                    self.firing_time = 0
                    break
            else: self.firing_time += 2.5  # for-else: advance timer when no break
| 40.983696 | 287 | 0.502851 | from pygame import *
from enemies import *
import random
init()
fontGeneral = font.Font('resources/fonts/Calibri.ttf', 30)
fontHealth = font.Font('resources/fonts/Calibri Bold.ttf', 15)
class Cap():
def __init__(self):
#initialize the image and pos of cap:
self.img = image.load('resources/jam/boss/cap.png')
self.x = 0
self.y = -150
self.rect = Rect(self.x, self.y, 722, 149)
def draw(self, screen):
screen.blit(self.image[self.phase], self.Rect())
self.rect = Rect(self.x, self.y, self.image[1].get_width(), self.image[1].get_height())
def check(self):
for b in bullets:
if b.rect.colliderrect(self.rect):
self.health -= b.dmg
#check if it is supposed to die, if dead start boss phase 2:
class Boss():
def __init__(self):
#initialize the image and pos:
self.image = image.load('resources/jam/boss/uncapped.png').convert_alpha()
self.w = self.image.get_width() // 5
self.h = self.image.get_width() // 5
self.x = 300
self.y = 25
self.rect = Rect(self.x, self.y, self.w, self.h)
self.image = transform.scale(self.image, (self.w, self.h))
self.gun3 = (self.rect.bottomleft[0]+10, self.rect.bottomleft[1]-10)
self.gun2 = (self.rect.bottomright[0]+10, self.rect.bottomright[1]-10)
self.gun1 = (self.rect.bottomright[0] + self.w // 2, self.rect.bottomright[1]-10)
self.guns = [self.gun1, self.gun2]
self.firing_speed = [25, 20, 15]
self.firing_time = 0
#grace time is reset if grace time is reached
self.grace_timers = [120, 90, 65]
self.grace_time = 180
#initialize boss properties
self.phase = 0
self.max_health = 12000
self.health = self.max_health
self.vulnerable = True
self.attacks = [False, False]
self.directions = 0
#counter of how much boss moved
self.frames_spent_moving = 0
#draws itself and its health
def draw(self, screen):
screen.blit(self.image, self.rect)
draw.rect(screen, (255, 0, 255), (15, 700 - 85, int(985 * self.health / self.max_health), 75))
screen.blit(fontGeneral.render("Boss health: %i/%i" %(self.health, self.max_health), 1, (0, 255, 0)), (467 - fontHealth.size("Boss health: %i/%i" %(self.health, self.max_health))[0] // 2, 700 - 55 - fontHealth.size("Boss health: %i/%i" %(self.health, self.max_health))[1] // 2))
def update(self, pl, eb):
if self.grace_time == 0:
#handles attack timings with some randomness
self.attacks[random.randint(0,1)] = True
self.directions = random.randint(0,3)
#resets movement during attacks
self.frames_spent_moving = 0
#handles in between attack grace timers
self.grace_time = self.grace_timers[self.phase]
else:
#handles movement between attacks
if self.frames_spent_moving <= 30:
self.move()
self.frames_spent_moving += 1
self.grace_time -= 1
self.rect = Rect(self.x, self.y, self.w, self.h)
self.gun3 = (self.rect.bottomleft[0]+10, self.rect.bottomleft[1]-10)
self.gun2 = (self.rect.bottomright[0]+10, self.rect.bottomright[1]-10)
self.gun1 = (self.rect.bottomright[0] - self.w // 2, self.rect.bottomright[1]-10)
self.guns = [self.gun1, self.gun2]
#tries to fire each attack
self.sweeper(eb)
self.ring(eb)
def check(self, bullets, pickups, pl):
for b in bullets:
if b.rect.colliderect(self.rect):
self.health -= b.dmg + pl.dmg_upG
#if health permits, spawns a randomly placed heart
if 0 <= self.health%500 <= 10 and self.health != self.max_health:
pickups.append(Heart(random.randint(300, 700), random.randint(200, 500), random.randint(250, 500)))
if 0 <= self.health%250 <= 10 and self.health != self.max_health:
self.weakpoint = random.randint(0, 4)
self.health -= 11
# checks if it is supposed to die
if self.health <= 0:
self.health = self.max_health
return False
#check for phase change
elif self.health < 8000:
self.phase = 2
elif self.health < 4000:
self.phase = 3
return True
def move(self):
#very similar to pl.directions, moves if it can
if self.directions == 0:
if self.y < 100:
self.y += 3
print("move 1")
elif self.directions == 1:
if 0 < self.y:
self.y -= 3
print("move 2")
elif self.directions == 2:
if 0 < self.x:
self.x -= 10
print("move 3")
elif self.directions == 3:
if self.x + 800 < 1000:
self.x += 10
print("move 4")
def sweeper(self, enemyBullets):
#shoots stream of bullets from left to right from random guns
if self.attacks[1]:
for angle in range(10, 170, 5):
#checks if timer conditions are just right
if self.firing_time + 10 == angle:
self.target_angle = (self.gun2[0] + 50 * cos(radians(angle)),
self.gun2[1] + 50 * sin(radians(angle)))
enemyBullets.append(JamBullet(self.gun2[0], self.gun2[1], self.target_angle[0], self.target_angle[1], 15 * (self.phase + 1)))
self.target_angle = (self.gun3[0] + 50 * cos(radians(180 - angle)),
self.gun3[1] + 50 * sin(radians(180 -angle)))
enemyBullets.append(JamBullet(self.gun3[0], self.gun3[1], self.target_angle[0], self.target_angle[1], 15 * (self.phase + 1)))
#ends attack
if self.firing_time + 10 >= 170:
self.attacks[1] = False
self.firing_time = 0
break
else: self.firing_time += 2
def ring(self, enemyBullets):
if self.attacks[0]:
for angle in range(0, 360, 10):
if self.firing_time == angle:
self.target_angle = (self.rect.centerx + 50 * cos(radians(angle)),
self.rect.centery + 50 * sin(radians(angle)))
enemyBullets.append(JamBullet(self.rect.centerx, self.rect.centery, self.target_angle[0], self.target_angle[1], 15 * self.phase))
if self.firing_time >= 360:
self.attacks[0] = False
self.firing_time = 0
break
else: self.firing_time += 2.5
| 0 | 0 |
9084b7ccd8e3dba852fd6469469662507b5a8c2b | 24,781 | py | Python | src/simulator/network_wrong_mi.py | ChenGeng-ZJU/PCC-RL | 6627a186643175ea68269d78e206e6bc45ac634f | [
"Apache-2.0"
] | null | null | null | src/simulator/network_wrong_mi.py | ChenGeng-ZJU/PCC-RL | 6627a186643175ea68269d78e206e6bc45ac634f | [
"Apache-2.0"
] | null | null | null | src/simulator/network_wrong_mi.py | ChenGeng-ZJU/PCC-RL | 6627a186643175ea68269d78e206e6bc45ac634f | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Nathan Jay and Noga Rotman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import heapq
import os
import random
import sys
import time
import math
import warnings
warnings.simplefilter(action='ignore', category=UserWarning)
import gym
from gym import spaces
from gym.envs.registration import register
from gym.utils import seeding
import numpy as np
from common import sender_obs
from common.utils import pcc_aurora_reward, read_json_file
from simulator.trace import Trace
import pandas as pd
MAX_CWND = 5000
MIN_CWND = 4
MAX_RATE = 20000
MIN_RATE = 5
REWARD_SCALE = 0.001
EVENT_TYPE_SEND = 'S'
EVENT_TYPE_ACK = 'A'
BYTES_PER_PACKET = 1500
LATENCY_PENALTY = 1.0
LOSS_PENALTY = 1.0
USE_LATENCY_NOISE = True
MAX_LATENCY_NOISE = 1.1
# DEBUG = True
DEBUG = False
MI_RTT_PROPORTION = 1.0
# PACKET_LOG_FLAG = False
PACKET_LOG_FLAG = True
def debug_print(msg):
    """Write *msg* to stderr immediately, but only when DEBUG is enabled."""
    if not DEBUG:
        return
    print(msg, file=sys.stderr, flush=True)
class EmuReplay:
    """Replays the timestamps / send rates recorded from an emulation run
    (read from aurora_emulation_log.csv)."""
    def __init__(self):
        df = pd.read_csv('aurora_emulation_log.csv')
        self.ts = df['timestamp'].tolist()
        self.send_rate = df['send_rate'].tolist()
        self.idx = 0  # cursor into the recorded trace

    def get_ts(self):
        """Return the next recorded timestamp, clamping at the last entry.

        Advances the cursor by one on every call.
        """
        # BUGFIX: the original test was `>` which let idx == len(ts) through
        # and raised IndexError on the read below.
        if self.idx >= len(self.ts):
            self.idx = len(self.ts) - 1
        ts = self.ts[self.idx]
        self.idx += 1
        return ts

    def get_rate(self):
        """Current send rate, converted from bits/sec to packets/sec."""
        # Clamp for the same reason as get_ts(): idx may already be one past
        # the end right after the final get_ts() call.
        i = min(self.idx, len(self.send_rate) - 1)
        return self.send_rate[i] / 8 / BYTES_PER_PACKET

    def reset(self):
        """Rewind the replay cursor to the beginning of the trace."""
        self.idx = 0
class Link():
    """One direction of the simulated bottleneck link.

    Wraps a Trace (time-varying bandwidth/delay/loss) and models a finite
    FIFO queue whose occupancy drains at the trace's bandwidth.
    """
    def __init__(self, trace: Trace):
        self.trace = trace
        self.queue_delay = 0.0                 # last computed queuing delay (s)
        self.queue_delay_update_time = 0.0     # sim time of last drain update (s)
        self.queue_size = self.trace.get_queue_size()  # capacity in packets
        self.pkt_in_queue = 0                  # fractional packets queued
    def get_cur_queue_delay(self, event_time):
        """Drain the queue up to *event_time* and return the queuing delay (s)."""
        # Packets drained since the last update = elapsed time * bandwidth.
        self.pkt_in_queue = max(0, self.pkt_in_queue -
                                (event_time - self.queue_delay_update_time) *
                                self.get_bandwidth(event_time))
        self.queue_delay_update_time = event_time
        # ceil: a partially-transmitted packet still occupies a slot.
        cur_queue_delay = math.ceil(
            self.pkt_in_queue) / self.get_bandwidth(event_time)
        return cur_queue_delay
    def get_cur_latency(self, event_time):
        """Total one-way latency (s): propagation (trace, ms) + queuing."""
        q_delay = self.get_cur_queue_delay(event_time)
        # print('queue delay: ', q_delay)
        return self.trace.get_delay(event_time) / 1000.0 + q_delay
    def packet_enters_link(self, event_time):
        """Try to enqueue one packet; False on random loss or queue overflow."""
        if (random.random() < self.trace.get_loss_rate()):
            return False
        self.queue_delay = self.get_cur_queue_delay(event_time)
        extra_delay = 1.0 / self.get_bandwidth(event_time)  # this packet's service time
        if 1 + math.ceil(self.pkt_in_queue) > self.queue_size:
            # print("{}\tDrop!".format(event_time))
            return False
        self.queue_delay += extra_delay
        self.pkt_in_queue += 1
        return True
    def print_debug(self):
        """Dump link parameters to stdout (uses trace values at t=0)."""
        print("Link:")
        # TODO: Do not use timestamp 0.
        print("Bandwidth: %.3fMbps" % (self.trace.get_bandwidth(0)))
        # TODO: Do not use timestamp 0.
        print("Delay: %.3fms" % (self.trace.get_delay(0)))
        print("Queue Delay: %.3fms" % (self.queue_delay * 1000))
        print("One Packet Queue Delay: %.3fms" % (
            1000.0 * 1 / (self.trace.get_bandwidth(0) * 1e6 / 8 / BYTES_PER_PACKET)))
        print("Queue size: %dpackets" % self.queue_size)
        print("Loss: %.4f" % self.trace.get_loss_rate())
    def reset(self):
        """Empty the queue and rewind the drain clock (trace is unchanged)."""
        self.queue_delay = 0.0
        self.queue_delay_update_time = 0.0
        self.pkt_in_queue = 0
    def get_bandwidth(self, ts):
        """Bandwidth at sim time *ts*, converted from Mbps to packets/sec."""
        return self.trace.get_bandwidth(ts) * 1e6 / 8 / BYTES_PER_PACKET
class Network():
    """Discrete-event network simulator.

    Drives packets from senders across links using a heap-ordered event
    queue keyed on event time; each event tuple is
    (time, sender, type, next_hop, latency, dropped, id, rto, queue_delay).
    """
    def __init__(self, senders, links, env):
        self.event_count = 0   # monotonically increasing packet/event id
        self.q = []            # heap of pending events
        self.cur_time = 0.0    # simulated clock (seconds)
        self.senders = senders
        self.links = links
        self.queue_initial_packets()
        self.env = env
        self.pkt_log = []      # per-packet trace rows when PACKET_LOG_FLAG
    def queue_initial_packets(self):
        """Register each sender with this network and seed one SEND event."""
        for sender in self.senders:
            sender.register_network(self)
            sender.reset_obs()
            heapq.heappush(self.q, (0, sender, EVENT_TYPE_SEND,
                                    0, 0.0, False, self.event_count, sender.rto, 0))
            self.event_count += 1
    def reset(self):
        """Restore clock, event queue, links and senders to initial state."""
        self.pkt_log = []
        self.cur_time = 0.0
        self.q = []
        [link.reset() for link in self.links]
        [sender.reset() for sender in self.senders]
        self.queue_initial_packets()
    def get_cur_time(self):
        # Current simulated time in seconds.
        return self.cur_time
    def run_for_dur(self, dur, action=None):
        """Advance the simulation by at most *dur* seconds (bounded by the
        end of the trace) and return the scaled Aurora reward computed from
        the resulting monitor interval (MI).

        Side effect: when any latency was observed, updates env.run_dur to
        MI_RTT_PROPORTION * avg RTT + one packet service time.
        """
        # if self.cur_time > 1.75:
        #     pass
        # else:
        #     self.senders[0].rate = self.env.replay.get_rate()
        #     dur = self.env.replay.get_ts() - self.cur_time
        end_time = min(self.cur_time + dur, self.env.current_trace.timestamps[-1])
        debug_print('MI from {} to {}, dur {}'.format(
            self.cur_time, end_time, dur))
        for sender in self.senders:
            sender.reset_obs()
        while True:
            # Peek first: events at/after end_time stay queued for next MI.
            event_time, sender, event_type, next_hop, cur_latency, dropped, \
                event_id, rto, event_queue_delay = self.q[0]
            if event_time >= end_time:
                self.cur_time = end_time
                break
            event_time, sender, event_type, next_hop, cur_latency, dropped, \
                event_id, rto, event_queue_delay = heapq.heappop(self.q)
            self.cur_time = event_time
            new_event_time = event_time
            new_event_type = event_type
            new_next_hop = next_hop
            new_latency = cur_latency
            new_dropped = dropped
            new_event_queue_delay = event_queue_delay
            push_new_event = False
            debug_print("Got %d event %s, to link %d, latency %f at time %f, "
                        "next_hop %d, dropped %s, event_q length %f, "
                        "sender rate %f, duration: %f, queue_size: %f, "
                        "rto: %f, cwnd: %f, ssthresh: %f, sender rto %f, "
                        "pkt in flight %d, wait time %d" % (
                            event_id, event_type, next_hop, cur_latency,
                            event_time, next_hop, dropped, len(self.q),
                            sender.rate, dur, self.links[0].queue_size,
                            rto, sender.cwnd, sender.ssthresh, sender.rto,
                            int(sender.bytes_in_flight/BYTES_PER_PACKET),
                            sender.pkt_loss_wait_time))
            if event_type == EVENT_TYPE_ACK:
                if next_hop == len(sender.path):
                    # ACK reached the sender: classify as timeout/loss/ack.
                    # if cur_latency > 1.0:
                    #     sender.timeout(cur_latency)
                    # sender.on_packet_lost(cur_latency)
                    if rto >= 0 and cur_latency > rto and sender.pkt_loss_wait_time <= 0:
                        sender.timeout()
                        dropped = True
                        new_dropped = True
                    elif dropped:
                        sender.on_packet_lost(cur_latency)
                        if PACKET_LOG_FLAG:
                            self.pkt_log.append([self.cur_time, event_id, 'lost',
                                                 BYTES_PER_PACKET])
                    else:
                        sender.on_packet_acked(cur_latency)
                        debug_print('Ack packet at {}'.format(self.cur_time))
                        # log packet acked
                        if PACKET_LOG_FLAG:
                            self.pkt_log.append([self.cur_time, event_id, 'acked',
                                                 BYTES_PER_PACKET, cur_latency,
                                                 event_queue_delay])
                else:
                    # ACK still traversing the return path: add this hop's latency.
                    new_next_hop = next_hop + 1
                    new_event_queue_delay += sender.path[next_hop].get_cur_queue_delay(
                        self.cur_time)
                    link_latency = sender.path[next_hop].get_cur_latency(
                        self.cur_time)
                    # link_latency *= self.env.current_trace.get_delay_noise_replay(self.cur_time)
                    # if USE_LATENCY_NOISE:
                    # link_latency *= random.uniform(1.0, MAX_LATENCY_NOISE)
                    new_latency += link_latency
                    new_event_time += link_latency
                    push_new_event = True
            elif event_type == EVENT_TYPE_SEND:
                if next_hop == 0:
                    # At the sender: emit this packet (if allowed) and always
                    # schedule the next SEND at 1/rate seconds from now.
                    if sender.can_send_packet():
                        sender.on_packet_sent()
                        # print('Send packet at {}'.format(self.cur_time))
                        if PACKET_LOG_FLAG:
                            self.pkt_log.append([self.cur_time, event_id, 'sent',
                                                 BYTES_PER_PACKET])
                        push_new_event = True
                    heapq.heappush(self.q, (self.cur_time + (1.0 / sender.rate),
                                            sender, EVENT_TYPE_SEND, 0, 0.0,
                                            False, self.event_count, sender.rto,
                                            0))
                    self.event_count += 1
                else:
                    push_new_event = True
                if next_hop == sender.dest:
                    # Reached the destination hop: turn around as an ACK.
                    new_event_type = EVENT_TYPE_ACK
                new_next_hop = next_hop + 1
                new_event_queue_delay += sender.path[next_hop].get_cur_queue_delay(
                    self.cur_time)
                link_latency = sender.path[next_hop].get_cur_latency(
                    self.cur_time)
                # if USE_LATENCY_NOISE:
                #     link_latency *= random.uniform(1.0, MAX_LATENCY_NOISE)
                # link_latency += self.env.current_trace.get_delay_noise(self.cur_time) / 1000
                # link_latency *= self.env.current_trace.get_delay_noise_replay(self.cur_time)
                new_latency += link_latency
                new_event_time += link_latency
                new_dropped = not sender.path[next_hop].packet_enters_link(
                    self.cur_time)
                if not new_dropped:
                    sender.queue_delay_samples.append(new_event_queue_delay)
            if push_new_event:
                heapq.heappush(self.q, (new_event_time, sender, new_event_type,
                                        new_next_hop, new_latency, new_dropped,
                                        event_id, rto, new_event_queue_delay))
        # Close out the monitor interval and compute the reward.
        for sender in self.senders:
            sender.record_run()
        sender_mi = self.senders[0].get_run_data()
        throughput = sender_mi.get("recv rate")  # bits/sec
        latency = sender_mi.get("avg latency")  # second
        loss = sender_mi.get("loss ratio")
        debug_print("thpt %f, delay %f, loss %f, bytes sent %f, bytes acked %f" % (
            throughput/1e6, latency, loss, sender_mi.bytes_sent, sender_mi.bytes_acked))
        reward = pcc_aurora_reward(
            throughput / 8 / BYTES_PER_PACKET, latency, loss,
            np.mean(self.env.current_trace.bandwidths) * 1e6 / 8 / BYTES_PER_PACKET)
        if latency > 0.0:
            self.env.run_dur = MI_RTT_PROPORTION * sender_mi.get("avg latency") + (1 / self.links[0].get_bandwidth(self.cur_time))
            # self.env.run_dur = max(MI_RTT_PROPORTION * sender_mi.get("avg latency"), 5 * (1 / self.senders[0].rate))
        # print(self.env.run_dur)
        return reward * REWARD_SCALE
class Sender():
    """Rate-based sender driven by the RL agent.

    Tracks per-monitor-interval statistics (sent/acked/lost, RTT and queue
    delay samples) and exposes them as observation history.
    """
    def __init__(self, rate, path, dest, features, cwnd=25, history_len=10,
                 delta_scale=1):
        self.id = Sender._get_next_id()
        self.delta_scale = delta_scale       # scales RL action deltas
        self.starting_rate = rate
        self.rate = rate                     # packets / second
        self.sent = 0
        self.acked = 0
        self.lost = 0
        self.bytes_in_flight = 0
        self.min_latency = None
        self.rtt_samples = []
        self.queue_delay_samples = []
        self.prev_rtt_samples = self.rtt_samples
        self.sample_time = []
        self.net = None                      # set via register_network()
        self.path = path                     # list of Links to traverse
        self.dest = dest                     # hop index of the destination
        self.history_len = history_len
        self.features = features
        self.history = sender_obs.SenderHistory(self.history_len,
                                                self.features, self.id)
        self.cwnd = cwnd
        self.use_cwnd = False
        self.rto = -1                        # -1 disables the RTO check
        self.ssthresh = 0
        self.pkt_loss_wait_time = -1
        self.estRTT = 1000000 / 1e6  # SynInterval in emulation
        self.RTTVar = self.estRTT / 2  # RTT variance
        # self.got_data = False
    _next_id = 1
    def _get_next_id():
        # Class-level counter; always invoked as Sender._get_next_id().
        result = Sender._next_id
        Sender._next_id += 1
        return result
    def apply_rate_delta(self, delta):
        """Scale self.rate multiplicatively by the (scaled) action delta."""
        # if self.got_data:
        delta *= self.delta_scale
        #print("Applying delta %f" % delta)
        if delta >= 0.0:
            self.set_rate(self.rate * (1.0 + delta))
        else:
            self.set_rate(self.rate / (1.0 - delta))
    def apply_cwnd_delta(self, delta):
        """Scale self.cwnd multiplicatively by the (scaled) action delta."""
        delta *= self.delta_scale
        #print("Applying delta %f" % delta)
        if delta >= 0.0:
            self.set_cwnd(self.cwnd * (1.0 + delta))
        else:
            self.set_cwnd(self.cwnd / (1.0 - delta))
    def can_send_packet(self):
        """Rate-based mode always sends; cwnd mode checks bytes in flight."""
        if self.use_cwnd:
            return int(self.bytes_in_flight) / BYTES_PER_PACKET < self.cwnd
        else:
            return True
    def register_network(self, net):
        # Called by Network so this sender can read the simulated clock.
        self.net = net
    def on_packet_sent(self):
        self.sent += 1
        self.bytes_in_flight += BYTES_PER_PACKET
    def on_packet_acked(self, rtt):
        """Record an ACK: update EWMA RTT estimate and the MI samples."""
        self.estRTT = (7.0 * self.estRTT + rtt) / 8.0  # RTT of emulation way
        self.RTTVar = (self.RTTVar * 7.0 + abs(rtt - self.estRTT) * 1.0) / 8.0
        self.acked += 1
        self.rtt_samples.append(rtt)
        # self.rtt_samples.append(self.estRTT)
        if (self.min_latency is None) or (rtt < self.min_latency):
            self.min_latency = rtt
        self.bytes_in_flight -= BYTES_PER_PACKET
    def on_packet_lost(self, rtt):
        self.lost += 1
        self.bytes_in_flight -= BYTES_PER_PACKET
    def set_rate(self, new_rate):
        """Set the sending rate, clamped to [MIN_RATE, MAX_RATE]."""
        self.rate = new_rate
        # print("Attempt to set new rate to %f (min %f, max %f)" % (new_rate, MIN_RATE, MAX_RATE))
        if self.rate > MAX_RATE:
            self.rate = MAX_RATE
        if self.rate < MIN_RATE:
            self.rate = MIN_RATE
    def set_cwnd(self, new_cwnd):
        """Set the congestion window (truncated to int; clamping disabled)."""
        self.cwnd = int(new_cwnd)
        #print("Attempt to set new rate to %f (min %f, max %f)" % (new_rate, MIN_RATE, MAX_RATE))
        # if self.cwnd > MAX_CWND:
        #     self.cwnd = MAX_CWND
        # if self.cwnd < MIN_CWND:
        #     self.cwnd = MIN_CWND
    def record_run(self):
        """Append the current monitor interval to the observation history."""
        smi = self.get_run_data()
        # if not self.got_data and smi.rtt_samples:
        #     self.got_data = True
        #     self.history.step(smi)
        # else:
        self.history.step(smi)
    def get_obs(self):
        return self.history.as_array()
    def get_run_data(self):
        """Build the SenderMonitorInterval for the MI that just ended."""
        obs_end_time = self.net.get_cur_time()
        #obs_dur = obs_end_time - self.obs_start_time
        #print("Got %d acks in %f seconds" % (self.acked, obs_dur))
        #print("Sent %d packets in %f seconds" % (self.sent, obs_dur))
        #print("self.rate = %f" % self.rate)
        # print(self.acked, self.sent)
        # NOTE(review): this fallback (inherit previous MI's RTT samples when
        # none were acked) is computed but NOT passed below -- self.rtt_samples
        # is used instead. Given this file is named network_wrong_mi.py, the
        # discrepancy may be the deliberate "wrong MI" variant; confirm before
        # "fixing".
        rtt_samples = self.rtt_samples if self.rtt_samples else self.prev_rtt_samples
        # if not self.rtt_samples:
        #     print(self.obs_start_time, obs_end_time, self.rate)
        # rtt_samples is empty when there is no packet acked in MI
        # Solution: inherit from previous rtt_samples.
        return sender_obs.SenderMonitorInterval(
            self.id,
            bytes_sent=self.sent * BYTES_PER_PACKET,
            bytes_acked=self.acked * BYTES_PER_PACKET,
            bytes_lost=self.lost * BYTES_PER_PACKET,
            send_start=self.obs_start_time,
            send_end=obs_end_time,
            recv_start=self.obs_start_time,
            recv_end=obs_end_time,
            rtt_samples=self.rtt_samples,
            queue_delay_samples=self.queue_delay_samples,
            packet_size=BYTES_PER_PACKET
        )
    def reset_obs(self):
        """Start a fresh monitor interval (keeps last MI's RTTs as prev)."""
        self.sent = 0
        self.acked = 0
        self.lost = 0
        if self.rtt_samples:
            self.prev_rtt_samples = self.rtt_samples
        self.rtt_samples = []
        self.queue_delay_samples = []
        self.obs_start_time = self.net.get_cur_time()
    def print_debug(self):
        print("Sender:")
        print("Obs: %s" % str(self.get_obs()))
        print("Rate: %f" % self.rate)
        print("Sent: %d" % self.sent)
        print("Acked: %d" % self.acked)
        print("Lost: %d" % self.lost)
        print("Min Latency: %s" % str(self.min_latency))
    def reset(self):
        """Restore rate, stats, history and RTT estimator to initial values."""
        #print("Resetting sender!")
        self.rate = self.starting_rate
        self.bytes_in_flight = 0
        self.min_latency = None
        self.reset_obs()
        self.history = sender_obs.SenderHistory(self.history_len,
                                                self.features, self.id)
        self.estRTT = 1000000 / 1e6  # SynInterval in emulation
        self.RTTVar = self.estRTT / 2  # RTT variance
        # self.got_data = False
    def timeout(self):
        # placeholder
        pass
class SimulatedNetworkEnv(gym.Env):
    """Gym environment wrapping the discrete-event Network simulator.

    Each step applies a rate (and optionally cwnd) delta, runs one monitor
    interval, and returns the sender's observation history as the state.
    """
    def __init__(self, traces, history_len=10,
                 features="sent latency inflation,latency ratio,send ratio",
                 congestion_control_type="aurora", train_flag=False,
                 delta_scale=1.0):
        """Network environment used in simulation.
        congestion_control_type: aurora is pcc-rl. cubic is TCPCubic.
        """
        assert congestion_control_type in {"aurora", "cubic"}, \
            "Unrecognized congestion_control_type {}.".format(
                congestion_control_type)
        # self.replay = EmuReplay()
        self.delta_scale = delta_scale
        self.traces = traces
        self.current_trace = np.random.choice(self.traces)
        self.train_flag = train_flag
        self.congestion_control_type = congestion_control_type
        # aurora is purely rate-based; cubic would also drive a cwnd.
        if self.congestion_control_type == 'aurora':
            self.use_cwnd = False
        elif self.congestion_control_type == 'cubic':
            self.use_cwnd = True
        self.history_len = history_len
        # print("History length: %d" % history_len)
        self.features = features.split(",")
        # print("Features: %s" % str(self.features))
        self.links = None
        self.senders = None
        self.create_new_links_and_senders()
        self.net = Network(self.senders, self.links, self)
        self.run_dur = None      # MI duration (s); updated by Network
        self.run_period = 0.1
        self.steps_taken = 0
        self.debug_thpt_changes = False
        self.last_thpt = None
        self.last_rate = None
        # Action: [rate delta] or [rate delta, cwnd delta] when cwnd is used.
        if self.use_cwnd:
            self.action_space = spaces.Box(
                np.array([-1e12, -1e12]), np.array([1e12, 1e12]), dtype=np.float32)
        else:
            self.action_space = spaces.Box(
                np.array([-1e12]), np.array([1e12]), dtype=np.float32)
        self.observation_space = None
        # use_only_scale_free = True
        single_obs_min_vec = sender_obs.get_min_obs_vector(self.features)
        single_obs_max_vec = sender_obs.get_max_obs_vector(self.features)
        self.observation_space = spaces.Box(np.tile(single_obs_min_vec, self.history_len),
                                            np.tile(single_obs_max_vec,
                                                    self.history_len),
                                            dtype=np.float32)
        self.reward_sum = 0.0
        self.reward_ewma = 0.0
        self.episodes_run = -1
    def seed(self, seed=None):
        """Seed the env's RNG (gym convention); returns the seed used."""
        self.rand, seed = seeding.np_random(seed)
        return [seed]
    def _get_all_sender_obs(self):
        # Flatten sender 0's observation history into a 1-D numpy array.
        sender_obs = self.senders[0].get_obs()
        sender_obs = np.array(sender_obs).reshape(-1,)
        return sender_obs
    def step(self, actions):
        """Apply the action deltas, simulate one MI, and return
        (observation, reward, done, info)."""
        #print("Actions: %s" % str(actions))
        # print(actions)
        for i in range(0, 1): # len(actions)):
            #print("Updating rate for sender %d" % i)
            action = actions
            self.senders[i].apply_rate_delta(action[0])
            if self.use_cwnd:
                self.senders[i].apply_cwnd_delta(action[1])
        # print("Running for %fs" % self.run_dur)
        reward = self.net.run_for_dur(self.run_dur, action=actions[0])
        self.steps_taken += 1
        sender_obs = self._get_all_sender_obs()
        should_stop = self.current_trace.is_finished(self.net.get_cur_time())
        self.reward_sum += reward
        # print('env step: {}s'.format(time.time() - t_start))
        return sender_obs, reward, should_stop, {}
    def print_debug(self):
        print("---Link Debug---")
        for link in self.links:
            link.print_debug()
        print("---Sender Debug---")
        for sender in self.senders:
            sender.print_debug()
    def create_new_links_and_senders(self):
        """Build two Links over the current trace and one sender (rate is a
        fixed 100 pkts/s starting point; alternatives left commented)."""
        # self.replay.reset()
        self.links = [Link(self.current_trace), Link(self.current_trace)]
        if self.congestion_control_type == "aurora":
            if not self.train_flag:
                self.senders = [Sender( #self.replay.get_rate(),
                    # 2500000 / 8 /BYTES_PER_PACKET / 0.048,
                    # 12000000 / 8 /BYTES_PER_PACKET / 0.048,
                    # 10 / (self.current_trace.get_delay(0) *2/1000),
                    100,
                    [self.links[0], self.links[1]], 0,
                    self.features,
                    history_len=self.history_len,
                    delta_scale=self.delta_scale)]
            else:
                # self.senders = [Sender(random.uniform(0.3, 1.5) * bw,
                #                        [self.links[0], self.links[1]], 0,
                #                        self.features,
                #                        history_len=self.history_len)]
                # self.senders = [Sender(random.uniform(10/bw, 1.5) * bw,
                #                        [self.links[0], self.links[1]], 0,
                #                        self.features,
                #                        history_len=self.history_len,
                #                        delta_scale=self.delta_scale)]
                self.senders = [Sender(100,
                                       [self.links[0], self.links[1]], 0,
                                       self.features,
                                       history_len=self.history_len,
                                       delta_scale=self.delta_scale)]
        elif self.congestion_control_type == "cubic":
            raise NotImplementedError
        else:
            raise RuntimeError("Unrecognized congestion_control_type {}".format(
                self.congestion_control_type))
        # self.run_dur = 3 * lat
        # self.run_dur = 1 * lat
        if not self.senders[0].rtt_samples:
            # No RTT measured yet: bootstrap the MI duration with 10 ms.
            # self.run_dur = 0.473
            # self.run_dur = 5 / self.senders[0].rate
            self.run_dur = 0.01
            # self.run_dur = self.current_trace.get_delay(0) * 2 / 1000
            # self.run_dur = self.replay.get_ts() - 0
    def reset(self):
        """Start a new episode on a freshly sampled trace; runs one warm-up
        MI and returns the initial observation."""
        self.steps_taken = 0
        self.net.reset()
        self.current_trace = np.random.choice(self.traces)
        self.current_trace.reset()
        self.create_new_links_and_senders()
        self.net = Network(self.senders, self.links, self)
        self.episodes_run += 1
        # self.replay.reset()
        self.net.run_for_dur(self.run_dur)
        self.reward_ewma *= 0.99
        self.reward_ewma += 0.01 * self.reward_sum
        # print("Reward: %0.2f, Ewma Reward: %0.2f" % (self.reward_sum, self.reward_ewma))
        self.reward_sum = 0.0
        return self._get_all_sender_obs()
register(id='PccNs-v0', entry_point='simulator.network:SimulatedNetworkEnv')
| 38.183359 | 130 | 0.561438 | # Copyright 2019 Nathan Jay and Noga Rotman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import heapq
import os
import random
import sys
import time
import math
import warnings
warnings.simplefilter(action='ignore', category=UserWarning)
import gym
from gym import spaces
from gym.envs.registration import register
from gym.utils import seeding
import numpy as np
from common import sender_obs
from common.utils import pcc_aurora_reward, read_json_file
from simulator.trace import Trace
import pandas as pd
MAX_CWND = 5000
MIN_CWND = 4
MAX_RATE = 20000
MIN_RATE = 5
REWARD_SCALE = 0.001
EVENT_TYPE_SEND = 'S'
EVENT_TYPE_ACK = 'A'
BYTES_PER_PACKET = 1500
LATENCY_PENALTY = 1.0
LOSS_PENALTY = 1.0
USE_LATENCY_NOISE = True
MAX_LATENCY_NOISE = 1.1
# DEBUG = True
DEBUG = False
MI_RTT_PROPORTION = 1.0
# PACKET_LOG_FLAG = False
PACKET_LOG_FLAG = True
def debug_print(msg):
if DEBUG:
print(msg, file=sys.stderr, flush=True)
class EmuReplay:
def __init__(self, ):
df = pd.read_csv('aurora_emulation_log.csv')
self.ts = df['timestamp'].tolist()
self.send_rate = df['send_rate'].tolist()
self.idx = 0
def get_ts(self):
if self.idx > len(self.ts):
self.idx = len(self.ts) -1
ts = self.ts[self.idx]
self.idx += 1
return ts
def get_rate(self):
return self.send_rate[self.idx] / 8 / BYTES_PER_PACKET
def reset(self):
self.idx = 0
class Link():
def __init__(self, trace: Trace):
self.trace = trace
self.queue_delay = 0.0
self.queue_delay_update_time = 0.0
self.queue_size = self.trace.get_queue_size()
self.pkt_in_queue = 0
def get_cur_queue_delay(self, event_time):
self.pkt_in_queue = max(0, self.pkt_in_queue -
(event_time - self.queue_delay_update_time) *
self.get_bandwidth(event_time))
self.queue_delay_update_time = event_time
cur_queue_delay = math.ceil(
self.pkt_in_queue) / self.get_bandwidth(event_time)
return cur_queue_delay
def get_cur_latency(self, event_time):
q_delay = self.get_cur_queue_delay(event_time)
# print('queue delay: ', q_delay)
return self.trace.get_delay(event_time) / 1000.0 + q_delay
def packet_enters_link(self, event_time):
if (random.random() < self.trace.get_loss_rate()):
return False
self.queue_delay = self.get_cur_queue_delay(event_time)
extra_delay = 1.0 / self.get_bandwidth(event_time)
if 1 + math.ceil(self.pkt_in_queue) > self.queue_size:
# print("{}\tDrop!".format(event_time))
return False
self.queue_delay += extra_delay
self.pkt_in_queue += 1
return True
def print_debug(self):
print("Link:")
# TODO: Do not use timestamp 0.
print("Bandwidth: %.3fMbps" % (self.trace.get_bandwidth(0)))
# TODO: Do not use timestamp 0.
print("Delay: %.3fms" % (self.trace.get_delay(0)))
print("Queue Delay: %.3fms" % (self.queue_delay * 1000))
print("One Packet Queue Delay: %.3fms" % (
1000.0 * 1 / (self.trace.get_bandwidth(0) * 1e6 / 8 / BYTES_PER_PACKET)))
print("Queue size: %dpackets" % self.queue_size)
print("Loss: %.4f" % self.trace.get_loss_rate())
def reset(self):
self.queue_delay = 0.0
self.queue_delay_update_time = 0.0
self.pkt_in_queue = 0
def get_bandwidth(self, ts):
return self.trace.get_bandwidth(ts) * 1e6 / 8 / BYTES_PER_PACKET
class Network():
    """Discrete-event network simulator.

    Keeps a min-heap of timestamped packet events and advances them through
    each sender's path of links, producing per-monitor-interval rewards.
    """
    def __init__(self, senders, links, env):
        self.event_count = 0
        self.q = []
        self.cur_time = 0.0
        self.senders = senders
        self.links = links
        self.queue_initial_packets()
        self.env = env
        self.pkt_log = []
    def queue_initial_packets(self):
        """Register each sender with this network and schedule its first
        SEND event at t=0."""
        for sender in self.senders:
            sender.register_network(self)
            sender.reset_obs()
            # Event tuple layout: (time, sender, type, next_hop, cur_latency,
            # dropped, event_id, rto, event_queue_delay).
            heapq.heappush(self.q, (0, sender, EVENT_TYPE_SEND,
                                    0, 0.0, False, self.event_count, sender.rto, 0))
            self.event_count += 1
    def reset(self):
        """Clear the event queue and reset all links/senders for a new episode."""
        self.pkt_log = []
        self.cur_time = 0.0
        self.q = []
        [link.reset() for link in self.links]
        [sender.reset() for sender in self.senders]
        self.queue_initial_packets()
    def get_cur_time(self):
        """Current simulation time in seconds."""
        return self.cur_time
    def run_for_dur(self, dur, action=None):
        """Run the event loop for one monitor interval of length ``dur``.

        Processes SEND/ACK events in timestamp order up to the interval
        boundary (or the end of the trace), optionally logging per-packet
        records, then closes the interval and returns the scaled
        pcc-aurora reward computed from the first sender's statistics.
        """
        # if self.cur_time > 1.75:
        #     pass
        # else:
        #     self.senders[0].rate = self.env.replay.get_rate()
        #     dur = self.env.replay.get_ts() - self.cur_time
        end_time = min(self.cur_time + dur, self.env.current_trace.timestamps[-1])
        debug_print('MI from {} to {}, dur {}'.format(
            self.cur_time, end_time, dur))
        for sender in self.senders:
            sender.reset_obs()
        # Drain events in timestamp order until the interval boundary.
        while True:
            # Peek at the earliest event; stop (without popping) once it
            # falls beyond this monitor interval.
            event_time, sender, event_type, next_hop, cur_latency, dropped, \
                event_id, rto, event_queue_delay = self.q[0]
            if event_time >= end_time:
                self.cur_time = end_time
                break
            event_time, sender, event_type, next_hop, cur_latency, dropped, \
                event_id, rto, event_queue_delay = heapq.heappop(self.q)
            self.cur_time = event_time
            new_event_time = event_time
            new_event_type = event_type
            new_next_hop = next_hop
            new_latency = cur_latency
            new_dropped = dropped
            new_event_queue_delay = event_queue_delay
            push_new_event = False
            debug_print("Got %d event %s, to link %d, latency %f at time %f, "
                        "next_hop %d, dropped %s, event_q length %f, "
                        "sender rate %f, duration: %f, queue_size: %f, "
                        "rto: %f, cwnd: %f, ssthresh: %f, sender rto %f, "
                        "pkt in flight %d, wait time %d" % (
                            event_id, event_type, next_hop, cur_latency,
                            event_time, next_hop, dropped, len(self.q),
                            sender.rate, dur, self.links[0].queue_size,
                            rto, sender.cwnd, sender.ssthresh, sender.rto,
                            int(sender.bytes_in_flight/BYTES_PER_PACKET),
                            sender.pkt_loss_wait_time))
            if event_type == EVENT_TYPE_ACK:
                # next_hop == len(path): the ACK has arrived back at the sender.
                if next_hop == len(sender.path):
                    # if cur_latency > 1.0:
                    #     sender.timeout(cur_latency)
                    # sender.on_packet_lost(cur_latency)
                    if rto >= 0 and cur_latency > rto and sender.pkt_loss_wait_time <= 0:
                        sender.timeout()
                        dropped = True
                        new_dropped = True
                    elif dropped:
                        sender.on_packet_lost(cur_latency)
                        if PACKET_LOG_FLAG:
                            self.pkt_log.append([self.cur_time, event_id, 'lost',
                                                 BYTES_PER_PACKET])
                    else:
                        sender.on_packet_acked(cur_latency)
                        debug_print('Ack packet at {}'.format(self.cur_time))
                        # log packet acked
                        if PACKET_LOG_FLAG:
                            self.pkt_log.append([self.cur_time, event_id, 'acked',
                                                 BYTES_PER_PACKET, cur_latency,
                                                 event_queue_delay])
                else:
                    # ACK still in flight: advance it over the next link.
                    new_next_hop = next_hop + 1
                    new_event_queue_delay += sender.path[next_hop].get_cur_queue_delay(
                        self.cur_time)
                    link_latency = sender.path[next_hop].get_cur_latency(
                        self.cur_time)
                    # link_latency *= self.env.current_trace.get_delay_noise_replay(self.cur_time)
                    # if USE_LATENCY_NOISE:
                    #     link_latency *= random.uniform(1.0, MAX_LATENCY_NOISE)
                    new_latency += link_latency
                    new_event_time += link_latency
                    push_new_event = True
            elif event_type == EVENT_TYPE_SEND:
                if next_hop == 0:
                    # At the sender: emit this packet (if allowed) and always
                    # schedule the next SEND according to the current rate.
                    if sender.can_send_packet():
                        sender.on_packet_sent()
                        # print('Send packet at {}'.format(self.cur_time))
                        if PACKET_LOG_FLAG:
                            self.pkt_log.append([self.cur_time, event_id, 'sent',
                                                 BYTES_PER_PACKET])
                        push_new_event = True
                    heapq.heappush(self.q, (self.cur_time + (1.0 / sender.rate),
                                            sender, EVENT_TYPE_SEND, 0, 0.0,
                                            False, self.event_count, sender.rto,
                                            0))
                    self.event_count += 1
                else:
                    push_new_event = True
                if next_hop == sender.dest:
                    # Reached the destination: the packet turns into an ACK.
                    new_event_type = EVENT_TYPE_ACK
                new_next_hop = next_hop + 1
                new_event_queue_delay += sender.path[next_hop].get_cur_queue_delay(
                    self.cur_time)
                link_latency = sender.path[next_hop].get_cur_latency(
                    self.cur_time)
                # if USE_LATENCY_NOISE:
                #     link_latency *= random.uniform(1.0, MAX_LATENCY_NOISE)
                # link_latency += self.env.current_trace.get_delay_noise(self.cur_time) / 1000
                # link_latency *= self.env.current_trace.get_delay_noise_replay(self.cur_time)
                new_latency += link_latency
                new_event_time += link_latency
                new_dropped = not sender.path[next_hop].packet_enters_link(
                    self.cur_time)
                if not new_dropped:
                    sender.queue_delay_samples.append(new_event_queue_delay)
            if push_new_event:
                heapq.heappush(self.q, (new_event_time, sender, new_event_type,
                                        new_next_hop, new_latency, new_dropped,
                                        event_id, rto, new_event_queue_delay))
        # Close the monitor interval and compute the reward.
        for sender in self.senders:
            sender.record_run()
        sender_mi = self.senders[0].get_run_data()
        throughput = sender_mi.get("recv rate") # bits/sec
        latency = sender_mi.get("avg latency") # second
        loss = sender_mi.get("loss ratio")
        debug_print("thpt %f, delay %f, loss %f, bytes sent %f, bytes acked %f" % (
            throughput/1e6, latency, loss, sender_mi.bytes_sent, sender_mi.bytes_acked))
        reward = pcc_aurora_reward(
            throughput / 8 / BYTES_PER_PACKET, latency, loss,
            np.mean(self.env.current_trace.bandwidths) * 1e6 / 8 / BYTES_PER_PACKET)
        if latency > 0.0:
            # Adapt the next MI length to the measured RTT.
            self.env.run_dur = MI_RTT_PROPORTION * sender_mi.get("avg latency") + (1 / self.links[0].get_bandwidth(self.cur_time))
        # self.env.run_dur = max(MI_RTT_PROPORTION * sender_mi.get("avg latency"), 5 * (1 / self.senders[0].rate))
        # print(self.env.run_dur)
        return reward * REWARD_SCALE
class Sender():
    """A transport endpoint in the simulated network.

    Tracks its sending rate (and optionally a congestion window), bytes in
    flight, and per-monitor-interval statistics (RTT samples, queue delays,
    sent/acked/lost counts) that are summarized by get_run_data().
    """

    # Monotonically increasing id shared by all Sender instances.
    _next_id = 1

    def __init__(self, rate, path, dest, features, cwnd=25, history_len=10,
                 delta_scale=1):
        self.id = Sender._get_next_id()
        self.delta_scale = delta_scale
        self.starting_rate = rate
        self.rate = rate
        self.sent = 0
        self.acked = 0
        self.lost = 0
        self.bytes_in_flight = 0
        self.min_latency = None
        self.rtt_samples = []
        self.queue_delay_samples = []
        self.prev_rtt_samples = self.rtt_samples
        self.sample_time = []
        self.net = None
        self.path = path
        self.dest = dest
        self.history_len = history_len
        self.features = features
        self.history = sender_obs.SenderHistory(self.history_len,
                                                self.features, self.id)
        self.cwnd = cwnd
        self.use_cwnd = False
        self.rto = -1
        self.ssthresh = 0
        self.pkt_loss_wait_time = -1
        self.estRTT = 1000000 / 1e6  # SynInterval in emulation
        self.RTTVar = self.estRTT / 2  # RTT variance
        # self.got_data = False

    @staticmethod
    def _get_next_id():
        """Return a fresh, unique sender id."""
        result = Sender._next_id
        Sender._next_id += 1
        return result

    def apply_rate_delta(self, delta):
        """Scale the sending rate by the (scaled) action delta.

        delta >= 0 multiplies the rate by (1 + delta); delta < 0 divides by
        (1 - delta), making positive and negative deltas symmetric.
        """
        delta *= self.delta_scale
        if delta >= 0.0:
            self.set_rate(self.rate * (1.0 + delta))
        else:
            self.set_rate(self.rate / (1.0 - delta))

    def apply_cwnd_delta(self, delta):
        """Scale the congestion window by the (scaled) action delta."""
        delta *= self.delta_scale
        if delta >= 0.0:
            self.set_cwnd(self.cwnd * (1.0 + delta))
        else:
            self.set_cwnd(self.cwnd / (1.0 - delta))

    def can_send_packet(self):
        """True if a packet may be sent now (always true in rate-only mode)."""
        if self.use_cwnd:
            return int(self.bytes_in_flight) / BYTES_PER_PACKET < self.cwnd
        return True

    def register_network(self, net):
        """Attach this sender to the Network that owns the event loop."""
        self.net = net

    def on_packet_sent(self):
        """Account for one packet entering the network."""
        self.sent += 1
        self.bytes_in_flight += BYTES_PER_PACKET

    def on_packet_acked(self, rtt):
        """Record an acked packet and update the smoothed RTT estimators."""
        # EWMA updates with alpha = 1/8, mirroring the emulator's estimator.
        self.estRTT = (7.0 * self.estRTT + rtt) / 8.0
        self.RTTVar = (self.RTTVar * 7.0 + abs(rtt - self.estRTT) * 1.0) / 8.0
        self.acked += 1
        self.rtt_samples.append(rtt)
        # self.rtt_samples.append(self.estRTT)
        if (self.min_latency is None) or (rtt < self.min_latency):
            self.min_latency = rtt
        self.bytes_in_flight -= BYTES_PER_PACKET

    def on_packet_lost(self, rtt):
        """Record a lost packet (rtt accepted for interface symmetry)."""
        self.lost += 1
        self.bytes_in_flight -= BYTES_PER_PACKET

    def set_rate(self, new_rate):
        """Set the sending rate, clamped to [MIN_RATE, MAX_RATE]."""
        self.rate = new_rate
        # print("Attempt to set new rate to %f (min %f, max %f)" % (new_rate, MIN_RATE, MAX_RATE))
        if self.rate > MAX_RATE:
            self.rate = MAX_RATE
        if self.rate < MIN_RATE:
            self.rate = MIN_RATE

    def set_cwnd(self, new_cwnd):
        """Set the congestion window in whole packets (clamping disabled)."""
        self.cwnd = int(new_cwnd)
        # if self.cwnd > MAX_CWND:
        #     self.cwnd = MAX_CWND
        # if self.cwnd < MIN_CWND:
        #     self.cwnd = MIN_CWND

    def record_run(self):
        """Push the finished monitor interval's statistics into history."""
        smi = self.get_run_data()
        self.history.step(smi)

    def get_obs(self):
        """Flattened history of monitor-interval features (the RL observation)."""
        return self.history.as_array()

    def get_run_data(self):
        """Summarize the current monitor interval as a SenderMonitorInterval.

        If no packet was acked during this interval (rtt_samples empty),
        fall back to the previous interval's samples so downstream feature
        computation always has RTT data.
        """
        obs_end_time = self.net.get_cur_time()
        # BUGFIX: this fallback was computed but unused — the original passed
        # self.rtt_samples below, defeating the inheritance described above.
        rtt_samples = self.rtt_samples if self.rtt_samples else self.prev_rtt_samples
        return sender_obs.SenderMonitorInterval(
            self.id,
            bytes_sent=self.sent * BYTES_PER_PACKET,
            bytes_acked=self.acked * BYTES_PER_PACKET,
            bytes_lost=self.lost * BYTES_PER_PACKET,
            send_start=self.obs_start_time,
            send_end=obs_end_time,
            recv_start=self.obs_start_time,
            recv_end=obs_end_time,
            rtt_samples=rtt_samples,
            queue_delay_samples=self.queue_delay_samples,
            packet_size=BYTES_PER_PACKET
        )

    def reset_obs(self):
        """Start a new monitor interval, remembering the last RTT samples."""
        self.sent = 0
        self.acked = 0
        self.lost = 0
        if self.rtt_samples:
            self.prev_rtt_samples = self.rtt_samples
        self.rtt_samples = []
        self.queue_delay_samples = []
        self.obs_start_time = self.net.get_cur_time()

    def print_debug(self):
        """Dump the sender's current counters to stdout."""
        print("Sender:")
        print("Obs: %s" % str(self.get_obs()))
        print("Rate: %f" % self.rate)
        print("Sent: %d" % self.sent)
        print("Acked: %d" % self.acked)
        print("Lost: %d" % self.lost)
        print("Min Latency: %s" % str(self.min_latency))

    def reset(self):
        """Restore the sender to its initial state for a new episode."""
        self.rate = self.starting_rate
        self.bytes_in_flight = 0
        self.min_latency = None
        self.reset_obs()
        self.history = sender_obs.SenderHistory(self.history_len,
                                                self.features, self.id)
        self.estRTT = 1000000 / 1e6  # SynInterval in emulation
        self.RTTVar = self.estRTT / 2  # RTT variance
        # self.got_data = False

    def timeout(self):
        """Hook for timeout handling; intentionally a no-op placeholder."""
        pass
class SimulatedNetworkEnv(gym.Env):
    """Gym environment wrapping the packet-level network simulator."""
    def __init__(self, traces, history_len=10,
                 features="sent latency inflation,latency ratio,send ratio",
                 congestion_control_type="aurora", train_flag=False,
                 delta_scale=1.0):
        """Network environment used in simulation.
        congestion_control_type: aurora is pcc-rl. cubic is TCPCubic.
        """
        assert congestion_control_type in {"aurora", "cubic"}, \
            "Unrecognized congestion_control_type {}.".format(
                congestion_control_type)
        # self.replay = EmuReplay()
        self.delta_scale = delta_scale
        self.traces = traces
        self.current_trace = np.random.choice(self.traces)
        self.train_flag = train_flag
        self.congestion_control_type = congestion_control_type
        # Aurora acts on rate only; cubic additionally drives a cwnd.
        if self.congestion_control_type == 'aurora':
            self.use_cwnd = False
        elif self.congestion_control_type == 'cubic':
            self.use_cwnd = True
        self.history_len = history_len
        # print("History length: %d" % history_len)
        self.features = features.split(",")
        # print("Features: %s" % str(self.features))
        self.links = None
        self.senders = None
        self.create_new_links_and_senders()
        self.net = Network(self.senders, self.links, self)
        self.run_dur = None
        self.run_period = 0.1
        self.steps_taken = 0
        self.debug_thpt_changes = False
        self.last_thpt = None
        self.last_rate = None
        # Action space: rate delta (plus cwnd delta when use_cwnd).
        if self.use_cwnd:
            self.action_space = spaces.Box(
                np.array([-1e12, -1e12]), np.array([1e12, 1e12]), dtype=np.float32)
        else:
            self.action_space = spaces.Box(
                np.array([-1e12]), np.array([1e12]), dtype=np.float32)
        self.observation_space = None
        # use_only_scale_free = True
        single_obs_min_vec = sender_obs.get_min_obs_vector(self.features)
        single_obs_max_vec = sender_obs.get_max_obs_vector(self.features)
        self.observation_space = spaces.Box(np.tile(single_obs_min_vec, self.history_len),
                                            np.tile(single_obs_max_vec,
                                                    self.history_len),
                                            dtype=np.float32)
        self.reward_sum = 0.0
        self.reward_ewma = 0.0
        self.episodes_run = -1
    def seed(self, seed=None):
        """Seed the env RNG; returns the list of seeds used (gym API)."""
        self.rand, seed = seeding.np_random(seed)
        return [seed]
    def _get_all_sender_obs(self):
        """Flattened observation vector of the (single) sender's history."""
        sender_obs = self.senders[0].get_obs()
        sender_obs = np.array(sender_obs).reshape(-1,)
        return sender_obs
    def step(self, actions):
        """Apply the rate (and optionally cwnd) delta, run one monitor
        interval, and return (obs, reward, done, info) per the gym API."""
        #print("Actions: %s" % str(actions))
        # print(actions)
        for i in range(0, 1):  # len(actions)):
            #print("Updating rate for sender %d" % i)
            action = actions
            self.senders[i].apply_rate_delta(action[0])
            if self.use_cwnd:
                self.senders[i].apply_cwnd_delta(action[1])
        # print("Running for %fs" % self.run_dur)
        reward = self.net.run_for_dur(self.run_dur, action=actions[0])
        self.steps_taken += 1
        sender_obs = self._get_all_sender_obs()
        should_stop = self.current_trace.is_finished(self.net.get_cur_time())
        self.reward_sum += reward
        # print('env step: {}s'.format(time.time() - t_start))
        return sender_obs, reward, should_stop, {}
    def print_debug(self):
        """Dump link and sender state to stdout."""
        print("---Link Debug---")
        for link in self.links:
            link.print_debug()
        print("---Sender Debug---")
        for sender in self.senders:
            sender.print_debug()
    def create_new_links_and_senders(self):
        """Build the two-link path and the sender(s) for the current trace."""
        # self.replay.reset()
        self.links = [Link(self.current_trace), Link(self.current_trace)]
        if self.congestion_control_type == "aurora":
            if not self.train_flag:
                self.senders = [Sender( #self.replay.get_rate(),
                    # 2500000 / 8 /BYTES_PER_PACKET / 0.048,
                    # 12000000 / 8 /BYTES_PER_PACKET / 0.048,
                    # 10 / (self.current_trace.get_delay(0) *2/1000),
                    100,
                    [self.links[0], self.links[1]], 0,
                    self.features,
                    history_len=self.history_len,
                    delta_scale=self.delta_scale)]
            else:
                # self.senders = [Sender(random.uniform(0.3, 1.5) * bw,
                #                        [self.links[0], self.links[1]], 0,
                #                        self.features,
                #                        history_len=self.history_len)]
                # self.senders = [Sender(random.uniform(10/bw, 1.5) * bw,
                #                        [self.links[0], self.links[1]], 0,
                #                        self.features,
                #                        history_len=self.history_len,
                #                        delta_scale=self.delta_scale)]
                self.senders = [Sender(100,
                                       [self.links[0], self.links[1]], 0,
                                       self.features,
                                       history_len=self.history_len,
                                       delta_scale=self.delta_scale)]
        elif self.congestion_control_type == "cubic":
            raise NotImplementedError
        else:
            raise RuntimeError("Unrecognized congestion_control_type {}".format(
                self.congestion_control_type))
        # self.run_dur = 3 * lat
        # self.run_dur = 1 * lat
        if not self.senders[0].rtt_samples:
            # self.run_dur = 0.473
            # self.run_dur = 5 / self.senders[0].rate
            self.run_dur = 0.01
            # self.run_dur = self.current_trace.get_delay(0) * 2 / 1000
            # self.run_dur = self.replay.get_ts() - 0
    def reset(self):
        """Start a new episode on a freshly sampled trace and return the
        initial observation (after a warm-up monitor interval)."""
        self.steps_taken = 0
        self.net.reset()
        self.current_trace = np.random.choice(self.traces)
        self.current_trace.reset()
        self.create_new_links_and_senders()
        self.net = Network(self.senders, self.links, self)
        self.episodes_run += 1
        # self.replay.reset()
        self.net.run_for_dur(self.run_dur)
        self.reward_ewma *= 0.99
        self.reward_ewma += 0.01 * self.reward_sum
        # print("Reward: %0.2f, Ewma Reward: %0.2f" % (self.reward_sum, self.reward_ewma))
        self.reward_sum = 0.0
        return self._get_all_sender_obs()
# Register the simulator with Gym so gym.make('PccNs-v0') resolves to it.
register(id='PccNs-v0', entry_point='simulator.network:SimulatedNetworkEnv')
| 0 | 0 |
e2b17755e0aaa5b3a5cbb71d2ff79a60e5f99eea | 3,150 | py | Python | cheddar_oauth_example/settings.py | brianbrunner/cheddar-oauth-demo | 7768023a355d9cdc2e861aded2c05ebe3246c930 | [
"MIT"
] | 1 | 2015-05-26T18:21:32.000Z | 2015-05-26T18:21:32.000Z | cheddar_oauth_example/settings.py | brianbrunner/cheddar-oauth-demo | 7768023a355d9cdc2e861aded2c05ebe3246c930 | [
"MIT"
] | null | null | null | cheddar_oauth_example/settings.py | brianbrunner/cheddar-oauth-demo | 7768023a355d9cdc2e861aded2c05ebe3246c930 | [
"MIT"
] | null | null | null | """
Django settings for cheddar_oauth_example project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment (or local_settings) before any production deploy.
SECRET_KEY = '43cy=fmsak_xqkme&yi9@c^+-*0pvr%s+-of!yzx6rdiw*!bxt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'social.apps.django_app.default',
    'django.contrib.humanize',
    'app',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cheddar_oauth_example.urls'
WSGI_APPLICATION = 'cheddar_oauth_example.wsgi.application'
# python-social-auth backend providing the Cheddar OAuth2 login flow.
AUTHENTICATION_BACKENDS = (
    'oauth.cheddar.CheddarOAuth2',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'social.apps.django_app.context_processors.backends',
    'social.apps.django_app.context_processors.login_redirect',
)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# SOCIAL_AUTH_CHEDDAR_SCOPE = []
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login_error'
# Logging
LOGGING = {
    'version': 1,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        },
    }
}
# Import Local Settings
try:
    from local_settings import *
except ImportError as e:
    # NOTE: Python 2 print statement -- this settings module targets Python 2.
    print "FAILED TO IMPORT LOCAL SETTINGS: %s" % e
| 23.333333 | 89 | 0.699365 | """
Django settings for cheddar_oauth_example project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment (or local_settings) before any production deploy.
SECRET_KEY = '43cy=fmsak_xqkme&yi9@c^+-*0pvr%s+-of!yzx6rdiw*!bxt'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'social.apps.django_app.default',
    'django.contrib.humanize',
    'app',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cheddar_oauth_example.urls'
WSGI_APPLICATION = 'cheddar_oauth_example.wsgi.application'
# python-social-auth backend providing the Cheddar OAuth2 login flow.
AUTHENTICATION_BACKENDS = (
    'oauth.cheddar.CheddarOAuth2',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'social.apps.django_app.context_processors.backends',
    'social.apps.django_app.context_processors.login_redirect',
)
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# SOCIAL_AUTH_CHEDDAR_SCOPE = []
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login_error'
# Logging
LOGGING = {
    'version': 1,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        },
    }
}
# Import Local Settings
try:
    from local_settings import *
except ImportError as e:
    # NOTE: Python 2 print statement -- this settings module targets Python 2.
    print "FAILED TO IMPORT LOCAL SETTINGS: %s" % e
| 0 | 0 |
1097c97b6b77b2f181e0c5a9531a0851278011cb | 470 | py | Python | django101/django102/urls.py | Minkov/python-web-2020-09 | a43baf4dd4dd811caf25aad971a0f1a4d3d486a4 | [
"MIT"
] | 4 | 2020-10-30T23:13:50.000Z | 2020-12-26T21:35:00.000Z | django101/django102/urls.py | Minkov/python-web-2020-09 | a43baf4dd4dd811caf25aad971a0f1a4d3d486a4 | [
"MIT"
] | null | null | null | django101/django102/urls.py | Minkov/python-web-2020-09 | a43baf4dd4dd811caf25aad971a0f1a4d3d486a4 | [
"MIT"
] | 7 | 2020-09-17T13:08:35.000Z | 2020-10-31T15:01:46.000Z | from django.urls import path
from django102.views import index as index_view, UsersListView, GamesListView, something, methods_demo, \
raises_exception, create_game
urlpatterns = [
path('', index_view, name='index'),
path('2/', UsersListView.as_view()),
path('games/', GamesListView.as_view()),
path('smth/', something),
path('methods/', methods_demo),
path('raises/', raises_exception),
path('creategame/', create_game),
] | 33.571429 | 106 | 0.676596 | from django.urls import path
from django102.views import index as index_view, UsersListView, GamesListView, something, methods_demo, \
raises_exception, create_game
urlpatterns = [
path('', index_view, name='index'),
path('2/', UsersListView.as_view()),
path('games/', GamesListView.as_view()),
path('smth/', something),
path('methods/', methods_demo),
path('raises/', raises_exception),
path('creategame/', create_game),
] | 0 | 0 |
699f409bdd5d561bb93770f28b38f939f53fc421 | 5,483 | py | Python | 3_dataset_create.py | shivanirmishra/musicgenre | 954214b6f7756c05de1253702811fd69dd99b0e2 | [
"MIT"
] | null | null | null | 3_dataset_create.py | shivanirmishra/musicgenre | 954214b6f7756c05de1253702811fd69dd99b0e2 | [
"MIT"
] | null | null | null | 3_dataset_create.py | shivanirmishra/musicgenre | 954214b6f7756c05de1253702811fd69dd99b0e2 | [
"MIT"
] | null | null | null |
# Colab script: extract audio features from genre-labelled test clips
# (5s/10s/20s versions) and dump one CSV per clip length for genre
# classification downstream.
from google.colab import drive
drive.mount('/content/drive')
import librosa
import os
import pandas as pd
from numpy import mean
import warnings
warnings.filterwarnings('ignore')

# Folders of test clips, keyed by "<genre>_<cliplength>".
folders_5s = {
    'pop_5s': '/content/drive/My Drive/ML_Project/New_Data/pop_test_5s',
    'rnb_5s': '/content/drive/My Drive/ML_Project/New_Data/rnb_test_5s',
    'blues_5s': '/content/drive/My Drive/ML_Project/New_Data/blues_test_5s',
    'hiphop_5s': '/content/drive/My Drive/ML_Project/New_Data/hiphop_test_5s',
    'rock_5s': '/content/drive/My Drive/ML_Project/New_Data/rock_test_5s'
}
folders_10s = {
    'pop_10s': '/content/drive/My Drive/ML_Project/New_Data/pop_test_10s',
    'rnb_10s': '/content/drive/My Drive/ML_Project/New_Data/rnb_test_10s',
    'blues_10s': '/content/drive/My Drive/ML_Project/New_Data/blues_test_10s',
    'hiphop_10s': '/content/drive/My Drive/ML_Project/New_Data/hiphop_test_10s',
    'rock_10s': '/content/drive/My Drive/ML_Project/New_Data/rock_test_10s'
}
folders_20s = {
    'pop_20s': '/content/drive/My Drive/ML_Project/New_Data/pop_test_20s',
    'rnb_20s': '/content/drive/My Drive/ML_Project/New_Data/rnb_test_20s',
    'blues_20s': '/content/drive/My Drive/ML_Project/New_Data/blues_test_20s',
    'hiphop_20s': '/content/drive/My Drive/ML_Project/New_Data/hiphop_test_20s',
    'rock_20s': '/content/drive/My Drive/ML_Project/New_Data/rock_test_20s'
}
# Numeric class label per folder key: pop=0, rnb=1, blues=2, hiphop=3, rock=4.
label = {
    'pop_5s': 0, 'rnb_5s': 1, 'blues_5s': 2, 'hiphop_5s': 3, 'rock_5s': 4,
    'pop_10s': 0, 'rnb_10s': 1, 'blues_10s': 2, 'hiphop_10s': 3, 'rock_10s': 4,
    'pop_20s': 0, 'rnb_20s': 1, 'blues_20s': 2, 'hiphop_20s': 3, 'rock_20s': 4
}

def extract_song_features(songname, label_value):
    """Return one feature row for a clip: tempo, mean beat frame, and the
    means of chroma, RMS energy, spectral centroid/bandwidth/rolloff and
    zero-crossing rate, then the 20 mean MFCC coefficients, then the label."""
    y, sr = librosa.load(songname, mono=True)
    row = []
    tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
    row.append(tempo)
    row.append(mean(beats))
    row.append(mean(librosa.feature.chroma_stft(y=y, sr=sr)))
    # NOTE: librosa.feature.rmse is the RMS energy (renamed `rms` in newer librosa).
    row.append(mean(librosa.feature.rmse(y=y)))
    row.append(mean(librosa.feature.spectral_centroid(y=y, sr=sr)))
    row.append(mean(librosa.feature.spectral_bandwidth(y=y, sr=sr)))
    row.append(mean(librosa.feature.spectral_rolloff(y=y, sr=sr)))
    row.append(mean(librosa.feature.zero_crossing_rate(y)))
    for coeff in librosa.feature.mfcc(y=y, sr=sr):
        row.append(mean(coeff))
    row.append(label_value)
    return row

def build_dataset(folders):
    """Extract a feature row for every audio file under every folder."""
    rows = []
    for name, path in folders.items():
        for filename in os.listdir(path):
            # BUGFIX: the original built the path as f'{path}/(unknown)',
            # ignoring `filename`; join the real file name instead.
            rows.append(extract_song_features(os.path.join(path, filename),
                                              label[name]))
    return rows

data_5s = build_dataset(folders_5s)
data_10s = build_dataset(folders_10s)
data_20s = build_dataset(folders_20s)

data_5s = pd.DataFrame(data_5s)
data_5s.to_csv('/content/drive/My Drive/ML_Project/data_5s_test_all_genres.csv')
data_10s = pd.DataFrame(data_10s)
data_10s.to_csv('/content/drive/My Drive/ML_Project/data_10s_test_all_genres.csv')
data_20s = pd.DataFrame(data_20s)
data_20s.to_csv('/content/drive/My Drive/ML_Project/data_20s_test_all_genres.csv')
# Bare expression: displays the 10s dataframe as the notebook cell output.
data_10s
| 28.857895 | 89 | 0.666423 |
# Colab script: extract audio features from genre-labelled test clips
# (5s/10s/20s versions) and dump one CSV per clip length for genre
# classification downstream.
from google.colab import drive
drive.mount('/content/drive')
import librosa
import os
import pandas as pd
from numpy import mean
import warnings
warnings.filterwarnings('ignore')

# Folders of test clips, keyed by "<genre>_<cliplength>".
folders_5s = {
    'pop_5s': '/content/drive/My Drive/ML_Project/New_Data/pop_test_5s',
    'rnb_5s': '/content/drive/My Drive/ML_Project/New_Data/rnb_test_5s',
    'blues_5s': '/content/drive/My Drive/ML_Project/New_Data/blues_test_5s',
    'hiphop_5s': '/content/drive/My Drive/ML_Project/New_Data/hiphop_test_5s',
    'rock_5s': '/content/drive/My Drive/ML_Project/New_Data/rock_test_5s'
}
folders_10s = {
    'pop_10s': '/content/drive/My Drive/ML_Project/New_Data/pop_test_10s',
    'rnb_10s': '/content/drive/My Drive/ML_Project/New_Data/rnb_test_10s',
    'blues_10s': '/content/drive/My Drive/ML_Project/New_Data/blues_test_10s',
    'hiphop_10s': '/content/drive/My Drive/ML_Project/New_Data/hiphop_test_10s',
    'rock_10s': '/content/drive/My Drive/ML_Project/New_Data/rock_test_10s'
}
folders_20s = {
    'pop_20s': '/content/drive/My Drive/ML_Project/New_Data/pop_test_20s',
    'rnb_20s': '/content/drive/My Drive/ML_Project/New_Data/rnb_test_20s',
    'blues_20s': '/content/drive/My Drive/ML_Project/New_Data/blues_test_20s',
    'hiphop_20s': '/content/drive/My Drive/ML_Project/New_Data/hiphop_test_20s',
    'rock_20s': '/content/drive/My Drive/ML_Project/New_Data/rock_test_20s'
}
# Numeric class label per folder key: pop=0, rnb=1, blues=2, hiphop=3, rock=4.
label = {
    'pop_5s': 0, 'rnb_5s': 1, 'blues_5s': 2, 'hiphop_5s': 3, 'rock_5s': 4,
    'pop_10s': 0, 'rnb_10s': 1, 'blues_10s': 2, 'hiphop_10s': 3, 'rock_10s': 4,
    'pop_20s': 0, 'rnb_20s': 1, 'blues_20s': 2, 'hiphop_20s': 3, 'rock_20s': 4
}

def extract_song_features(songname, label_value):
    """Return one feature row for a clip: tempo, mean beat frame, and the
    means of chroma, RMS energy, spectral centroid/bandwidth/rolloff and
    zero-crossing rate, then the 20 mean MFCC coefficients, then the label."""
    y, sr = librosa.load(songname, mono=True)
    row = []
    tempo, beats = librosa.beat.beat_track(y=y, sr=sr)
    row.append(tempo)
    row.append(mean(beats))
    row.append(mean(librosa.feature.chroma_stft(y=y, sr=sr)))
    # NOTE: librosa.feature.rmse is the RMS energy (renamed `rms` in newer librosa).
    row.append(mean(librosa.feature.rmse(y=y)))
    row.append(mean(librosa.feature.spectral_centroid(y=y, sr=sr)))
    row.append(mean(librosa.feature.spectral_bandwidth(y=y, sr=sr)))
    row.append(mean(librosa.feature.spectral_rolloff(y=y, sr=sr)))
    row.append(mean(librosa.feature.zero_crossing_rate(y)))
    for coeff in librosa.feature.mfcc(y=y, sr=sr):
        row.append(mean(coeff))
    row.append(label_value)
    return row

def build_dataset(folders):
    """Extract a feature row for every audio file under every folder."""
    rows = []
    for name, path in folders.items():
        for filename in os.listdir(path):
            # BUGFIX: the original built the path as f'{path}/(unknown)',
            # ignoring `filename`; join the real file name instead.
            rows.append(extract_song_features(os.path.join(path, filename),
                                              label[name]))
    return rows

data_5s = build_dataset(folders_5s)
data_10s = build_dataset(folders_10s)
data_20s = build_dataset(folders_20s)

data_5s = pd.DataFrame(data_5s)
data_5s.to_csv('/content/drive/My Drive/ML_Project/data_5s_test_all_genres.csv')
data_10s = pd.DataFrame(data_10s)
data_10s.to_csv('/content/drive/My Drive/ML_Project/data_10s_test_all_genres.csv')
data_20s = pd.DataFrame(data_20s)
data_20s.to_csv('/content/drive/My Drive/ML_Project/data_20s_test_all_genres.csv')
# Bare expression: displays the 10s dataframe as the notebook cell output.
data_10s
| 0 | 0 |
412a8b42be8c6054311e076c95465833bdd45355 | 1,206 | py | Python | data/train_test_split.py | ttaoREtw/A-Pytorch-Implementation-of-Tacotron-End-to-end-Text-to-speech-Deep-Learning-Model | 6b0f615cafb0530370631a880aac5736fe9a2c64 | [
"MIT"
] | 105 | 2018-09-13T02:45:10.000Z | 2021-06-24T03:31:15.000Z | data/train_test_split.py | henryhenrychen/Tacotron-pytorch | 4a4d1ea0d83fd88a50464999f5d55fe012c86687 | [
"MIT"
] | 9 | 2018-12-11T02:37:58.000Z | 2021-03-18T02:42:40.000Z | data/train_test_split.py | henryhenrychen/Tacotron-pytorch | 4a4d1ea0d83fd88a50464999f5d55fe012c86687 | [
"MIT"
] | 31 | 2018-09-15T14:51:31.000Z | 2021-01-19T07:37:14.000Z | import os
import argparse
import random
def split_and_save(args):
    """Split a combined meta file into train/test meta files.

    The two output files (``meta_train.txt`` and ``meta_test.txt``) are
    written next to the input file.

    Args:
        args: Namespace with ``meta_all`` (path to the combined meta file)
            and ``ratio_test`` (fraction of lines held out for testing).
    """
    meta_all_path = args.meta_all
    meta_dir = os.path.dirname(os.path.realpath(meta_all_path))
    meta_tr_path = os.path.join(meta_dir, 'meta_train.txt')
    meta_te_path = os.path.join(meta_dir, 'meta_test.txt')
    with open(meta_all_path) as f:
        meta_all = f.readlines()
    meta_tr = []
    meta_te = []
    n_meta = len(meta_all)
    n_test = int(args.ratio_test * n_meta)
    # Use a set: membership tests inside the loop below become O(1)
    # instead of O(n_test) per line (random.sample returns a list).
    indice_te = set(random.sample(range(n_meta), n_test))
    for idx, line in enumerate(meta_all):
        if idx in indice_te:
            meta_te.append(line)
        else:
            meta_tr.append(line)
    with open(meta_tr_path, 'w') as ftr:
        ftr.write(''.join(meta_tr))
    with open(meta_te_path, 'w') as fte:
        fte.write(''.join(meta_te))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Split the data')
    parser.add_argument('--meta-all', type=str, help='The meta file generated by preprocess.py', required=True)
    parser.add_argument('--ratio-test', default=0.1, type=float, help='ratio of testing examples', required=False)
    args = parser.parse_args()
    split_and_save(args)
| 31.736842 | 114 | 0.662521 | import os
import argparse
import random
def split_and_save(args):
meta_all_path = args.meta_all
meta_dir = os.path.dirname(os.path.realpath(meta_all_path))
meta_tr_path = os.path.join(meta_dir, 'meta_train.txt')
meta_te_path = os.path.join(meta_dir, 'meta_test.txt')
with open(meta_all_path) as f:
meta_all = f.readlines()
meta_tr = []
meta_te = []
n_meta = len(meta_all)
n_test = int(args.ratio_test * n_meta)
indice_te = random.sample(range(n_meta), n_test)
for idx, line in enumerate(meta_all):
if idx in indice_te:
meta_te.append(line)
else:
meta_tr.append(line)
with open(meta_tr_path, 'w') as ftr:
ftr.write(''.join(meta_tr))
with open(meta_te_path, 'w') as fte:
fte.write(''.join(meta_te))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Split the data')
parser.add_argument('--meta-all', type=str, help='The meta file generated by preprocess.py', required=True)
parser.add_argument('--ratio-test', default=0.1, type=float, help='ratio of testing examples', required=False)
args = parser.parse_args()
split_and_save(args)
| 0 | 0 |
4d36c18720eb25777d76206398891b1da5c803d3 | 10,711 | py | Python | item_sets.py | jay-maity/RecommendPCY | 040eda27be46d241406d3cb8ce6605dde492fef9 | [
"MIT"
] | null | null | null | item_sets.py | jay-maity/RecommendPCY | 040eda27be46d241406d3cb8ce6605dde492fef9 | [
"MIT"
] | null | null | null | item_sets.py | jay-maity/RecommendPCY | 040eda27be46d241406d3cb8ce6605dde492fef9 | [
"MIT"
] | null | null | null | """ Frequent item discovery by PCY algorithm"""
import operator
import json
import sys
from pyspark import SparkContext, SparkConf
import pyspark_cassandra
from cassandra.cluster import Cluster
cluster = None
session = None
class PCYFrequentItems:
    """
    Find frequent item pairs using the PCY (Park-Chen-Yu) algorithm on Spark.

    Pass 1 counts hashed singletons and hashed pairs; pairs are then kept
    only if both members are frequent AND the pair hashes into a frequent
    bucket (the "bitmap").  Results are written to text output and to
    Cassandra (table names come from the JSON config file).
    """
    # Class-level defaults; both are overridden per instance in __init__.
    IS_DEBUGGING = False
    config_object = None
    def __init__(self, is_debug, config_file="config.json"):
        """
        Sets the debugging flag and loads the JSON configuration.
        :param is_debug: Print collect messages if set true
        :param config_file: Path to a JSON file holding CassandraHosts,
            KeySpace and the table names used by frequent_items().
        """
        self.IS_DEBUGGING = is_debug
        json_data = open(config_file).read()
        self.config_object = json.loads(json_data)
    @staticmethod
    def group_items(basket, group_size):
        """
        Get item_groups (all sorted combinations of `group_size` items)
        from a basket.
        :param basket: Basket to search the item_group from (could be a single cart)
        :param group_size: Size of the item_group to form
        :return: list of tuples, each tuple a sorted group of `group_size` items
        """
        assert (group_size >= 1 and isinstance(group_size, int)), \
            "Please use group size as Integer and > 0"
        # In case of group size is one simply return each items
        if group_size == 1:
            return [(item,) for item in basket]
        item_groups = []
        if len(basket) >= group_size:
            # Sort the basket so every emitted tuple is in canonical order
            basket = sorted(basket)
            # Loop through the basket
            for i in range(len(basket) - group_size + 1):
                # Gets the base and add all items for each group
                # until end
                # If base is [2,3] and basket is [2,3,4,5]
                # then creates [2,3,4], [2,3,5]
                base_item_count = i + (group_size - 1)
                base_items = basket[i:base_item_count]
                for item in basket[base_item_count:]:
                    item_groups.append(tuple(base_items) + (item,))
        return item_groups
    @staticmethod
    def map_nodes(line):
        """
        Map line into graph node key = value as array.
        Expected input format: "key: v1 v2 v3 ..."; only the values are
        returned (as ints); an empty value list yields [].
        """
        key_values = line.split(":")
        # key = int(key_values[0])
        values = []
        if key_values[1].strip() != "":
            values = [int(node) for node in key_values[1].strip().split(' ')]
        return values
    @staticmethod
    def filter_pairs(pair, hosts, keyspace, hashfunction, item_table, bitmap_table):
        """
        Filter pairs by querying from cassandra table.
        Runs on Spark executors: the module-level cluster/session globals are
        a per-process connection cache so each executor connects only once.
        :return: truthy when both items are frequent and the pair's hash
            bucket is frequent (ResultSet truthiness is assumed to reflect
            whether rows were returned -- confirm for the installed driver).
        """
        global cluster, session
        if cluster is None:
            cluster = Cluster(hosts)
            session = cluster.connect(keyspace)
        item1 = session.execute("select item from "
                                + item_table
                                + " where item = %d" % pair[0])
        item2 = session.execute("select item from "
                                + item_table
                                + " where item = %d" % pair[1])
        bitmap = session.execute("select hash from "
                                 + bitmap_table
                                 + " where hash = %d" % hashfunction(pair))
        print("Pair checked " + str(pair[0]))
        return item1 and item2 and bitmap
    @staticmethod
    def filter_pairs_broadcast(pair, freq_pair, bitmap, hashfunction):
        """
        Filter pairs from broadcast variables (in-memory PCY membership test).
        :return: True when both members are frequent items and the pair's
            hash falls in a frequent bucket.
        """
        return pair[0] in freq_pair and pair[1] in freq_pair and hashfunction(pair) in bitmap
    def pcy_freq_items(self, item_group_rdd, hash_function, support_count):
        """
        Get frequent hash buckets for a particular group of items.
        :param item_group_rdd: RDD of item tuples (from group_items)
        :param hash_function: maps an item tuple to an int bucket
        :param support_count: minimum count (exclusive) to keep a bucket
        :return: RDD of bucket hashes whose count exceeds support_count
        """
        # Hash and Items mapping
        order_prod_hash = item_group_rdd \
            .map(lambda x: (hash_function(x), 1))
        # Group, filter and get unique item sets
        frequent_items = order_prod_hash.reduceByKey(operator.add) \
            .filter(lambda x: x[1] > support_count) \
            .map(lambda x: x[0])
        return frequent_items
    def pcy_pass(self, order_prod, pass_no, support_count, hashn, hashnplus1,
                 is_nplus1_cache=False):
        """
        Calculates frequent items and bitmap after n th pass.
        :param order_prod: RDD of baskets (lists of item ids)
        :param pass_no: current pass number n (group size for this pass)
        :param support_count: support threshold passed to pcy_freq_items
        :param hashn: hash function for size-n groups
        :param hashnplus1: hash function for size-(n+1) groups (the bitmap)
        :param is_nplus1_cache: cache the (n+1)-group RDD for reuse by caller
        :return: (frequent n-group hashes RDD, frequent (n+1)-bucket RDD,
                  the (n+1)-group RDD itself)
        """
        item_set_count = pass_no
        order_prod_single = order_prod. \
            flatMap(lambda x: PCYFrequentItems.
                    group_items(x, item_set_count))
        frequent_items_n = self.pcy_freq_items(order_prod_single,
                                               hashn,
                                               support_count)
        item_set_count += 1
        order_prod_pairs = order_prod. \
            flatMap(lambda x: PCYFrequentItems.group_items(x, item_set_count))
        if is_nplus1_cache:
            order_prod_pairs = order_prod_pairs.cache()
        bitmap_nplus1 = self.pcy_freq_items(order_prod_pairs,
                                            hashnplus1,
                                            support_count)
        return frequent_items_n, bitmap_nplus1, order_prod_pairs
    @staticmethod
    def pair_bitmap(items):
        """
        Hash function for calculation for pairs.
        Maps a tuple of items into one of 999917 buckets; (2*item + 1) keeps
        every factor odd so distinct pairs spread across buckets.
        :return: int bucket in [0, 999917)
        """
        mul = 1
        for item in items:
            mul *= ((2 * item) + 1)
        return mul % 999917
    @staticmethod
    def single(items):
        """
        Hash function for calculation (singletons).
        :return: product of the items modulo 100000000; for a 1-tuple this
            is effectively the item id itself for ids below 10^8.
        """
        mul = 1
        for item in items:
            mul *= item
        return mul % 100000000
    def configure(self):
        """
        Configure spark and cassandra objects.
        :return: a SparkContext wired to the configured Cassandra hosts
        """
        # Spark Configuration
        conf = SparkConf().setAppName('Frequent Item Sets'). \
            set('spark.cassandra.connection.host', ','.join(self.config_object["CassandraHosts"]))
        return SparkContext(conf=conf)
    def frequent_items(self, inputs, output, support_count, is_broadcast=True):
        """Run the full two-pass PCY job and persist frequent pairs.
        Args:
            inputs:Input file location
            output:Output file location
            support_count: minimum occurrence count for items/buckets
            is_broadcast: Item pair will be found using broadcast or not
                (False routes the membership test through Cassandra lookups
                on the executors instead of broadcast variables)
        """
        spark_context = self.configure()
        # File loading
        text = spark_context.textFile(inputs)
        order_prod = text.map(PCYFrequentItems.map_nodes).cache()
        pass_no = 1
        # Pass 1: frequent singletons + frequent pair buckets; the pair RDD
        # is cached because it is filtered again below.
        freq_items, bitmap, all_pairs = self.pcy_pass(order_prod,
                                                      pass_no,
                                                      support_count,
                                                      PCYFrequentItems.single,
                                                      PCYFrequentItems.pair_bitmap,
                                                      is_nplus1_cache=True)
        if self.IS_DEBUGGING:
            print("Frequent " + str(pass_no) + "-group items after pass:" + str(pass_no))
            print(freq_items.collect())
            print("Bitmap for " + str(pass_no + 1) + "-group items after pass:" + str(pass_no))
            print(bitmap.collect())
        # System will use broadcast based on user input
        if is_broadcast:
            bitmap_set = set(bitmap.collect())
            freq_items_set = set(freq_items.collect())
            bitmap_broadast = spark_context.broadcast(bitmap_set)
            freq_items_set = spark_context.broadcast(freq_items_set)
            frequent_pairs = all_pairs.filter(lambda x:
                                              PCYFrequentItems.
                                              filter_pairs_broadcast(x,
                                                                     freq_items_set.value,
                                                                     bitmap_broadast.value,
                                                                     PCYFrequentItems.pair_bitmap
                                                                     ))
        else:
            # Making freq items Ready to save to cassandra
            freq_items = freq_items.map(lambda x: {'item': x})
            freq_items.saveToCassandra(self.config_object["KeySpace"],
                                       self.config_object["Item1Table"])
            # Making bitmap Ready to save to cassandra
            bitmap = bitmap.map(lambda x: {'hash': x})
            bitmap.saveToCassandra(self.config_object["KeySpace"],
                                   self.config_object["Bitmap2Table"])
            print(all_pairs.count())
            # Executors re-read the tables just written above (filter_pairs
            # opens its own Cassandra session per worker process).
            frequent_pairs = all_pairs.filter(lambda x: PCYFrequentItems.
                                              filter_pairs(x,
                                                           self.config_object["CassandraHosts"],
                                                           self.config_object["KeySpace"],
                                                           PCYFrequentItems.pair_bitmap,
                                                           self.config_object["Item1Table"],
                                                           self.config_object["Bitmap2Table"]))
        if self.IS_DEBUGGING:
            print(all_pairs.collect())
            print(frequent_pairs.collect())
        # Saves as text file
        frequent_pairs.saveAsTextFile(output)
        frequent_pairs = frequent_pairs.\
            map(lambda x: {'productid1': x[0], 'productid2': x[1]})
        # Save final output to cassandra
        frequent_pairs.saveToCassandra(self.config_object["KeySpace"],
                                       self.config_object["RecommendTable"])
        all_pairs.unpersist()
        order_prod.unpersist()
def main():
    """Parse command-line arguments and launch the PCY frequent-item job.

    Usage: <input_path> <output_path> <support_count> [broadcast_flag]
    A broadcast_flag of 1 (the default when omitted) enables the
    broadcast-variable join; any other value uses the Cassandra path.
    """
    in_path = sys.argv[1]
    out_path = sys.argv[2]
    support = int(sys.argv[3])
    flag = int(sys.argv[4]) if len(sys.argv) > 4 else 1
    use_broadcast = (flag == 1)
    runner = PCYFrequentItems(is_debug=True)
    runner.frequent_items(in_path, out_path, support, use_broadcast)
if __name__ == "__main__":
    main()
| 34.220447 | 98 | 0.527588 | """ Frequent item discovery by PCY algorithm"""
import operator
import json
import sys
from pyspark import SparkContext, SparkConf
import pyspark_cassandra
from cassandra.cluster import Cluster
cluster = None
session = None
class PCYFrequentItems:
"""
Find Frequent item list using PCY algorithm
"""
IS_DEBUGGING = False
config_object = None
def __init__(self, is_debug, config_file="config.json"):
"""
Sets the initial debiggin parameter
:param is_debug: Print collect messages if set true
"""
self.IS_DEBUGGING = is_debug
json_data = open(config_file).read()
self.config_object = json.loads(json_data)
@staticmethod
def group_items(basket, group_size):
"""
Get item_groups from a basket
Returns sorted items by their numerical number
:param basket: Basket to search the item_group from (could be a single cart)
:param group_size: Size of the item_group to form
:return:
"""
assert (group_size >= 1 and isinstance(group_size, int)), \
"Please use group size as Integer and > 0"
# In case of group size is one simply return each items
if group_size == 1:
return [(item,) for item in basket]
item_groups = []
if len(basket) >= group_size:
# Sort the basket
basket = sorted(basket)
# Loop through the basket
for i in range(len(basket) - group_size + 1):
# Gets the base and add all items for each group
# until end
# If base is [2,3] and basket is [2,3,4,5]
# then creates [2,3,4], [2,3,5]
base_item_count = i + (group_size - 1)
base_items = basket[i:base_item_count]
for item in basket[base_item_count:]:
item_groups.append(tuple(base_items) + (item,))
return item_groups
@staticmethod
def map_nodes(line):
"""
Map line into graph node key = value as array
"""
key_values = line.split(":")
# key = int(key_values[0])
values = []
if key_values[1].strip() != "":
values = [int(node) for node in key_values[1].strip().split(' ')]
return values
@staticmethod
def filter_pairs(pair, hosts, keyspace, hashfunction, item_table, bitmap_table):
"""
Filter pairs by querying from cassandra table
:return:
"""
global cluster, session
if cluster is None:
cluster = Cluster(hosts)
session = cluster.connect(keyspace)
item1 = session.execute("select item from "
+ item_table
+ " where item = %d" % pair[0])
item2 = session.execute("select item from "
+ item_table
+ " where item = %d" % pair[1])
bitmap = session.execute("select hash from "
+ bitmap_table
+ " where hash = %d" % hashfunction(pair))
print("Pair checked " + str(pair[0]))
return item1 and item2 and bitmap
@staticmethod
def filter_pairs_broadcast(pair, freq_pair, bitmap, hashfunction):
"""
Filter pairs from broadcast variables
:return:
"""
return pair[0] in freq_pair and pair[1] in freq_pair and hashfunction(pair) in bitmap
def pcy_freq_items(self, item_group_rdd, hash_function, support_count):
"""
Get Frequent items for a particular group of items
:param item_group_rdd:
:param passno:
:param hash_function:
:param support_count:
:return:
"""
# Hash and Items mapping
order_prod_hash = item_group_rdd \
.map(lambda x: (hash_function(x), 1))
# Group, filter and get unique item sets
frequent_items = order_prod_hash.reduceByKey(operator.add) \
.filter(lambda x: x[1] > support_count) \
.map(lambda x: x[0])
return frequent_items
def pcy_pass(self, order_prod, pass_no, support_count, hashn, hashnplus1,
is_nplus1_cache=False):
"""
Calculates frequent items and bitmap after n th pass
:param order_prod:
:param pass_no:
:param support_count:
:param hashn:
:param hashnplus1:
:param is_nplus1_cache:
:return:
"""
item_set_count = pass_no
order_prod_single = order_prod. \
flatMap(lambda x: PCYFrequentItems.
group_items(x, item_set_count))
frequent_items_n = self.pcy_freq_items(order_prod_single,
hashn,
support_count)
item_set_count += 1
order_prod_pairs = order_prod. \
flatMap(lambda x: PCYFrequentItems.group_items(x, item_set_count))
if is_nplus1_cache:
order_prod_pairs = order_prod_pairs.cache()
bitmap_nplus1 = self.pcy_freq_items(order_prod_pairs,
hashnplus1,
support_count)
return frequent_items_n, bitmap_nplus1, order_prod_pairs
@staticmethod
def pair_bitmap(items):
"""
Hash function for calculation for pairs
:param items:
:return:
"""
mul = 1
for item in items:
mul *= ((2 * item) + 1)
return mul % 999917
@staticmethod
def single(items):
"""
Hash function for calculation
:param items:
:return:
"""
mul = 1
for item in items:
mul *= item
return mul % 100000000
def configure(self):
"""
Configure spark and cassandra objects
:param is_local_host:
:return:
"""
# Spark Configuration
conf = SparkConf().setAppName('Frequent Item Sets'). \
set('spark.cassandra.connection.host', ','.join(self.config_object["CassandraHosts"]))
return SparkContext(conf=conf)
def frequent_items(self, inputs, output, support_count, is_broadcast=True):
"""Output correlation coefficient without mean formula
Args:
inputs:Input file location
output:Output file location
support_count:
is_broadcast: Item pair will be found using broadcast or not
"""
spark_context = self.configure()
# File loading
text = spark_context.textFile(inputs)
order_prod = text.map(PCYFrequentItems.map_nodes).cache()
pass_no = 1
freq_items, bitmap, all_pairs = self.pcy_pass(order_prod,
pass_no,
support_count,
PCYFrequentItems.single,
PCYFrequentItems.pair_bitmap,
is_nplus1_cache=True)
if self.IS_DEBUGGING:
print("Frequent " + str(pass_no) + "-group items after pass:" + str(pass_no))
print(freq_items.collect())
print("Bitmap for " + str(pass_no + 1) + "-group items after pass:" + str(pass_no))
print(bitmap.collect())
# System will use broadcast based on user input
if is_broadcast:
bitmap_set = set(bitmap.collect())
freq_items_set = set(freq_items.collect())
bitmap_broadast = spark_context.broadcast(bitmap_set)
freq_items_set = spark_context.broadcast(freq_items_set)
frequent_pairs = all_pairs.filter(lambda x:
PCYFrequentItems.
filter_pairs_broadcast(x,
freq_items_set.value,
bitmap_broadast.value,
PCYFrequentItems.pair_bitmap
))
else:
# Making freq items Ready to save to cassandra
freq_items = freq_items.map(lambda x: {'item': x})
freq_items.saveToCassandra(self.config_object["KeySpace"],
self.config_object["Item1Table"])
# Making bitmap Ready to save to cassandra
bitmap = bitmap.map(lambda x: {'hash': x})
bitmap.saveToCassandra(self.config_object["KeySpace"],
self.config_object["Bitmap2Table"])
print(all_pairs.count())
frequent_pairs = all_pairs.filter(lambda x: PCYFrequentItems.
filter_pairs(x,
self.config_object["CassandraHosts"],
self.config_object["KeySpace"],
PCYFrequentItems.pair_bitmap,
self.config_object["Item1Table"],
self.config_object["Bitmap2Table"]))
if self.IS_DEBUGGING:
print(all_pairs.collect())
print(frequent_pairs.collect())
# Saves as text file
frequent_pairs.saveAsTextFile(output)
frequent_pairs = frequent_pairs.\
map(lambda x: {'productid1': x[0], 'productid2': x[1]})
# Save final output to cassandra
frequent_pairs.saveToCassandra(self.config_object["KeySpace"],
self.config_object["RecommendTable"])
all_pairs.unpersist()
order_prod.unpersist()
def main():
"""
Handles parameters for the file to run
:return:
"""
input_path = sys.argv[1]
output_path = sys.argv[2]
support_thresold = int(sys.argv[3])
broadcast = 1
if len(sys.argv) > 4:
broadcast = int(sys.argv[4])
pcy = PCYFrequentItems(is_debug=True)
if broadcast == 1:
is_broadcast = True
else:
is_broadcast = False
pcy.frequent_items(input_path, output_path, support_thresold, is_broadcast)
if __name__ == "__main__":
main()
| 0 | 0 |
67a731ca62e5cbd2844ce988950efc73fd0d3ec6 | 5,201 | pyw | Python | pncShell.pyw | BobBaylor/pnc | 11b5a08a1fce5c605a203c4e46c9d9599024ad3c | [
"MIT"
] | null | null | null | pncShell.pyw | BobBaylor/pnc | 11b5a08a1fce5c605a203c4e46c9d9599024ad3c | [
"MIT"
] | null | null | null | pncShell.pyw | BobBaylor/pnc | 11b5a08a1fce5c605a203c4e46c9d9599024ad3c | [
"MIT"
] | null | null | null | """
A wrapper around my pnc.py module
"""
import os.path
import wx
import wx.lib.filebrowsebutton as filebrowse
import pnc
class MyFrame(wx.Frame):
    """
    Main window of the PNC-to-JPG converter.  It shows a few controls on a
    wxPanel and has a simple menu.
    Use this file                       inFileBtn
    Write this root name                TextEntry
    and starting number                 TextEntry
    To here                             outDirRootButton
    Optional subdirectory               TextEntry
    Move the input file there, too      CheckBox
    """
    def __init__(self, parent, title):
        wide = 860
        wx.Frame.__init__(self, parent, wx.ID_ANY, title,
                          pos=(150, 150), size=(wide, 270))
        # make a minimalist menu bar
        self.CreateStatusBar()
        menu_bar = wx.MenuBar()
        menu1 = wx.Menu()
        menu1.Append(101, '&Close', 'Close this frame')
        # Bug fix: the menu was never attached to the menu bar, so the
        # Close item was unreachable.
        menu_bar.Append(menu1, '&File')
        self.SetMenuBar(menu_bar)
        # Bug fix: bind to evh_close rather than wx.Frame.Close directly --
        # binding Close passed the menu event as Close()'s `force` argument.
        self.Bind(wx.EVT_MENU, self.evh_close, id=101)
        # Now create the Panel to put the other controls on.
        self.panel = wx.Panel(self, wx.ID_ANY)
        # Use a sizer to layout the controls, stacked vertically and with
        # a 6 pixel border around each
        space = 6
        sflags = wx.ALL
        sizer = wx.BoxSizer(wx.VERTICAL)
        # and a few controls
        text = wx.StaticText(self, -1, "Browse to the .pnc file, choose a root and folder name, and press Do It!") #pylint: disable=line-too-long
        text.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD))
        text.SetSize(text.GetBestSize())
        sizer.Add(text, 0, sflags, space)
        # Input .PNC file picker
        self.btn_infile = filebrowse.FileBrowseButton(self, -1, size=(wide-10, -1),
                                                      changeCallback=self.cback_infile,
                                                      labelText='Use this PNC file')
        self.btn_infile.SetValue('/Users/guy/Downloads/JpegData.PNC')
        sizer.Add(self.btn_infile, 0, sflags, space)
        # Output directory picker
        self.file_browse_root = filebrowse.DirBrowseButton(self, -1, size=(wide-10, -1),
                                                           changeCallback=self.cback_file_root, #pylint: disable=line-too-long
                                                           labelText='To put JPG files here')
        # self.file_browse_root.SetValue( '/Users/guy/Pictures' )
        self.file_browse_root.SetValue('/Users/guy/python/test')
        # self.file_browse_root.callCallback = False
        sizer.Add(self.file_browse_root, 0, sflags, space)
        # optional subdirectory and file name root
        hsizer = wx.BoxSizer(wx.HORIZONTAL)
        hsizer.Add(wx.StaticText(self, -1, "Optional new dir:"), 0, sflags, space)
        self.tc_out_dir = wx.TextCtrl(self, -1, '')
        hsizer.Add(self.tc_out_dir, 0, sflags, space)
        hsizer.Add(wx.StaticText(self, -1, "Filename root:"), 0, sflags, space)
        self.tc_out_fname = wx.TextCtrl(self, -1, 'gcam')
        hsizer.Add(self.tc_out_fname, 0, sflags, space)
        # hsizer.Add(wx.StaticText(self, -1, "File number start:"), 0, sflags, space)
        sizer.Add(hsizer, 0, sflags, space)
        self.cb_move_file = wx.CheckBox(self, -1, 'Move Input file, too')
        sizer.Add(self.cb_move_file, 0, sflags, space)
        # bind the button events to handlers
        hsizer2 = wx.BoxSizer(wx.HORIZONTAL)
        funbtn = wx.Button(self, -1, "Do it")
        self.Bind(wx.EVT_BUTTON, self.evh_doit, funbtn)
        hsizer2.Add(funbtn, 0, sflags, space)
        btn = wx.Button(self, -1, "Close")
        self.Bind(wx.EVT_BUTTON, self.evh_close, btn)
        hsizer2.Add(btn, 0, sflags, space)
        sizer.Add(hsizer2, 0, sflags, space)
        self.SetSizer(sizer)
    def evh_close(self, evt): #pylint: disable=unused-argument
        """Close the frame (Close button or File > Close menu item)."""
        self.Close()
    def evh_doit(self, evt): #pylint: disable=unused-argument
        """Convert the selected .PNC file to JPGs in the chosen directory."""
        self.SetStatusText('working...')
        print('')  # blank console separator; works on both Python 2 and 3
        out_dir = self.file_browse_root.GetValue()
        out_new_dir = self.tc_out_dir.GetValue()
        out_dir = os.path.join(out_dir, out_new_dir)
        b_success = pnc.get_photos(self.btn_infile.GetValue(),
                                   out_dir, self.tc_out_fname.GetValue(),
                                   self.cb_move_file.GetValue())
        if b_success:
            self.SetStatusText('Done!')
        else:
            self.SetStatusText('Failed')
    def cback_infile(self, evt): #pylint: disable=unused-argument
        """Required changeCallback for the FileBrowseButton; nothing to do."""
        pass
    def cback_file_root(self, evt): #pylint: disable=unused-argument
        """Required changeCallback for the DirBrowseButton; nothing to do."""
        pass
class MyApp(wx.App):
    """ a simple GUI """
    def OnInit(self): #pylint: disable=invalid-name
        """ let's get this party started """
        # Build the main frame, register it as the app's top window, show it.
        frame = MyFrame(None, "Panasonic .PNC to .JPG converter")
        self.SetTopWindow(frame)
        frame.Show(True)
        # True tells wxPython that initialisation succeeded.
        return True
# app = MyApp(redirect=True)
app = MyApp() #pylint: disable=invalid-name
app.MainLoop()
| 38.242647 | 153 | 0.586233 | """
A wrapper around my pnc.py module
"""
import os.path
import wx
import wx.lib.filebrowsebutton as filebrowse
import pnc
class MyFrame(wx.Frame):
"""
This is MyFrame. It just shows a few controls on a wxPanel,
and has a simple menu.
Use this file inFileBtn
Write this root name TextEntry
and starting number TextEntry
To here outDirRootButton
Optional subdirectory TextEntry
Move the input file there, too CheckBox
"""
def __init__(self, parent, title):
wide = 860
wx.Frame.__init__(self, parent, wx.ID_ANY, title,
pos=(150, 150), size=(wide, 270))
# make a minimalist menu bar
self.CreateStatusBar()
menu_bar = wx.MenuBar()
menu1 = wx.Menu()
menu1.Append(101, '&Close', 'Close this frame')
self.SetMenuBar(menu_bar)
self.Bind(wx.EVT_MENU, self.Close, id=101)
# Now create the Panel to put the other controls on.
self.panel = wx.Panel(self, wx.ID_ANY)
# Use a sizer to layout the controls, stacked vertically and with
# a 6 pixel border around each
space = 6
sflags = wx.ALL
sizer = wx.BoxSizer(wx.VERTICAL)
# x = self
# sizer.Add(self.panel, wx.EXPAND )
# and a few controls
text = wx.StaticText(self, -1, "Browse to the .pnc file, choose a root and folder name, and press Do It!") #pylint: disable=line-too-long
text.SetFont(wx.Font(14, wx.SWISS, wx.NORMAL, wx.BOLD))
text.SetSize(text.GetBestSize())
sizer.Add(text, 0, sflags, space)
self.btn_infile = filebrowse.FileBrowseButton(self, -1, size=(wide-10, -1),
changeCallback=self.cback_infile,
labelText='Use this PNC file')
self.btn_infile.SetValue('/Users/guy/Downloads/JpegData.PNC')
sizer.Add(self.btn_infile, 0, sflags, space)
self.file_browse_root = filebrowse.DirBrowseButton(self, -1, size=(wide-10, -1),
changeCallback=self.cback_file_root, #pylint: disable=line-too-long
labelText='To put JPG files here')
# self.file_browse_root.SetValue( '/Users/guy/Pictures' )
self.file_browse_root.SetValue('/Users/guy/python/test')
# self.file_browse_root.callCallback = False
sizer.Add(self.file_browse_root, 0, sflags, space)
# file name root and starting number
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.Add(wx.StaticText(self, -1, "Optional new dir:"), 0, sflags, space)
self.tc_out_dir = wx.TextCtrl(self, -1, '')
hsizer.Add(self.tc_out_dir, 0, sflags, space)
hsizer.Add(wx.StaticText(self, -1, "Filename root:"), 0, sflags, space)
self.tc_out_fname = wx.TextCtrl(self, -1, 'gcam')
hsizer.Add(self.tc_out_fname, 0, sflags, space)
# hsizer.Add(wx.StaticText(self, -1, "File number start:"), 0, sflags, space)
sizer.Add(hsizer, 0, sflags, space)
self.cb_move_file = wx.CheckBox(self, -1, 'Move Input file, too')
sizer.Add(self.cb_move_file, 0, sflags, space)
# bind the button events to handlers
hsizer2 = wx.BoxSizer(wx.HORIZONTAL)
funbtn = wx.Button(self, -1, "Do it")
self.Bind(wx.EVT_BUTTON, self.evh_doit, funbtn)
hsizer2.Add(funbtn, 0, sflags, space)
btn = wx.Button(self, -1, "Close")
self.Bind(wx.EVT_BUTTON, self.evh_close, btn)
hsizer2.Add(btn, 0, sflags, space)
sizer.Add(hsizer2, 0, sflags, space)
self.SetSizer(sizer)
def evh_close(self, evt): #pylint: disable=unused-argument
"""Event handler for the button click."""
self.Close()
def evh_doit(self, evt): #pylint: disable=unused-argument
"""Event handler for the button click."""
self.SetStatusText('working...')
print ''
out_dir = self.file_browse_root.GetValue()
out_new_dir = self.tc_out_dir.GetValue()
out_dir = os.path.join(out_dir, out_new_dir)
b_success = pnc.get_photos(self.btn_infile.GetValue(),
out_dir, self.tc_out_fname.GetValue(),
self.cb_move_file.GetValue())
if b_success:
self.SetStatusText('Done!')
else:
self.SetStatusText('Failed')
def cback_infile(self, evt): #pylint: disable=unused-argument
""" dummy callback """
pass
def cback_file_root(self, evt): #pylint: disable=unused-argument
""" dummy callback """
pass
class MyApp(wx.App):
""" a simple GUI """
def OnInit(self): #pylint: disable=invalid-name
""" let's get this party started """
frame = MyFrame(None, "Panasonic .PNC to .JPG converter")
self.SetTopWindow(frame)
frame.Show(True)
return True
# app = MyApp(redirect=True)
app = MyApp() #pylint: disable=invalid-name
app.MainLoop()
| 0 | 0 |
f7357be79ed5cf787004c67c6e35b3966042133a | 659 | py | Python | ouch_server.py | jahinzee/theouchteam | 870767cae81ad37b4191ded64c3e83eb48be982a | [
"MIT"
] | 3 | 2022-01-09T02:40:31.000Z | 2022-02-01T03:57:40.000Z | ouch_server.py | jahinzee/theouchteam | 870767cae81ad37b4191ded64c3e83eb48be982a | [
"MIT"
] | null | null | null | ouch_server.py | jahinzee/theouchteam | 870767cae81ad37b4191ded64c3e83eb48be982a | [
"MIT"
] | 1 | 2022-01-21T08:05:27.000Z | 2022-01-21T08:05:27.000Z | import sys
from src.Exchange import Exchange
if __name__ == "__main__":
    exchange = None
    if len(sys.argv) == 2:
        if sys.argv[1] == "debug":
            # Exchange outputs using debug mode.
            exchange = Exchange(debug="dump")
        elif sys.argv[1] == "none":
            # Exchange won't output anything.
            exchange = Exchange(debug="none")
        else:
            # Bug fix: the accepted CLI values are 'debug' and 'none'; the old
            # message wrongly said 'dump' (the internal mode name).
            raise Exception("Command line argument should be either 'debug' or 'none'")
    else:
        # No argument: run the Exchange with its default output behaviour.
        exchange = Exchange()
    exchange.open_exchange()
    input() # Pressing the enter key will cause the server process to terminate.
exchange.close_exchange() | 32.95 | 86 | 0.608498 | import sys
from src.Exchange import Exchange
if __name__ == "__main__":
exchange = None
if len(sys.argv) == 2:
if sys.argv[1] == "debug":
# Exchange outputs using debug mode.
exchange = Exchange(debug="dump")
elif sys.argv[1] == "none":
# Exchange won't output anything.
exchange = Exchange(debug="none")
else:
raise Exception("Command line argument should be either 'dump' or 'none'")
else:
exchange = Exchange()
exchange.open_exchange()
input() # Pressing the enter key will cause the server process to terminate.
exchange.close_exchange() | 0 | 0 |
9f8dc9d33b7c561ec7708fe6db9f376c20086e3c | 42 | py | Python | ml-agents/mlagents/trainers/components/reward_signals/extrinsic/__init__.py | russellcaughey/ml-agents | 493c75bf683d35d512ae6fb57d4a1a332116df15 | [
"Apache-2.0"
] | 3 | 2018-09-18T13:40:29.000Z | 2019-02-14T07:30:09.000Z | ml-agents/mlagents/trainers/components/reward_signals/extrinsic/__init__.py | russellcaughey/ml-agents | 493c75bf683d35d512ae6fb57d4a1a332116df15 | [
"Apache-2.0"
] | 1 | 2020-04-27T01:52:49.000Z | 2020-04-27T01:52:49.000Z | ml-agents/mlagents/trainers/components/reward_signals/extrinsic/__init__.py | russellcaughey/ml-agents | 493c75bf683d35d512ae6fb57d4a1a332116df15 | [
"Apache-2.0"
] | 2 | 2019-09-10T16:05:48.000Z | 2020-07-24T20:40:26.000Z | from .signal import ExtrinsicRewardSignal
| 21 | 41 | 0.880952 | from .signal import ExtrinsicRewardSignal
| 0 | 0 |
a3dcdb967f844c2c93436cc07445e0c92c4d3a7d | 99 | py | Python | server_prod.py | techx/evolution-chamber | dea9b7d563df6f06d270078f5c512e3f7e367a92 | [
"MIT"
] | 4 | 2015-06-22T15:44:57.000Z | 2015-06-22T15:57:03.000Z | server_prod.py | techx/evolution-chamber | dea9b7d563df6f06d270078f5c512e3f7e367a92 | [
"MIT"
] | null | null | null | server_prod.py | techx/evolution-chamber | dea9b7d563df6f06d270078f5c512e3f7e367a92 | [
"MIT"
] | 2 | 2015-07-09T15:21:37.000Z | 2016-02-02T15:59:09.000Z | import server
if __name__ == "__main__":
    # Production entry point: Flask debug disabled, port 5000.
    # NOTE(review): host='0.0.0.0' exposes the app on every network
    # interface -- presumably it sits behind a reverse proxy; confirm
    # before deploying.
    server.app.run(host='0.0.0.0',port=5000,debug=False)
| 19.8 | 56 | 0.686869 | import server
if __name__ == "__main__":
server.app.run(host='0.0.0.0',port=5000,debug=False)
| 0 | 0 |
da72584d02e46192004671f6611a889c0dd3c753 | 2,533 | py | Python | datahub/email_ingestion/emails.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
] | 6 | 2019-12-02T16:11:24.000Z | 2022-03-18T10:02:02.000Z | datahub/email_ingestion/emails.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
] | 1,696 | 2019-10-31T14:08:37.000Z | 2022-03-29T12:35:57.000Z | datahub/email_ingestion/emails.py | Staberinde/data-hub-api | 3d0467dbceaf62a47158eea412a3dba827073300 | [
"MIT"
] | 9 | 2019-11-22T12:42:03.000Z | 2021-09-03T14:25:05.000Z | import tempfile
from logging import getLogger
import mailparser
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from datahub.documents import utils as documents
from datahub.interaction.email_processors.processors import CalendarInteractionEmailProcessor
logger = getLogger(__name__)
BUCKET_ID = 'mailbox'
def get_mail_docs_in_bucket():
    """Yield every mail document stored in the ingestion bucket.

    Each yielded dict carries ``source`` (the S3 object key) and
    ``content`` (the raw message bytes).  Raises ImproperlyConfigured
    when the mailbox bucket settings are absent or incomplete.
    """
    buckets = settings.DOCUMENT_BUCKETS
    if BUCKET_ID not in buckets:
        raise ImproperlyConfigured(f'Bucket "{BUCKET_ID}" is missing in settings')
    bucket_config = buckets[BUCKET_ID]
    if 'bucket' not in bucket_config:
        raise ImproperlyConfigured(f'Bucket "{BUCKET_ID}" not configured properly in settings')
    bucket_name = bucket_config['bucket']
    if not bucket_name:
        raise ImproperlyConfigured(
            f'Bucket "{BUCKET_ID}" bucket value not configured properly in settings',
        )
    s3_client = documents.get_s3_client_for_bucket(bucket_id=BUCKET_ID)
    pages = s3_client.get_paginator('list_objects').paginate(Bucket=bucket_name)
    for page in pages:
        for entry in page.get('Contents') or []:
            object_key = entry['Key']
            # Spool the object into a temporary file, then read it back as
            # a single bytes value.
            with tempfile.TemporaryFile(mode='w+b') as spool:
                s3_client.download_fileobj(Bucket=bucket_name, Key=object_key, Fileobj=spool)
                spool.seek(0)
                body = spool.read()
            yield {'source': object_key, 'content': body}
def process_ingestion_emails():
    """
    Gets all new mail documents in the bucket and process each message.

    Each document is deleted from the bucket first (so a crash mid-run
    cannot reprocess it), then parsed and handed to the calendar
    interaction processor.  The success message is only logged when the
    processor actually accepted the message.
    """
    processor = CalendarInteractionEmailProcessor()
    for message in get_mail_docs_in_bucket():
        source = message['source']
        try:
            documents.delete_document(bucket_id=BUCKET_ID, document_key=message['source'])
        except Exception as e:
            logger.exception('Error deleting message: "%s", error: "%s"', source, e)
            continue
        try:
            email = mailparser.parse_from_bytes(message['content'])
            processed, reason = processor.process_email(message=email)
            if not processed:
                logger.error('Error parsing message: "%s", error: "%s"', source, reason)
                # Bug fix: previously fell through to the success log below.
                continue
            logger.info(reason)
        except Exception as e:
            logger.exception('Error processing message: "%s", error: "%s"', source, e)
            # Bug fix: the "Successfully processed" line was also emitted
            # after an exception; skip it on the failure path.
            continue
        logger.info(
            'Successfully processed message "%s" and deleted document from bucket "%s"',
            source,
            BUCKET_ID,
        )
| 33.773333 | 95 | 0.649428 | import tempfile
from logging import getLogger
import mailparser
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from datahub.documents import utils as documents
from datahub.interaction.email_processors.processors import CalendarInteractionEmailProcessor
logger = getLogger(__name__)
BUCKET_ID = 'mailbox'
def get_mail_docs_in_bucket():
    """
    Yield every mail document stored in the ingestion bucket.

    Each yielded item is a dict with the S3 object key under ``'source'``
    and the raw message bytes under ``'content'``.

    Raises:
        ImproperlyConfigured: if the bucket is absent from, or incompletely
            described by, ``settings.DOCUMENT_BUCKETS``.
    """
    if BUCKET_ID not in settings.DOCUMENT_BUCKETS:
        raise ImproperlyConfigured(f'Bucket "{BUCKET_ID}" is missing in settings')
    bucket_config = settings.DOCUMENT_BUCKETS[BUCKET_ID]
    if 'bucket' not in bucket_config:
        raise ImproperlyConfigured(f'Bucket "{BUCKET_ID}" not configured properly in settings')
    bucket_name = bucket_config['bucket']
    if not bucket_name:
        raise ImproperlyConfigured(
            f'Bucket "{BUCKET_ID}" bucket value not configured properly in settings',
        )
    s3_client = documents.get_s3_client_for_bucket(bucket_id=BUCKET_ID)
    for page in s3_client.get_paginator('list_objects').paginate(Bucket=bucket_name):
        for entry in page.get('Contents') or []:
            document_key = entry['Key']
            # Spool each object through a temporary file so the full body is
            # read back as bytes without keeping a network stream open.
            with tempfile.TemporaryFile(mode='w+b') as buffer:
                s3_client.download_fileobj(Bucket=bucket_name, Key=document_key, Fileobj=buffer)
                buffer.seek(0)
                yield {'source': document_key, 'content': buffer.read()}
def process_ingestion_emails():
    """
    Fetch all new mail documents from the bucket and process each message.

    Each document is deleted from the bucket *before* processing so that a
    crash mid-run can never process the same message twice; a message whose
    deletion fails is skipped entirely.

    Bug fix: the "Successfully processed" log line previously fired even
    when parsing/processing raised or the processor rejected the message.
    It is now only emitted on actual success.
    """
    processor = CalendarInteractionEmailProcessor()
    for message in get_mail_docs_in_bucket():
        source = message['source']
        try:
            # Delete first so the message cannot be handled twice.
            documents.delete_document(bucket_id=BUCKET_ID, document_key=source)
        except Exception as e:
            logger.exception('Error deleting message: "%s", error: "%s"', source, e)
            continue
        try:
            email = mailparser.parse_from_bytes(message['content'])
            processed, reason = processor.process_email(message=email)
        except Exception as e:
            logger.exception('Error processing message: "%s", error: "%s"', source, e)
            continue
        if not processed:
            logger.error('Error parsing message: "%s", error: "%s"', source, reason)
            continue
        logger.info(reason)
        logger.info(
            'Successfully processed message "%s" and deleted document from bucket "%s"',
            source,
            BUCKET_ID,
        )
| 0 | 0 |
0af7288a9052da637b85d240b67185965f20ec48 | 1,105 | py | Python | classes/rooms.py | Loekring/Neversoft | a9e600131585741652b62b2dbbaa2febc1656843 | [
"MIT"
] | 1 | 2018-01-21T21:15:52.000Z | 2018-01-21T21:15:52.000Z | classes/rooms.py | Loekring/Neversoft | a9e600131585741652b62b2dbbaa2febc1656843 | [
"MIT"
] | null | null | null | classes/rooms.py | Loekring/Neversoft | a9e600131585741652b62b2dbbaa2febc1656843 | [
"MIT"
] | null | null | null | import random as r
offBoundsMsgs = ["Der er ikkje noko i den retninga.", "Du mtte ein vegg.", "Du kjem deg ikkje vidare i den retninga."]
roomSizeX, roomSizeY = 2, 1
class Rooms:
# Dette er baseklassa til allle romma
def __init__(self, name, smell, feel, taste, look, sound, jump):
self.name = name
self.smell = smell
self.feel = feel
self.taste = taste
self.look = look
self.sound = sound
self.jump = jump
def __str__(self):
return "Du er no i {}.".format(self.name)
def Roomsmell(self):
return "Rommet luktar {}.".format(self.smell)
def Roomfeel(self):
return "Du kjenner {}.".format(self.feel)
def Roomtaste(self):
return "Du sleikjer rundt om i rommet og kjenner smaken av {}.".format(self.taste)
def Roomlook(self):
return "Du ser rundt i rommet og ser {}.".format(self.look)
def Roomsound(self):
return "Om du er heilt stille kan du hre lyden av {}.".format(self.sound)
def Roomjump(self):
return "Du hoppar opp og {}.".format(self.jump)
| 31.571429 | 119 | 0.6181 | import random as r
offBoundsMsgs = ["Der er ikkje noko i den retninga.", "Du møtte ein vegg.", "Du kjem deg ikkje vidare i den retninga."]
roomSizeX, roomSizeY = 2, 1
class Rooms:
    """Base class for all rooms: holds one room's sensory descriptions."""

    def __init__(self, name, smell, feel, taste, look, sound, jump):
        self.name = name
        self.smell = smell
        self.feel = feel
        self.taste = taste
        self.look = look
        self.sound = sound
        self.jump = jump

    def __str__(self):
        return f"Du er no i {self.name}."

    def Roomsmell(self):
        return f"Rommet luktar {self.smell}."

    def Roomfeel(self):
        return f"Du kjenner {self.feel}."

    def Roomtaste(self):
        return f"Du sleikjer rundt om i rommet og kjenner smaken av {self.taste}."

    def Roomlook(self):
        return f"Du ser rundt i rommet og ser {self.look}."

    def Roomsound(self):
        return f"Om du er heilt stille kan du høre lyden av {self.sound}."

    def Roomjump(self):
        return f"Du hoppar opp og {self.jump}."
| 4 | 0 |
9c6cb2f62249c9249426fed5a021326cf26ae2cd | 3,970 | py | Python | pymatflow/vasp/scripts/vasp-dfpt.py | DeqiTang/pymatflow | bd8776feb40ecef0e6704ee898d9f42ded3b0186 | [
"MIT"
] | 6 | 2020-03-06T16:13:08.000Z | 2022-03-09T07:53:34.000Z | pymatflow/vasp/scripts/vasp-dfpt.py | DeqiTang/pymatflow | bd8776feb40ecef0e6704ee898d9f42ded3b0186 | [
"MIT"
] | 1 | 2021-10-02T02:23:08.000Z | 2021-11-08T13:29:37.000Z | pymatflow/vasp/scripts/vasp-dfpt.py | DeqiTang/pymatflow | bd8776feb40ecef0e6704ee898d9f42ded3b0186 | [
"MIT"
] | 1 | 2021-07-10T16:28:14.000Z | 2021-07-10T16:28:14.000Z | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
import os
import argparse
from pymatflow.vasp.dfpt import dfpt_run
"""
usage:
"""
params = {}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory", type=str, default="tmp-vasp-static",
help="directory of the static running")
parser.add_argument("-f", "--file", type=str,
help="the xyz file name")
parser.add_argument("--runopt", type=str, default="gen",
help="gen, run, or genrun")
parser.add_argument("--auto", type=int, default=3,
help="auto:0 nothing, 1: copying files to server, 2: copying and executing in remote server, 3: pymatflow used in server with direct submit, in order use auto=1, 2, you must make sure there is a working ~/.pymatflow/server_[pbs|yh].conf")
parser.add_argument("--mode", type=int, default=0,
choices=[0, 1],
help="run mode for dfpt. 0: brand new with a new directory; 1: continue in the existing directory")
# --------------------------------------------------------
# INCAR PARAMETERS
# --------------------------------------------------------
parser.add_argument("--prec", type=str, default="Normal",
choices=["Normal", "Accurate", "A", "N"],
help="PREC, default value: Normal")
parser.add_argument("--encut", type=int, default=300,
help="ENCUT, default value: 300 eV")
parser.add_argument("--ediff", type=float, default=1.0e-4,
help="EDIFF, default value: 1.0e-4")
parser.add_argument("--kpoints-mp", type=int, nargs="+",
default=[1, 1, 1, 0, 0, 0],
help="set kpoints like -k 1 1 1 0 0 0")
parser.add_argument("--ismear", type=int, default=0,
help="smearing type(methfessel-paxton(>0), gaussian(0), fermi-dirac(-1), tetra(-4), tetra-bloch-dorrected(-5)), default: 0")
parser.add_argument("--sigma", type=float, default=0.01,
help="determines the width of the smearing in eV.")
# -----------------------------------------------------------------
# ----------------------
# properties parametes
# ---------------------
#parser.add_argument("--lorbit", help="", type=int, default=None)
#parser.add_argument("--loptics", help="", type=str, default="FALSE")
# -----------------------------------------------------------------
# run params
# -----------------------------------------------------------------
parser.add_argument("--mpi", type=str, default="",
help="MPI command")
parser.add_argument("--server", type=str, default="pbs",
choices=["pbs", "yh", "lsf_sz"],
help="type of remote server, can be pbs or yh or lsf_sz")
parser.add_argument("--jobname", type=str, default="vasp-dfpt",
help="jobname on the pbs server")
parser.add_argument("--nodes", type=int, default=1,
help="Nodes used in server")
parser.add_argument("--ppn", type=int, default=32,
help="ppn of the server")
# ==========================================================
# transfer parameters from the arg parser to static_run setting
# ==========================================================
args = parser.parse_args()
params["PREC"] = args.prec
params["ENCUT"] = args.encut
params["EDIFF"] = args.ediff
params["ISMEAR"] = args.ismear
params["SIGMA"] = args.sigma
task = dfpt_run()
task.get_xyz(args.file)
task.set_params(params=params)
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.set_run(mpi=args.mpi, server=args.server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn)
task.dfpt(directory=args.directory, runopt=args.runopt, auto=args.auto, mode=args.mode)
| 36.090909 | 251 | 0.522418 | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
import os
import argparse
from pymatflow.vasp.dfpt import dfpt_run
"""
usage:
"""
# INCAR tags collected from the command line; handed to dfpt_run below.
params = {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory", type=str, default="tmp-vasp-static",
help="directory of the static running")
parser.add_argument("-f", "--file", type=str,
help="the xyz file name")
parser.add_argument("--runopt", type=str, default="gen",
help="gen, run, or genrun")
parser.add_argument("--auto", type=int, default=3,
help="auto:0 nothing, 1: copying files to server, 2: copying and executing in remote server, 3: pymatflow used in server with direct submit, in order use auto=1, 2, you must make sure there is a working ~/.pymatflow/server_[pbs|yh].conf")
parser.add_argument("--mode", type=int, default=0,
choices=[0, 1],
help="run mode for dfpt. 0: brand new with a new directory; 1: continue in the existing directory")
# --------------------------------------------------------
# INCAR PARAMETERS
# --------------------------------------------------------
parser.add_argument("--prec", type=str, default="Normal",
choices=["Normal", "Accurate", "A", "N"],
help="PREC, default value: Normal")
parser.add_argument("--encut", type=int, default=300,
help="ENCUT, default value: 300 eV")
parser.add_argument("--ediff", type=float, default=1.0e-4,
help="EDIFF, default value: 1.0e-4")
parser.add_argument("--kpoints-mp", type=int, nargs="+",
default=[1, 1, 1, 0, 0, 0],
help="set kpoints like -k 1 1 1 0 0 0")
parser.add_argument("--ismear", type=int, default=0,
help="smearing type(methfessel-paxton(>0), gaussian(0), fermi-dirac(-1), tetra(-4), tetra-bloch-dorrected(-5)), default: 0")
parser.add_argument("--sigma", type=float, default=0.01,
help="determines the width of the smearing in eV.")
# -----------------------------------------------------------------
# ----------------------
# properties parametes
# ---------------------
#parser.add_argument("--lorbit", help="", type=int, default=None)
#parser.add_argument("--loptics", help="", type=str, default="FALSE")
# -----------------------------------------------------------------
# run params
# -----------------------------------------------------------------
parser.add_argument("--mpi", type=str, default="",
help="MPI command")
parser.add_argument("--server", type=str, default="pbs",
choices=["pbs", "yh", "lsf_sz"],
help="type of remote server, can be pbs or yh or lsf_sz")
parser.add_argument("--jobname", type=str, default="vasp-dfpt",
help="jobname on the pbs server")
parser.add_argument("--nodes", type=int, default=1,
help="Nodes used in server")
parser.add_argument("--ppn", type=int, default=32,
help="ppn of the server")
# ==========================================================
# transfer parameters from the arg parser to static_run setting
# ==========================================================
args = parser.parse_args()
params["PREC"] = args.prec
params["ENCUT"] = args.encut
params["EDIFF"] = args.ediff
params["ISMEAR"] = args.ismear
params["SIGMA"] = args.sigma
task = dfpt_run()
task.get_xyz(args.file)
task.set_params(params=params)
task.set_kpoints(kpoints_mp=args.kpoints_mp)
task.set_run(mpi=args.mpi, server=args.server, jobname=args.jobname, nodes=args.nodes, ppn=args.ppn)
task.dfpt(directory=args.directory, runopt=args.runopt, auto=args.auto, mode=args.mode)
| 0 | 0 |
9d62cac37a74dba044cd1a53d16dc1255a546ab1 | 260 | py | Python | python5.py | audstanley/nodePythonProcessWatcher | cf3b707af81c837b99c5b2d955cf0d718e286e81 | [
"MIT"
] | null | null | null | python5.py | audstanley/nodePythonProcessWatcher | cf3b707af81c837b99c5b2d955cf0d718e286e81 | [
"MIT"
] | null | null | null | python5.py | audstanley/nodePythonProcessWatcher | cf3b707af81c837b99c5b2d955cf0d718e286e81 | [
"MIT"
] | null | null | null | from python5_unixSocket import interComs
myInterComs = interComs()
myInterComs.run()
import sys
from time import sleep
while True:
print("MESSAGES FROM PYTHON 5")
sys.stdout.flush()
myInterComs.send( {"wordDawg": "from python5"} )
sleep(0.500) | 23.636364 | 52 | 0.726923 | from python5_unixSocket import interComs
myInterComs = interComs()
myInterComs.run()
import sys
from time import sleep
while True:
print("MESSAGES FROM PYTHON 5")
sys.stdout.flush()
myInterComs.send( {"wordDawg": "from python5"} )
sleep(0.500) | 0 | 0 |
2c4146e35515d5d11823006c020a481717320a31 | 1,909 | py | Python | Revitron.tab/RPM.panel/Setup.pulldown/ProjectSetup.pushbutton/ProjectSetup_script.py | jmcouffin/revitron-ui | f67739488b504cdb0cabe36e088a40fe3cd2b282 | [
"MIT"
] | null | null | null | Revitron.tab/RPM.panel/Setup.pulldown/ProjectSetup.pushbutton/ProjectSetup_script.py | jmcouffin/revitron-ui | f67739488b504cdb0cabe36e088a40fe3cd2b282 | [
"MIT"
] | null | null | null | Revitron.tab/RPM.panel/Setup.pulldown/ProjectSetup.pushbutton/ProjectSetup_script.py | jmcouffin/revitron-ui | f67739488b504cdb0cabe36e088a40fe3cd2b282 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Define extensions to be used with this Revit model. Defined extensions can be installed by using the "Install Extensions" button.
"""
import revitron
import System.Windows
from pyrevit import script
from rpw.ui.forms import FlexForm, TextBox, Button, Label
def openHelp(sender, e):
script.open_url('https://revitron-ui.readthedocs.io/en/latest/tools/rpm.html')
if not revitron.Document().isFamily():
config = revitron.DocumentConfigStorage().get('rpm.extensions')
components = [
Label('You can define a list of pyRevit extensions to be used with the currently active model.\n'
'That list will be stored in the project information and therefore can be easily distributed\n'
'among other team members to easly create a common work environment.\n'
'To install or switch to the extension saved with your project just hit the "Install Extensions" button.\n\n'
'Enter one extension per line providing the type of the extension ("ui" or "lib")\n'
'and the repository URL separated by a TAB as follows:',
FontSize=14,
Height=140,
Width=650),
Label('ui https://ui-extension-repository.git\r\nlib https://lib-extension-repository.git',
FontFamily=System.Windows.Media.FontFamily('Consolas'),
FontSize=14,
Height=50,
Width=650),
TextBox('extensions',
Text=config,
TextWrapping=System.Windows.TextWrapping.Wrap,
AcceptsTab=True,
AcceptsReturn=True,
Multiline=True,
Height=200,
Width=650,
FontFamily=System.Windows.Media.FontFamily('Consolas'),
FontSize=14),
Button('Open Documentation', on_click=openHelp, Width=650),
Button('Save', Width=650)
]
form = FlexForm('Project Extensions', components)
form.show()
if 'extensions' in form.values:
revitron.DocumentConfigStorage().set('rpm.extensions', form.values.get('extensions'))
| 37.431373 | 130 | 0.707177 | # -*- coding: utf-8 -*-
"""
Define extensions to be used with this Revit model. Defined extensions can be installed by using the "Install Extensions" button.
"""
import revitron
import System.Windows
from pyrevit import script
from rpw.ui.forms import FlexForm, TextBox, Button, Label
# Click handler for the "Open Documentation" button: opens the Revitron RPM
# docs in the default browser. ``sender``/``e`` are the WPF event arguments.
def openHelp(sender, e):
    script.open_url('https://revitron-ui.readthedocs.io/en/latest/tools/rpm.html')
# Only meaningful for project models; family documents carry no RPM config.
if not revitron.Document().isFamily():
    # Previously stored newline-separated extension list (may be empty).
    config = revitron.DocumentConfigStorage().get('rpm.extensions')
    components = [
        Label('You can define a list of pyRevit extensions to be used with the currently active model.\n'
            'That list will be stored in the project information and therefore can be easily distributed\n'
            'among other team members to easly create a common work environment.\n'
            'To install or switch to the extension saved with your project just hit the "Install Extensions" button.\n\n'
            'Enter one extension per line providing the type of the extension ("ui" or "lib")\n'
            'and the repository URL separated by a TAB as follows:',
            FontSize=14,
            Height=140,
            Width=650),
        Label('ui https://ui-extension-repository.git\r\nlib https://lib-extension-repository.git',
            FontFamily=System.Windows.Media.FontFamily('Consolas'),
            FontSize=14,
            Height=50,
            Width=650),
        TextBox('extensions',
            Text=config,
            TextWrapping=System.Windows.TextWrapping.Wrap,
            AcceptsTab=True,
            AcceptsReturn=True,
            Multiline=True,
            Height=200,
            Width=650,
            FontFamily=System.Windows.Media.FontFamily('Consolas'),
            FontSize=14),
        Button('Open Documentation', on_click=openHelp, Width=650),
        Button('Save', Width=650)
    ]
    form = FlexForm('Project Extensions', components)
    form.show()
    # ``form.values`` only contains the textbox content when the form was
    # submitted via "Save"; simply closing the window leaves it unset.
    if 'extensions' in form.values:
        revitron.DocumentConfigStorage().set('rpm.extensions', form.values.get('extensions'))
| 0 | 0 |
d570100b492c0df602a33bf7fd31f800015b364c | 3,653 | py | Python | src/features/spectrum.py | vikigenius/neural_speaker_identification | a723290808d748daf65163b71aef2c5376319db3 | [
"MIT"
] | 1 | 2019-07-27T00:32:02.000Z | 2019-07-27T00:32:02.000Z | src/features/spectrum.py | vikigenius/neural_speaker_identification | a723290808d748daf65163b71aef2c5376319db3 | [
"MIT"
] | null | null | null | src/features/spectrum.py | vikigenius/neural_speaker_identification | a723290808d748daf65163b71aef2c5376319db3 | [
"MIT"
] | 1 | 2019-07-27T00:32:06.000Z | 2019-07-27T00:32:06.000Z | #!/usr/bin/env python
import logging
import numpy as np
import librosa
import scipy
from random import randint
from src.utils.math_utils import nextpow2
logger = logging.getLogger(__name__)
class Spectrum(object):
def __init__(self, hparams):
self.sample_freq = hparams.sample_freq
self.duration = hparams.duration
self.preprocess = hparams.preprocess
self.Tw = hparams.window_size
self.Ts = hparams.window_shift
self.win_type = hparams.window_type
if self.sample_freq == 16000:
self.dc_alpha = 0.99
elif self.sample_freq == 8000:
self.dc_alpha = 0.999
else:
raise ValueError('Only 16 and 8Khz supported')
self.pe_alpha = 0.97
def _sample(self, signal, seqlen: int):
"""
Helper function to sample a contiguos subsequence of
length seqlen from signal
Args:
signal: numpy.ndarray, the signal
seqlen: int, the sequence length
Returns:
numpy.ndarray, the sampled signal
"""
nframes = len(signal)
roffset = randint(0, nframes - seqlen)
sampled = signal[roffset:roffset+seqlen]
return sampled
def _get_resampled_chunks(self, afile: str):
"""
Takes in a string path afile and returns chunks of audio each
representing a 16-bit mono channel with sampling rate = 16000
Args:
afile: path of audio file
Returns:
List[np.ndarray]
"""
# Load the file
signal, _ = librosa.load(afile, sr=self.sample_freq, mono=True)
nframes = len(signal)
duration = nframes/self.sample_freq
if duration <= self.duration:
logger.warn(f'Duration less than specified for {afile}')
chunks = []
if duration > 2*self.duration:
# Can sample 2 chunks
mid = int(nframes/2)
chunks.append(signal[:mid])
chunks.append(signal[mid:])
else:
chunks.append(signal)
num_samples = int(self.duration*self.sample_freq)
chunks = [self._sample(chunk, num_samples) for chunk in chunks]
return chunks
def _preprocess(self, signal):
# Remove DC component and add a small dither
signal = scipy.signal.lfilter([1, -1], [1, -self.dc_alpha], signal)
dither = np.random.random_sample(
signal.shape) + np.random.random_sample(
signal.shape) - 1
spow = np.std(signal)
signal = signal + 1e-6*spow*dither
signal = scipy.signal.lfilter([1 - self.pe_alpha], 1, signal)
return signal
def generate(self, afile: str):
"""
Takes in a string path afile and returns a numpy nd array
representing the magnitude spectrum of the signal
Args:
afile: path of audio file
Returns:
numpy.ndarray
"""
resampled_chunks = self._get_resampled_chunks(afile)
if self.preprocess:
processed = [self._preprocess(chunk) for chunk in resampled_chunks]
else:
processed = resampled_chunks
# stft
sf = self.sample_freq
Tw = self.Tw # Window size
Ts = self.Ts
Nw = round(1e-3*Tw*sf)
Ns = round(1e-3*Ts*sf)
n_fft = 2**nextpow2(Nw)
spectrograms = [librosa.core.stft(
chunk, n_fft=n_fft,
hop_length=Ns, win_length=Nw,
window=self.win_type) for chunk in processed]
mag_specs = [np.abs(chunk) for chunk in spectrograms]
return mag_specs
| 30.957627 | 79 | 0.595401 | #!/usr/bin/env python
import logging
import numpy as np
import librosa
import scipy
from random import randint
from src.utils.math_utils import nextpow2
logger = logging.getLogger(__name__)
class Spectrum(object):
    """Magnitude-spectrogram extractor for fixed-duration audio samples.

    All settings come from ``hparams``: ``sample_freq`` (8000 or 16000 Hz),
    ``duration`` (seconds per sampled chunk), ``preprocess`` (bool),
    ``window_size``/``window_shift`` (STFT window in milliseconds) and
    ``window_type``.

    Fix over the original: uses ``logger.warning`` instead of the
    long-deprecated ``logger.warn`` alias.
    """

    def __init__(self, hparams):
        self.sample_freq = hparams.sample_freq
        self.duration = hparams.duration
        self.preprocess = hparams.preprocess
        self.Tw = hparams.window_size
        self.Ts = hparams.window_shift
        self.win_type = hparams.window_type
        # Pole of the DC-removal filter used in _preprocess; rate dependent.
        if self.sample_freq == 16000:
            self.dc_alpha = 0.99
        elif self.sample_freq == 8000:
            self.dc_alpha = 0.999
        else:
            raise ValueError('Only 16 and 8Khz supported')
        # Pre-emphasis coefficient.
        self.pe_alpha = 0.97

    def _sample(self, signal, seqlen: int):
        """
        Helper function to sample a contiguous subsequence of
        length seqlen from signal, starting at a random offset.

        Args:
            signal: numpy.ndarray, the signal
            seqlen: int, the sequence length

        Returns:
            numpy.ndarray, the sampled signal
        """
        nframes = len(signal)
        # NOTE(review): assumes nframes >= seqlen; a shorter signal makes
        # randint raise ValueError -- confirm upstream guarantees length.
        roffset = randint(0, nframes - seqlen)
        return signal[roffset:roffset + seqlen]

    def _get_resampled_chunks(self, afile: str):
        """
        Load ``afile`` as a mono track at ``self.sample_freq`` and cut out
        one or two random chunks of ``self.duration`` seconds: two (one per
        half) when the file exceeds twice the target duration, else one.

        Args:
            afile: path of audio file

        Returns:
            List[np.ndarray]
        """
        # Load the file
        signal, _ = librosa.load(afile, sr=self.sample_freq, mono=True)
        nframes = len(signal)
        duration = nframes / self.sample_freq
        if duration <= self.duration:
            logger.warning(f'Duration less than specified for {afile}')
        chunks = []
        if duration > 2 * self.duration:
            # Long file: sample one chunk from each half.
            mid = int(nframes / 2)
            chunks.append(signal[:mid])
            chunks.append(signal[mid:])
        else:
            chunks.append(signal)
        num_samples = int(self.duration * self.sample_freq)
        return [self._sample(chunk, num_samples) for chunk in chunks]

    def _preprocess(self, signal):
        """Remove the DC component, add a tiny dither and pre-emphasize."""
        signal = scipy.signal.lfilter([1, -1], [1, -self.dc_alpha], signal)
        dither = np.random.random_sample(
            signal.shape) + np.random.random_sample(
            signal.shape) - 1
        spow = np.std(signal)
        signal = signal + 1e-6 * spow * dither
        # Pre-emphasis filter.
        signal = scipy.signal.lfilter([1 - self.pe_alpha], 1, signal)
        return signal

    def generate(self, afile: str):
        """
        Takes in a string path afile and returns the magnitude spectra of
        one or two randomly sampled chunks of that file.

        Args:
            afile: path of audio file

        Returns:
            List[numpy.ndarray]
        """
        resampled_chunks = self._get_resampled_chunks(afile)
        if self.preprocess:
            processed = [self._preprocess(chunk) for chunk in resampled_chunks]
        else:
            processed = resampled_chunks
        # STFT parameters: window/shift are configured in milliseconds.
        sf = self.sample_freq
        Nw = round(1e-3 * self.Tw * sf)   # window length in samples
        Ns = round(1e-3 * self.Ts * sf)   # hop length in samples
        n_fft = 2 ** nextpow2(Nw)
        spectrograms = [librosa.core.stft(
            chunk, n_fft=n_fft,
            hop_length=Ns, win_length=Nw,
            window=self.win_type) for chunk in processed]
        return [np.abs(chunk) for chunk in spectrograms]
| 0 | 0 |
1e6f1908f373e61b9009dc90b043eed4cfc23a7c | 1,561 | py | Python | src/network.py | Renerick/python-neural-network | 552da90999622232d0e7061c690c972f7a2201d0 | [
"MIT"
] | 1 | 2019-08-12T09:15:12.000Z | 2019-08-12T09:15:12.000Z | src/network.py | Renerick/python-neural-network | 552da90999622232d0e7061c690c972f7a2201d0 | [
"MIT"
] | null | null | null | src/network.py | Renerick/python-neural-network | 552da90999622232d0e7061c690c972f7a2201d0 | [
"MIT"
] | null | null | null | import numpy as np
import pickle
@np.vectorize
def deriv(x):
return 1. if x > 0 else 0.
class Network:
def __init__(self, *args):
np.random.seed(1)
all_layers = args
self.hidden_layers = []
self.biases = []
for l in zip(all_layers, all_layers[1:]):
self.hidden_layers.append(np.random.rand(*l) * 2 - 1)
self.biases.append(np.random.rand(l[1]) * 2 - 1)
self.iteration = 0
self.epoch = 0
self.learning_rate = 0.1
def predict(self, input_data):
values = np.array(input_data)
for layer, bias in zip(self.hidden_layers, self.biases):
values = np.maximum(np.dot(values, layer) + bias, 0)
return values
def train(self, input_data, target):
self.iteration += 1
target = np.array(target)
prediction = self.predict(input_data)
for layer in self.hidden_layers[::-1]:
errors = target - prediction
gradients = deriv(prediction)
gradients *= errors
gradients *= self.learning_rate
delta = errors * gradients
print(target, prediction, errors, gradients, layer, delta)
target = layer
layer -= delta
prediction = layer
@staticmethod
def load(path="model.bin"):
f = open(path, 'rb')
network = pickle.load(f)
f.close()
return network
def save(self, path="model.bin"):
f = open(path, 'wb')
pickle.dump(self, f)
f.close()
| 26.457627 | 70 | 0.559257 | import numpy as np
import pickle
@np.vectorize
def deriv(x):
    """Elementwise ReLU derivative: 1.0 for positive inputs, else 0.0."""
    return float(x > 0)
class Network:
    """A small fully connected feed-forward network with ReLU activations.

    ``Network(n_in, n_hidden, ..., n_out)`` creates one weight matrix and
    one bias vector per consecutive pair of layer sizes, initialised
    uniformly in [-1, 1) with a fixed RNG seed for reproducibility.

    Fix over the original: ``load``/``save`` now close the file via a
    context manager even when pickling raises (the handle used to leak).
    """

    def __init__(self, *args):
        # Fixed seed keeps weight initialisation reproducible across runs.
        np.random.seed(1)
        all_layers = args
        self.hidden_layers = []
        self.biases = []
        for shape in zip(all_layers, all_layers[1:]):
            self.hidden_layers.append(np.random.rand(*shape) * 2 - 1)
            self.biases.append(np.random.rand(shape[1]) * 2 - 1)
        self.iteration = 0
        self.epoch = 0
        self.learning_rate = 0.1

    def predict(self, input_data):
        """Forward pass: values = ReLU(values @ W + b) through each layer."""
        values = np.array(input_data)
        for layer, bias in zip(self.hidden_layers, self.biases):
            values = np.maximum(np.dot(values, layer) + bias, 0)
        return values

    def train(self, input_data, target):
        """Single training step (back-propagation sketch).

        NOTE(review): this update rule looks incorrect -- it reuses a weight
        matrix as the next target and subtracts ``delta`` from it directly,
        and prints debug state each step. Kept byte-for-byte to preserve
        behaviour; review before serious use.
        """
        self.iteration += 1
        target = np.array(target)
        prediction = self.predict(input_data)
        for layer in self.hidden_layers[::-1]:
            errors = target - prediction
            gradients = deriv(prediction)
            gradients *= errors
            gradients *= self.learning_rate
            delta = errors * gradients
            print(target, prediction, errors, gradients, layer, delta)
            target = layer
            layer -= delta
            prediction = layer

    @staticmethod
    def load(path="model.bin"):
        """Load and return a pickled Network from ``path``."""
        with open(path, 'rb') as f:
            return pickle.load(f)

    def save(self, path="model.bin"):
        """Pickle this network to ``path``."""
        with open(path, 'wb') as f:
            pickle.dump(self, f)
| 0 | 0 |
01fd056ce41c1c67b73640a90525a86f7223ab98 | 51,070 | py | Python | backend/grafit/migrations/0003_load_data.py | fossabot/grafit | c7328cc7ed4d37d36fc735944aa8763fad090d97 | [
"MIT"
] | 16 | 2018-10-12T16:33:52.000Z | 2020-06-23T20:11:34.000Z | backend/grafit/migrations/0003_load_data.py | fossabot/grafit | c7328cc7ed4d37d36fc735944aa8763fad090d97 | [
"MIT"
] | 41 | 2018-10-14T21:28:38.000Z | 2021-06-10T22:01:45.000Z | backend/grafit/migrations/0003_load_data.py | fossabot/grafit | c7328cc7ed4d37d36fc735944aa8763fad090d97 | [
"MIT"
] | 4 | 2018-10-28T10:47:26.000Z | 2020-07-20T04:17:04.000Z | # Generated by Django 2.1.2 on 2018-10-25 09:36
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
dependencies = [
('grafit', '0002_article'),
]
operations = [
migrations.RunSQL("""
INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License.
10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6]
On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries
MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size.
Indexing
Fields in a MongoDB document can be indexed with primary and secondary indices.
Replication
MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default.
Load balancing[10]
MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard enabling an even data distribution.
MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. ');
INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to "non SQL" or "non relational")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. Such databases have existed since the late 1960s, but did not obtain the "NoSQL" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called "Not only SQL" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8]
Motivations for this approach include: simplicity of design, simpler "horizontal" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as "more flexible" than relational database tables.[9]
Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.)
Instead, most NoSQL databases offer a concept of "eventual consistency" in which database changes are propagated to all nodes "eventually" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases "do not allow referential integrity constraints to span databases."[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. ');
INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16]
Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because "SEQUEL" was a trademark of the UK-based Hawker Siddeley aircraft company.[17]
In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard "Database Language SQL" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] ');
INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. "Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system."
"We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services."
"We found that MySQL was the best database in terms of the price-point and functionality it offers up. The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs."');
INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdminone of the most popular applications for managing the MySQL databasewhich could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link.
Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7).
Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action.
According to an advisory released by phpMyAdmin, "by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc."
phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms.
Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases.
Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link.
"A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attackers choice." Barot explains in a blog post.
However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table.
"If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name," Barot says. "This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc."
Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible.
');
INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed.
When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design.
"Who care about database design? What about mockups? What about workflows?"
Let me tell you about "Bob''s Luxury Goods." I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a "one-to-many" relationship between customers and addresses.
That was their first problem. A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as "in use" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES.
We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was "cheaper" to remove the restriction on "flagged" addresses and allow a duplicate address to be used.
Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the "typo", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it.
That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this.
Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer.
"Curtis," they said, "just enter a dummy customer called ''Occupant'' and attach all addresses to that."
Except you couldn''t enter a customer without an order.
Except you couldn''t enter an order without at least one item on it.
Except you couldn''t enter an item unless it was listed in inventory.
Except that reserved the "inventory" item and made it unavailable.
Except, except, except ...
It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a "paid" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember.
Then, and only then, could I write the code to provide "generic" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them.
If they had simply had a proper database design up front, they could have reused their existing system with little trouble.
That''s what bad database design costs you and why I usually start with that before writing my software.
Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.');
INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. Configuring the apps and services that make up your development cycle takes significant time and effort. GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub.
Learn more about actions
As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During todays Universe keynote, we heard directly from developers, and were excited to share their work with you');
INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', '
The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves.
Until youve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update.
Affected products
GitHub Desktop
GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app.
Atom
Atom included the same embedded Git and was also affected. Releases 1.31.2 and 1.32.0-beta3 include the patch.
Ensure youre on the latest Atom release by completing any of the following:
Windows: From the toolbar, click Help -> Check for Updates
MacOS: From the menu bar, click Atom -> Check for Update
Linux: Update manually by downloading the latest release from atom.io
Git on the command line and other clients
In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other.
Additional notes
Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9.
Details of the vulnerability
This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh programspawned by Gitwould then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself.
The problem was reported on September 23 by @joernchen, both to Gits private security list, as well as to GitHubs Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix.
The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. We also implemented detection of potentially malicious submodules as part of Gits object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability).
The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands.
We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. Were happy to report that no instances were found (and now, with our detection, none can be added).
Please update your copy of Git soon, and happy cloning!
');
INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet.
The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variantsHex, Hanako, and Taylortargeting different MS SQL and MySQL servers for both Windows and Linux.
The goals of all the three variants are differentHex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet.
So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most compromised machines are based in China, and some in Thailand, the United States, Japan and others.
To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs.
What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.');
INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres.
rbanffy on Aug 18, 2012 [-]
I think this would be a mistake.
This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle.
All that is lost is the MySQL name and brand.
PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now.
Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this.
Udo on Aug 18, 2012 [-]
I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. Show some solidarity with a fellow open source project!
MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing.
Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem.
Hence, sensational and petulant "RIP $PRODUCTNAME" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own.
The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. ');
INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts?
Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.');
INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code.
Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files.
');
INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness.
This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming.
What is PostgreSQL?
PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge.
A Brief History of PostgreSQL
PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates.
1977-1985 A project called INGRES was developed.
Proof-of-concept for relational databases
Established the company Ingres in 1980
Bought by Computer Associates in 1994
1986-1994 POSTGRES
Development of the concepts in INGRES with a focus on object orientation and the query language - Quel
The code base of INGRES was not used as a basis for POSTGRES
Commercialized as Illustra (bought by Informix, bought by IBM)
1994-1995 Postgres95
Support for SQL was added in 1994
Released as Postgres95 in 1995
Re-released as PostgreSQL 6.0 in 1996
Establishment of the PostgreSQL Global Development Team
Key Features of PostgreSQL
PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC).
PostgreSQL supports a large part of the SQL standard and offers many modern features including the following
Complex SQL queries
SQL Sub-selects
Foreign keys
Trigger
Views
Transactions
Multiversion concurrency control (MVCC)
Streaming Replication (as of 9.0)
Hot Standby (as of 9.0)
You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example by adding new
Data types
Functions
Operators
Aggregate functions
Index methods
Procedural Languages Support
PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.');
INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup.
I have a lot of experience using Microsofts SQL Server, but up until now, I havent touched PostgreSQL. For personal projects SQL Servers cost and be prohibitive and the release of Robs book added up to a good time to give PostgreSQL a try.
Install Directly or not?
On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Robs book, he suggests using a VM for Postgres installation on Windows. This kicked off a lot of searching on my part and didnt find a good definitive answer on why that is or isnt the way to do.
In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM.
Installing Docker
Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled.
After logging back in I then got the following message about hardware-assisted virtualization not being enabled.
After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen.
Open a command prompt and run the following command.
docker run hello-world
You should output that starts with the following if your installation is working.
Hello from Docker!
This message shows that your installation appears to be working correctly.
What about Postgres?
Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host.
docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres
The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container.
docker create -v /var/lib/postgresql/data --name PostgresData alpine
The above creates a container named PostgresData based on the Alpine image. It is important that the -v parameter matches the path that Postgres expects.
Now that we have a container that will keep our data safe lets create the actual Postgres container with the following command.
docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres
The only difference from the first example run command is the addition of volumes-from PostgresData which tells the container to use the PostgresData container.
If you run the docker ps -a command it will show you all your containers.
As you can see in my example I have two containers only one of which is actually running. Make sure you dont remove the data container just because it will never show as running.
');
INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it.
The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. The translation to other platforms should be simple.)
Step 1: Install PostgreSQL
Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution):
Open a terminal window.
Issue the command sudo apt-get install postgresql.
Type the sudo password necessary to give you admin rights and hit Enter.
Allow apt to pick up any necessary dependencies.
Once the installation is complete, it''s time to set this baby up.
Step 2: Change the default user password
Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure.
Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so:
Open a terminal window.
Issue the command sudo passwd postgres.
Type (and confirm) that password to be used for this user.
The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like:
postgres=#
All other users have to gain access to the prompt like so:
psql DB_NAME
where DB_NAME is the name of an existing database.
');
INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL.
This seems to be the journey:
1. Lack of migrations is awesome! We can iterate so quickly for MVP
2. Get users
3. Add features, still enjoying the speed of iteration
4. Get more users
5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts)
6. Realise you desperately need joins, transactions and other SQL features
7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back.
I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?
My thought is definitely yes.
brandur on Aug 29, 2017 [-]
> I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?
I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project.
The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this.
The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about.
Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week.
martinald on Aug 29, 2017 [-]
I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away.
I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done.
Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly.
I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly.
I think I assumed the "crowd" had done the tech due diligence on this stuff and it definitely wasn''t the case. ');
INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen the company behind MongoDB moved into a large 30,000 square foot office in Midtown Manhattan.
The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona and a requisite west coast headquarters in Palo Alto.
Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDBs success among startups was because some didn''t critically assess 10gens marketing message.
As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing attacks.1 Today, developer marketing is subtle third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial.
A large amount of "engineering" content even when written by engineers is actually marketing, rather than thoughtful content whose aim is to help you make the best decision.
Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. Now, let''s take a look into 10gen''s marketing strategy as told by their employees.2
10gens marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.');
INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts.
I could see myself there in a bowler hat with a fistful of racing chits screaming go, Postgres, go.
Id love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices.
Red Bull could sponsor it. Id buy a T-shirt.
kbenson 8 months ago [-]
That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs.
You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something.
If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). That would be an amazing resource.
Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already...
1: https://github.com/gothinkster/realworld
2: https://www.techempower.com/benchmarks/
etxm 8 months ago [-]
Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how youd cheat :)
It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time.
It would be cool to see things like disaster recovery and chaos proofing as well. ');
INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, weve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community.
Leveraging the community
There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed.
On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We cant manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release.
For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently arent covered by any known CVE in our system.
Always quality focused
No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert.
Learn more');
INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query.
Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack.
Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads.
maxxxxx 8 months ago [-]
Agreed. Switching to another system is expensive and the benefit is pretty questionable.
emsy 8 months ago [-]
Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once.
TremendousJudge 8 months ago [-]
expand, please?
maxxxxx 8 months ago [-]
I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure.
In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other.
gopalv 8 months ago [-]
> Depending on your data some databases may be better than others and that should be easy to measure.
And the performance difference could be an accidental feature of the design and completely unintentional.
Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated.
Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs).
When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural.
And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two.
Though how it came about isn''t really intentional. ');
"""),
]
| 100.928854 | 1,002 | 0.739495 | # Generated by Django 2.1.2 on 2018-10-25 09:36
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
dependencies = [
('grafit', '0002_article'),
]
operations = [
migrations.RunSQL("""
INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License.
10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6]
On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries
MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size.
Indexing
Fields in a MongoDB document can be indexed with primary and secondary indices.
Replication
MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default.
Load balancing[10]
MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution.
MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. ');
INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to "non SQL" or "non relational")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. Such databases have existed since the late 1960s, but did not obtain the "NoSQL" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called "Not only SQL" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8]
Motivations for this approach include: simplicity of design, simpler "horizontal" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as "more flexible" than relational database tables.[9]
Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.)
Instead, most NoSQL databases offer a concept of "eventual consistency" in which database changes are propagated to all nodes "eventually" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases "do not allow referential integrity constraints to span databases."[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. ');
INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16]
Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because "SEQUEL" was a trademark of the UK-based Hawker Siddeley aircraft company.[17]
In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard "Database Language SQL" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] ');
INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. "Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system."
"We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services."
"We found that MySQL was the best database in terms of the price-point and functionality it offers up. The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs."');
INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link.
Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7).
Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action.
According to an advisory released by phpMyAdmin, "by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc."
phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms.
Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases.
Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link.
"A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice." Barot explains in a blog post.
However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table.
"If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name," Barot says. "This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc."
Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible.
');
INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed.
When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design.
"Who care about database design? What about mockups? What about workflows?"
Let me tell you about "Bob''s Luxury Goods." I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a "one-to-many" relationship between customers and addresses.
That was their first problem. A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as "in use" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES.
We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was "cheaper" to remove the restriction on "flagged" addresses and allow a duplicate address to be used.
Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the "typo", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it.
That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this.
Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer.
"Curtis," they said, "just enter a dummy customer called ''Occupant'' and attach all addresses to that."
Except you couldn''t enter a customer without an order.
Except you couldn''t enter an order without at least one item on it.
Except you couldn''t enter an item unless it was listed in inventory.
Except that reserved the "inventory" item and made it unavailable.
Except, except, except ...
It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a "paid" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember.
Then, and only then, could I write the code to provide "generic" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them.
If they had simply had a proper database design up front, they could have reused their existing system with little trouble.
That''s what bad database design costs you and why I usually start with that before writing my software.
Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.');
INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. Configuring the apps and services that make up your development cycle takes significant time and effort. GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub.
Learn more about actions
As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you');
INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', '
The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves.
Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update.
Affected products
GitHub Desktop
GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app.
Atom
Atom included the same embedded Git and was also affected. Releases 1.31.2 and 1.32.0-beta3 include the patch.
Ensure you’re on the latest Atom release by completing any of the following:
Windows: From the toolbar, click Help -> Check for Updates
MacOS: From the menu bar, click Atom -> Check for Update
Linux: Update manually by downloading the latest release from atom.io
Git on the command line and other clients
In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other.
Additional notes
Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9.
Details of the vulnerability
This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself.
The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix.
The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability).
The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands.
We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added).
Please update your copy of Git soon, and happy cloning!
');
INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet.
The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux.
The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet.
So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most compromised machines are based in China, and some in Thailand, the United States, Japan and others.
To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs.
What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.');
INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres.
rbanffy on Aug 18, 2012 [-]
I think this would be a mistake.
This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle.
All that is lost is the MySQL name and brand.
PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now.
Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this.
Udo on Aug 18, 2012 [-]
I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. Show some solidarity with a fellow open source project!
MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing.
Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem.
Hence, sensational and petulant "RIP $PRODUCTNAME" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own.
The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. ');
INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts?
Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.');
INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code.
Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files.
');
INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness.
This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming.
What is PostgreSQL?
PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge.
A Brief History of PostgreSQL
PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates.
1977-1985 − A project called INGRES was developed.
Proof-of-concept for relational databases
Established the company Ingres in 1980
Bought by Computer Associates in 1994
1986-1994 − POSTGRES
Development of the concepts in INGRES with a focus on object orientation and the query language - Quel
The code base of INGRES was not used as a basis for POSTGRES
Commercialized as Illustra (bought by Informix, bought by IBM)
1994-1995 − Postgres95
Support for SQL was added in 1994
Released as Postgres95 in 1995
Re-released as PostgreSQL 6.0 in 1996
Establishment of the PostgreSQL Global Development Team
Key Features of PostgreSQL
PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC).
PostgreSQL supports a large part of the SQL standard and offers many modern features including the following −
Complex SQL queries
SQL Sub-selects
Foreign keys
Trigger
Views
Transactions
Multiversion concurrency control (MVCC)
Streaming Replication (as of 9.0)
Hot Standby (as of 9.0)
You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example by adding new −
Data types
Functions
Operators
Aggregate functions
Index methods
Procedural Languages Support
PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.');
INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup.
I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try.
Install Directly or not?
On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do.
In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM.
Installing Docker
Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled.
After logging back in I then got the following message about hardware-assisted virtualization not being enabled.
After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen.
Open a command prompt and run the following command.
docker run hello-world
You should output that starts with the following if your installation is working.
Hello from Docker!
This message shows that your installation appears to be working correctly.
What about Postgres?
Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host.
docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres
The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container.
docker create -v /var/lib/postgresql/data --name PostgresData alpine
The above creates a container named PostgresData based on the Alpine image. It is important that the -v parameter matches the path that Postgres expects.
Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command.
docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres
The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container.
If you run the docker ps -a command it will show you all your containers.
As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running.
');
INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it.
The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. The translation to other platforms should be simple.)
Step 1: Install PostgreSQL
Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution):
Open a terminal window.
Issue the command sudo apt-get install postgresql.
Type the sudo password necessary to give you admin rights and hit Enter.
Allow apt to pick up any necessary dependencies.
Once the installation is complete, it''s time to set this baby up.
Step 2: Change the default user password
Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure.
Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so:
Open a terminal window.
Issue the command sudo passwd postgres.
Type (and confirm) that password to be used for this user.
The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like:
postgres=#
All other users have to gain access to the prompt like so:
psql DB_NAME
where DB_NAME is the name of an existing database.
');
INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL.
This seems to be the journey:
1. Lack of migrations is awesome! We can iterate so quickly for MVP
2. Get users
3. Add features, still enjoying the speed of iteration
4. Get more users
5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts)
6. Realise you desperately need joins, transactions and other SQL features
7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back.
I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?
My thought is definitely yes.
brandur on Aug 29, 2017 [-]
> I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?
I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project.
The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this.
The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about.
Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week.
martinald on Aug 29, 2017 [-]
I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away.
I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done.
Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly.
I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly.
I think I assumed the "crowd" had done the tech due diligence on this stuff and it definitely wasn''t the case. ');
INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan.
The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto.
Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message.
As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial.
A large amount of "engineering" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision.
Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2
10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.');
INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts.
I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.”
I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices.
Red Bull could sponsor it. I’d buy a T-shirt.
kbenson 8 months ago [-]
That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs.
You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something.
If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). That would be an amazing resource.
Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already...
1: https://github.com/gothinkster/realworld
2: https://www.techempower.com/benchmarks/
etxm 8 months ago [-]
Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how you’d cheat :)
It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time.
It would be cool to see things like disaster recovery and chaos proofing as well. ');
INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community.
Leveraging the community
There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed.
On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release.
For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system.
Always quality focused
No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert.
Learn more');
INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query.
Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack.
Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads.
maxxxxx 8 months ago [-]
Agreed. Switching to another system is expensive and the benefit is pretty questionable.
emsy 8 months ago [-]
Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once.
TremendousJudge 8 months ago [-]
expand, please?
maxxxxx 8 months ago [-]
I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure.
In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other.
gopalv 8 months ago [-]
> Depending on your data some databases may be better than others and that should be easy to measure.
And the performance difference could be an accidental feature of the design and completely unintentional.
Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated.
Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs).
When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural.
And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two.
Though how it came about isn''t really intentional. ');
"""),
]
| 159 | 0 |
ddfea5bd5d0e0cf8608cb0a07599e5e6b06f933e | 494 | py | Python | Python Script Tools/18.0 Create Dataframe And Store It In a CSV.py | juan1305/0.11-incremento_descremento | 954ddb32180c3197e5b01cf95d20f5325ada8a29 | [
"MIT"
] | 1 | 2020-04-13T00:16:16.000Z | 2020-04-13T00:16:16.000Z | Python Script Tools/18.0 Create Dataframe And Store It In a CSV.py | juan1305/0.11-incremento_descremento | 954ddb32180c3197e5b01cf95d20f5325ada8a29 | [
"MIT"
] | null | null | null | Python Script Tools/18.0 Create Dataframe And Store It In a CSV.py | juan1305/0.11-incremento_descremento | 954ddb32180c3197e5b01cf95d20f5325ada8a29 | [
"MIT"
] | null | null | null | import pandas as pd
# Crear diccionario donde key sera columna a crear
# y su valuela informacion de cada columna
data = {'paises': ['Mexico', 'España', 'Estados Unidos'],
'Ciudades': ['Monterrey,' 'Madrid', 'Nueva York'],
'Casos': [4291, 3829, 10283]}
# Crear un DataFrame pasando el diccioario y
# señalizar las columnas creadas
df = pd.DataFrame(data, columns=['paises', 'Ciudades', 'Casos'])
# Imprimir la info
print(df)
# Almacenar en archivo CSV
df.to_csv('myDataFrame.csv') | 27.444444 | 64 | 0.700405 | import pandas as pd
# Crear diccionario donde key sera columna a crear
# y su valuela informacion de cada columna
data = {'paises': ['Mexico', 'España', 'Estados Unidos'],
'Ciudades': ['Monterrey,' 'Madrid', 'Nueva York'],
'Casos': [4291, 3829, 10283]}
# Crear un DataFrame pasando el diccioario y
# señalizar las columnas creadas
df = pd.DataFrame(data, columns=['paises', 'Ciudades', 'Casos'])
# Imprimir la info
print(df)
# Almacenar en archivo CSV
df.to_csv('myDataFrame.csv') | 4 | 0 |
d3b313c3dd0ec4a73ea6c33bd5b776e0285a4fc6 | 30,581 | py | Python | pxr/usd/usdLux/testenv/testUsdLuxLight.py | yurivict/USD | 3b097e3ba8fabf1777a1256e241ea15df83f3065 | [
"Apache-2.0"
] | 1 | 2021-09-25T12:49:37.000Z | 2021-09-25T12:49:37.000Z | pxr/usd/usdLux/testenv/testUsdLuxLight.py | yurivict/USD | 3b097e3ba8fabf1777a1256e241ea15df83f3065 | [
"Apache-2.0"
] | null | null | null | pxr/usd/usdLux/testenv/testUsdLuxLight.py | yurivict/USD | 3b097e3ba8fabf1777a1256e241ea15df83f3065 | [
"Apache-2.0"
] | 1 | 2018-10-03T19:08:33.000Z | 2018-10-03T19:08:33.000Z | #!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
from __future__ import print_function
from pxr import Gf, Sdf, Sdr, Tf, Usd, UsdGeom, UsdLux, UsdShade, Plug
import unittest, math
class TestUsdLuxLight(unittest.TestCase):
def test_BlackbodySpectrum(self):
    """Sanity-check UsdLux.BlackbodyTemperatureAsRgb across the temperature
    range: warm temperatures skew red, the 6500K whitepoint is near white,
    and cool temperatures skew blue.
    """
    warm_color = UsdLux.BlackbodyTemperatureAsRgb(1000)
    whitepoint = UsdLux.BlackbodyTemperatureAsRgb(6500)
    cool_color = UsdLux.BlackbodyTemperatureAsRgb(10000)
    # Use unittest assertion methods rather than bare `assert` so the
    # checks survive `python -O` (which strips asserts) and report the
    # offending values on failure.
    # Whitepoint is ~= (1,1,1)
    self.assertTrue(Gf.IsClose(whitepoint, Gf.Vec3f(1.0), 0.1))
    # Warm has more red than green or blue
    self.assertGreater(warm_color[0], warm_color[1])
    self.assertGreater(warm_color[0], warm_color[2])
    # Cool has more blue than red or green
    self.assertGreater(cool_color[2], cool_color[0])
    self.assertGreater(cool_color[2], cool_color[1])
def test_BasicConnectableLights(self):
    """Exercise the UsdShade connectability interface on UsdLux types.

    Covers: built-in vs. authored inputs/outputs on a RectLight and a
    LightFilter; the relaxed connection rules lux inputs use (connectable
    across their own scope, no encapsulation requirement); and the extra
    connectable attributes contributed by the applied Shaping and Shadow
    API schemas.
    """
    # Try checking connectableAPI on core lux types first before going
    # through the prim.
    self.assertTrue(UsdShade.ConnectableAPI.HasConnectableAPI(
        UsdLux.RectLight))
    self.assertTrue(UsdShade.ConnectableAPI.HasConnectableAPI(
        UsdLux.PluginLightFilter))

    stage = Usd.Stage.CreateInMemory()
    rectLight = UsdLux.RectLight.Define(stage, '/RectLight')
    self.assertTrue(rectLight)
    lightAPI = rectLight.LightAPI()
    self.assertTrue(lightAPI)
    self.assertTrue(lightAPI.ConnectableAPI())

    # Rect light has the following built-in inputs attributes.
    inputNames = ['color',
                  'colorTemperature',
                  'diffuse',
                  'enableColorTemperature',
                  'exposure',
                  'height',
                  'intensity',
                  'normalize',
                  'specular',
                  'texture:file',
                  'width']
    # GetInputs returns only authored inputs by default
    self.assertEqual(lightAPI.GetInputs(), [])
    # GetInputs(false) is a super-set of all the built-ins.
    # There could be other inputs coming from any auto applied APISchemas.
    allInputs = [inputName.GetBaseName() for inputName in
                 lightAPI.GetInputs(onlyAuthored=False)]
    self.assertTrue(set(inputNames).issubset(set(allInputs)))
    # Verify each input's attribute is prefixed.
    for name in inputNames:
        self.assertEqual(lightAPI.GetInput(name).GetAttr().GetName(),
                         "inputs:" + name)
    # Verify input attributes match the getter API attributes.
    self.assertEqual(lightAPI.GetInput('color').GetAttr(),
                     rectLight.GetColorAttr())
    self.assertEqual(lightAPI.GetInput('texture:file').GetAttr(),
                     rectLight.GetTextureFileAttr())

    # Create a new input, and verify that the input interface conforming
    # attribute is created.
    lightInput = lightAPI.CreateInput('newInput', Sdf.ValueTypeNames.Float)
    self.assertIn(lightInput, lightAPI.GetInputs())
    # By default GetInputs() returns onlyAuthored inputs, of which
    # there is now 1.
    self.assertEqual(len(lightAPI.GetInputs()), 1)
    self.assertEqual(lightAPI.GetInput('newInput'), lightInput)
    self.assertEqual(lightInput.GetAttr(),
                     lightAPI.GetPrim().GetAttribute("inputs:newInput"))

    # Rect light has no authored outputs.
    self.assertEqual(lightAPI.GetOutputs(), [])
    # Rect light has no built-in outputs, either.
    self.assertEqual(lightAPI.GetOutputs(onlyAuthored=False), [])

    # Create a new output, and verify that the output interface conforming
    # attribute is created.
    lightOutput = lightAPI.CreateOutput('newOutput', Sdf.ValueTypeNames.Float)
    self.assertEqual(lightAPI.GetOutputs(), [lightOutput])
    self.assertEqual(lightAPI.GetOutputs(onlyAuthored=False), [lightOutput])
    self.assertEqual(lightAPI.GetOutput('newOutput'), lightOutput)
    self.assertEqual(lightOutput.GetAttr(),
                     lightAPI.GetPrim().GetAttribute("outputs:newOutput"))

    # Do the same with a light filter
    lightFilter = UsdLux.LightFilter.Define(stage, '/LightFilter')
    self.assertTrue(lightFilter)
    self.assertTrue(lightFilter.ConnectableAPI())

    # Light filter has no built-in inputs.
    self.assertEqual(lightFilter.GetInputs(), [])
    # Create a new input, and verify that the input interface conforming
    # attribute is created.
    filterInput = lightFilter.CreateInput('newInput',
                                          Sdf.ValueTypeNames.Float)
    self.assertEqual(lightFilter.GetInputs(), [filterInput])
    self.assertEqual(lightFilter.GetInput('newInput'), filterInput)
    self.assertEqual(filterInput.GetAttr(),
                     lightFilter.GetPrim().GetAttribute("inputs:newInput"))

    # Light filter has no built-in outputs.
    self.assertEqual(lightFilter.GetOutputs(), [])
    self.assertEqual(lightFilter.GetOutputs(onlyAuthored=False), [])
    # Create a new output, and verify that the output interface conforming
    # attribute is created.
    filterOutput = lightFilter.CreateOutput('newOutput',
                                            Sdf.ValueTypeNames.Float)
    self.assertEqual(lightFilter.GetOutputs(), [filterOutput])
    self.assertEqual(lightFilter.GetOutputs(onlyAuthored=False),
                     [filterOutput])
    self.assertEqual(lightFilter.GetOutput('newOutput'), filterOutput)
    self.assertEqual(filterOutput.GetAttr(),
                     lightFilter.GetPrim().GetAttribute("outputs:newOutput"))

    # Test the connection behavior customization.
    # Create a connectable prim with an output under the light.
    lightGraph = UsdShade.NodeGraph.Define(stage, '/RectLight/Prim')
    self.assertTrue(lightGraph)
    lightGraphOutput = lightGraph.CreateOutput(
        'graphOut', Sdf.ValueTypeNames.Float)
    self.assertTrue(lightGraphOutput)

    # Create a connectable prim with an output under the light filter.
    filterGraph = UsdShade.NodeGraph.Define(stage, '/LightFilter/Prim')
    self.assertTrue(filterGraph)
    filterGraphOutput = filterGraph.CreateOutput(
        'graphOut', Sdf.ValueTypeNames.Float)
    self.assertTrue(filterGraphOutput)

    # Light outputs can be connected.
    self.assertTrue(lightOutput.CanConnect(lightGraphOutput))
    self.assertTrue(lightOutput.CanConnect(filterGraphOutput))

    # Light inputs diverge from the default behavior and should be
    # connectable across its own scope (encapsulation is not required)
    self.assertTrue(lightInput.CanConnect(lightOutput))
    self.assertTrue(lightInput.CanConnect(lightGraphOutput))
    self.assertTrue(lightInput.CanConnect(filterGraphOutput))

    # From the default behavior light filter outputs cannot be connected.
    self.assertFalse(filterOutput.CanConnect(lightGraphOutput))
    self.assertFalse(filterOutput.CanConnect(filterGraphOutput))

    # Light filters inputs diverge from the default behavior and should be
    # connectable across its own scope (encapsulation is not required)
    self.assertTrue(filterInput.CanConnect(filterOutput))
    self.assertTrue(filterInput.CanConnect(filterGraphOutput))
    self.assertTrue(filterInput.CanConnect(lightGraphOutput))

    # The shaping API can add more connectable attributes to the light
    # and implements the same connectable interface functions. We test
    # those here.
    shapingAPI = UsdLux.ShapingAPI.Apply(lightAPI.GetPrim())
    self.assertTrue(shapingAPI)
    self.assertTrue(shapingAPI.ConnectableAPI())
    # Verify input attributes match the getter API attributes.
    self.assertEqual(shapingAPI.GetInput('shaping:cone:angle').GetAttr(),
                     shapingAPI.GetShapingConeAngleAttr())
    self.assertEqual(shapingAPI.GetInput('shaping:focus').GetAttr(),
                     shapingAPI.GetShapingFocusAttr())
    # These inputs have the same connectable behaviors as all light inputs,
    # i.e. they should also diverge from the default behavior of only be
    # connected to sources from immediate descendant (encapsultated) prims
    # of the light.
    shapingInput = shapingAPI.GetInput('shaping:focus')
    self.assertTrue(shapingInput.CanConnect(lightOutput))
    self.assertTrue(shapingInput.CanConnect(lightGraphOutput))
    self.assertTrue(shapingInput.CanConnect(filterGraphOutput))

    # The shadow API can add more connectable attributes to the light
    # and implements the same connectable interface functions. We test
    # those here.
    shadowAPI = UsdLux.ShadowAPI.Apply(lightAPI.GetPrim())
    self.assertTrue(shadowAPI)
    self.assertTrue(shadowAPI.ConnectableAPI())
    # Verify input attributes match the getter API attributes.
    self.assertEqual(shadowAPI.GetInput('shadow:color').GetAttr(),
                     shadowAPI.GetShadowColorAttr())
    self.assertEqual(shadowAPI.GetInput('shadow:distance').GetAttr(),
                     shadowAPI.GetShadowDistanceAttr())
    # These inputs have the same connectable behaviors as all light inputs,
    # i.e. they should also diverge from the default behavior of only be
    # connected to sources from immediate descendant (encapsultated) prims
    # of the light.
    shadowInput = shadowAPI.GetInput('shadow:color')
    self.assertTrue(shadowInput.CanConnect(lightOutput))
    self.assertTrue(shadowInput.CanConnect(lightGraphOutput))
    self.assertTrue(shadowInput.CanConnect(filterGraphOutput))

    # Even though the shadow and shaping API schemas provide connectable
    # attributes and an interface for the ConnectableAPI, the typed schema
    # of the prim is still what provides its connectable behavior. Here
    # we verify that applying these APIs to a prim whose type is not
    # connectable does NOT cause the prim to conform to the Connectable API.
    nonConnectablePrim = stage.DefinePrim("/Sphere", "Sphere")
    shadowAPI = UsdLux.ShadowAPI.Apply(nonConnectablePrim)
    self.assertTrue(shadowAPI)
    self.assertFalse(shadowAPI.ConnectableAPI())
    shapingAPI = UsdLux.ShapingAPI.Apply(nonConnectablePrim)
    self.assertTrue(shapingAPI)
    self.assertFalse(shapingAPI.ConnectableAPI())
def test_DomeLight_OrientToStageUpAxis(self):
    """OrientToStageUpAxis is a no-op on a Y-up stage and authors a single
    +90 degree rotateX op once the stage becomes Z-up."""
    stage = Usd.Stage.CreateInMemory()
    # Force Y-up explicitly so a site-level default can't change the result.
    UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)
    dome = UsdLux.DomeLight.Define(stage, '/dome')
    # A freshly defined dome carries no xform ops.
    self.assertEqual(dome.GetOrderedXformOps(), [])
    # Aligning on a Y-up stage should not author anything new.
    dome.OrientToStageUpAxis()
    self.assertEqual(dome.GetOrderedXformOps(), [])
    # Flip the stage to Z-up and realign: now a rotation is required.
    UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)
    dome.OrientToStageUpAxis()
    xformOps = dome.GetOrderedXformOps()
    self.assertEqual(len(xformOps), 1)
    rotateOp = xformOps[0]
    # The single op must be the dedicated orientToStageUpAxis rotateX
    # of exactly +90 degrees.
    self.assertEqual(rotateOp.GetBaseName(),
                     UsdLux.Tokens.orientToStageUpAxis)
    self.assertEqual(rotateOp.GetOpType(), UsdGeom.XformOp.TypeRotateX)
    self.assertEqual(rotateOp.GetAttr().Get(), 90.0)
def test_UsdLux_HasConnectableAPI(self):
    """Both core lux schema types report as UsdShade-connectable."""
    for luxSchemaType in (UsdLux.LightAPI, UsdLux.LightFilter):
        self.assertTrue(
            UsdShade.ConnectableAPI.HasConnectableAPI(luxSchemaType))
def test_GetShaderId(self):
    """Test the shader ID attribute API shared by UsdLux.LightAPI and
    UsdLux.LightFilter: the default render context's shaderId attribute,
    per-render-context shaderId attributes, and the fallback resolution
    order used by GetShaderId.
    """
    # Test the LightAPI shader ID API
    # UsdLuxLightAPI and UsdLuxLightFilter implement the same API for
    # their shaderId attributes so we can test them using the same function.
    def _TestShaderIDs(lightOrFilter, shaderIdAttrName):
        # The default render context's shaderId attribute does exist in the
        # API. These attributes do not yet exist for other contexts.
        self.assertEqual(
            lightOrFilter.GetShaderIdAttrForRenderContext("").GetName(),
            shaderIdAttrName)
        self.assertFalse(
            lightOrFilter.GetShaderIdAttrForRenderContext("ri"))
        self.assertFalse(
            lightOrFilter.GetShaderIdAttrForRenderContext("other"))
        # By default LightAPI shader IDs are empty for all render contexts.
        self.assertEqual(lightOrFilter.GetShaderId([]), "")
        self.assertEqual(lightOrFilter.GetShaderId(["other", "ri"]), "")

        # Set a value in the default shaderID attr.
        lightOrFilter.GetShaderIdAttr().Set("DefaultLight")
        # No new attributes were created.
        self.assertEqual(
            lightOrFilter.GetShaderIdAttrForRenderContext("").GetName(),
            shaderIdAttrName)
        self.assertFalse(
            lightOrFilter.GetShaderIdAttrForRenderContext("ri"))
        self.assertFalse(
            lightOrFilter.GetShaderIdAttrForRenderContext("other"))
        # The default value is now the shaderID returned for all render
        # contexts since no render contexts define their own shader ID
        self.assertEqual(
            lightOrFilter.GetShaderId([]), "DefaultLight")
        self.assertEqual(
            lightOrFilter.GetShaderId(["other", "ri"]), "DefaultLight")

        # Create a shaderID attr for the "ri" render context with a new ID
        # value.
        lightOrFilter.CreateShaderIdAttrForRenderContext("ri", "SphereLight")
        # The shaderId attr for "ri" now exists
        self.assertEqual(
            lightOrFilter.GetShaderIdAttrForRenderContext("").GetName(),
            shaderIdAttrName)
        self.assertEqual(
            lightOrFilter.GetShaderIdAttrForRenderContext("ri").GetName(),
            "ri:" + shaderIdAttrName)
        self.assertFalse(
            lightOrFilter.GetShaderIdAttrForRenderContext("other"))
        # When passed no render contexts we still return the default
        # shader ID.
        self.assertEqual(lightOrFilter.GetShaderId([]), "DefaultLight")
        # Since we defined a shader ID for "ri" but not "other", the "ri"
        # shader ID is returned when querying for both. Querying for just
        # "other" falls back to the default shaderID
        self.assertEqual(
            lightOrFilter.GetShaderId(["other", "ri"]), "SphereLight")
        self.assertEqual(
            lightOrFilter.GetShaderId(["ri"]), "SphereLight")
        self.assertEqual(
            lightOrFilter.GetShaderId(["other"]), "DefaultLight")

    # Create an untyped prim with a LightAPI applied and test the ShaderId
    # functions of UsdLux.LightAPI
    stage = Usd.Stage.CreateInMemory()
    prim = stage.DefinePrim("/PrimLight")
    light = UsdLux.LightAPI.Apply(prim)
    self.assertTrue(light)
    _TestShaderIDs(light, "light:shaderId")

    # Create a LightFilter prim and test the ShaderId functions of
    # UsdLux.LightFilter
    lightFilter = UsdLux.LightFilter.Define(stage, "/PrimLightFilter")
    self.assertTrue(lightFilter)
    _TestShaderIDs(lightFilter, "lightFilter:shaderId")
def test_LightExtentAndBBox(self):
    """Extent and local-bound computation for every boundable light type,
    exercising both the schema fallback sizes and explicitly authored
    sizes, plus the non-boundable light types."""
    timeCode = Usd.TimeCode.Default()

    # Compare a light's plugin-computed extent and its local bound against
    # an expected [min, max] extent pair.
    def _checkExtentAndBound(light, expected):
        self.assertEqual(
            UsdGeom.Boundable.ComputeExtentFromPlugins(light, timeCode),
            expected)
        expectedRange = Gf.Range3d(
            Gf.Vec3d(expected[0]), Gf.Vec3d(expected[1]))
        self.assertEqual(
            light.ComputeLocalBound(timeCode, "default"),
            Gf.BBox3d(expectedRange, Gf.Matrix4d(1.0)))

    # One prim per boundable light schema.
    stage = Usd.Stage.CreateInMemory()
    rectLight = UsdLux.RectLight.Define(stage, "/RectLight")
    self.assertTrue(rectLight)
    diskLight = UsdLux.DiskLight.Define(stage, "/DiskLight")
    self.assertTrue(diskLight)
    cylLight = UsdLux.CylinderLight.Define(stage, "/CylLight")
    self.assertTrue(cylLight)
    sphereLight = UsdLux.SphereLight.Define(stage, "/SphereLight")
    self.assertTrue(sphereLight)

    # Fallback attribute values produce unit-sized extents.
    _checkExtentAndBound(rectLight, [(-0.5, -0.5, 0.0), (0.5, 0.5, 0.0)])
    _checkExtentAndBound(diskLight, [(-0.5, -0.5, 0.0), (0.5, 0.5, 0.0)])
    _checkExtentAndBound(cylLight, [(-0.5, -0.5, -0.5), (0.5, 0.5, 0.5)])
    _checkExtentAndBound(sphereLight, [(-0.5, -0.5, -0.5), (0.5, 0.5, 0.5)])

    # Authoring the size-related attributes updates the computed extents
    # and bounds.
    rectLight.CreateWidthAttr(4.0)
    rectLight.CreateHeightAttr(6.0)
    _checkExtentAndBound(rectLight, [(-2.0, -3.0, 0.0), (2.0, 3.0, 0.0)])

    diskLight.CreateRadiusAttr(5.0)
    _checkExtentAndBound(diskLight, [(-5.0, -5.0, 0.0), (5.0, 5.0, 0.0)])

    cylLight.CreateRadiusAttr(4.0)
    cylLight.CreateLengthAttr(10.0)
    _checkExtentAndBound(cylLight, [(-4.0, -4.0, -5.0), (4.0, 4.0, 5.0)])

    sphereLight.CreateRadiusAttr(3.0)
    _checkExtentAndBound(sphereLight, [(-3.0, -3.0, -3.0), (3.0, 3.0, 3.0)])

    # Special case: portal lights have no size-affecting attributes.
    # Their registered compute-extent function just reports the schema's
    # default extent (explicitly authored extents still win).
    portalLight = UsdLux.PortalLight.Define(stage, "/PortalLight")
    self.assertTrue(portalLight)
    _checkExtentAndBound(portalLight, [(-0.5, -0.5, 0.0), (0.5, 0.5, 0.0)])

    # For completeness: dome and distant lights are not boundable at all.
    domeLight = UsdLux.DomeLight.Define(stage, "/DomeLight")
    self.assertTrue(domeLight)
    self.assertFalse(UsdGeom.Boundable(domeLight))
    distLight = UsdLux.DistantLight.Define(stage, "/DistLight")
    self.assertTrue(distLight)
    self.assertFalse(UsdGeom.Boundable(distLight))
    def test_SdrShaderNodesForLights(self):
        """
        Test the automatic registration of SdrShaderNodes for all the UsdLux
        light types.

        For every light schema type this verifies that an SdrShaderNode with
        source type 'USD' is registered under the schema's shader identifier,
        and that the node's inputs mirror the light prim's input attributes.
        """
        # The expected shader node inputs that should be found for all of our
        # UsdLux light types.
        expectedLightInputNames = [
            # LightAPI
            'color',
            'colorTemperature',
            'diffuse',
            'enableColorTemperature',
            'exposure',
            'intensity',
            'normalize',
            'specular',
            # ShadowAPI
            'shadow:color',
            'shadow:distance',
            'shadow:enable',
            'shadow:falloff',
            'shadow:falloffGamma',
            # ShapingAPI
            'shaping:cone:angle',
            'shaping:cone:softness',
            'shaping:focus',
            'shaping:focusTint',
            'shaping:ies:angleScale',
            'shaping:ies:file',
            'shaping:ies:normalize'
            ]
        # Map of the names of the expected light nodes to the additional inputs
        # we expect for those types.
        expectedLightNodes = {
            'CylinderLight' : ['length', 'radius'],
            'DiskLight' : ['radius'],
            'DistantLight' : ['angle'],
            'DomeLight' : ['texture:file', 'texture:format'],
            'GeometryLight' : [],
            'PortalLight' : [],
            'RectLight' : ['width', 'height', 'texture:file'],
            'SphereLight' : ['radius'],
            'MeshLight' : [],
            'VolumeLight' : []
        }
        # Get all the derived types of UsdLuxBoundableLightBase and
        # UsdLuxNonboundableLightBase that are defined in UsdLux
        lightTypes = list(filter(
            Plug.Registry().GetPluginWithName("usdLux").DeclaresType,
            Tf.Type(UsdLux.BoundableLightBase).GetAllDerivedTypes() +
            Tf.Type(UsdLux.NonboundableLightBase).GetAllDerivedTypes()))
        self.assertTrue(lightTypes)
        # Augment lightTypes to include MeshLightAPI and VolumeLightAPI
        lightTypes.append(
            Tf.Type.FindByName('UsdLuxMeshLightAPI'))
        lightTypes.append(
            Tf.Type.FindByName('UsdLuxVolumeLightAPI'))
        # Verify that at least one known light type is in our list to guard
        # against this giving false positives if no light types are available.
        self.assertIn(UsdLux.RectLight, lightTypes)
        self.assertEqual(len(lightTypes), len(expectedLightNodes))
        stage = Usd.Stage.CreateInMemory()
        prim = stage.DefinePrim("/Prim")
        usdSchemaReg = Usd.SchemaRegistry()
        for lightType in lightTypes:
            print("Test SdrNode for schema type " + str(lightType))
            # API schemas are applied to the (reused) prim; concrete schemas
            # retype the prim instead.
            if usdSchemaReg.IsAppliedAPISchema(lightType):
                prim.ApplyAPI(lightType)
            else:
                typeName = usdSchemaReg.GetConcreteSchemaTypeName(lightType)
                if not typeName:
                    continue
                prim.SetTypeName(typeName)
            light = UsdLux.LightAPI(prim)
            self.assertTrue(light)
            sdrIdentifier = light.GetShaderId([])
            self.assertTrue(sdrIdentifier)
            prim.ApplyAPI(UsdLux.ShadowAPI)
            prim.ApplyAPI(UsdLux.ShapingAPI)
            # Every concrete light type and some API schemas (with appropriate
            # shaderId as sdr Identifier) in usdLux domain will have an
            # SdrShaderNode with source type 'USD' registered for it under its
            # USD schema type name.
            node = Sdr.Registry().GetNodeByIdentifier(sdrIdentifier, ['USD'])
            self.assertTrue(node is not None)
            self.assertIn(sdrIdentifier, expectedLightNodes)
            # Names, identifier, and role for the node all match the USD schema
            # type name
            self.assertEqual(node.GetIdentifier(), sdrIdentifier)
            self.assertEqual(node.GetName(), sdrIdentifier)
            self.assertEqual(node.GetImplementationName(), sdrIdentifier)
            self.assertEqual(node.GetRole(), sdrIdentifier)
            self.assertTrue(node.GetInfoString().startswith(sdrIdentifier))
            # The context is always 'light' for lights.
            # Source type is 'USD'
            self.assertEqual(node.GetContext(), 'light')
            self.assertEqual(node.GetSourceType(), 'USD')
            # Help string is generated and encoded in the node's metadata (no
            # need to verify the specific wording).
            # NOTE(review): assertTrue() treats its second positional argument
            # as the failure *message*, so the {'primvars', 'help'} set below
            # is never compared -- any truthy key set passes. This was likely
            # intended to be assertEqual; confirm intent before tightening.
            self.assertTrue(set(node.GetMetadata().keys()), {'primvars', 'help'})
            self.assertEqual(node.GetMetadata()["help"], node.GetHelp())
            # Source code and URIs are all empty.
            self.assertFalse(node.GetSourceCode())
            self.assertFalse(node.GetResolvedDefinitionURI())
            self.assertFalse(node.GetResolvedImplementationURI())
            # Other classifications are left empty.
            self.assertFalse(node.GetCategory())
            self.assertFalse(node.GetDepartments())
            self.assertFalse(node.GetFamily())
            self.assertFalse(node.GetLabel())
            self.assertFalse(node.GetVersion())
            self.assertFalse(node.GetAllVstructNames())
            self.assertEqual(node.GetPages(), [''])
            # The node will be valid for our light types.
            self.assertTrue(node.IsValid())
            # Helper for comparing an SdrShaderProperty from node to the
            # corresponding UsdShadeInput/UsdShadeOutput from a UsdLux light
            def _CompareLightPropToNodeProp(nodeInput, primInput):
                # Input names and default values match.
                primDefaultValue = primInput.GetAttr().Get()
                self.assertEqual(nodeInput.GetName(), primInput.GetBaseName())
                self.assertEqual(nodeInput.GetDefaultValue(), primDefaultValue)
                # Some USD property types don't match exactly one to one and are
                # converted to different types. In particular relevance to
                # lights and Token becomes String.
                expectedTypeName = primInput.GetTypeName()
                # Array valued attributes have their array size determined from
                # the default value and will be converted to scalar in the
                # SdrProperty if the array size is zero.
                if expectedTypeName.isArray:
                    if not primDefaultValue or len(primDefaultValue) == 0:
                        expectedTypeName = expectedTypeName.scalarType
                elif expectedTypeName == Sdf.ValueTypeNames.Token:
                    expectedTypeName = Sdf.ValueTypeNames.String
                # Bool SdfTypes should Have Int SdrTypes, but still return as
                # Bool when queried for GetTypeAsSdfType
                if expectedTypeName == Sdf.ValueTypeNames.Bool:
                    self.assertEqual(nodeInput.GetType(),
                        Sdf.ValueTypeNames.Int)
                # Verify the node's input type maps back to USD property's type
                # (with the noted above exceptions).
                self.assertEqual(
                    nodeInput.GetTypeAsSdfType()[0], expectedTypeName,
                    msg="{}.{} Type {} != {}".format(
                        str(node.GetName()),
                        str(nodeInput.GetName()),
                        str(nodeInput.GetTypeAsSdfType()[0]),
                        str(expectedTypeName)))
                # If the USD property type is an Asset, it will be listed in
                # the node's asset identifier inputs.
                if expectedTypeName == Sdf.ValueTypeNames.Asset:
                    self.assertIn(nodeInput.GetName(),
                        node.GetAssetIdentifierInputNames())
            # There will be a one to one correspondence between node inputs
            # and prim inputs. Note that the prim may have additional inputs
            # because of auto applied API schemas, but we only need to verify
            # that the node has ONLY the expected inputs and the prim at least
            # has those input properties.
            expectedInputNames = \
                expectedLightInputNames + expectedLightNodes[sdrIdentifier]
            # Verify node has exactly the expected inputs.
            self.assertEqual(sorted(expectedInputNames),
                sorted(node.GetInputNames()))
            # Verify each node input matches a prim input.
            for inputName in expectedInputNames:
                nodeInput = node.GetInput(inputName)
                primInput = light.GetInput(inputName)
                self.assertFalse(nodeInput.IsOutput())
                _CompareLightPropToNodeProp(nodeInput, primInput)
            # None of the UsdLux base lights have outputs
            self.assertEqual(node.GetOutputNames(), [])
            self.assertEqual(light.GetOutputs(onlyAuthored=False), [])
            # The reverse is tested just above, but for all asset identifier
            # inputs listed for the node there is a corresponding asset value
            # input property on the prim.
            for inputName in node.GetAssetIdentifierInputNames():
                self.assertEqual(light.GetInput(inputName).GetTypeName(),
                    Sdf.ValueTypeNames.Asset)
            # These primvars come from sdrMetadata on the prim itself which
            # isn't supported for light schemas so it will always be empty.
            self.assertFalse(node.GetPrimvars())
            # sdrMetadata on input properties is supported so additional
            # primvar properties will correspond to prim inputs with that
            # metadata set.
            for propName in node.GetAdditionalPrimvarProperties():
                self.assertTrue(light.GetInput(propName).GetSdrMetadataByKey(
                    'primvarProperty'))
            # Default input can also be specified in the property's sdrMetadata.
            if node.GetDefaultInput():
                defaultInput = light.GetInput(
                    node.GetDefaultInput().GetName())
                self.assertTrue(defaultInput.GetSdrMetadataByKey('defaultInput'))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 48.083333 | 82 | 0.632059 | #!/pxrpythonsubst
#
# Copyright 2017 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
from __future__ import print_function
from pxr import Gf, Sdf, Sdr, Tf, Usd, UsdGeom, UsdLux, UsdShade, Plug
import unittest, math
class TestUsdLuxLight(unittest.TestCase):
def test_BlackbodySpectrum(self):
warm_color = UsdLux.BlackbodyTemperatureAsRgb(1000)
whitepoint = UsdLux.BlackbodyTemperatureAsRgb(6500)
cool_color = UsdLux.BlackbodyTemperatureAsRgb(10000)
# Whitepoint is ~= (1,1,1)
assert Gf.IsClose(whitepoint, Gf.Vec3f(1.0), 0.1)
# Warm has more red than green or blue
assert warm_color[0] > warm_color[1]
assert warm_color[0] > warm_color[2]
# Cool has more blue than red or green
assert cool_color[2] > cool_color[0]
assert cool_color[2] > cool_color[1]
    def test_BasicConnectableLights(self):
        """
        Verify the UsdShade connectability behavior of lights and light
        filters: built-in and created inputs/outputs, and the customized
        CanConnect rules that relax source encapsulation for light inputs.
        """
        # Try checking connectableAPI on core lux types first before going
        # through the prim.
        self.assertTrue(UsdShade.ConnectableAPI.HasConnectableAPI(
            UsdLux.RectLight))
        self.assertTrue(UsdShade.ConnectableAPI.HasConnectableAPI(
            UsdLux.PluginLightFilter))
        stage = Usd.Stage.CreateInMemory()
        rectLight = UsdLux.RectLight.Define(stage, '/RectLight')
        self.assertTrue(rectLight)
        lightAPI = rectLight.LightAPI()
        self.assertTrue(lightAPI)
        self.assertTrue(lightAPI.ConnectableAPI())
        # Rect light has the following built-in inputs attributes.
        inputNames = ['color',
                      'colorTemperature',
                      'diffuse',
                      'enableColorTemperature',
                      'exposure',
                      'height',
                      'intensity',
                      'normalize',
                      'specular',
                      'texture:file',
                      'width']
        # GetInputs returns only authored inputs by default
        self.assertEqual(lightAPI.GetInputs(), [])
        # GetInputs(false) is a super-set of all the built-ins.
        # There could be other inputs coming from any auto applied APISchemas.
        allInputs = [inputName.GetBaseName() for inputName in
                     lightAPI.GetInputs(onlyAuthored=False)]
        self.assertTrue(set(inputNames).issubset(set(allInputs)))
        # Verify each input's attribute is prefixed.
        for name in inputNames:
            self.assertEqual(lightAPI.GetInput(name).GetAttr().GetName(),
                "inputs:" + name)
        # Verify input attributes match the getter API attributes.
        self.assertEqual(lightAPI.GetInput('color').GetAttr(),
                         rectLight.GetColorAttr())
        self.assertEqual(lightAPI.GetInput('texture:file').GetAttr(),
                         rectLight.GetTextureFileAttr())
        # Create a new input, and verify that the input interface conforming
        # attribute is created.
        lightInput = lightAPI.CreateInput('newInput', Sdf.ValueTypeNames.Float)
        self.assertIn(lightInput, lightAPI.GetInputs())
        # By default GetInputs() returns onlyAuthored inputs, of which
        # there is now 1.
        self.assertEqual(len(lightAPI.GetInputs()), 1)
        self.assertEqual(lightAPI.GetInput('newInput'), lightInput)
        self.assertEqual(lightInput.GetAttr(),
                         lightAPI.GetPrim().GetAttribute("inputs:newInput"))
        # Rect light has no authored outputs.
        self.assertEqual(lightAPI.GetOutputs(), [])
        # Rect light has no built-in outputs, either.
        self.assertEqual(lightAPI.GetOutputs(onlyAuthored=False), [])
        # Create a new output, and verify that the output interface conforming
        # attribute is created.
        lightOutput = lightAPI.CreateOutput('newOutput', Sdf.ValueTypeNames.Float)
        self.assertEqual(lightAPI.GetOutputs(), [lightOutput])
        self.assertEqual(lightAPI.GetOutputs(onlyAuthored=False), [lightOutput])
        self.assertEqual(lightAPI.GetOutput('newOutput'), lightOutput)
        self.assertEqual(lightOutput.GetAttr(),
                         lightAPI.GetPrim().GetAttribute("outputs:newOutput"))
        # Do the same with a light filter
        lightFilter = UsdLux.LightFilter.Define(stage, '/LightFilter')
        self.assertTrue(lightFilter)
        self.assertTrue(lightFilter.ConnectableAPI())
        # Light filter has no built-in inputs.
        self.assertEqual(lightFilter.GetInputs(), [])
        # Create a new input, and verify that the input interface conforming
        # attribute is created.
        filterInput = lightFilter.CreateInput('newInput',
                                              Sdf.ValueTypeNames.Float)
        self.assertEqual(lightFilter.GetInputs(), [filterInput])
        self.assertEqual(lightFilter.GetInput('newInput'), filterInput)
        self.assertEqual(filterInput.GetAttr(),
                         lightFilter.GetPrim().GetAttribute("inputs:newInput"))
        # Light filter has no built-in outputs.
        self.assertEqual(lightFilter.GetOutputs(), [])
        self.assertEqual(lightFilter.GetOutputs(onlyAuthored=False), [])
        # Create a new output, and verify that the output interface conforming
        # attribute is created.
        filterOutput = lightFilter.CreateOutput('newOutput',
                                                Sdf.ValueTypeNames.Float)
        self.assertEqual(lightFilter.GetOutputs(), [filterOutput])
        self.assertEqual(lightFilter.GetOutputs(onlyAuthored=False),
                         [filterOutput])
        self.assertEqual(lightFilter.GetOutput('newOutput'), filterOutput)
        self.assertEqual(filterOutput.GetAttr(),
                         lightFilter.GetPrim().GetAttribute("outputs:newOutput"))
        # Test the connection behavior customization.
        # Create a connectable prim with an output under the light.
        lightGraph = UsdShade.NodeGraph.Define(stage, '/RectLight/Prim')
        self.assertTrue(lightGraph)
        lightGraphOutput = lightGraph.CreateOutput(
            'graphOut', Sdf.ValueTypeNames.Float)
        self.assertTrue(lightGraphOutput)
        # Create a connectable prim with an output under the light filter.
        filterGraph = UsdShade.NodeGraph.Define(stage, '/LightFilter/Prim')
        self.assertTrue(filterGraph)
        filterGraphOutput = filterGraph.CreateOutput(
            'graphOut', Sdf.ValueTypeNames.Float)
        self.assertTrue(filterGraphOutput)
        # Light outputs can be connected.
        self.assertTrue(lightOutput.CanConnect(lightGraphOutput))
        self.assertTrue(lightOutput.CanConnect(filterGraphOutput))
        # Light inputs diverge from the default behavior and should be
        # connectable across its own scope (encapsulation is not required)
        self.assertTrue(lightInput.CanConnect(lightOutput))
        self.assertTrue(lightInput.CanConnect(lightGraphOutput))
        self.assertTrue(lightInput.CanConnect(filterGraphOutput))
        # From the default behavior light filter outputs cannot be connected.
        self.assertFalse(filterOutput.CanConnect(lightGraphOutput))
        self.assertFalse(filterOutput.CanConnect(filterGraphOutput))
        # Light filters inputs diverge from the default behavior and should be
        # connectable across its own scope (encapsulation is not required)
        self.assertTrue(filterInput.CanConnect(filterOutput))
        self.assertTrue(filterInput.CanConnect(filterGraphOutput))
        self.assertTrue(filterInput.CanConnect(lightGraphOutput))
        # The shaping API can add more connectable attributes to the light
        # and implements the same connectable interface functions. We test
        # those here.
        shapingAPI = UsdLux.ShapingAPI.Apply(lightAPI.GetPrim())
        self.assertTrue(shapingAPI)
        self.assertTrue(shapingAPI.ConnectableAPI())
        # Verify input attributes match the getter API attributes.
        self.assertEqual(shapingAPI.GetInput('shaping:cone:angle').GetAttr(),
                         shapingAPI.GetShapingConeAngleAttr())
        self.assertEqual(shapingAPI.GetInput('shaping:focus').GetAttr(),
                         shapingAPI.GetShapingFocusAttr())
        # These inputs have the same connectable behaviors as all light inputs,
        # i.e. they should also diverge from the default behavior of only be
        # connected to sources from immediate descendant (encapsulated) prims
        # of the light.
        shapingInput = shapingAPI.GetInput('shaping:focus')
        self.assertTrue(shapingInput.CanConnect(lightOutput))
        self.assertTrue(shapingInput.CanConnect(lightGraphOutput))
        self.assertTrue(shapingInput.CanConnect(filterGraphOutput))
        # The shadow API can add more connectable attributes to the light
        # and implements the same connectable interface functions. We test
        # those here.
        shadowAPI = UsdLux.ShadowAPI.Apply(lightAPI.GetPrim())
        self.assertTrue(shadowAPI)
        self.assertTrue(shadowAPI.ConnectableAPI())
        # Verify input attributes match the getter API attributes.
        self.assertEqual(shadowAPI.GetInput('shadow:color').GetAttr(),
                         shadowAPI.GetShadowColorAttr())
        self.assertEqual(shadowAPI.GetInput('shadow:distance').GetAttr(),
                         shadowAPI.GetShadowDistanceAttr())
        # These inputs have the same connectable behaviors as all light inputs,
        # i.e. they should also diverge from the default behavior of only be
        # connected to sources from immediate descendant (encapsulated) prims
        # of the light.
        shadowInput = shadowAPI.GetInput('shadow:color')
        self.assertTrue(shadowInput.CanConnect(lightOutput))
        self.assertTrue(shadowInput.CanConnect(lightGraphOutput))
        self.assertTrue(shadowInput.CanConnect(filterGraphOutput))
        # Even though the shadow and shaping API schemas provide connectable
        # attributes and an interface for the ConnectableAPI, the typed schema
        # of the prim is still what provides its connectable behavior. Here
        # we verify that applying these APIs to a prim whose type is not
        # connectable does NOT cause the prim to conform to the Connectable API.
        nonConnectablePrim = stage.DefinePrim("/Sphere", "Sphere")
        shadowAPI = UsdLux.ShadowAPI.Apply(nonConnectablePrim)
        self.assertTrue(shadowAPI)
        self.assertFalse(shadowAPI.ConnectableAPI())
        shapingAPI = UsdLux.ShapingAPI.Apply(nonConnectablePrim)
        self.assertTrue(shapingAPI)
        self.assertFalse(shapingAPI.ConnectableAPI())
def test_DomeLight_OrientToStageUpAxis(self):
stage = Usd.Stage.CreateInMemory()
# Try Y-up first. Explicitly set this to override any site-level
# override.
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y)
# Create a dome.
light = UsdLux.DomeLight.Define(stage, '/dome')
# No Xform ops to begin with.
self.assertEqual(light.GetOrderedXformOps(), [])
# Align to up axis.
light.OrientToStageUpAxis()
# Since the stage is already Y-up, no additional xform op was required.
self.assertEqual(light.GetOrderedXformOps(), [])
# Now change the stage to Z-up and re-align the dome.
UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)
light.OrientToStageUpAxis()
# That should require a +90 deg rotate on X.
ops = light.GetOrderedXformOps()
self.assertEqual(len(ops), 1)
self.assertEqual(ops[0].GetBaseName(),
UsdLux.Tokens.orientToStageUpAxis)
self.assertEqual(ops[0].GetOpType(), UsdGeom.XformOp.TypeRotateX)
self.assertEqual(ops[0].GetAttr().Get(), 90.0)
def test_UsdLux_HasConnectableAPI(self):
self.assertTrue(UsdShade.ConnectableAPI.HasConnectableAPI(
UsdLux.LightAPI))
self.assertTrue(UsdShade.ConnectableAPI.HasConnectableAPI(
UsdLux.LightFilter))
    def test_GetShaderId(self):
        """
        Test the LightAPI shader ID API.

        Exercises per-render-context shader ID attributes and the fallback
        to the default-context ID for both LightAPI and LightFilter.
        """
        # UsdLuxLightAPI and UsdLuxLightFilter implement the same API for
        # their shaderId attributes so we can test them using the same function.
        def _TestShaderIDs(lightOrFilter, shaderIdAttrName):
            # The default render context's shaderId attribute does exist in the
            # API. These attributes do not yet exist for other contexts.
            self.assertEqual(
                lightOrFilter.GetShaderIdAttrForRenderContext("").GetName(),
                shaderIdAttrName)
            self.assertFalse(
                lightOrFilter.GetShaderIdAttrForRenderContext("ri"))
            self.assertFalse(
                lightOrFilter.GetShaderIdAttrForRenderContext("other"))
            # By default LightAPI shader IDs are empty for all render contexts.
            self.assertEqual(lightOrFilter.GetShaderId([]), "")
            self.assertEqual(lightOrFilter.GetShaderId(["other", "ri"]), "")
            # Set a value in the default shaderID attr.
            lightOrFilter.GetShaderIdAttr().Set("DefaultLight")
            # No new attributes were created.
            self.assertEqual(
                lightOrFilter.GetShaderIdAttrForRenderContext("").GetName(),
                shaderIdAttrName)
            self.assertFalse(
                lightOrFilter.GetShaderIdAttrForRenderContext("ri"))
            self.assertFalse(
                lightOrFilter.GetShaderIdAttrForRenderContext("other"))
            # The default value is now the shaderID returned for all render
            # contexts since no render contexts define their own shader ID
            self.assertEqual(
                lightOrFilter.GetShaderId([]), "DefaultLight")
            self.assertEqual(
                lightOrFilter.GetShaderId(["other", "ri"]), "DefaultLight")
            # Create a shaderID attr for the "ri" render context with a new ID
            # value.
            lightOrFilter.CreateShaderIdAttrForRenderContext("ri", "SphereLight")
            # The shaderId attr for "ri" now exists
            self.assertEqual(
                lightOrFilter.GetShaderIdAttrForRenderContext("").GetName(),
                shaderIdAttrName)
            self.assertEqual(
                lightOrFilter.GetShaderIdAttrForRenderContext("ri").GetName(),
                "ri:" + shaderIdAttrName)
            self.assertFalse(
                lightOrFilter.GetShaderIdAttrForRenderContext("other"))
            # When passed no render contexts we still return the default
            # shader ID.
            self.assertEqual(lightOrFilter.GetShaderId([]), "DefaultLight")
            # Since we defined a shader ID for "ri" but not "other", the "ri"
            # shader ID is returned when querying for both. Querying for just
            # "other" falls back to the default shaderID
            self.assertEqual(
                lightOrFilter.GetShaderId(["other", "ri"]), "SphereLight")
            self.assertEqual(
                lightOrFilter.GetShaderId(["ri"]), "SphereLight")
            self.assertEqual(
                lightOrFilter.GetShaderId(["other"]), "DefaultLight")
        # Create an untyped prim with a LightAPI applied and test the ShaderId
        # functions of UsdLux.LightAPI
        stage = Usd.Stage.CreateInMemory()
        prim = stage.DefinePrim("/PrimLight")
        light = UsdLux.LightAPI.Apply(prim)
        self.assertTrue(light)
        _TestShaderIDs(light, "light:shaderId")
        # Create a LightFilter prim and test the ShaderId functions of
        # UsdLux.LightFilter
        lightFilter = UsdLux.LightFilter.Define(stage, "/PrimLightFilter")
        self.assertTrue(lightFilter)
        _TestShaderIDs(lightFilter, "lightFilter:shaderId")
    def test_LightExtentAndBBox(self):
        """Test extent and bbox computations for the boundable lights."""
        time = Usd.TimeCode.Default()
        # Helper for computing the extent and bounding boxes for a light and
        # comparing against an expected extent pair.
        def _VerifyExtentAndBBox(light, expectedExtent):
            self.assertEqual(
                UsdGeom.Boundable.ComputeExtentFromPlugins(light, time),
                expectedExtent)
            self.assertEqual(
                light.ComputeLocalBound(time, "default"),
                Gf.BBox3d(
                    Gf.Range3d(
                        Gf.Vec3d(expectedExtent[0]),
                        Gf.Vec3d(expectedExtent[1])),
                    Gf.Matrix4d(1.0)))
        # Create a prim of each boundable light type.
        stage = Usd.Stage.CreateInMemory()
        rectLight = UsdLux.RectLight.Define(stage, "/RectLight")
        self.assertTrue(rectLight)
        diskLight = UsdLux.DiskLight.Define(stage, "/DiskLight")
        self.assertTrue(diskLight)
        cylLight = UsdLux.CylinderLight.Define(stage, "/CylLight")
        self.assertTrue(cylLight)
        sphereLight = UsdLux.SphereLight.Define(stage, "/SphereLight")
        self.assertTrue(sphereLight)
        # Verify the extent and bbox computations for each light given its
        # fallback attribute values.
        _VerifyExtentAndBBox(rectLight, [(-0.5, -0.5, 0.0), (0.5, 0.5, 0.0)])
        _VerifyExtentAndBBox(diskLight, [(-0.5, -0.5, 0.0), (0.5, 0.5, 0.0)])
        _VerifyExtentAndBBox(cylLight, [(-0.5, -0.5, -0.5), (0.5, 0.5, 0.5)])
        _VerifyExtentAndBBox(sphereLight, [(-0.5, -0.5, -0.5), (0.5, 0.5, 0.5)])
        # Change the size related attribute of each light and verify the extents
        # and bounding boxes are updated.
        rectLight.CreateWidthAttr(4.0)
        rectLight.CreateHeightAttr(6.0)
        _VerifyExtentAndBBox(rectLight, [(-2.0, -3.0, 0.0), (2.0, 3.0, 0.0)])
        diskLight.CreateRadiusAttr(5.0)
        _VerifyExtentAndBBox(diskLight, [(-5.0, -5.0, 0.0), (5.0, 5.0, 0.0)])
        cylLight.CreateRadiusAttr(4.0)
        cylLight.CreateLengthAttr(10.0)
        _VerifyExtentAndBBox(cylLight, [(-4.0, -4.0, -5.0), (4.0, 4.0, 5.0)])
        sphereLight.CreateRadiusAttr(3.0)
        _VerifyExtentAndBBox(sphereLight, [(-3.0, -3.0, -3.0), (3.0, 3.0, 3.0)])
        # Special case for portal light. Portal lights don't have any attributes
        # that affect their extent. Extent values are used only when
        # explicitly authored but portal lights' do register a
        # ComputeExtentFuction, which sets the extent as default from the
        # schema.
        portalLight = UsdLux.PortalLight.Define(stage, "/PortalLight")
        self.assertTrue(portalLight)
        _VerifyExtentAndBBox(portalLight, [(-0.5, -0.5, 0.0), (0.5, 0.5, 0.0)])
        # For completeness verify that distant and dome lights are not
        # boundable.
        domeLight = UsdLux.DomeLight.Define(stage, "/DomeLight")
        self.assertTrue(domeLight)
        self.assertFalse(UsdGeom.Boundable(domeLight))
        distLight = UsdLux.DistantLight.Define(stage, "/DistLight")
        self.assertTrue(distLight)
        self.assertFalse(UsdGeom.Boundable(distLight))
    def test_SdrShaderNodesForLights(self):
        """
        Test the automatic registration of SdrShaderNodes for all the UsdLux
        light types.

        For every light schema type this verifies that an SdrShaderNode with
        source type 'USD' is registered under the schema's shader identifier,
        and that the node's inputs mirror the light prim's input attributes.
        """
        # The expected shader node inputs that should be found for all of our
        # UsdLux light types.
        expectedLightInputNames = [
            # LightAPI
            'color',
            'colorTemperature',
            'diffuse',
            'enableColorTemperature',
            'exposure',
            'intensity',
            'normalize',
            'specular',
            # ShadowAPI
            'shadow:color',
            'shadow:distance',
            'shadow:enable',
            'shadow:falloff',
            'shadow:falloffGamma',
            # ShapingAPI
            'shaping:cone:angle',
            'shaping:cone:softness',
            'shaping:focus',
            'shaping:focusTint',
            'shaping:ies:angleScale',
            'shaping:ies:file',
            'shaping:ies:normalize'
            ]
        # Map of the names of the expected light nodes to the additional inputs
        # we expect for those types.
        expectedLightNodes = {
            'CylinderLight' : ['length', 'radius'],
            'DiskLight' : ['radius'],
            'DistantLight' : ['angle'],
            'DomeLight' : ['texture:file', 'texture:format'],
            'GeometryLight' : [],
            'PortalLight' : [],
            'RectLight' : ['width', 'height', 'texture:file'],
            'SphereLight' : ['radius'],
            'MeshLight' : [],
            'VolumeLight' : []
        }
        # Get all the derived types of UsdLuxBoundableLightBase and
        # UsdLuxNonboundableLightBase that are defined in UsdLux
        lightTypes = list(filter(
            Plug.Registry().GetPluginWithName("usdLux").DeclaresType,
            Tf.Type(UsdLux.BoundableLightBase).GetAllDerivedTypes() +
            Tf.Type(UsdLux.NonboundableLightBase).GetAllDerivedTypes()))
        self.assertTrue(lightTypes)
        # Augment lightTypes to include MeshLightAPI and VolumeLightAPI
        lightTypes.append(
            Tf.Type.FindByName('UsdLuxMeshLightAPI'))
        lightTypes.append(
            Tf.Type.FindByName('UsdLuxVolumeLightAPI'))
        # Verify that at least one known light type is in our list to guard
        # against this giving false positives if no light types are available.
        self.assertIn(UsdLux.RectLight, lightTypes)
        self.assertEqual(len(lightTypes), len(expectedLightNodes))
        stage = Usd.Stage.CreateInMemory()
        prim = stage.DefinePrim("/Prim")
        usdSchemaReg = Usd.SchemaRegistry()
        for lightType in lightTypes:
            print("Test SdrNode for schema type " + str(lightType))
            # API schemas are applied to the (reused) prim; concrete schemas
            # retype the prim instead.
            if usdSchemaReg.IsAppliedAPISchema(lightType):
                prim.ApplyAPI(lightType)
            else:
                typeName = usdSchemaReg.GetConcreteSchemaTypeName(lightType)
                if not typeName:
                    continue
                prim.SetTypeName(typeName)
            light = UsdLux.LightAPI(prim)
            self.assertTrue(light)
            sdrIdentifier = light.GetShaderId([])
            self.assertTrue(sdrIdentifier)
            prim.ApplyAPI(UsdLux.ShadowAPI)
            prim.ApplyAPI(UsdLux.ShapingAPI)
            # Every concrete light type and some API schemas (with appropriate
            # shaderId as sdr Identifier) in usdLux domain will have an
            # SdrShaderNode with source type 'USD' registered for it under its
            # USD schema type name.
            node = Sdr.Registry().GetNodeByIdentifier(sdrIdentifier, ['USD'])
            self.assertTrue(node is not None)
            self.assertIn(sdrIdentifier, expectedLightNodes)
            # Names, identifier, and role for the node all match the USD schema
            # type name
            self.assertEqual(node.GetIdentifier(), sdrIdentifier)
            self.assertEqual(node.GetName(), sdrIdentifier)
            self.assertEqual(node.GetImplementationName(), sdrIdentifier)
            self.assertEqual(node.GetRole(), sdrIdentifier)
            self.assertTrue(node.GetInfoString().startswith(sdrIdentifier))
            # The context is always 'light' for lights.
            # Source type is 'USD'
            self.assertEqual(node.GetContext(), 'light')
            self.assertEqual(node.GetSourceType(), 'USD')
            # Help string is generated and encoded in the node's metadata (no
            # need to verify the specific wording).
            # NOTE(review): assertTrue() treats its second positional argument
            # as the failure *message*, so the {'primvars', 'help'} set below
            # is never compared -- any truthy key set passes. This was likely
            # intended to be assertEqual; confirm intent before tightening.
            self.assertTrue(set(node.GetMetadata().keys()), {'primvars', 'help'})
            self.assertEqual(node.GetMetadata()["help"], node.GetHelp())
            # Source code and URIs are all empty.
            self.assertFalse(node.GetSourceCode())
            self.assertFalse(node.GetResolvedDefinitionURI())
            self.assertFalse(node.GetResolvedImplementationURI())
            # Other classifications are left empty.
            self.assertFalse(node.GetCategory())
            self.assertFalse(node.GetDepartments())
            self.assertFalse(node.GetFamily())
            self.assertFalse(node.GetLabel())
            self.assertFalse(node.GetVersion())
            self.assertFalse(node.GetAllVstructNames())
            self.assertEqual(node.GetPages(), [''])
            # The node will be valid for our light types.
            self.assertTrue(node.IsValid())
            # Helper for comparing an SdrShaderProperty from node to the
            # corresponding UsdShadeInput/UsdShadeOutput from a UsdLux light
            def _CompareLightPropToNodeProp(nodeInput, primInput):
                # Input names and default values match.
                primDefaultValue = primInput.GetAttr().Get()
                self.assertEqual(nodeInput.GetName(), primInput.GetBaseName())
                self.assertEqual(nodeInput.GetDefaultValue(), primDefaultValue)
                # Some USD property types don't match exactly one to one and are
                # converted to different types. In particular relevance to
                # lights and Token becomes String.
                expectedTypeName = primInput.GetTypeName()
                # Array valued attributes have their array size determined from
                # the default value and will be converted to scalar in the
                # SdrProperty if the array size is zero.
                if expectedTypeName.isArray:
                    if not primDefaultValue or len(primDefaultValue) == 0:
                        expectedTypeName = expectedTypeName.scalarType
                elif expectedTypeName == Sdf.ValueTypeNames.Token:
                    expectedTypeName = Sdf.ValueTypeNames.String
                # Bool SdfTypes should Have Int SdrTypes, but still return as
                # Bool when queried for GetTypeAsSdfType
                if expectedTypeName == Sdf.ValueTypeNames.Bool:
                    self.assertEqual(nodeInput.GetType(),
                        Sdf.ValueTypeNames.Int)
                # Verify the node's input type maps back to USD property's type
                # (with the noted above exceptions).
                self.assertEqual(
                    nodeInput.GetTypeAsSdfType()[0], expectedTypeName,
                    msg="{}.{} Type {} != {}".format(
                        str(node.GetName()),
                        str(nodeInput.GetName()),
                        str(nodeInput.GetTypeAsSdfType()[0]),
                        str(expectedTypeName)))
                # If the USD property type is an Asset, it will be listed in
                # the node's asset identifier inputs.
                if expectedTypeName == Sdf.ValueTypeNames.Asset:
                    self.assertIn(nodeInput.GetName(),
                        node.GetAssetIdentifierInputNames())
            # There will be a one to one correspondence between node inputs
            # and prim inputs. Note that the prim may have additional inputs
            # because of auto applied API schemas, but we only need to verify
            # that the node has ONLY the expected inputs and the prim at least
            # has those input properties.
            expectedInputNames = \
                expectedLightInputNames + expectedLightNodes[sdrIdentifier]
            # Verify node has exactly the expected inputs.
            self.assertEqual(sorted(expectedInputNames),
                sorted(node.GetInputNames()))
            # Verify each node input matches a prim input.
            for inputName in expectedInputNames:
                nodeInput = node.GetInput(inputName)
                primInput = light.GetInput(inputName)
                self.assertFalse(nodeInput.IsOutput())
                _CompareLightPropToNodeProp(nodeInput, primInput)
            # None of the UsdLux base lights have outputs
            self.assertEqual(node.GetOutputNames(), [])
            self.assertEqual(light.GetOutputs(onlyAuthored=False), [])
            # The reverse is tested just above, but for all asset identifier
            # inputs listed for the node there is a corresponding asset value
            # input property on the prim.
            for inputName in node.GetAssetIdentifierInputNames():
                self.assertEqual(light.GetInput(inputName).GetTypeName(),
                    Sdf.ValueTypeNames.Asset)
            # These primvars come from sdrMetadata on the prim itself which
            # isn't supported for light schemas so it will always be empty.
            self.assertFalse(node.GetPrimvars())
            # sdrMetadata on input properties is supported so additional
            # primvar properties will correspond to prim inputs with that
            # metadata set.
            for propName in node.GetAdditionalPrimvarProperties():
                self.assertTrue(light.GetInput(propName).GetSdrMetadataByKey(
                    'primvarProperty'))
            # Default input can also be specified in the property's sdrMetadata.
            if node.GetDefaultInput():
                defaultInput = light.GetInput(
                    node.GetDefaultInput().GetName())
                self.assertTrue(defaultInput.GetSdrMetadataByKey('defaultInput'))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 0 | 0 |
0e80c9e7dca15d7cd5266e3c0a1290507d1a7a09 | 3,801 | py | Python | scripts/fix_rttm.py | sehgal-simran/RPNSD | 5ec70d11e3d177fb87a8499b63cd1c5ba60549b6 | [
"MIT"
] | 59 | 2020-02-19T11:23:14.000Z | 2022-02-06T09:31:32.000Z | scripts/fix_rttm.py | yuzhms/RPNSD | 031377388cb498c0dee080a76bd588a9ee8b39e0 | [
"MIT"
] | 11 | 2020-03-05T10:23:43.000Z | 2021-10-11T02:15:28.000Z | scripts/fix_rttm.py | yuzhms/RPNSD | 031377388cb498c0dee080a76bd588a9ee8b39e0 | [
"MIT"
] | 13 | 2020-02-19T02:30:43.000Z | 2021-01-13T03:06:42.000Z | #!/usr/bin/env python3
# This script fixes some problems the RTTM file
# including invalid time boundaries and others
import os
import sys
import numpy as np
import argparse
def get_args():
    """Parse and return the command-line arguments for RTTM fixing.

    Positional: input RTTM path and output RTTM path.
    Optional: --channel (default 1) and --add_uttname (default 0).
    """
    parser = argparse.ArgumentParser(description="Fix RTTM file")
    parser.add_argument("rttm_file", type=str, help="Input RTTM file")
    parser.add_argument("rttm_output_file", type=str, help="Output RTTM file")
    parser.add_argument("--channel", type=int, default=1,
                        help="Channel information in the RTTM file")
    parser.add_argument("--add_uttname", type=int, default=0,
                        help="Whether to add uttname to spkname")
    return parser.parse_args()
def load_rttm(filename):
    """Read an RTTM file into a dict: uttname -> list of [start, end, spk].

    Fields used per line: index 1 (utterance), 3 (start), 4 (duration),
    7 (speaker).  End time is start + duration.  Lines whose duration is
    not positive are reported on stdout and skipped.
    """
    utt2seg = {}
    with open(filename, 'r') as fh:
        for line in fh:
            line = line.strip('\n')
            fields = line.split()
            uttname = fields[1]
            start_t = float(fields[3])
            duration = float(fields[4])
            spkname = fields[7]
            if duration <= 0:
                # Keep the original diagnostic output for malformed lines.
                print("Invalid line")
                print(line)
                continue
            utt2seg.setdefault(uttname, []).append(
                [start_t, start_t + duration, spkname])
    return utt2seg
def merge_same_spk(seg_array):
    """Merge overlapping or touching segments of the same speaker.

    ``seg_array`` is a numeric (N, 3) array of [start, end, spk] rows,
    assumed sorted by start time within each speaker (the caller sorts
    by start before calling).  Returns a new array, sorted by start
    time, with per-speaker overlaps collapsed into single segments.
    """
    merged_per_spk = []
    for spk in set(seg_array[:, 2]):
        # Boolean-mask indexing yields a copy holding only this speaker's rows.
        spk_rows = seg_array[seg_array[:, 2] == spk]
        merged = []
        for row in spk_rows:
            if merged and row[0] <= merged[-1][1]:
                # Overlaps (or touches) the last kept segment: extend its end.
                merged[-1][1] = max(merged[-1][1], row[1])
            else:
                merged.append(row)
        merged_per_spk.append(np.array(merged))
    result = np.concatenate(merged_per_spk)
    # Re-sort globally by start time after per-speaker merging.
    return result[result[:, 0].argsort(), :]
def fix_rttm(utt2seg):
    """Merge overlapping same-speaker segments for every utterance.

    Args:
        utt2seg: dict mapping uttname -> list of [start, end, spkname].

    Returns:
        A new dict of the same shape, with each utterance's overlapping
        segments of the same speaker merged and sorted by start time.
    """
    utt2seg_new = {}
    for utt in sorted(utt2seg.keys()):
        seg_list = utt2seg[utt]
        spk_list = sorted(set(seg[2] for seg in seg_list))
        # Map speaker name -> numeric id once, instead of calling the
        # O(n) spk_list.index() for every segment (O(n^2) overall).
        spk2idx = {spk: idx for idx, spk in enumerate(spk_list)}
        seg_array = np.array([[seg[0], seg[1], spk2idx[seg[2]]]
                              for seg in seg_list])
        # Sort by start time; merge_same_spk relies on this ordering.
        seg_array = seg_array[seg_array[:, 0].argsort(), :]
        seg_array_new = merge_same_spk(seg_array)
        utt2seg_new[utt] = [[row[0], row[1], spk_list[int(row[2])]]
                            for row in seg_array_new]
    return utt2seg_new
def write_rttm(utt2seg, rttm_output_file, add_uttname, channel):
    """Write ``utt2seg`` ({utt: [[start, end, spk], ...]}) in RTTM format.

    When ``add_uttname`` is truthy the speaker field becomes "<utt>_<spk>".
    Returns 0 on success.
    """
    with open(rttm_output_file, 'w') as fh:
        for utt in sorted(utt2seg.keys()):
            for start, end, spk in utt2seg[utt]:
                if add_uttname:
                    fh.write("SPEAKER {} {} {:.2f} {:.2f} <NA> <NA> {}_{} <NA> <NA>\n".format(
                        utt, channel, start, end - start, utt, spk))
                else:
                    fh.write("SPEAKER {} {} {:.2f} {:.2f} <NA> <NA> {} <NA> <NA>\n".format(
                        utt, channel, start, end - start, spk))
    return 0
def main():
    """Command-line entry point: load, fix, and rewrite an RTTM file."""
    args = get_args()
    # Read the input RTTM, merge overlapping same-speaker segments,
    # then write the repaired segmentation back out.
    fixed = fix_rttm(load_rttm(args.rttm_file))
    write_rttm(fixed, args.rttm_output_file, args.add_uttname, args.channel)
    return 0
# Allow use both as an importable module and as a command-line script.
if __name__ == "__main__":
    main()
| 36.548077 | 146 | 0.594843 | #!/usr/bin/env python3
# This script fixes some problems the RTTM file
# including invalid time boundaries and others
import os
import sys
import numpy as np
import argparse
def get_args():
    """Parse and return the command-line arguments for RTTM fixing.

    Positional: input RTTM path and output RTTM path.
    Optional: --channel (default 1) and --add_uttname (default 0).
    """
    parser = argparse.ArgumentParser(description="Fix RTTM file")
    parser.add_argument("rttm_file", type=str, help="Input RTTM file")
    parser.add_argument("rttm_output_file", type=str, help="Output RTTM file")
    parser.add_argument("--channel", type=int, default=1,
                        help="Channel information in the RTTM file")
    parser.add_argument("--add_uttname", type=int, default=0,
                        help="Whether to add uttname to spkname")
    return parser.parse_args()
def load_rttm(filename):
    """Read an RTTM file into a dict: uttname -> list of [start, end, spk].

    Fields used per line: index 1 (utterance), 3 (start), 4 (duration),
    7 (speaker).  End time is start + duration.  Lines whose duration is
    not positive are reported on stdout and skipped.
    """
    utt2seg = {}
    with open(filename, 'r') as fh:
        for line in fh:
            line = line.strip('\n')
            fields = line.split()
            uttname = fields[1]
            start_t = float(fields[3])
            duration = float(fields[4])
            spkname = fields[7]
            if duration <= 0:
                # Keep the original diagnostic output for malformed lines.
                print("Invalid line")
                print(line)
                continue
            utt2seg.setdefault(uttname, []).append(
                [start_t, start_t + duration, spkname])
    return utt2seg
def merge_same_spk(seg_array):
    """Merge overlapping or touching segments of the same speaker.

    ``seg_array`` is a numeric (N, 3) array of [start, end, spk] rows,
    assumed sorted by start time within each speaker (the caller sorts
    by start before calling).  Returns a new array, sorted by start
    time, with per-speaker overlaps collapsed into single segments.
    """
    merged_per_spk = []
    for spk in set(seg_array[:, 2]):
        # Boolean-mask indexing yields a copy holding only this speaker's rows.
        spk_rows = seg_array[seg_array[:, 2] == spk]
        merged = []
        for row in spk_rows:
            if merged and row[0] <= merged[-1][1]:
                # Overlaps (or touches) the last kept segment: extend its end.
                merged[-1][1] = max(merged[-1][1], row[1])
            else:
                merged.append(row)
        merged_per_spk.append(np.array(merged))
    result = np.concatenate(merged_per_spk)
    # Re-sort globally by start time after per-speaker merging.
    return result[result[:, 0].argsort(), :]
def fix_rttm(utt2seg):
    """Merge overlapping same-speaker segments for every utterance.

    Args:
        utt2seg: dict mapping uttname -> list of [start, end, spkname].

    Returns:
        A new dict of the same shape, with each utterance's overlapping
        segments of the same speaker merged and sorted by start time.
    """
    utt2seg_new = {}
    for utt in sorted(utt2seg.keys()):
        seg_list = utt2seg[utt]
        spk_list = sorted(set(seg[2] for seg in seg_list))
        # Map speaker name -> numeric id once, instead of calling the
        # O(n) spk_list.index() for every segment (O(n^2) overall).
        spk2idx = {spk: idx for idx, spk in enumerate(spk_list)}
        seg_array = np.array([[seg[0], seg[1], spk2idx[seg[2]]]
                              for seg in seg_list])
        # Sort by start time; merge_same_spk relies on this ordering.
        seg_array = seg_array[seg_array[:, 0].argsort(), :]
        seg_array_new = merge_same_spk(seg_array)
        utt2seg_new[utt] = [[row[0], row[1], spk_list[int(row[2])]]
                            for row in seg_array_new]
    return utt2seg_new
def write_rttm(utt2seg, rttm_output_file, add_uttname, channel):
    """Write ``utt2seg`` ({utt: [[start, end, spk], ...]}) in RTTM format.

    When ``add_uttname`` is truthy the speaker field becomes "<utt>_<spk>".
    Returns 0 on success.
    """
    with open(rttm_output_file, 'w') as fh:
        for utt in sorted(utt2seg.keys()):
            for start, end, spk in utt2seg[utt]:
                if add_uttname:
                    fh.write("SPEAKER {} {} {:.2f} {:.2f} <NA> <NA> {}_{} <NA> <NA>\n".format(
                        utt, channel, start, end - start, utt, spk))
                else:
                    fh.write("SPEAKER {} {} {:.2f} {:.2f} <NA> <NA> {} <NA> <NA>\n".format(
                        utt, channel, start, end - start, spk))
    return 0
def main():
    """Command-line entry point: load, fix, and rewrite an RTTM file."""
    args = get_args()
    # Read the input RTTM, merge overlapping same-speaker segments,
    # then write the repaired segmentation back out.
    fixed = fix_rttm(load_rttm(args.rttm_file))
    write_rttm(fixed, args.rttm_output_file, args.add_uttname, args.channel)
    return 0
# Allow use both as an importable module and as a command-line script.
if __name__ == "__main__":
    main()
| 0 | 0 |
78c5929686706d7b4c5c6bb30eecae092b7caa4b | 997 | py | Python | polymorphism/polymorphism_demos.py | Minkov/python-oop | db9651eef374c0e74c32cb6f2bf07c734cc1d051 | [
"MIT"
] | 3 | 2021-11-16T04:52:53.000Z | 2022-02-07T20:28:41.000Z | polymorphism/polymorphism_demos.py | Minkov/python-oop | db9651eef374c0e74c32cb6f2bf07c734cc1d051 | [
"MIT"
] | null | null | null | polymorphism/polymorphism_demos.py | Minkov/python-oop | db9651eef374c0e74c32cb6f2bf07c734cc1d051 | [
"MIT"
] | 1 | 2021-12-07T07:04:38.000Z | 2021-12-07T07:04:38.000Z | import math
class Shape:
    """Base class of the shape hierarchy; subclasses override area()."""

    def area(self):
        """Base implementation computes nothing (returns None)."""
        return None
class Rect(Shape):
    """Axis-aligned rectangle defined by its width and height."""

    def __init__(self, width, height):
        self.width = width
        self.height = height

    def area(self):
        """Area as width times height."""
        return self.height * self.width
class Circle(Shape):
    """Circle defined by its radius."""

    def __init__(self, radius):
        self.radius = radius

    def area(self):
        """Area as pi times the radius squared."""
        squared = self.radius * self.radius
        return squared * math.pi
def print_area(shape: Shape):
    """Print the area of *shape*, dispatching to its own area() method."""
    # The polymorphic call below replaces per-type dispatch like:
    # if isinstance(shape, Rect):
    #     print(shape.rect_area())
    # elif isinstance(shape, Circle):
    #     print(shape.circle_area())
    area_value = shape.area()
    print(area_value)
    # print(shape.width, shape.height)
# Demo: polymorphic dispatch -- print_area() works on any Shape subclass.
r = Rect(2, 5)
c = Circle(3)

shapes: list[Shape] = [
    r,
    c,
]

# Use a plain for-loop for the side effect; the original list
# comprehension built a throwaway list of None values (PEP 8:
# comprehensions are for building collections, loops for side effects).
for s in shapes:
    print_area(s)

print(isinstance(r, Rect))    # True: r is a Rect instance
print(isinstance(r, Circle))  # False: Rect and Circle are sibling classes
print(isinstance(r, Shape))   # True: Rect subclasses Shape
# print_area(2)  # would raise AttributeError: int has no area()
print(Rect.mro())
class Person:
    # Demo: say_hello is defined twice.  A class body executes top to
    # bottom, so the second def rebinds the name and the first is
    # discarded -- Python has no method overloading by signature.
    def say_hello(self):
        print("Hello! 1")
    def say_hello(self):
        print("Hello! 2")
# Only the last say_hello definition survives, so this prints "Hello! 2".
Person().say_hello()
| 16.616667 | 50 | 0.608826 | import math
class Shape:
    """Base class of the shape hierarchy; subclasses override area()."""

    def area(self):
        """Base implementation computes nothing (returns None)."""
        return None
class Rect(Shape):
    """Axis-aligned rectangle defined by its width and height."""

    def __init__(self, width, height):
        self.width = width
        self.height = height

    def area(self):
        """Area as width times height."""
        return self.height * self.width
class Circle(Shape):
    """Circle defined by its radius."""

    def __init__(self, radius):
        self.radius = radius

    def area(self):
        """Area as pi times the radius squared."""
        squared = self.radius * self.radius
        return squared * math.pi
def print_area(shape: Shape):
    """Print the area of *shape*, dispatching to its own area() method."""
    # The polymorphic call below replaces per-type dispatch like:
    # if isinstance(shape, Rect):
    #     print(shape.rect_area())
    # elif isinstance(shape, Circle):
    #     print(shape.circle_area())
    area_value = shape.area()
    print(area_value)
    # print(shape.width, shape.height)
# Demo: polymorphic dispatch -- print_area() works on any Shape subclass.
r = Rect(2, 5)
c = Circle(3)

shapes: list[Shape] = [
    r,
    c,
]

# Use a plain for-loop for the side effect; the original list
# comprehension built a throwaway list of None values (PEP 8:
# comprehensions are for building collections, loops for side effects).
for s in shapes:
    print_area(s)

print(isinstance(r, Rect))    # True: r is a Rect instance
print(isinstance(r, Circle))  # False: Rect and Circle are sibling classes
print(isinstance(r, Shape))   # True: Rect subclasses Shape
# print_area(2)  # would raise AttributeError: int has no area()
print(Rect.mro())
class Person:
    # Demo: say_hello is defined twice.  A class body executes top to
    # bottom, so the second def rebinds the name and the first is
    # discarded -- Python has no method overloading by signature.
    def say_hello(self):
        print("Hello! 1")
    def say_hello(self):
        print("Hello! 2")
# Only the last say_hello definition survives, so this prints "Hello! 2".
Person().say_hello()
| 0 | 0 |