code (stringlengths 22-1.05M) | apis (sequencelengths 1-3.31k) | extract_api (stringlengths 75-3.25M) |
---|---|---|
import sys
from contextlib import contextmanager
from os import path
import importlib
# https://stackoverflow.com/a/41904558/2281355
@contextmanager
def add_to_path(p):
import sys
old_path = sys.path
old_modules = sys.modules
sys.modules = old_modules.copy()
sys.path = sys.path[:]
sys.path.insert(0, p)
try:
yield
finally:
sys.path = old_path
sys.modules = old_modules
def extract_javascript(fileobj, keywords, comment_tags, options):
# import the original lexer before altering sys.path
# this way, our mocked tokenizer can still access the original lexer
# and utilities
import babel.messages.jslexer
with add_to_path(path.dirname(__file__)):
# replace the jslexer
        # first, reload the parent namespaces so they pick up the new sys.path...
import babel
importlib.reload(babel)
import babel.messages
importlib.reload(babel.messages)
# this should load our mocked jslexer
importlib.reload(babel.messages.jslexer)
        # babel.messages.extract is not changed, so we can use it directly
from babel.messages.extract import extract_javascript
yield from extract_javascript(fileobj, keywords, comment_tags, options)
| [
"importlib.reload",
"babel.messages.extract.extract_javascript",
"os.path.dirname",
"sys.path.insert"
] | [((307, 328), 'sys.path.insert', 'sys.path.insert', (['(0)', 'p'], {}), '(0, p)\n', (322, 328), False, 'import sys\n'), ((870, 893), 'importlib.reload', 'importlib.reload', (['babel'], {}), '(babel)\n', (886, 893), False, 'import importlib\n'), ((932, 964), 'importlib.reload', 'importlib.reload', (['babel.messages'], {}), '(babel.messages)\n', (948, 964), False, 'import importlib\n'), ((1019, 1059), 'importlib.reload', 'importlib.reload', (['babel.messages.jslexer'], {}), '(babel.messages.jslexer)\n', (1035, 1059), False, 'import importlib\n'), ((700, 722), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (712, 722), False, 'from os import path\n'), ((1214, 1274), 'babel.messages.extract.extract_javascript', 'extract_javascript', (['fileobj', 'keywords', 'comment_tags', 'options'], {}), '(fileobj, keywords, comment_tags, options)\n', (1232, 1274), False, 'from babel.messages.extract import extract_javascript\n')] |
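The sample above swaps in a mocked babel.messages.jslexer by temporarily rewriting sys.path and sys.modules and then reloading the package tree. A standalone sketch of that save/restore pattern using only the standard library (the directory name is illustrative):
import sys
from contextlib import contextmanager

@contextmanager
def isolated_imports(extra_path):
    # Snapshot the import state, prepend the extra directory, and restore
    # both on exit so temporarily (re)loaded modules do not leak out.
    saved_path, saved_modules = sys.path, sys.modules
    sys.modules = saved_modules.copy()
    sys.path = [extra_path] + sys.path
    try:
        yield
    finally:
        sys.path = saved_path
        sys.modules = saved_modules

with isolated_imports("/tmp/mock_modules"):
    import json  # resolved while the temporary search path is active
    print(json.__file__)
# After the block the original sys.modules mapping is back in place.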
import psycopg2
from psycopg2 import Error
connection = None
cursor = None
try:
# Connect to an existing database
connection = psycopg2.connect(user="sa",
password="<PASSWORD>",
host="127.0.0.1",
port="5432",
database="soildb")
# Create a cursor to perform database operations
cursor = connection.cursor()
# Print PostgreSQL details
print("PostgreSQL server information")
print(connection.get_dsn_parameters(), "\n")
# Executing a SQL query
cursor.execute("SELECT version();")
# Fetch result
record = cursor.fetchone()
print("You are connected to - ", record, "\n")
print("Retrieving soil records...")
cursor.execute("SELECT * from soil_profile limit 10")
# Fetch result
records = cursor.fetchall()
print(f"soil records = {records}")
except (Exception, Error) as error:
print("Error while connecting to PostgreSQL", error)
finally:
    if cursor:
        cursor.close()
    if connection:
        connection.close()
print("PostgreSQL connection is closed") | [
"psycopg2.connect"
] | [((104, 209), 'psycopg2.connect', 'psycopg2.connect', ([], {'user': '"""sa"""', 'password': '"""<PASSWORD>"""', 'host': '"""127.0.0.1"""', 'port': '"""5432"""', 'database': '"""soildb"""'}), "(user='sa', password='<PASSWORD>', host='127.0.0.1', port=\n '5432', database='soildb')\n", (120, 209), False, 'import psycopg2\n')] |
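A hedged sketch of the same query flow using context managers, so the cursor is closed and the connection released even when something fails; the credentials and the soil_profile table are the placeholders from the sample, not verified settings:
from contextlib import closing
import psycopg2
from psycopg2 import Error

def fetch_soil_rows(limit=10):
    # closing() guarantees connection.close(); the cursor context manager closes
    # the cursor ("with connection:" alone would only manage the transaction).
    try:
        with closing(psycopg2.connect(user="sa", password="<PASSWORD>",
                                        host="127.0.0.1", port="5432",
                                        database="soildb")) as connection:
            with connection.cursor() as cursor:
                cursor.execute("SELECT version();")
                print("You are connected to -", cursor.fetchone())
                cursor.execute("SELECT * FROM soil_profile LIMIT %s", (limit,))
                return cursor.fetchall()
    except (Exception, Error) as error:
        print("Error while connecting to PostgreSQL", error)
        return []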
# -*- encoding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 6
_modified_time = 1433361565.2110319
_template_filename='templates/webapps/galaxy/admin/tool_sheds.mako'
_template_uri='/webapps/galaxy/admin/tool_sheds.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='ascii'
_exports = ['stylesheets', 'title']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
# SOURCE LINE 2
ns = runtime.TemplateNamespace('__anon_0x7f903c23edd0', context._clean_inheritance_tokens(), templateuri=u'/message.mako', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, '__anon_0x7f903c23edd0')] = ns
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, u'/base.mako', _template_uri)
def render_body(context,**pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
_import_ns = {}
_mako_get_namespace(context, '__anon_0x7f903c23edd0')._populate(_import_ns, [u'render_msg'])
status = _import_ns.get('status', context.get('status', UNDEFINED))
h = _import_ns.get('h', context.get('h', UNDEFINED))
message = _import_ns.get('message', context.get('message', UNDEFINED))
trans = _import_ns.get('trans', context.get('trans', UNDEFINED))
render_msg = _import_ns.get('render_msg', context.get('render_msg', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer(u'\n')
# SOURCE LINE 2
__M_writer(u'\n\n')
# SOURCE LINE 4
__M_writer(u'\n\n')
# SOURCE LINE 9
__M_writer(u'\n\n')
# SOURCE LINE 11
if message:
# SOURCE LINE 12
__M_writer(u' ')
__M_writer(unicode(render_msg( message, status )))
__M_writer(u'\n')
pass
# SOURCE LINE 14
__M_writer(u'\n<div class="toolForm">\n <div class="toolFormTitle">Accessible Galaxy tool sheds</div>\n <div class="toolFormBody">\n <div class="form-row">\n <table class="grid">\n ')
# SOURCE LINE 20
shed_id = 0
__M_locals_builtin_stored = __M_locals_builtin()
__M_locals.update(__M_dict_builtin([(__M_key, __M_locals_builtin_stored[__M_key]) for __M_key in ['shed_id'] if __M_key in __M_locals_builtin_stored]))
__M_writer(u'\n')
# SOURCE LINE 21
for name, url in trans.app.tool_shed_registry.tool_sheds.items():
# SOURCE LINE 22
__M_writer(u' <tr class="libraryTitle">\n <td>\n <div style="float: left; margin-left: 1px;" class="menubutton split popup" id="dataset-')
# SOURCE LINE 24
__M_writer(unicode(shed_id))
__M_writer(u'-popup">\n <a class="view-info" href="')
# SOURCE LINE 25
__M_writer(unicode(h.url_for( controller='admin_toolshed', action='browse_tool_shed', tool_shed_url=url )))
__M_writer(u'">')
__M_writer(unicode(name))
__M_writer(u'</a>\n </div>\n <div popupmenu="dataset-')
# SOURCE LINE 27
__M_writer(unicode(shed_id))
__M_writer(u'-popup">\n <a class="action-button" href="')
# SOURCE LINE 28
__M_writer(unicode(h.url_for( controller='admin_toolshed', action='browse_tool_shed', tool_shed_url=url )))
__M_writer(u'">Browse valid repositories</a>\n <a class="action-button" href="')
# SOURCE LINE 29
__M_writer(unicode(h.url_for( controller='admin_toolshed', action='find_tools_in_tool_shed', tool_shed_url=url )))
__M_writer(u'">Search for valid tools</a>\n <a class="action-button" href="')
# SOURCE LINE 30
__M_writer(unicode(h.url_for( controller='admin_toolshed', action='find_workflows_in_tool_shed', tool_shed_url=url )))
__M_writer(u'">Search for workflows</a>\n </div>\n </td>\n </tr>\n ')
# SOURCE LINE 34
shed_id += 1
__M_locals_builtin_stored = __M_locals_builtin()
__M_locals.update(__M_dict_builtin([(__M_key, __M_locals_builtin_stored[__M_key]) for __M_key in ['shed_id'] if __M_key in __M_locals_builtin_stored]))
__M_writer(u'\n')
pass
# SOURCE LINE 36
__M_writer(u' </tr>\n </table>\n </div>\n <div style="clear: both"></div>\n </div>\n</div>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_stylesheets(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x7f903c23edd0')._populate(_import_ns, [u'render_msg'])
h = _import_ns.get('h', context.get('h', UNDEFINED))
parent = _import_ns.get('parent', context.get('parent', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 6
__M_writer(u'\n ')
# SOURCE LINE 7
__M_writer(unicode(parent.stylesheets()))
__M_writer(u'\n ')
# SOURCE LINE 8
__M_writer(unicode(h.css( "library" )))
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_title(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x7f903c23edd0')._populate(_import_ns, [u'render_msg'])
__M_writer = context.writer()
# SOURCE LINE 4
__M_writer(u'Configured Galaxy tool sheds')
return ''
finally:
context.caller_stack._pop_frame()
| [
"mako.cache.Cache",
"mako.runtime._inherit_from"
] | [((340, 377), 'mako.cache.Cache', 'cache.Cache', (['__name__', '_modified_time'], {}), '(__name__, _modified_time)\n', (351, 377), False, 'from mako import runtime, filters, cache\n'), ((1042, 1102), 'mako.runtime._inherit_from', 'runtime._inherit_from', (['context', 'u"""/base.mako"""', '_template_uri'], {}), "(context, u'/base.mako', _template_uri)\n", (1063, 1102), False, 'from mako import runtime, filters, cache\n')] |
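The sample above is the module Mako generates when it compiles templates/webapps/galaxy/admin/tool_sheds.mako; render_body and the render_* defs are compiler output, not hand-written code. A minimal sketch of the user-facing side of that machinery, assuming Mako's Template API (the template text is made up):
from mako.template import Template

template = Template("Hello, ${name}!")
print(template.render(name="tool shed admin"))  # -> Hello, tool shed admin!
# template.code exposes the generated Python module source, i.e. the same kind
# of render_body/_mako_* boilerplate shown in the sample above.
print(template.code.splitlines()[0])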
import time
import grbl
import pytest
import gcode
@pytest.fixture(scope="session")
def cnc(request):
grbl_cfg = {
"port": request.config.getoption("--port"),
"baudrate": request.config.getoption("--baudrate"),
}
cnc = grbl.Grbl(**grbl_cfg)
time.sleep(2)
cnc.reset()
# Metric
cnc.cmd("G21")
cnc.cmd("G91")
cnc.cmd("G0X5Y5F300")
# Set this to 0.
# TODO: Get end-stops installed.
cnc.cmd("G92X0Y0Z0")
yield cnc
cnc.cmd("G90")
cnc.cmd("G0X0Y0F300")
def test_default_line():
print(gcode.Line())
def test_00_row1(cnc):
prog = gcode.GCode(machine=cnc)
prog.G90()
prog.G0(X=0, Y=0)
prog.run()
cnc.reset()
@pytest.mark.parametrize("laser_power", [10, 50, 75, 100, 150, 200, 255])
def test_01_laser_power(cnc, laser_power):
prog = gcode.Line(power=laser_power, machine=cnc)
cnc.cmd("G91")
prog.run()
cnc.reset()
def test_02_row2(cnc):
prog = gcode.GCode(machine=cnc)
prog.G90()
prog.G0(X=0, Y=10)
prog.run()
cnc.reset()
@pytest.mark.parametrize("feed", [30, 60, 120, 180, 240, 300])
def test_03_laser_feed(cnc, feed):
prog = gcode.Line(power=255, feed=feed, machine=cnc)
cnc.cmd("G91")
prog.run()
cnc.reset()
def test_04_row3(cnc):
prog = gcode.GCode(machine=cnc)
prog.G90()
prog.G0(X=0, Y=20)
prog.run()
cnc.reset()
@pytest.mark.parametrize("dynamic_power", [True, False])
@pytest.mark.parametrize("power", [150, 200, 255])
@pytest.mark.parametrize("feed", [30, 180])
def test_05_laser_power_feed(cnc, dynamic_power, power, feed):
prog = gcode.Line(machine=cnc, dynamic_power=dynamic_power, power=power, feed=feed)
cnc.cmd("G91")
prog.run()
cnc.reset()
| [
"gcode.GCode",
"grbl.Grbl",
"pytest.fixture",
"time.sleep",
"pytest.mark.parametrize",
"gcode.Line"
] | [((56, 87), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (70, 87), False, 'import pytest\n'), ((709, 781), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""laser_power"""', '[10, 50, 75, 100, 150, 200, 255]'], {}), "('laser_power', [10, 50, 75, 100, 150, 200, 255])\n", (732, 781), False, 'import pytest\n'), ((1062, 1123), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""feed"""', '[30, 60, 120, 180, 240, 300]'], {}), "('feed', [30, 60, 120, 180, 240, 300])\n", (1085, 1123), False, 'import pytest\n'), ((1399, 1454), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dynamic_power"""', '[True, False]'], {}), "('dynamic_power', [True, False])\n", (1422, 1454), False, 'import pytest\n'), ((1456, 1505), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""power"""', '[150, 200, 255]'], {}), "('power', [150, 200, 255])\n", (1479, 1505), False, 'import pytest\n'), ((1507, 1549), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""feed"""', '[30, 180]'], {}), "('feed', [30, 180])\n", (1530, 1549), False, 'import pytest\n'), ((251, 272), 'grbl.Grbl', 'grbl.Grbl', ([], {}), '(**grbl_cfg)\n', (260, 272), False, 'import grbl\n'), ((277, 290), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (287, 290), False, 'import time\n'), ((613, 637), 'gcode.GCode', 'gcode.GCode', ([], {'machine': 'cnc'}), '(machine=cnc)\n', (624, 637), False, 'import gcode\n'), ((836, 878), 'gcode.Line', 'gcode.Line', ([], {'power': 'laser_power', 'machine': 'cnc'}), '(power=laser_power, machine=cnc)\n', (846, 878), False, 'import gcode\n'), ((965, 989), 'gcode.GCode', 'gcode.GCode', ([], {'machine': 'cnc'}), '(machine=cnc)\n', (976, 989), False, 'import gcode\n'), ((1170, 1215), 'gcode.Line', 'gcode.Line', ([], {'power': '(255)', 'feed': 'feed', 'machine': 'cnc'}), '(power=255, feed=feed, machine=cnc)\n', (1180, 1215), False, 'import gcode\n'), ((1302, 1326), 'gcode.GCode', 'gcode.GCode', ([], {'machine': 'cnc'}), '(machine=cnc)\n', (1313, 1326), False, 'import gcode\n'), ((1624, 1700), 'gcode.Line', 'gcode.Line', ([], {'machine': 'cnc', 'dynamic_power': 'dynamic_power', 'power': 'power', 'feed': 'feed'}), '(machine=cnc, dynamic_power=dynamic_power, power=power, feed=feed)\n', (1634, 1700), False, 'import gcode\n'), ((563, 575), 'gcode.Line', 'gcode.Line', ([], {}), '()\n', (573, 575), False, 'import gcode\n')] |
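The cnc fixture above reads --port and --baudrate from pytest's configuration; those options are not built in, so they would have to be registered through the pytest_addoption hook in a conftest.py. A minimal sketch (the default values are illustrative):
# conftest.py
def pytest_addoption(parser):
    parser.addoption("--port", action="store", default="/dev/ttyUSB0",
                     help="serial port of the GRBL controller")
    parser.addoption("--baudrate", action="store", default=115200, type=int,
                     help="baud rate for the GRBL serial connection")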
import numpy as np
import autoeap
from numpy.testing import assert_array_almost_equal
import os
PACKAGEDIR = os.path.abspath(os.path.dirname(__file__))
def test_raw_lightcurve():
time,flux,flux_err = autoeap.createlightcurve('EPIC220198696',campaign=8)
lc = np.genfromtxt(os.path.join(PACKAGEDIR,"EPIC220198696_c8_autoEAP.lc"),skip_header=1).T
assert_array_almost_equal(time,lc[0])
assert_array_almost_equal(flux,lc[1].astype(np.float32))
assert_array_almost_equal(flux_err,lc[2].astype(np.float32))
| [
"os.path.dirname",
"os.path.join",
"autoeap.createlightcurve",
"numpy.testing.assert_array_almost_equal"
] | [((126, 151), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (141, 151), False, 'import os\n'), ((206, 259), 'autoeap.createlightcurve', 'autoeap.createlightcurve', (['"""EPIC220198696"""'], {'campaign': '(8)'}), "('EPIC220198696', campaign=8)\n", (230, 259), False, 'import autoeap\n'), ((360, 398), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['time', 'lc[0]'], {}), '(time, lc[0])\n', (385, 398), False, 'from numpy.testing import assert_array_almost_equal\n'), ((283, 338), 'os.path.join', 'os.path.join', (['PACKAGEDIR', '"""EPIC220198696_c8_autoEAP.lc"""'], {}), "(PACKAGEDIR, 'EPIC220198696_c8_autoEAP.lc')\n", (295, 338), False, 'import os\n')] |
import logging
import os
import urllib3
import ast
from common.utils.requests import http
from common.utils.networks import ETH
from common.services import cointainer_web3 as web3
from common.utils.ethereum import ERC20_ABI
logger = logging.getLogger('watchtower.common.services.unchained')
class UnchainedClient(object):
def __init__(self, network):
baseurl = self.get_baseurl(network)
if baseurl is None:
raise Exception(
'UnchainedClient is not supported for network: {}'.format(network)
)
self.network = network
self.baseurl = baseurl
@staticmethod
def get_baseurl(network):
return {
ETH: os.getenv('UNCHAINED_ETH_URL')
}.get(network)
def get_balances(self, address, account_id, supported_tokens=None):
if not address:
logger.error("Unable to get %s balances for account: %s. No associated address.", self.network, account_id)
return dict()
resp = http.get('{}/api/v2/address/{}?details=tokenBalances'.format(self.baseurl, address)).json_data
balances = {token.get('contract').lower(): token.get('balance') for token in resp.get('tokens', list())}
balances[ETH] = resp.get('balance')
try:
weth_contract_address = supported_tokens.get('WETH') if supported_tokens and supported_tokens.get('WETH') else None
if weth_contract_address:
if balances.get(weth_contract_address) is None:
weth_address = web3.toChecksumAddress(weth_contract_address)
if weth_address:
contract = web3.eth.contract(address=weth_address, abi=ERC20_ABI)
balance = contract.functions.balanceOf(address).call()
balances[weth_address.lower()] = balance
except Exception as e:
logger.error("Failed to fetch WETH: %s balance for address: %s", weth_contract_address, address)
logger.error(e)
return balances
def get_client(network):
return UnchainedClient(network)
| [
"common.services.cointainer_web3.eth.contract",
"common.services.cointainer_web3.toChecksumAddress",
"os.getenv",
"logging.getLogger"
] | [((236, 293), 'logging.getLogger', 'logging.getLogger', (['"""watchtower.common.services.unchained"""'], {}), "('watchtower.common.services.unchained')\n", (253, 293), False, 'import logging\n'), ((705, 735), 'os.getenv', 'os.getenv', (['"""UNCHAINED_ETH_URL"""'], {}), "('UNCHAINED_ETH_URL')\n", (714, 735), False, 'import os\n'), ((1550, 1595), 'common.services.cointainer_web3.toChecksumAddress', 'web3.toChecksumAddress', (['weth_contract_address'], {}), '(weth_contract_address)\n', (1572, 1595), True, 'from common.services import cointainer_web3 as web3\n'), ((1668, 1722), 'common.services.cointainer_web3.eth.contract', 'web3.eth.contract', ([], {'address': 'weth_address', 'abi': 'ERC20_ABI'}), '(address=weth_address, abi=ERC20_ABI)\n', (1685, 1722), True, 'from common.services import cointainer_web3 as web3\n')] |
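get_balances above falls back to a direct ERC-20 balanceOf call through web3 when the indexer response omits the WETH balance. A standalone sketch of that fallback; the RPC URL and addresses are placeholders, and the camelCase toChecksumAddress assumes an older web3.py release, matching the sample:
from web3 import Web3

# Minimal ABI fragment: only the balanceOf view function is needed here.
ERC20_BALANCE_ABI = [{
    "constant": True,
    "inputs": [{"name": "_owner", "type": "address"}],
    "name": "balanceOf",
    "outputs": [{"name": "balance", "type": "uint256"}],
    "type": "function",
}]

def erc20_balance(rpc_url, token_address, holder_address):
    w3 = Web3(Web3.HTTPProvider(rpc_url))
    token = w3.eth.contract(address=Web3.toChecksumAddress(token_address),
                            abi=ERC20_BALANCE_ABI)
    return token.functions.balanceOf(Web3.toChecksumAddress(holder_address)).call()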
from django.core.management.base import BaseCommand
from rest_framework.authtoken.models import Token
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--force',
action='store_true',
help='WARNING - Understand that this logs out and *PERMANENTLY* DELETES THE TOKENS FOR ALL USERS',
)
def handle(self, *args, **options):
if not options["force"]:
print("Include --force if you understand that this will log out all users.")
else:
Token.objects.all().delete()
print("All auth tokens deleted.")
| [
"rest_framework.authtoken.models.Token.objects.all"
] | [((573, 592), 'rest_framework.authtoken.models.Token.objects.all', 'Token.objects.all', ([], {}), '()\n', (590, 592), False, 'from rest_framework.authtoken.models import Token\n')] |
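The management command above only acts when --force is passed. A short usage sketch; the command name depends on the file name under management/commands/ and is assumed here to be drop_auth_tokens:
from django.core.management import call_command

call_command("drop_auth_tokens", force=True)  # deletes every DRF auth token
call_command("drop_auth_tokens")              # only prints the --force reminder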
from config import Config
from main import determine_colour, return_tile_colour, coordinates_to_notation
def test_determine_colour():
assert determine_colour(0, 0)
assert not determine_colour(0, 7)
assert not determine_colour(7, 0)
assert determine_colour(7, 7)
def test_return_tile_colour():
assert return_tile_colour(True) == Config.COLOUR_WHITE
assert return_tile_colour(False) == Config.COLOUR_BLACK
def test_coordinates_to_notation():
assert coordinates_to_notation(0, 0) == "A8"
assert coordinates_to_notation(7, 0) == "H8"
assert coordinates_to_notation(0, 7) == "A1"
assert coordinates_to_notation(7, 7) == "H1"
| [
"main.return_tile_colour",
"main.determine_colour",
"main.coordinates_to_notation"
] | [((147, 169), 'main.determine_colour', 'determine_colour', (['(0)', '(0)'], {}), '(0, 0)\n', (163, 169), False, 'from main import determine_colour, return_tile_colour, coordinates_to_notation\n'), ((257, 279), 'main.determine_colour', 'determine_colour', (['(7)', '(7)'], {}), '(7, 7)\n', (273, 279), False, 'from main import determine_colour, return_tile_colour, coordinates_to_notation\n'), ((185, 207), 'main.determine_colour', 'determine_colour', (['(0)', '(7)'], {}), '(0, 7)\n', (201, 207), False, 'from main import determine_colour, return_tile_colour, coordinates_to_notation\n'), ((223, 245), 'main.determine_colour', 'determine_colour', (['(7)', '(0)'], {}), '(7, 0)\n', (239, 245), False, 'from main import determine_colour, return_tile_colour, coordinates_to_notation\n'), ((324, 348), 'main.return_tile_colour', 'return_tile_colour', (['(True)'], {}), '(True)\n', (342, 348), False, 'from main import determine_colour, return_tile_colour, coordinates_to_notation\n'), ((383, 408), 'main.return_tile_colour', 'return_tile_colour', (['(False)'], {}), '(False)\n', (401, 408), False, 'from main import determine_colour, return_tile_colour, coordinates_to_notation\n'), ((481, 510), 'main.coordinates_to_notation', 'coordinates_to_notation', (['(0)', '(0)'], {}), '(0, 0)\n', (504, 510), False, 'from main import determine_colour, return_tile_colour, coordinates_to_notation\n'), ((530, 559), 'main.coordinates_to_notation', 'coordinates_to_notation', (['(7)', '(0)'], {}), '(7, 0)\n', (553, 559), False, 'from main import determine_colour, return_tile_colour, coordinates_to_notation\n'), ((579, 608), 'main.coordinates_to_notation', 'coordinates_to_notation', (['(0)', '(7)'], {}), '(0, 7)\n', (602, 608), False, 'from main import determine_colour, return_tile_colour, coordinates_to_notation\n'), ((628, 657), 'main.coordinates_to_notation', 'coordinates_to_notation', (['(7)', '(7)'], {}), '(7, 7)\n', (651, 657), False, 'from main import determine_colour, return_tile_colour, coordinates_to_notation\n')] |
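The assertions above pin down the board convention: column 0 maps to file A, row 0 to rank 8, and square colour alternates with index parity. Implementations consistent with those tests (the real main.py may differ):
def determine_colour(column: int, row: int) -> bool:
    # Squares where column and row share parity get the first colour.
    return (column + row) % 2 == 0

def coordinates_to_notation(column: int, row: int) -> str:
    # Files run A..H left to right, ranks count down from 8 to 1.
    return f"{chr(ord('A') + column)}{8 - row}"

assert determine_colour(0, 0) and not determine_colour(0, 7)
assert coordinates_to_notation(0, 0) == "A8"
assert coordinates_to_notation(7, 7) == "H1"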
# Essential modules import
import json
from paho.mqtt.client import *
# Variables modules import
from tools import *
# Importing custom Utility modules
from utility.logger import MyLogger
log = MyLogger("mqtt") # Logger
class Marker(Client):
'''
Client Marker : Broker client to send and/or receive MQTT publications.
'''
def __init__(self):
Client.__init__(self)
if USER: # Set authentication, if set
self.username_pw_set(USER, PASSW)
self.connect(IP_BROKER, PORT) # Connecting Client to Broker
self.obj = []
def on_connect(self, client, userdata, flags, rc):
'''
        Do something when the Client successfully connects to the Broker.
'''
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
for topic in SUBSCRIPTIONS:
self.subscribe(topic)
def on_message(self, client, userdata, msg):
'''
Client receiving a publication.
'''
try:
topic = msg.topic
try: # Checks if payload is a valid JSON
j = json.loads(msg.payload.decode("utf-8")); log_message = ''
for key, value in j.items():
log_message += "[%s > %s]" % (key, value)
message = j
json_check = True
except:
log_message = msg.payload
message = msg.payload
json_check = False
log.info("%s received from %s ~ %s" % (log.timestamp(), topic, log_message))
if self.obj: # Check if any object is connected to MQTT
for _obj in self.obj:
                    _obj.receivedMQTT(topic, message, json=json_check)  # receivedMQTT() callback on each attached object
except Exception as e:
log.warning("Failed something..")
log.exception(str(e))
def send_message(self, topic, message):
'''
        Publish a message to the destination topic.
topic : <STRING> Topic where to send.
message : <STRING> Payload to send.
'''
try:
if type(message) == dict:
self.publish(topic, json.dumps(message))
else:
self.publish(topic, message)
            log.info("Successful")
#log.info("%s published to %s ~ %s" % (log.timestamp(), topic, message))
except Exception as e:
log.warning("Failed something..")
log.exception(str(e))
def attach(self, _object):
'''
        Attach an object to receive MQTT publications; it must implement:
            def receivedMQTT(self, topic, message, json=False):
                # handle the publication here
json = <BOOL> True when message is a stringified JSON.
'''
try:
self.obj.append(_object)
log.info("Attached to broker")
except Exception as e:
log.exception(str(e))
log.error("Not attached to broker")
| [
"utility.logger.MyLogger",
"json.dumps"
] | [((194, 210), 'utility.logger.MyLogger', 'MyLogger', (['"""mqtt"""'], {}), "('mqtt')\n", (202, 210), False, 'from utility.logger import MyLogger\n'), ((2320, 2339), 'json.dumps', 'json.dumps', (['message'], {}), '(message)\n', (2330, 2339), False, 'import json\n')] |
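Marker.attach above accepts any object that implements receivedMQTT(topic, message, json=...). A sketch of such a subscriber and how it would be wired up; the broker settings come from the tools module, so the connection lines are left commented out:
class TemperatureLogger:
    # Any object with this method signature can be attached to Marker.
    def receivedMQTT(self, topic, message, json=False):
        if json:
            print("%s -> %s" % (topic, message.get("temperature")))
        else:
            print("%s -> raw payload %r" % (topic, message))

# marker = Marker()                   # connects using IP_BROKER/PORT from tools
# marker.attach(TemperatureLogger())  # register the subscriber
# marker.loop_forever()               # paho-mqtt network loop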
import os, sys, ConfigParser
sys.path.insert(0, os.path.join(os.getcwd(), "Jinja2-2.3-py2.5.egg"))
sys.path.append(os.path.join(os.getcwd(), "netifaces-0.5-py2.5-linux-i686.egg"))
import jinja2, netifaces
_config = ConfigParser.SafeConfigParser()
_config.read("config.ini")
# iptables forwarding configuration generator
def update(data):
jinja = jinja2.Environment(loader=jinja2.loaders.FileSystemLoader("template"))
ip={}
for iface in netifaces.interfaces():
try:
ip[iface] = netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['addr']
except KeyError:
pass
except ValueError:
pass
d = jinja.get_template("vmfw.sh").render(port=data, ip=ip, vmip=_config.get("iface", "vmIP"))
open("sysconf/vmfw.sh", "w").write(d)
def restart():
os.system("sysconf/vmfw.sh")
if __name__ == "__main__":
from models import PortForward
update(PortForward.select())
restart() | [
"netifaces.interfaces",
"ConfigParser.SafeConfigParser",
"models.PortForward.select",
"os.getcwd",
"os.system",
"netifaces.ifaddresses",
"jinja2.loaders.FileSystemLoader"
] | [((221, 252), 'ConfigParser.SafeConfigParser', 'ConfigParser.SafeConfigParser', ([], {}), '()\n', (250, 252), False, 'import os, sys, ConfigParser\n'), ((453, 475), 'netifaces.interfaces', 'netifaces.interfaces', ([], {}), '()\n', (473, 475), False, 'import jinja2, netifaces\n'), ((776, 804), 'os.system', 'os.system', (['"""sysconf/vmfw.sh"""'], {}), "('sysconf/vmfw.sh')\n", (785, 804), False, 'import os, sys, ConfigParser\n'), ((62, 73), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (71, 73), False, 'import os, sys, ConfigParser\n'), ((130, 141), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (139, 141), False, 'import os, sys, ConfigParser\n'), ((877, 897), 'models.PortForward.select', 'PortForward.select', ([], {}), '()\n', (895, 897), False, 'from models import PortForward\n'), ((385, 428), 'jinja2.loaders.FileSystemLoader', 'jinja2.loaders.FileSystemLoader', (['"""template"""'], {}), "('template')\n", (416, 428), False, 'import jinja2, netifaces\n'), ((501, 529), 'netifaces.ifaddresses', 'netifaces.ifaddresses', (['iface'], {}), '(iface)\n', (522, 529), False, 'import jinja2, netifaces\n')] |
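update() above gathers one IPv4 address per interface with netifaces before rendering the template. The same gathering step as a standalone Python 3 sketch (interface names depend on the host):
import netifaces

def ipv4_addresses():
    addresses = {}
    for iface in netifaces.interfaces():
        try:
            addresses[iface] = netifaces.ifaddresses(iface)[netifaces.AF_INET][0]["addr"]
        except (KeyError, ValueError):
            pass  # interface has no IPv4 address assigned
    return addresses

print(ipv4_addresses())  # e.g. {'lo': '127.0.0.1', 'eth0': '192.168.1.10'}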
from praline.client.project.pipeline.cache import Cache
from praline.client.project.pipeline.stage_resources import StageResources
from praline.client.project.pipeline.stages.stage import Stage
from praline.client.repository.remote_proxy import RemoteProxy
from praline.common.algorithm.graph.instance_traversal import multiple_instance_depth_first_traversal
from praline.common.algorithm.graph.simple_traversal import root_last_traversal
from praline.common.file_system import FileSystem, join
from praline.common.tracing import trace
from typing import Any, Dict, List
class MultipleSuppliersError(Exception):
pass
class CyclicStagesError(Exception):
pass
class UnsatisfiableStageError(Exception):
pass
class ResourceNotSuppliedError(Exception):
pass
def get_stage_program_arguments(stage: str, program_arguments: Dict[str, Any]):
arguments = {
'global': program_arguments['global'],
'byStage': program_arguments['byStage'].get(stage, {})
}
return arguments
@trace(parameters=[])
def create_pipeline(target_stage: str,
stages: Dict[str, Stage],
file_system: FileSystem,
program_arguments: Dict[str, Any],
configuration: Dict[str, Any]) -> List[str]:
def on_cycle(cycle: List[str]):
raise CyclicStagesError(f"cyclic dependencies for stages {cycle}")
def visitor(stage_name: str):
requirements_set = stages[stage_name].requirements
required_stages_set = []
for requirements in requirements_set:
required_stages = []
for requirement in requirements:
suppliers = [stage.name for stage in stages.values() if requirement in stage.output]
if not suppliers:
raise UnsatisfiableStageError(f"stage '{stage_name}' cannot be satisfied because no stage supplies resource '{requirement}'")
elif len(suppliers) > 1:
raise MultipleSuppliersError(f"resource '{requirement}' is supplied by multiple stages: {', '.join(suppliers)}")
elif suppliers[0] not in required_stages:
required_stages.append(suppliers[0])
required_stages_set.append(required_stages)
return required_stages_set
def validator(stage: str, subtree: Dict[str, List[str]]):
stage_program_arguments = get_stage_program_arguments(stage, program_arguments)
return stages[stage].predicate(file_system, stage_program_arguments, configuration)
trees = multiple_instance_depth_first_traversal(target_stage, visitor, validator, on_cycle)
if trees:
stage_subtree = trees[0]
stage_order = root_last_traversal(target_stage, lambda n: stage_subtree[n][1])
pipeline = [(stage_subtree[stage][0], stage) for stage in stage_order]
return pipeline
raise UnsatisfiableStageError(f"could not create a pipeline to satisfy stage '{target_stage}'")
@trace
def invoke_stage(target_stage: str, stages: Dict[str, Stage], file_system: FileSystem, program_arguments: Dict[str, Any], configuration: Dict[str, Any], remote_proxy: RemoteProxy) -> None:
resources = {}
pipeline = create_pipeline(target_stage, stages, file_system, program_arguments, configuration)
project_directory = file_system.get_working_directory()
cache_path = join(project_directory, 'target', 'cache.pickle')
for activation, stage_name in pipeline:
stage = stages[stage_name]
stage_resources = StageResources(stage_name, activation, {resource : resources[resource] for resource in stage.requirements[activation]}, stage.output)
stage_program_arguments = get_stage_program_arguments(stage_name, program_arguments)
if stage.cacheable:
with Cache(file_system, cache_path) as cache:
cache[stage_name] = stage_cache = cache.get(stage_name, {})
stage.invoker(file_system, stage_resources, stage_cache, stage_program_arguments, configuration, remote_proxy)
else:
stage.invoker(file_system, stage_resources, None, stage_program_arguments, configuration, remote_proxy)
for resource in stage.output:
if resource not in stage_resources:
raise ResourceNotSuppliedError(f"stage '{stage_name}' didn't supply resource '{resource}'")
resources.update(stage_resources.resources)
| [
"praline.common.file_system.join",
"praline.client.project.pipeline.cache.Cache",
"praline.common.algorithm.graph.instance_traversal.multiple_instance_depth_first_traversal",
"praline.common.algorithm.graph.simple_traversal.root_last_traversal",
"praline.client.project.pipeline.stage_resources.StageResources",
"praline.common.tracing.trace"
] | [((1017, 1037), 'praline.common.tracing.trace', 'trace', ([], {'parameters': '[]'}), '(parameters=[])\n', (1022, 1037), False, 'from praline.common.tracing import trace\n'), ((2571, 2658), 'praline.common.algorithm.graph.instance_traversal.multiple_instance_depth_first_traversal', 'multiple_instance_depth_first_traversal', (['target_stage', 'visitor', 'validator', 'on_cycle'], {}), '(target_stage, visitor, validator,\n on_cycle)\n', (2610, 2658), False, 'from praline.common.algorithm.graph.instance_traversal import multiple_instance_depth_first_traversal\n'), ((3398, 3447), 'praline.common.file_system.join', 'join', (['project_directory', '"""target"""', '"""cache.pickle"""'], {}), "(project_directory, 'target', 'cache.pickle')\n", (3402, 3447), False, 'from praline.common.file_system import FileSystem, join\n'), ((2726, 2790), 'praline.common.algorithm.graph.simple_traversal.root_last_traversal', 'root_last_traversal', (['target_stage', '(lambda n: stage_subtree[n][1])'], {}), '(target_stage, lambda n: stage_subtree[n][1])\n', (2745, 2790), False, 'from praline.common.algorithm.graph.simple_traversal import root_last_traversal\n'), ((3554, 3690), 'praline.client.project.pipeline.stage_resources.StageResources', 'StageResources', (['stage_name', 'activation', '{resource: resources[resource] for resource in stage.requirements[activation]}', 'stage.output'], {}), '(stage_name, activation, {resource: resources[resource] for\n resource in stage.requirements[activation]}, stage.output)\n', (3568, 3690), False, 'from praline.client.project.pipeline.stage_resources import StageResources\n'), ((3826, 3856), 'praline.client.project.pipeline.cache.Cache', 'Cache', (['file_system', 'cache_path'], {}), '(file_system, cache_path)\n', (3831, 3856), False, 'from praline.client.project.pipeline.cache import Cache\n')] |
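create_pipeline above orders stages so that every stage supplying a resource runs before the stages that require it, and it rejects cyclic dependencies. A minimal illustration of that ordering idea using the standard library's graphlib (the stage names are invented; praline uses its own traversal helpers):
from graphlib import CycleError, TopologicalSorter

# Map each stage to the set of stages it depends on.
stage_dependencies = {
    "link": {"compile"},
    "compile": {"fetch_headers"},
    "fetch_headers": set(),
}

try:
    print(list(TopologicalSorter(stage_dependencies).static_order()))
    # -> ['fetch_headers', 'compile', 'link']
except CycleError as error:
    print("cyclic dependencies:", error)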
import sys
import argparse
from textwrap import dedent
def main(kwargs):
with kwargs["infile"] as indata,\
kwargs["ofile"] as odata:
for line in indata:
odata.write(line)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog = "argparse_example", # default is sys.argv[0],
formatter_class = argparse.RawDescriptionHelpFormatter,
description = dedent('''
Please do not mess up this text!
--------------------------------
I have indented it
exactly the way
I want it
'''),
epilog = "Contact:<EMAIL>"
)
parser.add_argument("infile", nargs="?", type=argparse.FileType("r"), default=sys.stdin)
parser.add_argument("ofile", nargs="?", type=argparse.FileType("w"), default=sys.stdout)
args = parser.parse_args()
main(vars(args)) | [
"textwrap.dedent",
"argparse.FileType"
] | [((426, 626), 'textwrap.dedent', 'dedent', (['"""\n Please do not mess up this text!\n --------------------------------\n I have indented it\n exactly the way\n I want it\n """'], {}), '(\n """\n Please do not mess up this text!\n --------------------------------\n I have indented it\n exactly the way\n I want it\n """\n )\n', (432, 626), False, 'from textwrap import dedent\n'), ((708, 730), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (725, 730), False, 'import argparse\n'), ((800, 822), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (817, 822), False, 'import argparse\n')] |
# Obliterate unused leaf bones in VRoid models!
import bpy
context = bpy.context
obj = context.object
# By default, VRM Importer includes leaf bones automatically.
# It's cool and stuff, but it's not necessary for Blender, and will spew out
# scary long warning when imported to UE4.
# Use this script to obliterate those leaf bones in one click.
if obj.type == 'ARMATURE':
armature = obj.data
bpy.ops.object.mode_set(mode='EDIT')
for bone in armature.edit_bones:
if bone.name.endswith("_end") :
armature.edit_bones.remove(bone)
else:
continue
bpy.ops.object.mode_set(mode='OBJECT') | [
"bpy.ops.object.mode_set"
] | [((402, 438), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""EDIT"""'}), "(mode='EDIT')\n", (425, 438), False, 'import bpy\n'), ((578, 616), 'bpy.ops.object.mode_set', 'bpy.ops.object.mode_set', ([], {'mode': '"""OBJECT"""'}), "(mode='OBJECT')\n", (601, 616), False, 'import bpy\n')] |
# ------------------------------------------------------------------------------
# Hippocampus segmentation task for the HarP dataset
# (http://www.hippocampal-protocol.net/SOPs/index.php)
# ------------------------------------------------------------------------------
import os
import re
import SimpleITK as sitk
import nibabel as nib
import numpy as np
import mp.data.datasets.dataset_utils as du
from mp.data.datasets.dataset_segmentation import SegmentationDataset, SegmentationInstance
from mp.paths import storage_data_path
from mp.utils.mask_bounding_box import mask_bbox_3D
from mp.utils.load_restore import join_path
class HarP(SegmentationDataset):
r"""Class for the segmentation of the HarP dataset,
found at http://www.hippocampal-protocol.net/SOPs/index.php
with the masks as .nii files and the scans as .mnc files.
"""
def __init__(self, subset=None, hold_out_ixs=None):
# Part is either: "Training", "Validation" or "All"
default = {"Part": "All"}
if subset is not None:
default.update(subset)
subset = default
else:
subset = default
if hold_out_ixs is None:
hold_out_ixs = []
global_name = 'HarP'
name = du.get_dataset_name(global_name, subset)
dataset_path = os.path.join(storage_data_path, global_name)
original_data_path = du.get_original_data_path(global_name)
# Build instances
instances = []
folders = []
if subset["Part"] in ["Training", "All"]:
folders.append(("100", "Training"))
if subset["Part"] in ["Validation", "All"]:
folders.append(("35", "Validation"))
for orig_folder, dst_folder in folders:
# Paths with the sub-folder for the current subset
dst_folder_path = os.path.join(dataset_path, dst_folder)
# Copy the images if not done already
if not os.path.isdir(dst_folder_path):
_extract_images(original_data_path, dst_folder_path, orig_folder)
# Fetch all patient/study names
study_names = set(file_name.split('.nii')[0].split('_gt')[0] for file_name
in os.listdir(os.path.join(dataset_path, dst_folder)))
for study_name in study_names:
instances.append(SegmentationInstance(
x_path=os.path.join(dataset_path, dst_folder, study_name + '.nii.gz'),
y_path=os.path.join(dataset_path, dst_folder, study_name + '_gt.nii.gz'),
name=study_name,
group_id=None
))
label_names = ['background', 'hippocampus']
super().__init__(instances, name=name, label_names=label_names,
modality='T1w MRI', nr_channels=1, hold_out_ixs=hold_out_ixs)
def _extract_images(source_path, target_path, subset):
r"""Extracts images, merges mask labels (if specified) and saves the
modified images.
"""
# Folder 100 is for training (100 subjects), 35 subjects are left over for validation
affine = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
images_path = os.path.join(source_path, subset)
labels_path = os.path.join(source_path, f'Labels_{subset}_NIFTI')
# Create directories
if not os.path.isdir(target_path):
os.makedirs(target_path)
files_with_swapped_masks = {"ADNI_007_S_1304_74384_ACPC.mnc",
"ADNI_016_S_4121_280306_ACPC.mnc",
"ADNI_029_S_4279_265980_ACPC.mnc",
"ADNI_136_S_0429_109839_ACPC.mnc"}
# For each MRI, there are 2 segmentation (left and right hippocampus)
for filename in os.listdir(images_path):
# Loading the .mnc file and converting it to a .nii.gz file
minc = nib.load(os.path.join(images_path, filename))
x: np.array = nib.Nifti1Image(np.asarray(minc.dataobj), affine=affine).get_data()
# We need to recover the study name of the image name to construct the name of the segmentation files
match = re.match(r"ADNI_[0-9]+_S_[0-9]+_[0-9]+", filename)
if match is None:
raise Exception(f"A file ({filename}) does not match the expected file naming format")
# For each side of the brain
for side in ("_L", "_R"):
study_name = match[0] + side
y = sitk.ReadImage(os.path.join(labels_path, study_name + ".nii"))
y = sitk.GetArrayFromImage(y)
# Shape expected: (189, 233, 197)
assert x.shape == y.shape
# BUGFIX: Some segmentation have some weird values eg {26896.988, 26897.988} instead of {0, 1}
y = (y - np.min(y.flat)).astype(np.uint32)
# Cropping bounds computed to fit the ground truth
if (side == "_L") ^ (filename in files_with_swapped_masks):
y = y[40: 104, 78: 142, 49: 97]
x_cropped = x[40: 104, 78: 142, 49: 97]
else:
y = y[40: 104, 78: 142, 97: 145]
x_cropped = x[40: 104, 78: 142, 97: 145]
# Need to do move an axis as numpy coordinates are [z, y, x] and SimpleITK's are [x, y, z]
x_cropped = np.moveaxis(x_cropped, [0, 2], [2, 0])
# Changing the study name if needed
if filename in files_with_swapped_masks:
study_name = match[0] + ("_R" if side == "_L" else "_L")
# Save new images so they can be loaded directly
sitk.WriteImage(sitk.GetImageFromArray(y),
join_path([target_path, study_name + "_gt.nii.gz"]))
nib.save(nib.Nifti1Image(x_cropped, affine),
join_path([target_path, study_name + ".nii.gz"]))
| [
"nibabel.Nifti1Image",
"numpy.moveaxis",
"os.makedirs",
"mp.data.datasets.dataset_utils.get_dataset_name",
"os.path.isdir",
"numpy.asarray",
"re.match",
"SimpleITK.GetArrayFromImage",
"mp.data.datasets.dataset_utils.get_original_data_path",
"numpy.min",
"numpy.array",
"SimpleITK.GetImageFromArray",
"mp.utils.load_restore.join_path",
"os.path.join",
"os.listdir"
] | [((3132, 3198), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (3140, 3198), True, 'import numpy as np\n'), ((3287, 3320), 'os.path.join', 'os.path.join', (['source_path', 'subset'], {}), '(source_path, subset)\n', (3299, 3320), False, 'import os\n'), ((3339, 3390), 'os.path.join', 'os.path.join', (['source_path', 'f"""Labels_{subset}_NIFTI"""'], {}), "(source_path, f'Labels_{subset}_NIFTI')\n", (3351, 3390), False, 'import os\n'), ((3852, 3875), 'os.listdir', 'os.listdir', (['images_path'], {}), '(images_path)\n', (3862, 3875), False, 'import os\n'), ((1253, 1293), 'mp.data.datasets.dataset_utils.get_dataset_name', 'du.get_dataset_name', (['global_name', 'subset'], {}), '(global_name, subset)\n', (1272, 1293), True, 'import mp.data.datasets.dataset_utils as du\n'), ((1317, 1361), 'os.path.join', 'os.path.join', (['storage_data_path', 'global_name'], {}), '(storage_data_path, global_name)\n', (1329, 1361), False, 'import os\n'), ((1391, 1429), 'mp.data.datasets.dataset_utils.get_original_data_path', 'du.get_original_data_path', (['global_name'], {}), '(global_name)\n', (1416, 1429), True, 'import mp.data.datasets.dataset_utils as du\n'), ((3428, 3454), 'os.path.isdir', 'os.path.isdir', (['target_path'], {}), '(target_path)\n', (3441, 3454), False, 'import os\n'), ((3464, 3488), 'os.makedirs', 'os.makedirs', (['target_path'], {}), '(target_path)\n', (3475, 3488), False, 'import os\n'), ((4223, 4272), 're.match', 're.match', (['"""ADNI_[0-9]+_S_[0-9]+_[0-9]+"""', 'filename'], {}), "('ADNI_[0-9]+_S_[0-9]+_[0-9]+', filename)\n", (4231, 4272), False, 'import re\n'), ((1842, 1880), 'os.path.join', 'os.path.join', (['dataset_path', 'dst_folder'], {}), '(dataset_path, dst_folder)\n', (1854, 1880), False, 'import os\n'), ((3969, 4004), 'os.path.join', 'os.path.join', (['images_path', 'filename'], {}), '(images_path, filename)\n', (3981, 4004), False, 'import os\n'), ((4608, 4633), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['y'], {}), '(y)\n', (4630, 4633), True, 'import SimpleITK as sitk\n'), ((5373, 5411), 'numpy.moveaxis', 'np.moveaxis', (['x_cropped', '[0, 2]', '[2, 0]'], {}), '(x_cropped, [0, 2], [2, 0])\n', (5384, 5411), True, 'import numpy as np\n'), ((1951, 1981), 'os.path.isdir', 'os.path.isdir', (['dst_folder_path'], {}), '(dst_folder_path)\n', (1964, 1981), False, 'import os\n'), ((4544, 4590), 'os.path.join', 'os.path.join', (['labels_path', "(study_name + '.nii')"], {}), "(labels_path, study_name + '.nii')\n", (4556, 4590), False, 'import os\n'), ((5677, 5702), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['y'], {}), '(y)\n', (5699, 5702), True, 'import SimpleITK as sitk\n'), ((5732, 5783), 'mp.utils.load_restore.join_path', 'join_path', (["[target_path, study_name + '_gt.nii.gz']"], {}), "([target_path, study_name + '_gt.nii.gz'])\n", (5741, 5783), False, 'from mp.utils.load_restore import join_path\n'), ((5806, 5840), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['x_cropped', 'affine'], {}), '(x_cropped, affine)\n', (5821, 5840), True, 'import nibabel as nib\n'), ((5863, 5911), 'mp.utils.load_restore.join_path', 'join_path', (["[target_path, study_name + '.nii.gz']"], {}), "([target_path, study_name + '.nii.gz'])\n", (5872, 5911), False, 'from mp.utils.load_restore import join_path\n'), ((4044, 4068), 'numpy.asarray', 'np.asarray', (['minc.dataobj'], {}), '(minc.dataobj)\n', (4054, 4068), True, 'import numpy as np\n'), ((4847, 4861), 
'numpy.min', 'np.min', (['y.flat'], {}), '(y.flat)\n', (4853, 4861), True, 'import numpy as np\n'), ((2241, 2279), 'os.path.join', 'os.path.join', (['dataset_path', 'dst_folder'], {}), '(dataset_path, dst_folder)\n', (2253, 2279), False, 'import os\n'), ((2408, 2470), 'os.path.join', 'os.path.join', (['dataset_path', 'dst_folder', "(study_name + '.nii.gz')"], {}), "(dataset_path, dst_folder, study_name + '.nii.gz')\n", (2420, 2470), False, 'import os\n'), ((2499, 2564), 'os.path.join', 'os.path.join', (['dataset_path', 'dst_folder', "(study_name + '_gt.nii.gz')"], {}), "(dataset_path, dst_folder, study_name + '_gt.nii.gz')\n", (2511, 2564), False, 'import os\n')] |
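The np.moveaxis call in _extract_images exists because, as its comment notes, the numpy array is indexed [z, y, x] while SimpleITK reports sizes as (x, y, z). A quick check of that convention using the shape mentioned in the sample:
import numpy as np
import SimpleITK as sitk

volume = np.zeros((189, 233, 197), dtype=np.uint8)  # numpy order: [z, y, x]
image = sitk.GetImageFromArray(volume)
print(image.GetSize())  # -> (197, 233, 189), i.e. (x, y, z)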
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
# code changed to Python3
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from sklearn.metrics.pairwise import cosine_similarity
from urllib.request import urlretrieve
import pickle
import IPython
# Configure the matplotlib backend for inline plotting in IPython
# %matplotlib inline
url = 'http://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
def download_progress_hook(count, blockSize, totalSize):
"""A hook to report the progress of a download. This is mostly intended for users with
slow internet connections. Reports every 1% change in download progress.
"""
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
"""Download a file if not present, and make sure it's the right size."""
if force or not os.path.exists(filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
num_classes = 10
np.random.seed(133)
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall()
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
#IPython.display.display_png('notMNIST_large/B/MDEtMDEtMDAudHRm.png')
#IPython.display.display_png('notMNIST_large/J/Nng3b2N0IEFsdGVybmF0ZSBSZWd1bGFyLnR0Zg==.png')
image_size = 28 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load_letter(folder, min_num_images):
"""Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
dtype=np.float32)
print(folder)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
image_data = (ndimage.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
dataset_names = []
for folder in data_folders:
set_filename = folder + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
# You may override by setting force=True.
print('%s already present - Skipping pickling.' % set_filename)
else:
print('Pickling %s.' % set_filename)
dataset = load_letter(folder, min_num_images_per_class)
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return dataset_names
train_datasets = maybe_pickle(train_folders, 45000)
test_datasets = maybe_pickle(test_folders, 1800)
def load_dataset(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
# Display a random matrix with a specified figure number and a grayscale colormap
# largeNameA = train_datasets[0]
# print(largeNameA)
# largeDataA = load_dataset(largeNameA)
# img1 = largeDataA[0, :, :]
# plt.matshow(img1, cmap=plt.cm.gray)
# plt.show()
#
# smallNameJ = test_datasets[9]
# print(smallNameJ)
# smallDataJ = load_dataset(smallNameJ)
# img2 = smallDataJ[0, :, :]
# plt.matshow(img2, cmap=plt.cm.gray)
# plt.show()
# Check whether the data is balanced between classes
# for name in train_datasets:
# dataset = load_dataset(name)
# print(name, ' size:', dataset.shape)
#
# for name in test_datasets:
# dataset = load_dataset(name)
# print(name, ' size:', dataset.shape)
def make_arrays(nb_rows, img_size):
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
start_v, start_t = 0, 0
end_v, end_t = vsize_per_class, tsize_per_class
end_l = vsize_per_class + tsize_per_class
for label, pickle_file in enumerate(pickle_files):
try:
with open(pickle_file, 'rb') as f:
letter_set = pickle.load(f)
# let's shuffle the letters to have random validation and training set
np.random.shuffle(letter_set)
if valid_dataset is not None:
valid_letter = letter_set[:vsize_per_class, :, :]
valid_dataset[start_v:end_v, :, :] = valid_letter
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
train_letter = letter_set[vsize_per_class:end_l, :, :]
train_dataset[start_t:end_t, :, :] = train_letter
train_labels[start_t:end_t] = label
start_t += tsize_per_class
end_t += tsize_per_class
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
return valid_dataset, valid_labels, train_dataset, train_labels
# def show_images(dataset, labels, count):
# for i in range(0,count):
# print(labels[i])
# plt.matshow(dataset[i,:,:], cmap=plt.cm.gray)
# plt.show()
# show_images(train_dataset, train_labels, 3)
# show_images(test_dataset, test_labels, 3)
# show_images(valid_dataset, valid_labels, 3)
pickle_file = 'notMNIST.pickle'
if not os.path.exists(pickle_file):
train_size = 200000
valid_size = 10000
test_size = 10000
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
indices = np.arange(train_dataset.shape[0])
np.random.shuffle(indices)
train_dataset = train_dataset[indices]
train_labels = train_labels[indices]
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
def load_datasets(pickle_file):
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
f = open(pickle_file, 'rb')
save = pickle.load(f)
f.close()
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
return train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels
train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = load_datasets(pickle_file)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
def sanitize_dataset(dataset, labels, filter_dataset, similarity_epsilon):
similarity = cosine_similarity(np.reshape(dataset, (dataset.shape[0],-1)), np.reshape(filter_dataset, (filter_dataset.shape[0],-1)))
same_filter = np.sum(similarity == 1, axis=1) > 0
similar_filter = np.sum(similarity > 1-similarity_epsilon, axis=1) > 0
same_count = np.sum(same_filter)
similar_count = np.sum(similar_filter)
filtered_dataset = dataset[same_filter==False]
filtered_labels = labels[same_filter==False]
return filtered_dataset, filtered_labels, same_count, similar_count
sanit_pickle_file = 'notMNIST_sanit.pickle'
if not os.path.exists(sanit_pickle_file):
filtered_valid_dataset, filtered_valid_labels, train_valid_same, train_valid_similar = \
sanitize_dataset(valid_dataset, valid_labels, train_dataset, 0.001)
print("training-validation: same=", train_valid_same, "similar=", train_valid_similar)
filtered_test_dataset, filtered_test_labels, train_test_same, train_test_similar = \
sanitize_dataset(test_dataset, test_labels, train_dataset, 0.001)
print("training-testing: same=", train_test_same, "similar=", train_test_similar)
filtered_test_dataset, filtered_test_labels, valid_test_same, valid_test_similar = \
sanitize_dataset(filtered_test_dataset, filtered_test_labels, filtered_valid_dataset, 0.001)
print("validation-testing: same=", valid_test_same, "similar=", valid_test_similar)
try:
f = open(sanit_pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': filtered_valid_dataset,
'valid_labels': filtered_valid_labels,
'test_dataset': filtered_test_dataset,
'test_labels': filtered_test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
train_dataset, train_labels, filtered_valid_dataset, filtered_valid_labels, filtered_test_dataset, filtered_test_labels = load_datasets(sanit_pickle_file)
print('Training (sanitized):', train_dataset.shape, train_labels.shape)
print('Validation (sanitized):', filtered_valid_dataset.shape, filtered_valid_labels.shape)
print('Testing (sanitized):', filtered_test_dataset.shape, filtered_test_labels.shape)
def train_model(dataset, labels, size=None):
maxSize = dataset.shape[0]
if size is None:
size = maxSize
elif size > maxSize:
size = maxSize
else:
dataset = dataset[0:size]
labels = labels[0:size]
X = np.reshape(dataset, (size,-1))
y = labels
lr = LogisticRegression(n_jobs=4)
lr.fit(X, y)
return lr
def model_score(model, dataset, labels):
X = np.reshape(dataset, (dataset.shape[0],-1))
y = labels
return model.score(X, y)
def train(size=None):
if size is None:
print("Training with all examples:")
else:
print("Training with ", size, " examples:")
model = train_model(train_dataset, train_labels, size)
print(" validation score: ", model_score(model, valid_dataset, valid_labels))
print(" test score: ", model_score(model, test_dataset, test_labels))
print(" validation score (sanitized): ", model_score(model, filtered_valid_dataset, filtered_valid_labels))
print(" test score (sanitized): ", model_score(model, filtered_test_dataset, filtered_test_labels))
for size in [50, 100, 1000, 5000]:
train(size)
# training on all examples:
#train()
| [
"sys.stdout.write",
"pickle.dump",
"numpy.random.seed",
"numpy.sum",
"pickle.load",
"numpy.arange",
"sys.stdout.flush",
"numpy.mean",
"numpy.ndarray",
"os.path.join",
"numpy.std",
"os.path.exists",
"numpy.reshape",
"tarfile.open",
"numpy.random.shuffle",
"os.stat",
"urllib.request.urlretrieve",
"sklearn.linear_model.LogisticRegression",
"os.listdir",
"scipy.ndimage.imread",
"os.path.isdir",
"os.path.splitext"
] | [((2095, 2114), 'numpy.random.seed', 'np.random.seed', (['(133)'], {}), '(133)\n', (2109, 2114), True, 'import numpy as np\n'), ((1683, 1700), 'os.stat', 'os.stat', (['filename'], {}), '(filename)\n', (1690, 1700), False, 'import os\n'), ((3488, 3506), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (3498, 3506), False, 'import os\n'), ((8594, 8621), 'os.path.exists', 'os.path.exists', (['pickle_file'], {}), '(pickle_file)\n', (8608, 8621), False, 'import os\n'), ((8923, 8956), 'numpy.arange', 'np.arange', (['train_dataset.shape[0]'], {}), '(train_dataset.shape[0])\n', (8932, 8956), True, 'import numpy as np\n'), ((8962, 8988), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (8979, 8988), True, 'import numpy as np\n'), ((9607, 9627), 'os.stat', 'os.stat', (['pickle_file'], {}), '(pickle_file)\n', (9614, 9627), False, 'import os\n'), ((9731, 9745), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9742, 9745), False, 'import pickle\n'), ((10772, 10791), 'numpy.sum', 'np.sum', (['same_filter'], {}), '(same_filter)\n', (10778, 10791), True, 'import numpy as np\n'), ((10813, 10835), 'numpy.sum', 'np.sum', (['similar_filter'], {}), '(similar_filter)\n', (10819, 10835), True, 'import numpy as np\n'), ((11068, 11101), 'os.path.exists', 'os.path.exists', (['sanit_pickle_file'], {}), '(sanit_pickle_file)\n', (11082, 11101), False, 'import os\n'), ((13102, 13133), 'numpy.reshape', 'np.reshape', (['dataset', '(size, -1)'], {}), '(dataset, (size, -1))\n', (13112, 13133), True, 'import numpy as np\n'), ((13159, 13187), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'n_jobs': '(4)'}), '(n_jobs=4)\n', (13177, 13187), False, 'from sklearn.linear_model import LogisticRegression\n'), ((13276, 13319), 'numpy.reshape', 'np.reshape', (['dataset', '(dataset.shape[0], -1)'], {}), '(dataset, (dataset.shape[0], -1))\n', (13286, 13319), True, 'import numpy as np\n'), ((1555, 1627), 'urllib.request.urlretrieve', 'urlretrieve', (['(url + filename)', 'filename'], {'reporthook': 'download_progress_hook'}), '(url + filename, filename, reporthook=download_progress_hook)\n', (1566, 1627), False, 'from urllib.request import urlretrieve\n'), ((2251, 2270), 'os.path.isdir', 'os.path.isdir', (['root'], {}), '(root)\n', (2264, 2270), False, 'import os\n'), ((2533, 2555), 'tarfile.open', 'tarfile.open', (['filename'], {}), '(filename)\n', (2545, 2555), False, 'import tarfile\n'), ((2565, 2583), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2581, 2583), False, 'import sys\n'), ((2662, 2683), 'os.path.join', 'os.path.join', (['root', 'd'], {}), '(root, d)\n', (2674, 2683), False, 'import os\n'), ((3719, 3746), 'os.path.join', 'os.path.join', (['folder', 'image'], {}), '(folder, image)\n', (3731, 3746), False, 'import os\n'), ((4523, 4539), 'numpy.mean', 'np.mean', (['dataset'], {}), '(dataset)\n', (4530, 4539), True, 'import numpy as np\n'), ((4575, 4590), 'numpy.std', 'np.std', (['dataset'], {}), '(dataset)\n', (4581, 4590), True, 'import numpy as np\n'), ((5623, 5637), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5634, 5637), False, 'import pickle\n'), ((6444, 6503), 'numpy.ndarray', 'np.ndarray', (['(nb_rows, img_size, img_size)'], {'dtype': 'np.float32'}), '((nb_rows, img_size, img_size), dtype=np.float32)\n', (6454, 6503), True, 'import numpy as np\n'), ((6522, 6557), 'numpy.ndarray', 'np.ndarray', (['nb_rows'], {'dtype': 'np.int32'}), '(nb_rows, dtype=np.int32)\n', (6532, 6557), True, 'import numpy as np\n'), ((9392, 9437), 
'pickle.dump', 'pickle.dump', (['save', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(save, f, pickle.HIGHEST_PROTOCOL)\n', (9403, 9437), False, 'import pickle\n'), ((10521, 10564), 'numpy.reshape', 'np.reshape', (['dataset', '(dataset.shape[0], -1)'], {}), '(dataset, (dataset.shape[0], -1))\n', (10531, 10564), True, 'import numpy as np\n'), ((10565, 10622), 'numpy.reshape', 'np.reshape', (['filter_dataset', '(filter_dataset.shape[0], -1)'], {}), '(filter_dataset, (filter_dataset.shape[0], -1))\n', (10575, 10622), True, 'import numpy as np\n'), ((10642, 10673), 'numpy.sum', 'np.sum', (['(similarity == 1)'], {'axis': '(1)'}), '(similarity == 1, axis=1)\n', (10648, 10673), True, 'import numpy as np\n'), ((10700, 10751), 'numpy.sum', 'np.sum', (['(similarity > 1 - similarity_epsilon)'], {'axis': '(1)'}), '(similarity > 1 - similarity_epsilon, axis=1)\n', (10706, 10751), True, 'import numpy as np\n'), ((12258, 12303), 'pickle.dump', 'pickle.dump', (['save', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(save, f, pickle.HIGHEST_PROTOCOL)\n', (12269, 12303), False, 'import pickle\n'), ((1099, 1133), 'sys.stdout.write', 'sys.stdout.write', (["('%s%%' % percent)"], {}), "('%s%%' % percent)\n", (1115, 1133), False, 'import sys\n'), ((1147, 1165), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1163, 1165), False, 'import sys\n'), ((1194, 1215), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (1210, 1215), False, 'import sys\n'), ((1229, 1247), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1245, 1247), False, 'import sys\n'), ((1454, 1478), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1468, 1478), False, 'import os\n'), ((4844, 4872), 'os.path.exists', 'os.path.exists', (['set_filename'], {}), '(set_filename)\n', (4858, 4872), False, 'import os\n'), ((2191, 2217), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2207, 2217), False, 'import os\n'), ((2700, 2716), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (2710, 2716), False, 'import os\n'), ((2744, 2765), 'os.path.join', 'os.path.join', (['root', 'd'], {}), '(root, d)\n', (2756, 2765), False, 'import os\n'), ((7256, 7270), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7267, 7270), False, 'import pickle\n'), ((7376, 7405), 'numpy.random.shuffle', 'np.random.shuffle', (['letter_set'], {}), '(letter_set)\n', (7393, 7405), True, 'import numpy as np\n'), ((5246, 5294), 'pickle.dump', 'pickle.dump', (['dataset', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(dataset, f, pickle.HIGHEST_PROTOCOL)\n', (5257, 5294), False, 'import pickle\n'), ((3788, 3814), 'scipy.ndimage.imread', 'ndimage.imread', (['image_file'], {}), '(image_file)\n', (3802, 3814), False, 'from scipy import ndimage\n')] |
#!/usr/bin/env python
import serial
import time
from sys import stdout
print("starting jrk_simple_test")
ser = serial.Serial( "/dev/ttyACM0", 9600) # input to the JRK controller for sending it commands
print("connected to: " + ser.portstr + " for sending commands to JRK")
init_cmd = "\xAA"
jrk_id = "\x0B"
set_target_cmd = "\xC0"
stop_cmd = "\xFF"
read_feedback_cmd = "\xA5"
read_current_cmd = "\x8F"
read_scaled_feedback = "\xA7"
get_error_cmd = "\x33"
# For my John Deere tractor steering: 2400 full right; 1450 straight; 450 full left
# clear error bits and read the register; Pololu protocol: 0xAA, device number, 0x33; Reference "Get Error Flags Halting" page 34 of manual
print("Clearing errors on start up")
ser.write([init_cmd, jrk_id, get_error_cmd])
time.sleep(0.1)
cycle_delay = .1
for target in [2048, 4094, 1024, 0]:
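    # Pololu set-target encoding (as used below): the low 5 bits of the target
    # ride in the command byte, the remaining 7 bits go in the following data byte.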
lowByte = (target & ord("\x1F")) | ord(set_target_cmd)
highByte = (target >> 5) & ord("\x7F")
ser.write([init_cmd, jrk_id, lowByte, highByte])
time.sleep (0.01)
for i in range(1, 30):
time.sleep (cycle_delay)
ser.write([init_cmd, jrk_id, read_current_cmd])
time.sleep (0.01)
checkCurrent = ord(ser.read())
ser.write([init_cmd, jrk_id, read_feedback_cmd])
time.sleep (0.01)
checkFeedback = (ord(ser.read()) | ord(ser.read())<<8)
time.sleep (0.01)
ser.write([init_cmd, jrk_id, read_scaled_feedback])
time.sleep (0.01)
scaled_feedback = (ord(ser.read()) | ord(ser.read())<<8)
        #stdout.write (" \r target: %s feedback is at %s of 4095, iteration %s" % (target, checkFeedback, i)) # use this if you don't want the values to scroll
#stdout.flush() # used with the statement above
target_delta = abs(target-scaled_feedback)
        print (" target: %s feedback: %s scaled feedback: %s current: %s delta: %s iteration %s" % (target, checkFeedback, scaled_feedback, checkCurrent, target_delta, i))
ser.write(stop_cmd)
print ("- Finished.")
ser.write(stop_cmd)
| [
"serial.Serial",
"time.sleep"
] | [((113, 148), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyACM0"""', '(9600)'], {}), "('/dev/ttyACM0', 9600)\n", (126, 148), False, 'import serial\n'), ((767, 782), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (777, 782), False, 'import time\n'), ((984, 1000), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (994, 1000), False, 'import time\n'), ((1029, 1052), 'time.sleep', 'time.sleep', (['cycle_delay'], {}), '(cycle_delay)\n', (1039, 1052), False, 'import time\n'), ((1106, 1122), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1116, 1122), False, 'import time\n'), ((1210, 1226), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1220, 1226), False, 'import time\n'), ((1287, 1303), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1297, 1303), False, 'import time\n'), ((1361, 1377), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1371, 1377), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""livereload.app
Core Server of LiveReload.
"""
import os
import logging
import time
import mimetypes
import webbrowser
from tornado import ioloop
from tornado import escape
from tornado import websocket
from tornado.web import RequestHandler, Application
from tornado.util import ObjectDict
from tornado.options import enable_pretty_logging
from livereload.task import Task
PORT = 35729
ROOT = '.'
LIVERELOAD = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'livereload.js',
)
class LiveReloadHandler(websocket.WebSocketHandler):
waiters = set()
_last_reload_time = None
def allow_draft76(self):
return True
def on_close(self):
if self in LiveReloadHandler.waiters:
LiveReloadHandler.waiters.remove(self)
def send_message(self, message):
if isinstance(message, dict):
message = escape.json_encode(message)
try:
self.write_message(message)
except:
logging.error('Error sending message', exc_info=True)
def watch_tasks(self):
changes = Task.watch()
if not changes:
return
if time.time() - self._last_reload_time < 3:
# if you changed lot of files in one time
# it will refresh too many times
logging.info('ignore this reload action')
return
logging.info('Reload %s waiters', len(self.waiters))
msg = {
'command': 'reload',
'path': '*',
'liveCSS': True
}
self._last_reload_time = time.time()
for waiter in LiveReloadHandler.waiters:
try:
waiter.write_message(msg)
except:
logging.error('Error sending message', exc_info=True)
LiveReloadHandler.waiters.remove(waiter)
def on_message(self, message):
"""Handshake with livereload.js
1. client send 'hello'
2. server reply 'hello'
3. client send 'info'
http://help.livereload.com/kb/ecosystem/livereload-protocol
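        Illustrative payload shapes (example values, inferred from the handling
        below):
            {"command": "hello", "protocols": [...]}
            {"command": "info", "url": "http://127.0.0.1:8000/"}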
"""
message = ObjectDict(escape.json_decode(message))
if message.command == 'hello':
handshake = {}
handshake['command'] = 'hello'
protocols = message.protocols
protocols.append(
'http://livereload.com/protocols/2.x-remote-control'
)
handshake['protocols'] = protocols
handshake['serverName'] = 'livereload-tornado'
self.send_message(handshake)
if message.command == 'info' and 'url' in message:
logging.info('Browser Connected: %s' % message.url)
LiveReloadHandler.waiters.add(self)
if not LiveReloadHandler._last_reload_time:
if os.path.exists('Guardfile'):
logging.info('Reading Guardfile')
execfile('Guardfile')
else:
logging.info('No Guardfile')
Task.add(os.getcwd())
LiveReloadHandler._last_reload_time = time.time()
logging.info('Start watching changes')
ioloop.PeriodicCallback(self.watch_tasks, 800).start()
class IndexHandler(RequestHandler):
def get(self, path='/'):
abspath = os.path.join(os.path.abspath(ROOT), path.lstrip('/'))
mime_type, encoding = mimetypes.guess_type(abspath)
if not mime_type:
mime_type = 'text/html'
self.mime_type = mime_type
self.set_header('Content-Type', mime_type)
self.read_path(abspath)
def inject_livereload(self):
if self.mime_type != 'text/html':
return
ua = self.request.headers.get('User-Agent', 'bot').lower()
if 'msie' not in ua:
self.write('<script src="/livereload.js"></script>')
def read_path(self, abspath):
filepath = abspath
if abspath.endswith('/'):
filepath = os.path.join(abspath, 'index.html')
if not os.path.exists(filepath):
self.create_index(abspath)
return
elif not os.path.exists(abspath):
filepath = abspath + '.html'
if os.path.exists(filepath):
for line in open(filepath):
if '</head>' in line:
self.inject_livereload()
self.write(line)
return
self.send_error(404)
return
def create_index(self, root):
self.inject_livereload()
files = os.listdir(root)
self.write('<ul>')
for f in files:
path = os.path.join(root, f)
self.write('<li>')
if os.path.isdir(path):
self.write('<a href="%s/">%s</a>' % (f, f))
else:
self.write('<a href="%s">%s</a>' % (f, f))
self.write('</li>')
self.write('</ul>')
class LiveReloadJSHandler(RequestHandler):
def get(self):
f = open(LIVERELOAD)
self.set_header('Content-Type', 'application/javascript')
for line in f:
if '{{port}}' in line:
line = line.replace('{{port}}', str(PORT))
self.write(line)
f.close()
handlers = [
(r'/livereload', LiveReloadHandler),
(r'/livereload.js', LiveReloadJSHandler),
(r'(.*)', IndexHandler),
]
def start(port=35729, root='.', autoraise=False):
global PORT
PORT = port
global ROOT
if root is None:
root = '.'
ROOT = root
logging.getLogger().setLevel(logging.INFO)
enable_pretty_logging()
app = Application(handlers=handlers)
app.listen(port)
print('Serving path %s on 127.0.0.1:%s' % (root, port))
if autoraise:
webbrowser.open(
'http://127.0.0.1:%s' % port, new=2, autoraise=True
)
ioloop.IOLoop.instance().start()
if __name__ == '__main__':
start(8000)
| [
"tornado.ioloop.IOLoop.instance",
"os.path.join",
"mimetypes.guess_type",
"os.path.abspath",
"logging.error",
"os.path.dirname",
"os.path.exists",
"tornado.escape.json_encode",
"livereload.task.Task.watch",
"tornado.web.Application",
"os.listdir",
"webbrowser.open",
"os.path.isdir",
"os.getcwd",
"tornado.ioloop.PeriodicCallback",
"tornado.escape.json_decode",
"time.time",
"tornado.options.enable_pretty_logging",
"logging.info",
"logging.getLogger"
] | [((5629, 5652), 'tornado.options.enable_pretty_logging', 'enable_pretty_logging', ([], {}), '()\n', (5650, 5652), False, 'from tornado.options import enable_pretty_logging\n'), ((5663, 5693), 'tornado.web.Application', 'Application', ([], {'handlers': 'handlers'}), '(handlers=handlers)\n', (5674, 5693), False, 'from tornado.web import RequestHandler, Application\n'), ((476, 501), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (491, 501), False, 'import os\n'), ((1111, 1123), 'livereload.task.Task.watch', 'Task.watch', ([], {}), '()\n', (1121, 1123), False, 'from livereload.task import Task\n'), ((1601, 1612), 'time.time', 'time.time', ([], {}), '()\n', (1610, 1612), False, 'import time\n'), ((3436, 3465), 'mimetypes.guess_type', 'mimetypes.guess_type', (['abspath'], {}), '(abspath)\n', (3456, 3465), False, 'import mimetypes\n'), ((4264, 4288), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (4278, 4288), False, 'import os\n'), ((4593, 4609), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (4603, 4609), False, 'import os\n'), ((5802, 5870), 'webbrowser.open', 'webbrowser.open', (["('http://127.0.0.1:%s' % port)"], {'new': '(2)', 'autoraise': '(True)'}), "('http://127.0.0.1:%s' % port, new=2, autoraise=True)\n", (5817, 5870), False, 'import webbrowser\n'), ((901, 928), 'tornado.escape.json_encode', 'escape.json_encode', (['message'], {}), '(message)\n', (919, 928), False, 'from tornado import escape\n'), ((1331, 1372), 'logging.info', 'logging.info', (['"""ignore this reload action"""'], {}), "('ignore this reload action')\n", (1343, 1372), False, 'import logging\n'), ((2148, 2175), 'tornado.escape.json_decode', 'escape.json_decode', (['message'], {}), '(message)\n', (2166, 2175), False, 'from tornado import escape\n'), ((2660, 2711), 'logging.info', 'logging.info', (["('Browser Connected: %s' % message.url)"], {}), "('Browser Connected: %s' % message.url)\n", (2672, 2711), False, 'import logging\n'), ((3365, 3386), 'os.path.abspath', 'os.path.abspath', (['ROOT'], {}), '(ROOT)\n', (3380, 3386), False, 'import os\n'), ((4022, 4057), 'os.path.join', 'os.path.join', (['abspath', '"""index.html"""'], {}), "(abspath, 'index.html')\n", (4034, 4057), False, 'import os\n'), ((4680, 4701), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (4692, 4701), False, 'import os\n'), ((4748, 4767), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (4761, 4767), False, 'import os\n'), ((5582, 5601), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (5599, 5601), False, 'import logging\n'), ((5897, 5921), 'tornado.ioloop.IOLoop.instance', 'ioloop.IOLoop.instance', ([], {}), '()\n', (5919, 5921), False, 'from tornado import ioloop\n'), ((1011, 1064), 'logging.error', 'logging.error', (['"""Error sending message"""'], {'exc_info': '(True)'}), "('Error sending message', exc_info=True)\n", (1024, 1064), False, 'import logging\n'), ((1178, 1189), 'time.time', 'time.time', ([], {}), '()\n', (1187, 1189), False, 'import time\n'), ((2835, 2862), 'os.path.exists', 'os.path.exists', (['"""Guardfile"""'], {}), "('Guardfile')\n", (2849, 2862), False, 'import os\n'), ((3128, 3139), 'time.time', 'time.time', ([], {}), '()\n', (3137, 3139), False, 'import time\n'), ((3156, 3194), 'logging.info', 'logging.info', (['"""Start watching changes"""'], {}), "('Start watching changes')\n", (3168, 3194), False, 'import logging\n'), ((4077, 4101), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (4091, 4101), 
False, 'import os\n'), ((4186, 4209), 'os.path.exists', 'os.path.exists', (['abspath'], {}), '(abspath)\n', (4200, 4209), False, 'import os\n'), ((1757, 1810), 'logging.error', 'logging.error', (['"""Error sending message"""'], {'exc_info': '(True)'}), "('Error sending message', exc_info=True)\n", (1770, 1810), False, 'import logging\n'), ((2884, 2917), 'logging.info', 'logging.info', (['"""Reading Guardfile"""'], {}), "('Reading Guardfile')\n", (2896, 2917), False, 'import logging\n'), ((3002, 3030), 'logging.info', 'logging.info', (['"""No Guardfile"""'], {}), "('No Guardfile')\n", (3014, 3030), False, 'import logging\n'), ((3060, 3071), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3069, 3071), False, 'import os\n'), ((3211, 3257), 'tornado.ioloop.PeriodicCallback', 'ioloop.PeriodicCallback', (['self.watch_tasks', '(800)'], {}), '(self.watch_tasks, 800)\n', (3234, 3257), False, 'from tornado import ioloop\n')] |
from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField
from wtforms.fields.simple import SubmitField
from wtforms.validators import DataRequired, NumberRange
class SimParamsForm(FlaskForm):
intellect = IntegerField('Intellect', [NumberRange(0,1000)])
spellpower = IntegerField('Spellpower', [NumberRange(0,1000)])
hit_score = IntegerField('Spell Hit Rating', [NumberRange(0,202)])
crit_score = IntegerField('Spell Crit Rating', [NumberRange(0,500)])
haste_score = IntegerField('Spell Haste Rating', [NumberRange(0,1000)])
num_fights = IntegerField('# of fights to simulate', [NumberRange(1,2500)]) | [
"wtforms.validators.NumberRange"
] | [((260, 280), 'wtforms.validators.NumberRange', 'NumberRange', (['(0)', '(1000)'], {}), '(0, 1000)\n', (271, 280), False, 'from wtforms.validators import DataRequired, NumberRange\n'), ((332, 352), 'wtforms.validators.NumberRange', 'NumberRange', (['(0)', '(1000)'], {}), '(0, 1000)\n', (343, 352), False, 'from wtforms.validators import DataRequired, NumberRange\n'), ((410, 429), 'wtforms.validators.NumberRange', 'NumberRange', (['(0)', '(202)'], {}), '(0, 202)\n', (421, 429), False, 'from wtforms.validators import DataRequired, NumberRange\n'), ((487, 506), 'wtforms.validators.NumberRange', 'NumberRange', (['(0)', '(500)'], {}), '(0, 500)\n', (498, 506), False, 'from wtforms.validators import DataRequired, NumberRange\n'), ((564, 584), 'wtforms.validators.NumberRange', 'NumberRange', (['(0)', '(1000)'], {}), '(0, 1000)\n', (575, 584), False, 'from wtforms.validators import DataRequired, NumberRange\n'), ((641, 661), 'wtforms.validators.NumberRange', 'NumberRange', (['(1)', '(2500)'], {}), '(1, 2500)\n', (652, 661), False, 'from wtforms.validators import DataRequired, NumberRange\n')] |
from nba_api.stats.endpoints._base import Endpoint
from nba_api.stats.library.http import NBAStatsHTTP
class TeamDetails(Endpoint):
endpoint = 'teamdetails'
expected_data = {'TeamAwardsChampionships': ['YEARAWARDED', 'OPPOSITETEAM'], 'TeamAwardsConf': ['YEARAWARDED', 'OPPOSITETEAM'], 'TeamAwardsDiv': ['YEARAWARDED', 'OPPOSITETEAM'], 'TeamBackground': ['TEAM_ID', 'ABBREVIATION', 'NICKNAME', 'YEARFOUNDED', 'CITY', 'ARENA', 'ARENACAPACITY', 'OWNER', 'GENERALMANAGER', 'HEADCOACH', 'DLEAGUEAFFILIATION'], 'TeamHistory': ['TEAM_ID', 'CITY', 'NICKNAME', 'YEARFOUNDED', 'YEARACTIVETILL'], 'TeamHof': ['PLAYERID', 'PLAYER', 'POSITION', 'JERSEY', 'SEASONSWITHTEAM', 'YEAR'], 'TeamRetired': ['PLAYERID', 'PLAYER', 'POSITION', 'JERSEY', 'SEASONSWITHTEAM', 'YEAR'], 'TeamSocialSites': ['ACCOUNTTYPE', 'WEBSITE_LINK']}
nba_response = None
data_sets = None
player_stats = None
team_stats = None
headers = None
def __init__(self,
team_id,
proxy=None,
headers=None,
timeout=30,
get_request=True):
self.proxy = proxy
if headers is not None:
self.headers = headers
self.timeout = timeout
self.parameters = {
'TeamID': team_id
}
if get_request:
self.get_request()
def get_request(self):
self.nba_response = NBAStatsHTTP().send_api_request(
endpoint=self.endpoint,
parameters=self.parameters,
proxy=self.proxy,
headers=self.headers,
timeout=self.timeout,
)
self.load_response()
def load_response(self):
data_sets = self.nba_response.get_data_sets()
self.data_sets = [Endpoint.DataSet(data=data_set) for data_set_name, data_set in data_sets.items()]
self.team_awards_championships = Endpoint.DataSet(data=data_sets['TeamAwardsChampionships'])
self.team_awards_conf = Endpoint.DataSet(data=data_sets['TeamAwardsConf'])
self.team_awards_div = Endpoint.DataSet(data=data_sets['TeamAwardsDiv'])
self.team_background = Endpoint.DataSet(data=data_sets['TeamBackground'])
self.team_history = Endpoint.DataSet(data=data_sets['TeamHistory'])
self.team_hof = Endpoint.DataSet(data=data_sets['TeamHof'])
self.team_retired = Endpoint.DataSet(data=data_sets['TeamRetired'])
self.team_social_sites = Endpoint.DataSet(data=data_sets['TeamSocialSites'])
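# Illustrative usage sketch (not part of the original module): constructing the
# endpoint sends a live request to stats.nba.com, so network access is needed;
# the team_id below is an arbitrary example value.
if __name__ == "__main__":
    details = TeamDetails(team_id=1610612739)
    print(details.team_background)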
| [
"nba_api.stats.library.http.NBAStatsHTTP",
"nba_api.stats.endpoints._base.Endpoint.DataSet"
] | [((1905, 1964), 'nba_api.stats.endpoints._base.Endpoint.DataSet', 'Endpoint.DataSet', ([], {'data': "data_sets['TeamAwardsChampionships']"}), "(data=data_sets['TeamAwardsChampionships'])\n", (1921, 1964), False, 'from nba_api.stats.endpoints._base import Endpoint\n'), ((1997, 2047), 'nba_api.stats.endpoints._base.Endpoint.DataSet', 'Endpoint.DataSet', ([], {'data': "data_sets['TeamAwardsConf']"}), "(data=data_sets['TeamAwardsConf'])\n", (2013, 2047), False, 'from nba_api.stats.endpoints._base import Endpoint\n'), ((2079, 2128), 'nba_api.stats.endpoints._base.Endpoint.DataSet', 'Endpoint.DataSet', ([], {'data': "data_sets['TeamAwardsDiv']"}), "(data=data_sets['TeamAwardsDiv'])\n", (2095, 2128), False, 'from nba_api.stats.endpoints._base import Endpoint\n'), ((2160, 2210), 'nba_api.stats.endpoints._base.Endpoint.DataSet', 'Endpoint.DataSet', ([], {'data': "data_sets['TeamBackground']"}), "(data=data_sets['TeamBackground'])\n", (2176, 2210), False, 'from nba_api.stats.endpoints._base import Endpoint\n'), ((2239, 2286), 'nba_api.stats.endpoints._base.Endpoint.DataSet', 'Endpoint.DataSet', ([], {'data': "data_sets['TeamHistory']"}), "(data=data_sets['TeamHistory'])\n", (2255, 2286), False, 'from nba_api.stats.endpoints._base import Endpoint\n'), ((2311, 2354), 'nba_api.stats.endpoints._base.Endpoint.DataSet', 'Endpoint.DataSet', ([], {'data': "data_sets['TeamHof']"}), "(data=data_sets['TeamHof'])\n", (2327, 2354), False, 'from nba_api.stats.endpoints._base import Endpoint\n'), ((2383, 2430), 'nba_api.stats.endpoints._base.Endpoint.DataSet', 'Endpoint.DataSet', ([], {'data': "data_sets['TeamRetired']"}), "(data=data_sets['TeamRetired'])\n", (2399, 2430), False, 'from nba_api.stats.endpoints._base import Endpoint\n'), ((2464, 2515), 'nba_api.stats.endpoints._base.Endpoint.DataSet', 'Endpoint.DataSet', ([], {'data': "data_sets['TeamSocialSites']"}), "(data=data_sets['TeamSocialSites'])\n", (2480, 2515), False, 'from nba_api.stats.endpoints._base import Endpoint\n'), ((1782, 1813), 'nba_api.stats.endpoints._base.Endpoint.DataSet', 'Endpoint.DataSet', ([], {'data': 'data_set'}), '(data=data_set)\n', (1798, 1813), False, 'from nba_api.stats.endpoints._base import Endpoint\n'), ((1418, 1432), 'nba_api.stats.library.http.NBAStatsHTTP', 'NBAStatsHTTP', ([], {}), '()\n', (1430, 1432), False, 'from nba_api.stats.library.http import NBAStatsHTTP\n')] |
import unittest
class node():
def __init__(self, value=None):
self.value = value
self.left = None
self.right = None
def solution(root):
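    # Level-order (BFS) traversal: at the top of each pass the queue holds
    # exactly the nodes of the current level; drain that many nodes, record
    # them, and enqueue their children for the next pass.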
res = []
queue = []
queue.append(root)
while queue:
numberOfNodesInThisLevel = len(queue)
        level = [queue.pop(0) for _ in range(numberOfNodesInThisLevel)]  # pop from the front (FIFO) so each level reads left to right
res.append(level)
for n in level:
if n.left:
queue.append(n.left)
if n.right:
queue.append(n.right)
for level in res:
print(*[n.value for n in level])
return res
class Test(unittest.TestCase):
tree_1 = node(1)
tree_1.left = node(2)
tree_1.right = node(3)
tree_1.left.left = node(4)
tree_1.left.right = node(5)
tree_1.right.left = node(6)
tree_1.right.right = node(7)
def testTree1(self):
solution(self.tree_1)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main"
] | [((930, 945), 'unittest.main', 'unittest.main', ([], {}), '()\n', (943, 945), False, 'import unittest\n')] |
# -*- coding: utf-8 -*-
"""
This software is licensed under the License (MIT) located at
https://github.com/ephreal/rollbot/Licence
Please see the license for any restrictions or rights granted to you by the
License.
"""
from utils.rolling import rolling_utils
class Shadowrun3Roller():
"""
The shadowrun roller for shadowrun 1E games.
class methods:
check_successes(target: int, rolls: list[int])
-> dict(successes: int, rolls: list[int], failure: bool)
Checks how many integers in the rolls list exceed the target int.
Returns a dictionary with the amount of successes and the integers
that exceeded the target and whether or not the roll is a failure
is_failure(rolls: list[int]) -> bool
Checks to see if the roll is a failure, which is all 1's by
shadowrun 1E rules. Returns True if the roll is a failure.
roll(dice_pool: int) -> list[int]
Rolls and counts the dice according to shadowrun 1E rules. Does
no checks for failures or successes. Returns a list of integers
representing the totals.
roll_initiative(dice_pool: int, modifier: int) -> initiative: int
Rolls initiative dice and adds in reaction to give the initiative
score.
"""
def __init__(self):
pass
async def check_successes(self, target, rolls):
"""
Checks the rolls to see if any of the rolls are successes
target : int
        rolls : list[int]
-> dict{successes: int, rolls[int], failure: Bool}
"""
rolls = [roll for roll in rolls if roll >= target]
successes = {"successes": len(rolls),
"rolls": rolls
}
if await self.is_failure(rolls):
successes["failure"] = True
else:
successes["failure"] = False
return successes
async def is_failure(self, rolls):
"""
Checks to see if the roll is a failure. This is only the case if all
items in the roll are a 1.
rolls : list[int]
-> bool
"""
ones = [x for x in rolls if x == 1]
if len(ones) == len(rolls):
return True
return False
async def roll(self, dice_pool):
"""
Rolls and counts the dice according to shadowrun 1E rules. This does
no checking for successes.
dice_pool : int
-> list[int]
"""
rolls = await rolling_utils.roll(dice_pool)
if 6 in rolls:
# Get the sixes and remove them from the original list.
sixes = [x for x in rolls if x == 6]
rolls = [x for x in rolls if x != 6]
added = await self.roll(len(sixes))
sixes = [sixes[i] + added[i] for i in range(0, len(sixes))]
rolls.extend(sixes)
return rolls
async def roll_initiative(self, dice_pool=1, modifier=1):
"""
Rolls initiative dice and adds reaction in.
dice_pool: int
        modifier: int
-> int
"""
# Adding 6's does not apply to initiative. Therefore use the general
# roller.
initiative_roll = await rolling_utils.roll(dice_pool)
for i in initiative_roll:
modifier += i
return initiative_roll, modifier
class Shadowrun5Roller():
"""
TODO: Add in glitch counting.
The shadowrun roller is my handler for all shadowrun 5E related rolling
    functions. Types of rolls that are completed include
general rolling and hit counting
- Adding in additional dice with +
- removing dice with -
class methods:
buy_hits(dice_pool: int) -> hits: int
"buys" hits at a 1 hit : 4 dice ratio. Rounds down.
SR5E CORE pg. 45
count_hits(rolls: list[int], prime: Boolean) -> {hits, misses, ones}
Creates the amount of hits, misses, and ones in the rolls. If the
roll is designated for a prime runner, it lowers the hit threshold
by 1.
SR5E CORE pg. 44
extended_test(dice_pool: int, threshold: int, prime: boolean)
-> {success: bool, rolls: list[int], totals {total_hits: int,
running_total: list[int]}}
Runs extended tests by shadowrun 5E rules. Stops as soon as
the test has been completed rather than running through all
iterations if not needed.
SR5E CORE pg. 48
is_glitch(rolls: list[int], hits: int)
-> {glitch: bool, type: str or None}
Checks whether or not a roll is a glitch.
SR5E CORE pg. 45-46
roll(dice_pool: int, exploding: Boolean) -> list[int]:
A dice roller that handles basic dice rolling. This allows for
exploding 6's with exploding=True
SR5E CORE pg. 44
SR5E CORE pg. 56 (Edge effects)
roll_initiative(dice_pool: int, modifier: int) -> initiative: int
Rolls initiative for shadowrun 5E.
SR5E CORE pg. 159
"""
def __init__(self):
pass
    async def buy_hits(self, dice_pool=0):
        """
        "buys" hits at a 1 hit : 4 dice ratio. Rounds down.
dice_pool: int
-> int
"""
return dice_pool // 4
async def count_hits(self, rolls, prime=False):
"""
Counts the amount of hits, misses, and ones in a list of integers.
rolls: list[int]
-> {hits, misses, ones}
"""
hit_limit = 5
# Lower the hit threshold if rolling for a prime runner
if prime:
hit_limit = 4
hits, misses, ones = 0, 0, 0
for i in rolls:
if i >= hit_limit:
hits += 1
elif i > 1:
misses += 1
else:
ones += 1
return {"hits": hits, "misses": misses, "ones": ones}
async def extended_test(self, dice_pool, threshold, prime=False,
exploding=False):
"""
Runs an extended test with a dice pool to see if it is possible to
reach a threshold. Prime will lower the threshold when counting hits
if it is True. Returns a dict with a boolean representing success
status and a list of int lists representing the rolls.
dice_pool: int
threshold: int
prime: bool
exploding: bool
-> {success, rolls, totals {total_hits, running_total}}
"""
rolls = []
totals = []
success = False
total_hits = 0
while dice_pool > 0:
roll = await self.roll(dice_pool, exploding=exploding)
if prime:
counted = await self.count_hits(roll, prime=True)
else:
counted = await self.count_hits(roll)
total_hits += counted["hits"]
totals.append(total_hits)
rolls.append({"hits": counted["hits"], "roll": roll})
dice_pool -= 1
if total_hits >= threshold:
success = True
break
return {"success": success, "rolls": rolls, "totals": {
"total_hits": total_hits,
"running_total": totals}}
async def is_glitch(self, rolls, hits):
"""
Checks whether or not a roll is a glitch.
rolls: list[int]
hits: int
-> dict{glitch: bool, type: str or None}
"""
glitch = False
glitch_type = None
ones = [x for x in rolls if x == 1]
if len(ones) > (len(rolls) // 2) and not hits:
glitch = True
glitch_type = "critical"
elif len(ones) > (len(rolls) // 2) and hits:
glitch = True
glitch_type = "normal"
return {"glitch": glitch, "type": glitch_type}
async def roll(self, dice_pool, exploding=False):
"""
A dice roller that handles basic dice rolling. This allows for
exploding 6's with exploding=True
dice_pool: int
exploding: Boolean
-> list[int]
"""
rolls = await rolling_utils.roll(dice_pool=dice_pool, sides=6)
if exploding:
sixes = [x for x in rolls if x == 6]
rolls.extend(await self.roll(len(sixes)))
rolls.sort()
return rolls
rolls.sort()
return rolls
async def roll_initiative(self, dice_pool, modifier=0):
"""
Rolls initiative for shadowrun 5E.
dice_pool: int
modifier: int
-> initiative: int
"""
initiative_roll = await self.roll(dice_pool)
for i in initiative_roll:
modifier += i
return initiative_roll, modifier
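# Illustrative usage sketch (not part of the original module): the roller
# methods are coroutines, so they must be awaited; the pool sizes below are
# arbitrary example values.
if __name__ == "__main__":
    import asyncio
    async def _demo():
        sr5 = Shadowrun5Roller()
        rolls = await sr5.roll(10, exploding=True)
        print(rolls, await sr5.count_hits(rolls))
    asyncio.run(_demo())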
| [
"utils.rolling.rolling_utils.roll"
] | [((2554, 2583), 'utils.rolling.rolling_utils.roll', 'rolling_utils.roll', (['dice_pool'], {}), '(dice_pool)\n', (2572, 2583), False, 'from utils.rolling import rolling_utils\n'), ((3281, 3310), 'utils.rolling.rolling_utils.roll', 'rolling_utils.roll', (['dice_pool'], {}), '(dice_pool)\n', (3299, 3310), False, 'from utils.rolling import rolling_utils\n'), ((8456, 8504), 'utils.rolling.rolling_utils.roll', 'rolling_utils.roll', ([], {'dice_pool': 'dice_pool', 'sides': '(6)'}), '(dice_pool=dice_pool, sides=6)\n', (8474, 8504), False, 'from utils.rolling import rolling_utils\n')] |
"""
Copyright (C) 2020 SunSpec Alliance
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import os
import xml.etree.ElementTree as ET
import sunspec2.mdef as mdef
SMDX_ROOT = 'sunSpecModels'
SMDX_MODEL = mdef.MODEL
SMDX_BLOCK = 'block'
SMDX_POINT = 'point'
SMDX_ATTR_VERS = 'v'
SMDX_ATTR_ID = 'id'
SMDX_ATTR_LEN = 'len'
SMDX_ATTR_NAME = mdef.NAME
SMDX_ATTR_TYPE = mdef.TYPE
SMDX_ATTR_COUNT = mdef.COUNT
SMDX_ATTR_VALUE = mdef.VALUE
SMDX_ATTR_TYPE_FIXED = 'fixed'
SMDX_ATTR_TYPE_REPEATING = 'repeating'
SMDX_ATTR_OFFSET = 'offset'
SMDX_ATTR_MANDATORY = mdef.MANDATORY
SMDX_ATTR_ACCESS = mdef.ACCESS
SMDX_ATTR_SF = mdef.SF
SMDX_ATTR_UNITS = mdef.UNITS
SMDX_SYMBOL = 'symbol'
SMDX_COMMENT = 'comment'
SMDX_STRINGS = 'strings'
SMDX_ATTR_LOCALE = 'locale'
SMDX_LABEL = mdef.LABEL
SMDX_DESCRIPTION = 'description'
SMDX_NOTES = 'notes'
SMDX_DETAIL = mdef.DETAIL
SMDX_TYPE_INT16 = mdef.TYPE_INT16
SMDX_TYPE_UINT16 = mdef.TYPE_UINT16
SMDX_TYPE_COUNT = mdef.TYPE_COUNT
SMDX_TYPE_ACC16 = mdef.TYPE_ACC16
SMDX_TYPE_ENUM16 = mdef.TYPE_ENUM16
SMDX_TYPE_BITFIELD16 = mdef.TYPE_BITFIELD16
SMDX_TYPE_PAD = mdef.TYPE_PAD
SMDX_TYPE_INT32 = mdef.TYPE_INT32
SMDX_TYPE_UINT32 = mdef.TYPE_UINT32
SMDX_TYPE_ACC32 = mdef.TYPE_ACC32
SMDX_TYPE_ENUM32 = mdef.TYPE_ENUM32
SMDX_TYPE_BITFIELD32 = mdef.TYPE_BITFIELD32
SMDX_TYPE_IPADDR = mdef.TYPE_IPADDR
SMDX_TYPE_INT64 = mdef.TYPE_INT64
SMDX_TYPE_UINT64 = mdef.TYPE_UINT64
SMDX_TYPE_ACC64 = mdef.TYPE_ACC64
SMDX_TYPE_IPV6ADDR = mdef.TYPE_IPV6ADDR
SMDX_TYPE_FLOAT32 = mdef.TYPE_FLOAT32
SMDX_TYPE_STRING = mdef.TYPE_STRING
SMDX_TYPE_SUNSSF = mdef.TYPE_SUNSSF
SMDX_TYPE_EUI48 = mdef.TYPE_EUI48
SMDX_ACCESS_R = 'r'
SMDX_ACCESS_RW = 'rw'
SMDX_MANDATORY_FALSE = 'false'
SMDX_MANDATORY_TRUE = 'true'
smdx_access_types = {SMDX_ACCESS_R: mdef.ACCESS_R, SMDX_ACCESS_RW: mdef.ACCESS_RW}
smdx_mandatory_types = {SMDX_MANDATORY_FALSE: mdef.MANDATORY_FALSE, SMDX_MANDATORY_TRUE: mdef.MANDATORY_TRUE}
smdx_type_types = [
SMDX_TYPE_INT16,
SMDX_TYPE_UINT16,
SMDX_TYPE_COUNT,
SMDX_TYPE_ACC16,
SMDX_TYPE_ENUM16,
SMDX_TYPE_BITFIELD16,
SMDX_TYPE_PAD,
SMDX_TYPE_INT32,
SMDX_TYPE_UINT32,
SMDX_TYPE_ACC32,
SMDX_TYPE_ENUM32,
SMDX_TYPE_BITFIELD32,
SMDX_TYPE_IPADDR,
SMDX_TYPE_INT64,
SMDX_TYPE_UINT64,
SMDX_TYPE_ACC64,
SMDX_TYPE_IPV6ADDR,
SMDX_TYPE_FLOAT32,
SMDX_TYPE_STRING,
SMDX_TYPE_SUNSSF,
SMDX_TYPE_EUI48
]
SMDX_PREFIX = 'smdx_'
SMDX_EXT = '.xml'
def to_smdx_filename(model_id):
return '%s%05d%s' % (SMDX_PREFIX, int(model_id), SMDX_EXT)
def model_filename_to_id(filename):
f = filename
if '.' in f:
f = os.path.splitext(f)[0]
try:
mid = int(f.rsplit('_', 1)[1])
except ValueError:
raise mdef.ModelDefinitionError('Error extracting model id from filename')
return mid
'''
smdx to json mapping:
fixed block -> top level group
model 'name' attribute -> group 'name'
ID point is created for model ID and 'value' is the model ID value as a number
L point is created for model len - model len has no value specified in the model definition
fixed block points are placed in top level group
    repeating block -> group with count = 0 (indicates model len should be used to determine number of groups)
repeating block 'name' -> group 'name', if no 'name' is defined 'name' = 'repeating'
points:
all type, access, and mandatory attributes are preserved
point symbol map to the symbol object and placed in the symbols list for the point
symbol 'name' attribute -> symbol object 'name'
symbol element content -> symbol object 'value'
strings 'label', 'description', 'notes' elements map to point attributes 'label', 'desc', 'detail'
'''
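# Illustrative example of the mapping above (attribute values are made up):
#   <point id="W" type="int16" sf="W_SF" units="W" mandatory="true"/>
# becomes a point def with name='W', type='int16', size=1, units='W', sf='W_SF'
# and the mandatory flag set.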
def from_smdx_file(filename):
tree = ET.parse(filename)
root = tree.getroot()
return(from_smdx(root))
def from_smdx(element):
""" Sets the model type attributes based on an element tree model type
element contained in an SMDX model definition.
Parameters:
element :
Element Tree model type element.
"""
model_def = {}
m = element.find(SMDX_MODEL)
if m is None:
raise mdef.ModelDefinitionError('Model definition not found')
try:
mid = mdef.to_number_type(m.attrib.get(SMDX_ATTR_ID))
except ValueError:
raise mdef.ModelDefinitionError('Invalid model id: %s' % m.attrib.get(SMDX_ATTR_ID))
name = m.attrib.get(SMDX_ATTR_NAME)
if name is None:
name = 'model_' + str(mid)
model_def[mdef.NAME] = name
strings = element.find(SMDX_STRINGS)
# create top level group with ID and L points
fixed_def = {mdef.NAME: name,
mdef.TYPE: mdef.TYPE_GROUP,
mdef.POINTS: [
{mdef.NAME: 'ID', mdef.VALUE: mid,
mdef.DESCRIPTION: 'Model identifier', mdef.LABEL: 'Model ID', mdef.SIZE: 1,
mdef.MANDATORY: mdef.MANDATORY_TRUE, mdef.STATIC: mdef.STATIC_TRUE, mdef.TYPE: mdef.TYPE_UINT16},
{mdef.NAME: 'L',
mdef.DESCRIPTION: 'Model length', mdef.LABEL: 'Model Length', mdef.SIZE: 1,
mdef.MANDATORY: mdef.MANDATORY_TRUE, mdef.STATIC: mdef.STATIC_TRUE, mdef.TYPE: mdef.TYPE_UINT16}
]
}
repeating_def = None
fixed = None
repeating = None
for b in m.findall(SMDX_BLOCK):
btype = b.attrib.get(SMDX_ATTR_TYPE, SMDX_ATTR_TYPE_FIXED)
if btype == SMDX_ATTR_TYPE_FIXED:
if fixed is not None:
raise mdef.ModelDefinitionError('Duplicate fixed block type definition')
fixed = b
elif btype == SMDX_ATTR_TYPE_REPEATING:
if repeating is not None:
raise mdef.ModelDefinitionError('Duplicate repeating block type definition')
repeating = b
else:
raise mdef.ModelDefinitionError('Invalid block type: %s' % btype)
fixed_points_map = {}
if fixed is not None:
points = []
for e in fixed.findall(SMDX_POINT):
point_def = from_smdx_point(e)
if point_def[mdef.NAME] not in fixed_points_map:
fixed_points_map[point_def[mdef.NAME]] = point_def
points.append(point_def)
else:
raise mdef.ModelDefinitionError('Duplicate point definition: %s' % point_def[mdef.NAME])
if points:
fixed_def[mdef.POINTS].extend(points)
repeating_points_map = {}
if repeating is not None:
name = repeating.attrib.get(SMDX_ATTR_NAME)
if name is None:
name = 'repeating'
repeating_def = {mdef.NAME: name, mdef.TYPE: mdef.TYPE_GROUP, mdef.COUNT: 0}
points = []
for e in repeating.findall(SMDX_POINT):
point_def = from_smdx_point(e)
if point_def[mdef.NAME] not in repeating_points_map:
repeating_points_map[point_def[mdef.NAME]] = point_def
points.append(point_def)
else:
raise mdef.ModelDefinitionError('Duplicate point definition: %s' % point_def[mdef.NAME])
if points:
repeating_def[mdef.POINTS] = points
fixed_def[mdef.GROUPS] = [repeating_def]
e = element.find(SMDX_STRINGS)
if e.attrib.get(SMDX_ATTR_ID) == str(mid):
m = e.find(SMDX_MODEL)
if m is not None:
for a in m.findall('*'):
if a.tag == SMDX_LABEL and a.text:
fixed_def[mdef.LABEL] = a.text
elif a.tag == SMDX_DESCRIPTION and a.text:
fixed_def[mdef.DESCRIPTION] = a.text
elif a.tag == SMDX_NOTES and a.text:
fixed_def[mdef.DETAIL] = a.text
for p in e.findall(SMDX_POINT):
pid = p.attrib.get(SMDX_ATTR_ID)
label = desc = notes = None
for a in p.findall('*'):
if a.tag == SMDX_LABEL and a.text:
label = a.text
elif a.tag == SMDX_DESCRIPTION and a.text:
desc = a.text
elif a.tag == SMDX_NOTES and a.text:
notes = a.text
point_def = fixed_points_map.get(pid)
if point_def is not None:
if label:
point_def[mdef.LABEL] = label
if desc:
point_def[mdef.DESCRIPTION] = desc
if notes:
point_def[mdef.DETAIL] = notes
point_def = repeating_points_map.get(pid)
if point_def is not None:
if label:
point_def[mdef.LABEL] = label
if desc:
point_def[mdef.DESCRIPTION] = desc
if notes:
point_def[mdef.DETAIL] = notes
model_def = {'id': mid, 'group': fixed_def}
return model_def
def from_smdx_point(element):
""" Sets the point attributes based on an element tree point element
contained in an SMDX model definition.
Parameters:
element :
Element Tree point type element.
"""
point_def = {}
pid = element.attrib.get(SMDX_ATTR_ID)
if pid is None:
raise mdef.ModelDefinitionError('Missing point id attribute')
point_def[mdef.NAME] = pid
ptype = element.attrib.get(SMDX_ATTR_TYPE)
if ptype is None:
raise mdef.ModelDefinitionError('Missing type attribute for point: %s' % pid)
elif ptype not in smdx_type_types:
raise mdef.ModelDefinitionError('Unknown point type %s for point %s' % (ptype, pid))
point_def[mdef.TYPE] = ptype
plen = mdef.to_number_type(element.attrib.get(SMDX_ATTR_LEN))
if ptype == SMDX_TYPE_STRING:
if plen is None:
raise mdef.ModelDefinitionError('Missing len attribute for point: %s' % pid)
point_def[mdef.SIZE] = plen
else:
point_def[mdef.SIZE] = mdef.point_type_info.get(ptype)['len']
mandatory = element.attrib.get(SMDX_ATTR_MANDATORY, SMDX_MANDATORY_FALSE)
if mandatory not in smdx_mandatory_types:
raise mdef.ModelDefinitionError('Unknown mandatory type: %s' % mandatory)
if mandatory == SMDX_MANDATORY_TRUE:
point_def[mdef.MANDATORY] = smdx_mandatory_types.get(mandatory)
access = element.attrib.get(SMDX_ATTR_ACCESS, SMDX_ACCESS_R)
if access not in smdx_access_types:
raise mdef.ModelDefinitionError('Unknown access type: %s' % access)
if access == SMDX_ACCESS_RW:
point_def[mdef.ACCESS] = smdx_access_types.get(access)
units = element.attrib.get(SMDX_ATTR_UNITS)
if units:
point_def[mdef.UNITS] = units
# if scale factor is an number, convert to correct type
sf = mdef.to_number_type(element.attrib.get(SMDX_ATTR_SF))
if sf is not None:
point_def[mdef.SF] = sf
# if scale factor is an number, convert to correct type
value = mdef.to_number_type(element.attrib.get(SMDX_ATTR_VALUE))
if value is not None:
point_def[mdef.VALUE] = value
symbols = []
for e in element.findall('*'):
if e.tag == SMDX_SYMBOL:
sid = e.attrib.get(SMDX_ATTR_ID)
value = e.text
try:
value = int(value)
except ValueError:
pass
symbols.append({mdef.NAME: sid, mdef.VALUE: value})
if symbols:
point_def[mdef.SYMBOLS] = symbols
return point_def
def indent(elem, level=0):
i = os.linesep + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
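# Illustrative usage sketch (not part of the original module); the model id is
# an arbitrary example value.
if __name__ == "__main__":
    fname = to_smdx_filename(103)                 # -> 'smdx_00103.xml'
    print(fname, model_filename_to_id(fname))    # -> smdx_00103.xml 103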
| [
"xml.etree.ElementTree.parse",
"sunspec2.mdef.ModelDefinitionError",
"os.path.splitext",
"sunspec2.mdef.point_type_info.get"
] | [((4873, 4891), 'xml.etree.ElementTree.parse', 'ET.parse', (['filename'], {}), '(filename)\n', (4881, 4891), True, 'import xml.etree.ElementTree as ET\n'), ((5273, 5328), 'sunspec2.mdef.ModelDefinitionError', 'mdef.ModelDefinitionError', (['"""Model definition not found"""'], {}), "('Model definition not found')\n", (5298, 5328), True, 'import sunspec2.mdef as mdef\n'), ((10489, 10544), 'sunspec2.mdef.ModelDefinitionError', 'mdef.ModelDefinitionError', (['"""Missing point id attribute"""'], {}), "('Missing point id attribute')\n", (10514, 10544), True, 'import sunspec2.mdef as mdef\n'), ((10659, 10730), 'sunspec2.mdef.ModelDefinitionError', 'mdef.ModelDefinitionError', (["('Missing type attribute for point: %s' % pid)"], {}), "('Missing type attribute for point: %s' % pid)\n", (10684, 10730), True, 'import sunspec2.mdef as mdef\n'), ((11364, 11431), 'sunspec2.mdef.ModelDefinitionError', 'mdef.ModelDefinitionError', (["('Unknown mandatory type: %s' % mandatory)"], {}), "('Unknown mandatory type: %s' % mandatory)\n", (11389, 11431), True, 'import sunspec2.mdef as mdef\n'), ((11664, 11725), 'sunspec2.mdef.ModelDefinitionError', 'mdef.ModelDefinitionError', (["('Unknown access type: %s' % access)"], {}), "('Unknown access type: %s' % access)\n", (11689, 11725), True, 'import sunspec2.mdef as mdef\n'), ((3666, 3685), 'os.path.splitext', 'os.path.splitext', (['f'], {}), '(f)\n', (3682, 3685), False, 'import os\n'), ((3774, 3842), 'sunspec2.mdef.ModelDefinitionError', 'mdef.ModelDefinitionError', (['"""Error extracting model id from filename"""'], {}), "('Error extracting model id from filename')\n", (3799, 3842), True, 'import sunspec2.mdef as mdef\n'), ((10784, 10862), 'sunspec2.mdef.ModelDefinitionError', 'mdef.ModelDefinitionError', (["('Unknown point type %s for point %s' % (ptype, pid))"], {}), "('Unknown point type %s for point %s' % (ptype, pid))\n", (10809, 10862), True, 'import sunspec2.mdef as mdef\n'), ((11039, 11109), 'sunspec2.mdef.ModelDefinitionError', 'mdef.ModelDefinitionError', (["('Missing len attribute for point: %s' % pid)"], {}), "('Missing len attribute for point: %s' % pid)\n", (11064, 11109), True, 'import sunspec2.mdef as mdef\n'), ((11187, 11218), 'sunspec2.mdef.point_type_info.get', 'mdef.point_type_info.get', (['ptype'], {}), '(ptype)\n', (11211, 11218), True, 'import sunspec2.mdef as mdef\n'), ((6682, 6748), 'sunspec2.mdef.ModelDefinitionError', 'mdef.ModelDefinitionError', (['"""Duplicate fixed block type definition"""'], {}), "('Duplicate fixed block type definition')\n", (6707, 6748), True, 'import sunspec2.mdef as mdef\n'), ((7008, 7067), 'sunspec2.mdef.ModelDefinitionError', 'mdef.ModelDefinitionError', (["('Invalid block type: %s' % btype)"], {}), "('Invalid block type: %s' % btype)\n", (7033, 7067), True, 'import sunspec2.mdef as mdef\n'), ((7437, 7524), 'sunspec2.mdef.ModelDefinitionError', 'mdef.ModelDefinitionError', (["('Duplicate point definition: %s' % point_def[mdef.NAME])"], {}), "('Duplicate point definition: %s' % point_def[mdef\n .NAME])\n", (7462, 7524), True, 'import sunspec2.mdef as mdef\n'), ((8171, 8258), 'sunspec2.mdef.ModelDefinitionError', 'mdef.ModelDefinitionError', (["('Duplicate point definition: %s' % point_def[mdef.NAME])"], {}), "('Duplicate point definition: %s' % point_def[mdef\n .NAME])\n", (8196, 8258), True, 'import sunspec2.mdef as mdef\n'), ((6879, 6949), 'sunspec2.mdef.ModelDefinitionError', 'mdef.ModelDefinitionError', (['"""Duplicate repeating block type definition"""'], {}), "('Duplicate repeating block type 
definition')\n", (6904, 6949), True, 'import sunspec2.mdef as mdef\n')] |
"""
MicroPython MY9221 LED driver
https://github.com/mcauser/micropython-my9221
MIT License
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from time import sleep_ms
from machine import Pin
class MY9221:
def __init__(self, di, dcki, reverse=False):
self._d = di
self._c = dcki
self._r = reverse
self._d.init(Pin.OUT, value=0)
self._c.init(Pin.OUT, value=0)
def _latch(self):
self._d(0)
sleep_ms(1)
for i in range(4):
self._d(1)
self._d(0)
sleep_ms(1)
def _write16(self, data):
for i in range(15,-1,-1):
self._d((data >> i) & 1)
state = self._c()
self._c(not state)
def _begin(self):
self._write16(0) # command: 8bit mode
def _end(self):
# unused last 2 channels are required to fill the 208 bit shift register
self._write16(0)
self._write16(0)
self._latch()
def reverse(self, val=None):
if val is None:
return self._r
self._r = val
def level(self, val, brightness=255):
self._begin()
for i in range(9,-1,-1) if self._r else range(10):
self._write16(brightness if val > i else 0)
self._end()
def bits(self, val, brightness=255):
val &= 0x3FF
self._begin()
for i in range(9,-1,-1) if self._r else range(10):
self._write16(brightness if (val >> i) & 1 else 0)
self._end()
def bytes(self, buf):
self._begin()
for i in range(9,-1,-1) if self._r else range(10):
self._write16(buf[i])
self._end()
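# Illustrative usage sketch (not part of the original driver); the pin numbers
# are arbitrary examples, use whatever GPIOs your board wires to DI and DCKI.
if __name__ == '__main__':
    led = MY9221(di=Pin(5), dcki=Pin(4))
    led.level(5)                                       # first 5 LEDs at full brightness
    led.bits(0b1010101010)                             # light every other LED
    led.bytes(bytearray([25 * i for i in range(10)]))  # per-LED brightness ramp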
| [
"time.sleep_ms"
] | [((1460, 1471), 'time.sleep_ms', 'sleep_ms', (['(1)'], {}), '(1)\n', (1468, 1471), False, 'from time import sleep_ms\n'), ((1553, 1564), 'time.sleep_ms', 'sleep_ms', (['(1)'], {}), '(1)\n', (1561, 1564), False, 'from time import sleep_ms\n')] |
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import CASCADE
class Grant(models.Model):
nazwa_projektu = models.TextField(blank=True, null=True)
zrodlo_finansowania = models.TextField(blank=True, null=True)
numer_projektu = models.CharField(max_length=200, unique=True)
rok = models.PositiveSmallIntegerField(null=True, blank=True)
class Meta:
verbose_name = "grant"
verbose_name_plural = "granty"
def __str__(self):
return f"{self.numer_projektu} {self.nazwa_projektu or ''}".strip()
class Grant_Rekordu(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
rekord = GenericForeignKey()
grant = models.ForeignKey(Grant, models.PROTECT)
class Meta:
verbose_name = "grant rekordu"
verbose_name_plural = "granty rekordu"
unique_together = [("grant", "content_type", "object_id")]
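# Illustrative usage sketch (not part of the original app; the instance names
# below are hypothetical):
#   grant = Grant.objects.create(numer_projektu="2020/01", rok=2020)
#   Grant_Rekordu.objects.create(grant=grant, rekord=some_publication_instance)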
| [
"django.db.models.TextField",
"django.contrib.contenttypes.fields.GenericForeignKey",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.PositiveIntegerField",
"django.db.models.PositiveSmallIntegerField"
] | [((240, 279), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (256, 279), False, 'from django.db import models\n'), ((306, 345), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (322, 345), False, 'from django.db import models\n'), ((367, 412), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'unique': '(True)'}), '(max_length=200, unique=True)\n', (383, 412), False, 'from django.db import models\n'), ((423, 478), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (455, 478), False, 'from django.db import models\n'), ((722, 768), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ContentType', 'models.CASCADE'], {}), '(ContentType, models.CASCADE)\n', (739, 768), False, 'from django.db import models\n'), ((785, 814), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (812, 814), False, 'from django.db import models\n'), ((828, 847), 'django.contrib.contenttypes.fields.GenericForeignKey', 'GenericForeignKey', ([], {}), '()\n', (845, 847), False, 'from django.contrib.contenttypes.fields import GenericForeignKey\n'), ((860, 900), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Grant', 'models.PROTECT'], {}), '(Grant, models.PROTECT)\n', (877, 900), False, 'from django.db import models\n')] |
"""Views for debugging and diagnostics"""
import pprint
import traceback
from codejail.safe_exec import safe_exec
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse
from django.utils.html import escape
from django.views.decorators.csrf import ensure_csrf_cookie
from common.djangoapps.edxmako.shortcuts import render_to_response
from openedx.core.djangolib.markup import HTML
@login_required
@ensure_csrf_cookie
def run_python(request):
"""
A page to allow testing the Python sandbox on a production server.
    Runs in the override context "debug_run_python", so resource limits will come first from:
CODE_JAIL['limit_overrides']['debug_run_python']
and then from:
CODE_JAIL['limits']
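    An illustrative shape for that setting (key names and values here are
    examples, not the platform defaults):
        CODE_JAIL = {
            "limits": {"CPU": 1, "REALTIME": 3},
            "limit_overrides": {"debug_run_python": {"REALTIME": 10}},
        }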
"""
if not request.user.is_staff:
raise Http404
c = {}
c['code'] = ''
c['results'] = None
if request.method == 'POST':
py_code = c['code'] = request.POST.get('code')
g = {}
try:
safe_exec(
code=py_code,
globals_dict=g,
slug="debug_run_python",
limit_overrides_context="debug_run_python",
)
except Exception: # pylint: disable=broad-except
c['results'] = traceback.format_exc()
else:
c['results'] = pprint.pformat(g)
return render_to_response("debug/run_python_form.html", c)
@login_required
def show_parameters(request):
"""A page that shows what parameters were on the URL and post."""
html_list = []
for name, value in sorted(request.GET.items()):
html_list.append(escape(f"GET {name}: {value!r}"))
for name, value in sorted(request.POST.items()):
html_list.append(escape(f"POST {name}: {value!r}"))
return HttpResponse("\n".join(HTML("<p>{}</p>").format(h) for h in html_list))
| [
"pprint.pformat",
"common.djangoapps.edxmako.shortcuts.render_to_response",
"openedx.core.djangolib.markup.HTML",
"traceback.format_exc",
"codejail.safe_exec.safe_exec",
"django.utils.html.escape"
] | [((1387, 1438), 'common.djangoapps.edxmako.shortcuts.render_to_response', 'render_to_response', (['"""debug/run_python_form.html"""', 'c'], {}), "('debug/run_python_form.html', c)\n", (1405, 1438), False, 'from common.djangoapps.edxmako.shortcuts import render_to_response\n'), ((1020, 1132), 'codejail.safe_exec.safe_exec', 'safe_exec', ([], {'code': 'py_code', 'globals_dict': 'g', 'slug': '"""debug_run_python"""', 'limit_overrides_context': '"""debug_run_python"""'}), "(code=py_code, globals_dict=g, slug='debug_run_python',\n limit_overrides_context='debug_run_python')\n", (1029, 1132), False, 'from codejail.safe_exec import safe_exec\n'), ((1358, 1375), 'pprint.pformat', 'pprint.pformat', (['g'], {}), '(g)\n', (1372, 1375), False, 'import pprint\n'), ((1653, 1685), 'django.utils.html.escape', 'escape', (['f"""GET {name}: {value!r}"""'], {}), "(f'GET {name}: {value!r}')\n", (1659, 1685), False, 'from django.utils.html import escape\n'), ((1765, 1798), 'django.utils.html.escape', 'escape', (['f"""POST {name}: {value!r}"""'], {}), "(f'POST {name}: {value!r}')\n", (1771, 1798), False, 'from django.utils.html import escape\n'), ((1294, 1316), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1314, 1316), False, 'import traceback\n'), ((1834, 1851), 'openedx.core.djangolib.markup.HTML', 'HTML', (['"""<p>{}</p>"""'], {}), "('<p>{}</p>')\n", (1838, 1851), False, 'from openedx.core.djangolib.markup import HTML\n')] |
"""Calculate metrics on graph"""
import graph_tool.all as gt
from .nodedataframe import NodeDataFrame
from .context import expect_nodes
def graph_component_count(G: gt.Graph,
directed: bool = False) -> NodeDataFrame:
expect_nodes(G)
counted_comp, _ = gt.label_components(G, directed=directed)
return NodeDataFrame({"cc": list(counted_comp)})["cc"]
def graph_largest_component(G: gt.Graph,
directed: bool = False) -> NodeDataFrame:
expect_nodes(G)
largest_comp = gt.label_largest_component(G, directed=directed)
return NodeDataFrame({"lc": list(largest_comp)})["lc"]
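# Illustrative call sketch (not part of the original module); any gt.Graph
# loaded elsewhere in the pipeline works:
#   g = gt.collection.data["karate"]
#   print(graph_component_count(g, directed=False))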
| [
"graph_tool.all.label_components",
"graph_tool.all.label_largest_component"
] | [((286, 327), 'graph_tool.all.label_components', 'gt.label_components', (['G'], {'directed': 'directed'}), '(G, directed=directed)\n', (305, 327), True, 'import graph_tool.all as gt\n'), ((534, 582), 'graph_tool.all.label_largest_component', 'gt.label_largest_component', (['G'], {'directed': 'directed'}), '(G, directed=directed)\n', (560, 582), True, 'import graph_tool.all as gt\n')] |
from bevy.injection import AutoInject, detect_dependencies
from bevy.app.args import ArgumentParser, CLIArgs
from typing import Any
import os
@detect_dependencies
class Options(AutoInject):
"""The options object aggregates all options values that the Bevy.App application pulls in from the environment."""
args: CLIArgs
var_prefix = "BEVY_APP_"
path_key = "PATH"
config_file_key = "CONFIG_FILE"
logger_level_key = "LOGGER_LEVEL"
logger_name_key = "LOGGER_NAME"
def __init__(self):
self._cli_options = {}
self._env_options = self._load_env()
self._options = self._build_base_options()
def __getitem__(self, item: str) -> Any:
if item in self._cli_options:
return self._cli_options[item]
if item in self._env_options:
return self._env_options[item]
return self._options[item]
def __contains__(self, item: str) -> bool:
return item in (self._cli_options | self._env_options | self._options)
@property
def cli(self) -> dict[str, Any]:
return self._cli_options.copy()
@property
def env(self) -> dict[str, Any]:
return self._env_options.copy()
def add_using_arg_parser(self, parser: ArgumentParser):
"""Uses an ArgumentParser to populate the CLI options."""
self._cli_options.update(self.args.parse_args(parser).to_dict())
def get(self, item: str, default: Any | None = None) -> Any | None:
try:
return self[item]
except KeyError:
return default
def _build_base_options(self) -> dict[str, Any]:
return {self.path_key: self._get_path()}
def _get_path(self):
return os.getcwd()
def _load_env(self) -> dict[str, Any]:
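        # Only variables carrying var_prefix are picked up, with the prefix
        # stripped: e.g. BEVY_APP_LOGGER_LEVEL=debug -> {"LOGGER_LEVEL": "debug"}.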
return {
            key.removeprefix(self.var_prefix): value
            for key, value in os.environ.items()
            if key.startswith(self.var_prefix)
}
| [
"os.getcwd",
"os.environ.items"
] | [((1713, 1724), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1722, 1724), False, 'import os\n'), ((1865, 1883), 'os.environ.items', 'os.environ.items', ([], {}), '()\n', (1881, 1883), False, 'import os\n')] |
import logging
import numpy as np
import math
from .drawing import Camouflage, NoPattern, SolidColor, MultiGradient, ImagePattern, Gradient, Image, Symbol
from .fonts import LANGUAGE_MAP
from .generate import (
dataset_generator,
basic_attribute_sampler,
flatten_mask,
flatten_mask_except_first,
add_occlusion,
rand_seed,
)
def generate_i(n_samples, alphabet = None, language="english", font = 'calibri', set = "plain", seed=None, **kwargs):
    """Generate a dataset of single-symbol images with one controlled variation.
    Args:
        n_samples (int): number of samples to generate.
        alphabet (optional): alphabet to draw characters from. Defaults to the
            non-bold alphabet of `language`.
        language (str, optional): language used to pick the default alphabet. Defaults to "english".
        font (str, optional): font name. Defaults to 'calibri'.
        set (str, optional): which attribute to vary; one of "plain", "rotation",
            "translation" or "gradient". Defaults to "plain".
        seed (optional): dataset seed. Defaults to None.
    """
if alphabet is None:
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
print(alphabet)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
rotation = 0
translation = (0.0,0.0)
if set == 'rotation':
rotation = (lambda rng: rng.uniform(low=0, high=1)*math.pi)
elif set == 'translation':
translation= (lambda rng: tuple(rng.uniform(low=-1, high=1, size=2)))
elif set == 'gradient':
fg = None
bg = None
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
font = font,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=rotation,
scale=0.7,
translation=translation,
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_dataset_alphabet_onlygrad(n_samples, chars, seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP['english'].get_alphabet(support_bold=False)
#print(alphabet.fonts[:10])
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
char=lambda rng: rng.choice(chars),
font=lambda rng: rng.choice(alphabet.fonts[50:55]),
is_slant=False,
is_bold=False,
rotation=0,
scale=0.7,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_dataset_alphabet(n_samples, chars, seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP['english'].get_alphabet(support_bold=False)
#print(alphabet.fonts[:10])
fg = [SolidColor((1, 1, 1)), ImagePattern(seed=123)]
bg = [SolidColor((0, 0, 0)), ImagePattern(seed=123)]
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
char=lambda rng: rng.choice(chars),
font=lambda rng: rng.choice(alphabet.fonts[50:55]),
is_slant=False,
is_bold=False,
background= lambda rng:rng.choice(bg),
foreground= lambda rng:rng.choice(fg),
rotation=0,
scale=0.7,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_dataset(n_samples, language="english", seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=0,
scale=0.7,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_rotated_dataset(n_samples, language="english", seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=lambda rng: rng.uniform(low=0, high=1)*math.pi,
scale=1.0,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_translated_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate default with translation uniformly b/w (-1,1)
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=0,
scale=1.0,
translation=lambda rng: tuple(rng.uniform(low=-1, high=1, size=2)),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_scaled_dataset(n_samples, language="english", seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=0,
scale=None,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_bold_dataset(n_samples, language="english", seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=True)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=True,
background=bg,
foreground=fg,
rotation=0,
scale=1.0,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_italic_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate white on black, centered symbols.
The only factors of variations are font and char.
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=True,
is_bold=False,
background=bg,
foreground=fg,
rotation=0,
scale=1.0,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_gradient_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate white on black, centered symbols.
The only factors of variations are font and char.
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
rotation=0,
scale=1.0,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_natural_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate white on black, centered symbols.
The only factors of variations are font and char.
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
background=lambda rng: ImagePattern(seed=rand_seed(rng)), #lambda rng: Gradient(seed=rand_seed(_rng))
foreground=lambda rng: ImagePattern(seed=rand_seed(rng)),
is_slant=False,
is_bold=False,
rotation=0,
scale=1.0,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_plain_camouflage_dataset(n_samples, language="english", seed=None, **kwargs):
"""
"""
alphabet = LANGUAGE_MAP[language].get_alphabet(support_bold=False)
angle = 0
fg = Camouflage(stroke_angle=angle, stroke_width=0.1, stroke_length=0.6, stroke_noise=0)
bg = Camouflage(stroke_angle=angle + np.pi / 2, stroke_width=0.1, stroke_length=0.6, stroke_noise=0)
scale = 0.7 * np.exp(np.random.randn() * 0.1)
attr_sampler = basic_attribute_sampler(
alphabet=alphabet,
is_slant=False,
is_bold=False,
background=bg,
foreground=fg,
rotation=0,
scale=scale,
translation=(0.0, 0.0),
inverse_color=False,
pixel_noise_scale=0.0,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_tiny_dataset(n_samples, language="english", seed=None, **kwarg):
"""Generate a dataset of 8x8 resolution in gray scale
with scale of 1 and minimal variations.
"""
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(support_bold=False),
background=bg,
foreground=fg,
is_bold=False,
is_slant=False,
scale=1,
resolution=(8, 8),
is_gray=True,
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_default_dataset(n_samples, language="english", seed=None, **kwarg):
"""Generate the default dataset,
    using gradients as foreground and background.
"""
attr_sampler = basic_attribute_sampler(alphabet=LANGUAGE_MAP[language].get_alphabet())
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_solid_bg_dataset(n_samples, language="english", seed=None, **kwarg):
"""Same as default datasets, but uses white on black."""
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
attr_sampler = basic_attribute_sampler(alphabet=LANGUAGE_MAP[language].get_alphabet(), background=bg, foreground=fg)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_natural_images_dataset(n_samples, language="english", seed=None, **kwargs):
"""Same as default dataset, but uses natural images as foreground and background."""
attr_sampler = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(),
background=lambda rng: ImagePattern(seed=rand_seed(rng)),
foreground=lambda rng: ImagePattern(seed=rand_seed(rng)),
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_korean_1k_dataset(n_samples, seed=None, **kwarg):
"""Uses the first 1000 korean symbols"""
alphabet = LANGUAGE_MAP["korean"].get_alphabet(support_bold=True)
chars = alphabet.symbols[:1000]
fonts = alphabet.fonts
attr_sampler = basic_attribute_sampler(char=lambda rng: rng.choice(chars), font=lambda rng: rng.choice(fonts))
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_camouflage_dataset(n_samples, language="english", texture="camouflage", seed=None, **kwarg):
"""Generate a dataset where the pixel distribution
is the same for the foreground and background.
"""
def attr_sampler(seed=None):
if texture == "camouflage":
angle = 0
fg = Camouflage(stroke_angle=angle, stroke_width=0.1, stroke_length=0.6, stroke_noise=0)
bg = Camouflage(stroke_angle=angle + np.pi / 2, stroke_width=0.1, stroke_length=0.6, stroke_noise=0)
elif texture == "shade":
fg, bg = None, None
elif texture == "bw":
fg = SolidColor((1, 1, 1))
bg = SolidColor((0, 0, 0))
else:
raise ValueError("Unknown texture %s." % texture)
scale = 0.7 * np.exp(np.random.randn() * 0.1)
return basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(support_bold=True),
background=bg,
foreground=fg,
is_bold=True,
is_slant=False,
scale=scale,
)(seed)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_non_camou_bw_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate a black and white dataset with
the same attribute distribution as the camouflage dataset.
"""
return generate_camouflage_dataset(n_samples, language=language, texture="bw", seed=seed, **kwargs)
def generate_non_camou_shade_dataset(n_samples, language="english", seed=None, **kwargs):
"""Generate a gradient foreground and background dataset
with same attribute distribution as the camouflage dataset.
"""
return generate_camouflage_dataset(n_samples, language=language, texture="shade", seed=seed, **kwargs)
# for segmentation, detection, counting
# -------------------------------------
def generate_segmentation_dataset(n_samples, language="english", resolution=(128, 128), seed=None, **kwarg):
"""Generate 3-10 symbols of various scale
and rotation and translation (no bold).
"""
def scale(rng):
return 0.1 * np.exp(rng.randn() * 0.4)
def n_symbols(rng):
return rng.choice(list(range(3, 10)))
attr_generator = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(support_bold=False),
resolution=resolution,
scale=scale,
is_bold=False,
n_symbols=n_symbols,
)
return dataset_generator(attr_generator, n_samples, flatten_mask, dataset_seed=seed)
def generate_counting_dataset(
n_samples, language="english", resolution=(128, 128), n_symbols=None, scale_variation=0.5, seed=None, **kwarg
):
"""Generate 3-10 symbols at various scale.
Samples 'a' with prob 70% or a latin lowercase otherwise.
"""
if n_symbols is None:
def n_symbols(rng):
return rng.choice(list(range(3, 10)))
def scale(rng):
return 0.1 * np.exp(rng.randn() * scale_variation)
def char_sampler(rng):
if rng.rand() < 0.3:
return rng.choice(LANGUAGE_MAP[language].get_alphabet(support_bold=False).symbols)
else:
return "a"
attr_generator = basic_attribute_sampler(
char=char_sampler, resolution=resolution, scale=scale, is_bold=False, n_symbols=n_symbols
)
return dataset_generator(attr_generator, n_samples, flatten_mask, dataset_seed=seed)
def generate_counting_dataset_scale_fix(n_samples, seed=None, **kwargs):
"""Generate 3-10 symbols at fixed scale.
Samples 'a' with prob 70% or a latin lowercase otherwise.
"""
return generate_counting_dataset(n_samples, scale_variation=0, seed=seed, **kwargs)
def generate_counting_dataset_crowded(n_samples, seed=None, **kwargs):
"""Generate 30-50 symbols at fixed scale.
Samples 'a' with prob 70% or a latin lowercase otherwise.
"""
def n_symbols(rng):
return rng.choice(list(range(30, 50)))
return generate_counting_dataset(n_samples, scale_variation=0.1, n_symbols=n_symbols, seed=seed, **kwargs)
# for few-shot learning
# ---------------------
def all_chars(n_samples, seed=None, **kwarg):
"""Combines the symbols of all languages (up to 200 per languages).
Note: some fonts may appear rarely.
"""
symbols_list = []
for language in LANGUAGE_MAP.values():
alphabet = language.get_alphabet()
symbols = alphabet.symbols[:200]
logging.info("Using %d/%d symbols from alphabet %s", len(symbols), len(alphabet.symbols), alphabet.name)
symbols_list.extend(zip(symbols, [alphabet] * len(symbols)))
def attr_sampler(seed=None):
char, alphabet = symbols_list[np.random.choice(len(symbols_list))]
return basic_attribute_sampler(alphabet=alphabet, char=char)(seed)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def generate_balanced_font_chars_dataset(n_samples, seed=None, **kwarg):
"""Samples uniformly from all fonts (max 200 per alphabet)
or uniformly from all symbols (max 200 per alphabet)
with probability 50%.
"""
font_list = []
symbols_list = []
for language in LANGUAGE_MAP.values():
alphabet = language.get_alphabet()
fonts = alphabet.fonts[:200]
symbols = alphabet.symbols[:200]
logging.info("Using %d/%d fonts from alphabet %s", len(fonts), len(alphabet.fonts), alphabet.name)
font_list.extend(zip(fonts, [alphabet] * len(fonts)))
logging.info("Using %d/%d symbols from alphabet %s", len(symbols), len(alphabet.symbols), alphabet.name)
symbols_list.extend(zip(symbols, [alphabet] * len(symbols)))
logging.info("Total n_fonts: %d, n_symbols: %d.", len(font_list), len(symbols_list))
def attr_sampler(seed=None):
if np.random.rand() > 0.5:
font, alphabet = font_list[np.random.choice(len(font_list))]
symbol = np.random.choice(alphabet.symbols[:200])
else:
symbol, alphabet = symbols_list[np.random.choice(len(symbols_list))]
font = np.random.choice(alphabet.fonts[:200])
return basic_attribute_sampler(char=symbol, font=font, is_bold=False, is_slant=False)(seed)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
# for active learning
# -------------------
def generate_large_translation(n_samples, language="english", seed=None, **kwarg):
"""Synbols are translated beyond the border of the image
to create a cropping effect. Scale is fixed to 0.5.
"""
attr_sampler = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(), scale=0.5, translation=lambda rng: tuple(rng.rand(2) * 4 - 2)
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
def missing_symbol_dataset(n_samples, language="english", seed=None, **kwarg):
"""With 10% probability, no symbols are drawn"""
def background(rng):
return MultiGradient(alpha=0.5, n_gradients=2, types=("linear", "radial"), seed=rand_seed(rng))
def tr(rng):
if rng.rand() > 0.1:
return tuple(rng.rand(2) * 2 - 1)
else:
return 10
attr_generator = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(), translation=tr, background=background
)
return dataset_generator(attr_generator, n_samples, dataset_seed=seed)
def generate_some_large_occlusions(n_samples, language="english", seed=None, **kwarg):
"""With probability 20%, add a large occlusion
over the existing symbol.
"""
def n_occlusion(rng):
if rng.rand() < 0.2:
return 1
else:
return 0
attr_sampler = add_occlusion(
basic_attribute_sampler(alphabet=LANGUAGE_MAP[language].get_alphabet()),
n_occlusion=n_occlusion,
scale=lambda rng: 0.6 * np.exp(rng.randn() * 0.1),
translation=lambda rng: tuple(rng.rand(2) * 6 - 3),
)
return dataset_generator(attr_sampler, n_samples, flatten_mask_except_first, dataset_seed=seed)
def generate_many_small_occlusions(n_samples, language="english", seed=None, **kwarg):
"""Add small occlusions on all images.
    The number of occlusions is sampled uniformly in [0, 5).
"""
attr_sampler = add_occlusion(
basic_attribute_sampler(alphabet=LANGUAGE_MAP[language].get_alphabet()),
n_occlusion=lambda rng: rng.randint(0, 5),
)
return dataset_generator(attr_sampler, n_samples, flatten_mask_except_first, dataset_seed=seed)
def generate_pixel_noise(n_samples, language="english", seed=None, **kwarg):
"""Add large pixel noise with probability 0.5."""
def pixel_noise(rng):
if rng.rand() > 0.1:
return 0
else:
return 0.3
attr_sampler = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(), pixel_noise_scale=pixel_noise
)
return dataset_generator(attr_sampler, n_samples, dataset_seed=seed)
# for font classification
# -----------------------
def less_variations(n_samples, language="english", seed=None, **kwarg):
"""Less variations in scale and rotations.
Also, no bold and no italic. This makes a more accessible font
classification task.
"""
attr_generator = basic_attribute_sampler(
alphabet=LANGUAGE_MAP[language].get_alphabet(),
is_bold=False,
is_slant=False,
scale=lambda rng: 0.5 * np.exp(rng.randn() * 0.1),
rotation=lambda rng: rng.randn() * 0.1,
)
return dataset_generator(attr_generator, n_samples, dataset_seed=seed)
DATASET_GENERATOR_MAP = {
"plain": generate_plain_dataset,
"default": generate_default_dataset,
"default-bw": generate_solid_bg_dataset,
"korean-1k": generate_korean_1k_dataset,
"camouflage": generate_camouflage_dataset,
"non-camou-bw": generate_non_camou_bw_dataset,
"non-camou-shade": generate_non_camou_shade_dataset,
"segmentation": generate_segmentation_dataset,
"counting": generate_counting_dataset,
"counting-fix-scale": generate_counting_dataset_scale_fix,
"counting-crowded": generate_counting_dataset_crowded,
"missing-symbol": missing_symbol_dataset,
"some-large-occlusion": generate_some_large_occlusions,
"many-small-occlusion": generate_many_small_occlusions,
"large-translation": generate_large_translation,
"tiny": generate_tiny_dataset,
"balanced-font-chars": generate_balanced_font_chars_dataset,
"all-chars": all_chars,
"less-variations": less_variations,
"pixel-noise": generate_pixel_noise,
"natural-patterns": generate_natural_images_dataset,
}
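# Hypothetical usage sketch: pick a generator by name from the map above and call it;
# every entry shares the (n_samples, ..., seed=None, **kwargs) signature used in this module.
# generator_fn = DATASET_GENERATOR_MAP["plain"]
# dataset = generator_fn(1000, language="english", seed=42)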
| [
"numpy.random.rand",
"numpy.random.randn",
"numpy.random.choice"
] | [((17287, 17303), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (17301, 17303), True, 'import numpy as np\n'), ((17405, 17445), 'numpy.random.choice', 'np.random.choice', (['alphabet.symbols[:200]'], {}), '(alphabet.symbols[:200])\n', (17421, 17445), True, 'import numpy as np\n'), ((17560, 17598), 'numpy.random.choice', 'np.random.choice', (['alphabet.fonts[:200]'], {}), '(alphabet.fonts[:200])\n', (17576, 17598), True, 'import numpy as np\n'), ((8775, 8792), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (8790, 8792), True, 'import numpy as np\n'), ((12265, 12282), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (12280, 12282), True, 'import numpy as np\n')] |
import ast
import os
import pickle
import sys
import stackimpact
import datetime
import argparse
import multiprocessing
from log import logger
from patterns import Miner
from patterns.models import Fragment, Pattern
from vcs.traverse import GitAnalyzer, RepoInfo, Method
import pyflowgraph
import changegraph
import settings
class RunModes:
BUILD_PY_FLOW_GRAPH = 'pfg'
BUILD_CHANGE_GRAPH = 'cg'
COLLECT_CHANGE_GRAPHS = 'collect-cgs'
MINE_PATTERNS = 'patterns'
ALL = [BUILD_PY_FLOW_GRAPH, BUILD_CHANGE_GRAPH, COLLECT_CHANGE_GRAPHS, MINE_PATTERNS]
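# Illustrative invocations (sketch; the script name and file paths are assumptions):
#   python main.py pfg -i before.py -o graph.dot
#   python main.py cg -s before.py -d after.py -o changegraph.dot
#   python main.py collect-cgs
#   python main.py patterns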
def main():
logger.info('------------------------------ Starting ------------------------------')
if settings.get('use_stackimpact', required=False):
_ = stackimpact.start(
agent_key=settings.get('stackimpact_agent_key'),
app_name='CodeChangesMiner',
debug=True,
app_version=str(datetime.datetime.now())
)
sys.setrecursionlimit(2**31-1)
multiprocessing.set_start_method('spawn', force=True)
parser = argparse.ArgumentParser()
parser.add_argument('mode', help=f'One of {RunModes.ALL}', type=str)
args, _ = parser.parse_known_args()
current_mode = args.mode
if current_mode == RunModes.BUILD_PY_FLOW_GRAPH:
parser.add_argument('-i', '--input', help='Path to source code file', type=str, required=True)
parser.add_argument('-o', '--output', help='Path to output file', type=str, default='pyflowgraph.dot')
parser.add_argument('--no-closure', action='store_true')
parser.add_argument('--show-deps', action='store_true')
parser.add_argument('--hide-op-kinds', action='store_true')
parser.add_argument('--show-data-keys', action='store_true')
args = parser.parse_args()
fg = pyflowgraph.build_from_file(
args.input, show_dependencies=args.show_deps, build_closure=not args.no_closure)
pyflowgraph.export_graph_image(
fg, args.output, show_op_kinds=not args.hide_op_kinds, show_data_keys=args.show_data_keys)
elif current_mode == RunModes.BUILD_CHANGE_GRAPH:
parser.add_argument('-s', '--src', help='Path to source code before changes', type=str, required=True)
parser.add_argument('-d', '--dest', help='Path to source code after changes', type=str, required=True)
parser.add_argument('-o', '--output', help='Path to output file', type=str, default='changegraph.dot')
args = parser.parse_args()
fg = changegraph.build_from_files(args.src, args.dest)
changegraph.export_graph_image(fg, args.output)
elif current_mode == RunModes.COLLECT_CHANGE_GRAPHS:
GitAnalyzer().build_change_graphs()
elif current_mode == RunModes.MINE_PATTERNS:
parser.add_argument('-s', '--src', help='Path to source code before changes', type=str, nargs='+')
parser.add_argument('-d', '--dest', help='Path to source code after changes', type=str, nargs='+')
parser.add_argument('--fake-mining', action='store_true')
args = parser.parse_args()
if args.src or args.dest or args.fake_mining:
if not args.src or len(args.src) != len(args.dest):
raise ValueError('src and dest have different size or unset')
change_graphs = []
for old_path, new_path in zip(args.src, args.dest):
methods = []
for n, path in enumerate([old_path, new_path]):
with open(path, 'r+') as f:
src = f.read()
methods.append(Method(path, 'test_name', ast.parse(src, mode='exec').body[0], src))
mock_commit_dtm = datetime.datetime.now(tz=datetime.timezone.utc)
repo_info = RepoInfo(
'mock repo path', 'mock repo name', 'mock repo url', 'mock hash', mock_commit_dtm,
'mock old file path', 'mock new file path', methods[0], methods[1])
cg = changegraph.build_from_files(old_path, new_path, repo_info=repo_info)
change_graphs.append(cg)
miner = Miner()
if args.fake_mining:
for cg in change_graphs:
fragment = Fragment()
fragment.graph = cg
fragment.nodes = cg.nodes
pattern = Pattern([fragment])
miner.add_pattern(pattern)
else:
miner.mine_patterns(change_graphs)
miner.print_patterns()
else:
storage_dir = settings.get('change_graphs_storage_dir')
file_names = os.listdir(storage_dir)
logger.warning(f'Found {len(file_names)} files in storage directory')
change_graphs = []
for file_num, file_name in enumerate(file_names):
file_path = os.path.join(storage_dir, file_name)
try:
with open(file_path, 'rb') as f:
graphs = pickle.load(f)
for graph in graphs:
change_graphs.append(pickle.loads(graph))
except:
logger.warning(f'Incorrect file {file_path}')
if file_num % 1000 == 0:
logger.warning(f'Loaded [{1+file_num}/{len(file_names)}] files')
logger.warning('Pattern mining has started')
miner = Miner()
try:
miner.mine_patterns(change_graphs)
except KeyboardInterrupt:
logger.warning('KeyboardInterrupt: mined patterns will be stored before exit')
miner.print_patterns()
else:
raise ValueError
if __name__ == '__main__':
main()
| [
"pyflowgraph.export_graph_image",
"patterns.Miner",
"argparse.ArgumentParser",
"multiprocessing.set_start_method",
"vcs.traverse.GitAnalyzer",
"changegraph.export_graph_image",
"pickle.load",
"sys.setrecursionlimit",
"os.path.join",
"changegraph.build_from_files",
"log.logger.warning",
"settings.get",
"ast.parse",
"datetime.datetime.now",
"pickle.loads",
"patterns.models.Fragment",
"os.listdir",
"patterns.models.Pattern",
"log.logger.info",
"vcs.traverse.RepoInfo",
"pyflowgraph.build_from_file"
] | [((589, 679), 'log.logger.info', 'logger.info', (['"""------------------------------ Starting ------------------------------"""'], {}), "(\n '------------------------------ Starting ------------------------------')\n", (600, 679), False, 'from log import logger\n'), ((683, 730), 'settings.get', 'settings.get', (['"""use_stackimpact"""'], {'required': '(False)'}), "('use_stackimpact', required=False)\n", (695, 730), False, 'import settings\n'), ((957, 991), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(2 ** 31 - 1)'], {}), '(2 ** 31 - 1)\n', (978, 991), False, 'import sys\n'), ((992, 1045), 'multiprocessing.set_start_method', 'multiprocessing.set_start_method', (['"""spawn"""'], {'force': '(True)'}), "('spawn', force=True)\n", (1024, 1045), False, 'import multiprocessing\n'), ((1060, 1085), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1083, 1085), False, 'import argparse\n'), ((1812, 1924), 'pyflowgraph.build_from_file', 'pyflowgraph.build_from_file', (['args.input'], {'show_dependencies': 'args.show_deps', 'build_closure': '(not args.no_closure)'}), '(args.input, show_dependencies=args.show_deps,\n build_closure=not args.no_closure)\n', (1839, 1924), False, 'import pyflowgraph\n'), ((1942, 2068), 'pyflowgraph.export_graph_image', 'pyflowgraph.export_graph_image', (['fg', 'args.output'], {'show_op_kinds': '(not args.hide_op_kinds)', 'show_data_keys': 'args.show_data_keys'}), '(fg, args.output, show_op_kinds=not args.\n hide_op_kinds, show_data_keys=args.show_data_keys)\n', (1972, 2068), False, 'import pyflowgraph\n'), ((2513, 2562), 'changegraph.build_from_files', 'changegraph.build_from_files', (['args.src', 'args.dest'], {}), '(args.src, args.dest)\n', (2541, 2562), False, 'import changegraph\n'), ((2571, 2618), 'changegraph.export_graph_image', 'changegraph.export_graph_image', (['fg', 'args.output'], {}), '(fg, args.output)\n', (2601, 2618), False, 'import changegraph\n'), ((785, 822), 'settings.get', 'settings.get', (['"""stackimpact_agent_key"""'], {}), "('stackimpact_agent_key')\n", (797, 822), False, 'import settings\n'), ((917, 940), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (938, 940), False, 'import datetime\n'), ((2684, 2697), 'vcs.traverse.GitAnalyzer', 'GitAnalyzer', ([], {}), '()\n', (2695, 2697), False, 'from vcs.traverse import GitAnalyzer, RepoInfo, Method\n'), ((4131, 4138), 'patterns.Miner', 'Miner', ([], {}), '()\n', (4136, 4138), False, 'from patterns import Miner\n'), ((4582, 4623), 'settings.get', 'settings.get', (['"""change_graphs_storage_dir"""'], {}), "('change_graphs_storage_dir')\n", (4594, 4623), False, 'import settings\n'), ((4649, 4672), 'os.listdir', 'os.listdir', (['storage_dir'], {}), '(storage_dir)\n', (4659, 4672), False, 'import os\n'), ((5374, 5418), 'log.logger.warning', 'logger.warning', (['"""Pattern mining has started"""'], {}), "('Pattern mining has started')\n", (5388, 5418), False, 'from log import logger\n'), ((5440, 5447), 'patterns.Miner', 'Miner', ([], {}), '()\n', (5445, 5447), False, 'from patterns import Miner\n'), ((3700, 3747), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'datetime.timezone.utc'}), '(tz=datetime.timezone.utc)\n', (3721, 3747), False, 'import datetime\n'), ((3776, 3943), 'vcs.traverse.RepoInfo', 'RepoInfo', (['"""mock repo path"""', '"""mock repo name"""', '"""mock repo url"""', '"""mock hash"""', 'mock_commit_dtm', '"""mock old file path"""', '"""mock new file path"""', 'methods[0]', 'methods[1]'], {}), "('mock repo path', 'mock repo 
name', 'mock repo url', 'mock hash',\n mock_commit_dtm, 'mock old file path', 'mock new file path', methods[0],\n methods[1])\n", (3784, 3943), False, 'from vcs.traverse import GitAnalyzer, RepoInfo, Method\n'), ((3999, 4068), 'changegraph.build_from_files', 'changegraph.build_from_files', (['old_path', 'new_path'], {'repo_info': 'repo_info'}), '(old_path, new_path, repo_info=repo_info)\n', (4027, 4068), False, 'import changegraph\n'), ((4878, 4914), 'os.path.join', 'os.path.join', (['storage_dir', 'file_name'], {}), '(storage_dir, file_name)\n', (4890, 4914), False, 'import os\n'), ((4244, 4254), 'patterns.models.Fragment', 'Fragment', ([], {}), '()\n', (4252, 4254), False, 'from patterns.models import Fragment, Pattern\n'), ((4371, 4390), 'patterns.models.Pattern', 'Pattern', (['[fragment]'], {}), '([fragment])\n', (4378, 4390), False, 'from patterns.models import Fragment, Pattern\n'), ((5570, 5648), 'log.logger.warning', 'logger.warning', (['"""KeyboardInterrupt: mined patterns will be stored before exit"""'], {}), "('KeyboardInterrupt: mined patterns will be stored before exit')\n", (5584, 5648), False, 'from log import logger\n'), ((5022, 5036), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5033, 5036), False, 'import pickle\n'), ((5189, 5234), 'log.logger.warning', 'logger.warning', (['f"""Incorrect file {file_path}"""'], {}), "(f'Incorrect file {file_path}')\n", (5203, 5234), False, 'from log import logger\n'), ((5124, 5143), 'pickle.loads', 'pickle.loads', (['graph'], {}), '(graph)\n', (5136, 5143), False, 'import pickle\n'), ((3622, 3649), 'ast.parse', 'ast.parse', (['src'], {'mode': '"""exec"""'}), "(src, mode='exec')\n", (3631, 3649), False, 'import ast\n')] |
#!/usr/bin/python3
# DESCRIPTION
# An efficient python script that reads a line separated list of stock symbols
# with optional start and end dates for range and saves the relevant daily
# volume and adjusted closing prices from Yahoo Finance.
# a logfile is also created to summarise the outcome in terms of available data
# please note that yahoo will return a different payload than expected if
# the start or end dates requested do not match global calendar dates,
# such as 2015-06-31
# I leave it to the user to check for this.
# for usage, use -h
import argparse
import urllib.request
import re
import csv
import os
from collections import defaultdict
# read a csv file and return dictionary objects
def get_csv_dict(path):
dictObjs = []
with open(path) as fileObj:
# assuming the first line is a header
header = csv.reader(fileObj, delimiter=",", quotechar='"').__next__()
for line in csv.DictReader(fileObj, fieldnames=header):
dictObjs.append(line)
return dictObjs
# why not convert to a list of objects rather than create and append?
# if it does not exist, it must be created
def init_folder(path):
if os.path.exists(path):
if os.path.isdir(path):
return True
else:
print("File [%s] will not be overwritten" % path)
return False
else:
try:
os.makedirs(path)
return True
except FileExistsError as e:
print("File Error [%s] with [%s]" % (e, path))
return False
# forming urls specifically for the yahoo service
def form_url(symbol, start="", end="", frequency="d"):
# check format, adjust month number, format to string
# or leave blank if does not conform
if re.search("^[0-9]{4}-[0-9]{2}-[0-9]{2}$", start):
dateInts = [int(d) for d in re.split("-", start)]
dateInts[1] -= 1
startDForm = "&c=%d&a=%d&b=%d" % tuple(dateInts)
else:
startDForm = ""
if re.search("^[0-9]{4}-[0-9]{2}-[0-9]{2}$", end):
dateInts = [int(d) for d in re.split("-", end)]
dateInts[1] -= 1
endDForm = "&f=%d&d=%d&e=%d" % tuple(dateInts)
else:
endDForm = ""
url = ("http://real-chart.finance.yahoo.com/table.csv" +
"?s=" + symbol + endDForm + "&g=" + frequency + startDForm +
"&ignore=.csv")
return url
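# Illustrative example, following the formatting logic above (month indices are 0-based):
# form_url("AAPL", "2015-01-01", "2015-12-31") ->
#   "http://real-chart.finance.yahoo.com/table.csv?s=AAPL&f=2015&d=11&e=31&g=d&c=2015&a=0&b=1&ignore=.csv"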
# cleanly return the results of a web request
def req(url):
try:
return urllib.request.urlopen(url)
except urllib.request.URLError as e:
print("HTTP Error [%s] with [%s]" % (e.code, url))
# return the http object contents in usable format
def read_decode(httpObj):
body = httpObj.read()
httpObj.close()
try:
return body.decode('utf-8') # required, but a bottleneck
except UnicodeDecodeError as e:
print("Decode Error [%s]" % e)
# reform provided payload items for legibility and remove irrelevant variables
def reform_payload(items):
# reversing the headed list into continual order
items.append(items[0])
items.reverse()
items.pop()
# rename the header fields
items[0] = re.sub("Date", "date", items[0])
items[0] = re.sub("Volume", "v", items[0])
items[0] = re.sub("Adj Close", "p", items[0])
# determine if the date format requires reformatting
reformDate = True if (re.search("^[0-9]{2}/[0-9]{2}/[0-9]{4},.*",
items[1])) else False
# for each line, split by comma, extract only the desired elements
for i in range(len(items)):
items[i] = re.sub(",[^,]*,[^,]*,[^,]*,[^,]*", "", items[i])
if reformDate:
items[i] = ("%s-%s-%s%s" % (items[i][6:10],
items[i][3:5],
items[i][0:2],
items[i][10:]))
return items
# write list items en masse to a file
def write_items(path, items, mode="at"):
with open(path, mode) as fileObj:
try:
for i in items:
fileObj.write("%s\n" % i)
finally:
fileObj.close()
# write a line of text to a file
def writeln(path, text, mode="at"):
with open(path, mode) as fileObj:
try:
fileObj.write("%s\n" % text)
return True
except:
print("File error: could not write [%s] to [%s]" % (text, path))
return False
finally:
fileObj.close()
# find unique items and preserve order. By <NAME>
def unique(seq):
seen = set()
return [x for x in seq if x not in seen and not seen.add(x)]
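# e.g. unique([3, 1, 3, 2, 1]) -> [3, 1, 2]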
# group the parsed rows into a dict mapping each symbol to its list of trading dates
def reform_items(items):
s = []
for i in items:
s.append((i['symbol'], i['tradingDate']))
d = defaultdict(list)
for k, v in s:
d[k].append(v)
return d
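# e.g. reform_items([{'symbol': 'AAPL', 'tradingDate': '2015-01-02'},
#                    {'symbol': 'AAPL', 'tradingDate': '2015-01-05'}])
# returns a defaultdict where d['AAPL'] == ['2015-01-02', '2015-01-05']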
# main section =========================================================
# start with parsing the arguments
argparser = argparse.ArgumentParser()
argparser.add_argument("items", type=str,
help=("CSV format items of stock symbols " +
"and dates of interest, YYYY-MM-DD format"))
argparser.add_argument("folder", type=str,
help=("The path to a folder to which stock " +
"price files are saved."))
argparser.add_argument("log", type=str,
help=("The path to a machine-readable logfile."))
argparser.add_argument("-s", "--startDate", type=str,
help=("Initial date sought for the range of " +
"time series data, YYYY-MM-DD format"))
argparser.add_argument("-e", "--endDate", type=str,
help=("Final date sought for the range of time " +
"series data, YYYY-MM-DD format"))
args = argparser.parse_args()
items = get_csv_dict(args.items)
initFolderSuccess = init_folder(args.folder)
initLogSuccess = writeln(path=args.log, mode="wt",
text="symbol,tradingDate,position,datapoints")
startDate = str(args.startDate) if args.startDate else ""
endDate = str(args.endDate) if args.endDate else ""
if items and initFolderSuccess and initLogSuccess:
uniqueSymbols = unique(list(i['symbol'] for i in items))
rItems = reform_items(items)
for symbol in uniqueSymbols:
print("Accessing %s" % symbol)
# get the raw payload
httpObj = req(form_url(symbol=symbol,
start=startDate,
end=endDate))
if httpObj:
# transform it to list items and check the number of rows
nData = 0
payload = re.split("\n", read_decode(httpObj))
if payload:
if payload[-1] == "":
payload.pop() # workaround for final \n on split
nData = len(payload) - 1
# write the reformed payload
rPayload = reform_payload(payload)
write_items(path=("%s/%s.csv" % (args.folder, symbol)),
items=rPayload,
mode="wt")
# get position of each tradingDate and write it to logfile
for tradingDate in rItems[symbol]:
position = ""
if rPayload:
pattern = re.compile(tradingDate)
for pos in range(len(rPayload)):
if not position:
if pattern.match(rPayload[pos]):
position = str(pos)
# perhaps it might be quicker to make a list of the results
# and use write_items instead?
writeln(path=args.log, mode="at",
text=("%s,%s,%s,%s" % (symbol,
tradingDate,
position,
str(nData))))
| [
"csv.reader",
"argparse.ArgumentParser",
"os.makedirs",
"re.split",
"os.path.isdir",
"csv.DictReader",
"os.path.exists",
"collections.defaultdict",
"re.search",
"re.sub",
"re.compile"
] | [((5018, 5043), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5041, 5043), False, 'import argparse\n'), ((1172, 1192), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1186, 1192), False, 'import os\n'), ((1762, 1810), 're.search', 're.search', (['"""^[0-9]{4}-[0-9]{2}-[0-9]{2}$"""', 'start'], {}), "('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', start)\n", (1771, 1810), False, 'import re\n'), ((1993, 2039), 're.search', 're.search', (['"""^[0-9]{4}-[0-9]{2}-[0-9]{2}$"""', 'end'], {}), "('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', end)\n", (2002, 2039), False, 'import re\n'), ((3143, 3175), 're.sub', 're.sub', (['"""Date"""', '"""date"""', 'items[0]'], {}), "('Date', 'date', items[0])\n", (3149, 3175), False, 'import re\n'), ((3191, 3222), 're.sub', 're.sub', (['"""Volume"""', '"""v"""', 'items[0]'], {}), "('Volume', 'v', items[0])\n", (3197, 3222), False, 'import re\n'), ((3238, 3272), 're.sub', 're.sub', (['"""Adj Close"""', '"""p"""', 'items[0]'], {}), "('Adj Close', 'p', items[0])\n", (3244, 3272), False, 'import re\n'), ((4820, 4837), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4831, 4837), False, 'from collections import defaultdict\n'), ((932, 974), 'csv.DictReader', 'csv.DictReader', (['fileObj'], {'fieldnames': 'header'}), '(fileObj, fieldnames=header)\n', (946, 974), False, 'import csv\n'), ((1205, 1224), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1218, 1224), False, 'import os\n'), ((3356, 3409), 're.search', 're.search', (['"""^[0-9]{2}/[0-9]{2}/[0-9]{4},.*"""', 'items[1]'], {}), "('^[0-9]{2}/[0-9]{2}/[0-9]{4},.*', items[1])\n", (3365, 3409), False, 'import re\n'), ((3580, 3628), 're.sub', 're.sub', (['""",[^,]*,[^,]*,[^,]*,[^,]*"""', '""""""', 'items[i]'], {}), "(',[^,]*,[^,]*,[^,]*,[^,]*', '', items[i])\n", (3586, 3628), False, 'import re\n'), ((1386, 1403), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1397, 1403), False, 'import os\n'), ((851, 900), 'csv.reader', 'csv.reader', (['fileObj'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(fileObj, delimiter=\',\', quotechar=\'"\')\n', (861, 900), False, 'import csv\n'), ((1848, 1868), 're.split', 're.split', (['"""-"""', 'start'], {}), "('-', start)\n", (1856, 1868), False, 'import re\n'), ((2077, 2095), 're.split', 're.split', (['"""-"""', 'end'], {}), "('-', end)\n", (2085, 2095), False, 'import re\n'), ((7412, 7435), 're.compile', 're.compile', (['tradingDate'], {}), '(tradingDate)\n', (7422, 7435), False, 'import re\n')] |
from django.core.exceptions import SuspiciousOperation
import hashlib
import hmac
import base64
class SecureAcceptanceSigner(object):
def __init__(self, secret_key):
self.secret_key = secret_key
def sign(self, data, signed_fields):
key = self.secret_key.encode("utf-8")
msg_raw = self._build_message(data, signed_fields).encode("utf-8")
msg_hmac = hmac.new(key, msg_raw, hashlib.sha256)
return base64.b64encode(msg_hmac.digest())
def verify_request(self, request):
# Ensure the signature is valid and that this request can be trusted
signed_field_names = request.POST.get("signed_field_names")
if not signed_field_names:
raise SuspiciousOperation("Request has no fields to verify")
signed_field_names = signed_field_names.split(",")
signature_given = request.POST["signature"].encode("utf-8")
signature_calc = self.sign(request.POST, signed_field_names)
return signature_given == signature_calc
def _build_message(self, data, signed_fields):
parts = []
for field in signed_fields:
parts.append("%s=%s" % (field, data.get(field, "")))
return ",".join(parts)
| [
"hmac.new",
"django.core.exceptions.SuspiciousOperation"
] | [((391, 429), 'hmac.new', 'hmac.new', (['key', 'msg_raw', 'hashlib.sha256'], {}), '(key, msg_raw, hashlib.sha256)\n', (399, 429), False, 'import hmac\n'), ((719, 773), 'django.core.exceptions.SuspiciousOperation', 'SuspiciousOperation', (['"""Request has no fields to verify"""'], {}), "('Request has no fields to verify')\n", (738, 773), False, 'from django.core.exceptions import SuspiciousOperation\n')] |
from os import makedirs, path
from lxml import html
import requests
import urllib
import getopt, sys, os
import re
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
#old_address = lambda n,city: f'https://guide.ancv.com/recherche/liste/cv?page={n}&rows=30&f%5B0%5D=im_field_ptl_activite_reference%3A6339&f%5B1%5D=im_field_ptl_activite_reference%3A6344&localisation={city}'
address = lambda city: f'https://leguide.ancv.com/ptl/recherche/list?location={city}&filters%5Bdomaine_activite_principale%5D%5BRestauration%5D=Restauration'
# Write the list of sorted items in file
def store(set_items, output):
if output == None:
print('output name is mandatory')
exit(1)
else:
with open(output,"w") as file:
for t in set_items:
str = ''.join(t)
file.writelines(str + '\n')
def getTotalNumberOfRestaurants(browser, city):
# Get the total number of restaurants
page = requests.get(address(city))
browser.get(address(city))
if page.status_code != 200:
print(f'cannot connect to ancv website')
sys.exit(1)
tree = html.fromstring(page.content)
total_resto_number = tree.xpath('//*[@id="spanNbResult"]/text()')
if total_resto_number == None or len(total_resto_number) == 0:
return 0
else:
print(f'Total number of restaurants: {total_resto_number[0]}')
return int(total_resto_number[0])
def restoLookup(city):
print('Start...')
total_resto = 0
resto_set = set()
# Set option to do not open the browser
options = Options()
options.add_argument('--headless')
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
browser = webdriver.Chrome(options=options)
# total restaurants
total_resto = getTotalNumberOfRestaurants(browser, city)
if total_resto == 0:
print(f'no restaurant found')
return
# collect all the restaurants name
restaurants = []
    # With the latest version of the site, the list of restaurants is loaded dynamically
    # as the user scrolls the page, which made their website much more usable.
    # An infinite scroll is normally stopped once we have scrolled past the remaining scrollHeight,
    # but for some reason on this website the scrollHeight attribute is not updated after each scroll.
    # The workaround is to stop the loop once we have found all the restaurants.
    # I will add a safety timer to avoid an infinite loop.
time.sleep(2) # Allow 2 seconds for the web page to open
scroll_pause_time = 4 # set pause time between scrolls
screen_height = browser.execute_script("return window.screen.height;") # get the screen height of the web
i = 1
while True:
# scroll one screen height each time
browser.execute_script("window.scrollTo(0, {screen_height}*{i}*10);".format(screen_height=screen_height, i=i))
i += 1
time.sleep(scroll_pause_time)
# update scroll height each time after scrolled, as the scroll height can change after we scrolled the page
#scroll_height = browser.execute_script("return document.body.scrollHeight;")
restaurants = browser.find_elements_by_xpath('//*[@id="ptl-list-content"]/div/div/div[2]/p[2]')
print(f'resto found till now: {len(restaurants)}')
# Break the loop when the height we need to scroll to is larger than the total scroll height
#if (screen_height) * i > scroll_height:
        # Warning: stopping once we have found all the restaurants
if len(restaurants) >= total_resto:
break
if len(restaurants) == 0:
print(f'no restaurant found')
return
else:
print(f'restaurants {len(restaurants)} found')
# Add restaurants to the set
for r in restaurants:
print(f'Restaurant name: {r.text}')
t = r.text.replace("\'", "")
resto_set.add(t)
print('Removing duplicates and sorting the results...')
sorted_set = sorted(resto_set)
print('Done')
print(f'Restaurants found: {len(sorted_set)}')
return sorted_set
def usage():
print('Usage: ./ancv_html_scraper.py -c <city> -o <output-file>')
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "ho:v:c:s", ["help", "output=", "city=", "silent-mode"])
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
output = None
city = None
verbose = False
silent_mode = False
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-o", "--output"):
output = a
elif o in ("-c", "--city"):
city = a
elif o in ("-s", "--silent-mode"):
silent_mode = True
else:
assert False, "unhandled option"
if silent_mode == True:
f = open(os.devnull, 'w')
sys.stdout = f
if city == None :
print('city is a mandatory parameter')
exit(1)
if output == None :
output = 'restaurants_cv.txt'
restaurants = restoLookup(city)
store(restaurants, output)
if __name__ == "__main__":
main()
| [
"selenium.webdriver.chrome.options.Options",
"getopt.getopt",
"time.sleep",
"lxml.html.fromstring",
"selenium.webdriver.Chrome",
"sys.exit"
] | [((1165, 1194), 'lxml.html.fromstring', 'html.fromstring', (['page.content'], {}), '(page.content)\n', (1180, 1194), False, 'from lxml import html\n'), ((1617, 1626), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (1624, 1626), False, 'from selenium.webdriver.chrome.options import Options\n'), ((1773, 1806), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'options'}), '(options=options)\n', (1789, 1806), False, 'from selenium import webdriver\n'), ((2528, 2541), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2538, 2541), False, 'import time\n'), ((1141, 1152), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1149, 1152), False, 'import getopt, sys, os\n'), ((2975, 3004), 'time.sleep', 'time.sleep', (['scroll_pause_time'], {}), '(scroll_pause_time)\n', (2985, 3004), False, 'import time\n'), ((4288, 4376), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""ho:v:c:s"""', "['help', 'output=', 'city=', 'silent-mode']"], {}), "(sys.argv[1:], 'ho:v:c:s', ['help', 'output=', 'city=',\n 'silent-mode'])\n", (4301, 4376), False, 'import getopt, sys, os\n'), ((4553, 4564), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (4561, 4564), False, 'import getopt, sys, os\n'), ((4782, 4792), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4790, 4792), False, 'import getopt, sys, os\n')] |
import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import plotly.express as px
import pandas as pd
import numpy as np
from app import app
from app import server
from apps import state, county
cases = pd.read_csv('data/cases.csv')
cases['report_date']=pd.to_datetime(cases.report_date)
top_layout = html.Div([
#links to other pages
html.Div([
html.Nav(className = "nav nav-pills", children=[
html.A('State', className="nav-item nav-link btn", href='/apps/state',
style={"font-size": "2rem",
"box-shadow": "4px 4px 2px #e3e3e3",
"padding": "5px",
'marginTop':'-15px'}),
html.H5(""),
html.A('County', className="nav-item nav-link active btn", href='/apps/county',
style={"font-size": "2rem",
"box-shadow": "4px 4px 2px #e3e3e3",
"padding": "5px",
'marginTop':'-15px'})
],style = {'marginTop':'-15px'}),
], className='one-third column', id = 'links', style={'textAlign':'center'}),
#title
html.Div([
html.Div([
html.H2('VA x COVID',
style={'marginBottom': '10','marginTop':'-15px'}),
html.H3('Virginia COVID-19 Dashboard',
style={'marginTop':'-15px'})
], style={'textAlign':'center'})
], className='one-third column', id='title'),
# last update date
html.Div([
html.H6('Last Updated: ',
style={'marginTop':'-15px'}),
html.H6(str(cases['report_date'].iloc[-1].strftime('%B %d, %Y')) + ' 13:00 (EST)')
], className='one-third column', id = 'title1', style={'textAlign':'center'})
], id='header',className='row flex-display', style={'margin-bottom': '10px','marginTop':'-15px'})
app.layout = html.Div([
dcc.Location(id='url',refresh=False),
top_layout,
html.Div(id='page-content',children=[])
], id='mainContainer', style={'display': 'flex','flex-direction':'column'})
@app.callback(Output('page-content', 'children'),
[Input('url','pathname')])
def display_page(pathname):
if pathname == '/apps/state':
return state.layout
if pathname == '/apps/county':
return county.layout
else:
return state.layout
if __name__ == '__main__':
app.run_server(debug=True)
| [
"dash_html_components.H3",
"dash_html_components.H6",
"dash_core_components.Location",
"dash_html_components.H2",
"pandas.read_csv",
"dash_html_components.Div",
"dash_html_components.A",
"dash.dependencies.Input",
"pandas.to_datetime",
"app.app.run_server",
"dash.dependencies.Output",
"dash_html_components.H5"
] | [((309, 338), 'pandas.read_csv', 'pd.read_csv', (['"""data/cases.csv"""'], {}), "('data/cases.csv')\n", (320, 338), True, 'import pandas as pd\n'), ((360, 393), 'pandas.to_datetime', 'pd.to_datetime', (['cases.report_date'], {}), '(cases.report_date)\n', (374, 393), True, 'import pandas as pd\n'), ((2197, 2231), 'dash.dependencies.Output', 'Output', (['"""page-content"""', '"""children"""'], {}), "('page-content', 'children')\n", (2203, 2231), False, 'from dash.dependencies import Input, Output\n'), ((2506, 2532), 'app.app.run_server', 'app.run_server', ([], {'debug': '(True)'}), '(debug=True)\n', (2520, 2532), False, 'from app import app\n'), ((2008, 2045), 'dash_core_components.Location', 'dcc.Location', ([], {'id': '"""url"""', 'refresh': '(False)'}), "(id='url', refresh=False)\n", (2020, 2045), True, 'import dash_core_components as dcc\n'), ((2066, 2106), 'dash_html_components.Div', 'html.Div', ([], {'id': '"""page-content"""', 'children': '[]'}), "(id='page-content', children=[])\n", (2074, 2106), True, 'import dash_html_components as html\n'), ((2246, 2270), 'dash.dependencies.Input', 'Input', (['"""url"""', '"""pathname"""'], {}), "('url', 'pathname')\n", (2251, 2270), False, 'from dash.dependencies import Input, Output\n'), ((1634, 1689), 'dash_html_components.H6', 'html.H6', (['"""Last Updated: """'], {'style': "{'marginTop': '-15px'}"}), "('Last Updated: ', style={'marginTop': '-15px'})\n", (1641, 1689), True, 'import dash_html_components as html\n'), ((1311, 1384), 'dash_html_components.H2', 'html.H2', (['"""VA x COVID"""'], {'style': "{'marginBottom': '10', 'marginTop': '-15px'}"}), "('VA x COVID', style={'marginBottom': '10', 'marginTop': '-15px'})\n", (1318, 1384), True, 'import dash_html_components as html\n'), ((1412, 1480), 'dash_html_components.H3', 'html.H3', (['"""Virginia COVID-19 Dashboard"""'], {'style': "{'marginTop': '-15px'}"}), "('Virginia COVID-19 Dashboard', style={'marginTop': '-15px'})\n", (1419, 1480), True, 'import dash_html_components as html\n'), ((530, 714), 'dash_html_components.A', 'html.A', (['"""State"""'], {'className': '"""nav-item nav-link btn"""', 'href': '"""/apps/state"""', 'style': "{'font-size': '2rem', 'box-shadow': '4px 4px 2px #e3e3e3', 'padding': '5px',\n 'marginTop': '-15px'}"}), "('State', className='nav-item nav-link btn', href='/apps/state',\n style={'font-size': '2rem', 'box-shadow': '4px 4px 2px #e3e3e3',\n 'padding': '5px', 'marginTop': '-15px'})\n", (536, 714), True, 'import dash_html_components as html\n'), ((823, 834), 'dash_html_components.H5', 'html.H5', (['""""""'], {}), "('')\n", (830, 834), True, 'import dash_html_components as html\n'), ((848, 1042), 'dash_html_components.A', 'html.A', (['"""County"""'], {'className': '"""nav-item nav-link active btn"""', 'href': '"""/apps/county"""', 'style': "{'font-size': '2rem', 'box-shadow': '4px 4px 2px #e3e3e3', 'padding': '5px',\n 'marginTop': '-15px'}"}), "('County', className='nav-item nav-link active btn', href=\n '/apps/county', style={'font-size': '2rem', 'box-shadow':\n '4px 4px 2px #e3e3e3', 'padding': '5px', 'marginTop': '-15px'})\n", (854, 1042), True, 'import dash_html_components as html\n')] |
#!/usr/bin/env python
"""
Bundle Python packages into a single script
A utility needed to build multiple files into a single script.
It can be useful for distribution or for easy writing of code
and bundle it (for example for www.codingame.com)
"""
import re
from distutils.core import setup
description, long_description = re.split('\n{2}', __doc__)
setup(
name='pyndler',
version='1.0.0',
description=description,
long_description=long_description,
author='<NAME>',
author_email='<EMAIL>',
url='http://github.com/dimastark/pyndler',
tests_require=['pytest'],
scripts=["scripts/pyndler"],
packages=['pyndler'],
)
| [
"re.split",
"distutils.core.setup"
] | [((328, 354), 're.split', 're.split', (['"""\n{2}"""', '__doc__'], {}), "('\\n{2}', __doc__)\n", (336, 354), False, 'import re\n'), ((356, 629), 'distutils.core.setup', 'setup', ([], {'name': '"""pyndler"""', 'version': '"""1.0.0"""', 'description': 'description', 'long_description': 'long_description', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""http://github.com/dimastark/pyndler"""', 'tests_require': "['pytest']", 'scripts': "['scripts/pyndler']", 'packages': "['pyndler']"}), "(name='pyndler', version='1.0.0', description=description,\n long_description=long_description, author='<NAME>', author_email=\n '<EMAIL>', url='http://github.com/dimastark/pyndler', tests_require=[\n 'pytest'], scripts=['scripts/pyndler'], packages=['pyndler'])\n", (361, 629), False, 'from distutils.core import setup\n')] |
#!/usr/bin/env python3
from datetime import datetime
#web3fusion
from web3fsnpy import Fsn
linkToChain = {
'network' : 'mainnet', # One of 'testnet', or 'mainnet'
'provider' : 'WebSocket', # One of 'WebSocket', 'HTTP', or 'IPC'
'gateway' : 'wss://mainnetpublicgateway1.fusionnetwork.io:10001',
#'gateway' : 'wss://testnetpublicgateway1.fusionnetwork.io:10001',
}
web3fsn = Fsn(linkToChain)
pub_key = "0x3333333333333333333333333333333333333333"
Tckts = web3fsn.ticketsByAddress(pub_key)
#print(Tckts)
print('Total number of tickets: ',len(Tckts))
print('\nor using totalNumberOfTicketsByAddress: ',web3fsn.totalNumberOfTicketsByAddress(pub_key),'\n')
for a in Tckts:
tck = Tckts[a]
st = datetime.fromtimestamp(tck.StartTime).strftime('%c')
ex = datetime.fromtimestamp(tck.ExpireTime).strftime('%c')
print('Block Height: ',tck.Height,' Start Time: ',st,' Expiry Time: ',ex)
| [
"web3fsnpy.Fsn",
"datetime.datetime.fromtimestamp"
] | [((416, 432), 'web3fsnpy.Fsn', 'Fsn', (['linkToChain'], {}), '(linkToChain)\n', (419, 432), False, 'from web3fsnpy import Fsn\n'), ((751, 788), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['tck.StartTime'], {}), '(tck.StartTime)\n', (773, 788), False, 'from datetime import datetime\n'), ((813, 851), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['tck.ExpireTime'], {}), '(tck.ExpireTime)\n', (835, 851), False, 'from datetime import datetime\n')] |
import re
import datetime
class DateFoldStreamProxy:
old_date = None
date_re = re.compile(r"(?P<year>\d\d\d\d)-(?P<month>\d\d)-(?P<day>\d\d).*")
def __init__(self, stream):
self.stream = stream
def close(self):
self.stream.close()
def render_month(self, date: datetime.date):
return f"* {date.strftime('%B %Y')}\n"
def render_date(self, date: datetime.date):
return f"** {date.strftime('%Y-%m-%d - %A')}\n"
def write(self, content):
match = self.date_re.match(content)
if match:
g = dict((k, int(v)) for k, v in match.groupdict().items())
new_date = datetime.date(**g)
old_date = self.old_date
self.old_date = new_date
if not old_date or new_date.month != old_date.month:
self.stream.write(self.render_month(new_date))
if not old_date or new_date.day != old_date.day:
self.stream.write(self.render_date(new_date))
# Now write the Original Content
content = re.sub(r'\s+\n', r'\n', content, 999)
self.stream.write(content)
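# Hypothetical usage sketch: wrap an output stream so that dated lines are preceded
# by org-mode month/day headings (file name and content are illustrative):
# with open("journal.org", "w") as f:
#     proxy = DateFoldStreamProxy(f)
#     proxy.write("2021-03-05 some entry\n")  # emits "* March 2021" and "** 2021-03-05 - Friday" first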
| [
"datetime.date",
"re.sub",
"re.compile"
] | [((89, 161), 're.compile', 're.compile', (['"""(?P<year>\\\\d\\\\d\\\\d\\\\d)-(?P<month>\\\\d\\\\d)-(?P<day>\\\\d\\\\d).*"""'], {}), "('(?P<year>\\\\d\\\\d\\\\d\\\\d)-(?P<month>\\\\d\\\\d)-(?P<day>\\\\d\\\\d).*')\n", (99, 161), False, 'import re\n'), ((1063, 1101), 're.sub', 're.sub', (['"""\\\\s+\\\\n"""', '"""\\\\n"""', 'content', '(999)'], {}), "('\\\\s+\\\\n', '\\\\n', content, 999)\n", (1069, 1101), False, 'import re\n'), ((657, 675), 'datetime.date', 'datetime.date', ([], {}), '(**g)\n', (670, 675), False, 'import datetime\n')] |
import extract_features as ef
import numpy as np
with open('./input.csv','r',encoding='utf-8') as input_file:
with open('./dataset.csv','w',encoding='utf-8') as dataset:
for line in input_file:
r = line.split(',')
x = r[0].strip()
y = r[1].strip()
example = ef.extractFeatures(x)
result = '{0},{1}\n'.format(
np.array2string(example, separator=','),
y
)
result = result.replace('[','')
result = result.replace(']','')
result = result.replace(' ','')
dataset.write(result)
| [
"numpy.array2string",
"extract_features.extractFeatures"
] | [((294, 315), 'extract_features.extractFeatures', 'ef.extractFeatures', (['x'], {}), '(x)\n', (312, 315), True, 'import extract_features as ef\n'), ((353, 392), 'numpy.array2string', 'np.array2string', (['example'], {'separator': '""","""'}), "(example, separator=',')\n", (368, 392), True, 'import numpy as np\n')] |
import click
import inflection
@click.command()
@click.argument("name")
@click.option("--doc")
def get_context(name, doc):
"""Generate a command with given name.
The command can be run immediately after generation.
For example:
dj generate command bar
dj run manage.py bar
"""
name = inflection.underscore(name)
return {"name": name, "doc": doc or name}
| [
"inflection.underscore",
"click.option",
"click.argument",
"click.command"
] | [((34, 49), 'click.command', 'click.command', ([], {}), '()\n', (47, 49), False, 'import click\n'), ((51, 73), 'click.argument', 'click.argument', (['"""name"""'], {}), "('name')\n", (65, 73), False, 'import click\n'), ((75, 96), 'click.option', 'click.option', (['"""--doc"""'], {}), "('--doc')\n", (87, 96), False, 'import click\n'), ((325, 352), 'inflection.underscore', 'inflection.underscore', (['name'], {}), '(name)\n', (346, 352), False, 'import inflection\n')] |
from contextlib import contextmanager
import requests
def _validate_response(response):
if response.status_code == 200:
return response
json = response.json()
raise Exception(json["message"])
def _release_lease(lease_id):
_validate_response(requests.post("http://localhost:3000/leases/release", json={"leaseId": lease_id})).json()
def _create_lease(media_id):
lease = _validate_response(requests.post("http://localhost:3000/leases/create", json={"mediaId": media_id})).json()
if lease["success"] is False:
raise Exception(lease["message"])
return lease
def _get_usb_drives():
drives = []
result = _validate_response(requests.get("http://localhost:3000/media")).json()
for media in result:
if media["provider"] == "udisks":
drives.append(media)
return drives
@contextmanager
def lease_first_drive_path():
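    # Lease the first detected USB drive and yield its mount path (yields None when no
    # drive is present); the lease is released when the with-block exits.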
drives = _get_usb_drives()
if len(drives) == 0:
yield
return
lease = _create_lease(drives[0]["id"])
mount_path = lease["mountPath"]
try:
yield mount_path
finally:
_release_lease(lease["leaseId"]) | [
"requests.post",
"requests.get"
] | [((268, 353), 'requests.post', 'requests.post', (['"""http://localhost:3000/leases/release"""'], {'json': "{'leaseId': lease_id}"}), "('http://localhost:3000/leases/release', json={'leaseId':\n lease_id})\n", (281, 353), False, 'import requests\n'), ((419, 504), 'requests.post', 'requests.post', (['"""http://localhost:3000/leases/create"""'], {'json': "{'mediaId': media_id}"}), "('http://localhost:3000/leases/create', json={'mediaId': media_id}\n )\n", (432, 504), False, 'import requests\n'), ((673, 716), 'requests.get', 'requests.get', (['"""http://localhost:3000/media"""'], {}), "('http://localhost:3000/media')\n", (685, 716), False, 'import requests\n')] |
# June 5, 2018
# May 31, 2018
import numpy as np
import matplotlib.pyplot as plt
from time import sleep
import cv2
class Recurrent_Photo:
'''
Recurrent Photo only for testing
'''
def __init__(self, iterations=100, resize=(1280, 720)):
self.camera = cv2.VideoCapture(0)
self.video = np.zeros([iterations, resize[1], resize[0], 3])
for iteration in range(iterations):
self.video[iteration, :, :] = cv2.resize(
(self.camera.read()[1]/255),
# FIXME: AHORA TRABAJAMOS CON LOS TRES CANALES
resize
)
cv2.imshow('Prueba', self.video[iteration, :, :])
cv2.waitKey(1)
self.camera.release()
self.resize = resize
def get_recurrence(self, alpha=(0.75555, 0.25555)):
'''
        Alpha is a pair of floats representing the amount of
        superposition that you want to have in the current image.
        Example:
            alpha = (0.5, 0.5) is a neutral change, where the last
            image will have the same intensity as the first image.
'''
first = np.array(self.video[0:self.video.shape[0]-1, :, :,])
second = np.array(self.video[1:self.video.shape[0], :, :])
diferences = self.get_diference(
second,
first
)
for image in range(len(diferences)):
diferences[image] = diferences[image-1]* alpha[0] + diferences[image]* alpha[1]
            # See the equation in the notebook.
return diferences
def get_diference(self, A, B):
'''
Get diference from two items
'''
return np.abs(A - B)
def resize_images(X, dimensions=(100, 75)):
if len(X.shape) == 3:
X = cv2.resize(X, dimensions)
else:
        # Reassigning the loop variable does not modify X, so build a new resized array instead.
        X = np.array([cv2.resize(image, dimensions) for image in X])
return X
def show_image(X):
if len(X.shape) == 3:
cv2.imshow('image', X)
else:
for image in X:
cv2.imshow('X', image)
cv2.waitKey(1)
sleep(0.05)
non_movement = Recurrent_Photo(50)
print('Prepare next movement...')
sleep(2)
movement = Recurrent_Photo(50)
non_movement_recurrence = non_movement.get_recurrence()
movement_recurrence = movement.get_recurrence()
X = resize_images(non_movement_recurrence)
Y = resize_images(movement_recurrence)
| [
"numpy.abs",
"cv2.waitKey",
"numpy.zeros",
"time.sleep",
"cv2.VideoCapture",
"numpy.array",
"cv2.imshow",
"cv2.resize"
] | [((2183, 2191), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (2188, 2191), False, 'from time import sleep\n'), ((285, 304), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (301, 304), False, 'import cv2\n'), ((326, 373), 'numpy.zeros', 'np.zeros', (['[iterations, resize[1], resize[0], 3]'], {}), '([iterations, resize[1], resize[0], 3])\n', (334, 373), True, 'import numpy as np\n'), ((1137, 1190), 'numpy.array', 'np.array', (['self.video[0:self.video.shape[0] - 1, :, :]'], {}), '(self.video[0:self.video.shape[0] - 1, :, :])\n', (1145, 1190), True, 'import numpy as np\n'), ((1207, 1256), 'numpy.array', 'np.array', (['self.video[1:self.video.shape[0], :, :]'], {}), '(self.video[1:self.video.shape[0], :, :])\n', (1215, 1256), True, 'import numpy as np\n'), ((1687, 1700), 'numpy.abs', 'np.abs', (['(A - B)'], {}), '(A - B)\n', (1693, 1700), True, 'import numpy as np\n'), ((1784, 1809), 'cv2.resize', 'cv2.resize', (['X', 'dimensions'], {}), '(X, dimensions)\n', (1794, 1809), False, 'import cv2\n'), ((1965, 1987), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'X'], {}), "('image', X)\n", (1975, 1987), False, 'import cv2\n'), ((630, 679), 'cv2.imshow', 'cv2.imshow', (['"""Prueba"""', 'self.video[iteration, :, :]'], {}), "('Prueba', self.video[iteration, :, :])\n", (640, 679), False, 'import cv2\n'), ((692, 706), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (703, 706), False, 'import cv2\n'), ((1868, 1897), 'cv2.resize', 'cv2.resize', (['image', 'dimensions'], {}), '(image, dimensions)\n', (1878, 1897), False, 'import cv2\n'), ((2038, 2060), 'cv2.imshow', 'cv2.imshow', (['"""X"""', 'image'], {}), "('X', image)\n", (2048, 2060), False, 'import cv2\n'), ((2073, 2087), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2084, 2087), False, 'import cv2\n'), ((2100, 2111), 'time.sleep', 'sleep', (['(0.05)'], {}), '(0.05)\n', (2105, 2111), False, 'from time import sleep\n')] |
#!/usr/bin/env python3
#
# Author:
# <NAME> (@skelsec)
#
# https://www.rfc-editor.org/rfc/rfc4178.txt
from asn1crypto.core import ObjectIdentifier, Sequence, SequenceOf, Enumerated, GeneralString, OctetString, BitString, Choice, Any, Boolean
import enum
import os
import io
TAG = 'explicit'
# class
UNIVERSAL = 0
APPLICATION = 1
CONTEXT = 2
class MechType(ObjectIdentifier):
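    # Maps mechanism OIDs to human-readable names for the security providers SPNEGO can negotiate.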
_map = {
'1.3.6.1.4.1.311.2.2.10': 'NTLMSSP - Microsoft NTLM Security Support Provider',
'1.2.840.48018.1.2.2' : 'MS KRB5 - Microsoft Kerberos 5',
'1.2.840.113554.1.2.2' : 'KRB5 - Kerberos 5',
'1.2.840.113554.1.2.2.3': 'KRB5 - Kerberos 5 - User to User',
'1.3.6.1.4.1.311.2.2.30': 'NEGOEX - SPNEGO Extended Negotiation Security Mechanism',
}
class MechTypes(SequenceOf):
_child_spec = MechType
class ContextFlags(BitString):
_map = {
0: 'delegFlag',
1: 'mutualFlag',
2: 'replayFlag',
3: 'sequenceFlag',
4: 'anonFlag',
5: 'confFlag',
6: 'integFlag',
}
class NegState(Enumerated):
_map = {
0: 'accept-completed',
1: 'accept-incomplete',
2: 'reject',
3: 'request-mic',
}
class NegHints(Sequence):
_fields = [
('hintName', GeneralString, {'explicit': 0, 'optional': True}),
('hintAddress', OctetString, {'explicit': 1, 'optional': True}),
]
# https://www.rfc-editor.org/rfc/rfc4178.txt 4.2.1
# EXTENDED IN: https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-spng/8e71cf53-e867-4b79-b5b5-38c92be3d472
class NegTokenInit2(Sequence):
#explicit = (APPLICATION, 0)
_fields = [
('mechTypes', MechTypes, {'tag_type': TAG, 'tag': 0}),
('reqFlags', ContextFlags, {'tag_type': TAG, 'tag': 1, 'optional': True}),
('mechToken', OctetString, {'tag_type': TAG, 'tag': 2, 'optional': True}),
('negHints', NegHints, {'tag_type': TAG, 'tag': 3, 'optional': True}),
('mechListMIC', OctetString, {'tag_type': TAG, 'tag': 4, 'optional': True}),
]
# https://www.rfc-editor.org/rfc/rfc4178.txt 4.2.2
class NegTokenResp(Sequence):
#explicit = (APPLICATION, 1)
_fields = [
('negState', NegState, {'tag_type': TAG, 'tag': 0, 'optional': True}),
('supportedMech', MechType, {'tag_type': TAG, 'tag': 1, 'optional': True}),
('responseToken', OctetString, {'tag_type': TAG, 'tag': 2, 'optional': True}),
('mechListMIC', OctetString, {'tag_type': TAG, 'tag': 3, 'optional': True}),
]
class NegotiationToken(Choice):
_alternatives = [
('negTokenInit', NegTokenInit2, {'explicit': (CONTEXT, 0) } ),
('negTokenResp', NegTokenResp, {'explicit': (CONTEXT, 1) } ),
]
class GSS_SPNEGO(Sequence):
class_ = 2
tag = 0
_fields = [
('NegotiationToken', NegotiationToken),
]
### I have 0 idea where this is standardized :(
class GSSType(ObjectIdentifier):
_map = {
#'': 'SNMPv2-SMI::enterprises.311.2.2.30',
'1.3.6.1.5.5.2': 'SPNEGO',
}
class GSSAPI(Sequence):
class_ = 1
tag = 0
_fields = [
('type', GSSType, {'optional': False}),
('value', Any, {'optional': False}),
]
_oid_pair = ('type', 'value')
_oid_specs = {
'SPNEGO': NegotiationToken,
}
# https://tools.ietf.org/html/rfc2743#page-81
# You may think this is ASN1. But in truth, it's not.
# Below is a fucking disgrace of a protocol design.
class KRB5Token:
def __init__(self, data = None, tok_id = b'\x01\x00'):
self.tok_id = tok_id
self.data = data
@staticmethod
def from_bytes(data):
return KRB5Token.from_buffer(io.BytesIO(data))
@staticmethod
def from_buffer(buff):
t = KRB5Token()
buff.read(1)
length = -1
x = int.from_bytes(buff.read(1), 'big', signed = False)
input(x)
if x <= 127:
length = x
else:
x &= ~0x80
input(x)
length = int.from_bytes(buff.read(x), 'big', signed = False)
input('length: %s' % length)
oid_asn1 = buff.read(11)
t.tok_id = int.from_bytes(buff.read(2), 'big', signed = False)
t.data = buff.read(length-13)
input(t.tok_id )
return t
def length_encode(self, x):
if x <= 127:
return x.to_bytes(1, 'big', signed = False)
else:
lb = x.to_bytes((x.bit_length() + 7) // 8, 'big')
t = (0x80 | len(lb)).to_bytes(1, 'big', signed = False)
return t+lb
def to_bytes(self):
t = b'\x60' #
t += self.length_encode(11 + 2 + len(self.data))
t += bytes.fromhex('06092a864886f712010202') #OID length + OID for kerberos
t += self.tok_id
t += self.data
return t | [
"io.BytesIO"
] | [((3357, 3373), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (3367, 3373), False, 'import io\n')] |
import docker
import boto3
import os
import sys
import base64
from datetime import datetime, timezone
def get_docker_client():
return docker.from_env()
def get_ecr_clients(settings):
clients = []
for region in settings['regions']:
clients.append(boto3.client('ecr',
aws_access_key_id=settings['access_key_id'],
aws_secret_access_key=settings['secret_access_key'],
region_name=region
))
return clients
def get_sts_client(settings):
return boto3.client('sts',
aws_access_key_id=settings['access_key_id'],
aws_secret_access_key=settings['secret_access_key']
)
def exit_with_error(message, *args):
print('Something went wrong:', message.format(*args), file=sys.stderr, flush=True)
sys.exit(1)
def get_aws_account_id(sts_client):
return sts_client.get_caller_identity().get('Account')
def get_regions(env):
regions = env.get('PLUGIN_REGION')
if not regions:
return None
return regions.split(',')
def get_repo(env):
return env.get('PLUGIN_REPO', env.get('DRONE_REPO_NAME'))
def get_dockerfile(env):
return env.get('PLUGIN_DOCKERFILE', './Dockerfile')
def get_tags(env):
user_tags = env.get('PLUGIN_TAGS')
tags = [tag for tag in user_tags.split(',')]
return tags
def get_settings(env):
return {
'access_key_id': env.get('PLUGIN_ACCESS_KEY_ID'),
'secret_access_key': env.get('PLUGIN_SECRET_ACCESS_KEY'),
'regions': get_regions(env),
'repo': get_repo(env),
'dockerfile': get_dockerfile(env),
'commit': env.get('DRONE_COMMIT'),
'repo_link': env.get('DRONE_REPO_LINK'),
'tags': get_tags(env)
}
def get_ecr_login(ecr_client, registry_id):
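    # Exchange the ECR authorization token for docker-login credentials
    # (username, password and registry endpoint) for the given registry.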
response = ecr_client.get_authorization_token(registryIds=[registry_id])
registry = response['authorizationData'][0]['proxyEndpoint']
token = response['authorizationData'][0]['authorizationToken']
username, password = base64.b64decode(token).decode().split(':')
return {
'username': username,
'password': password,
'registry': registry
}
def get_repos(settings, ecr_clients, aws_account_id):
repos = []
for client in ecr_clients:
response = client.describe_repositories(
registryId=aws_account_id,
repositoryNames=[settings['repo']]
)
repo = response['repositories'][0]
repos.append({
'registry_id': repo['registryId'],
'name': repo['repositoryName'],
'uri': repo['repositoryUri'],
'login': get_ecr_login(client, repo['registryId'])
})
return repos
def login_to_registries(docker_client, repos):
for repo in repos:
login = repo['login']
docker_client.login(
login['username'],
login['password'],
registry=login['registry']
)
def build_image(docker_client, settings):
build_tag = ':'.join((settings['repo'], settings['tags'][0]))
build_date = datetime.now(timezone.utc).astimezone().isoformat()
image, *_ = docker_client.images.build(
path="./",
tag=build_tag,
dockerfile=settings['dockerfile'],
rm=True,
forcerm=True,
buildargs={
'CI_BUILD_DATE': build_date,
'CI_VCS_URL': settings['repo_link'],
'CI_VCS_REF': settings['commit']
},
labels={
'org.label-schema.schema-version': '1.0',
'org.label-schema.build-date': build_date,
'org.label-schema.vcs-url': settings['repo_link'],
'org.label-schema.vcs-ref': settings['commit']
}
)
return image
def tag_image(image, settings, repos):
for tag in settings['tags']:
for repo in repos:
image.tag(
repository=repo['uri'],
tag=tag
)
def push_image(docker_client, settings, repos):
for tag in settings['tags']:
for repo in repos:
docker_client.images.push(
repository=repo['uri'],
tag=tag
)
def build_and_push_image():
settings = get_settings(os.environ)
sts_client = get_sts_client(settings)
ecr_clients = get_ecr_clients(settings)
docker_client = get_docker_client()
print('Finding AWS account id...')
aws_account_id = get_aws_account_id(sts_client)
print('AWS account id is {0}.'.format(aws_account_id))
print('Repo name is', settings['repo'], flush=True)
print('Regions:')
for region in settings['regions']:
print('- ', region)
print('Fetching repos info from ECR across regions...', flush=True)
repos = get_repos(settings, ecr_clients, aws_account_id)
print('Fetched repos info.')
print('Repos:')
for repo in repos:
print('- ', repo['uri'])
print('Logging in to registries...', flush=True)
login_to_registries(docker_client, repos)
print('Logged in. Building image...', flush=True)
try:
image = build_image(docker_client, settings)
except docker.errors.BuildError as e:
for line in e.build_log:
if 'stream' in line:
print(line['stream'].strip())
raise
print('Build finished.')
print('Tags:')
for tag in settings['tags']:
print('- ', tag)
print('Tagging image...', flush=True)
tag_image(image, settings, repos)
print('Tagged. Pushing image tags to registries...', flush=True)
push_image(docker_client, settings, repos)
print('Pushed. All done.')
if __name__ == '__main__':
build_and_push_image()
| [
"docker.from_env",
"boto3.client",
"base64.b64decode",
"datetime.datetime.now",
"sys.exit"
] | [((137, 154), 'docker.from_env', 'docker.from_env', ([], {}), '()\n', (152, 154), False, 'import docker\n'), ((481, 602), 'boto3.client', 'boto3.client', (['"""sts"""'], {'aws_access_key_id': "settings['access_key_id']", 'aws_secret_access_key': "settings['secret_access_key']"}), "('sts', aws_access_key_id=settings['access_key_id'],\n aws_secret_access_key=settings['secret_access_key'])\n", (493, 602), False, 'import boto3\n'), ((736, 747), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (744, 747), False, 'import sys\n'), ((261, 402), 'boto3.client', 'boto3.client', (['"""ecr"""'], {'aws_access_key_id': "settings['access_key_id']", 'aws_secret_access_key': "settings['secret_access_key']", 'region_name': 'region'}), "('ecr', aws_access_key_id=settings['access_key_id'],\n aws_secret_access_key=settings['secret_access_key'], region_name=region)\n", (273, 402), False, 'import boto3\n'), ((1884, 1907), 'base64.b64decode', 'base64.b64decode', (['token'], {}), '(token)\n', (1900, 1907), False, 'import base64\n'), ((2835, 2861), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (2847, 2861), False, 'from datetime import datetime, timezone\n')] |
from samcli.cli.context import Context
import click
class LocalContext(Context):
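    # Extends the SAM CLI Context with an AWS account id; assigning the property
    # stores the value and calls the context's _refresh_session().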
def __init__(self):
super().__init__()
self._aws_account_id = None
@property
def aws_account_id(self):
return self._aws_account_id
@aws_account_id.setter
def aws_account_id(self, value):
self._aws_account_id = value
self._refresh_session()
pass_context = click.make_pass_decorator(LocalContext)
| [
"click.make_pass_decorator"
] | [((402, 441), 'click.make_pass_decorator', 'click.make_pass_decorator', (['LocalContext'], {}), '(LocalContext)\n', (427, 441), False, 'import click\n')] |
# coding=utf-8
'''Implementation of the Add Page form.
'''
try:
from five.formlib.formbase import AddForm
except ImportError:
from Products.Five.formlib.formbase import AddForm # lint:ok
from zope.component import createObject
from zope.formlib import form
from Products.Five.browser.pagetemplatefile import ZopeTwoPageTemplateFile
from zope.app.form.browser import TextAreaWidget
from zope.app.apidoc.interface import getFieldsInOrder
from zope.schema import *
import interfaces
from page import GSContentPage
def wym_editor_widget(field, request):
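    # Render the field as a plain text area tagged with the 'wymeditor' CSS class,
    # so the WYMeditor rich-text script can pick it up client-side.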
retval = TextAreaWidget(field, request)
retval.cssClass = 'wymeditor'
return retval
class AddPageForm(AddForm):
label = u'Add Page'
pageTemplateFileName = 'browser/templates/edit_page.pt'
template = ZopeTwoPageTemplateFile(pageTemplateFileName)
def __init__(self, context, request):
self.context = context
self.request = request
self.interface = interface = getattr(interfaces, 'IGSContentPage')
AddForm.__init__(self, context, request)
self.siteInfo = createObject('groupserver.SiteInfo', context)
site_root = context.site_root()
assert hasattr(site_root, 'GlobalConfiguration')
self.form_fields = form.Fields(interface, render_context=True,
omit_readonly=True)
self.form_fields['content'].custom_widget = wym_editor_widget
self.form_fields['content'].field.default = u'<p>Enter content '\
u'here.</p>'
self.mode = 'add'
@property
def id(self):
return self.form_fields['id']
@property
def title(self):
return self.form_fields['title']
@property
def description(self):
return self.form_fields['description']
@property
def content(self):
return self.form_fields['content']
# --=mpj17=--
# The "form.action" decorator creates an action instance, with
# "handle_reset" set to the success handler,
# "handle_reset_action_failure" as the failure handler, and adds the
# action to the "actions" instance variable (creating it if
# necessary). I did not need to explicitly state that "Edit" is the
# label, but it helps with readability.
@form.action(label=u'Add', failure='handle_set_action_failure')
def handle_set(self, action, data):
return self.set_data(data)
def handle_set_action_failure(self, action, data, errors):
if len(errors) == 1:
self.status = u'<p>There is an error:</p>'
else:
self.status = u'<p>There are errors:</p>'
def set_data(self, data):
assert self.context
assert self.form_fields
alteredFields = []
for datum in getFieldsInOrder(self.interface):
if datum[0] in data:
if data[datum[0]] != getattr(self.context, datum[0]):
alteredFields.append(datum[0])
# Create the content folder and object and apply changes.
folder = GSContentPage(self.context, mode='add', id=data['id'])
if folder.status['error']:
retval = u'%s' % folder.status['msg']
changed = form.applyChanges(folder, self.form_fields, data)
# All good, so redirect to the edit page.
if changed:
url = '%s/edit_page.html' % folder.context.absolute_url(0)
self.request.response.redirect(url)
return
else:
retval = u'Problem creating page'
assert retval
assert type(retval) == unicode
self.status = retval
| [
"page.GSContentPage",
"zope.app.apidoc.interface.getFieldsInOrder",
"zope.formlib.form.Fields",
"zope.formlib.form.applyChanges",
"zope.component.createObject",
"Products.Five.formlib.formbase.AddForm.__init__",
"zope.app.form.browser.TextAreaWidget",
"zope.formlib.form.action",
"Products.Five.browser.pagetemplatefile.ZopeTwoPageTemplateFile"
] | [((574, 604), 'zope.app.form.browser.TextAreaWidget', 'TextAreaWidget', (['field', 'request'], {}), '(field, request)\n', (588, 604), False, 'from zope.app.form.browser import TextAreaWidget\n'), ((786, 831), 'Products.Five.browser.pagetemplatefile.ZopeTwoPageTemplateFile', 'ZopeTwoPageTemplateFile', (['pageTemplateFileName'], {}), '(pageTemplateFileName)\n', (809, 831), False, 'from Products.Five.browser.pagetemplatefile import ZopeTwoPageTemplateFile\n'), ((2281, 2343), 'zope.formlib.form.action', 'form.action', ([], {'label': 'u"""Add"""', 'failure': '"""handle_set_action_failure"""'}), "(label=u'Add', failure='handle_set_action_failure')\n", (2292, 2343), False, 'from zope.formlib import form\n'), ((1021, 1061), 'Products.Five.formlib.formbase.AddForm.__init__', 'AddForm.__init__', (['self', 'context', 'request'], {}), '(self, context, request)\n', (1037, 1061), False, 'from Products.Five.formlib.formbase import AddForm\n'), ((1087, 1132), 'zope.component.createObject', 'createObject', (['"""groupserver.SiteInfo"""', 'context'], {}), "('groupserver.SiteInfo', context)\n", (1099, 1132), False, 'from zope.component import createObject\n'), ((1259, 1322), 'zope.formlib.form.Fields', 'form.Fields', (['interface'], {'render_context': '(True)', 'omit_readonly': '(True)'}), '(interface, render_context=True, omit_readonly=True)\n', (1270, 1322), False, 'from zope.formlib import form\n'), ((2775, 2807), 'zope.app.apidoc.interface.getFieldsInOrder', 'getFieldsInOrder', (['self.interface'], {}), '(self.interface)\n', (2791, 2807), False, 'from zope.app.apidoc.interface import getFieldsInOrder\n'), ((3047, 3101), 'page.GSContentPage', 'GSContentPage', (['self.context'], {'mode': '"""add"""', 'id': "data['id']"}), "(self.context, mode='add', id=data['id'])\n", (3060, 3101), False, 'from page import GSContentPage\n'), ((3206, 3255), 'zope.formlib.form.applyChanges', 'form.applyChanges', (['folder', 'self.form_fields', 'data'], {}), '(folder, self.form_fields, data)\n', (3223, 3255), False, 'from zope.formlib import form\n')] |
#!/usr/bin/env python
#******************************************************************************
# From $Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $
# VERSION MODIFIED FROM ORIGINAL, come with no warranty
# <NAME>
# input: vrt file (-addalpha) in 3857 projection (projection is forced due
# to weird effect in AutoCreateWarpedVRT)
# 2 bands: 1 grayscale, one alpha mask
import sqlite3
import os
import math
__version__ = "$Id: gdal2tiles.py 19288 2010-04-02 18:36:17Z rouault $"
class SqliteTileStorage():
""" Sqlite files methods for simple tile storage"""
def __init__(self, type):
self.type=type
def create(self, filename, overwrite=False):
""" Create a new storage file, overwrite or not if already exists"""
self.filename=filename
CREATEINDEX=True
if overwrite:
if os.path.isfile(self.filename):
os.unlink(self.filename)
else:
if os.path.isfile(self.filename):
CREATEINDEX=False
self.db = sqlite3.connect(self.filename)
cur = self.db.cursor()
cur.execute(
"""
CREATE TABLE IF NOT EXISTS tiles (
x int,
y int,
z int,
s int,
image blob,
PRIMARY KEY(x,y,z,s))
""")
cur.execute(
"""
CREATE TABLE IF NOT EXISTS info (
desc TEXT,
tilenumbering TEXT,
minzoom int,
maxzoom int)
""")
if CREATEINDEX:
cur.execute(
"""
CREATE INDEX IND
ON tiles(x,y,z,s)
""")
cur.execute("insert into info(desc, tilenumbering) values('Simple sqlite tile storage..', (?))", (self.type, ))
self.minzoom = None
self.maxzoom = None
self.written = set()
self.db.commit()
self.pending_images = []
def open(self, filename) :
""" Open an existing file"""
self.filename=filename
if os.path.isfile(self.filename):
self.db = sqlite3.connect(self.filename)
return True
else:
return False
def close(self):
self.commitData(force=True)
cur = self.db.cursor()
cur.execute("UPDATE Info SET minzoom = (?), maxzoom = (?)", (self.minzoom, self.maxzoom))
self.db.commit()
def writeImageFile(self, x, y, z, f) :
""" write a single tile from a file """
self.writeImage(x, y, z, f.read())
def writeImage(self, x, y, z, image) :
""" write a single tile from string """
if (x, y, z) in self.written:
return
self.written.add((x, y, z))
self.pending_images.append((z, x, y, 0, sqlite3.Binary(image)))
if self.minzoom is None or z < self.minzoom:
self.minzoom = z
if self.maxzoom is None or z > self.maxzoom:
self.maxzoom = z
self.commitData()
def commitData(self, force = False):
if len(self.pending_images) > 500 or force:
cur = self.db.cursor()
cur.executemany('insert into tiles (z, x, y,s,image) \
values (?,?,?,?,?)',
self.pending_images)
self.pending_images = []
self.db.commit()
def readImage(self, x, y, z) :
""" read a single tile as string """
cur = self.db.cursor()
cur.execute("select image from tiles where x=? and y=? and z=?", (x, y, z))
res = cur.fetchone()
if res:
image = str(res[0])
return image
else :
print ("None found")
return None
def createFromDirectory(self, filename, basedir, overwrite=False) :
""" Create a new sqlite file from a z/y/x.ext directory structure"""
self.create(filename, overwrite)
for zs in os.listdir(basedir):
zz=int(zs)
for xs in os.listdir(basedir+'/'+zs+'/'):
xx=int(xs)
for ys in os.listdir(basedir+'/'+zs+'/'+'/'+xs+'/'):
yy=int(ys.split('.')[0])
print (zz, yy, xx)
z=zz
x=xx
y=yy
print (basedir+'/'+zs+'/'+'/'+xs+'/'+ys)
f=open(basedir+'/'+zs+'/'+'/'+xs+'/'+ys)
self.writeImageFile(x, y, z, f)
#cur.execute('insert into tiles (z, x, y,image) \
# values (?,?,?,?)',
# (z, x, y, sqlite3.Binary(f.read())))
def createBigPlanetFromTMS(self, targetname, overwrite=False):
""" Create a new sqlite with BigPlanet numbering scheme from a TMS one"""
target=SqliteTileStorage('BigPlanet')
target.create(targetname, overwrite)
cur = self.db.cursor()
cur.execute("select x, y, z from tiles")
res = cur.fetchall()
for (x, y, z) in res:
xx= x
zz= 17 - z
yy= 2**zz - y -1
im=self.readImage(x,y,z)
target.writeImage(xx,yy,zz,im)
def createTMSFromBigPlanet(self, targetname, overwrite=False):
""" Create a new sqlite with TMS numbering scheme from a BigPlanet one"""
target=SqliteTileStorage('TMS')
target.create(targetname, overwrite)
cur = self.db.cursor()
cur.execute("select x, y, z from tiles")
res = cur.fetchall()
for (x, y, z) in res:
xx= x
zz= 17 - z
yy= 2**zz - y -1
im=self.readImage(x,y,z)
target.writeImage(xx,yy,zz,im)
def createTMSFromOSM(self, targetname, overwrite=False):
""" Create a new sqlite with TMS numbering scheme from a OSM/Bing/Googlemaps one"""
target=SqliteTileStorage('TMS')
target.create(targetname, overwrite)
cur = self.db.cursor()
cur.execute("select x, y, z from tiles")
res = cur.fetchall()
for (x, y, z) in res:
xx= x
zz= z
yy= 2**zz - y
im=self.readImage(x,y,z)
target.writeImage(xx,yy,zz,im)
def createOSMFromTMS(self, targetname, overwrite=False):
""" Create a new sqlite with OSM/Bing/Googlemaps numbering scheme from a TMS one"""
target=SqliteTileStorage('OSM')
target.create(targetname, overwrite)
cur = self.db.cursor()
cur.execute("select x, y, z from tiles")
res = cur.fetchall()
for (x, y, z) in res:
xx= x
zz= z
yy= 2**zz - y
im=self.readImage(x,y,z)
target.writeImage(xx,yy,zz,im)
# =============================================================================
# =============================================================================
# =============================================================================
__doc__globalmaptiles = """
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:900913 = EPSG:3785)
for Google Maps, Yahoo Maps, Microsoft Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by <NAME> on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
    or find it useful for your project, please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
MAXZOOMLEVEL = 32
class GlobalMercator(object):
"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001.
Such tiles are compatible with Google Maps, Microsoft Virtual Earth, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in metres XY pixels Z zoom XYZ from TMS
EPSG:4326 EPSG:900913
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:900913?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
Constant 20037508.342789244 comes from the circumference of the Earth in meters,
which is 40 thousand kilometers, the coordinate origin is in the middle of extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913
    Polar areas with abs(latitude) bigger than 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:900913?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
The lat/lon coordinates are using WGS84 datum, yeh?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
    they were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
    0.33 percent scale distortion in the Y direction, which is not visually noticeable.
How do I create a raster in EPSG:900913 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
All of the tools supports -t_srs 'epsg:900913'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
    The same projection is defined as EPSG:3785. WKT definition is in the official
EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
    Human readable WKT format of EPSG:900913:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tileSize=256):
"Initialize the TMS Global Mercator pyramid"
self.tileSize = tileSize
self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
# 156543.03392804062 for tileSize 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon ):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913"
mx = lon * self.originShift / 180.0
my = math.log( math.tan((90 + lat) * math.pi / 360.0 )) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my ):
"Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan( math.exp( lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, pyr, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:900913"
mapSize = self.tileSize << zoom
py = mapSize - pyr
res = self.Resolution( zoom )
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:900913 to pyramid pixel coordinates in given zoom level"
res = self.Resolution( zoom )
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
mapSize = self.tileSize << zoom
return px, mapSize - py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int( math.ceil( px / float(self.tileSize) ) - 1 )
ty = int( math.ceil( py / float(self.tileSize) ) - 1 )
return tx, ty
#def PixelsToRaster(self, px, py, zoom):
# "Move the origin of pixel coordinates to top-left corner"
#
# mapSize = self.tileSize << zoom
# return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels( mx, my, zoom)
return self.PixelsToTile( px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:900913 coordinates"
minx, miny = self.PixelsToMeters( tx*self.tileSize, (ty+1)*self.tileSize, zoom )
maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty)*self.tileSize, zoom )
return ( minx, miny, maxx, maxy )
def TileLatLonBounds(self, tx, ty, zoom ):
"Returns bounds of the given tile in latutude/longitude using WGS84 datum"
bounds = self.TileBounds( tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return ( minLat, minLon, maxLat, maxLon )
def TileLatLonCorners(self, tx, ty, zoom ):
p1_lat, p1_lon, p3_lat, p3_lon = self.TileLatLonBounds(tx, ty, zoom)
p2_lat, p2_lon, _ , _ = self.TileLatLonBounds(tx+1, ty, zoom)
p4_lat, p4_lon, _, _ = self.TileLatLonBounds(tx, ty-1, zoom)
return (p1_lat, p1_lon, p2_lat, p2_lon, p3_lat, p3_lon, p4_lat, p4_lon)
def Resolution(self, zoom ):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)
return self.initialResolution / (2**zoom)
def ZoomForPixelSize(self, pixelSize ):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
if i!=0:
return i-1
else:
return 0 # We don't want to scale up
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2**zoom - 1) - ty
def QuadTree(self, tx, ty, zoom ):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2**zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i-1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
| [
"math.exp",
"os.unlink",
"math.tan",
"os.path.isfile",
"sqlite3.connect",
"os.listdir",
"sqlite3.Binary"
] | [((1070, 1100), 'sqlite3.connect', 'sqlite3.connect', (['self.filename'], {}), '(self.filename)\n', (1085, 1100), False, 'import sqlite3\n'), ((2197, 2226), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (2211, 2226), False, 'import os\n'), ((4183, 4202), 'os.listdir', 'os.listdir', (['basedir'], {}), '(basedir)\n', (4193, 4202), False, 'import os\n'), ((869, 898), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (883, 898), False, 'import os\n'), ((970, 999), 'os.path.isfile', 'os.path.isfile', (['self.filename'], {}), '(self.filename)\n', (984, 999), False, 'import os\n'), ((2250, 2280), 'sqlite3.connect', 'sqlite3.connect', (['self.filename'], {}), '(self.filename)\n', (2265, 2280), False, 'import sqlite3\n'), ((4249, 4285), 'os.listdir', 'os.listdir', (["(basedir + '/' + zs + '/')"], {}), "(basedir + '/' + zs + '/')\n", (4259, 4285), False, 'import os\n'), ((916, 940), 'os.unlink', 'os.unlink', (['self.filename'], {}), '(self.filename)\n', (925, 940), False, 'import os\n'), ((2953, 2974), 'sqlite3.Binary', 'sqlite3.Binary', (['image'], {}), '(image)\n', (2967, 2974), False, 'import sqlite3\n'), ((4334, 4387), 'os.listdir', 'os.listdir', (["(basedir + '/' + zs + '/' + '/' + xs + '/')"], {}), "(basedir + '/' + zs + '/' + '/' + xs + '/')\n", (4344, 4387), False, 'import os\n'), ((13939, 13977), 'math.tan', 'math.tan', (['((90 + lat) * math.pi / 360.0)'], {}), '((90 + lat) * math.pi / 360.0)\n', (13947, 13977), False, 'import math\n'), ((14336, 14367), 'math.exp', 'math.exp', (['(lat * math.pi / 180.0)'], {}), '(lat * math.pi / 180.0)\n', (14344, 14367), False, 'import math\n')] |
from django.contrib import admin
from django.utils.html import format_html
# from django.contrib.auth.models import Group
from .models import Product, CartProduct, Order, Address, Payment, Coupon, Refund, Setting, ProductImages, Profile, \
Contact, Category, Size
# admin.site.unregister(Group)
class ProductImageModel(admin.StackedInline):
model = ProductImages
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
list_display = ['name', 'product_image', 'price', 'discount_price', 'slug', 'label']
inlines = [ProductImageModel]
list_per_page = 3
def product_image(self, obj):
return format_html(f'''
<img height='80px' src='{obj.image.url}'/>
''')
def make_refund_accepted(modeladmin, request, queryset):
queryset.update(cancelled=True, refund_requested=False, refund_granted=True)
make_refund_accepted.short_description = 'Update orders to refund granted'
def make_product_received(modeladmin, request, queryset):
queryset.update(received=True)
make_product_received.short_description = 'Update orders to received'
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
pass
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
list_display = ['user', 'ordered', 'ordered_date', 'being_delivered', 'cancelled', 'received', 'refund_requested',
'refund_granted',
'billing_address', 'shipping_address', 'payment', 'coupon', 'ip']
list_filter = ['ordered', 'being_delivered', 'received', 'refund_requested', 'refund_granted']
list_display_links = ['user', 'billing_address', 'shipping_address', 'payment', 'coupon']
search_fields = ['user__username', 'ref_code']
actions = [make_refund_accepted, make_product_received]
readonly_fields = ['user', 'ordered', 'billing_address', 'shipping_address', 'payment', 'coupon', 'ref_code',
'products', 'ordered_date']
date_hierarchy = 'ordered_date'
fieldsets = [
('Name', {'fields': ['user', 'ip', 'billing_address', 'shipping_address']}),
('Order Information', {'fields': ['ordered', 'ordered_date', 'payment', 'coupon', 'ref_code']}),
('Ordered Items', {'fields': ['products']}),
('Delivery Status', {'fields': ['being_delivered', 'cancelled', 'received']}),
('Refund', {'fields': ['refund_requested', 'refund_granted']}),
]
@admin.register(CartProduct)
class CartProductAdmin(admin.ModelAdmin):
list_display = ['user', 'product', 'quantity', 'ordered']
readonly_fields = ['user', 'product', 'quantity', 'ordered']
list_per_page = 5
@admin.register(Address)
class AddressAdmin(admin.ModelAdmin):
list_display = ['user', 'date', 'address', 'town', 'country', 'zip', 'address_type', 'default']
list_filter = ['default', 'address_type', 'country']
search_fields = ['user', 'street_address', 'apartment_address', 'zip']
date_hierarchy = 'date'
@admin.register(Payment)
class PaymentAdmin(admin.ModelAdmin):
readonly_fields = ['stripe_charge_id', 'paypal_order_key', 'paypal_user_id', 'user', 'paypal_full_name',
'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code',
'paypal_country_code', 'amount', 'paypal_amount']
list_display = ['user', 'amount', 'timestamp']
list_per_page = 5
date_hierarchy = 'timestamp'
fieldsets = (
('Customer', {'fields': ['user']}),
('Stripe Payment', {'fields': ['stripe_charge_id']}),
('Paypal Payment', {'fields': ['paypal_order_key', 'paypal_user_id', 'paypal_full_name',
'paypal_email', 'paypal_address1', 'paypal_address2', 'paypal_postal_code',
'paypal_country_code',
'paypal_amount']}),
('Total Amount Paid', {'fields': ['amount']}),
)
@admin.register(Coupon)
class CouponAdmin(admin.ModelAdmin):
pass
def refund_accepted(modeladmin, request, queryset):
queryset.update(accepted=True)
refund_accepted.short_description = 'Update refund to accepted'
@admin.register(Refund)
class RefundAdmin(admin.ModelAdmin):
list_display = ['order', 'ref_code', 'accepted', 'email', 'date_req']
readonly_fields = ['order', 'ref_code', 'accepted', 'email', 'reason']
actions = [refund_accepted]
date_hierarchy = 'date_req'
@admin.register(Setting)
class SettingAdmin(admin.ModelAdmin):
pass
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
fieldsets = [
('User Profile', {'fields': ['user', 'country', 'phone_number']}),
('Profile Photo', {'fields': ['image']}),
]
readonly_fields = ['user', 'country', 'phone_number', 'image']
@admin.register(Contact)
class ContactAdmin(admin.ModelAdmin):
pass
@admin.register(Size)
class SizeAdmin(admin.ModelAdmin):
pass
admin.site.site_title = "EMU"
admin.site.site_header = "EMU"
admin.site.index_title = "Administration"
| [
"django.contrib.admin.register",
"django.utils.html.format_html"
] | [((381, 404), 'django.contrib.admin.register', 'admin.register', (['Product'], {}), '(Product)\n', (395, 404), False, 'from django.contrib import admin\n'), ((1110, 1134), 'django.contrib.admin.register', 'admin.register', (['Category'], {}), '(Category)\n', (1124, 1134), False, 'from django.contrib import admin\n'), ((1186, 1207), 'django.contrib.admin.register', 'admin.register', (['Order'], {}), '(Order)\n', (1200, 1207), False, 'from django.contrib import admin\n'), ((2421, 2448), 'django.contrib.admin.register', 'admin.register', (['CartProduct'], {}), '(CartProduct)\n', (2435, 2448), False, 'from django.contrib import admin\n'), ((2643, 2666), 'django.contrib.admin.register', 'admin.register', (['Address'], {}), '(Address)\n', (2657, 2666), False, 'from django.contrib import admin\n'), ((2968, 2991), 'django.contrib.admin.register', 'admin.register', (['Payment'], {}), '(Payment)\n', (2982, 2991), False, 'from django.contrib import admin\n'), ((3939, 3961), 'django.contrib.admin.register', 'admin.register', (['Coupon'], {}), '(Coupon)\n', (3953, 3961), False, 'from django.contrib import admin\n'), ((4166, 4188), 'django.contrib.admin.register', 'admin.register', (['Refund'], {}), '(Refund)\n', (4180, 4188), False, 'from django.contrib import admin\n'), ((4442, 4465), 'django.contrib.admin.register', 'admin.register', (['Setting'], {}), '(Setting)\n', (4456, 4465), False, 'from django.contrib import admin\n'), ((4516, 4539), 'django.contrib.admin.register', 'admin.register', (['Profile'], {}), '(Profile)\n', (4530, 4539), False, 'from django.contrib import admin\n'), ((4797, 4820), 'django.contrib.admin.register', 'admin.register', (['Contact'], {}), '(Contact)\n', (4811, 4820), False, 'from django.contrib import admin\n'), ((4871, 4891), 'django.contrib.admin.register', 'admin.register', (['Size'], {}), '(Size)\n', (4885, 4891), False, 'from django.contrib import admin\n'), ((638, 727), 'django.utils.html.format_html', 'format_html', (['f"""\n <img height=\'80px\' src=\'{obj.image.url}\'/>\n """'], {}), '(\n f"""\n <img height=\'80px\' src=\'{obj.image.url}\'/>\n """)\n', (649, 727), False, 'from django.utils.html import format_html\n')] |
from typing import Callable
from queue import Queue
"""A type of depth-first walk of a tree (parent, left, right)"""
def pre_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable):
if node is not None:
result.append(node)
if left(node) is not None:
pre_order_walk(left(node), result, left, right, parent)
if right(node) is not None:
pre_order_walk(right(node), result, left, right, parent)
"""A type of depth-first walk of a tree (left, parent, right)"""
def in_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable):
if node is not None:
if left(node) is not None:
in_order_walk(left(node), result, left, right, parent)
result.append(node)
if right(node) is not None:
in_order_walk(right(node), result, left, right, parent)
"""A type of depth-first walk of a tree (left, right, parent)"""
def post_order_walk(node, result: list, left: Callable, right: Callable, parent: Callable):
if node is not None:
if left(node) is not None:
post_order_walk(left(node), result, left, right, parent)
if right(node) is not None:
post_order_walk(right(node), result, left, right, parent)
result.append(node)
def add_child(node, child_node):
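    # Attach the child to the first free slot (left, then right) and record the
    # parent link; raise if both slots are already taken.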
if not "left" in node:
node["left"] = child_node
child_node["parent"] = node
elif not "right" in node:
node["right"] = child_node
child_node["parent"] = node
else:
raise Exception("parent node is full")
def is_full(node) -> bool:
return "left" in node and "right" in node
def make_node(data):
node = { "data" : data }
return node
def make_tree(items: list):
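    # Build a complete binary tree in level order: each dequeued parent receives
    # children from the queue until it has two.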
tree = []
q = Queue()
current_parent = q.get(block=False) if q.empty() is False else None
for item in items:
print('DEBUG: adding item %s' % item)
node = make_node(item)
q.put(node)
tree.append(node)
if current_parent is not None:
if (is_full(current_parent)):
current_parent = q.get(block=False)
add_child(current_parent, node)
else:
current_parent = q.get(block=False)
return tree
def print_tree(tree: list):
for node in tree:
parent = node["parent"]["data"] if "parent" in node else None
left = node["left"]["data"] if "left" in node else None
right = node["right"]["data"] if "right" in node else None
print("%s <- %s: %s %s" % (parent, node["data"], left, right))
def print_tree_minimal(tree: list):
for node in tree:
print("%s" % node["data"], end=' ')
print()
def main():
tree = make_tree([25, 23, 22, 21, 12, 20, 17, 15, 16, 10, 9, 19, 18, 14, 7, 4, 13, 11])
print_tree(tree)
pre_order_walk_result = []
pre_order_walk(tree[0], pre_order_walk_result,
left=lambda node: node["left"] if "left" in node else None,
right=lambda node: node["right"] if "right" in node else None,
parent=lambda node: node["parent"] if "parent" in node else None)
print_tree_minimal(pre_order_walk_result)
in_order_walk_result = []
in_order_walk(tree[0], in_order_walk_result,
left=lambda node: node["left"] if "left" in node else None,
right=lambda node: node["right"] if "right" in node else None,
parent=lambda node: node["parent"] if "parent" in node else None)
print_tree_minimal(in_order_walk_result)
post_order_walk_result = []
post_order_walk(tree[0], post_order_walk_result,
left=lambda node: node["left"] if "left" in node else None,
right=lambda node: node["right"] if "right" in node else None,
parent=lambda node: node["parent"] if "parent" in node else None)
print_tree_minimal(post_order_walk_result)
main()
| [
"queue.Queue"
] | [((1787, 1794), 'queue.Queue', 'Queue', ([], {}), '()\n', (1792, 1794), False, 'from queue import Queue\n')] |
#!/usr/bin/python3.5
# -*- coding: utf-8 -*-
import os
import sys
EXE_PATH = os.getcwd()  # execution path
SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))  # script path
UPPER_PATH = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent path
sys.path.append(UPPER_PATH)  # add the parent path to sys.path
from my_tools import first_module  # reference the lower directory from the upper directory
def function_2():
print("function_2 of second module imported")
def call_function_in_first_module():
print("called from second module to first module")
first_module.function_1()
if __name__ == "__main__":
function_2()
call_function_in_first_module()
| [
"sys.path.append",
"os.path.abspath",
"my_tools.first_module.function_1",
"os.getcwd",
"os.path.dirname"
] | [((79, 90), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (88, 90), False, 'import os\n'), ((250, 277), 'sys.path.append', 'sys.path.append', (['UPPER_PATH'], {}), '(UPPER_PATH)\n', (265, 277), False, 'import sys\n'), ((130, 155), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (145, 155), False, 'import os\n'), ((534, 559), 'my_tools.first_module.function_1', 'first_module.function_1', ([], {}), '()\n', (557, 559), False, 'from my_tools import first_module\n'), ((213, 238), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (228, 238), False, 'import os\n')] |
path = './stores.csv'
import pandas as pd
from shopify import extract_products_json
result = ''
with open(path) as csvfile:
df = pd.read_csv(csvfile)
url = df['url']
products = extract_products_json(url)
print(products) | [
"pandas.read_csv",
"shopify.extract_products_json"
] | [((135, 155), 'pandas.read_csv', 'pd.read_csv', (['csvfile'], {}), '(csvfile)\n', (146, 155), True, 'import pandas as pd\n'), ((191, 217), 'shopify.extract_products_json', 'extract_products_json', (['url'], {}), '(url)\n', (212, 217), False, 'from shopify import extract_products_json\n')] |
from setuptools import setup, find_packages
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name='taming-transformers',
version='0.0.1-eden',
description='Taming Transformers for High-Resolution Image Synthesis',
packages=find_packages(),
include_package_data=True,
install_requires= required,
)
| [
"setuptools.find_packages"
] | [((272, 287), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (285, 287), False, 'from setuptools import setup, find_packages\n')] |
#! /usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy.stats import rankdata
from ._base import CategoricalStats
class ANOSIM(CategoricalStats):
"""ANOSIM statistical method executor.
Analysis of Similarities (ANOSIM) is a non-parametric method that tests
whether two or more groups of objects are significantly different based on
a categorical factor. The ranks of the distances in the distance matrix are
    used to calculate an R statistic, which ranges from -1 (anti-grouping)
    to +1 (strong grouping), with an R value of 0 indicating random grouping.
Notes
-----
See [1]_ for the original ANOSIM reference. The general algorithm and
interface are similar to ``vegan::anosim``, available in R's vegan package
[2]_.
References
----------
.. [1] <NAME>. "Non-parametric multivariate analyses of changes in
community structure." Australian journal of ecology 18.1 (1993):
117-143.
.. [2] http://cran.r-project.org/web/packages/vegan/index.html
"""
short_method_name = 'ANOSIM'
long_method_name = 'Analysis of Similarities'
test_statistic_name = 'R statistic'
def __init__(self, distance_matrix, grouping, column=None):
super(ANOSIM, self).__init__(distance_matrix, grouping, column=column)
self._divisor = self._dm.shape[0] * ((self._dm.shape[0] - 1) / 4)
self._ranked_dists = rankdata(self._dm.condensed_form(),
method='average')
def _run(self, grouping):
"""Compute ANOSIM R statistic (between -1 and +1)."""
# Create a matrix where True means that the two objects are in the same
# group. This ufunc requires that grouping is a numeric vector (e.g.,
# it won't work with a grouping vector of strings).
grouping_matrix = np.equal.outer(grouping, grouping)
# Extract upper triangle from the grouping matrix. It is important to
# extract the values in the same order that the distances are extracted
# from the distance matrix (see self._ranked_dists). Extracting the
# upper triangle (excluding the diagonal) preserves this order.
grouping_tri = grouping_matrix[self._tri_idxs]
return self._compute_r_stat(grouping_tri)
def _compute_r_stat(self, grouping_tri):
# within
r_W = np.mean(self._ranked_dists[grouping_tri])
# between
r_B = np.mean(self._ranked_dists[np.invert(grouping_tri)])
return (r_B - r_W) / self._divisor
| [
"numpy.mean",
"numpy.equal.outer",
"numpy.invert"
] | [((2254, 2288), 'numpy.equal.outer', 'np.equal.outer', (['grouping', 'grouping'], {}), '(grouping, grouping)\n', (2268, 2288), True, 'import numpy as np\n'), ((2779, 2820), 'numpy.mean', 'np.mean', (['self._ranked_dists[grouping_tri]'], {}), '(self._ranked_dists[grouping_tri])\n', (2786, 2820), True, 'import numpy as np\n'), ((2880, 2903), 'numpy.invert', 'np.invert', (['grouping_tri'], {}), '(grouping_tri)\n', (2889, 2903), True, 'import numpy as np\n')] |
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
import time
import pandas as pd
import numpy as np
from datetime import datetime
from textblob import TextBlob
page = requests.get('https://qz.com/india/latest')
soup = BeautifulSoup(page.content, 'html.parser')
weblinks = soup.find_all('article')
pagelinks = []
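# Skip the first five teaser articles and build absolute story URLs for the rest.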
for link in weblinks[5:]:
url = link.contents[0].find_all('a')[0]
pagelinks.append('http://qz.com'+url.get('href'))
authorname = []
title = []
thearticle = []
for link in pagelinks:
# store the text for each article
paragraphtext = []
# get url
url = link
# get page text
page = requests.get(url)
# parse with BFS
soup = BeautifulSoup(page.text, 'html.parser')
# get author name, if there's a named author
try:
abody = soup.find(class_='d3284 india').find('a')
aname = abody.get_text()
except:
aname = 'Anonymous'
# get article title
atitle = soup.find(class_="_21349 india none _4ca8e")
thetitle = atitle.get_text()
# get main article page
articlebody = soup.find(class_='_61c55')
# get text
articletext = soup.find_all('p')[8:]
# print text
for paragraph in articletext[:-1]:
# get the text only
text = paragraph.get_text()
paragraphtext.append(text)
# combine all paragraphs into an article
thearticle.append(paragraphtext)
authorname.append(aname)
title.append(thetitle)
# join paragraphs to re-create the article
myarticle = [' '.join(article) for article in thearticle]
# creating excel file "Quartz_India"
df = pd.DataFrame(columns = ['Title', 'Author' , 'PageLink', 'Article', 'Date'])
df.to_excel("Quartz_India.xlsx", index = False)
# save article data to file
data = {'Title':title,
'Author':authorname,
'PageLink':pagelinks,
'Article':myarticle,
'Date':datetime.now()}
oldnews = pd.read_excel('Quartz_India.xlsx')
news = pd.DataFrame(data=data)
cols = ['Title', 'Author', 'PageLink', 'Article', 'Date']
news = news[cols]
afronews = oldnews.append(news)
afronews.drop_duplicates(subset='Title', keep='last', inplace=True)
afronews.reset_index(inplace=True)
afronews.drop(labels='index', axis=1, inplace=True)
filename = 'Quartz_India.xlsx'
wks_name = 'Data'
writer = pd.ExcelWriter(filename)
afronews.to_excel(writer, wks_name, index=False)
writer.save()
# performing sentiment analysis on the article
data = pd.read_excel("Quartz_India.xlsx")
data['Polarity Article'] = data.apply(lambda x: TextBlob(x['Article']).sentiment.polarity, axis=1)
data.to_excel("Sentiment_Analysis.xlsx",index = False)
| [
"pandas.DataFrame",
"datetime.datetime.now",
"pandas.read_excel",
"textblob.TextBlob",
"requests.get",
"bs4.BeautifulSoup",
"pandas.ExcelWriter"
] | [((199, 242), 'requests.get', 'requests.get', (['"""https://qz.com/india/latest"""'], {}), "('https://qz.com/india/latest')\n", (211, 242), False, 'import requests\n'), ((251, 293), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.content', '"""html.parser"""'], {}), "(page.content, 'html.parser')\n", (264, 293), False, 'from bs4 import BeautifulSoup\n'), ((1646, 1718), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Title', 'Author', 'PageLink', 'Article', 'Date']"}), "(columns=['Title', 'Author', 'PageLink', 'Article', 'Date'])\n", (1658, 1718), True, 'import pandas as pd\n'), ((1956, 1990), 'pandas.read_excel', 'pd.read_excel', (['"""Quartz_India.xlsx"""'], {}), "('Quartz_India.xlsx')\n", (1969, 1990), True, 'import pandas as pd\n'), ((1998, 2021), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data'}), '(data=data)\n', (2010, 2021), True, 'import pandas as pd\n'), ((2346, 2370), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['filename'], {}), '(filename)\n', (2360, 2370), True, 'import pandas as pd\n'), ((2490, 2524), 'pandas.read_excel', 'pd.read_excel', (['"""Quartz_India.xlsx"""'], {}), "('Quartz_India.xlsx')\n", (2503, 2524), True, 'import pandas as pd\n'), ((666, 683), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (678, 683), False, 'import requests\n'), ((716, 755), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page.text', '"""html.parser"""'], {}), "(page.text, 'html.parser')\n", (729, 755), False, 'from bs4 import BeautifulSoup\n'), ((1929, 1943), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1941, 1943), False, 'from datetime import datetime\n'), ((2574, 2596), 'textblob.TextBlob', 'TextBlob', (["x['Article']"], {}), "(x['Article'])\n", (2582, 2596), False, 'from textblob import TextBlob\n')] |
from spreadsheet.utility import get_user_ccas, is_registered, register_user
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import ConversationHandler
import logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
CCA = 1
SECOND = 2
def start(update, context):
username = update.message.from_user.username
first_name = update.message.from_user.first_name
logger.info("User %s started the conversation.", first_name)
if is_registered(username):
ccas = get_user_ccas(username)
keyboard = [[InlineKeyboardButton(
cca, callback_data=cca)] for cca in ccas]
reply_markup = InlineKeyboardMarkup(keyboard)
update.message.reply_text("Hi %s! Please select one of your CCAs." % (
first_name), reply_markup=reply_markup)
return CCA
else:
update.message.reply_text(
'Sorry, you are not a registered user. Please contact your CCA Head to register you or /register here.')
def back(update, context):
query = update.callback_query
username = query.from_user.username
first_name = query.from_user.first_name
ccas = get_user_ccas(username)
keyboard = [[InlineKeyboardButton(
cca, callback_data=cca)] for cca in ccas]
reply_markup = InlineKeyboardMarkup(keyboard)
query.edit_message_text("Hi %s! Please select one of your CCAs." % (
first_name), reply_markup=reply_markup)
return CCA
def end(update, context):
query = update.callback_query
query.answer()
query.edit_message_text(text="Ok, see you next time!")
return ConversationHandler.END
| [
"logging.basicConfig",
"telegram.InlineKeyboardButton",
"telegram.InlineKeyboardMarkup",
"spreadsheet.utility.get_user_ccas",
"spreadsheet.utility.is_registered",
"logging.getLogger"
] | [((201, 308), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n", (220, 308), False, 'import logging\n'), ((319, 346), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (336, 346), False, 'import logging\n'), ((572, 595), 'spreadsheet.utility.is_registered', 'is_registered', (['username'], {}), '(username)\n', (585, 595), False, 'from spreadsheet.utility import get_user_ccas, is_registered, register_user\n'), ((1260, 1283), 'spreadsheet.utility.get_user_ccas', 'get_user_ccas', (['username'], {}), '(username)\n', (1273, 1283), False, 'from spreadsheet.utility import get_user_ccas, is_registered, register_user\n'), ((1393, 1423), 'telegram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['keyboard'], {}), '(keyboard)\n', (1413, 1423), False, 'from telegram import InlineKeyboardButton, InlineKeyboardMarkup\n'), ((612, 635), 'spreadsheet.utility.get_user_ccas', 'get_user_ccas', (['username'], {}), '(username)\n', (625, 635), False, 'from spreadsheet.utility import get_user_ccas, is_registered, register_user\n'), ((757, 787), 'telegram.InlineKeyboardMarkup', 'InlineKeyboardMarkup', (['keyboard'], {}), '(keyboard)\n', (777, 787), False, 'from telegram import InlineKeyboardButton, InlineKeyboardMarkup\n'), ((1302, 1346), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', (['cca'], {'callback_data': 'cca'}), '(cca, callback_data=cca)\n', (1322, 1346), False, 'from telegram import InlineKeyboardButton, InlineKeyboardMarkup\n'), ((658, 702), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', (['cca'], {'callback_data': 'cca'}), '(cca, callback_data=cca)\n', (678, 702), False, 'from telegram import InlineKeyboardButton, InlineKeyboardMarkup\n')] |
# Generated by Django 3.2.4 on 2021-10-11 10:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Film',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('film_name', models.CharField(max_length=100)),
('film_release_date', models.DateField(null=True)),
('film_genre', models.CharField(max_length=50)),
('film_rating', models.CharField(choices=[('unrated', 'Unrated'), ('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'), ('6', '6'), ('7', '7'), ('8', '8'), ('9', '9'), ('10', '10')], default='unrated', max_length=10)),
('film_type', models.CharField(choices=[('movie', 'Movie'), ('tv_show', 'TV Show'), ('animated_film', 'Animated film'), ('animated_show', 'Animated show')], default='movie', max_length=50)),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"django.db.migrations.swappable_dependency",
"django.db.models.BigAutoField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.DateField"
] | [((247, 304), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (278, 304), False, 'from django.db import migrations, models\n'), ((433, 529), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (452, 529), False, 'from django.db import migrations, models\n'), ((558, 590), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (574, 590), False, 'from django.db import migrations, models\n'), ((631, 658), 'django.db.models.DateField', 'models.DateField', ([], {'null': '(True)'}), '(null=True)\n', (647, 658), False, 'from django.db import migrations, models\n'), ((692, 723), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (708, 723), False, 'from django.db import migrations, models\n'), ((758, 973), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('unrated', 'Unrated'), ('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), (\n '5', '5'), ('6', '6'), ('7', '7'), ('8', '8'), ('9', '9'), ('10', '10')]", 'default': '"""unrated"""', 'max_length': '(10)'}), "(choices=[('unrated', 'Unrated'), ('1', '1'), ('2', '2'), (\n '3', '3'), ('4', '4'), ('5', '5'), ('6', '6'), ('7', '7'), ('8', '8'),\n ('9', '9'), ('10', '10')], default='unrated', max_length=10)\n", (774, 973), False, 'from django.db import migrations, models\n'), ((997, 1180), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('movie', 'Movie'), ('tv_show', 'TV Show'), ('animated_film',\n 'Animated film'), ('animated_show', 'Animated show')]", 'default': '"""movie"""', 'max_length': '(50)'}), "(choices=[('movie', 'Movie'), ('tv_show', 'TV Show'), (\n 'animated_film', 'Animated film'), ('animated_show', 'Animated show')],\n default='movie', max_length=50)\n", (1013, 1180), False, 'from django.db import migrations, models\n'), ((1199, 1305), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(null=True, on_delete=django.db.models.deletion.CASCADE,\n to=settings.AUTH_USER_MODEL)\n', (1216, 1305), False, 'from django.db import migrations, models\n')] |
import json
import logging
import gevent
import os
import psutil
import six
from gevent import subprocess
from six.moves import range
from iris.core.interfaces import Interface
from iris.utils.sockets import create_socket
logger = logging.getLogger(__name__)
class Process(object):
def __init__(self, cmd, env=None):
self.cmd = cmd
self.env = env
self._process = None
self._popen = None
def is_running(self):
return self._process and self._process.is_running()
def start(self):
self._popen = subprocess.Popen(
self.cmd, env=self.env, close_fds=False)
self._process = psutil.Process(self._popen.pid)
def stop(self):
try:
self._process.terminate()
self._process.wait()
except psutil.NoSuchProcess:
pass
def restart(self):
print("restarting %s" % self)
self.stop()
self.start()
def stats(self):
try:
memory = self._process.memory_info()
return {
'memory': {'rss': memory.rss, 'vms': memory.vms},
'cpu': self._process.cpu_percent(interval=2.0),
}
except psutil.NoSuchProcess:
return {}
class Node(Interface):
register_with_coordinator = False
def __init__(self, *args, **kwargs):
super(Node, self).__init__(*args, **kwargs)
self.sockets = {}
self.processes = []
self.running = False
self._sockets = []
self._services = []
def stats(self):
process_stats = []
for p in self.processes:
if not p.is_running():
continue
process_stats.append({
'command': p.cmd,
'stats': p.stats(),
})
return {'processes': process_stats}
def apply_config(self, config):
for name, c in six.iteritems(config.get('instances', {})):
self._services.append((name, c.get('command'), c.get('numprocesses', 1)))
for name, c in six.iteritems(config.get('sockets', {})):
self._sockets.append((name, c.get('host'), c.get('port')))
def on_start(self):
self.create_shared_sockets()
self.running = True
shared_fds = json.dumps({port: s.fileno() for port, s in six.iteritems(self.sockets)})
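        # The child services can reuse these listening sockets directly: they are
        # created with inheritable=True and Popen runs with close_fds=False, so the
        # file descriptors survive into the subprocesses and are advertised here as
        # a port -> fileno mapping via IRIS_SHARED_SOCKET_FDS.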
for service_type, cmd, num in self._services:
env = os.environ.copy()
env['IRIS_NODE'] = self.container.endpoint
env['IRIS_NODE_IP'] = self.container.ip
env['IRIS_SHARED_SOCKET_FDS'] = shared_fds
for i in range(num):
p = Process(cmd.split(' '), env=env)
self.processes.append(p)
logger.info('starting %s', cmd)
p.start()
gevent.spawn(self.watch_processes)
def on_stop(self):
logger.info("waiting for all service processes to die ...")
self.running = False
for p in self.processes:
p.stop()
super(Node, self).on_stop()
def create_shared_sockets(self):
for name, host, port in self._sockets:
sock = create_socket(
'%s:%s' % (host or self.container.ip, port), inheritable=True)
self.sockets[port] = sock
def restart_all(self):
for process in self.processes:
process.stop()
def watch_processes(self):
while True:
for process in self.processes:
try:
status = process._process.status
except psutil.NoSuchProcess:
if self.running:
process.start()
continue
if status in (psutil.STATUS_ZOMBIE, psutil.STATUS_DEAD):
if self.running:
process.restart()
gevent.sleep(1)
| [
"psutil.Process",
"six.moves.range",
"os.environ.copy",
"gevent.subprocess.Popen",
"iris.utils.sockets.create_socket",
"gevent.sleep",
"gevent.spawn",
"six.iteritems",
"logging.getLogger"
] | [((234, 261), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (251, 261), False, 'import logging\n'), ((559, 616), 'gevent.subprocess.Popen', 'subprocess.Popen', (['self.cmd'], {'env': 'self.env', 'close_fds': '(False)'}), '(self.cmd, env=self.env, close_fds=False)\n', (575, 616), False, 'from gevent import subprocess\n'), ((654, 685), 'psutil.Process', 'psutil.Process', (['self._popen.pid'], {}), '(self._popen.pid)\n', (668, 685), False, 'import psutil\n'), ((2829, 2863), 'gevent.spawn', 'gevent.spawn', (['self.watch_processes'], {}), '(self.watch_processes)\n', (2841, 2863), False, 'import gevent\n'), ((2440, 2457), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (2455, 2457), False, 'import os\n'), ((2641, 2651), 'six.moves.range', 'range', (['num'], {}), '(num)\n', (2646, 2651), False, 'from six.moves import range\n'), ((3179, 3255), 'iris.utils.sockets.create_socket', 'create_socket', (["('%s:%s' % (host or self.container.ip, port))"], {'inheritable': '(True)'}), "('%s:%s' % (host or self.container.ip, port), inheritable=True)\n", (3192, 3255), False, 'from iris.utils.sockets import create_socket\n'), ((3889, 3904), 'gevent.sleep', 'gevent.sleep', (['(1)'], {}), '(1)\n', (3901, 3904), False, 'import gevent\n'), ((2338, 2365), 'six.iteritems', 'six.iteritems', (['self.sockets'], {}), '(self.sockets)\n', (2351, 2365), False, 'import six\n')] |
# Standard
# PIP
from torch.utils.data import DataLoader
from pytorch_lightning import LightningDataModule
from torchtext.datasets import WikiText2
# Custom
class CustomDataModule(LightningDataModule):
def __init__(
self,
batch_size=1,
num_workers=0,
):
super().__init__()
self.batch_size = batch_size
self.num_workers = num_workers
def setup(
self,
stage=None,
):
# Assign train & val datasets
if stage == "fit" or stage is None:
self.train_dataset = WikiText2(split='train')
self.valid_dataset = WikiText2(split='valid')
# Assign test dataset
if stage == "test" or stage is None:
self.test_dataset = WikiText2(split='test')
def train_dataloader(self):
return DataLoader(
dataset=self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
)
def val_dataloader(self):
return DataLoader(
dataset=self.valid_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
)
def test_dataloader(self):
return DataLoader(
dataset=self.test_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
)
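# Rough usage sketch (illustrative only; assumes a compatible LightningModule named `model`):
#   dm = CustomDataModule(batch_size=20, num_workers=2)
#   pytorch_lightning.Trainer(max_epochs=1).fit(model, datamodule=dm)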
| [
"torchtext.datasets.WikiText2",
"torch.utils.data.DataLoader"
] | [((828, 943), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'self.train_dataset', 'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': 'self.num_workers'}), '(dataset=self.train_dataset, batch_size=self.batch_size, shuffle=\n True, num_workers=self.num_workers)\n', (838, 943), False, 'from torch.utils.data import DataLoader\n'), ((1044, 1159), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'self.valid_dataset', 'batch_size': 'self.batch_size', 'shuffle': '(True)', 'num_workers': 'self.num_workers'}), '(dataset=self.valid_dataset, batch_size=self.batch_size, shuffle=\n True, num_workers=self.num_workers)\n', (1054, 1159), False, 'from torch.utils.data import DataLoader\n'), ((1261, 1376), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'self.test_dataset', 'batch_size': 'self.batch_size', 'shuffle': '(False)', 'num_workers': 'self.num_workers'}), '(dataset=self.test_dataset, batch_size=self.batch_size, shuffle=\n False, num_workers=self.num_workers)\n', (1271, 1376), False, 'from torch.utils.data import DataLoader\n'), ((565, 589), 'torchtext.datasets.WikiText2', 'WikiText2', ([], {'split': '"""train"""'}), "(split='train')\n", (574, 589), False, 'from torchtext.datasets import WikiText2\n'), ((623, 647), 'torchtext.datasets.WikiText2', 'WikiText2', ([], {'split': '"""valid"""'}), "(split='valid')\n", (632, 647), False, 'from torchtext.datasets import WikiText2\n'), ((756, 779), 'torchtext.datasets.WikiText2', 'WikiText2', ([], {'split': '"""test"""'}), "(split='test')\n", (765, 779), False, 'from torchtext.datasets import WikiText2\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import speaksee.data as data
import numpy as np
import torch
'''class TestImageField(object):
def test_preprocessing(self):
field = data.ImageField()
image = ''
expected_image = ''
assert field.preprocess(image) == expected_image
'''
class TestTextField(object):
def test_pad(self):
# Default case.
field = data.TextField()
minibatch = [["a", "sentence", "of", "data", "."],
["yet", "another"],
["one", "last", "sent"]]
expected_padded_minibatch = [["a", "sentence", "of", "data", "."],
["yet", "another", "<pad>", "<pad>", "<pad>"],
["one", "last", "sent", "<pad>", "<pad>"]]
expected_lengths = [5, 2, 3]
assert field.pad(minibatch) == expected_padded_minibatch
field = data.TextField(include_lengths=True)
assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths)
# Test fix_length properly truncates and pads.
field = data.TextField(fix_length=3)
minibatch = [["a", "sentence", "of", "data", "."],
["yet", "another"],
["one", "last", "sent"]]
expected_padded_minibatch = [["a", "sentence", "of"],
["yet", "another", "<pad>"],
["one", "last", "sent"]]
expected_lengths = [3, 2, 3]
assert field.pad(minibatch) == expected_padded_minibatch
field = data.TextField(fix_length=3, include_lengths=True)
assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths)
field = data.TextField(fix_length=3, truncate_first=True)
expected_padded_minibatch = [["of", "data", "."],
["yet", "another", "<pad>"],
["one", "last", "sent"]]
assert field.pad(minibatch) == expected_padded_minibatch
# Test init_token is properly handled.
field = data.TextField(fix_length=4, init_token="<bos>")
minibatch = [["a", "sentence", "of", "data", "."],
["yet", "another"],
["one", "last", "sent"]]
expected_padded_minibatch = [["<bos>", "a", "sentence", "of"],
["<bos>", "yet", "another", "<pad>"],
["<bos>", "one", "last", "sent"]]
expected_lengths = [4, 3, 4]
assert field.pad(minibatch) == expected_padded_minibatch
field = data.TextField(fix_length=4, init_token="<bos>", include_lengths=True)
assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths)
# Test init_token and eos_token are properly handled.
field = data.TextField(init_token="<bos>", eos_token="<eos>")
minibatch = [["a", "sentence", "of", "data", "."],
["yet", "another"],
["one", "last", "sent"]]
expected_padded_minibatch = [
["<bos>", "a", "sentence", "of", "data", ".", "<eos>"],
["<bos>", "yet", "another", "<eos>", "<pad>", "<pad>", "<pad>"],
["<bos>", "one", "last", "sent", "<eos>", "<pad>", "<pad>"]]
expected_lengths = [7, 4, 5]
assert field.pad(minibatch) == expected_padded_minibatch
field = data.TextField(init_token="<bos>", eos_token="<eos>", include_lengths=True)
assert field.pad(minibatch) == (expected_padded_minibatch, expected_lengths)
def test_decode(self):
def test_all_dtypes(word_idxs, expected_output):
assert field.decode(word_idxs) == expected_output
assert field.decode(np.asarray(word_idxs)) == expected_output
assert field.decode(torch.from_numpy(np.asarray(word_idxs))) == expected_output
class MyVocab(object):
def __init__(self, eos_token):
self.itos = {0: 'a',
1: 'b',
2: eos_token,
3: 'c'}
field = data.TextField()
field.vocab = MyVocab(field.eos_token)
# Empty captions (not tested for PyTorch tensors)
word_idxs = []
expected_output = ''
assert field.decode(word_idxs) == expected_output
assert field.decode(np.asarray(word_idxs)) == expected_output
word_idxs = [[]]
expected_output = ['', ]
assert field.decode(word_idxs) == expected_output
assert field.decode(np.asarray(word_idxs)) == expected_output
# Single caption
word_idxs = [0, 3, 2, 1]
expected_output = 'a c'
test_all_dtypes(word_idxs, expected_output)
# Batch of captions
word_idxs = [[0, 3, 2, 1],
[3, 3, 2, 1],
[2, 1, 1, 1]]
expected_output = ['a c', 'c c', '']
test_all_dtypes(word_idxs, expected_output)
| [
"numpy.asarray",
"speaksee.data.TextField"
] | [((446, 462), 'speaksee.data.TextField', 'data.TextField', ([], {}), '()\n', (460, 462), True, 'import speaksee.data as data\n'), ((966, 1002), 'speaksee.data.TextField', 'data.TextField', ([], {'include_lengths': '(True)'}), '(include_lengths=True)\n', (980, 1002), True, 'import speaksee.data as data\n'), ((1160, 1188), 'speaksee.data.TextField', 'data.TextField', ([], {'fix_length': '(3)'}), '(fix_length=3)\n', (1174, 1188), True, 'import speaksee.data as data\n'), ((1643, 1693), 'speaksee.data.TextField', 'data.TextField', ([], {'fix_length': '(3)', 'include_lengths': '(True)'}), '(fix_length=3, include_lengths=True)\n', (1657, 1693), True, 'import speaksee.data as data\n'), ((1795, 1844), 'speaksee.data.TextField', 'data.TextField', ([], {'fix_length': '(3)', 'truncate_first': '(True)'}), '(fix_length=3, truncate_first=True)\n', (1809, 1844), True, 'import speaksee.data as data\n'), ((2160, 2208), 'speaksee.data.TextField', 'data.TextField', ([], {'fix_length': '(4)', 'init_token': '"""<bos>"""'}), "(fix_length=4, init_token='<bos>')\n", (2174, 2208), True, 'import speaksee.data as data\n'), ((2690, 2760), 'speaksee.data.TextField', 'data.TextField', ([], {'fix_length': '(4)', 'init_token': '"""<bos>"""', 'include_lengths': '(True)'}), "(fix_length=4, init_token='<bos>', include_lengths=True)\n", (2704, 2760), True, 'import speaksee.data as data\n'), ((2925, 2978), 'speaksee.data.TextField', 'data.TextField', ([], {'init_token': '"""<bos>"""', 'eos_token': '"""<eos>"""'}), "(init_token='<bos>', eos_token='<eos>')\n", (2939, 2978), True, 'import speaksee.data as data\n'), ((3499, 3574), 'speaksee.data.TextField', 'data.TextField', ([], {'init_token': '"""<bos>"""', 'eos_token': '"""<eos>"""', 'include_lengths': '(True)'}), "(init_token='<bos>', eos_token='<eos>', include_lengths=True)\n", (3513, 3574), True, 'import speaksee.data as data\n'), ((4204, 4220), 'speaksee.data.TextField', 'data.TextField', ([], {}), '()\n', (4218, 4220), True, 'import speaksee.data as data\n'), ((4465, 4486), 'numpy.asarray', 'np.asarray', (['word_idxs'], {}), '(word_idxs)\n', (4475, 4486), True, 'import numpy as np\n'), ((4652, 4673), 'numpy.asarray', 'np.asarray', (['word_idxs'], {}), '(word_idxs)\n', (4662, 4673), True, 'import numpy as np\n'), ((3839, 3860), 'numpy.asarray', 'np.asarray', (['word_idxs'], {}), '(word_idxs)\n', (3849, 3860), True, 'import numpy as np\n'), ((3930, 3951), 'numpy.asarray', 'np.asarray', (['word_idxs'], {}), '(word_idxs)\n', (3940, 3951), True, 'import numpy as np\n')] |
import functools
import json
from datetime import datetime
from typing import Any, Dict
from .wikidatasession import WikidataSession
@functools.lru_cache()
def getPropertyType(propertyId: str):
repo = WikidataSession()
query = {
"action": "query",
"format": "json",
"prop": "revisions",
"titles": "Property:" + propertyId,
"rvprop": "content",
}
DATA = repo.get(query)
jsonstr = list(DATA["query"]["pages"].values())[0]["revisions"][0]["*"]
content = json.loads(jsonstr)
return content["datatype"]
def buildDataValue(datatype: str, value):
if datatype in [
"wikibase-lexeme",
"wikibase-form",
"wikibase-sense",
"wikibase-item",
"wikibase-property",
]:
if type(value) == dict:
return {"value": value, "type": "wikibase-entity"}
elif type(value) == str:
value = {"entity-type": datatype[9:], "id": value}
return {"value": value, "type": "wikibase-entity"}
else:
raise TypeError(
f"Can not convert type {type(value)} to datatype {datatype}"
)
elif datatype in [
"string",
"tabular-data",
"geo-shape",
"url",
"musical-notation",
"math",
"commonsMedia",
]:
if type(value) == dict:
return {"value": value, "type": "string"}
elif type(value) == str:
return {"value": {"value": value}, "type": "string"}
else:
raise TypeError(
f"Can not convert type {type(value)} to datatype {datatype}"
)
elif datatype == "monolingualtext":
if type(value) == dict:
return {"value": value, "type": "monolingualtext"}
else:
raise TypeError(
f"Can not convert type {type(value)} to datatype {datatype}"
)
elif datatype == "globe-coordinate":
if type(value) == dict:
return {"value": value, "type": "globecoordinate"}
else:
raise TypeError(
f"Can not convert type {type(value)} to datatype {datatype}"
)
elif datatype == "quantity":
if type(value) == dict:
return {"value": value, "type": "quantity"}
if type(value) in [int, float]:
valueObj = {
"amount": "%+f" % value,
"unit": "1",
}
            return {"value": valueObj, "type": "quantity"}
else:
raise TypeError(
f"Can not convert type {type(value)} to datatype {datatype}"
)
elif datatype == "time":
if type(value) == dict:
return {"value": value, "type": "time"}
if type(value) == datetime:
cleanedDateTime = value.replace(hour=0, minute=0, second=0, microsecond=0)
valueObj: Dict[str, Any] = {
"time": "+" + cleanedDateTime.isoformat() + "Z",
"timezone": 0,
"before": 0,
"after": 0,
"precision": 11,
"calendarmodel": "http://www.wikidata.org/entity/Q1985727",
}
return {"value": valueObj, "type": "time"}
else:
raise TypeError(
f"Can not convert type {type(value)} to datatype {datatype}"
)
else:
raise NotImplementedError(f"Datatype {datatype} not implemented")
def buildSnak(propertyId: str, value):
datatype = getPropertyType(propertyId)
datavalue = buildDataValue(datatype, value)
return {
"snaktype": "value",
"property": propertyId,
"datavalue": datavalue,
"datatype": datatype,
}
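# Illustrative call (the property id is hypothetical and assumed to have datatype "time"):
#   buildSnak("P9999", datetime(2020, 1, 1))
#   -> {"snaktype": "value", "property": "P9999", "datavalue": {...}, "datatype": "time"}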
| [
"functools.lru_cache",
"json.loads"
] | [((137, 158), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (156, 158), False, 'import functools\n'), ((518, 537), 'json.loads', 'json.loads', (['jsonstr'], {}), '(jsonstr)\n', (528, 537), False, 'import json\n')] |
"""
# espn.py
# classes for scraping, parsing espn football data
# this does include some basic fantasy data
# espn_fantasy is mostly about managing fantasy teams
# NOTE: trouble accessing data in offseason
# will have to revisit this module as season approaches
"""
import logging
import re
from bs4 import BeautifulSoup, NavigableString, Tag
from namematcher.xref import Site
from sportscraper.scraper import RequestScraper
FANTASY_TEAMS = {
1: "Atl",
2: "Buf",
3: "Chi",
4: "Cin",
5: "Cle",
6: "Dal",
7: "Den",
8: "Det",
9: "GB",
10: "Ten",
11: "Ind",
12: "KC",
13: "Oak",
14: "LAR",
15: "Mia",
16: "Min",
17: "NE",
18: "NO",
19: "NYG",
20: "NYJ",
21: "Phi",
22: "Ari",
23: "Pit",
24: "LAC",
25: "SF",
26: "Sea",
27: "TB",
28: "Wsh",
29: "Car",
30: "Jax",
33: "Bal",
34: "Hou",
}
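# The integer keys appear to be ESPN pro-team ids (note the gap between 30 and 33),
# mapped to short team abbreviations.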
class Scraper(RequestScraper):
"""
Scrape ESPN.com for football stats
"""
@staticmethod
def _check_pos(pos):
"""
Makes sure pos is valid and uppercase
Args:
pos(str):
Returns:
str
"""
if pos in [
"qb",
"rb",
"wr",
"te",
"dst",
"d/st",
"k",
"QB",
"RB",
"WR",
"TE",
"K",
"D/ST",
"DST",
]:
if pos in ["DST", "dst"]:
pos = "D/ST"
return pos.upper()
else:
raise ValueError("invalid position: {}".format(pos))
def adp(self, season_year):
"""
Gets adp data
Args:
season_year(int): 2019, etc.
Returns:
dict: parsed JSON
"""
url = (
f"http://fantasy.espn.com/apis/v3/games/ffl/seasons/{season_year}/"
f"segments/0/leaguedefaults/1?view=kona_player_info"
)
return self.get_json(url)
def players_position(self, pos):
"""
Gets page with all players by position
Args:
pos(str): qb, rb, wr, te, k, etc.
Returns:
str
"""
url = "http://www.espn.com/nfl/players?position={}&league=nfl"
return self.get(url.format(pos), encoding="latin1")
def projections(self, pos, season_year=None, week=0, offset=0):
"""
Gets page with projections by position
Args:
pos: str qb, rb, wr, te, k, etc.
season_year: int 2017, 2016
week: int 1, 2, 3
offset: int 0, 40, 80, etc.
Returns:
HTML string
TODO: revise based on new URL
"""
pos = pos.lower()
slot_categories = {"qb": 0, "rb": 2, "wr": 4, "te": 6, "dst": 16, "k": 17}
max_offset = {"qb": 120, "rb": 240, "wr": 360, "te": 160, "dst": 0, "k": 40}
if pos not in slot_categories.keys():
raise ValueError("invalid pos {}".format(pos))
if offset > max_offset.get(pos):
raise ValueError("invalid offset {}".format(offset))
if offset % 40 > 0:
raise ValueError("invalid offset {}".format(offset))
# https://fantasy.espn.com/football/players/projections
url = "http://games.espn.com/ffl/tools/projections?"
if season_year:
params = {
"slotCategoryId": slot_categories[pos],
"startIndex": offset,
"seasonId": season_year,
}
else:
params = {"slotCategoryId": slot_categories[pos], "startIndex": offset}
if week:
params["scoringPeriodId"] = week
else:
params["seasonTotals"] = "true"
return self.get(url, params=params, encoding="latin1")
def team_roster(self, team_code):
"""
Gets list of NFL players from ESPN.com
Args:
team_code: str 'DEN', 'BUF', etc.
Returns:
HTML string
"""
url = f"http://www.espn.com/nfl/team/roster/_/name/{team_code}"
return self.get(url, encoding="latin1")
def weekly_scoring(self, season_year, week, position):
"""
Gets weekly fantasy scoring page
Args:
season_year (int): 2017, 2016, etc.
week (int): 1 through 17
position (str): 'qb', 'wr', etc.
Returns:
str: HTML
TODO: rework for new URL
"""
poscode = {"qb": 0, "rb": 2, "wr": 4, "te": 6, "dst": 16, "k": 17}
if position.lower() not in poscode:
raise ValueError("invalid position: {}".format(position))
# https://fantasy.espn.com/football/leaders
url = "http://games.espn.com/ffl/leaders?&"
        params = {
            "scoringPeriodId": week,
            "seasonId": season_year,
            "slotCategoryId": poscode[position.lower()],
        }
return self.get(url, params=params)
class Parser:
"""
Parse ESPN.com for football stats
"""
def __init__(self):
"""
"""
logging.getLogger(__name__).addHandler(logging.NullHandler())
@staticmethod
def _val(val):
"""
Converts non-numeric value to numeric 0
Args:
val:
Returns:
number
"""
if "--" in val:
return 0
return val
@staticmethod
def adp(content):
"""
Parses season-long ADP
Args:
content:
Returns:
list of dict
"""
vals = []
for item in content["players"]:
tl_wanted = [
"defaultPositionId",
"firstName",
"id",
"lastName",
"proTeamId",
]
api_player = {k: v for k, v in item["player"].items() if k in tl_wanted}
for scoring_type in ["PPR", "STANDARD"]:
for rank_type in ["rank", "auctionValue"]:
key = scoring_type.lower() + "_" + rank_type
try:
api_player[key] = item["player"]["draftRanksByRankType"][
scoring_type
][rank_type]
except KeyError:
api_player[key] = None
vals.append(api_player)
return vals
def projections(self, content, pos):
"""
Parses ESPN fantasy football season-long sortable projections page
Args:
content: HTML string
Returns:
list of dict
"""
players = []
soup = BeautifulSoup(content, "lxml")
if pos.lower() in ["qb", "rb", "wr", "te", "flex"]:
headers = [
"pass_att",
"pass_cmp",
"pass_yds",
"pass_td",
"pass_int",
"rush_att",
"rush_yds",
"rush_td",
"rec",
"rec_yds",
"rec_td",
"fantasy_points_ppr",
]
for row in soup.findAll("tr", {"class": "pncPlayerRow"}):
player = {"source": "espn"}
tds = row.find_all("td")
# tds[0]: rank
player["source_position_rank"] = tds[0].text
# tds[1]: name/team/pos
link, navstr = list(tds[1].children)[0:2]
player["source_player_name"] = link.text
player["source_player_team"], player[
"source_player_position"
] = navstr.split()[-2:]
player["source_player_id"] = link.attrs.get("playerid")
# loop through stats
# they have attempts/completions in one column so have to remove & split
vals = [self._val(td.text) for td in tds[3:]]
for header, val in zip(headers, tds[2].text.split("/") + vals):
player[header] = val
players.append(player)
elif pos.lower() == "k":
for row in soup.findAll("tr", {"class": "pncPlayerRow"}):
player = {"source": "espn"}
tds = row.find_all("td")
# tds[0]: rank
player["source_position_rank"] = tds[0].text
# tds[1]: name/team/pos
link, navstr = list(tds[1].children)[0:2]
player["source_player_name"] = link.text
player["source_player_team"], player[
"source_player_position"
] = navstr.split()[-2:]
player["source_player_id"] = link.attrs.get("playerid")
# loop through stats
                # kickers only expose a single total fantasy-points column here
player["fantasy_points_ppr"] = self._val(tds[-1].text)
players.append(player)
else:
pass
return players
@staticmethod
def players_position(content, pos):
"""
Parses page of ESPN players by position
Args:
content:
pos:
Returns:
list: of dict
"""
players = []
soup = BeautifulSoup(content, "lxml")
for row in soup.find_all("tr"):
class_matches = set(["oddrow", "evenrow"])
classes = set(row.attrs.get("class", []))
if class_matches.intersection(classes):
player = {"source": "espn", "source_player_position": pos}
tds = row.find_all("td")
# tds[0]: <a href="http://www.espn.com/nfl/player/_/id/
# 2574511/brandon-allen"><NAME></a>
player["source_player_name"] = tds[0].text
link = row.find("a", {"href": re.compile(r"/player/_/")})
if link:
match = re.search(r"\/id\/([0-9]+)", link["href"])
if match:
player["source_player_id"] = match.group(1)
# tds[1]: <td><a href="http://www.espn.com/nfl/team/_/
# name/jax/jacksonville-jaguars"><NAME></a></td>
player["source_team_name"] = tds[1].text
link = row.find("a", {"href": re.compile(r"/team/_/name")})
if link:
match = re.search(r"name/(\w+)/", link["href"])
if match:
player["source_team_code"] = match.group(1)
# tds[2]: <td>Arkansas</td>
player["college"] = tds[2].text
# add to list
players.append(player)
return players
@staticmethod
def team_roster(content):
"""
Parses team roster page into list of player dict
Args:
content: HTML of espn nfl team roster page
Returns:
list of dict
"""
players = []
soup = BeautifulSoup(content, "lxml")
for row in soup.find_all("tr"):
link = row.find("a", {"href": re.compile(r"/nfl/player/_/id/")})
try:
player = {"source": "espn"}
tds = row.find_all("td")
if len(tds) != 8:
continue
player["source_player_position"] = tds[2].text
player["source_player_name"] = link.text
player["source_player_id"] = link["href"].split("/")[-2]
players.append(player)
except ValueError:
pass
return players
@staticmethod
def weekly_scoring(content):
"""
Parses weekly scoring page
Args:
content (str): HTML
Returns:
list: of dict
"""
results = []
headers = [
"c_a",
"pass_yds",
"pass_td",
"pass_int",
"rush_att",
"rush_yds",
"rush_td",
"rec_rec",
"rec_yds",
"rec_td",
"rec_tar",
"tpc",
"fumble",
"misc_td",
"fpts",
]
soup = BeautifulSoup(content, "lxml")
tbl = soup.select("table#playertable_0")[0]
for row in tbl.find_all("tr", {"id": re.compile(r"plyr")}):
tds = [td.text for td in row.find_all("td", class_="playertableStat")]
if tds:
player = dict(zip(headers, tds))
# name, team, position
nametd = row.find("td", {"id": re.compile(r"playername")})
for child in nametd.children:
if isinstance(child, NavigableString):
player["source_player_team"], player[
"source_player_position"
] = child.string.split()[1:3]
elif isinstance(child, Tag):
player["source_player_name"] = child.string
player["source_player_id"] = child.attrs.get("playerid")
results.append(player)
return results
@staticmethod
def weekly_scoring_dst(content):
"""
Parses weekly scoring page for dst
Args:
content(str): HTML
Returns:
list: of dict
"""
# TODO: adapt for dst
results = []
headers = [
"c_a",
"pass_yds",
"pass_td",
"pass_int",
"rush_att",
"rush_yds",
"rush_td",
"rec_rec",
"rec_yds",
"rec_td",
"rec_tar",
"tpc",
"fumble",
"misc_td",
"fpts",
]
soup = BeautifulSoup(content, "lxml")
tbl = soup.select("table#playertable_0")[0]
for row in tbl.find_all("tr", {"id": re.compile(r"plyr")}):
tds = [td.text for td in row.find_all("td", class_="playertableStat")]
if tds:
player = dict(zip(headers, tds))
# name, team, position
nametd = row.find("td", {"id": re.compile(r"playername")})
for child in nametd.children:
if isinstance(child, NavigableString):
player["source_player_team"], player[
"source_player_position"
] = child.string.split()[1:3]
elif isinstance(child, Tag):
player["source_player_name"] = child.string
player["source_player_id"] = child.attrs.get("playerid")
results.append(player)
return results
@staticmethod
def weekly_scoring_k(content):
"""
Parses weekly scoring page for kickers
Args:
content (str): HTML
Returns:
list: of dict
"""
# TODO: adapt for kicker
results = []
headers = [
"c_a",
"pass_yds",
"pass_td",
"pass_int",
"rush_att",
"rush_yds",
"rush_td",
"rec_rec",
"rec_yds",
"rec_td",
"rec_tar",
"tpc",
"fumble",
"misc_td",
"fpts",
]
soup = BeautifulSoup(content, "lxml")
tbl = soup.select("table#playertable_0")[0]
for row in tbl.find_all("tr", {"id": re.compile(r"plyr")}):
tds = [td.text for td in row.find_all("td", class_="playertableStat")]
if tds:
player = dict(zip(headers, tds))
# name, team, position
nametd = row.find("td", {"id": re.compile(r"playername")})
for child in nametd.children:
if isinstance(child, NavigableString):
player["source_player_team"], player[
"source_player_position"
] = child.string.split()[1:3]
elif isinstance(child, Tag):
player["source_player_name"] = child.string
player["source_player_id"] = child.attrs.get("playerid")
results.append(player)
return results
class Agent:
"""
Combines common scraping/parsing tasks
"""
def __init__(self, scraper=None, parser=None, cache_name="espn-agent"):
"""
Creates Agent object
Args:
scraper(espn.Scraper): default None
parser(espn.Parser): default None
cache_name(str): default 'espn-agent'
"""
logging.getLogger(__name__).addHandler(logging.NullHandler())
if scraper:
self._s = scraper
else:
self._s = Scraper(cache_name=cache_name)
if parser:
self._p = parser
else:
self._p = Parser()
def adp(self, season_year):
"""
Gets season ADP data
Args:
season_year(int): 2018, 2019, etc.
Returns:
list: of dict
"""
content = self._s.adp(season_year)
return self._p.adp(content)
class Xref(Site):
"""
Cross-reference source players with other names/ids
"""
def __init__(self, source_name="espn"):
"""
Args:
source_name(str): either 'espn' or 'espn_fantasy'
"""
super().__init__()
self.source_name = source_name
if __name__ == "__main__":
pass
| [
"logging.NullHandler",
"bs4.BeautifulSoup",
"re.search",
"logging.getLogger",
"re.compile"
] | [((6740, 6770), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""lxml"""'], {}), "(content, 'lxml')\n", (6753, 6770), False, 'from bs4 import BeautifulSoup, NavigableString, Tag\n'), ((9377, 9407), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""lxml"""'], {}), "(content, 'lxml')\n", (9390, 9407), False, 'from bs4 import BeautifulSoup, NavigableString, Tag\n'), ((11105, 11135), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""lxml"""'], {}), "(content, 'lxml')\n", (11118, 11135), False, 'from bs4 import BeautifulSoup, NavigableString, Tag\n'), ((12330, 12360), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""lxml"""'], {}), "(content, 'lxml')\n", (12343, 12360), False, 'from bs4 import BeautifulSoup, NavigableString, Tag\n'), ((13927, 13957), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""lxml"""'], {}), "(content, 'lxml')\n", (13940, 13957), False, 'from bs4 import BeautifulSoup, NavigableString, Tag\n'), ((15530, 15560), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""lxml"""'], {}), "(content, 'lxml')\n", (15543, 15560), False, 'from bs4 import BeautifulSoup, NavigableString, Tag\n'), ((5201, 5222), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (5220, 5222), False, 'import logging\n'), ((16893, 16914), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (16912, 16914), False, 'import logging\n'), ((5162, 5189), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (5179, 5189), False, 'import logging\n'), ((12458, 12476), 're.compile', 're.compile', (['"""plyr"""'], {}), "('plyr')\n", (12468, 12476), False, 'import re\n'), ((14055, 14073), 're.compile', 're.compile', (['"""plyr"""'], {}), "('plyr')\n", (14065, 14073), False, 'import re\n'), ((15658, 15676), 're.compile', 're.compile', (['"""plyr"""'], {}), "('plyr')\n", (15668, 15676), False, 'import re\n'), ((16854, 16881), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (16871, 16881), False, 'import logging\n'), ((10037, 10080), 're.search', 're.search', (['"""\\\\/id\\\\/([0-9]+)"""', "link['href']"], {}), "('\\\\/id\\\\/([0-9]+)', link['href'])\n", (10046, 10080), False, 'import re\n'), ((10501, 10540), 're.search', 're.search', (['"""name/(\\\\w+)/"""', "link['href']"], {}), "('name/(\\\\w+)/', link['href'])\n", (10510, 10540), False, 'import re\n'), ((11218, 11249), 're.compile', 're.compile', (['"""/nfl/player/_/id/"""'], {}), "('/nfl/player/_/id/')\n", (11228, 11249), False, 'import re\n'), ((9956, 9980), 're.compile', 're.compile', (['"""/player/_/"""'], {}), "('/player/_/')\n", (9966, 9980), False, 'import re\n'), ((10418, 10444), 're.compile', 're.compile', (['"""/team/_/name"""'], {}), "('/team/_/name')\n", (10428, 10444), False, 'import re\n'), ((12719, 12743), 're.compile', 're.compile', (['"""playername"""'], {}), "('playername')\n", (12729, 12743), False, 'import re\n'), ((14316, 14340), 're.compile', 're.compile', (['"""playername"""'], {}), "('playername')\n", (14326, 14340), False, 'import re\n'), ((15919, 15943), 're.compile', 're.compile', (['"""playername"""'], {}), "('playername')\n", (15929, 15943), False, 'import re\n')] |
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
DbCommitedEntity = declarative_base(metadata=MetaData(schema="commited")) | [
"sqlalchemy.MetaData"
] | [((134, 161), 'sqlalchemy.MetaData', 'MetaData', ([], {'schema': '"""commited"""'}), "(schema='commited')\n", (142, 161), False, 'from sqlalchemy import MetaData\n')] |
import pandas as pd
import numpy as np
import nltk
nltk.download('punkt')
import os
import nltk.corpus
from nltk.probability import FreqDist
from nltk.tokenize import word_tokenize
# read result
result = pd.read_csv("result.csv")
Tags = result["Tag"]
print(Tags)
allTag = ""
for row in result.index:
allTag = allTag + " " + result['Tag'][row]
token = word_tokenize(allTag)
# find the 20 most common tags
fdist = FreqDist(token)
fdist20 = fdist.most_common(20)
print(fdist20) | [
"pandas.read_csv",
"nltk.download",
"nltk.probability.FreqDist",
"nltk.tokenize.word_tokenize"
] | [((51, 73), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (64, 73), False, 'import nltk\n'), ((206, 231), 'pandas.read_csv', 'pd.read_csv', (['"""result.csv"""'], {}), "('result.csv')\n", (217, 231), True, 'import pandas as pd\n'), ((362, 383), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['allTag'], {}), '(allTag)\n', (375, 383), False, 'from nltk.tokenize import word_tokenize\n'), ((420, 435), 'nltk.probability.FreqDist', 'FreqDist', (['token'], {}), '(token)\n', (428, 435), False, 'from nltk.probability import FreqDist\n')] |
from setuptools import setup, find_packages
setup(
name='blueliv-python-sdk',
version='2.3.0',
description='Blueliv API SDK for Python',
url='https://github.com/Blueliv/api-python-sdk',
author='Blueliv',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords='blueliv api crime servers bot ips security',
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
install_requires=['requests>=2.4.0, <= 2.5.1', 'python-dateutil>=2.4.0'],
    tests_require=['mock']
)
| [
"setuptools.find_packages"
] | [((915, 967), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['contrib', 'docs', 'tests*']"}), "(exclude=['contrib', 'docs', 'tests*'])\n", (928, 967), False, 'from setuptools import setup, find_packages\n')] |
import sys
#import argparse
import pandas as pd
import matplotlib.pyplot as plt
from dztools.stats.intersample import intersample
from dztools.utils.makeplots import makeplots
xmin = 1 # define lower limit for probability density plots (PDPs) and kernel density estimates (KDEs) and all plots
xmax = 4000 #upper limit for PDPs and KDEs and all plots
xint = 1 # discretization interval for PDPs and KDEs only
#DZtools options
DZstats = 1
DZmds = 0
PlotDistributions = 1
def DZ_main():
filename = sys.argv[1]
df = pd.read_csv(filename)
if DZstats == 1:
intersample_results = intersample(df, xmin, xmax, xint)
print(intersample_results)
if PlotDistributions == 1:
fig, axs = makeplots(df, xmin, xmax, xint)
plt.show()
if __name__ == '__main__':
DZ_main()
| [
"pandas.read_csv",
"dztools.utils.makeplots.makeplots",
"dztools.stats.intersample.intersample",
"matplotlib.pyplot.show"
] | [((526, 547), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (537, 547), True, 'import pandas as pd\n'), ((600, 633), 'dztools.stats.intersample.intersample', 'intersample', (['df', 'xmin', 'xmax', 'xint'], {}), '(df, xmin, xmax, xint)\n', (611, 633), False, 'from dztools.stats.intersample import intersample\n'), ((720, 751), 'dztools.utils.makeplots.makeplots', 'makeplots', (['df', 'xmin', 'xmax', 'xint'], {}), '(df, xmin, xmax, xint)\n', (729, 751), False, 'from dztools.utils.makeplots import makeplots\n'), ((760, 770), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (768, 770), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
"""
:param MLLD_functions: this class provides several helper functions that I use regularly.
"""
class MLLD_functions:
def standardization(self, variable):
"""
:param variable: the array with the variables you wish to standardize
:return: standardized array
"""
var_average = np.average(variable)
var_std = np.std(variable)
new_variable = []
for i in range(variable.size):
new_variable_i = (variable[i] - var_average)/var_std
new_variable.append(new_variable_i)
self.new_variable = np.array(new_variable)
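        # The loop above is the usual z-score transform; it is equivalent to the
        # vectorized expression (variable - var_average) / var_std.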
return self.new_variable
| [
"numpy.std",
"numpy.average",
"numpy.array"
] | [((342, 362), 'numpy.average', 'np.average', (['variable'], {}), '(variable)\n', (352, 362), True, 'import numpy as np\n'), ((385, 401), 'numpy.std', 'np.std', (['variable'], {}), '(variable)\n', (391, 401), True, 'import numpy as np\n'), ((608, 630), 'numpy.array', 'np.array', (['new_variable'], {}), '(new_variable)\n', (616, 630), True, 'import numpy as np\n')] |
import random
import time
from pathlib import Path
import numpy as np
import json
import torch
from editsql.data_util import atis_batch
from editsql.data_util.atis_data import ATISDataset
from editsql.data_util.interaction import load_function
from editsql.model import model, utils_bert
from editsql.model.schema_interaction_model import SchemaInteractionATISModel
from editsql.postprocess_eval import postprocess_one
from editsql.preprocess import read_database_schema
from editsql.model.bert import tokenization as tokenization
from editsql.model.bert.modeling import BertConfig, BertModel
from adapters.editsql import parse_args_spider, parse_args_sparc
from adapters.editsql.constants import *
from api import setup_util
from api.paths import DB_SCHEMAS_FILE
class EditsqlAdapter:
"""
Uses the functionality of editsql to translate arbitrary questions into sql
"""
def __init__(self, model="spider"):
if model == "sparc":
params = parse_args_sparc.interpret_args()
else:
params = parse_args_spider.interpret_args()
# create the dataset and model
data = ATISDataset(params)
self.model = self.load_model(params, data)
_, _, self.database_schemas = read_database_schema(DB_SCHEMAS_FILE, schema_tokens={}, column_names={}, database_schemas_dict={})
# function used for loading of interaction in raw state
self.int_load_function = load_function(params,
data.entities_dictionary,
data.anonymizer,
database_schema=self.database_schemas)
def load_model(self, params, data):
"""
Loads the editsql translation model
Args:
params: the parsed arguments
data: the ATISDataset
Returns:
the loaded SchemaInteractionATISModel
"""
model = SchemaInteractionATISModel(
params,
data.input_vocabulary,
data.output_vocabulary,
data.output_vocabulary_schema,
data.anonymizer if params.anonymize and params.anonymization_scoring else None)
        model.load_state_dict(torch.load(params.save_file, map_location='cpu'))
print("Loaded model from file " + params.save_file)
model.eval()
return model
def prepare_interaction(self, nl_questions, db_id, prev_predictions):
"""
Creates an InteractionItem that contains the natural language question and the database id
Args:
nl_questions: the natural language questions
db_id: the database that acts as context
prev_predictions: the previous predictions
Returns:
an InteractionItem that contains the natural language question and the database id
"""
# establish the structure of an interaction in raw state
example = dict()
example["final"] = dict()
example["interaction"] = []
# fill the general fields
example["id"] = "dummy id"
example["scenario"] = ""
example["interaction_id"] = 42
# fill the content fields
example["database_id"] = db_id
prev_predictions.append("dummy sql query")
for i, nl_q in enumerate(nl_questions):
sql_int = [(prev_predictions[i].split(), [])]
example["interaction"].append({"utterance": nl_q, "sql": sql_int})
example["final"]["utterance"] = nl_questions[-1]
example["final"]["sql"] = "query to be predicted"
# transform the raw interaction to an InteractionItem
obj, _ = self.int_load_function(example)
interaction = atis_batch.InteractionItem(obj)
return interaction
def translate(self, nl_question, db_id):
"""
Translate a single natural language question into sql
Args:
nl_question: the natural language question
db_id: the database that acts as context
Returns:
the sql prediction
"""
# preprocess
nl_questions = [nl_question]
interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions=[])
prediction = self.predict(interaction)
return self.post_process(prediction, db_id)
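        # Illustrative call (the db_id must be present in DB_SCHEMAS_FILE; the id below
        # is assumed to exist):
        #   EditsqlAdapter().translate("How many singers do we have?", "concert_singer")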
def translate_interaction(self, nl_question, db_id, prev_nl_questions, prev_predictions):
"""
Predict the sql for the next utterance in an interaction
Args:
nl_question: the natural language question
db_id: the database that acts as context
prev_nl_questions: the previous questions or an empty list
prev_predictions: the previous predictions or an empty list
Returns:
the sql prediction
"""
# preprocess
nl_questions = prev_nl_questions + [nl_question]
interaction = self.prepare_interaction(nl_questions, db_id, prev_predictions)
prediction = self.predict(interaction)
return self.post_process(prediction, db_id)
def predict(self, interaction):
prediction = self.model.predict_with_predicted_queries(interaction, 1000)
pred_tokens_raw = prediction[-1][0]
pred_tokens = pred_tokens_raw[:-1] # strip the _EOS symbol
pred_str = " ".join(pred_tokens)
return pred_str
def post_process(self, prediction, db_id):
schema = self.database_schemas[db_id]
post_processed = postprocess_one(prediction, schema)
return post_processed
# ------------ Evaluation -----------------
def evaluate(self, amount=0, randomness=False, show_all=False, use_gold_query=False):
"""
Evaluate the translation output of EditsqlAdapter.
By default the prediction results of standalone editsql act as the reference.
The use_gold_query switch enables comparison with the gold queries from spider
Args:
amount: the amount of samples to use
randomness: randomly choose samples
show_all: write all samples, not only those with errors
use_gold_query: comparison with the gold queries from spider instead of the prediction results of standalone editsql
"""
# load the prediction results of standalone editsql
with open(EVAL_REFERENCE_FILE) as infile:
references = json.load(infile)
if not amount:
# let amount default to _all_ examples from the file
amount = len(references)
# determine the instances to test on
if randomness:
sample_indices = random.sample(range(len(references)), k=amount)
else:
sample_indices = range(amount)
comparisons = []
num_errors = 0
start = time.time()
for i in sample_indices:
db_id = references[i]["database_id"]
in_seq_raw = references[i]["input_seq"]
in_seq = " ".join(in_seq_raw)
schema = self.database_schemas[db_id]
dev_prediction_raw = references[i]["flat_prediction"]
dev_prediction = " ".join(dev_prediction_raw)
dev_prediction = postprocess_one(dev_prediction, schema)
translation = self.translate(in_seq, db_id)
gold = " ".join(references[i]["gold_query"])
gold = postprocess_one(gold, schema)
# normalize and prevent numbering from distorting the results
gold_norm = ''.join("0" if c.isdigit() else c.lower() for c in gold)
dev_pred_norm = ''.join("0" if c.isdigit() else c.lower() for c in dev_prediction)
translation_norm = ''.join("0" if c.isdigit() else c.lower() for c in translation)
if use_gold_query:
is_error = translation_norm != gold_norm
else:
is_error = translation_norm != dev_pred_norm
if is_error:
num_errors += 1
if is_error or show_all:
comparison = dict()
comparison["identifier"] = references[i]["identifier"]
comparison["is_equal"] = not is_error
comparison["input_seq"] = in_seq
comparison["prediction"] = {}
if use_gold_query:
comparison["prediction"]["gold "] = gold
else:
comparison["prediction"]["editsql "] = dev_prediction
comparison["prediction"]["translation"] = translation
comparisons.append(comparison)
end = time.time()
duration = end - start
time_per_item = duration / amount
num_correct = amount - num_errors
accuracy = num_correct * 100 / amount
eval_output = dict()
eval_output["time per item"] = time_per_item
eval_output["# items"] = amount
eval_output["% equal"] = accuracy
if show_all:
eval_output["content"] = comparisons
else:
eval_output["diff"] = comparisons
write_json_log_results(eval_output, CURRENT_DIR / "evaluation/results")
# ------------ Batch processing -----------------
@classmethod
def batch_translate(cls, input_file=BATCH_INPUT_FILE, output_dir=BATCH_OUTPUT_DIR):
"""
Read the list of dicts with values for nl_question and db_id from the input file
and save the translations to a file in the output directory
Args:
input_file: path of file with list of dicts with values for nl_question and db_id
output_dir: path of dir where the translations are saved
"""
edi_adap = EditsqlAdapter()
with open(input_file) as f:
requests = json.load(f)
for i, request in enumerate(requests):
request["sql"] = edi_adap.translate(request["nl_question"], request["db_id"])
write_json_log_results(requests, output_dir)
def write_json_log_results(content, directory):
path = Path(directory)
filename = time.strftime("%Y_%m_%d-%H_%M_%S") + ".json"
with open(str(path / filename), 'w') as outfile:
json.dump(content, outfile, indent=4)
# define a modified embeddings loading function that makes use of the preloaded glove
def load_word_embeddings_for_editsql(input_vocabulary, output_vocabulary, output_vocabulary_schema, params):
glove_embedding_size = 300
# ------- use preloaded glove -----------
glove_embeddings = setup_util.glove_embeddings
# ---------------------------------------
input_embedding_size = glove_embedding_size
def create_word_embeddings(vocab):
vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32)
vocabulary_tokens = vocab.inorder_tokens
glove_oov = 0
para_oov = 0
for token in vocabulary_tokens:
token_id = vocab.token_to_id(token)
if token in glove_embeddings:
vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token]
else:
glove_oov += 1
print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab))
return vocabulary_embeddings
input_vocabulary_embeddings = create_word_embeddings(input_vocabulary)
output_vocabulary_embeddings = create_word_embeddings(output_vocabulary)
output_vocabulary_schema_embeddings = None
if output_vocabulary_schema:
output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema)
return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size
# overwrite the original embeddings loading function with the modified version
model.load_word_embeddings = load_word_embeddings_for_editsql
# define a modified version with absolute path instead of relative path in the first line
def get_bert(params):
BERT_PT_PATH = str(TRANSLATORS_DIR / "editsql/model/bert/data/annotated_wikisql_and_PyTorch_bert_param")
map_bert_type_abb = {'uS': 'uncased_L-12_H-768_A-12',
'uL': 'uncased_L-24_H-1024_A-16',
'cS': 'cased_L-12_H-768_A-12',
'cL': 'cased_L-24_H-1024_A-16',
'mcS': 'multi_cased_L-12_H-768_A-12'}
bert_type = map_bert_type_abb[params.bert_type_abb]
if params.bert_type_abb == 'cS' or params.bert_type_abb == 'cL' or params.bert_type_abb == 'mcS':
do_lower_case = False
else:
do_lower_case = True
no_pretraining = False
bert_config_file = os.path.join(BERT_PT_PATH, f'bert_config_{bert_type}.json')
vocab_file = os.path.join(BERT_PT_PATH, f'vocab_{bert_type}.txt')
init_checkpoint = os.path.join(BERT_PT_PATH, f'pytorch_model_{bert_type}.bin')
print('bert_config_file', bert_config_file)
print('vocab_file', vocab_file)
print('init_checkpoint', init_checkpoint)
bert_config = BertConfig.from_json_file(bert_config_file)
tokenizer = tokenization.FullTokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case)
bert_config.print_status()
model_bert = BertModel(bert_config)
if no_pretraining:
pass
else:
model_bert.load_state_dict(torch.load(init_checkpoint, map_location='cpu'))
print("Load pre-trained parameters.")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_bert.to(device)
return model_bert, tokenizer, bert_config
# overwrite the original function with the modified version
utils_bert.get_bert = get_bert
| [
"editsql.model.bert.modeling.BertModel",
"time.strftime",
"pathlib.Path",
"editsql.model.bert.modeling.BertConfig.from_json_file",
"editsql.model.schema_interaction_model.SchemaInteractionATISModel",
"torch.load",
"editsql.model.bert.tokenization.FullTokenizer",
"editsql.model.model.eval",
"json.dump",
"editsql.data_util.atis_data.ATISDataset",
"editsql.preprocess.read_database_schema",
"editsql.data_util.atis_batch.InteractionItem",
"torch.cuda.is_available",
"adapters.editsql.parse_args_sparc.interpret_args",
"editsql.data_util.interaction.load_function",
"json.load",
"editsql.postprocess_eval.postprocess_one",
"time.time",
"adapters.editsql.parse_args_spider.interpret_args"
] | [((10111, 10126), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (10115, 10126), False, 'from pathlib import Path\n'), ((13090, 13133), 'editsql.model.bert.modeling.BertConfig.from_json_file', 'BertConfig.from_json_file', (['bert_config_file'], {}), '(bert_config_file)\n', (13115, 13133), False, 'from editsql.model.bert.modeling import BertConfig, BertModel\n'), ((13150, 13228), 'editsql.model.bert.tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'vocab_file', 'do_lower_case': 'do_lower_case'}), '(vocab_file=vocab_file, do_lower_case=do_lower_case)\n', (13176, 13228), True, 'from editsql.model.bert import tokenization as tokenization\n'), ((13287, 13309), 'editsql.model.bert.modeling.BertModel', 'BertModel', (['bert_config'], {}), '(bert_config)\n', (13296, 13309), False, 'from editsql.model.bert.modeling import BertConfig, BertModel\n'), ((1137, 1156), 'editsql.data_util.atis_data.ATISDataset', 'ATISDataset', (['params'], {}), '(params)\n', (1148, 1156), False, 'from editsql.data_util.atis_data import ATISDataset\n'), ((1247, 1349), 'editsql.preprocess.read_database_schema', 'read_database_schema', (['DB_SCHEMAS_FILE'], {'schema_tokens': '{}', 'column_names': '{}', 'database_schemas_dict': '{}'}), '(DB_SCHEMAS_FILE, schema_tokens={}, column_names={},\n database_schemas_dict={})\n', (1267, 1349), False, 'from editsql.preprocess import read_database_schema\n'), ((1444, 1551), 'editsql.data_util.interaction.load_function', 'load_function', (['params', 'data.entities_dictionary', 'data.anonymizer'], {'database_schema': 'self.database_schemas'}), '(params, data.entities_dictionary, data.anonymizer,\n database_schema=self.database_schemas)\n', (1457, 1551), False, 'from editsql.data_util.interaction import load_function\n'), ((1974, 2176), 'editsql.model.schema_interaction_model.SchemaInteractionATISModel', 'SchemaInteractionATISModel', (['params', 'data.input_vocabulary', 'data.output_vocabulary', 'data.output_vocabulary_schema', '(data.anonymizer if params.anonymize and params.anonymization_scoring else None\n )'], {}), '(params, data.input_vocabulary, data.\n output_vocabulary, data.output_vocabulary_schema, data.anonymizer if \n params.anonymize and params.anonymization_scoring else None)\n', (2000, 2176), False, 'from editsql.model.schema_interaction_model import SchemaInteractionATISModel\n'), ((2376, 2388), 'editsql.model.model.eval', 'model.eval', ([], {}), '()\n', (2386, 2388), False, 'from editsql.model import model, utils_bert\n'), ((3769, 3800), 'editsql.data_util.atis_batch.InteractionItem', 'atis_batch.InteractionItem', (['obj'], {}), '(obj)\n', (3795, 3800), False, 'from editsql.data_util import atis_batch\n'), ((5562, 5597), 'editsql.postprocess_eval.postprocess_one', 'postprocess_one', (['prediction', 'schema'], {}), '(prediction, schema)\n', (5577, 5597), False, 'from editsql.postprocess_eval import postprocess_one\n'), ((6882, 6893), 'time.time', 'time.time', ([], {}), '()\n', (6891, 6893), False, 'import time\n'), ((8678, 8689), 'time.time', 'time.time', ([], {}), '()\n', (8687, 8689), False, 'import time\n'), ((10143, 10177), 'time.strftime', 'time.strftime', (['"""%Y_%m_%d-%H_%M_%S"""'], {}), "('%Y_%m_%d-%H_%M_%S')\n", (10156, 10177), False, 'import time\n'), ((10249, 10286), 'json.dump', 'json.dump', (['content', 'outfile'], {'indent': '(4)'}), '(content, outfile, indent=4)\n', (10258, 10286), False, 'import json\n'), ((978, 1011), 'adapters.editsql.parse_args_sparc.interpret_args', 'parse_args_sparc.interpret_args', ([], 
{}), '()\n', (1009, 1011), False, 'from adapters.editsql import parse_args_spider, parse_args_sparc\n'), ((1047, 1081), 'adapters.editsql.parse_args_spider.interpret_args', 'parse_args_spider.interpret_args', ([], {}), '()\n', (1079, 1081), False, 'from adapters.editsql import parse_args_spider, parse_args_sparc\n'), ((2259, 2307), 'torch.load', 'torch.load', (['params.save_file'], {'map_location': '"""cpu"""'}), "(params.save_file, map_location='cpu')\n", (2269, 2307), False, 'import torch\n'), ((6469, 6486), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (6478, 6486), False, 'import json\n'), ((7275, 7314), 'editsql.postprocess_eval.postprocess_one', 'postprocess_one', (['dev_prediction', 'schema'], {}), '(dev_prediction, schema)\n', (7290, 7314), False, 'from editsql.postprocess_eval import postprocess_one\n'), ((7449, 7478), 'editsql.postprocess_eval.postprocess_one', 'postprocess_one', (['gold', 'schema'], {}), '(gold, schema)\n', (7464, 7478), False, 'from editsql.postprocess_eval import postprocess_one\n'), ((9845, 9857), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9854, 9857), False, 'import json\n'), ((13391, 13438), 'torch.load', 'torch.load', (['init_checkpoint'], {'map_location': '"""cpu"""'}), "(init_checkpoint, map_location='cpu')\n", (13401, 13438), False, 'import torch\n'), ((13522, 13547), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13545, 13547), False, 'import torch\n')] |
"""User defined module for simulation."""
import numpy
def get_analytical(grid, asol, user_bc):
"""Compute and set the analytical solution.
Arguments
---------
grid : flowx.Grid object
Grid containing data.
asol : string
        Name of the variable on the grid.
    user_bc : string
        Type of boundary condition; 'dirichlet' selects the sine-product solution, any other value the cosine-product solution.
    """
X, Y = numpy.meshgrid(grid.x, grid.y)
    if user_bc == 'dirichlet':
values = numpy.sin(2 * numpy.pi * X) * numpy.sin(2 * numpy.pi * Y)
else:
values = numpy.cos(2 * numpy.pi * X) * numpy.cos(2 * numpy.pi * Y)
grid.set_values(asol, values.transpose())
def get_rhs(grid, rvar, user_bc):
"""Compute and set the right-hand side of the Poisson system.
Arguments
---------
grid : flowx.Grid object
Grid containing data.
rvar : string
        Name of the variable on the grid.
    user_bc : string
        Type of boundary condition; 'dirichlet' selects the sine-based right-hand side, any other value the cosine-based one.
    """
X, Y = numpy.meshgrid(grid.x, grid.y)
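    # The RHS equals the Laplacian of the analytical solution: for u = sin/cos(2*pi*x) * sin/cos(2*pi*y), laplacian(u) = -8 * pi^2 * u.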
    if user_bc == 'dirichlet':
values = (-8 * numpy.pi**2 *
numpy.sin(2 * numpy.pi * X) * numpy.sin(2 * numpy.pi * Y))
else:
values = (-8 * numpy.pi**2 *
numpy.cos(2 * numpy.pi * X) * numpy.cos(2 * numpy.pi * Y))
grid.set_values(rvar, values.transpose())
| [
"numpy.sin",
"numpy.meshgrid",
"numpy.cos"
] | [((315, 345), 'numpy.meshgrid', 'numpy.meshgrid', (['grid.x', 'grid.y'], {}), '(grid.x, grid.y)\n', (329, 345), False, 'import numpy\n'), ((856, 886), 'numpy.meshgrid', 'numpy.meshgrid', (['grid.x', 'grid.y'], {}), '(grid.x, grid.y)\n', (870, 886), False, 'import numpy\n'), ((396, 423), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * X)'], {}), '(2 * numpy.pi * X)\n', (405, 423), False, 'import numpy\n'), ((426, 453), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * Y)'], {}), '(2 * numpy.pi * Y)\n', (435, 453), False, 'import numpy\n'), ((481, 508), 'numpy.cos', 'numpy.cos', (['(2 * numpy.pi * X)'], {}), '(2 * numpy.pi * X)\n', (490, 508), False, 'import numpy\n'), ((511, 538), 'numpy.cos', 'numpy.cos', (['(2 * numpy.pi * Y)'], {}), '(2 * numpy.pi * Y)\n', (520, 538), False, 'import numpy\n'), ((1005, 1032), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * Y)'], {}), '(2 * numpy.pi * Y)\n', (1014, 1032), False, 'import numpy\n'), ((1129, 1156), 'numpy.cos', 'numpy.cos', (['(2 * numpy.pi * Y)'], {}), '(2 * numpy.pi * Y)\n', (1138, 1156), False, 'import numpy\n'), ((975, 1002), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * X)'], {}), '(2 * numpy.pi * X)\n', (984, 1002), False, 'import numpy\n'), ((1099, 1126), 'numpy.cos', 'numpy.cos', (['(2 * numpy.pi * X)'], {}), '(2 * numpy.pi * X)\n', (1108, 1126), False, 'import numpy\n')] |
from pipeline.feature_engineering.preprocessing.abstract_preprocessor import Preprocessor
from pipeline.feature_engineering.preprocessing.replacement_strategies.mean_replacement_strategy import MeanReplacementStrategy
from pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy
from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy
from overrides import overrides
import traceback
import os
import pandas
from sklearn.decomposition import PCA
import numpy
class SussexHuaweiPreprocessor(Preprocessor):
def __init__(self):
super().__init__()
@overrides
def segment_data(self, data, mode, label_column=None, args=None):
"""
        Segments a time series based on a label column, by semantic segmentation, or into fixed-length intervals.
:param data:
:param mode:
:param label_column:
:param args:
:return:
"""
try:
if data is None or mode is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if mode == 'semantic':
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)
if mode == 'labels':
# 1. Select all data with desired label value
data_segments = []
for target_label in args:
selected_data = data[data[label_column] == target_label]
# 2. Split by non-subsequent indices
# Source for next 3 lines after comment:
# https://stackoverflow.com/questions/56257329/how-to-split-a-dataframe-based-on-consecutive-index
non_sequence = pandas.Series(selected_data.index).diff() != 1
grouper = non_sequence.cumsum().values
selected_data_segments = [group for _, group in selected_data.groupby(grouper)]
for segment in selected_data_segments:
data_segments.append(segment)
return data_segments
if mode == 'fixed_interval':
segment_length = args[0]
aggregate = args[1]
exact_length = args[2]
segments_aggregated = []
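                # split() uses numpy.array_split to produce len(df) // chunk_size + 1 chunks of roughly equal size.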
split = lambda df, chunk_size : numpy.array_split(df, len(df) // chunk_size + 1, axis=0)
# 1. Ensure index is datetime index and standardize type
data.index = pandas.DatetimeIndex(data.index.astype('datetime64[1s]'))
#2. Segment data
segments = split(data, segment_length)
if not exact_length:
for segment in segments:
segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]'))
return segments
#3. Remove segments that are too long or too short after splitting
min_length_subsegements = []
for segment in segments:
if segment.shape[0] == segment_length:
min_length_subsegements.append(segment)
if not aggregate:
for segment in min_length_subsegements:
segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]'))
return min_length_subsegements
#3. Resample and aggregate data
segments_combined = None
for segment in min_length_subsegements:
segment = segment.reset_index()
segment.index = pandas.DatetimeIndex(segment.index.astype('datetime64[1s]'))
segment = self.resample_quantitative_data(segment,
freq="{}s".format(segment_length),
mode = 'mean')
if segments_combined is None:
segments_combined = segment
else:
segments_combined = pandas.concat([segments_combined, segment], axis=0)
if segments_combined is not None:
segments_combined = segments_combined.reset_index()
segments_combined.index = pandas.DatetimeIndex(
segments_combined.index.astype('datetime64[1s]'))
segments_aggregated.append(segments_combined)
return segments_aggregated
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def de_segment_data(self, data_segments, selected_columns=None, axis = 0):
"""
        De-segments a time series by concatenating the given segments back into one frame.
:param data_segments:
:param selected_columns:
:param axis:
:return:
"""
try:
data = None
for ind in range(len(data_segments)):
if data is None:
data = data_segments[ind][selected_columns]
else:
data = pandas.concat([data, data_segments[ind][selected_columns]], axis=axis)
data = data.reset_index(drop=True)
return data
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def remove_nans(self, data, replacement_mode, replacement_value=None):
"""
Remove NaNs
:param data:
        :param replacement_mode: string, 'mean', 'replacement_val', 'del_row'
:param replacement_value: any type, used as value if replacment_mode is 'default_val'
:return: pandas.DataFrame
"""
try:
if data is None or replacement_mode is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if replacement_mode == 'mean':
return MeanReplacementStrategy().replace(data, 'NaN')
if replacement_mode == 'del_row':
return DelRowReplacementStrategy().replace(data, 'NaN')
if replacement_mode == 'replacement_val':
return ReplacementValReplacementStrategy().replace(data, 'NaN', replacement_vals=replacement_value)
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def remove_outliers_from_quantitative_data(self, data, replacement_mode, columns, quantile = None, threshold = None):
"""
        Removes outliers based either on a quantile or on a threshold value.
:param data:
:param replacement_mode:
:param columns:
:param quantile:
:param threshold:
:return:
"""
try:
if data is None or replacement_mode is None or columns is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(columns, list) or not isinstance(replacement_mode, str):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if len(columns) < 1:
raise ValueError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value)
if replacement_mode == 'quantile':
# Source for next 7 lines of code after comment:
# https://nextjournal.com/schmudde/how-to-remove-outliers-in-data
for column in columns:
not_outliers = data[column].between(
data[column].quantile(1.0 - quantile),
data[column].quantile(quantile)
)
data[column] = data[column][not_outliers]
index_names = data[~not_outliers].index
data.drop(index_names, inplace=True)
old_index = data.index
data = data.reset_index(drop=False)
data = data.set_index(old_index)
return data
if replacement_mode == 'threshold':
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def resample_quantitative_data(self, data, freq, mode = None):
"""
Resamples quantitative data.
:param data:
:param freq:
:param mode:
:return:
"""
# Source:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.resample.html
# https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html
try:
if data is None or freq is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(freq, str):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if mode == 'mean' or mode is None:
return data.resample(freq).mean()
if mode == 'sum':
return data.resample(freq).sum()
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def convert_unix_to_datetime(self, data, column, unit):
"""
Converts unix time stamps to date time.
:param data:
:param column:
:param unit:
:return:
"""
# Source:
# https://stackoverflow.com/questions/19231871/convert-unix-time-to-readable-date-in-pandas-dataframe
# https://stackoverflow.com/questions/42698421/pandas-to-datetime-from-milliseconds-produces-incorrect-datetime
try:
if data is None or column is None or unit is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(column, str) or not isinstance(unit, str):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
data[column] = pandas.to_datetime(data[column], unit=unit)
return data
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def remove_unwanted_labels(self, data, unwanted_labels, replacement_mode):
"""
Remove rows that have an unwanted label.
:param data:
:param unwanted_labels:
:param replacement_mode:
:return:
"""
try:
if data is None or replacement_mode is None or unwanted_labels is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(unwanted_labels, list) or not isinstance(replacement_mode, str):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if replacement_mode == 'del_row':
return DelRowReplacementStrategy().replace(data, 'unwanted_labels', unwanted_labels)
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def project_accelerometer_to_global_coordinates(self, data, target_columns, mode, args=None):
"""
Project accelerometer data from local vehicle coordinates to a global coordinate system.
:param data:
:param target_columns:
:param mode:
:param args:
:return:
"""
try:
if data is None or target_columns is None or mode is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(target_columns, list):
raise TypeError(type(data))
if mode == 'mean_estimate_gravity':
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)
if mode == 'gyroscope':
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)
if mode == 'gravity':
if len(target_columns) != len(args):
raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value)
for ind, column in enumerate(target_columns):
data[column] = data[column] - data[args[ind]]
return data
if mode == 'orientation':
if len(target_columns)+1 != len(args):
raise TypeError(self.messages.PROVIDED_ARRAYS_DONT_MATCH_LENGTH.value)
# Source for theory behind below calculation
# https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation
# https://en.wikipedia.org/wiki/Homogeneous_coordinates
# #https://stackoverflow.com/questions/2422750/in-opengl-vertex-shaders-what-is-w-and-why-do-i-divide-by-it
for ind, column in enumerate(target_columns):
data[column] = data[column] * (data[args[ind]] / data[args[3]])
return data
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def label_data(self, labels, data):
"""
Combines labels vector and data matrix.
:param labels:
:param data:
:return:
"""
try:
if data is None or labels is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not (isinstance(data, pandas.DataFrame) and isinstance(labels, pandas.DataFrame)):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if (len(labels) != len(data)):
raise TypeError(self.messages.PROVIDED_FRAME_DOESNT_MATCH_DATA.value)
return pandas.concat((labels, data), axis=1)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def znormalize_quantitative_data(self, data, columns = None, mean = None, std = None):
"""
Apply z-normalization to a data set.
:param data:
:param columns:
:param mean:
:param std:
:return:
"""
try:
if data is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if not all(column in data.keys() for column in columns):
raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value)
if mean is None and std is None:
if columns is not None:
mean = data[columns].mean()
std = data[columns].std()
data[columns] = (data[columns] - data[columns].mean()) / data[columns].std()
else:
mean = data.mean()
std = data.std()
data = (data - data.mean()) / data.std()
elif mean is not None and std is not None:
if columns is not None:
data[columns] = (data[columns] - mean) / std
else:
data = (data - mean) / std
else:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
return data, mean, std
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def min_max_normalize_quantitative_data(self, data, columns=None):
"""
Apply min-max-normalization to a data set.
:param data:
:param columns:
:return:
"""
try:
if data is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if not all(column in data.keys() for column in columns):
raise TypeError(self.messages.PROVIDED_ARRAY_DOESNT_MATCH_DATA.value)
if columns is not None:
                data[columns] = (data[columns] - data[columns].min()) / (data[columns].max() - data[columns].min())  # to center around 0.0, subtract 0.5
else:
                data = (data - data.min()) / (data.max() - data.min())  # to center around 0.0, subtract 0.5
return data
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def re_represent_data(self, current_representation, target_representation, data):
"""
Change representation of a data set.
:param current_representation:
:param target_representation:
:param data:
:return:
"""
raise NotImplementedError(self.messages.NOT_IMPLEMENTED.value)
@overrides
def reduce_quantitativ_data_dimensionality(self, data, mode, reduced_column_name = 'reduced', columns = None):
"""
Apply a dimensionality reduction technique to a data set.
:param data:
:param mode:
:param reduced_column_name:
:param columns:
:return:
"""
try:
if data is None or mode is None or reduced_column_name is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(reduced_column_name, str):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if mode == 'euclidean':
# Source:
# https://thispointer.com/pandas-apply-apply-a-function-to-each-row-column-in-dataframe/
# https://www.google.com/search?client=ubuntu&channel=fs&q=euclidean+norm&ie=utf-8&oe=utf-8
# https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html
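                # Row-wise Euclidean (L2) norm over the selected columns: sqrt(x^2 + y^2 + z^2).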
reduced = data[columns].apply(numpy.square, axis=1)[columns].sum(axis=1).apply(numpy.sqrt) #**(1/2) alternative
old_index = data.index
data = pandas.concat([data, reduced], axis=1)
data = data.rename(columns={0: reduced_column_name})
data = data.reset_index(drop=True)
data = data.set_index(old_index)
return data
if mode == 'manhatten':
                reduced = data[columns].abs().sum(axis=1)  # Manhattan (L1) norm: sum of absolute values per row
old_index = data.index
data = pandas.concat([data, reduced], axis=1)
data = data.rename(columns={0: reduced_column_name})
data = data.reset_index(drop=True)
data = data.set_index(old_index)
return data
if mode == 'pca':
# Source:
# https://stackoverflow.com/questions/23282130/principal-components-analysis-using-pandas-dataframe
# https://stackoverflow.com/questions/54260920/combine-merge-dataframes-with-different-indexes-and-different-column-names
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.append.html
# https://en.wikipedia.org/wiki/Principal_component_analysis
pca = PCA(n_components=1)
pca.fit(data[columns])
reduced = pandas.DataFrame((numpy.dot(pca.components_, data[columns].T).T))
reduced = reduced.rename(columns={0:reduced_column_name})
reduced = reduced.reset_index(drop=True)
old_index = data.index
data = data.reset_index(drop=True)
data = pandas.concat([data, reduced], axis=1)
data = data.set_index(old_index)
return data
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def encode_categorical_features(self, data, mode, columns, encoding_function):
"""
Encode categorical features using an encoding function.
:param data:
:param mode:
:param columns:
:param encoding_function:
:return:
"""
try:
if data is None or mode is None or columns is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
if not isinstance(data, pandas.DataFrame) or not isinstance(mode, str) or not isinstance(
columns, list):
raise TypeError(self.messages.ILLEGAL_ARGUMENT_TYPE.value)
if mode == 'custom_function':
if encoding_function is None:
raise TypeError(self.messages.ILLEGAL_ARGUMENT_NONE_TYPE.value)
for column in columns:
data[column] = encoding_function(data[column])
return data
raise ValueError(self.messages.PROVIDED_MODE_DOESNT_EXIST.value)
except (TypeError, NotImplementedError, ValueError):
self.logger.error(traceback.format_exc())
os._exit(1)
except Exception:
self.logger.error(traceback.format_exc())
os._exit(2)
@overrides
def inference_split_process(self, data, config, meta_data):
"""
Apply all preprocessing steps necessary for inference.
:param data: pandas.DataFrame
        :param config: dict with preprocessing parameters
        :param meta_data: dict providing 'mean_train' and 'std_train'
        :return: pandas.DataFrame
"""
print('Fetch params')
acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]]
freq = config['pre_proc_resample_freq'] # '1000ms'
mean_train = meta_data['mean_train']
std_train = meta_data['std_train']
print('Convert time unit, remove nans')
data = self.convert_unix_to_datetime(data, column='time', unit='ms')
data = self.remove_nans(data, replacement_mode='del_row')
data.set_index(data['time'], drop=True, inplace=True)
print('Resample')
data = self.resample_quantitative_data(data,
freq=freq) # 8000 1.25 Hz
print('Dimensionality reduction')
data = self.reduce_quantitativ_data_dimensionality(
data=data,
mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif
columns=acelerometer_columns,
reduced_column_name='acceleration_abs'
)
print('Normalizing, outlier removal')
selected_columns = ['acceleration_abs']
data, mean, std = self.znormalize_quantitative_data(data, selected_columns, mean_train, std_train)
data = self.remove_outliers_from_quantitative_data(
data,
replacement_mode='quantile',
columns=selected_columns,
quantile=0.99 # current run @0.95 for classical approach via TS Fresh
)[:-1]
return data
@overrides
def training_split_process(self, data, config, labels):
"""
Apply all preprocessing steps necessary for training.
:param data: pandas.DataFrame
        :param config: dict with preprocessing parameters
        :param labels: pandas.DataFrame with the label columns
        :return: data_train, mean_train, std_train, data_test, data_valid
"""
print('Fetch params')
#print(params)
labels = labels
test_sz = config['pre_proc_test_sz']
train_sz = config['pre_proc_training_sz']
valid_sz = config['pre_proc_validation_sz']
#acelerometer_columns = ['acceleration_x', 'acceleration_y', 'acceleration_z']
acelerometer_columns = [config['data_set_column_names'][1:][0], config['data_set_column_names'][1:][1], config['data_set_column_names'][1:][2]]
selected_coarse_labels = config['pre_proc_movement_type_label'] #[5]
selected_road_labels = config['pre_proc_road_type_label'] #[1, 3]
freq = config['pre_proc_resample_freq'] #'1000ms'
print('Convert time unit, label data, remove nans')
data = self.convert_unix_to_datetime(data, column = 'time', unit = 'ms')
data = self.label_data(data, labels)
data = self.remove_nans(data, replacement_mode='del_row')
print('Train, Test, Validation split')
data_len = data.shape[0]
test_len = int(data_len * test_sz)
train_len = int(data_len * train_sz)
valid_len = int(data_len * valid_sz)
data_train, data_test_valid = data.head(train_len), data.tail(test_len+valid_len)
data_test = data_test_valid.head(test_len)
data_valid = data_test_valid.tail(valid_len)
print('Segment by labels')
#Segment Train
car_train_segments = self.segment_data(data_train, mode='labels',
label_column='coarse_label',
args=selected_coarse_labels)
data_train_segments = []
for car_segment in car_train_segments:
road_segments = self.segment_data(car_segment, mode='labels',
label_column='road_label',
args=selected_road_labels
)
for road_segment in road_segments:
data_train_segments.append(road_segment)
#Segment Test
car_test_segments = self.segment_data(data_test, mode='labels',
label_column='coarse_label',
args=selected_coarse_labels)
data_test_segments = []
for car_segment in car_test_segments:
road_segments = self.segment_data(car_segment, mode='labels',
label_column='road_label',
args=selected_road_labels
)
for road_segment in road_segments:
data_test_segments.append(road_segment)
#Segment Valid
car_valid_segments = self.segment_data(data_valid, mode='labels',
label_column='coarse_label',
args=selected_coarse_labels)
data_valid_segments = []
for car_segment in car_valid_segments:
road_segments = self.segment_data(car_segment, mode='labels',
label_column='road_label',
args=selected_road_labels
)
for road_segment in road_segments:
data_valid_segments.append(road_segment)
print('Resample')
#Train
for ind in range(len(data_train_segments)):
data_train_segments[ind] = data_train_segments[ind].set_index('time')
data_train_segments[ind] = self.resample_quantitative_data(data_train_segments[ind],
freq=freq) # 8000 1.25 Hz
#Test
for ind in range(len(data_test_segments)):
data_test_segments[ind] = data_test_segments[ind].set_index('time')
data_test_segments[ind] = self.resample_quantitative_data(data_test_segments[ind],
freq=freq)
#Valid
for ind in range(len(data_valid_segments)):
data_valid_segments[ind] = data_valid_segments[ind].set_index('time')
data_valid_segments[ind] = self.resample_quantitative_data(data_valid_segments[ind],
freq=freq)
print('Dimensionality reduction')
#Train
for ind in range(len(data_train_segments)):
data_train_segments[ind] = self.reduce_quantitativ_data_dimensionality(
data=data_train_segments[ind],
mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif
columns=acelerometer_columns,
reduced_column_name='acceleration_abs'
)
#Test
for ind in range(len(data_test_segments)):
data_test_segments[ind] = self.reduce_quantitativ_data_dimensionality(
data=data_test_segments[ind],
mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif
columns=acelerometer_columns,
reduced_column_name='acceleration_abs'
)
#Valid
for ind in range(len(data_valid_segments)):
data_valid_segments[ind] = self.reduce_quantitativ_data_dimensionality(
data=data_valid_segments[ind],
mode=config['feature_eng_dim_reduction_type'], # works better than euclidean for motif
columns=acelerometer_columns,
reduced_column_name='acceleration_abs'
)
print('Normalizing, outlier removal')
#Train
selected_columns = ['acceleration_abs',
'road_label', 'id'] # 'acceleration_abs'
data_train = self.de_segment_data(data_train_segments, selected_columns)
data_train, mean_train, std_train = self.znormalize_quantitative_data(data_train, selected_columns[:-2])
data_train = self.remove_outliers_from_quantitative_data(
data_train,
replacement_mode='quantile',
columns=selected_columns[:-2],
quantile=0.99 # current run @0.95 for classical approach via TS Fresh
)[:-2]
#Test
data_test = self.de_segment_data(data_test_segments, selected_columns)
data_test, mean_test, std_test = self.znormalize_quantitative_data(data_test,
selected_columns[:-2],
mean_train, std_train)
data_test = self.remove_outliers_from_quantitative_data(
data_test,
replacement_mode='quantile',
columns=selected_columns[:-2],
quantile=0.99 # current run @0.95 for classical approach via TS Fresh
)[:-2]
#Valid
data_valid = self.de_segment_data(data_valid_segments, selected_columns)
data_valid, mean_valid, std_valid = self.znormalize_quantitative_data(data_valid,
selected_columns[:-2],
mean_train, std_train)
data_valid = self.remove_outliers_from_quantitative_data(
data_valid,
replacement_mode='quantile',
columns=selected_columns[:-2],
quantile=0.99 # current run @0.95 for classical approach via TS Fresh
)[:-2]
data_train = data_train.loc[:, ~data_train.columns.duplicated()]
data_test = data_test.loc[:, ~data_test.columns.duplicated()]
data_valid = data_valid.loc[:, ~data_valid.columns.duplicated()]
#print('Rolling mean smoothing')
#data_train['acceleration_abs'] = data_train['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3) #TODO make configureable
#data_test['acceleration_abs'] = data_test['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3)
#data_valid['acceleration_abs'] = data_valid['acceleration_abs'].rolling(5, min_periods=1, win_type='gaussian').sum(std=3)
#data_train = self.remove_nans(data_train, replacement_mode='del_row')
#data_test = self.remove_nans(data_test, replacement_mode='del_row')
#data_valid = self.remove_nans(data_valid, replacement_mode='del_row')
#print(data_train.head(100))
return data_train, mean_train, std_train, data_test, data_valid | [
"pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy.DelRowReplacementStrategy",
"pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy.ReplacementValReplacementStrategy",
"pipeline.feature_engineering.preprocessing.replacement_strategies.mean_replacement_strategy.MeanReplacementStrategy",
"os._exit",
"pandas.to_datetime",
"traceback.format_exc",
"sklearn.decomposition.PCA",
"pandas.Series",
"numpy.dot",
"pandas.concat"
] | [((11397, 11440), 'pandas.to_datetime', 'pandas.to_datetime', (['data[column]'], {'unit': 'unit'}), '(data[column], unit=unit)\n', (11415, 11440), False, 'import pandas\n'), ((15773, 15810), 'pandas.concat', 'pandas.concat', (['(labels, data)'], {'axis': '(1)'}), '((labels, data), axis=1)\n', (15786, 15810), False, 'import pandas\n'), ((4889, 4900), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (4897, 4900), False, 'import os\n'), ((4995, 5006), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (5003, 5006), False, 'import os\n'), ((5775, 5786), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (5783, 5786), False, 'import os\n'), ((5881, 5892), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (5889, 5892), False, 'import os\n'), ((7146, 7157), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (7154, 7157), False, 'import os\n'), ((7252, 7263), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (7260, 7263), False, 'import os\n'), ((9267, 9278), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (9275, 9278), False, 'import os\n'), ((9373, 9384), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (9381, 9384), False, 'import os\n'), ((10428, 10439), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (10436, 10439), False, 'import os\n'), ((10533, 10544), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (10541, 10544), False, 'import os\n'), ((11593, 11604), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (11601, 11604), False, 'import os\n'), ((11698, 11709), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (11706, 11709), False, 'import os\n'), ((12724, 12735), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (12732, 12735), False, 'import os\n'), ((12829, 12840), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (12837, 12840), False, 'import os\n'), ((15005, 15016), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (15013, 15016), False, 'import os\n'), ((15110, 15121), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (15118, 15121), False, 'import os\n'), ((15939, 15950), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (15947, 15950), False, 'import os\n'), ((16044, 16055), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (16052, 16055), False, 'import os\n'), ((17668, 17679), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (17676, 17679), False, 'import os\n'), ((17774, 17785), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (17782, 17785), False, 'import os\n'), ((18877, 18888), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (18885, 18888), False, 'import os\n'), ((18982, 18993), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (18990, 18993), False, 'import os\n'), ((20773, 20811), 'pandas.concat', 'pandas.concat', (['[data, reduced]'], {'axis': '(1)'}), '([data, reduced], axis=1)\n', (20786, 20811), False, 'import pandas\n'), ((21185, 21223), 'pandas.concat', 'pandas.concat', (['[data, reduced]'], {'axis': '(1)'}), '([data, reduced], axis=1)\n', (21198, 21223), False, 'import pandas\n'), ((21937, 21956), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(1)'}), '(n_components=1)\n', (21940, 21956), False, 'from sklearn.decomposition import PCA\n'), ((22332, 22370), 'pandas.concat', 'pandas.concat', (['[data, reduced]'], {'axis': '(1)'}), '([data, reduced], axis=1)\n', (22345, 22370), False, 'import pandas\n'), ((22654, 22665), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (22662, 22665), False, 'import os\n'), ((22759, 22770), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (22767, 22770), False, 'import os\n'), ((23960, 23971), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (23968, 23971), 
False, 'import os\n'), ((24065, 24076), 'os._exit', 'os._exit', (['(2)'], {}), '(2)\n', (24073, 24076), False, 'import os\n'), ((4853, 4875), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4873, 4875), False, 'import traceback\n'), ((4959, 4981), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4979, 4981), False, 'import traceback\n'), ((5496, 5566), 'pandas.concat', 'pandas.concat', (['[data, data_segments[ind][selected_columns]]'], {'axis': 'axis'}), '([data, data_segments[ind][selected_columns]], axis=axis)\n', (5509, 5566), False, 'import pandas\n'), ((5739, 5761), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5759, 5761), False, 'import traceback\n'), ((5845, 5867), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5865, 5867), False, 'import traceback\n'), ((7110, 7132), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (7130, 7132), False, 'import traceback\n'), ((7216, 7238), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (7236, 7238), False, 'import traceback\n'), ((9231, 9253), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (9251, 9253), False, 'import traceback\n'), ((9337, 9359), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (9357, 9359), False, 'import traceback\n'), ((10392, 10414), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10412, 10414), False, 'import traceback\n'), ((10497, 10519), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10517, 10519), False, 'import traceback\n'), ((11557, 11579), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (11577, 11579), False, 'import traceback\n'), ((11662, 11684), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (11682, 11684), False, 'import traceback\n'), ((12688, 12710), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (12708, 12710), False, 'import traceback\n'), ((12793, 12815), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (12813, 12815), False, 'import traceback\n'), ((14969, 14991), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (14989, 14991), False, 'import traceback\n'), ((15074, 15096), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (15094, 15096), False, 'import traceback\n'), ((15903, 15925), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (15923, 15925), False, 'import traceback\n'), ((16008, 16030), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (16028, 16030), False, 'import traceback\n'), ((17632, 17654), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (17652, 17654), False, 'import traceback\n'), ((17738, 17760), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (17758, 17760), False, 'import traceback\n'), ((18841, 18863), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (18861, 18863), False, 'import traceback\n'), ((18946, 18968), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (18966, 18968), False, 'import traceback\n'), ((22618, 22640), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (22638, 22640), False, 'import traceback\n'), ((22723, 22745), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (22743, 22745), False, 'import traceback\n'), ((23924, 23946), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (23944, 23946), False, 'import traceback\n'), ((24029, 
24051), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (24049, 24051), False, 'import traceback\n'), ((4334, 4385), 'pandas.concat', 'pandas.concat', (['[segments_combined, segment]'], {'axis': '(0)'}), '([segments_combined, segment], axis=0)\n', (4347, 4385), False, 'import pandas\n'), ((6605, 6630), 'pipeline.feature_engineering.preprocessing.replacement_strategies.mean_replacement_strategy.MeanReplacementStrategy', 'MeanReplacementStrategy', ([], {}), '()\n', (6628, 6630), False, 'from pipeline.feature_engineering.preprocessing.replacement_strategies.mean_replacement_strategy import MeanReplacementStrategy\n'), ((6721, 6748), 'pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy.DelRowReplacementStrategy', 'DelRowReplacementStrategy', ([], {}), '()\n', (6746, 6748), False, 'from pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy\n'), ((6847, 6882), 'pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy.ReplacementValReplacementStrategy', 'ReplacementValReplacementStrategy', ([], {}), '()\n', (6880, 6882), False, 'from pipeline.feature_engineering.preprocessing.replacement_strategies.replacement_val_replacement_strategy import ReplacementValReplacementStrategy\n'), ((12440, 12467), 'pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy.DelRowReplacementStrategy', 'DelRowReplacementStrategy', ([], {}), '()\n', (12465, 12467), False, 'from pipeline.feature_engineering.preprocessing.replacement_strategies.del_row_replacement_strategy import DelRowReplacementStrategy\n'), ((22040, 22083), 'numpy.dot', 'numpy.dot', (['pca.components_', 'data[columns].T'], {}), '(pca.components_, data[columns].T)\n', (22049, 22083), False, 'import numpy\n'), ((1950, 1984), 'pandas.Series', 'pandas.Series', (['selected_data.index'], {}), '(selected_data.index)\n', (1963, 1984), False, 'import pandas\n')] |
"""Struct classes for car telemetry. Classes parse data from binary format and extract player data."""
import struct
import ctypes
from dataclasses import dataclass, asdict
from typing import List
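# struct format codes ('<' = little-endian): B = uint8, b = int8, H = uint16, f = float32, L = uint32, Q = uint64.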
PACKET_HEADER_FORMAT = "<HBBBBQfLBB"
PACKET_CAR_TELEMETRY_DATA_FORMAT = "BBb"
CAR_TELEMETRY_DATA_FORMAT = "HfffBbHBBHHHHHBBBBBBBBHffffBBBB"
LAP_DATA_FORMAT = "LLHHfffBBBBBBBBBBBBBBHHB"
CAR_STATUS_DATA_FORMAT = "BBBBBfffHHBBHBBBbfBfffB"
CAR_DAMAGE_DATA_FORMAT = "ffffBBBBBBBBBBBBBBBBBBBBBBB"
def _telemetry_list_to_attributes(telemetry_values: list, attribute_name: str) -> dict:
"""Get single attributes from attributes list and allocate to position on car (fl, fr, rl, rr).
Args:
telemetry_values(list): List of telemetry values that should be mapped to attributes.
attribute_name(str): Attribute name used as keys in dict.
"""
car_position_mapping = ["rl", "rr", "fl", "fr"]
telemetry_values_dict = {}
for i, telemetry_value in enumerate(telemetry_values):
key_name = str(attribute_name) + "_" + car_position_mapping[i]
telemetry_values_dict[key_name] = telemetry_value
return telemetry_values_dict
@dataclass
class PacketHeader:
"""PacketHeader struct."""
m_packet_format: ctypes.c_uint16
m_game_major_version: ctypes.c_uint8
m_game_minor_version: ctypes.c_uint8
m_packet_version: ctypes.c_uint8
m_packet_id: ctypes.c_uint8
m_session_uid: ctypes.c_uint64
m_session_time: ctypes.c_float
m_frame_identifier: ctypes.c_uint32
m_player_car_index: ctypes.c_uint8
m_secondary_player_car_index: ctypes.c_uint8
@classmethod
def from_binary(cls, binary_message: str):
"""Create class form binary UDP package.
Args:
binary_message (str): Binary representation of package header.
"""
format_string = "<HBBBBQfLBB"
unpacked = struct.unpack_from(format_string, binary_message)
return cls(
unpacked[0],
unpacked[1],
unpacked[2],
unpacked[3],
unpacked[4],
unpacked[5],
unpacked[6],
unpacked[7],
unpacked[8],
unpacked[9],
)
@dataclass
class PacketWOAdditionalAttributes:
"""PacketCarStatusData struct."""
m_header: PacketHeader
@classmethod
def get_message_list(
cls,
packet_header: PacketHeader,
binary_message: str,
message_format: str,
message_type: object,
):
"""Create class form binary UDP package.
Args:
packet_header (PacketHeader): PacketHeader class.
binary_message (str): Binary representation of struct.
"""
# Unpack struct
unpacked = struct.unpack_from(
PACKET_HEADER_FORMAT + "".join(message_format * 22),
binary_message,
)
# Remove header from struct
unpacked_wo_header = unpacked[len(asdict(packet_header)) : :]
# Get lap data for each active car
data_list = list()
for i in range(22):
data = message_type.from_unpacked(
unpacked_wo_header[
i * len(message_format) : (i + 1) * len(message_format)
]
)
data_list.append(data)
return data_list
@dataclass
class CarTelemetryData:
"""CarTelemetryData struct."""
m_speed: ctypes.c_uint16
m_throttle: ctypes.c_float
m_steer: ctypes.c_float
m_brake: ctypes.c_float
m_clutch: ctypes.c_uint8
m_gear: ctypes.c_int8
m_engine_rpm: ctypes.c_uint16
m_drs: ctypes.c_uint8
m_rev_lights_percent: ctypes.c_uint8
m_rev_lights_bit_value: ctypes.c_uint16
m_brakes_temperature: List[ctypes.c_uint16]
m_tyres_surface_temperature: List[ctypes.c_uint8]
m_tyres_inner_temperature: List[ctypes.c_uint8]
m_engine_temperature: ctypes.c_uint16
m_tyres_pressure: List[ctypes.c_float]
m_surface_type: List[ctypes.c_uint8]
@classmethod
def from_unpacked(cls, unpacked: List):
"""Parse unpacked struct into class attributes.
Args:
unpacked (list): Unpacked struct containing all
attributes to construct CarTelemetryData class.
"""
return cls(
unpacked[0],
unpacked[1],
unpacked[2],
unpacked[3],
unpacked[4],
unpacked[5],
unpacked[6],
unpacked[7],
unpacked[8],
unpacked[9],
list([unpacked[10], unpacked[11], unpacked[12], unpacked[13]]),
list([unpacked[14], unpacked[15], unpacked[16], unpacked[17]]),
list([unpacked[18], unpacked[19], unpacked[20], unpacked[21]]),
unpacked[22],
list([unpacked[23], unpacked[24], unpacked[25], unpacked[26]]),
list([unpacked[27], unpacked[28], unpacked[29], unpacked[30]]),
)
@dataclass
class PacketCarTelemetryData:
"""PacketCarTelemetryData struct."""
m_header: PacketHeader
m_car_telemetry_data: List[CarTelemetryData]
m_mfd_panel_index: ctypes.c_uint8
m_mfd_panel_index_secondary_player: ctypes.c_uint8
m_suggested_gear: ctypes.c_int8
@classmethod
def from_binary(cls, packet_header: PacketHeader, binary_message: str):
"""Create class form binary UDP package.
Args:
packet_header (PacketHeader): PacketHeader class.
binary_message (str): Binary representation of struct.
"""
# Unpack struct
unpacked = struct.unpack_from(
PACKET_HEADER_FORMAT
+ "".join(CAR_TELEMETRY_DATA_FORMAT * 22)
+ PACKET_CAR_TELEMETRY_DATA_FORMAT,
binary_message,
)
# Remove header from struct
unpacked_wo_header = unpacked[len(asdict(packet_header)) : :]
# Get telemetry for each active car
car_telemetry_data_list = list()
for i in range(22):
car_telemetry_data = CarTelemetryData.from_unpacked(
unpacked_wo_header[
i
* len(CAR_TELEMETRY_DATA_FORMAT) : (i + 1)
* len(CAR_TELEMETRY_DATA_FORMAT)
]
)
car_telemetry_data_list.append(car_telemetry_data)
return cls(
packet_header,
car_telemetry_data_list,
unpacked_wo_header[-3],
unpacked_wo_header[-2],
unpacked_wo_header[-1],
)
def get_player_car_data(self) -> dict:
"""Get data from player car."""
player_car_index = self.m_header.m_player_car_index
player_car_telemetry = self.m_car_telemetry_data[player_car_index]
player_telemetry_message = (
self.m_header.__dict__ | player_car_telemetry.__dict__.copy()
)
# Map tyre temperature values from list to attributes
player_telemetry_message = (
player_telemetry_message
| _telemetry_list_to_attributes(
player_telemetry_message["m_tyres_surface_temperature"],
"m_tyres_surface_temperature",
)
)
player_telemetry_message.pop("m_tyres_surface_temperature")
# Map tyre inner temperature values from list to attributes
player_telemetry_message = (
player_telemetry_message
| _telemetry_list_to_attributes(
player_telemetry_message["m_tyres_inner_temperature"],
"m_tyres_inner_temperature",
)
)
player_telemetry_message.pop("m_tyres_inner_temperature")
# Map brake temperature values from list to attributes
player_telemetry_message = (
player_telemetry_message
| _telemetry_list_to_attributes(
player_telemetry_message["m_brakes_temperature"],
"m_brakes_temperature",
)
)
player_telemetry_message.pop("m_brakes_temperature")
# Map tyres pressure values from list to attributes
player_telemetry_message = (
player_telemetry_message
| _telemetry_list_to_attributes(
player_telemetry_message["m_tyres_pressure"],
"m_tyres_pressure",
)
)
player_telemetry_message.pop("m_tyres_pressure")
player_telemetry_message.pop("m_surface_type")
return player_telemetry_message
@dataclass
class LapData:
"""LapData struct."""
m_lastLapTimeInMS: ctypes.c_uint32
m_currentLapTimeInMS: ctypes.c_uint32
m_sector1TimeInMS: ctypes.c_uint16
m_sector2TimeInMS: ctypes.c_uint16
m_lapDistance: ctypes.c_uint32
m_currentLapNum: ctypes.c_uint8
@classmethod
def from_unpacked(cls, unpacked: List):
"""Parse unpacked struct into class attributes.
Args:
unpacked (list): Unpacked struct containing all
                attributes to construct LapData class.
"""
return cls(
unpacked[0], unpacked[1], unpacked[2], unpacked[3], unpacked[4], unpacked[8]
)
@dataclass
class PacketLapData(PacketWOAdditionalAttributes):
"""PacketCarTelemetryData struct."""
m_lap_data: List[LapData]
@classmethod
def from_binary(cls, packet_header: PacketHeader, binary_message: str):
"""Create class form binary UDP package.
Args:
packet_header (PacketHeader): PacketHeader class.
binary_message (str): Binary representation of struct.
"""
lap_data_list = cls.get_message_list(
packet_header, binary_message, LAP_DATA_FORMAT, LapData
)
return cls(packet_header, lap_data_list)
def get_player_car_data(self) -> dict:
"""Get data from player car."""
player_car_index = self.m_header.m_player_car_index
player_values = (
self.m_header.__dict__ | self.m_lap_data[player_car_index].__dict__.copy()
)
return player_values
@dataclass
class CarStatusData:
"""CarStatusData struct."""
m_fuelInTank: ctypes.c_float
m_fuelCapacity: ctypes.c_float
m_fuelRemainingLaps: ctypes.c_float
m_actualTyreCompound: ctypes.c_uint8
m_tyresAgeLaps: ctypes.c_uint8
m_ersStoreEnergy: ctypes.c_float
m_ersDeployMode: ctypes.c_uint8
m_ersHarvestedThisLapMGUK: ctypes.c_float
m_ersHarvestedThisLapMGUH: ctypes.c_float
m_ersDeployedThisLap: ctypes.c_float
@classmethod
def from_unpacked(cls, unpacked: List):
"""Parse unpacked struct into class attributes.
Args:
unpacked (list): Unpacked struct containing all
                attributes to construct CarStatusData class.
"""
return cls(
unpacked[5],
unpacked[6],
unpacked[7],
unpacked[13],
unpacked[15],
unpacked[17],
unpacked[18],
unpacked[19],
unpacked[20],
unpacked[21],
)
@dataclass
class PacketCarStatusData(PacketWOAdditionalAttributes):
"""PacketCarStatusData struct."""
m_carStatusData: List[CarStatusData]
@classmethod
def from_binary(cls, packet_header: PacketHeader, binary_message: str):
"""Create class form binary UDP package.
Args:
packet_header (PacketHeader): PacketHeader class.
binary_message (str): Binary representation of struct.
"""
car_status_data_list = cls.get_message_list(
packet_header, binary_message, CAR_STATUS_DATA_FORMAT, CarStatusData
)
return cls(packet_header, car_status_data_list)
def get_player_car_data(self) -> dict:
"""Get data from player car."""
player_car_index = self.m_header.m_player_car_index
player_values = (
self.m_header.__dict__
| self.m_carStatusData[player_car_index].__dict__.copy()
)
return player_values
@dataclass
class CarDamageData:
"""CarStatusData struct."""
m_tyresWear: ctypes.c_float
m_tyresDamage: ctypes.c_uint8
m_brakesDamage: ctypes.c_uint8
@classmethod
def from_unpacked(cls, unpacked: List):
"""Parse unpacked struct into class attributes.
Args:
unpacked (list): Unpacked struct containing all
                attributes to construct CarDamageData class.
"""
return cls(
list([unpacked[0], unpacked[1], unpacked[2], unpacked[3]]),
list([unpacked[4], unpacked[5], unpacked[6], unpacked[7]]),
list([unpacked[8], unpacked[9], unpacked[10], unpacked[11]]),
)
@dataclass
class PacketCarDamageData(PacketWOAdditionalAttributes):
"""PacketCarStatusData struct."""
m_carDamageData: List[CarDamageData]
@classmethod
def from_binary(cls, packet_header: PacketHeader, binary_message: str):
"""Create class form binary UDP package.
Args:
packet_header (PacketHeader): PacketHeader class.
binary_message (str): Binary representation of struct.
"""
car_damage_data_list = cls.get_message_list(
packet_header, binary_message, CAR_DAMAGE_DATA_FORMAT, CarDamageData
)
return cls(packet_header, car_damage_data_list)
def get_player_car_data(self) -> dict:
"""Get data from player car."""
player_car_index = self.m_header.m_player_car_index
player_car_damage = self.m_carDamageData[player_car_index]
player_car_damage_message = (
self.m_header.__dict__ | player_car_damage.__dict__.copy()
)
# Map tyre wear values from list to attributes
player_car_damage_message = (
player_car_damage_message
| _telemetry_list_to_attributes(
player_car_damage_message["m_tyresWear"],
"m_tyresWear",
)
)
player_car_damage_message.pop("m_tyresWear")
# Map tyre damage values from list to attributes
player_car_damage_message = (
player_car_damage_message
| _telemetry_list_to_attributes(
player_car_damage_message["m_tyresDamage"],
"m_tyresDamage",
)
)
player_car_damage_message.pop("m_tyresDamage")
# Map brake damage values from list to attributes
player_car_damage_message = (
player_car_damage_message
| _telemetry_list_to_attributes(
player_car_damage_message["m_brakesDamage"],
"m_brakesDamage",
)
)
player_car_damage_message.pop("m_brakesDamage")
return player_car_damage_message
| [
"dataclasses.asdict",
"struct.unpack_from"
] | [((1886, 1935), 'struct.unpack_from', 'struct.unpack_from', (['format_string', 'binary_message'], {}), '(format_string, binary_message)\n', (1904, 1935), False, 'import struct\n'), ((2969, 2990), 'dataclasses.asdict', 'asdict', (['packet_header'], {}), '(packet_header)\n', (2975, 2990), False, 'from dataclasses import dataclass, asdict\n'), ((5875, 5896), 'dataclasses.asdict', 'asdict', (['packet_header'], {}), '(packet_header)\n', (5881, 5896), False, 'from dataclasses import dataclass, asdict\n')] |
import os
from fastapi import FastAPI, HTTPException
from github3.exceptions import NotFoundError, ForbiddenError
from github3.github import GitHub
from github3.pulls import PullRequest
from pydantic import BaseModel
GITHUB_PRIVATE_KEY = os.environ.get('APP_PRIVATE_KEY', None)
GITHUB_APP_IDENTIFIER = os.environ.get('APP_IDENTIFIER', None)
if not GITHUB_PRIVATE_KEY:
GITHUB_PRIVATE_KEY = open('private-key.pem', 'rt').read()
app = FastAPI()
class ActionIn(BaseModel):
content: str
owner: str
repository: str
pr_number: int
@property
def repo(self) -> str:
return f'{self.owner}/{self.repository}'
@app.post('/comment')
def comment_on_pr(action: ActionIn):
gh = login_as_installation(action)
get_pr(gh, action).create_comment(action.content)
return "Post Success", 200
@app.post('/reaction')
def react_to_pr(action: ActionIn):
gh = login_as_installation(action)
issue = get_pr(gh, action).issue()
issue._post(
issue._api + '/reactions',
data={"content": action.content},
headers={'Accept': 'application/vnd.github.squirrel-girl-preview+json'}
)
return "Post Success", 200
def login_as_installation(action: ActionIn):
try:
gh = GitHub()
gh.login_as_app(GITHUB_PRIVATE_KEY.encode(), GITHUB_APP_IDENTIFIER)
install = gh.app_installation_for_repository(action.owner, action.repository)
gh.login_as_app_installation(
GITHUB_PRIVATE_KEY.encode(),
GITHUB_APP_IDENTIFIER,
install.id
)
return gh
except NotFoundError:
raise HTTPException(404, f"OpeAPI Perf App not installed to {action.repo}")
def get_pr(gh, action: ActionIn) -> PullRequest:
try:
return gh.pull_request(
owner=action.owner,
repository=action.repository,
number=action.pr_number
)
except ForbiddenError:
raise HTTPException(403, f"Application not setup for the repository {action.repo}")
except NotFoundError:
raise HTTPException(404, f"PR #{action.pr_number} does not exist in {action.repo}")
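# A minimal, hypothetical client call for the /comment endpoint above; the payload fields
# mirror the ActionIn model, while the URL and values are placeholders (assumes the app is
# served locally, e.g. with `uvicorn <module>:app`, and that `requests` is available).
import requests

def example_comment_call():
    payload = {
        "content": "Benchmark results look good.",
        "owner": "some-org",
        "repository": "some-repo",
        "pr_number": 42,
    }
    return requests.post("http://127.0.0.1:8000/comment", json=payload)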
| [
"os.environ.get",
"github3.github.GitHub",
"fastapi.HTTPException",
"fastapi.FastAPI"
] | [((240, 279), 'os.environ.get', 'os.environ.get', (['"""APP_PRIVATE_KEY"""', 'None'], {}), "('APP_PRIVATE_KEY', None)\n", (254, 279), False, 'import os\n'), ((304, 342), 'os.environ.get', 'os.environ.get', (['"""APP_IDENTIFIER"""', 'None'], {}), "('APP_IDENTIFIER', None)\n", (318, 342), False, 'import os\n'), ((440, 449), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (447, 449), False, 'from fastapi import FastAPI, HTTPException\n'), ((1246, 1254), 'github3.github.GitHub', 'GitHub', ([], {}), '()\n', (1252, 1254), False, 'from github3.github import GitHub\n'), ((1624, 1693), 'fastapi.HTTPException', 'HTTPException', (['(404)', 'f"""OpeAPI Perf App not installed to {action.repo}"""'], {}), "(404, f'OpeAPI Perf App not installed to {action.repo}')\n", (1637, 1693), False, 'from fastapi import FastAPI, HTTPException\n'), ((1947, 2024), 'fastapi.HTTPException', 'HTTPException', (['(403)', 'f"""Application not setup for the repository {action.repo}"""'], {}), "(403, f'Application not setup for the repository {action.repo}')\n", (1960, 2024), False, 'from fastapi import FastAPI, HTTPException\n'), ((2065, 2142), 'fastapi.HTTPException', 'HTTPException', (['(404)', 'f"""PR #{action.pr_number} does not exist in {action.repo}"""'], {}), "(404, f'PR #{action.pr_number} does not exist in {action.repo}')\n", (2078, 2142), False, 'from fastapi import FastAPI, HTTPException\n')] |
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
'''
print(lemmatizer.lemmatize("cacti"))
print(lemmatizer.lemmatize("geese"))
print(lemmatizer.lemmatize("rocks"))
print(lemmatizer.lemmatize("python"))
'''
#default pos="n"(noun)
#"a" = adjective, "v" = verb
#lemmas give back actual words, usually better then stemmers
print(lemmatizer.lemmatize("better", pos="a"))
print(lemmatizer.lemmatize("best", pos="a"))
print(lemmatizer.lemmatize("run", pos="v"))
print(lemmatizer.lemmatize("run"))
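# A common companion pattern, not shown in the snippet above: derive the `pos` argument
# from a part-of-speech tag. Assumes the NLTK tagger and WordNet data have been downloaded.
from nltk import pos_tag
from nltk.corpus import wordnet

def wordnet_pos(word):
    # pos_tag returns Penn Treebank tags such as "JJ", "VB", "NN", "RB"
    tag = pos_tag([word])[0][1][0].upper()
    return {"J": wordnet.ADJ, "V": wordnet.VERB, "N": wordnet.NOUN, "R": wordnet.ADV}.get(tag, wordnet.NOUN)

print(lemmatizer.lemmatize("running", pos=wordnet_pos("running")))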
| [
"nltk.stem.WordNetLemmatizer"
] | [((54, 73), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (71, 73), False, 'from nltk.stem import WordNetLemmatizer\n')] |
from django.urls import reverse
from django.db import models
# Create your models here
class Location(models.Model):
name = models.CharField(max_length=60)
def __str__(self):
return self.name
class Category(models.Model):
name = models.CharField(max_length=200)
slug = models.SlugField(unique=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('gallery_list',args=[self.slug])
class Image(models.Model):
location = models.ForeignKey(Location,on_delete=models.CASCADE) # one image belongs to a single location
category = models.ForeignKey(Category,on_delete=models.CASCADE) # one image belongs to a single category
name= models.CharField(max_length=200)
description = models.TextField(max_length=300)
image = models.ImageField(upload_to = 'articles/',blank=True)
def get_absolute_url(self):
return reverse('gallery_detail',args=[self.id])
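# The reverse() calls above assume URL patterns named 'gallery_list' and 'gallery_detail';
# a minimal, hypothetical urls.py wiring them up could look like this (the view names are
# placeholders, not part of this app):
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('gallery/<slug:slug>/', views.gallery_list, name='gallery_list'),
#     path('image/<int:pk>/', views.gallery_detail, name='gallery_detail'),
# ]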
| [
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.SlugField",
"django.db.models.ImageField",
"django.urls.reverse"
] | [((129, 160), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (145, 160), False, 'from django.db import models\n'), ((254, 286), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (270, 286), False, 'from django.db import models\n'), ((298, 327), 'django.db.models.SlugField', 'models.SlugField', ([], {'unique': '(True)'}), '(unique=True)\n', (314, 327), False, 'from django.db import models\n'), ((510, 563), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Location'], {'on_delete': 'models.CASCADE'}), '(Location, on_delete=models.CASCADE)\n', (527, 563), False, 'from django.db import models\n'), ((619, 672), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Category'], {'on_delete': 'models.CASCADE'}), '(Category, on_delete=models.CASCADE)\n', (636, 672), False, 'from django.db import models\n'), ((723, 755), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (739, 755), False, 'from django.db import models\n'), ((774, 806), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (790, 806), False, 'from django.db import models\n'), ((819, 871), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""articles/"""', 'blank': '(True)'}), "(upload_to='articles/', blank=True)\n", (836, 871), False, 'from django.db import models\n'), ((425, 466), 'django.urls.reverse', 'reverse', (['"""gallery_list"""'], {'args': '[self.slug]'}), "('gallery_list', args=[self.slug])\n", (432, 466), False, 'from django.urls import reverse\n'), ((921, 962), 'django.urls.reverse', 'reverse', (['"""gallery_detail"""'], {'args': '[self.id]'}), "('gallery_detail', args=[self.id])\n", (928, 962), False, 'from django.urls import reverse\n')] |
from PIL import ImageFont
def analysis(obj):
params = dict()
params['size'] = size(obj)
params['rgb'] = color(obj)
params['lines'] = line(obj)
params['ellipses'] = ellipse_and_rectangle(obj, 'ellipses')
params['rectangles'] = ellipse_and_rectangle(obj, 'rectangles')
params['texts'] = text(obj)
params['store'] = store(obj)
params['point'] = point(obj)
params['opacity'] = opacity(obj)
params['original'] = o if (o := obj.get('original')) else False
params['colour'] = colour(obj)
params['album'] = album(obj)
return params
def opacity(obj):
if a := o if (o := obj.get('a')) else None:
return op if 0 <= (op := int(a)) <= 255 else None
return None
def album(obj):
data = params.split(',') if (params := obj.get('album')) else None
if data and len(data) >= 2:
return [data[0], data[1]]
return None
def colour(obj):
data = params.split(',') if (params := obj.get('colour')) else None
if data and len(data) >= 7:
return [int(data[0]), (int(data[1]), int(data[2]), int(data[3])), (int(data[4]), int(data[5]), int(data[6]))]
return None
def store(obj):
bg = None if not obj.get('store') else obj.get('store').split(',')
if bg:
if len(bg) >= 2:
bg_args = [bg[0], bg[1]]
return bg_args
if len(bg) >= 1:
bg_args = [bg[0], '0']
return bg_args
else:
return None
def point(obj):
return None if not obj.get('point') else float(obj.get('point'))
def size(obj):
width = int(obj.get('width') or obj.get('w') or '400')
height = int(obj.get('height') or obj.get('h') or '300')
return width, height
def color(obj):
rgb = (obj.get('rgb') or '200,200,200').split(',')
rgb[0] = rgb[0] if not obj.get('r') else obj.get('r')
rgb[1] = rgb[1] if not obj.get('g') else obj.get('g')
rgb[2] = rgb[2] if not obj.get('b') else obj.get('b')
return int(rgb[0]), int(rgb[1]), int(rgb[2])
def line(obj):
lines = list()
if lines_args := obj.get('lines'):
line_args = lines_args.split(';')
for i in line_args:
try:
line_arg = i.split(',')
if len(line_arg) >= 7:
lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])),
(int(line_arg[4]), int(line_arg[5]), int(line_arg[6]))])
elif len(line_arg) >= 4:
lines.append([(int(line_arg[0]), int(line_arg[1]), int(line_arg[2]), int(line_arg[3])), (0, 0, 0)])
except Exception as ex:
print(str(ex))
return lines
def ellipse_and_rectangle(obj, shape):
shapes = list()
if shapes_args := obj.get(shape):
shape_args = shapes_args.split(';')
for i in shape_args:
try:
shape_arg = i.split(',')
if len(shape_arg) >= 10:
shapes.append(
[(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])),
(int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])),
(int(shape_arg[7]), int(shape_arg[8]), int(shape_arg[9]))])
elif len(shape_arg) >= 7:
shapes.append(
[(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])),
(int(shape_arg[4]), int(shape_arg[5]), int(shape_arg[6])),
(0, 0, 0)])
elif len(shape_arg) >= 4:
shapes.append(
[(int(shape_arg[0]), int(shape_arg[1]), int(shape_arg[2]), int(shape_arg[3])),
(0, 0, 0), (0, 0, 0)])
except Exception as ex:
print(str(ex))
return shapes
def text(obj):
texts = list()
if texts_args := obj.get('texts'):
text_args = texts_args.split(';')
# ttf = '/home/ahri/code/AhriImage/Image/font.ttf'
ttf = '/project/Image/font.ttf'
for i in text_args:
text_arg = i.split(',')
if len(text_arg) >= 7:
texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2],
(int(text_arg[3]), int(text_arg[3]), int(text_arg[5])),
ImageFont.truetype(ttf, int(text_arg[6]))])
elif len(text_arg) >= 6:
texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2],
(int(text_arg[3]), int(text_arg[3]), int(text_arg[5])),
ImageFont.truetype(ttf, 30)])
if len(text_args) >= 3:
texts.append([(int(text_arg[0]), int(text_arg[1])), text_arg[2], (0, 0, 0),
ImageFont.truetype(ttf, 30)])
return texts
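# A small, hypothetical example of the mapping `analysis` expects: every value is a string,
# mirroring URL query parameters (the 'texts' key is omitted here because text() loads a
# hard-coded font path).
example_query = {
    "w": "640",
    "h": "480",
    "rgb": "255,255,255",
    "lines": "0,0,639,479,255,0,0",        # x1,y1,x2,y2,r,g,b
    "rectangles": "10,10,100,100,0,0,0",   # x1,y1,x2,y2,r,g,b
}
# params = analysis(example_query)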
| [
"PIL.ImageFont.truetype"
] | [((4858, 4885), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['ttf', '(30)'], {}), '(ttf, 30)\n', (4876, 4885), False, 'from PIL import ImageFont\n'), ((4670, 4697), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['ttf', '(30)'], {}), '(ttf, 30)\n', (4688, 4697), False, 'from PIL import ImageFont\n')] |
'''Combines oslo bors and yahoo data'''
import numpy as np
import pandas as pd
from pprint import pprint
import scrapeconfig as cng
def merge_bors_and_yahoo_dfs(bors_name: str, yahoo_name: str, result_filename: str):
'''
Get filenames for csv files from Oslo Bors and Yahoo Finance and merges them
to one large dataset.
'''
df_bors = pd.read_csv(bors_name)
df_stats = pd.read_csv(yahoo_name)
# Some of the features from Yahoo Finance
# are very sparse, so here I am picking the ones
# that are not so sparse and that I FEEL makes
# makes sense to include.
df_stats = df_stats[cng.SELECTED_FEATURES]
df_combined = pd.merge(df_bors, df_stats, on=cng.MERGE_DFS_ON)
df_combined.set_index(cng.MERGE_DFS_ON, inplace=True)
df_combined.to_csv(cng.FINALDATASET_FILENAME)
if __name__ == '__main__':
merge_bors_and_yahoo_dfs(cng.BORS_CSV_NAME, cng.YAHOO_CSV_NAME, cng.FINALDATASET_FILENAME)
| [
"pandas.read_csv",
"pandas.merge"
] | [((371, 393), 'pandas.read_csv', 'pd.read_csv', (['bors_name'], {}), '(bors_name)\n', (382, 393), True, 'import pandas as pd\n'), ((410, 433), 'pandas.read_csv', 'pd.read_csv', (['yahoo_name'], {}), '(yahoo_name)\n', (421, 433), True, 'import pandas as pd\n'), ((692, 740), 'pandas.merge', 'pd.merge', (['df_bors', 'df_stats'], {'on': 'cng.MERGE_DFS_ON'}), '(df_bors, df_stats, on=cng.MERGE_DFS_ON)\n', (700, 740), True, 'import pandas as pd\n')] |
"""
Goldair WiFi Heater device.
"""
import logging
import json
from homeassistant.const import (
ATTR_TEMPERATURE, TEMP_CELSIUS, STATE_UNAVAILABLE
)
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE, ATTR_PRESET_MODE,
HVAC_MODE_OFF, HVAC_MODE_HEAT,
SUPPORT_TARGET_TEMPERATURE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE
)
from custom_components.goldair_climate import GoldairTuyaDevice
_LOGGER = logging.getLogger(__name__)
ATTR_TARGET_TEMPERATURE = 'target_temperature'
ATTR_CHILD_LOCK = 'child_lock'
ATTR_FAULT = 'fault'
ATTR_POWER_MODE_AUTO = 'auto'
ATTR_POWER_MODE_USER = 'user'
ATTR_POWER_LEVEL = 'power_level'
ATTR_DISPLAY_ON = 'display_on'
ATTR_POWER_MODE = 'power_mode'
ATTR_ECO_TARGET_TEMPERATURE = 'eco_' + ATTR_TARGET_TEMPERATURE
STATE_COMFORT = 'Comfort'
STATE_ECO = 'Eco'
STATE_ANTI_FREEZE = 'Anti-freeze'
PROPERTY_TO_DPS_ID = {
ATTR_HVAC_MODE: '1',
ATTR_TARGET_TEMPERATURE: '2',
ATTR_TEMPERATURE: '3',
ATTR_PRESET_MODE: '4',
ATTR_CHILD_LOCK: '6',
ATTR_FAULT: '12',
ATTR_POWER_LEVEL: '101',
ATTR_DISPLAY_ON: '104',
ATTR_POWER_MODE: '105',
ATTR_ECO_TARGET_TEMPERATURE: '106'
}
# GOLDAIR GECO270
PROPERTY_TO_DPS_ID_GECO270 = {
ATTR_HVAC_MODE: '1',
ATTR_TARGET_TEMPERATURE: '3',
ATTR_TEMPERATURE: '4',
ATTR_PRESET_MODE: '5',
ATTR_CHILD_LOCK: '2',
ATTR_FAULT: '12',
ATTR_POWER_LEVEL: '101',
ATTR_DISPLAY_ON: '104',
ATTR_POWER_MODE: '105',
ATTR_ECO_TARGET_TEMPERATURE: '106'
}
HVAC_MODE_TO_DPS_MODE = {
HVAC_MODE_OFF: False,
HVAC_MODE_HEAT: True
}
PRESET_MODE_TO_DPS_MODE = {
STATE_COMFORT: 'C',
STATE_ECO: 'ECO',
STATE_ANTI_FREEZE: 'AF'
}
POWER_LEVEL_TO_DPS_LEVEL = {
'Stop': 'stop',
'1': '1',
'2': '2',
'3': '3',
'4': '4',
'5': '5',
'Auto': 'auto'
}
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE | SUPPORT_SWING_MODE
class GoldairHeater(ClimateDevice):
"""Representation of a Goldair WiFi heater."""
def __init__(self, device):
"""Initialize the heater.
Args:
name (str): The device's name.
device (GoldairTuyaDevice): The device API instance."""
self._device = device
self._support_flags = SUPPORT_FLAGS
self._TEMPERATURE_STEP = 1
self._TEMPERATURE_LIMITS = {
STATE_COMFORT: {
'min': 5,
'max': 37
},
STATE_ECO: {
'min': 5,
'max': 21
}
}
# self._model = model
# _LOGGER.info(f'Setting model to {model}')
@property
def get_property_to_dps_id(self):
"""Get the correct PROPERTY_TO_DPS_ID depending on the model of the heater you have"""
if self._device.model == "GECO270":
return PROPERTY_TO_DPS_ID_GECO270
else:
return PROPERTY_TO_DPS_ID
@property
def supported_features(self):
"""Return the list of supported features."""
return self._support_flags
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def name(self):
"""Return the name of the climate device."""
return self._device.name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return self._device.temperature_unit
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
if self.preset_mode == STATE_COMFORT:
return self._device.get_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE])
elif self.preset_mode == STATE_ECO:
return self._device.get_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE])
else:
return None
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self._TEMPERATURE_STEP
@property
def min_temp(self):
"""Return the minimum temperature."""
if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE:
return self._TEMPERATURE_LIMITS[self.preset_mode]['min']
else:
return None
@property
def max_temp(self):
"""Return the maximum temperature."""
if self.preset_mode and self.preset_mode != STATE_ANTI_FREEZE:
return self._TEMPERATURE_LIMITS[self.preset_mode]['max']
else:
return None
def set_temperature(self, **kwargs):
"""Set new target temperatures."""
if kwargs.get(ATTR_PRESET_MODE) is not None:
self.set_preset_mode(kwargs.get(ATTR_PRESET_MODE))
if kwargs.get(ATTR_TEMPERATURE) is not None:
self.set_target_temperature(kwargs.get(ATTR_TEMPERATURE))
def set_target_temperature(self, target_temperature):
target_temperature = int(round(target_temperature))
preset_mode = self.preset_mode
if preset_mode == STATE_ANTI_FREEZE:
raise ValueError('You cannot set the temperature in Anti-freeze mode.')
limits = self._TEMPERATURE_LIMITS[preset_mode]
if not limits['min'] <= target_temperature <= limits['max']:
raise ValueError(
f'Target temperature ({target_temperature}) must be between '
f'{limits["min"]} and {limits["max"]}'
)
if preset_mode == STATE_COMFORT:
self._device.set_property(self.get_property_to_dps_id[ATTR_TARGET_TEMPERATURE], target_temperature)
elif preset_mode == STATE_ECO:
self._device.set_property(self.get_property_to_dps_id[ATTR_ECO_TARGET_TEMPERATURE], target_temperature)
@property
def current_temperature(self):
"""Return the current temperature."""
return self._device.get_property(self.get_property_to_dps_id[ATTR_TEMPERATURE])
@property
def hvac_mode(self):
"""Return current HVAC mode, ie Heat or Off."""
dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_HVAC_MODE])
if dps_mode is not None:
return GoldairTuyaDevice.get_key_for_value(HVAC_MODE_TO_DPS_MODE, dps_mode)
else:
return STATE_UNAVAILABLE
@property
def hvac_modes(self):
"""Return the list of available HVAC modes."""
return list(HVAC_MODE_TO_DPS_MODE.keys())
def set_hvac_mode(self, hvac_mode):
"""Set new HVAC mode."""
dps_mode = HVAC_MODE_TO_DPS_MODE[hvac_mode]
self._device.set_property(self.get_property_to_dps_id[ATTR_HVAC_MODE], dps_mode)
@property
def preset_mode(self):
"""Return current preset mode, ie Comfort, Eco, Anti-freeze."""
dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_PRESET_MODE])
keys = list(self.get_property_to_dps_id)
if dps_mode not in keys:
_LOGGER.debug(f'Could not load correct preset mode from api status. Defaulting to Comfort')
_LOGGER.debug(f'dps_mode was: {dps_mode}, PROPERTY_TO_DPS_ID was: {json.dumps(self.get_property_to_dps_id)}')
dps_mode = 'C'
if dps_mode is not None:
return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode)
else:
return None
@property
def preset_modes(self):
"""Return the list of available preset modes."""
return list(PRESET_MODE_TO_DPS_MODE.keys())
def set_preset_mode(self, preset_mode):
"""Set new preset mode."""
dps_mode = PRESET_MODE_TO_DPS_MODE[preset_mode]
self._device.set_property(self.get_property_to_dps_id[ATTR_PRESET_MODE], dps_mode)
@property
def swing_mode(self):
"""Return the power level."""
dps_mode = self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_MODE])
if dps_mode == ATTR_POWER_MODE_USER:
return self._device.get_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL])
elif dps_mode == ATTR_POWER_MODE_AUTO:
return GoldairTuyaDevice.get_key_for_value(POWER_LEVEL_TO_DPS_LEVEL, dps_mode)
else:
return None
@property
def swing_modes(self):
"""List of power levels."""
return list(POWER_LEVEL_TO_DPS_LEVEL.keys())
def set_swing_mode(self, swing_mode):
"""Set new power level."""
new_level = swing_mode
if new_level not in POWER_LEVEL_TO_DPS_LEVEL.keys():
raise ValueError(f'Invalid power level: {new_level}')
dps_level = POWER_LEVEL_TO_DPS_LEVEL[new_level]
self._device.set_property(self.get_property_to_dps_id[ATTR_POWER_LEVEL], dps_level)
def update(self):
self._device.refresh()
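# GoldairTuyaDevice.get_key_for_value comes from the parent integration; judging by its use
# above it behaves like a reverse dictionary lookup. A stand-alone sketch, not the actual
# implementation:
def _example_get_key_for_value(mapping, value, fallback=None):
    """Return the first key in `mapping` whose value equals `value`."""
    return next((key for key, val in mapping.items() if val == value), fallback)
# _example_get_key_for_value(HVAC_MODE_TO_DPS_MODE, True)    -> HVAC_MODE_HEAT
# _example_get_key_for_value(PRESET_MODE_TO_DPS_MODE, 'ECO') -> STATE_ECO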
| [
"custom_components.goldair_climate.GoldairTuyaDevice.get_key_for_value",
"logging.getLogger",
"json.dumps"
] | [((487, 514), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (504, 514), False, 'import logging\n'), ((6228, 6296), 'custom_components.goldair_climate.GoldairTuyaDevice.get_key_for_value', 'GoldairTuyaDevice.get_key_for_value', (['HVAC_MODE_TO_DPS_MODE', 'dps_mode'], {}), '(HVAC_MODE_TO_DPS_MODE, dps_mode)\n', (6263, 6296), False, 'from custom_components.goldair_climate import GoldairTuyaDevice\n'), ((7304, 7374), 'custom_components.goldair_climate.GoldairTuyaDevice.get_key_for_value', 'GoldairTuyaDevice.get_key_for_value', (['PRESET_MODE_TO_DPS_MODE', 'dps_mode'], {}), '(PRESET_MODE_TO_DPS_MODE, dps_mode)\n', (7339, 7374), False, 'from custom_components.goldair_climate import GoldairTuyaDevice\n'), ((8165, 8236), 'custom_components.goldair_climate.GoldairTuyaDevice.get_key_for_value', 'GoldairTuyaDevice.get_key_for_value', (['POWER_LEVEL_TO_DPS_LEVEL', 'dps_mode'], {}), '(POWER_LEVEL_TO_DPS_LEVEL, dps_mode)\n', (8200, 8236), False, 'from custom_components.goldair_climate import GoldairTuyaDevice\n'), ((7181, 7220), 'json.dumps', 'json.dumps', (['self.get_property_to_dps_id'], {}), '(self.get_property_to_dps_id)\n', (7191, 7220), False, 'import json\n')] |
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from random import randint
from time import sleep
from discord import Webhook, RequestsWebhookAdapter
import json
# Loads config file; note that this rebinds the name `json` from the module to the loaded
# dict, so later code reads settings as json['key'] instead of using the json module again.
json = json.load(open('config.json', 'r'))
webhook = Webhook.from_url(
json['discord_webook'],
adapter=RequestsWebhookAdapter()) # Creates webhook using discord url
driver = webdriver.Firefox(
executable_path=json['executable_path']) # Creates WebDriver instance
url = "https://www.bestbuy.com"
timeout = 3 # Timeout for element loaded checks
purchased = open('purchased.txt', 'r').read()
def navigate_to_bb():
"""
* Navigates to the URL supplied, by default this is BestBuy.com
"""
driver.get(url)
print("navigated to bestbuy")
def navigate_to_product():
"""
* Navigates to the URL supplied + the product URL
"""
driver.get(url + json['url'])
def check_if_in_stock():
"""
This function tries to find the Add To Cart button, if it does not find it, it means it is out
of stock currently and it throws a NoSuchElementException.
:return: Returns True for in stock and False for not in stock
:rtype: None Type
"""
try:
not_sold_out = driver.find_element_by_css_selector(
'button.btn-primary:nth-child(1)')
except NoSuchElementException:
return False
return True
def add_to_cart():
"""
This function finds the Add to Cart button, and then adds the product to cart
:rtype: object
"""
try:
element_present = EC.presence_of_element_located(
(By.CSS_SELECTOR, 'button.btn-primary:nth-child(1)'))
WebDriverWait(driver, timeout).until(element_present)
except TimeoutException:
print("Timed out waiting for page to load")
add_to_cart_button = driver.find_element_by_css_selector(
"button.btn-primary:nth-child(1)")
add_to_cart_button.click()
print("added to cart")
def navigate_to_cart():
"""
This function navigates to the BestBuy cart page
"""
driver.get(url + "/cart")
print("navigated to cart")
return driver.title
def change_zip_code_and_select_shipping():
"""
This function first selects the ZipCode element on the cart page, then types the correct
zip code for shipping, and then clicks update location.
:rtype: object
"""
try:
element_present = EC.presence_of_element_located(
(By.CSS_SELECTOR, '.change-zipcode-link'))
WebDriverWait(driver, 10).until(element_present)
except TimeoutException:
print("Timed out waiting for page to load")
zip_code_click = driver.find_element_by_css_selector(
".change-zipcode-link")
zip_code_click.send_keys(Keys.ENTER)
print("clicked on zip code")
zip_code_change = driver.find_element_by_css_selector(
"#location")
zip_code_change.send_keys(json['zip_code'])
update = driver.find_element_by_css_selector(
'#item-availability-links > button:nth-child(3)')
update.click()
print("changed zip code")
def click_checkout_key():
"""
This function clicks the checkout button on the BestBuy cart page
:rtype: object
"""
checkout_button = driver.find_element_by_css_selector(
".btn-lg")
checkout_button.click()
print("checkout started")
def select_guest_checkout():
"""
This function selects the Checkout as Guest option on the page following the BestBuy cart
:rtype: object
"""
try:
element_present = EC.presence_of_element_located(
(By.CSS_SELECTOR, '.cia-guest-content__continue'))
WebDriverWait(driver, 9).until(element_present)
except TimeoutException:
print("Timed out waiting for page to load")
guest = driver.find_element_by_css_selector('.cia-guest-content__continue')
guest.click()
def sign_in_and_click_button():
"""
This function types the supplied email and password and then clicks the Sign In button.
:rtype: object
"""
try:
element_present = EC.presence_of_element_located(
(By.CSS_SELECTOR,
'.cia-form__controls__submit'))
WebDriverWait(driver, timeout).until(element_present)
except TimeoutException:
print("Timed out waiting for page to load")
email = driver.find_element_by_id("fld-e")
email.send_keys(json['email'])
print("email typed")
password = driver.find_element_by_id("fld-p1")
password.send_keys(json['password'])
print("password typed")
button = driver.find_element_by_css_selector(
'.cia-form__controls__submit')
button.click()
print("signed in")
def check_if_verify():
"""
This function checks if the account has been flagged for manual user verification
:rtype: object
"""
try:
verify = driver.find_element_by_css_selector(
'h1.cia-section-title').text
if "Verify Your Account" in verify:
return False
else:
return True
except NoSuchElementException:
return False
# return True
def check_if_shipping_info_needed():
"""
This function checks to see if the bot needs to input the shipping information if the user has been
signed in using the previous functions
:rtype: object
"""
try:
element_present = EC.presence_of_element_located(
(By.ID, 'consolidatedAddresses.ui_address_2.firstName'))
WebDriverWait(driver, 3).until(element_present)
except BaseException:
return False
return True
def input_shipping_information():
"""
This function inputs the shipping information that the user provides if they have been logged in with
previous functions
:rtype: object
"""
try:
element_present = EC.presence_of_element_located(
(By.ID, 'consolidatedAddresses.ui_address_2.firstName'))
WebDriverWait(driver, timeout).until(element_present)
except TimeoutException:
print("Timed out waiting for page to load")
fname = driver.find_element_by_id(
"consolidatedAddresses.ui_address_2.firstName")
fname.send_keys(json['first_name'])
print("fname typed")
lname = driver.find_element_by_id(
"consolidatedAddresses.ui_address_2.lastName")
lname.send_keys(json["last_name"])
print("lname typed")
suggestions = driver.find_element_by_css_selector(".autocomplete__toggle")
if "Hide Suggestions" in suggestions.text:
suggestions.click()
print("suggestions removed")
address = driver.find_element_by_id(
"consolidatedAddresses.ui_address_2.street")
address.send_keys(json['address'])
print("street address typed")
city = driver.find_element_by_id("consolidatedAddresses.ui_address_2.city")
city.send_keys(json['city'])
print("city typed")
select = Select(driver.find_element_by_id(
'consolidatedAddresses.ui_address_2.state'))
select.select_by_visible_text(json['state'])
print("state selected")
zip_code = driver.find_element_by_id(
'consolidatedAddresses.ui_address_2.zipcode')
zip_code.send_keys(json['zip_code'])
print("zip code address section typed")
def input_shipping_info_guest():
"""
This function inputs the shipping information that the user provides if they have selected to checkout
as a guest
:rtype: object
"""
fname = driver.find_element_by_xpath(
"/html[1]/body[1]/div[1]/div[2]/div[1]/div[2]/div[1]/div[1]/main[1]/div[2]/div[2]/form[1]/section[1]/div[1]/div[1]/div[1]/div[1]/section[1]/div[2]/div[1]/section[1]/section[1]/div[1]/label[1]/div[1]/input[1]")
for i in range(len(json['first_name'])):
fname.send_keys(json['first_name'][i])
print(json['first_name'] + " typed")
lname = driver.find_element_by_xpath(
"/html[1]/body[1]/div[1]/div[2]/div[1]/div[2]/div[1]/div[1]/main[1]/div[2]/div[2]/form[1]/section[1]/div[1]/div[1]/div[1]/div[1]/section[1]/div[2]/div[1]/section[1]/section[1]/div[2]/label[1]/div[1]/input[1]")
for i in range(len(json['last_name'])):
lname.send_keys(json["last_name"][i])
print("lname typed")
suggestions = driver.find_element_by_css_selector(".autocomplete__toggle")
if "Hide Suggestions" in suggestions.text:
suggestions.click()
print("suggestions removed")
address = driver.find_element_by_xpath(
"/html[1]/body[1]/div[1]/div[2]/div[1]/div[2]/div[1]/div[1]/main[1]/div[2]/div[2]/form[1]/section[1]/div[1]/div[1]/div[1]/div[1]/section[1]/div[2]/div[1]/section[1]/section[1]/div[3]/label[1]/div[2]/div[1]/div[1]/input[1]")
for i in range(len(json['address'])):
address.send_keys(json['address'][i])
print("street address typed")
city = driver.find_element_by_xpath(
"/html[1]/body[1]/div[1]/div[2]/div[1]/div[2]/div[1]/div[1]/main[1]/div[2]/div[2]/form[1]/section[1]/div[1]/div[1]/div[1]/div[1]/section[1]/div[2]/div[1]/section[1]/section[1]/div[5]/div[1]/div[1]/label[1]/div[1]/input[1]")
for i in range(len(json['city'])):
city.send_keys(json['city'][i])
print("city typed")
select = Select(driver.find_element_by_xpath(
'/html[1]/body[1]/div[1]/div[2]/div[1]/div[2]/div[1]/div[1]/main[1]/div[2]/div[2]/form[1]/section[1]/div[1]/div[1]/div[1]/div[1]/section[1]/div[2]/div[1]/section[1]/section[1]/div[5]/div[1]/div[2]/label[1]/div[1]/div[1]/select[1]'))
select.select_by_visible_text(json['state'])
print("state selected")
zip_code = driver.find_element_by_xpath(
'/html[1]/body[1]/div[1]/div[2]/div[1]/div[2]/div[1]/div[1]/main[1]/div[2]/div[2]/form[1]/section[1]/div[1]/div[1]/div[1]/div[1]/section[1]/div[2]/div[1]/section[1]/section[1]/div[6]/div[1]/div[1]/label[1]/div[1]/input[1]')
for i in range(len(json['zip_code'])):
zip_code.send_keys(json['zip_code'][i])
print("zip code address section typed")
def input_phone_and_email():
"""
This function inputs the phone number and email that the user has provided if they are checking out
as a guest
:rtype: object
"""
email = driver.find_element_by_id('user.emailAddress')
email.send_keys(json['email'])
phone = driver.find_element_by_id('user.phone')
phone.send_keys(json['phone'])
def check_if_payment_info_on_page():
"""
This function checks if the bot must enter payment information on the current page
:rtype: object
"""
try:
cvv = driver.find_element_by_id('credit-card-cvv')
except NoSuchElementException:
return False
return True
def click_continue_to_payment_info():
"""
This function clicks the continue to payment information if the previous function returns False
:rtype: object
"""
button = driver.find_element_by_css_selector(
'.btn-lg')
button.click()
def input_payment_info():
"""
This function inputs the CVV if the user has been logged in during a previous function and has a card saved
:rtype: object
"""
cvv = driver.find_element_by_id('credit-card-cvv')
cvv.send_keys(json['cvv'])
print("CVV added")
def input_payment_info_guest():
"""
This function inputs the payment information of the user if they have selected Guest checkout
:rtype: object
"""
try:
element_present = EC.presence_of_element_located(
(By.ID, 'optimized-cc-card-number'))
WebDriverWait(driver, timeout).until(element_present)
except TimeoutException:
print("Timed out waiting for page to load")
cc_number = driver.find_element_by_id(
'optimized-cc-card-number')
cc_number.send_keys(json['cc_number'])
select = Select(driver.find_element_by_name(
'expiration-month'))
select.select_by_visible_text(json['month'])
print("month selected")
select = Select(driver.find_element_by_name(
'expiration-year'))
select.select_by_visible_text(json['year'])
print("year selected")
cvv = driver.find_element_by_css_selector('#credit-card-cvv')
cvv.send_keys(json['cvv'])
print("CVV typed")
def place_order():
"""
This function places the order by clicking the final button
:rtype: object
"""
button = driver.find_element_by_css_selector(
'.btn-lg')
button.click()
def main(guest_or_sign_in):
time_start = 0
time_end = 0
if purchased.strip() == "0":
in_stock = 0
while in_stock == 0:
navigate_to_product()
driver.implicitly_wait(0.3)
y = check_if_in_stock()
if not y:
in_stock = 0
randinteger = randint(1, 5)
print(
"Sleeping for " +
str(randinteger) +
" seconds due to product not being in stock")
sleep(randinteger)
else:
#print("Stock found - running script")
#webhook.send("@everyone Stock Found")
#webhook.send(url + json['url'])
time_start = time.time()
add_to_cart()
in_stock = 1
navigate_to_cart()
change_zip_code_and_select_shipping()
click_checkout_key()
if guest_or_sign_in == "sign-in":
sign_in_and_click_button()
if not check_if_verify():
quit(0)
if check_if_shipping_info_needed() is True:
input_shipping_information()
if check_if_payment_info_on_page() is False:
click_continue_to_payment_info()
input_payment_info()
# place_order()
time_end = time.time()
time_diff = time_end - time_start
webhook.send(
"@everyone Purchased, Time elapsed: " +
str(time_diff) +
" Seconds")
json2 = open('purchased.txt', 'w')
json2.write('1')
json2.close()
else:
input_payment_info()
# place_order
time_end = time.time()
time_diff = time_end - time_start
webhook.send(
"@everyone Purchased, Time elapsed: " +
str(time_diff) +
" Seconds")
json2 = open('purchased.txt', 'w')
json2.write('1')
json2.close()
else:
if check_if_payment_info_on_page() is False:
click_continue_to_payment_info()
input_payment_info()
# place_order()
time_end = time.time()
time_diff = time_end - time_start
webhook.send(
"@everyone Purchased, Time elapsed: " +
str(time_diff) +
" Seconds")
json2 = open('purchased.txt', 'w')
json2.write('1')
json2.close()
else:
input_payment_info()
# place_order
time_end = time.time()
time_diff = time_end - time_start
webhook.send(
"@everyone Purchased, Time elapsed: " +
str(time_diff) +
" Seconds")
json2 = open('purchased.txt', 'w')
json2.write('1')
json2.close()
elif guest_or_sign_in == "guest":
select_guest_checkout()
# driver.refresh()
input_shipping_info_guest()
input_phone_and_email()
click_continue_to_payment_info()
input_payment_info_guest()
# place_order()
time_end = time.time()
time_diff = time_end - time_start
webhook.send(
"@everyone Purchased, Time elapsed: " +
str(time_diff) +
" Seconds")
json2 = open('purchased.txt', 'w')
json2.write('1')
json2.close()
else:
webhook.send(
"@everyone Not purchased as item has already been bought. "
"To reset this please open purchased.txt and replace the 0 with a 1")
quit(0)
main(guest_or_sign_in=json['bot_usage_case'])
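# The config.json read at the top of this script is not shown; from the keys referenced in
# the code it needs at least the following fields (placeholder values, key spelling exactly
# as used above, including 'discord_webook'). purchased.txt must contain "0" for a run to start.
#
# {
#     "discord_webook": "<Discord webhook URL>",
#     "executable_path": "<path to geckodriver>",
#     "url": "<product path appended to bestbuy.com>",
#     "zip_code": "00000",
#     "email": "user@example.com",
#     "password": "<password>",
#     "first_name": "First",
#     "last_name": "Last",
#     "address": "123 Example St",
#     "city": "City",
#     "state": "State name",
#     "phone": "5555555555",
#     "cc_number": "<card number>",
#     "month": "01",
#     "year": "2025",
#     "cvv": "123",
#     "bot_usage_case": "guest"           # or "sign-in"
# }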
| [
"discord.RequestsWebhookAdapter",
"selenium.webdriver.support.expected_conditions.presence_of_element_located",
"random.randint",
"selenium.webdriver.Firefox",
"time.sleep",
"time.time",
"selenium.webdriver.support.ui.WebDriverWait"
] | [((742, 800), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {'executable_path': "json['executable_path']"}), "(executable_path=json['executable_path'])\n", (759, 800), False, 'from selenium import webdriver\n'), ((670, 694), 'discord.RequestsWebhookAdapter', 'RequestsWebhookAdapter', ([], {}), '()\n', (692, 694), False, 'from discord import Webhook, RequestsWebhookAdapter\n'), ((1913, 2001), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.CSS_SELECTOR, 'button.btn-primary:nth-child(1)')"], {}), "((By.CSS_SELECTOR,\n 'button.btn-primary:nth-child(1)'))\n", (1943, 2001), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((2766, 2839), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.CSS_SELECTOR, '.change-zipcode-link')"], {}), "((By.CSS_SELECTOR, '.change-zipcode-link'))\n", (2796, 2839), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((3908, 3993), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.CSS_SELECTOR, '.cia-guest-content__continue')"], {}), "((By.CSS_SELECTOR,\n '.cia-guest-content__continue'))\n", (3938, 3993), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((4434, 4519), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.CSS_SELECTOR, '.cia-form__controls__submit')"], {}), "((By.CSS_SELECTOR, '.cia-form__controls__submit')\n )\n", (4464, 4519), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((5732, 5823), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.ID, 'consolidatedAddresses.ui_address_2.firstName')"], {}), "((By.ID,\n 'consolidatedAddresses.ui_address_2.firstName'))\n", (5762, 5823), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((6187, 6278), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.ID, 'consolidatedAddresses.ui_address_2.firstName')"], {}), "((By.ID,\n 'consolidatedAddresses.ui_address_2.firstName'))\n", (6217, 6278), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((11735, 11802), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.ID, 'optimized-cc-card-number')"], {}), "((By.ID, 'optimized-cc-card-number'))\n", (11765, 11802), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((2019, 2049), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', 'timeout'], {}), '(driver, timeout)\n', (2032, 2049), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((2861, 2886), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (2874, 2886), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((4011, 4035), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(9)'], {}), '(driver, 9)\n', (4024, 4035), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((4549, 4579), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', 'timeout'], {}), '(driver, timeout)\n', (4562, 4579), False, 'from 
selenium.webdriver.support.ui import WebDriverWait\n'), ((5841, 5865), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(3)'], {}), '(driver, 3)\n', (5854, 5865), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((6296, 6326), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', 'timeout'], {}), '(driver, timeout)\n', (6309, 6326), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((11824, 11854), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', 'timeout'], {}), '(driver, timeout)\n', (11837, 11854), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((13059, 13072), 'random.randint', 'randint', (['(1)', '(5)'], {}), '(1, 5)\n', (13066, 13072), False, 'from random import randint\n'), ((13255, 13273), 'time.sleep', 'sleep', (['randinteger'], {}), '(randinteger)\n', (13260, 13273), False, 'from time import sleep\n'), ((13480, 13491), 'time.time', 'time.time', ([], {}), '()\n', (13489, 13491), False, 'import time\n'), ((16403, 16414), 'time.time', 'time.time', ([], {}), '()\n', (16412, 16414), False, 'import time\n'), ((14119, 14130), 'time.time', 'time.time', ([], {}), '()\n', (14128, 14130), False, 'import time\n'), ((14614, 14625), 'time.time', 'time.time', ([], {}), '()\n', (14623, 14625), False, 'import time\n'), ((15221, 15232), 'time.time', 'time.time', ([], {}), '()\n', (15230, 15232), False, 'import time\n'), ((15716, 15727), 'time.time', 'time.time', ([], {}), '()\n', (15725, 15727), False, 'import time\n')] |
from asyncore import read
import os
import shutil
import yaml
import json
from app_logger import logger
from datetime import datetime
import uuid
def create_directory(path: str, is_recreate: bool = False)->None:
    """Utility to create the directory
    Args:
        path (str): Give the full path with directory name
        is_recreate (bool, optional): If True then it will first delete and then create the directory. Defaults to False.
"""
if is_recreate:
try:
shutil.rmtree(path)
except Exception:
pass
    os.makedirs(path, exist_ok=True)  # It will not throw an error if the folder already exists
def read_params(config_path: str ='config/params.yaml')->dict:
"""Responsible for reading the yaml file
Args:
config_path (str): Path of the Yaml file . Defaults to 'config/params.yaml'
Returns:
dict: Return the details of the yaml file
"""
with open(config_path, 'r') as f:
return yaml.safe_load(f)
def get_log_object_for_training(collection_name: str, execution_id : str=None, executed_by: str=None, project_id :str=None, is_log_enabled : bool=True):
"""It will give the Log Object for training
Args:
collection_name (str): Name of the collection in which the log will be stored
execution_id (str, optional): Execution id. Defaults to None.
executed_by (str, optional): Executed by. Defaults to None.
project_id (str, optional): Id of the project. Defaults to None.
is_log_enabled (bool, optional): If it is set to True then only it will write the logs. Defaults to True.
Returns:
Logger: Logger Object
"""
params=read_params()
if execution_id==None:
execution_id=uuid.uuid4().hex
if executed_by==None:
executed_by=params['base']['author']
if project_id==None:
project_id = params['base']['project_id']
logger_obj = logger.Logger(execution_id=execution_id, executed_by=executed_by, project_id=project_id,
databasename=params['database_logs']['training_logs']['database_name'], collection_name=collection_name, is_log_enabled=is_log_enabled)
return logger_obj
def get_log_object_for_prediction(collection_name: str, execution_id : str=None, executed_by: str=None, project_id :str=None, is_log_enabled : bool=True):
"""It will give the Log Object for prediction
Args:
collection_name (str): Name of the collection in which the log will be stored
execution_id (str, optional): Execution id. Defaults to None.
executed_by (str, optional): Executed by. Defaults to None.
project_id (str, optional): Id of the project. Defaults to None.
is_log_enabled (bool, optional): If it is set to True then only it will write the logs. Defaults to True.
Returns:
Logger: Logger Object
"""
params=read_params()
if execution_id==None:
execution_id=uuid.uuid4().hex
if executed_by==None:
executed_by=params['base']['author']
if project_id==None:
project_id = params['base']['project_id']
logger_obj = logger.Logger(execution_id=execution_id, executed_by=executed_by, project_id=project_id,
databasename=params['database_logs']['prediction_logs']['database_name'], collection_name=collection_name, is_log_enabled=is_log_enabled)
return logger_obj
def read_prediction_schema():
"""Responsible for reading the schema from schema_prediction.json
"""
params=read_params()
path=params['data_schemas']['prediction_schema']
with open(path) as f:
schema=json.load(f)
LengthOfDateStampInFile = schema['LengthOfDateStampInFile']
LengthOfTimeStampInFile = schema['LengthOfTimeStampInFile']
NumberofColumns = schema['NumberofColumns']
ColName = schema['ColName']
return LengthOfDateStampInFile,LengthOfTimeStampInFile,NumberofColumns,ColName
def read_training_schema():
"""Responsible for reading the schema from schema_training.json
"""
params=read_params()
path = params['data_schemas']['training_schema']
with open(path) as f:
schema=json.load(f)
LengthOfDateStampInFile = schema['LengthOfDateStampInFile']
LengthOfTimeStampInFile = schema['LengthOfTimeStampInFile']
NumberofColumns = schema['NumberofColumns']
ColName = schema['ColName']
return LengthOfDateStampInFile,LengthOfTimeStampInFile,NumberofColumns,ColName
def get_date():
"""Returns the current date.
"""
return datetime.now().date().strftime('%d-%m-%y')
def get_time():
"""Returns the current time
"""
return datetime.now().time().strftime('%H-%M-%S')
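# A minimal usage sketch of the helpers above (assumes config/params.yaml exists and
# contains the keys referenced in the functions; the directory name is a placeholder):
if __name__ == "__main__":
    create_directory("artifacts/example_run", is_recreate=True)
    params = read_params()
    print(params["base"]["author"], get_date(), get_time())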
| [
"app_logger.logger.Logger",
"json.load",
"uuid.uuid4",
"os.makedirs",
"yaml.safe_load",
"shutil.rmtree",
"datetime.datetime.now"
] | [((562, 594), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (573, 594), False, 'import os\n'), ((1927, 2164), 'app_logger.logger.Logger', 'logger.Logger', ([], {'execution_id': 'execution_id', 'executed_by': 'executed_by', 'project_id': 'project_id', 'databasename': "params['database_logs']['training_logs']['database_name']", 'collection_name': 'collection_name', 'is_log_enabled': 'is_log_enabled'}), "(execution_id=execution_id, executed_by=executed_by,\n project_id=project_id, databasename=params['database_logs'][\n 'training_logs']['database_name'], collection_name=collection_name,\n is_log_enabled=is_log_enabled)\n", (1940, 2164), False, 'from app_logger import logger\n'), ((3130, 3369), 'app_logger.logger.Logger', 'logger.Logger', ([], {'execution_id': 'execution_id', 'executed_by': 'executed_by', 'project_id': 'project_id', 'databasename': "params['database_logs']['prediction_logs']['database_name']", 'collection_name': 'collection_name', 'is_log_enabled': 'is_log_enabled'}), "(execution_id=execution_id, executed_by=executed_by,\n project_id=project_id, databasename=params['database_logs'][\n 'prediction_logs']['database_name'], collection_name=collection_name,\n is_log_enabled=is_log_enabled)\n", (3143, 3369), False, 'from app_logger import logger\n'), ((979, 996), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (993, 996), False, 'import yaml\n'), ((3637, 3649), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3646, 3649), False, 'import json\n'), ((4164, 4176), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4173, 4176), False, 'import json\n'), ((495, 514), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (508, 514), False, 'import shutil\n'), ((1747, 1759), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1757, 1759), False, 'import uuid\n'), ((2950, 2962), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2960, 2962), False, 'import uuid\n'), ((4536, 4550), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4548, 4550), False, 'from datetime import datetime\n'), ((4646, 4660), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4658, 4660), False, 'from datetime import datetime\n')] |
"""Test of pmd_consumer functionality, with a selection of data."""
from os.path import join, expanduser
from async_cv.play_file import play_file
from async_cv.event_processing.pmd_consumer import pmd_consumer
data_root = 'OneDrive\\Documents\\NIWC\\NeuroComp\\boat_tests\\'
annot_root = 'OneDrive\\Documents\\NIWC\\NeuroComp\\boat_tests\\'
files = {
'june_12': {
'boat_tests': {
2: 'Davis346red-2020-06-12T12-11-45-0700-0_Test_2.aedat4',
3: 'Davis346red-2020-06-12T12-15-01-0700-0_Test_3.aedat4',
5: 'Davis346red-2020-06-12T12-24-03-0700-0_Test_5.aedat4',
6: 'Davis346red-2020-06-12T12-25-39-0700-0_Test_6.aedat4'
},
'annotations': {
2: 'Davis346red-2020-06-12T12-11-45-0700-0_Test_2.xml',
3: 'Davis346red-2020-06-12T12-15-01-0700-0_Test_3.xml',
5: 'Davis346red-2020-06-12T12-24-03-0700-0_Test_5.xml',
6: 'Davis346red-2020-06-12T12-25-39-0700-0_Test_6.xml'
},
'data_format': '.aedat4'
},
'june_26': {
'boat_tests': {
# 2: 'Davis346red-2020-06-26T12-26-42-0700-00000195-0_Test_2.aedat4',
3: 'Davis346red-2020-06-26T12-27-39-0700-00000195-0_Test_3.aedat4',
# 4: 'Davis346red-2020-06-26T12-28-38-0700-00000195-0_Test_4.aedat4',
6: 'Davis346red-2020-06-26T12-30-20-0700-00000195-0_Test_6.aedat4',
9: 'Davis346red-2020-06-26T12-32-12-0700-00000195-0_Test_9.aedat4',
21: 'Davis346red-2020-06-26T13-22-40-0700-00000195-0_Test_21.aedat4'
},
'annotations': {
# 2: 'Davis346red-2020-06-26T12-26-42-0700-00000195-0_Test_2.xml',
3: 'Davis346red-2020-06-26T12-27-39-0700-00000195-0_Test_3.xml',
# 4: 'Davis346red-2020-06-26T12-28-38-0700-00000195-0_Test_4.xml',
6: 'Davis346red-2020-06-26T12-30-20-0700-00000195-0_Test_6.xml',
9: 'Davis346red-2020-06-26T12-32-12-0700-00000195-0_Test_9.xml',
21: 'Davis346red-2020-06-26T13-22-40-0700-00000195-0_Test_21.xml'
},
'data_format': '.aedat4'
},
'april_12': {
'boat_tests': {
0: '25mm-1000us-speedboat-2021_04_12_15_09_24.aedat4',
1: '25mm-1200us-drifting-boat-2021_04_12_15_33_47.aedat4',
2: '75mm-1500us-drifting-boat-2021_04_12_15_35_24.aedat4',
3: '75mm-2000us-boat2-2021_04_12_15_21_16.aedat4',
4: '75mm-2000us-boat3-2021_04_12_15_30_50.aedat4',
5: '75mm-2000us-filter-boat-2021_04_12_15_16_43.aedat4',
6: '75mm-2000us-on-off-filter-boat-2021_04_12_15_17_24.aedat4',
# 7: '75mm-2000us-speedboat-2021_04_12_15_26_01.aedat4'
},
'annotations': {
0: '25mm-1000us-speedboat-2021_04_12_15_09_24-2021_06_03_18_58_28-cvat+for+video+1.1.xml',
1: '25mm-1200us-drifting-boat-2021_04_12_15_33_47-2021_06_03_21_30_33-cvat+for+video+1.1.xml',
2: '75mm-1500us-drifting-boat-2021_04_12_15_35_24-2021_06_03_21_50_58-cvat+for+video+1.1.xml',
3: '75mm-2000us-boat2-2021_04_12_15_21_16-2021_06_03_22_21_59-cvat+for+video+1.1.xml',
4: '75mm-2000us-boat3-2021_04_12_15_30_50-2021_06_03_22_55_50-cvat+for+video+1.1.xml',
5: '75mm-2000us-filter-boat-2021_04_12_15_16_43-2021_06_03_23_20_19-cvat+for+video+1.1.xml',
6: '75mm-2000us-on-off-filter-boat-2021_04_12_15_17_24-2021_06_03_23_26_34-cvat+for+video+1.1.xml',
# 7: '75mm-2000us-speedboat-2021_04_12_15_26_01-2021_06_07_15_08_31-cvat+for+video+1.1.xml'
}
},
# 'april_29': {
# 1: 'out_2021-04-29_17-56-14.raw',
# 2: 'out_2021-04-29_17-57-47.raw',
# 3: 'out_2021-04-29_18-02-48.raw',
# 4: 'out_2021-04-29_18-04-41.raw',
# 5: 'out_2021-04-29_18-06-47.raw',
# 6: 'out_2021-04-29_18-10-59.raw',
# 7: 'out_2021-04-29_18-17-21.raw',
# 8: 'out_2021-04-29_18-20-10.raw'
# },
}
def run_one(group, test, setting=''):
run_name = setting+f'{group}_run_{test:02d}'
data_path = join(expanduser('~\\'), data_root, join(
group, files[group]['boat_tests'][test]))
annot_path = join(expanduser('~\\'), annot_root, join(
group, files[group]['annotations'][test]))
play_file(data_path, 33, pmd_consumer,
run_name=run_name,
video_out=True,
targets=['vessel', 'boat', 'RHIB'],
annot_file=annot_path,
show_metrics=False,
parameters=parameters
)
def run_group(group, setting=''):
for test in files[group]['boat_tests'].keys():
run_one(group, test, setting)
def run_all(setting=''):
for group in files:
run_group(group, setting)
for factor in range(0, 1010, 10):
# Define PMD parameters
parameters = {
'x_div': 4, # number of horizontal divisions
'y_div': 4, # number of vertical divisions
'us_per_event': 50, # processing time alloted to each event handler to process events
'temporal_filter': 100_000,
# number of events to remember for each (x, y) position
'event_buffer_depth': 8,
'tf': 200_000, # how far back in time to consider events for filtering
'tc': 200_000, # how far back in time to consider events for clustering
'n': 4, # minimum number of correlated events required to allow a particular event through the filter
'max_cluster_size': 30, # maximum taxicab dist from center of cluster to each event
# microseconds periodicity to flush expired (>tc) events from buffer
'buffer_flush_period': 20_000,
'num_analyzers': 32,
'sample_period': 100_000, # microseconds between each centroid position sample
'long_duration': 3_000_000, #5_000_000,
'short_duration': 2_000_000, #3_000_000,
'detection_tau': -0.002,
'ratio_threshold': 0,
'dot_ratio_threshold': 1.0,
'ratio_stability_factor': 1.0,
'dot_ratio_stability_factor': factor,
}
run_group('june_12', f'{factor:03}/')
run_group('june_26', f'{factor:03}/')
run_group('april_12', f'{factor:03}/')
# run_all()
# run_one('june_12', 6)
| [
"os.path.join",
"os.path.expanduser",
"async_cv.play_file.play_file"
] | [((4289, 4473), 'async_cv.play_file.play_file', 'play_file', (['data_path', '(33)', 'pmd_consumer'], {'run_name': 'run_name', 'video_out': '(True)', 'targets': "['vessel', 'boat', 'RHIB']", 'annot_file': 'annot_path', 'show_metrics': '(False)', 'parameters': 'parameters'}), "(data_path, 33, pmd_consumer, run_name=run_name, video_out=True,\n targets=['vessel', 'boat', 'RHIB'], annot_file=annot_path, show_metrics\n =False, parameters=parameters)\n", (4298, 4473), False, 'from async_cv.play_file import play_file\n'), ((4088, 4105), 'os.path.expanduser', 'expanduser', (['"""~\\\\"""'], {}), "('~\\\\')\n", (4098, 4105), False, 'from os.path import join, expanduser\n'), ((4118, 4163), 'os.path.join', 'join', (['group', "files[group]['boat_tests'][test]"], {}), "(group, files[group]['boat_tests'][test])\n", (4122, 4163), False, 'from os.path import join, expanduser\n'), ((4196, 4213), 'os.path.expanduser', 'expanduser', (['"""~\\\\"""'], {}), "('~\\\\')\n", (4206, 4213), False, 'from os.path import join, expanduser\n'), ((4227, 4273), 'os.path.join', 'join', (['group', "files[group]['annotations'][test]"], {}), "(group, files[group]['annotations'][test])\n", (4231, 4273), False, 'from os.path import join, expanduser\n')] |
# Generated by Django 2.1.4 on 2019-03-18 22:00
from django.db import migrations, models
import front.models
class Migration(migrations.Migration):
dependencies = [
('front', '0002_images_filename'),
]
operations = [
migrations.AlterField(
model_name='images',
name='ifile',
field=models.ImageField(unique=True, upload_to=front.models.upld_dir),
),
]
| [
"django.db.models.ImageField"
] | [((350, 413), 'django.db.models.ImageField', 'models.ImageField', ([], {'unique': '(True)', 'upload_to': 'front.models.upld_dir'}), '(unique=True, upload_to=front.models.upld_dir)\n', (367, 413), False, 'from django.db import migrations, models\n')] |
"""tensorflow summary util"""
import tensorflow as tf
def mean_summary(var):
"""mean scalar summary
:type var: tensorflow.Variable
:param var: variable to add summary
"""
with tf.name_scope(var.name.split(":")[0]):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
def stddev_summary(var):
"""stddev scalar summary
:type var: tensorflow.Variable
:param var: variable to add summary
"""
with tf.name_scope(var.name.split(":")[0]):
mean = tf.reduce_mean(var)
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar("stddev", stddev)
def histogram_summary(var):
"""histogram summary
:type var: tensorflow.Variable
:param var: variable to add summary
"""
with tf.name_scope(var.name.split(":")[0]):
tf.summary.histogram('histogram', var)
def max_summary(var):
"""max scalar summary
:type var: tensorflow.Variable
:param var: variable to add summary
"""
with tf.name_scope(var.name.split(":")[0]):
tf.summary.scalar("max", tf.reduce_max(var))
def min_summary(var):
"""min summary
:type var: tensorflow.Variable
:param var: variable to add summary
"""
with tf.name_scope(var.name.split(":")[0]):
tf.summary.scalar("min", tf.reduce_min(var))
def summary_loss(var):
"""loss summary
loss's scalar and histogram summary
:type var: tensorflow.Variable
:param var: variable to summary
"""
with tf.name_scope(var.name.split(":")[0]):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
tf.summary.histogram('histogram', var)
def summary_image(var, max_outputs=0):
"""image summary
:type var: tensorflow.Variable
:type max_outputs: int
:param var: variable to summary
:param max_outputs: max output to summary image
"""
with tf.name_scope(var.name.split(":")[0]):
tf.summary.image("image", var, max_outputs=max_outputs)
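# A minimal TF1-style usage sketch for the helpers above (the variable shape and log
# directory are placeholders; under TF2 these calls live in tf.compat.v1):
#
# weights = tf.Variable(tf.random_normal([128, 64]), name="weights")
# mean_summary(weights)
# stddev_summary(weights)
# histogram_summary(weights)
# merged = tf.summary.merge_all()
# writer = tf.summary.FileWriter("./logs")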
| [
"tensorflow.summary.image",
"tensorflow.summary.scalar",
"tensorflow.reduce_mean",
"tensorflow.summary.histogram",
"tensorflow.square",
"tensorflow.reduce_max",
"tensorflow.reduce_min"
] | [((264, 283), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['var'], {}), '(var)\n', (278, 283), True, 'import tensorflow as tf\n'), ((293, 324), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mean"""', 'mean'], {}), "('mean', mean)\n", (310, 324), True, 'import tensorflow as tf\n'), ((538, 557), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['var'], {}), '(var)\n', (552, 557), True, 'import tensorflow as tf\n'), ((632, 667), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""stddev"""', 'stddev'], {}), "('stddev', stddev)\n", (649, 667), True, 'import tensorflow as tf\n'), ((873, 911), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""histogram"""', 'var'], {}), "('histogram', var)\n", (893, 911), True, 'import tensorflow as tf\n'), ((1636, 1655), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['var'], {}), '(var)\n', (1650, 1655), True, 'import tensorflow as tf\n'), ((1665, 1696), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mean"""', 'mean'], {}), "('mean', mean)\n", (1682, 1696), True, 'import tensorflow as tf\n'), ((1706, 1744), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""histogram"""', 'var'], {}), "('histogram', var)\n", (1726, 1744), True, 'import tensorflow as tf\n'), ((2034, 2089), 'tensorflow.summary.image', 'tf.summary.image', (['"""image"""', 'var'], {'max_outputs': 'max_outputs'}), "('image', var, max_outputs=max_outputs)\n", (2050, 2089), True, 'import tensorflow as tf\n'), ((1137, 1155), 'tensorflow.reduce_max', 'tf.reduce_max', (['var'], {}), '(var)\n', (1150, 1155), True, 'import tensorflow as tf\n'), ((1375, 1393), 'tensorflow.reduce_min', 'tf.reduce_min', (['var'], {}), '(var)\n', (1388, 1393), True, 'import tensorflow as tf\n'), ((599, 620), 'tensorflow.square', 'tf.square', (['(var - mean)'], {}), '(var - mean)\n', (608, 620), True, 'import tensorflow as tf\n')] |
from datetime import datetime
armstrong = datetime(1969, 7, 21, 14, 56, 15)
armstrong.date() # datetime.date(1969, 7, 21)
armstrong.time() # datetime.time(14, 56, 15)
armstrong.weekday()  # 0, i.e. Monday (weekday() numbers Monday as 0, not Sunday as in the US convention)
| [
"datetime.datetime"
] | [((44, 77), 'datetime.datetime', 'datetime', (['(1969)', '(7)', '(21)', '(14)', '(56)', '(15)'], {}), '(1969, 7, 21, 14, 56, 15)\n', (52, 77), False, 'from datetime import datetime\n')] |
# -*- coding: utf-8 -*-
"""Read polarity.
"""
import csv
def readfile(filepath, fmt='csv'):
with open(filepath, 'r') as f:
data = csv.reader(f, delimiter=',', skipinitialspace=True)
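        # skip the first row (header) before building the mapping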
next(data)
r = {i[0]: int(i[1]) for i in data if not i[0].startswith("#")}
return r
| [
"csv.reader"
] | [((145, 196), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""', 'skipinitialspace': '(True)'}), "(f, delimiter=',', skipinitialspace=True)\n", (155, 196), False, 'import csv\n')] |
from django.contrib import admin
from .models import Planet, Jedi, Tests, Questions
# Register your models here.
admin.site.register(Planet)
admin.site.register(Jedi)
class TestsInline(admin.StackedInline):
model = Questions
extra = 0
@admin.register(Tests)
class QuestionsAdmin(admin.ModelAdmin):
inlines = [
TestsInline,
]
| [
"django.contrib.admin.register",
"django.contrib.admin.site.register"
] | [((115, 142), 'django.contrib.admin.site.register', 'admin.site.register', (['Planet'], {}), '(Planet)\n', (134, 142), False, 'from django.contrib import admin\n'), ((143, 168), 'django.contrib.admin.site.register', 'admin.site.register', (['Jedi'], {}), '(Jedi)\n', (162, 168), False, 'from django.contrib import admin\n'), ((248, 269), 'django.contrib.admin.register', 'admin.register', (['Tests'], {}), '(Tests)\n', (262, 269), False, 'from django.contrib import admin\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Contains function to call API for information about a team's games.
from bvlapi.api.call import call_api
from bvlapi.api.settings import API_BASE_URL
from bvlapi.common.exceptions import InvalidGuid
from bvlapi.guid.team import is_team_guid
def get_matches_by_guid(guid):
""" Calls API to retrieve information about a basketball team's season.
:param str guid: GUID of basketball team
:rtype: [dict]
:return: a list of dictionaries containing information about team's games
:raise ApiCallFailed: something went wrong while calling API
"""
if not is_team_guid(guid):
raise InvalidGuid("'{}' is not a valid team GUID.".format(guid))
url = API_BASE_URL + "TeamMatchesByGuid?teamGuid={}".format(guid)
return call_api(url)
| [
"bvlapi.api.call.call_api",
"bvlapi.guid.team.is_team_guid"
] | [((804, 817), 'bvlapi.api.call.call_api', 'call_api', (['url'], {}), '(url)\n', (812, 817), False, 'from bvlapi.api.call import call_api\n'), ((630, 648), 'bvlapi.guid.team.is_team_guid', 'is_team_guid', (['guid'], {}), '(guid)\n', (642, 648), False, 'from bvlapi.guid.team import is_team_guid\n')] |
import sqlite3
from pathlib import Path
from typing import Any, AsyncIterator, Dict
import aiohttp_jinja2
import aiosqlite
import jinja2
from aiohttp import web
router = web.RouteTableDef()
async def fetch_post(db: aiosqlite.Connection, post_id: int) -> Dict[str, Any]:
async with db.execute(
"select owner, editor, title, text from posts where id = ?", [post_id]
) as cursor:
row = await cursor.fetchone()
print(row)
if row is None:
raise RuntimeError(f"Post {post_id} does not exist")
return {
"id": post_id,
"owner": row["owner"],
"editor": row["editor"],
"title": row["title"],
"text": row["text"],
}
@router.get("/")
@aiohttp_jinja2.template("index.html")
async def index(request: web.Request) -> Dict[str, Any]:
ret = []
db = request.config_dict["DB"]
async with db.execute("select id, owner, editor, title from posts") as cursor:
async for row in cursor:
ret.append(
{
"id": row["id"],
"owner": row["owner"],
"editor": row["editor"],
"title": row["title"],
}
)
return {"posts": ret}
@router.get("/new")
@aiohttp_jinja2.template("new.html")
async def new_post(request: web.Request) -> Dict[str, Any]:
return {}
@router.post("/new")
@aiohttp_jinja2.template("edit.html")
async def new_post_apply(request: web.Request) -> Dict[str, Any]:
db = request.config_dict["DB"]
post = await request.post()
owner = "Anonymous"
await db.execute(
"insert into posts (owner, editor, title, text) values (?,?,?,?)",
[owner, owner, post["title"], post["text"]],
)
await db.commit()
raise web.HTTPSeeOther(location=f"/")
@router.get("/{post}")
@aiohttp_jinja2.template("view.html")
async def view_post(request: web.Request) -> Dict[str, Any]:
post_id = request.match_info["post"]
if post_id.endswith(".ico"):
raise web.HTTPSeeOther(location=f"/")
db = request.config_dict["DB"]
return {"post": await fetch_post(db, post_id)}
@router.get("/{post}/edit")
@aiohttp_jinja2.template("edit.html")
async def edit_post(request: web.Request) -> Dict[str, Any]:
post_id = request.match_info["post"]
db = request.config_dict["DB"]
return {"post": await fetch_post(db, post_id)}
@router.post("/{post}/edit")
async def edit_post_apply(request: web.Request) -> web.Response:
post_id = request.match_info["post"]
db = request.config_dict["DB"]
post = await request.post()
await db.execute(
"update posts set title=?, text=? where id =?",
[post["title"], post["text"], post_id],
)
await db.commit()
raise web.HTTPSeeOther(location=f"/{post_id}")
@router.get("/{post}/delete")
async def delete_post(request: web.Request) -> web.Response:
post_id = request.match_info["post"]
db = request.config_dict["DB"]
await db.execute("delete from posts where id=?", [post_id])
raise web.HTTPSeeOther(location=f"/")
def get_db_path() -> Path:
here = Path.cwd()
return here / "db.sqlite3"
async def init_db(app: web.Application) -> AsyncIterator[None]:
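    # cleanup context: code before the yield runs at application startup, code after it on shutdown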
sqlite_db = get_db_path()
db = await aiosqlite.connect(sqlite_db)
db.row_factory = aiosqlite.Row
app["DB"] = db
yield
await db.close()
async def init_app() -> web.Application:
app = web.Application()
app.add_routes(router)
app.cleanup_ctx.append(init_db)
aiohttp_jinja2.setup(
app, loader=jinja2.FileSystemLoader(str(Path.cwd() / "templates"))
)
return app
def try_make_db() -> None:
sqlite_db = get_db_path()
if sqlite_db.exists():
return
with sqlite3.connect(sqlite_db) as conn:
cur = conn.cursor()
cur.execute(
"""CREATE TABLE posts (
id INTEGER PRIMARY KEY,
title TEXT,
text TEXT,
owner TEXT,
editor TEXT)
"""
)
conn.commit()
try_make_db()
web.run_app(init_app())
| [
"aiohttp.web.RouteTableDef",
"sqlite3.connect",
"aiohttp.web.HTTPSeeOther",
"aiohttp_jinja2.template",
"aiosqlite.connect",
"pathlib.Path.cwd",
"aiohttp.web.Application"
] | [((173, 192), 'aiohttp.web.RouteTableDef', 'web.RouteTableDef', ([], {}), '()\n', (190, 192), False, 'from aiohttp import web\n'), ((758, 795), 'aiohttp_jinja2.template', 'aiohttp_jinja2.template', (['"""index.html"""'], {}), "('index.html')\n", (781, 795), False, 'import aiohttp_jinja2\n'), ((1308, 1343), 'aiohttp_jinja2.template', 'aiohttp_jinja2.template', (['"""new.html"""'], {}), "('new.html')\n", (1331, 1343), False, 'import aiohttp_jinja2\n'), ((1442, 1478), 'aiohttp_jinja2.template', 'aiohttp_jinja2.template', (['"""edit.html"""'], {}), "('edit.html')\n", (1465, 1478), False, 'import aiohttp_jinja2\n'), ((1882, 1918), 'aiohttp_jinja2.template', 'aiohttp_jinja2.template', (['"""view.html"""'], {}), "('view.html')\n", (1905, 1918), False, 'import aiohttp_jinja2\n'), ((2217, 2253), 'aiohttp_jinja2.template', 'aiohttp_jinja2.template', (['"""edit.html"""'], {}), "('edit.html')\n", (2240, 2253), False, 'import aiohttp_jinja2\n'), ((1824, 1855), 'aiohttp.web.HTTPSeeOther', 'web.HTTPSeeOther', ([], {'location': 'f"""/"""'}), "(location=f'/')\n", (1840, 1855), False, 'from aiohttp import web\n'), ((2810, 2850), 'aiohttp.web.HTTPSeeOther', 'web.HTTPSeeOther', ([], {'location': 'f"""/{post_id}"""'}), "(location=f'/{post_id}')\n", (2826, 2850), False, 'from aiohttp import web\n'), ((3094, 3125), 'aiohttp.web.HTTPSeeOther', 'web.HTTPSeeOther', ([], {'location': 'f"""/"""'}), "(location=f'/')\n", (3110, 3125), False, 'from aiohttp import web\n'), ((3166, 3176), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (3174, 3176), False, 'from pathlib import Path\n'), ((3486, 3503), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (3501, 3503), False, 'from aiohttp import web\n'), ((2068, 2099), 'aiohttp.web.HTTPSeeOther', 'web.HTTPSeeOther', ([], {'location': 'f"""/"""'}), "(location=f'/')\n", (2084, 2099), False, 'from aiohttp import web\n'), ((3319, 3347), 'aiosqlite.connect', 'aiosqlite.connect', (['sqlite_db'], {}), '(sqlite_db)\n', (3336, 3347), False, 'import aiosqlite\n'), ((3800, 3826), 'sqlite3.connect', 'sqlite3.connect', (['sqlite_db'], {}), '(sqlite_db)\n', (3815, 3826), False, 'import sqlite3\n'), ((3641, 3651), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (3649, 3651), False, 'from pathlib import Path\n')] |
from custom_types import *
from constants import EPSILON
import igl
def scale_all(*values: T):
max_val = max([val.max().item() for val in values])
min_val = min([val.min().item() for val in values])
scale = max_val - min_val
values = [(val - min_val) / scale for val in values]
if len(values) == 1:
return values[0]
return values
def get_faces_normals(mesh: Union[T_Mesh, T]) -> T:
if type(mesh) is not T:
vs, faces = mesh
vs_faces = vs[faces]
else:
vs_faces = mesh
if vs_faces.shape[-1] == 2:
vs_faces = torch.cat(
(vs_faces, torch.zeros(*vs_faces.shape[:2], 1, dtype=vs_faces.dtype, device=vs_faces.device)), dim=2)
face_normals = torch.cross(vs_faces[:, 1, :] - vs_faces[:, 0, :], vs_faces[:, 2, :] - vs_faces[:, 1, :])
return face_normals
def compute_face_areas(mesh: Union[T_Mesh, T]) -> TS:
face_normals = get_faces_normals(mesh)
face_areas = torch.norm(face_normals, p=2, dim=1)
face_areas_ = face_areas.clone()
face_areas_[torch.eq(face_areas_, 0)] = 1
face_normals = face_normals / face_areas_[:, None]
face_areas = 0.5 * face_areas
return face_areas, face_normals
def check_sign_area(*meshes: T_Mesh) -> bool:
for mesh in meshes:
face_normals = get_faces_normals(mesh)
if not face_normals[:, 2].gt(0).all():
return False
return True
def to_numpy(*tensors: T) -> ARRAYS:
params = [param.detach().cpu().numpy() if type(param) is T else param for param in tensors]
return params
def create_mapper(mask: T) -> T:
mapper = torch.zeros(mask.shape[0], dtype=torch.int64, device=mask.device) - 1
mapper[mask] = torch.arange(mask.sum().item(), device=mask.device)
return mapper
def mesh_center(mesh: T_Mesh):
return mesh[0].mean(0)
def to_center(vs):
max_vals = vs.max(0)[0]
min_vals = vs.min(0)[0]
center = (max_vals + min_vals) / 2
vs -= center[None, :]
return vs
def to_unit_sphere(mesh: T_Mesh, in_place: bool = True, scale=1.) -> T_Mesh:
vs, faces = mesh
if not in_place:
vs = vs.clone()
vs = to_center(vs)
norm = vs.norm(2, dim=1).max()
vs *= scale * norm ** -1
return vs, faces
def scale_from_ref(mesh: T_Mesh, center: T, scale: float, in_place: bool = True) -> T_Mesh:
vs, faces = mesh
if not in_place:
vs = vs.clone()
vs -= center[None, :]
vs *= scale
return vs, faces
def to_unit_cube(*meshes: T_Mesh_T, scale=1, in_place: bool = True) -> Tuple[Union[T_Mesh_T, Tuple[T_Mesh_T, ...]], Tuple[T, float]]:
remove_me = 0
meshes = [(mesh, remove_me) if type(mesh) is T else mesh for mesh in meshes]
vs, faces = meshes[0]
max_vals = vs.max(0)[0]
min_vals = vs.min(0)[0]
max_range = (max_vals - min_vals).max() / 2
center = (max_vals + min_vals) / 2
meshes_ = []
scale = float(scale / max_range)
for mesh in meshes:
vs_, faces_ = scale_from_ref(mesh, center, scale)
meshes_.append(vs_ if faces_ is remove_me else (vs_, faces_))
if len(meshes_) == 1:
meshes_ = meshes_[0]
return meshes_, (center, scale)
def get_edges_ind(mesh: T_Mesh) -> T:
vs, faces = mesh
raw_edges = torch.cat([faces[:, [i, (i + 1) % 3]] for i in range(3)]).sort()
raw_edges = raw_edges[0].cpu().numpy()
edges = {(int(edge[0]), int(edge[1])) for edge in raw_edges}
edges = torch.tensor(list(edges), dtype=torch.int64, device=faces.device)
return edges
def edge_lengths(mesh: T_Mesh, edges_ind: TN = None) -> T:
vs, faces = mesh
if edges_ind is None:
edges_ind = get_edges_ind(mesh)
edges = vs[edges_ind]
return torch.norm(edges[:, 0] - edges[:, 1], 2, dim=1)
# in place
def to_unit_edge(*meshes: T_Mesh) -> Tuple[Union[T_Mesh, Tuple[T_Mesh, ...]], Tuple[T, float]]:
ref = meshes[0]
center = ref[0].mean(0)
ratio = edge_lengths(ref).mean().item()
for mesh in meshes:
vs, _ = mesh
vs -= center[None, :].to(vs.device)
vs /= ratio
if len(meshes) == 1:
meshes = meshes[0]
return meshes, (center, ratio)
def to(tensors, device: D) -> Union[T_Mesh, TS, T]:
out = []
for tensor in tensors:
if type(tensor) is T:
out.append(tensor.to(device, ))
elif type(tensor) is tuple or type(tensors) is List:
out.append(to(list(tensor), device))
else:
out.append(tensor)
if len(tensors) == 1:
return out[0]
else:
return tuple(out)
def clone(*tensors: Union[T, TS]) -> Union[TS, T_Mesh]:
out = []
for t in tensors:
if type(t) is T:
out.append(t.clone())
else:
out.append(clone(*t))
return out
def get_box(w: float, h: float, d: float) -> T_Mesh:
vs = [[0, 0, 0], [w, 0, 0], [0, d, 0], [w, d, 0],
[0, 0, h], [w, 0, h], [0, d, h], [w, d, h]]
faces = [[0, 2, 1], [1, 2, 3], [4, 5, 6], [5, 7, 6],
[0, 1, 5], [0, 5, 4], [2, 6, 7], [3, 2, 7],
[1, 3, 5], [3, 7, 5], [0, 4, 2], [2, 4, 6]]
return torch.tensor(vs, dtype=torch.float32), torch.tensor(faces, dtype=torch.int64)
def normalize(t: T):
t = t / t.norm(2, dim=1)[:, None]
return t
def interpolate_vs(mesh: T_Mesh, faces_inds: T, weights: T) -> T:
vs = mesh[0][mesh[1][faces_inds]]
vs = vs * weights[:, :, None]
return vs.sum(1)
def sample_uvw(shape, device: D):
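    # sample uniform barycentric coordinates: (u, v) pairs with u + v > 1 are reflected back into the triangle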
u, v = torch.rand(*shape, device=device), torch.rand(*shape, device=device)
mask = (u + v).gt(1)
u[mask], v[mask] = -u[mask] + 1, -v[mask] + 1
w = -u - v + 1
uvw = torch.stack([u, v, w], dim=len(shape))
return uvw
def get_sampled_fe(fe: T, mesh: T_Mesh, face_ids: T, uvw: TN) -> T:
# to_squeeze =
if fe.dim() == 1:
fe = fe.unsqueeze(1)
if uvw is None:
fe_iner = fe[face_ids]
else:
vs_ids = mesh[1][face_ids]
fe_unrolled = fe[vs_ids]
fe_iner = torch.einsum('sad,sa->sd', fe_unrolled, uvw)
# if to_squeeze:
# fe_iner = fe_iner.squeeze_(1)
return fe_iner
def sample_on_faces(mesh: T_Mesh, num_samples: int) -> TS:
vs, faces = mesh
uvw = sample_uvw([faces.shape[0], num_samples], vs.device)
samples = torch.einsum('fad,fna->fnd', vs[faces], uvw)
return samples, uvw
class SampleBy(Enum):
AREAS = 0
FACES = 1
HYB = 2
def sample_on_mesh(mesh: T_Mesh, num_samples: int, face_areas: TN = None,
sample_s: SampleBy = SampleBy.HYB) -> TNS:
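    # AREAS weights faces by surface area, FACES samples faces uniformly, HYB splits the samples between both schemes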
vs, faces = mesh
if faces is None: # sample from pc
uvw = None
if vs.shape[0] < num_samples:
chosen_faces_inds = torch.arange(vs.shape[0])
else:
chosen_faces_inds = torch.argsort(torch.rand(vs.shape[0]))[:num_samples]
samples = vs[chosen_faces_inds]
else:
weighted_p = []
if sample_s == SampleBy.AREAS or sample_s == SampleBy.HYB:
if face_areas is None:
face_areas, _ = compute_face_areas(mesh)
face_areas[torch.isnan(face_areas)] = 0
weighted_p.append(face_areas / face_areas.sum())
if sample_s == SampleBy.FACES or sample_s == SampleBy.HYB:
weighted_p.append(torch.ones(mesh[1].shape[0], device=mesh[0].device))
chosen_faces_inds = [torch.multinomial(weights, num_samples // len(weighted_p), replacement=True) for weights in weighted_p]
if sample_s == SampleBy.HYB:
chosen_faces_inds = torch.cat(chosen_faces_inds, dim=0)
chosen_faces = faces[chosen_faces_inds]
uvw = sample_uvw([num_samples], vs.device)
samples = torch.einsum('sf,sfd->sd', uvw, vs[chosen_faces])
return samples, chosen_faces_inds, uvw
def get_samples(mesh: T_Mesh, num_samples: int, sample_s: SampleBy, *features: T) -> Union[T, TS]:
samples, face_ids, uvw = sample_on_mesh(mesh, num_samples, sample_s=sample_s)
if len(features) > 0:
samples = [samples] + [get_sampled_fe(fe, mesh, face_ids, uvw) for fe in features]
return samples, face_ids, uvw
def find_barycentric(vs: T, triangles: T) -> T:
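    # each barycentric weight is the area of the sub-triangle obtained by swapping one vertex for the query point, divided by the full triangle area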
def compute_barycentric(ind):
triangles[:, ind] = vs
alpha = compute_face_areas(triangles)[0] / areas
triangles[:, ind] = recover[:, ind]
return alpha
device, dtype = vs.device, vs.dtype
vs = vs.to(device, dtype=torch.float64)
triangles = triangles.to(device, dtype=torch.float64)
areas, _ = compute_face_areas(triangles)
recover = triangles.clone()
barycentric = [compute_barycentric(i) for i in range(3)]
barycentric = torch.stack(barycentric, dim=1)
# assert barycentric.sum(1).max().item() <= 1 + EPSILON
return barycentric.to(device, dtype=dtype)
def from_barycentric(mesh: Union[T_Mesh, T], face_ids: T, weights: T) -> T:
if type(mesh) is not T:
triangles: T = mesh[0][mesh[1]]
else:
triangles: T = mesh
to_squeeze = weights.dim() == 1
if to_squeeze:
weights = weights.unsqueeze(0)
face_ids = face_ids.unsqueeze(0)
vs = torch.einsum('nad,na->nd', triangles[face_ids], weights)
if to_squeeze:
vs = vs.squeeze(0)
return vs
def check_circle_angles(mesh: T_Mesh, center_ind: int, select: T) -> bool:
vs, _ = mesh
all_vecs = vs[select] - vs[center_ind][None, :]
all_vecs = all_vecs / all_vecs.norm(2, 1)[:, None]
all_vecs = torch.cat([all_vecs, all_vecs[:1]], dim=0)
all_cos = torch.einsum('nd,nd->n', all_vecs[1:], all_vecs[:-1])
all_angles = torch.acos_(all_cos)
all_angles = all_angles.sum()
return (all_angles - 2 * np.pi).abs() < EPSILON
def vs_over_triangle(vs_mid: T, triangle: T, normals=None) -> T:
if vs_mid.dim() == 1:
vs_mid = vs_mid.unsqueeze(0)
triangle = triangle.unsqueeze(0)
if normals is None:
_, normals = compute_face_areas(triangle)
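    # the point lies over the triangle iff it is on the same side of all three edges, i.e. every edge cross product points along the face normal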
select = torch.arange(3)
d_vs = vs_mid[:, None, :] - triangle
d_f = triangle[:, select] - triangle[:, (select + 1) % 3]
all_cross = torch.cross(d_vs, d_f, dim=2)
all_dots = torch.einsum('nd,nad->na', normals, all_cross)
is_over = all_dots.ge(0).long().sum(1).eq(3)
return is_over
def igl_prepare(*dtypes):
def decoder(func):
def wrapper(*args, **kwargs):
mesh = args[0]
device, dtype = mesh[0].device, mesh[0].dtype
vs, faces = to_numpy(*mesh)
result = func((vs, faces), *args[1:], **kwargs)
return to_torch(result, device)
to_torch = to_torch_singe if len(dtypes) == 1 else to_torch_multi
return wrapper
def to_torch_singe(result, device):
return torch.from_numpy(result).to(device, dtype=dtypes[0])
def to_torch_multi(result, device):
return [torch.from_numpy(r).to(device, dtype=dtype) for r, dtype in zip(result, dtypes)]
return decoder
@igl_prepare(torch.float32, torch.int64)
def decimate_igl(mesh, num_faces: int):
if mesh[1].shape[0] <= num_faces:
return mesh
vs, faces, _ = igl.remove_duplicates(*mesh, 1e-8)
return igl.decimate(vs, faces, num_faces)[1:3]
@igl_prepare(torch.float32)
def gaussian_curvature(mesh: T_Mesh) -> T:
gc = igl.gaussian_curvature(*mesh)
return gc
@igl_prepare(torch.float32)
def per_vertex_normals_igl(mesh: T_Mesh, weighting: int = 0) -> T:
normals = igl.per_vertex_normals(*mesh, weighting)
return normals
@igl_prepare(torch.float32, torch.int64)
def remove_duplicate_vertices(mesh: T_Mesh, epsilon=1e-7) -> T_Mesh:
vs, _, _, faces = igl.remove_duplicate_vertices(*mesh, epsilon)
return vs, faces
@igl_prepare(torch.float32)
def winding_number_igl(mesh: T_Mesh, query: T) -> T:
query = query.cpu().numpy()
return igl.fast_winding_number_for_meshes(*mesh, query)
@igl_prepare(torch.float32, torch.float32, torch.float32, torch.float32)
def principal_curvature(mesh: T_Mesh) -> TS:
out = igl.principal_curvature(*mesh)
min_dir, max_dir, min_val, max_val = out
return min_dir, max_dir, min_val, max_val
def get_inside_outside(points: T, mesh: T_Mesh) -> T:
device = points.device
points = points.numpy()
vs, faces = mesh[0].numpy(), mesh[1].numpy()
winding_numbers = igl.fast_winding_number_for_meshes(vs, faces, points)
winding_numbers = torch.from_numpy(winding_numbers)
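    # fast winding number is ~1 inside the mesh and ~0 outside; threshold at 0.5 to map inside points to -1 and outside points to +1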
inside_outside = winding_numbers.lt(.5).float() * 2 - 1
return inside_outside.to(device)
@igl_prepare(torch.float32)
def lscm(mesh: T_Mesh, boundary_indices: T, boundary_coordinates: T) -> T:
boundary_indices, boundary_coordinates = boundary_indices.numpy(), boundary_coordinates.numpy()
check, uv = igl.lscm(*mesh, boundary_indices, boundary_coordinates)
return uv
def interpulate_vs(mesh: T_Mesh, faces_inds: T, weights: T) -> T:
vs = mesh[0][mesh[1][faces_inds]]
vs = vs * weights[:, :, None]
return vs.sum(1)
| [
"igl.remove_duplicate_vertices",
"igl.principal_curvature",
"igl.decimate",
"igl.gaussian_curvature",
"igl.fast_winding_number_for_meshes",
"igl.lscm",
"igl.per_vertex_normals",
"igl.remove_duplicates"
] | [((11061, 11096), 'igl.remove_duplicates', 'igl.remove_duplicates', (['*mesh', '(1e-08)'], {}), '(*mesh, 1e-08)\n', (11082, 11096), False, 'import igl\n'), ((11229, 11258), 'igl.gaussian_curvature', 'igl.gaussian_curvature', (['*mesh'], {}), '(*mesh)\n', (11251, 11258), False, 'import igl\n'), ((11384, 11424), 'igl.per_vertex_normals', 'igl.per_vertex_normals', (['*mesh', 'weighting'], {}), '(*mesh, weighting)\n', (11406, 11424), False, 'import igl\n'), ((11578, 11623), 'igl.remove_duplicate_vertices', 'igl.remove_duplicate_vertices', (['*mesh', 'epsilon'], {}), '(*mesh, epsilon)\n', (11607, 11623), False, 'import igl\n'), ((11771, 11819), 'igl.fast_winding_number_for_meshes', 'igl.fast_winding_number_for_meshes', (['*mesh', 'query'], {}), '(*mesh, query)\n', (11805, 11819), False, 'import igl\n'), ((11950, 11980), 'igl.principal_curvature', 'igl.principal_curvature', (['*mesh'], {}), '(*mesh)\n', (11973, 11980), False, 'import igl\n'), ((12254, 12307), 'igl.fast_winding_number_for_meshes', 'igl.fast_winding_number_for_meshes', (['vs', 'faces', 'points'], {}), '(vs, faces, points)\n', (12288, 12307), False, 'import igl\n'), ((12682, 12737), 'igl.lscm', 'igl.lscm', (['*mesh', 'boundary_indices', 'boundary_coordinates'], {}), '(*mesh, boundary_indices, boundary_coordinates)\n', (12690, 12737), False, 'import igl\n'), ((11107, 11141), 'igl.decimate', 'igl.decimate', (['vs', 'faces', 'num_faces'], {}), '(vs, faces, num_faces)\n', (11119, 11141), False, 'import igl\n')] |
from django.test import TestCase
from django.urls import reverse
from model_mommy import mommy
from monitor.models import TwitterUser
from monitor.tests.utils.http_client_mixin import HTTPClientMixin
import mock
class TestTwitterUserView(HTTPClientMixin, TestCase):
def setUp(self):
super(TestTwitterUserView, self).setUp()
self.url = reverse('monitor:users')
self.users = mommy.make('monitor.TwitterUser', _quantity=3)
def test_get(self):
response = self.client.get(self.url)
self.assertEqual(len(response.data), 3)
for count, user in enumerate(self.users):
self.assertEqual(response.data[count].get('id'), user.id)
def test_post(self):
self.assertEqual(TwitterUser.objects.count(), 3)
path = (
'monitor.api.serializers.twitter_user_serializers.'
'retrieve_tweets.delay'
)
with mock.patch(path, mock.Mock()) as retrieve_tweets:
response = self.client.post(self.url, {'username': 'test'})
retrieve_tweets.assert_called()
self.assertEqual(TwitterUser.objects.count(), 4)
self.assertEqual(response.data.get('username'), 'test')
class TestUsernameListView(HTTPClientMixin, TestCase):
def setUp(self):
super(TestUsernameListView, self).setUp()
self.users = mommy.make('monitor.TwitterUser', _quantity=3)
def test_get(self):
url = reverse('monitor:usernames')
response = self.client.get(url)
self.assertEqual(len(response.data), 3)
for count, user in enumerate(self.users):
self.assertEqual(
response.data[count].get('username'), user.username
)
| [
"django.urls.reverse",
"monitor.models.TwitterUser.objects.count",
"mock.Mock",
"model_mommy.mommy.make"
] | [((358, 382), 'django.urls.reverse', 'reverse', (['"""monitor:users"""'], {}), "('monitor:users')\n", (365, 382), False, 'from django.urls import reverse\n'), ((404, 450), 'model_mommy.mommy.make', 'mommy.make', (['"""monitor.TwitterUser"""'], {'_quantity': '(3)'}), "('monitor.TwitterUser', _quantity=3)\n", (414, 450), False, 'from model_mommy import mommy\n'), ((1359, 1405), 'model_mommy.mommy.make', 'mommy.make', (['"""monitor.TwitterUser"""'], {'_quantity': '(3)'}), "('monitor.TwitterUser', _quantity=3)\n", (1369, 1405), False, 'from model_mommy import mommy\n'), ((1445, 1473), 'django.urls.reverse', 'reverse', (['"""monitor:usernames"""'], {}), "('monitor:usernames')\n", (1452, 1473), False, 'from django.urls import reverse\n'), ((741, 768), 'monitor.models.TwitterUser.objects.count', 'TwitterUser.objects.count', ([], {}), '()\n', (766, 768), False, 'from monitor.models import TwitterUser\n'), ((931, 942), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (940, 942), False, 'import mock\n'), ((1109, 1136), 'monitor.models.TwitterUser.objects.count', 'TwitterUser.objects.count', ([], {}), '()\n', (1134, 1136), False, 'from monitor.models import TwitterUser\n')] |
"""
Sphere function (2 random inputs, scalar output)
==================================================
In this example, PCE is used to generate a surrogate model for a given set of 2D data.
.. math:: f(x) = x_1^2 + x_2^2
**Description:** Dimensions: 2
**Input Domain:** This function is evaluated on the hypercube :math:`x_i \in [-5.12, 5.12]` for all :math:`i = 1,2`.
**Global minimum:** :math:`f(x^*)=0,` at :math:`x^* = (0,0)`.
**Reference:** <NAME>., & <NAME>. (1978). The global optimization problem: an introduction. Towards global optimization, 2, 1-15.
"""
# %% md
#
# Import necessary libraries.
# %%
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from UQpy.surrogates import *
from UQpy.distributions import Uniform, JointIndependent
# %% md
#
# Define the function.
# %%
def function(x,y):
return x**2 + y**2
# %% md
#
# Create a distribution object, generate samples and evaluate the function at the samples.
# %%
np.random.seed(1)
dist_1 = Uniform(loc=-5.12, scale=10.24)
dist_2 = Uniform(loc=-5.12, scale=10.24)
marg = [dist_1, dist_2]
joint = JointIndependent(marginals=marg)
n_samples = 100
x = joint.rvs(n_samples)
y = function(x[:,0], x[:,1])
# %% md
#
# Visualize the 2D function.
# %%
xmin, xmax = -6,6
ymin, ymax = -6,6
X1 = np.linspace(xmin, xmax, 50)
X2 = np.linspace(ymin, ymax, 50)
X1_, X2_ = np.meshgrid(X1, X2) # grid of points
f = function(X1_, X2_)
fig = plt.figure(figsize=(10,6))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X1_, X2_, f, rstride=1, cstride=1, cmap='gnuplot2', linewidth=0, antialiased=False)
ax.set_title('True function')
ax.set_xlabel('$x_1$', fontsize=15)
ax.set_ylabel('$x_2$', fontsize=15)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(20, 140)
fig.colorbar(surf, shrink=0.5, aspect=7)
plt.show()
# %% md
#
# Visualize training data.
# %%
fig = plt.figure(figsize=(10,6))
ax = fig.gca(projection='3d')
ax.scatter(x[:,0], x[:,1], y, s=20, c='r')
ax.set_title('Training data')
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(20,140)
ax.set_xlabel('$x_1$', fontsize=15)
ax.set_ylabel('$x_2$', fontsize=15)
plt.show()
# %% md
#
# Create an object from the PCE class. Compute PCE coefficients using least squares regression.
# %%
max_degree = 3
polynomial_basis = TotalDegreeBasis(joint, max_degree)
least_squares = LeastSquareRegression()
pce = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=least_squares)
pce.fit(x,y)
# %% md
#
# Compute PCE coefficients using LASSO.
# %%
polynomial_basis = TotalDegreeBasis(joint, max_degree)
lasso = LassoRegression()
pce2 = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=lasso)
pce2.fit(x,y)
# %% md
#
# Compute PCE coefficients with Ridge regression.
# %%
polynomial_basis = TotalDegreeBasis(joint, max_degree)
ridge = RidgeRegression()
pce3 = PolynomialChaosExpansion(polynomial_basis=polynomial_basis, regression_method=ridge)
pce3.fit(x,y)
# %% md
#
# PCE surrogate is used to predict the behavior of the function at new samples.
# %%
n_test_samples = 10000
x_test = joint.rvs(n_test_samples)
y_test = pce.predict(x_test)
# %% md
#
# Plot PCE prediction.
# %%
fig = plt.figure(figsize=(10,6))
ax = fig.gca(projection='3d')
ax.scatter(x_test[:,0], x_test[:,1], y_test, s=1)
ax.set_title('PCE predictor')
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
ax.view_init(20,140)
ax.set_xlim(-6,6)
ax.set_ylim(-6,6)
ax.set_xlabel('$x_1$', fontsize=15)
ax.set_ylabel('$x_2$', fontsize=15)
plt.show()
# %% md
# Error Estimation
# -----------------
# Construct a validation dataset and get the validation error.
# %%
# validation sample
n_samples = 150
x_val = joint.rvs(n_samples)
y_val = function(x_val[:,0], x_val[:,1])
# PCE predictions
y_pce = pce.predict(x_val).flatten()
y_pce2 = pce2.predict(x_val).flatten()
y_pce3 = pce3.predict(x_val).flatten()
# mean relative validation errors
error = np.sum(np.abs((y_val - y_pce)/y_val))/n_samples
error2 = np.sum(np.abs((y_val - y_pce2)/y_val))/n_samples
error3 = np.sum(np.abs((y_val - y_pce3)/y_val))/n_samples
print('Mean rel. error, LSTSQ:', error)
print('Mean rel. error, LASSO:', error2)
print('Mean rel. error, Ridge:', error3)
# %% md
# Moment Estimation
# -----------------
# Returns mean and variance of the PCE surrogate.
# %%
n_mc = 1000000
x_mc = joint.rvs(n_mc)
y_mc = function(x_mc[:,0], x_mc[:,1])
mean_mc = np.mean(y_mc)
var_mc = np.var(y_mc)
print('Moments from least squares regression :', pce.get_moments())
print('Moments from LASSO regression :', pce2.get_moments())
print('Moments from Ridge regression :', pce3.get_moments())
print('Moments from Monte Carlo integration: ', mean_mc, var_mc) | [
"numpy.meshgrid",
"numpy.random.seed",
"matplotlib.pyplot.show",
"numpy.abs",
"UQpy.distributions.JointIndependent",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.ticker.LinearLocator",
"matplotlib.ticker.FormatStrFormatter",
"numpy.linspace",
"UQpy.distributions.Uniform",
"numpy.var"
] | [((1125, 1142), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1139, 1142), True, 'import numpy as np\n'), ((1153, 1184), 'UQpy.distributions.Uniform', 'Uniform', ([], {'loc': '(-5.12)', 'scale': '(10.24)'}), '(loc=-5.12, scale=10.24)\n', (1160, 1184), False, 'from UQpy.distributions import Uniform, JointIndependent\n'), ((1194, 1225), 'UQpy.distributions.Uniform', 'Uniform', ([], {'loc': '(-5.12)', 'scale': '(10.24)'}), '(loc=-5.12, scale=10.24)\n', (1201, 1225), False, 'from UQpy.distributions import Uniform, JointIndependent\n'), ((1259, 1291), 'UQpy.distributions.JointIndependent', 'JointIndependent', ([], {'marginals': 'marg'}), '(marginals=marg)\n', (1275, 1291), False, 'from UQpy.distributions import Uniform, JointIndependent\n'), ((1451, 1478), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(50)'], {}), '(xmin, xmax, 50)\n', (1462, 1478), True, 'import numpy as np\n'), ((1484, 1511), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', '(50)'], {}), '(ymin, ymax, 50)\n', (1495, 1511), True, 'import numpy as np\n'), ((1523, 1542), 'numpy.meshgrid', 'np.meshgrid', (['X1', 'X2'], {}), '(X1, X2)\n', (1534, 1542), True, 'import numpy as np\n'), ((1590, 1617), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (1600, 1617), True, 'import matplotlib.pyplot as plt\n'), ((2024, 2034), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2032, 2034), True, 'import matplotlib.pyplot as plt\n'), ((2086, 2113), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2096, 2113), True, 'import matplotlib.pyplot as plt\n'), ((2414, 2424), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2422, 2424), True, 'import matplotlib.pyplot as plt\n'), ((3497, 3524), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (3507, 3524), True, 'import matplotlib.pyplot as plt\n'), ((3868, 3878), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3876, 3878), True, 'import matplotlib.pyplot as plt\n'), ((4760, 4773), 'numpy.mean', 'np.mean', (['y_mc'], {}), '(y_mc)\n', (4767, 4773), True, 'import numpy as np\n'), ((4783, 4795), 'numpy.var', 'np.var', (['y_mc'], {}), '(y_mc)\n', (4789, 4795), True, 'import numpy as np\n'), ((1883, 1900), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (1896, 1900), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((1931, 1958), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (1949, 1958), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((2244, 2261), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (2257, 2261), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((2292, 2319), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (2310, 2319), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((3662, 3679), 'matplotlib.ticker.LinearLocator', 'LinearLocator', (['(10)'], {}), '(10)\n', (3675, 3679), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((3710, 3737), 'matplotlib.ticker.FormatStrFormatter', 'FormatStrFormatter', (['"""%.02f"""'], {}), "('%.02f')\n", (3728, 3737), False, 'from matplotlib.ticker import LinearLocator, FormatStrFormatter\n'), ((4288, 4319), 'numpy.abs', 'np.abs', (['((y_val - y_pce) / y_val)'], {}), 
'((y_val - y_pce) / y_val)\n', (4294, 4319), True, 'import numpy as np\n'), ((4345, 4377), 'numpy.abs', 'np.abs', (['((y_val - y_pce2) / y_val)'], {}), '((y_val - y_pce2) / y_val)\n', (4351, 4377), True, 'import numpy as np\n'), ((4403, 4435), 'numpy.abs', 'np.abs', (['((y_val - y_pce3) / y_val)'], {}), '((y_val - y_pce3) / y_val)\n', (4409, 4435), True, 'import numpy as np\n')] |
from datetime import date
import boundaries
boundaries.register('Ottawa wards',
domain='Ottawa, ON',
last_updated=date(2010, 8, 27),
name_func=boundaries.dashed_attr('WARD_EN'),
id_func=boundaries.attr('WARD_NUM'),
authority='City of Ottawa',
source_url='http://ottawa.ca/online_services/opendata/info/wards2010_en.html',
licence_url='http://ottawa.ca/online_services/opendata/terms_en.html',
data_url='http://ottawa.ca/online_services/opendata/data/wards2010.zip',
notes='Convert the features to 2D with: ogr2ogr -f "ESRI Shapefile" -overwrite . Wards_2010.shp -nlt POLYGON',
encoding='iso-8859-1',
)
| [
"boundaries.attr",
"boundaries.dashed_attr",
"datetime.date"
] | [((124, 141), 'datetime.date', 'date', (['(2010)', '(8)', '(27)'], {}), '(2010, 8, 27)\n', (128, 141), False, 'from datetime import date\n'), ((157, 190), 'boundaries.dashed_attr', 'boundaries.dashed_attr', (['"""WARD_EN"""'], {}), "('WARD_EN')\n", (179, 190), False, 'import boundaries\n'), ((204, 231), 'boundaries.attr', 'boundaries.attr', (['"""WARD_NUM"""'], {}), "('WARD_NUM')\n", (219, 231), False, 'import boundaries\n')] |
from nose.tools import *
import title_cleaner
TRUTH = [
(True, 'Manhattan: 1st Ave. - 34th St. E.'),
(True, 'Queens: Hoyt Avenue - 24th Street'),
(False, "Queens: Flushing Meadow Park - New York World's Fair of 1939-40 - [Industrial exhibits.]"),
(False, 'Fifth Avenue - 90th Street, southeast corner'),
(False, 'Recreation and hobbies - Miscellaneous - Children.'),
(True, 'Manhattan: 59th Street - 6th Avenue'),
(True, 'Queens: Queens Boulevard - Junction Boulevard'),
(True, 'Manhattan: 50th Street (West) - 5th Avenue'),
(True, 'Manhattan: 5th Avenue - 78th Street'),
(True, 'Manhattan: 5th Avenue - 33rd Street'),
(True, 'Queens: Queens Boulevard - 62nd Avenue'),
(False, 'Manhattan: Battery Park.'),
(False, 'Manhattan: Central Park - The Sailboat Pool'),
(True, 'Queens: Colonial Avenue - 62nd Drive'),
(True, 'Queens: Woodhaven Blvd - Fleet Street'),
(True, 'Richmond: New Dorp Lane - Cedar Grove Avenue')
]
def test_clean_title():
for correct, title in TRUTH:
assert correct == title_cleaner.is_pure_location(title), '%s %s' % (correct, title)
| [
"title_cleaner.is_pure_location"
] | [((1072, 1109), 'title_cleaner.is_pure_location', 'title_cleaner.is_pure_location', (['title'], {}), '(title)\n', (1102, 1109), False, 'import title_cleaner\n')] |
from discord.ext import commands
class Events(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print('Logged in as')
print(self.bot.user.name)
print(self.bot.user.id)
print('------')
def setup(bot):
bot.add_cog(Events(bot))
| [
"discord.ext.commands.Cog.listener"
] | [((122, 145), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (143, 145), False, 'from discord.ext import commands\n')] |
import datetime
import os
from models.system_model_v3.model.params.init import params
from models.system_model_v3.model.state_variables.init import state_variables
from models.constants import RAY
from experiments.system_model_v3.configure import configure_experiment
from experiments.system_model_v3.run import run_experiment
from experiments.utils import save_to_HDF5, batch, merge_parameter_sweep
from radcad.core import generate_parameter_sweep
SIMULATION_TIMESTEPS = 8758 #len(eth_price_df) - 1
MONTE_CARLO_RUNS = 1
sweeps = {
'controller_enabled': [True,False],
}
# Configure sweep and update parameters
params_update, experiment_metrics = configure_experiment(sweeps, timesteps=SIMULATION_TIMESTEPS, runs=MONTE_CARLO_RUNS)
params.update(params_update)
# Override parameters
params_override = {
'liquidity_demand_enabled': [False],
}
params.update(params_override)
# Experiment details
now = datetime.datetime.now()
dir_path = os.path.dirname(os.path.realpath(__file__))
experiment_folder = __file__.split('.py')[0]
results_id = now.isoformat()
if __name__ == '__main__':
run_experiment(results_id, experiment_folder, experiment_metrics, timesteps=SIMULATION_TIMESTEPS, runs=MONTE_CARLO_RUNS, params=params, initial_state=state_variables, save_file=True, save_logs=True)
| [
"experiments.system_model_v3.configure.configure_experiment",
"models.system_model_v3.model.params.init.params.update",
"os.path.realpath",
"experiments.system_model_v3.run.run_experiment",
"datetime.datetime.now"
] | [((659, 747), 'experiments.system_model_v3.configure.configure_experiment', 'configure_experiment', (['sweeps'], {'timesteps': 'SIMULATION_TIMESTEPS', 'runs': 'MONTE_CARLO_RUNS'}), '(sweeps, timesteps=SIMULATION_TIMESTEPS, runs=\n MONTE_CARLO_RUNS)\n', (679, 747), False, 'from experiments.system_model_v3.configure import configure_experiment\n'), ((743, 771), 'models.system_model_v3.model.params.init.params.update', 'params.update', (['params_update'], {}), '(params_update)\n', (756, 771), False, 'from models.system_model_v3.model.params.init import params\n'), ((859, 889), 'models.system_model_v3.model.params.init.params.update', 'params.update', (['params_override'], {}), '(params_override)\n', (872, 889), False, 'from models.system_model_v3.model.params.init import params\n'), ((918, 941), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (939, 941), False, 'import datetime\n'), ((969, 995), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (985, 995), False, 'import os\n'), ((1103, 1310), 'experiments.system_model_v3.run.run_experiment', 'run_experiment', (['results_id', 'experiment_folder', 'experiment_metrics'], {'timesteps': 'SIMULATION_TIMESTEPS', 'runs': 'MONTE_CARLO_RUNS', 'params': 'params', 'initial_state': 'state_variables', 'save_file': '(True)', 'save_logs': '(True)'}), '(results_id, experiment_folder, experiment_metrics, timesteps\n =SIMULATION_TIMESTEPS, runs=MONTE_CARLO_RUNS, params=params,\n initial_state=state_variables, save_file=True, save_logs=True)\n', (1117, 1310), False, 'from experiments.system_model_v3.run import run_experiment\n')] |
from get_config import get_config
from get_ansible import get_ansible
from get_helm import get_helm
from get_skaffold import get_skaffold
from get_docker import get_docker
from get_minikube import get_minikube
from get_regs import get_registries
from edit_hosts import edit_hosts
from c_registry import c_registry
if __name__ == '__main__':
edit_hosts()
    get_docker()
    c_registry()
    get_ansible()
    # get_helm()
    get_minikube()
    get_skaffold()
get_config()
get_registries()
edit_hosts()
| [
"get_docker.get_docker",
"edit_hosts.edit_hosts",
"c_registry.c_registry",
"get_minikube.get_minikube",
"get_regs.get_registries",
"get_skaffold.get_skaffold",
"get_ansible.get_ansible",
"get_config.get_config"
] | [((347, 359), 'edit_hosts.edit_hosts', 'edit_hosts', ([], {}), '()\n', (357, 359), False, 'from edit_hosts import edit_hosts\n'), ((364, 376), 'get_docker.get_docker', 'get_docker', ([], {}), '()\n', (374, 376), False, 'from get_docker import get_docker\n'), ((383, 395), 'c_registry.c_registry', 'c_registry', ([], {}), '()\n', (393, 395), False, 'from c_registry import c_registry\n'), ((402, 415), 'get_ansible.get_ansible', 'get_ansible', ([], {}), '()\n', (413, 415), False, 'from get_ansible import get_ansible\n'), ((442, 456), 'get_minikube.get_minikube', 'get_minikube', ([], {}), '()\n', (454, 456), False, 'from get_minikube import get_minikube\n'), ((464, 478), 'get_skaffold.get_skaffold', 'get_skaffold', ([], {}), '()\n', (476, 478), False, 'from get_skaffold import get_skaffold\n'), ((486, 498), 'get_config.get_config', 'get_config', ([], {}), '()\n', (496, 498), False, 'from get_config import get_config\n'), ((503, 519), 'get_regs.get_registries', 'get_registries', ([], {}), '()\n', (517, 519), False, 'from get_regs import get_registries\n'), ((524, 536), 'edit_hosts.edit_hosts', 'edit_hosts', ([], {}), '()\n', (534, 536), False, 'from edit_hosts import edit_hosts\n')] |
import numpy as np
import pytest
from fast_carpenter.selection.filters import Counter
@pytest.fixture
def weight_names():
return [
"EventWeight",
# "MuonWeight", "ElectronWeight", "JetWeight",
]
@pytest.fixture
def counter(weight_names):
return Counter(weight_names)
def test_init(weight_names, full_wrapped_tree):
c = Counter(weight_names)
assert c._weight_names == weight_names
assert c.counts == (0, 0.0)
assert c._w_counts == (0.0)
def test_increment_mc(counter, full_wrapped_tree):
counter.increment(full_wrapped_tree, is_mc=True)
n_events = len(full_wrapped_tree)
expected_weighted_sum = 229.94895935058594
# expected value is taken from numpy sum, but awkward sum is used
# the difference is small and due to optimization
# see https://github.com/scikit-hep/awkward-1.0/issues/1241
assert counter._w_counts == pytest.approx(np.array([expected_weighted_sum]), 1e-4)
assert counter.counts == (n_events, pytest.approx(expected_weighted_sum, 1e-4))
def test_increment_data(counter, full_wrapped_tree):
counter.increment(full_wrapped_tree, is_mc=False)
n_events = len(full_wrapped_tree)
assert counter._w_counts == (n_events)
assert counter.counts == (n_events, n_events)
def test_add(counter, full_wrapped_tree):
counter.increment(full_wrapped_tree, is_mc=True)
counter.add(counter)
n_events = len(full_wrapped_tree)
expected_weighted_sum = 229.94895935058594
# expected value is taken from numpy sum, but awkward sum is used
# the difference is small and due to optimization
# see https://github.com/scikit-hep/awkward-1.0/issues/1241
assert counter._w_counts == pytest.approx((expected_weighted_sum * 2,), 2e-4)
assert counter.counts == (n_events * 2, pytest.approx(expected_weighted_sum * 2, 2e-4))
def test_increment_without_weights(full_wrapped_tree):
counter = Counter([])
counter.increment(full_wrapped_tree, is_mc=True)
n_events = len(full_wrapped_tree)
with pytest.raises(IndexError):
assert counter._w_counts[0] == n_events
assert counter.counts == (n_events, )
| [
"pytest.raises",
"pytest.approx",
"numpy.array",
"fast_carpenter.selection.filters.Counter"
] | [((277, 298), 'fast_carpenter.selection.filters.Counter', 'Counter', (['weight_names'], {}), '(weight_names)\n', (284, 298), False, 'from fast_carpenter.selection.filters import Counter\n'), ((357, 378), 'fast_carpenter.selection.filters.Counter', 'Counter', (['weight_names'], {}), '(weight_names)\n', (364, 378), False, 'from fast_carpenter.selection.filters import Counter\n'), ((1917, 1928), 'fast_carpenter.selection.filters.Counter', 'Counter', (['[]'], {}), '([])\n', (1924, 1928), False, 'from fast_carpenter.selection.filters import Counter\n'), ((1704, 1755), 'pytest.approx', 'pytest.approx', (['(expected_weighted_sum * 2,)', '(0.0002)'], {}), '((expected_weighted_sum * 2,), 0.0002)\n', (1717, 1755), False, 'import pytest\n'), ((2030, 2055), 'pytest.raises', 'pytest.raises', (['IndexError'], {}), '(IndexError)\n', (2043, 2055), False, 'import pytest\n'), ((911, 944), 'numpy.array', 'np.array', (['[expected_weighted_sum]'], {}), '([expected_weighted_sum])\n', (919, 944), True, 'import numpy as np\n'), ((992, 1036), 'pytest.approx', 'pytest.approx', (['expected_weighted_sum', '(0.0001)'], {}), '(expected_weighted_sum, 0.0001)\n', (1005, 1036), False, 'import pytest\n'), ((1798, 1846), 'pytest.approx', 'pytest.approx', (['(expected_weighted_sum * 2)', '(0.0002)'], {}), '(expected_weighted_sum * 2, 0.0002)\n', (1811, 1846), False, 'import pytest\n')] |
# coding:utf-8
import sys
from flask import Flask, jsonify
from flask_cors import CORS
from flask_migrate import Migrate
from flask_restplus import Api
from flasgger import Swagger
from alchemy.common.base import db
from marshmallow import Schema, fields, ValidationError, pre_load
from controllers import tests_controller
from container import Container
def create_app(testConfig=None, sqlConnectionString=None):
# container and dependency injection configuration setup on controller level
container = Container()
container.wire(modules=[tests_controller])
# create and configure the app
app = Flask(__name__, instance_relative_config=True)
api = Api(app)
swagger = Swagger(app)
# set up environmenet variables from the passed in configuration file from the instance folder
if testConfig is None:
# load the instance config, if it exists, when not testing
app.config.from_pyfile('config_dev.py', silent=False)
else:
# load the test config if passed in
app.config.from_pyfile(testConfig, silent=False)
if sqlConnectionString is not None:
app.config['SQLALCHEMY_DATABASE_URI']=sqlConnectionString
# import tables here to be referenced in the alembic migration scripts
from alchemy.tables.test_defintion_table import TestDefinition
db.init_app(app)
migrate = Migrate(app, db, render_as_batch=True)
# Register blueprints
routes = {
'tests': {'route': tests_controller.testsControllerBlueprint, 'url_prefix': '/tests/'},
}
for route in routes:
blueprint = routes[route]
app.register_blueprint(blueprint['route'], url_prefix = blueprint['url_prefix'])
CORS(app, resources={r"/*": {"origins": "*"}})
app.config['CORS_HEADERS'] = 'Content-Type'
return app
| [
"alchemy.common.base.db.init_app",
"flask_restplus.Api",
"flask_cors.CORS",
"flask.Flask",
"flask_migrate.Migrate",
"flasgger.Swagger",
"container.Container"
] | [((517, 528), 'container.Container', 'Container', ([], {}), '()\n', (526, 528), False, 'from container import Container\n'), ((622, 668), 'flask.Flask', 'Flask', (['__name__'], {'instance_relative_config': '(True)'}), '(__name__, instance_relative_config=True)\n', (627, 668), False, 'from flask import Flask, jsonify\n'), ((679, 687), 'flask_restplus.Api', 'Api', (['app'], {}), '(app)\n', (682, 687), False, 'from flask_restplus import Api\n'), ((702, 714), 'flasgger.Swagger', 'Swagger', (['app'], {}), '(app)\n', (709, 714), False, 'from flasgger import Swagger\n'), ((1339, 1355), 'alchemy.common.base.db.init_app', 'db.init_app', (['app'], {}), '(app)\n', (1350, 1355), False, 'from alchemy.common.base import db\n'), ((1370, 1408), 'flask_migrate.Migrate', 'Migrate', (['app', 'db'], {'render_as_batch': '(True)'}), '(app, db, render_as_batch=True)\n', (1377, 1408), False, 'from flask_migrate import Migrate\n'), ((1707, 1752), 'flask_cors.CORS', 'CORS', (['app'], {'resources': "{'/*': {'origins': '*'}}"}), "(app, resources={'/*': {'origins': '*'}})\n", (1711, 1752), False, 'from flask_cors import CORS\n')] |
from strato.racktest.infra.suite import *
from example_seeds import addition
import time
SIGNALLED_CALLABLE_CODE = """
import signal
import time
signalReceived = None
def signalHandler(sigNum, _):
global signalReceived
signalReceived = sigNum
signal.signal(signal.SIGUSR2, signalHandler)
while not signalReceived:
time.sleep(1)
"""
class Test:
HOSTS = dict(it=dict(rootfs="rootfs-basic"))
def run(self):
TS_ASSERT_EQUALS(host.it.seed.runCallable(
addition.addition, 1, second=2, takeSitePackages=True)[0], 3)
TS_ASSERT_EQUALS(host.it.seed.runCode(
"from example_seeds import addition\nresult = addition.addition(2, second=3)",
takeSitePackages=True)[0], 5)
forked = host.it.seed.forkCode(
"import time\ntime.sleep(3)\n"
"print 'OUTPUT LINE'\n"
"from example_seeds import addition\nresult = addition.addition(2, second=3)",
takeSitePackages=True)
TS_ASSERT(forked.poll() is None)
TS_ASSERT(forked.poll() is None)
TS_ASSERT_PREDICATE_TIMEOUT(forked.poll, TS_timeout=4)
TS_ASSERT(forked.poll())
TS_ASSERT_EQUALS(forked.result(), 5)
TS_ASSERT('OUTPUT LINE' in forked.output())
forked = host.it.seed.forkCode(
"import time\nwhile True: time.sleep(2)", takeSitePackages=True)
TS_ASSERT(forked.poll() is None)
TS_ASSERT(forked.poll() is None)
forked.kill()
for i in xrange(10):
if forked.poll() is None:
time.sleep(1)
else:
break
TS_ASSERT_EQUALS(forked.poll(), False)
forked = host.it.seed.forkCode(
"import time\nwhile True: time.sleep(2)", takeSitePackages=True)
TS_ASSERT(forked.poll() is None)
TS_ASSERT(forked.poll() is None)
forked.kill('TERM')
for i in xrange(10):
if forked.poll() is None:
time.sleep(1)
else:
break
TS_ASSERT_EQUALS(forked.poll(), False)
forked = host.it.seed.forkCode(SIGNALLED_CALLABLE_CODE, takeSitePackages=True)
TS_ASSERT(forked.poll() is None)
TS_ASSERT(forked.poll() is None)
forked.kill('USR2')
for i in xrange(10):
if forked.poll() is None:
time.sleep(1)
else:
break
TS_ASSERT_EQUALS(forked.poll(), True)
| [
"time.sleep"
] | [((1563, 1576), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1573, 1576), False, 'import time\n'), ((1975, 1988), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1985, 1988), False, 'import time\n'), ((2357, 2370), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2367, 2370), False, 'import time\n')] |
# Reference : https://docs.atlassian.com/software/jira/docs/api/REST/8.5.3
# Reference : https://developer.atlassian.com/cloud/jira/platform/rest/v2/
# https://id.atlassian.com/manage/api-tokens - create the api token
# https://developer.atlassian.com/cloud/jira/platform/basic-auth-for-rest-apis/ - doing it
from configparser import ConfigParser
from constants import BASE_URL
import requests
import base64
class JiraAPI:
headers={}
base_url=BASE_URL
@staticmethod
def get_from_config(item):
config = ConfigParser()
config.read('../secret.ini')
try:
return config.get('Jira',item)
except:
return None
def __init__(self):
"""
Get the username and password from the secrets.ini file
"""
email = self.get_from_config("email")
api_token = self.get_from_config("api_token")
required_string = f"{email}:{api_token}"
encoded = base64.b64encode(
required_string.encode("utf-8")).decode("utf-8")
self.headers = {
'Authorization': f"Basic {encoded}",
'Content-Type': "application/json"
}
def get(self, route, params=None):
"""
Get the API Response
"""
print(f"{self.base_url}{route}")
response = None
if params is None:
response = requests.get(
f"{self.base_url}{route}",
headers=self.headers,
)
else:
response = requests.get(
f"{self.base_url}{route}",
headers=self.headers,
params=params
)
# Return the response to get the required data
try:
return response.json()
except:
return None
# Application roles
def get_application_roles_all(self):
"""
Returns all application roles.
"""
route = "rest/api/2/applicationrole"
return self.get(route=route) or {}
def get_application_roles(self,key):
"""
Returns an application roles.
:key: - The key of the application role.
"""
route = f"rest/api/2/applicationrole/{key}"
return self.get(route=route) or {}
# Audit Records
def get_audit_records(self,startat=None,maxresults=None):
"""
Returns a list of audit records.
:startat: - The number of records to skip before returning the first result.
:maxresults: - The maximum number of results to return.
"""
params={}
if(startat):
params["startat"] = startat
if(maxresults):
params["maxresults"] = maxresults
route = "rest/api/2/auditing/record"
return self.get(route=route,params=params) or {}
# Avatars
def get_system_avatars_by_type(self,avtype):
"""
Returns a list of system avatar details by owner type, where the owner
types are issue type, project, or user.
:avtype: - avatar type
"""
route = f"rest/api/2/avatar/{avtype}/system"
return self.get(route=route) or {}
def get_avatars(self,avtype,entityid):
"""
Returns the system and custom avatars for a project or issue type.
:avtype: - avatar type
:entityid: - The ID of the item the avatar is associated with.
"""
route = f"rest/api/2/universal_avatar/type/{avtype}/owner/{entityid}"
return self.get(route=route) or {}
# Dashboard
def get_all_dashboards(self,startat=None,maxresults=None):
params={}
if(startat):
params["startAt"] = startat
if(maxresults):
params["maxResults"] = maxresults
route = "rest/api/2/dashboard"
return self.get(route=route,params=params) or {}
def search_for_dashboards(self,name=None,accid=None,groupname=None):
params={}
if(name):
params["dashboardName"] = name
if(accid):
params["accountId"] = accid
if(groupname):
params["groupname"] = groupname
route = "rest/api/2/dashboard/search"
return self.get(route=route,params=params) or {}
def get_dashboard_item_property_keys(self,dashboardId,itemId):
route = f"rest/api/2/dashboard/{dashboardId}/items/{itemId}/properties"
return self.get(route=route) or {}
def get_dashboard_item_property(self,dashboardId,itemId,propertyKey):
route = f"rest/api/2/dashboard/{dashboardId}/items/{itemId}/properties/{propertyKey}"
return self.get(route=route) or {}
def get_dashboard(self,dId):
route = f"rest/api/2/dashboard/{dId}"
return self.get(route=route) or {}
# Filter
def get_filter(self,fId):
route = f"rest/api/2/filter/{fId}"
return self.get(route=route) or {}
def get_my_filters(self):
route = "rest/api/2/filter/my"
return self.get(route=route) or {}
# Groups
def get_users_from_group(self,groupname,includeInactiveUsers=None,startAt=None,maxResults=None):
params={}
params["groupname"] = groupname
if(includeInactiveUsers):
params["includeInactiveUsers"] = includeInactiveUsers
        if startAt:
            params["startAt"] = startAt
if(maxResults):
params["maxResults"] = maxResults
route = "rest/api/2/group/member"
return self.get(route=route,params=params) or {}
# Issues --partial
def get_issue(self,issueIdOrKey):
route = f"rest/api/2/issue/{issueIdOrKey}"
return self.get(route=route) or {}
def get_changelogs(self,issueIdOrKey,startAt=None,maxResults=None):
params={}
        if startAt:
            params["startAt"] = startAt
if(maxResults):
params["maxResults"] = maxResults
route = f"rest/api/2/issue/{issueIdOrKey}/changelog"
return self.get(route=route,params=params) or {}
def get_transitions(self,issueIdOrKey,transitionId=None):
params={}
if(transitionId):
params["transitionId"] = transitionId
route = f"rest/api/2/issue/{issueIdOrKey}/changelog"
return self.get(route=route,params=params) or {}
def get_comments(self,issueIdOrKey,startAt=None,maxResults=None):
params={}
        if startAt:
            params["startAt"] = startAt
if(maxResults):
params["maxResults"] = maxResults
route = f"rest/api/2/issue/{issueIdOrKey}/comments"
return self.get(route=route,params=params) or {}
def get_comment(self,issueIdOrKey,cId):
route = f"rest/api/2/issue/{issueIdOrKey}/comment/{cId}"
return self.get(route=route) or {}
# Permissions
def get_my_permissions(self):
"""
Provide permission information for the current user.
"""
route = "rest/api/2/mypermissions"
return self.get(route=route) or {}
def get_permissions_all(self):
"""
        Returns all permissions, both global and project permissions.
"""
route = "rest/api/2/permissions"
return self.get(route=route) or {}
def get_property(self,key=None,permissionLevel=None):
"""
Returns an application property.
:key: OPT
:permissionLevel: OPT
"""
params={}
if(key):
params["key"] = key
if(permissionLevel):
params["permissionLevel"] = permissionLevel
route = "rest/api/2/application-properties"
return self.get(route=route,params=params)
# Projects -- partial
def get_project(self,projectIdOrKey):
route = f"rest/api/2/project/{projectIdOrKey}"
return self.get(route=route) or {}
def get_all_projects(self,startAt=None,maxResults=None):
params={}
        if startAt:
            params["startAt"] = startAt
if(maxResults):
params["maxResults"] = maxResults
route = f"rest/api/2/project/search"
return self.get(route=route,params=params) or {}
# User
def get_user(self,accountId=None):
params={}
if(accountId):
params["accountId"] = accountId
route = f"rest/api/2/project/search"
return self.get(route=route,params=params) or {}
| [
"configparser.ConfigParser",
"requests.get"
] | [((513, 527), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (525, 527), False, 'from configparser import ConfigParser\n'), ((1194, 1255), 'requests.get', 'requests.get', (['f"""{self.base_url}{route}"""'], {'headers': 'self.headers'}), "(f'{self.base_url}{route}', headers=self.headers)\n", (1206, 1255), False, 'import requests\n'), ((1292, 1368), 'requests.get', 'requests.get', (['f"""{self.base_url}{route}"""'], {'headers': 'self.headers', 'params': 'params'}), "(f'{self.base_url}{route}', headers=self.headers, params=params)\n", (1304, 1368), False, 'import requests\n')] |
import os
import json
import re
import sys
from datetime import datetime
import logging
import wx
import zipfile
import shutil
import pcbnew
from .config import Config
from ..dialog import SettingsDialog
from ..errors import ParsingException
from .parser import Parser
class Logger(object):
def __init__(self, cli=False):
self.cli = cli
self.logger = logging.getLogger('KiZip')
self.logger.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)-15s %(levelname)s %(message)s")
ch.setFormatter(formatter)
self.logger.addHandler(ch)
def info(self, *args):
if self.cli:
self.logger.info(*args)
def error(self, msg):
if self.cli:
self.logger.error(msg)
else:
wx.MessageBox(msg)
def warn(self, msg):
if self.cli:
self.logger.warn(msg)
else:
wx.LogWarning(msg)
log = None # type: Logger or None
def process_substitutions(output_name_format, pcb_file_name, metadata):
# type: (str, str, dict)->str
name = output_name_format.replace('%f', os.path.splitext(pcb_file_name)[0])
name = name.replace('%p', metadata['title'])
name = name.replace('%c', metadata['company'])
name = name.replace('%r', metadata['revision'])
name = name.replace('%d', metadata['date'].replace(':', '-'))
now = datetime.now()
name = name.replace('%D', now.strftime('%Y-%m-%d'))
name = name.replace('%T', now.strftime('%H-%M-%S'))
# sanitize the name to avoid characters illegal in file systems
name = name.replace('\\', '/')
name = re.sub(r'[?%*:|"<>]', '_', name)
return name + '.zip'
class KiZipPlugin(pcbnew.ActionPlugin, object):
def __init__(self):
super(KiZipPlugin, self).__init__()
self.name = "Generate Gerber Package"
self.category = "Read PCB"
self.pcbnew_icon_support = hasattr(self, "show_toolbar_button")
self.show_toolbar_button = True
icon_dir = os.path.dirname(os.path.dirname(__file__))
self.icon_file_name = os.path.join(icon_dir, 'icon.png')
self.description = "Generate Gerber Package"
def defaults(self):
pass
def Run(self):
from ..version import version
from ..errors import ParsingException
self.version = version
board = pcbnew.GetBoard()
pcb_file_name = board.GetFileName()
config = Config(self.version, os.path.dirname(pcb_file_name))
logger = Logger()
if not pcb_file_name:
logger.error('Please save the board file before generating gerbers')
return
parser = Parser(pcb_file_name, config, logger, board)
try:
run_with_dialog(parser, config, logger)
except ParsingException as e:
logger.error(str(e))
def main(parser, config, logger):
# type: (Parser, Config, Logger) -> None
global log
log = logger
pcb_file_name = os.path.basename(parser.file_name)
pcb_file_dir = os.path.dirname(parser.file_name)
pcbdata = parser.parse()
file_list = parser.plot()
logger.info(file_list)
if os.path.isabs(config.output_dest_dir):
output_file_dir = config.output_dest_dir
else:
output_file_dir = os.path.join(pcb_file_dir, config.output_dest_dir)
output_file_name = process_substitutions(
config.output_name_format, pcb_file_name, pcbdata['metadata'])
output_file_name = os.path.join(output_file_dir, output_file_name)
os.makedirs(output_file_dir, exist_ok=True)
#zip up all files
with zipfile.ZipFile(output_file_name, "w", zipfile.ZIP_DEFLATED) as zf:
for filename in file_list:
zf.write(filename=os.path.abspath(filename), arcname=os.path.basename(filename))
def run_with_dialog(parser, config, logger):
# type: (Parser, Config, Logger) -> None
def save_config(dialog_panel):
config.set_from_dialog(dialog_panel)
config.save()
config.load_from_ini()
dlg = SettingsDialog(
config_save_func=save_config,
file_name_format_hint=config.FILE_NAME_FORMAT_HINT,
version=config.version
)
try:
config.transfer_to_dialog(dlg.panel)
if dlg.ShowModal() == wx.ID_OK:
config.set_from_dialog(dlg.panel)
main(parser, config, logger)
finally:
dlg.Destroy() | [
"os.path.isabs",
"wx.LogWarning",
"zipfile.ZipFile",
"os.path.join",
"os.makedirs",
"os.path.basename",
"os.path.abspath",
"pcbnew.GetBoard",
"os.path.dirname",
"logging.StreamHandler",
"logging.getLogger",
"logging.Formatter",
"os.path.splitext",
"wx.MessageBox",
"datetime.datetime.now",
"re.sub"
] | [((1491, 1505), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1503, 1505), False, 'from datetime import datetime\n'), ((1732, 1763), 're.sub', 're.sub', (['"""[?%*:|"<>]"""', '"""_"""', 'name'], {}), '(\'[?%*:|"<>]\', \'_\', name)\n', (1738, 1763), False, 'import re\n'), ((3091, 3125), 'os.path.basename', 'os.path.basename', (['parser.file_name'], {}), '(parser.file_name)\n', (3107, 3125), False, 'import os\n'), ((3145, 3178), 'os.path.dirname', 'os.path.dirname', (['parser.file_name'], {}), '(parser.file_name)\n', (3160, 3178), False, 'import os\n'), ((3285, 3322), 'os.path.isabs', 'os.path.isabs', (['config.output_dest_dir'], {}), '(config.output_dest_dir)\n', (3298, 3322), False, 'import os\n'), ((3605, 3652), 'os.path.join', 'os.path.join', (['output_file_dir', 'output_file_name'], {}), '(output_file_dir, output_file_name)\n', (3617, 3652), False, 'import os\n'), ((3657, 3700), 'os.makedirs', 'os.makedirs', (['output_file_dir'], {'exist_ok': '(True)'}), '(output_file_dir, exist_ok=True)\n', (3668, 3700), False, 'import os\n'), ((379, 405), 'logging.getLogger', 'logging.getLogger', (['"""KiZip"""'], {}), "('KiZip')\n", (396, 405), False, 'import logging\n'), ((462, 495), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (483, 495), False, 'import logging\n'), ((550, 611), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)-15s %(levelname)s %(message)s"""'], {}), "('%(asctime)-15s %(levelname)s %(message)s')\n", (567, 611), False, 'import logging\n'), ((2193, 2227), 'os.path.join', 'os.path.join', (['icon_dir', '"""icon.png"""'], {}), "(icon_dir, 'icon.png')\n", (2205, 2227), False, 'import os\n'), ((2470, 2487), 'pcbnew.GetBoard', 'pcbnew.GetBoard', ([], {}), '()\n', (2485, 2487), False, 'import pcbnew\n'), ((3409, 3459), 'os.path.join', 'os.path.join', (['pcb_file_dir', 'config.output_dest_dir'], {}), '(pcb_file_dir, config.output_dest_dir)\n', (3421, 3459), False, 'import os\n'), ((3733, 3793), 'zipfile.ZipFile', 'zipfile.ZipFile', (['output_file_name', '"""w"""', 'zipfile.ZIP_DEFLATED'], {}), "(output_file_name, 'w', zipfile.ZIP_DEFLATED)\n", (3748, 3793), False, 'import zipfile\n'), ((893, 911), 'wx.MessageBox', 'wx.MessageBox', (['msg'], {}), '(msg)\n', (906, 911), False, 'import wx\n'), ((1019, 1037), 'wx.LogWarning', 'wx.LogWarning', (['msg'], {}), '(msg)\n', (1032, 1037), False, 'import wx\n'), ((1227, 1258), 'os.path.splitext', 'os.path.splitext', (['pcb_file_name'], {}), '(pcb_file_name)\n', (1243, 1258), False, 'import os\n'), ((2136, 2161), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2151, 2161), False, 'import os\n'), ((2570, 2600), 'os.path.dirname', 'os.path.dirname', (['pcb_file_name'], {}), '(pcb_file_name)\n', (2585, 2600), False, 'import os\n'), ((3867, 3892), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (3882, 3892), False, 'import os\n'), ((3902, 3928), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (3918, 3928), False, 'import os\n')] |
import torch as pt
import numpy as np
from model.PFSeg import PFSeg3D
from medpy.metric.binary import jc,hd95
from dataset.GuidedBraTSDataset3D import GuidedBraTSDataset3D
# from loss.FALoss3D import FALoss3D
import cv2
from loss.TaskFusionLoss import TaskFusionLoss
from loss.DiceLoss import BinaryDiceLoss
from config import config
import argparse
from tqdm import tqdm
# from tensorboardX import SummaryWriter
crop_size=config.crop_size
size=crop_size[2]
img_size=config.input_img_size
parser = argparse.ArgumentParser(description='Patch-free 3D Medical Image Segmentation.')
parser.add_argument('-dataset_path',type=str,default='/newdata/why/BraTS20',help='path to dataset')
parser.add_argument('-model_save_to',type=str,default='.',help='path to output')
parser.add_argument('-bs', type=int, default=1, help='input batch size')
parser.add_argument('-epoch', type=int, default=100, help='number of epochs')
parser.add_argument('-lr', type=float, default=0.0001, help='learning rate')
parser.add_argument('-w_sr', type=float, default=0.5, help='w_sr of the lossfunc')
parser.add_argument('-w_tf', type=float, default=0.5, help='w_tf of the lossfunc')
parser.add_argument('-load_pretrained',type=str,default='',help='load a pretrained model')
parser.add_argument('-v', help="increase output verbosity", action="store_true")
args = parser.parse_args()
dataset_path=args.dataset_path
lr=args.lr
epoch=args.epoch
batch_size=args.bs
model_path=args.model_save_to
w_sr=args.w_sr
w_tf=args.w_tf
pretrained_model=args.load_pretrained
print(args)
model=PFSeg3D(in_channels=1,out_channels=1).cuda()
if pt.cuda.device_count()>1:
if batch_size<pt.cuda.device_count():
batch_size=pt.cuda.device_count()
print('Batch size has to be larger than GPU#. Set to {:d} instead.'.format(batch_size))
model=pt.nn.DataParallel(model)
if not pretrained_model=='':
model.load_state_dict(pt.load(pretrained_model,map_location = 'cpu'))
trainset=GuidedBraTSDataset3D(dataset_path,mode='train')
valset=GuidedBraTSDataset3D(dataset_path,mode='val')
testset=GuidedBraTSDataset3D(dataset_path,mode='test')
train_dataset=pt.utils.data.DataLoader(trainset,batch_size=batch_size,shuffle=True,drop_last=True)
val_dataset=pt.utils.data.DataLoader(valset,batch_size=1,shuffle=True,drop_last=True)
test_dataset=pt.utils.data.DataLoader(testset,batch_size=1,shuffle=True,drop_last=True)
lossfunc_sr=pt.nn.MSELoss()
lossfunc_seg=pt.nn.BCELoss()
lossfunc_dice=BinaryDiceLoss()
lossfunc_pf=TaskFusionLoss()
optimizer = pt.optim.Adam(model.parameters(), lr=lr)
# # scheduler = pt.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)
scheduler=pt.optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode='max',patience=20)
def ValModel():
model.eval()
dice_sum=0
hd_sum=0
jc_sum=0
weight_map=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
weight_map[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=1
weight_map=1./weight_map
for i,data in enumerate(val_dataset):
output_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
label_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
(inputs,labels,_,guidance,mask)=data
labels3D = pt.autograd.Variable(labels).type(pt.FloatTensor).cuda().unsqueeze(1)
guidance = pt.autograd.Variable(guidance).type(pt.FloatTensor).cuda().unsqueeze(1)
mask = pt.autograd.Variable(mask).type(pt.FloatTensor).cuda().unsqueeze(1)
for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
inputs3D = pt.autograd.Variable(inputs[:,a:(a+crop_size[0]),b:(b+crop_size[1]),c:(c+crop_size[2])]).type(pt.FloatTensor).cuda().unsqueeze(1)
with pt.no_grad():
outputs3D,_ = model(inputs3D,guidance)
outputs3D=np.array(outputs3D.cpu().data.numpy())
output_list[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=outputs3D
label_list=np.array(labels3D.cpu().data.numpy())
output_list=np.array(output_list)*weight_map
output_list[output_list<0.5]=0
output_list[output_list>=0.5]=1
pr_sum = output_list.sum()
gt_sum = label_list.sum()
pr_gt_sum = np.sum(output_list[label_list == 1])
dice = 2 * pr_gt_sum / (pr_sum + gt_sum)
dice_sum += dice
if args.v:
final_img=np.zeros(shape=(2*img_size[1],2*2*img_size[2]))
final_img[:,:2*img_size[2]]=output_list[0,0,64,:,:]*255
final_img[:,2*img_size[2]:]=label_list[0,0,64,:,:]*255
cv2.imwrite('ValPhase_BraTS.png',final_img)
print("dice:",dice)
hausdorff=hd95(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
jaccard=jc(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
hd_sum+=hausdorff
jc_sum+=jaccard
print("Finished. Total dice: ",dice_sum/len(val_dataset),'\n')
print("Finished. Avg Jaccard: ",jc_sum/len(val_dataset))
print("Finished. Avg hausdorff: ",hd_sum/len(val_dataset))
return dice_sum/len(val_dataset)
def TestModel():
model.eval()
dice_sum=0
hd_sum=0
jc_sum=0
weight_map=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
weight_map[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=1
weight_map=1./weight_map
for i,data in enumerate(test_dataset):
output_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
label_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
(inputs,labels,_,guidance,mask)=data
labels3D = pt.autograd.Variable(labels).type(pt.FloatTensor).cuda().unsqueeze(1)
guidance = pt.autograd.Variable(guidance).type(pt.FloatTensor).cuda().unsqueeze(1)
mask = pt.autograd.Variable(mask).type(pt.FloatTensor).cuda().unsqueeze(1)
for a in range(0,img_size[0]-crop_size[0]+1,crop_size[0]//2): # overlap0.5
for b in range(0,img_size[1]-crop_size[1]+1,crop_size[1]//2):
for c in range(0,img_size[2]-crop_size[2]+1,crop_size[2]//2):
inputs3D = pt.autograd.Variable(inputs[:,a:(a+crop_size[0]),b:(b+crop_size[1]),c:(c+crop_size[2])]).type(pt.FloatTensor).cuda().unsqueeze(1)
with pt.no_grad():
outputs3D,_ = model(inputs3D,guidance)
outputs3D=np.array(outputs3D.cpu().data.numpy())
output_list[:,:,(2*a):(2*(a+crop_size[0])),(2*b):(2*(b+crop_size[1])),(2*c):(2*(c+crop_size[2]))]+=outputs3D
label_list=np.array(labels3D.cpu().data.numpy())
output_list=np.array(output_list)*weight_map
output_list[output_list<0.5]=0
output_list[output_list>=0.5]=1
final_img=np.zeros(shape=(2*img_size[1],2*2*img_size[2]))
final_img[:,:2*img_size[2]]=output_list[0,0,64,:,:]*255
final_img[:,2*img_size[2]:]=label_list[0,0,64,:,:]*255
cv2.imwrite('TestPhase_BraTS.png',final_img)
pr_sum = output_list.sum()
gt_sum = label_list.sum()
pr_gt_sum = np.sum(output_list[label_list == 1])
dice = 2 * pr_gt_sum / (pr_sum + gt_sum)
dice_sum += dice
hausdorff=hd95(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
jaccard=jc(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
hd_sum+=hausdorff
jc_sum+=jaccard
print("Finished. Test Total dice: ",dice_sum/len(test_dataset),'\n')
print("Finished. Test Avg Jaccard: ",jc_sum/len(test_dataset))
print("Finished. Test Avg hausdorff: ",hd_sum/len(test_dataset))
return dice_sum/len(test_dataset)
best_dice=0
iterator=tqdm(train_dataset, ncols=100)
for x in range(epoch):
model.train()
loss_sum=0
print('\n==>Epoch',x,': lr=',optimizer.param_groups[0]['lr'],'==>\n')
for data in iterator:
(inputs,labels_seg,labels_sr,guidance,mask)=data
optimizer.zero_grad()
inputs = pt.autograd.Variable(inputs).type(pt.FloatTensor).cuda().unsqueeze(1)
guidance = pt.autograd.Variable(guidance).type(pt.FloatTensor).cuda().unsqueeze(1)
mask = pt.autograd.Variable(mask).type(pt.FloatTensor).cuda().unsqueeze(1)
labels_seg = pt.autograd.Variable(labels_seg).type(pt.FloatTensor).cuda().unsqueeze(1)
labels_sr = pt.autograd.Variable(labels_sr).type(pt.FloatTensor).cuda().unsqueeze(1)
outputs_seg,outputs_sr = model(inputs,guidance)
loss_seg = lossfunc_seg(outputs_seg, labels_seg)
loss_sr = lossfunc_sr(outputs_sr, labels_sr)
loss_pf = lossfunc_pf(outputs_seg,outputs_sr,labels_seg*labels_sr)
loss_guide=lossfunc_sr(mask*outputs_sr,mask*labels_sr)
loss=lossfunc_dice(outputs_seg,labels_seg)+loss_seg+w_sr*(loss_sr+loss_guide)+w_tf*loss_pf
loss.backward()
optimizer.step()
loss_sum+=loss.item()
if args.v:
final_img=np.zeros(shape=(2*size,2*size*5))
iterator.set_postfix(loss=loss.item(),loss_seg=loss_seg.item(),loss_sr=loss_sr.item())
final_img[:,0:(2*size)]=outputs_seg.cpu().data.numpy()[0,0,size//2,:,:]*255
final_img[:,(2*size):(4*size)]=outputs_sr.cpu().data.numpy()[0,0,size//2,:,:]*255
final_img[:,(4*size):(6*size)]=labels_seg.cpu().data.numpy()[0,0,size//2,:,:]*255
final_img[:,(6*size):(8*size)]=labels_sr.cpu().data.numpy()[0,0,size//2,:,:]*255
final_img[:,(8*size):]=cv2.resize(inputs.cpu().data.numpy()[0,0,size//4,:,:],((2*size),(2*size)))*255
cv2.imwrite('combine.png',final_img)
print('==>End of epoch',x,'==>\n')
print('===VAL===>')
dice=ValModel()
scheduler.step(dice)
if dice>best_dice:
best_dice=dice
print('New best dice! Model saved to',model_path+'/PFSeg_3D_BraTS_patch-free_bs'+str(batch_size)+'_best.pt')
pt.save(model.state_dict(), model_path+'/PFSeg_3D_BraTS_patch-free_bs'+str(batch_size)+'_best.pt')
print('===TEST===>')
TestModel()
print('\nBest Dice:',best_dice) | [
"numpy.sum",
"argparse.ArgumentParser",
"torch.cuda.device_count",
"torch.no_grad",
"torch.nn.MSELoss",
"torch.nn.BCELoss",
"torch.utils.data.DataLoader",
"loss.TaskFusionLoss.TaskFusionLoss",
"cv2.imwrite",
"torch.load",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"tqdm.tqdm",
"model.PFSeg.PFSeg3D",
"torch.autograd.Variable",
"loss.DiceLoss.BinaryDiceLoss",
"dataset.GuidedBraTSDataset3D.GuidedBraTSDataset3D",
"numpy.zeros",
"numpy.array",
"torch.nn.DataParallel"
] | [((500, 585), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Patch-free 3D Medical Image Segmentation."""'}), "(description='Patch-free 3D Medical Image Segmentation.'\n )\n", (523, 585), False, 'import argparse\n'), ((1956, 2004), 'dataset.GuidedBraTSDataset3D.GuidedBraTSDataset3D', 'GuidedBraTSDataset3D', (['dataset_path'], {'mode': '"""train"""'}), "(dataset_path, mode='train')\n", (1976, 2004), False, 'from dataset.GuidedBraTSDataset3D import GuidedBraTSDataset3D\n'), ((2011, 2057), 'dataset.GuidedBraTSDataset3D.GuidedBraTSDataset3D', 'GuidedBraTSDataset3D', (['dataset_path'], {'mode': '"""val"""'}), "(dataset_path, mode='val')\n", (2031, 2057), False, 'from dataset.GuidedBraTSDataset3D import GuidedBraTSDataset3D\n'), ((2065, 2112), 'dataset.GuidedBraTSDataset3D.GuidedBraTSDataset3D', 'GuidedBraTSDataset3D', (['dataset_path'], {'mode': '"""test"""'}), "(dataset_path, mode='test')\n", (2085, 2112), False, 'from dataset.GuidedBraTSDataset3D import GuidedBraTSDataset3D\n'), ((2127, 2218), 'torch.utils.data.DataLoader', 'pt.utils.data.DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(trainset, batch_size=batch_size, shuffle=True,\n drop_last=True)\n', (2151, 2218), True, 'import torch as pt\n'), ((2224, 2300), 'torch.utils.data.DataLoader', 'pt.utils.data.DataLoader', (['valset'], {'batch_size': '(1)', 'shuffle': '(True)', 'drop_last': '(True)'}), '(valset, batch_size=1, shuffle=True, drop_last=True)\n', (2248, 2300), True, 'import torch as pt\n'), ((2311, 2388), 'torch.utils.data.DataLoader', 'pt.utils.data.DataLoader', (['testset'], {'batch_size': '(1)', 'shuffle': '(True)', 'drop_last': '(True)'}), '(testset, batch_size=1, shuffle=True, drop_last=True)\n', (2335, 2388), True, 'import torch as pt\n'), ((2399, 2414), 'torch.nn.MSELoss', 'pt.nn.MSELoss', ([], {}), '()\n', (2412, 2414), True, 'import torch as pt\n'), ((2428, 2443), 'torch.nn.BCELoss', 'pt.nn.BCELoss', ([], {}), '()\n', (2441, 2443), True, 'import torch as pt\n'), ((2458, 2474), 'loss.DiceLoss.BinaryDiceLoss', 'BinaryDiceLoss', ([], {}), '()\n', (2472, 2474), False, 'from loss.DiceLoss import BinaryDiceLoss\n'), ((2487, 2503), 'loss.TaskFusionLoss.TaskFusionLoss', 'TaskFusionLoss', ([], {}), '()\n', (2501, 2503), False, 'from loss.TaskFusionLoss import TaskFusionLoss\n'), ((2642, 2717), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'pt.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': '"""max"""', 'patience': '(20)'}), "(optimizer, mode='max', patience=20)\n", (2681, 2717), True, 'import torch as pt\n'), ((8485, 8515), 'tqdm.tqdm', 'tqdm', (['train_dataset'], {'ncols': '(100)'}), '(train_dataset, ncols=100)\n', (8489, 8515), False, 'from tqdm import tqdm\n'), ((1601, 1623), 'torch.cuda.device_count', 'pt.cuda.device_count', ([], {}), '()\n', (1621, 1623), True, 'import torch as pt\n'), ((1817, 1842), 'torch.nn.DataParallel', 'pt.nn.DataParallel', (['model'], {}), '(model)\n', (1835, 1842), True, 'import torch as pt\n'), ((2806, 2873), 'numpy.zeros', 'np.zeros', (['(1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2])'], {}), '((1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n', (2814, 2873), True, 'import numpy as np\n'), ((5703, 5770), 'numpy.zeros', 'np.zeros', (['(1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2])'], {}), '((1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n', (5711, 5770), True, 'import numpy as np\n'), ((1553, 1591), 'model.PFSeg.PFSeg3D', 'PFSeg3D', ([], 
{'in_channels': '(1)', 'out_channels': '(1)'}), '(in_channels=1, out_channels=1)\n', (1560, 1591), False, 'from model.PFSeg import PFSeg3D\n'), ((1645, 1667), 'torch.cuda.device_count', 'pt.cuda.device_count', ([], {}), '()\n', (1665, 1667), True, 'import torch as pt\n'), ((1688, 1710), 'torch.cuda.device_count', 'pt.cuda.device_count', ([], {}), '()\n', (1708, 1710), True, 'import torch as pt\n'), ((1898, 1943), 'torch.load', 'pt.load', (['pretrained_model'], {'map_location': '"""cpu"""'}), "(pretrained_model, map_location='cpu')\n", (1905, 1943), True, 'import torch as pt\n'), ((3297, 3364), 'numpy.zeros', 'np.zeros', (['(1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2])'], {}), '((1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n', (3305, 3364), True, 'import numpy as np\n'), ((3374, 3441), 'numpy.zeros', 'np.zeros', (['(1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2])'], {}), '((1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n', (3382, 3441), True, 'import numpy as np\n'), ((4728, 4764), 'numpy.sum', 'np.sum', (['output_list[label_list == 1]'], {}), '(output_list[label_list == 1])\n', (4734, 4764), True, 'import numpy as np\n'), ((6195, 6262), 'numpy.zeros', 'np.zeros', (['(1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2])'], {}), '((1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n', (6203, 6262), True, 'import numpy as np\n'), ((6272, 6339), 'numpy.zeros', 'np.zeros', (['(1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2])'], {}), '((1, 1, 2 * img_size[0], 2 * img_size[1], 2 * img_size[2]))\n', (6280, 6339), True, 'import numpy as np\n'), ((7548, 7602), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2 * img_size[1], 2 * 2 * img_size[2])'}), '(shape=(2 * img_size[1], 2 * 2 * img_size[2]))\n', (7556, 7602), True, 'import numpy as np\n'), ((7731, 7776), 'cv2.imwrite', 'cv2.imwrite', (['"""TestPhase_BraTS.png"""', 'final_img'], {}), "('TestPhase_BraTS.png', final_img)\n", (7742, 7776), False, 'import cv2\n'), ((7874, 7910), 'numpy.sum', 'np.sum', (['output_list[label_list == 1]'], {}), '(output_list[label_list == 1])\n', (7880, 7910), True, 'import numpy as np\n'), ((4517, 4538), 'numpy.array', 'np.array', (['output_list'], {}), '(output_list)\n', (4525, 4538), True, 'import numpy as np\n'), ((4881, 4935), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2 * img_size[1], 2 * 2 * img_size[2])'}), '(shape=(2 * img_size[1], 2 * 2 * img_size[2]))\n', (4889, 4935), True, 'import numpy as np\n'), ((5076, 5120), 'cv2.imwrite', 'cv2.imwrite', (['"""ValPhase_BraTS.png"""', 'final_img'], {}), "('ValPhase_BraTS.png', final_img)\n", (5087, 5120), False, 'import cv2\n'), ((7416, 7437), 'numpy.array', 'np.array', (['output_list'], {}), '(output_list)\n', (7424, 7437), True, 'import numpy as np\n'), ((9737, 9777), 'numpy.zeros', 'np.zeros', ([], {'shape': '(2 * size, 2 * size * 5)'}), '(shape=(2 * size, 2 * size * 5))\n', (9745, 9777), True, 'import numpy as np\n'), ((10365, 10402), 'cv2.imwrite', 'cv2.imwrite', (['"""combine.png"""', 'final_img'], {}), "('combine.png', final_img)\n", (10376, 10402), False, 'import cv2\n'), ((4164, 4176), 'torch.no_grad', 'pt.no_grad', ([], {}), '()\n', (4174, 4176), True, 'import torch as pt\n'), ((7062, 7074), 'torch.no_grad', 'pt.no_grad', ([], {}), '()\n', (7072, 7074), True, 'import torch as pt\n'), ((3497, 3525), 'torch.autograd.Variable', 'pt.autograd.Variable', (['labels'], {}), '(labels)\n', (3517, 3525), True, 'import torch as pt\n'), ((3586, 3616), 'torch.autograd.Variable', 'pt.autograd.Variable', 
(['guidance'], {}), '(guidance)\n', (3606, 3616), True, 'import torch as pt\n'), ((3673, 3699), 'torch.autograd.Variable', 'pt.autograd.Variable', (['mask'], {}), '(mask)\n', (3693, 3699), True, 'import torch as pt\n'), ((6395, 6423), 'torch.autograd.Variable', 'pt.autograd.Variable', (['labels'], {}), '(labels)\n', (6415, 6423), True, 'import torch as pt\n'), ((6484, 6514), 'torch.autograd.Variable', 'pt.autograd.Variable', (['guidance'], {}), '(guidance)\n', (6504, 6514), True, 'import torch as pt\n'), ((6571, 6597), 'torch.autograd.Variable', 'pt.autograd.Variable', (['mask'], {}), '(mask)\n', (6591, 6597), True, 'import torch as pt\n'), ((8778, 8806), 'torch.autograd.Variable', 'pt.autograd.Variable', (['inputs'], {}), '(inputs)\n', (8798, 8806), True, 'import torch as pt\n'), ((8867, 8897), 'torch.autograd.Variable', 'pt.autograd.Variable', (['guidance'], {}), '(guidance)\n', (8887, 8897), True, 'import torch as pt\n'), ((8954, 8980), 'torch.autograd.Variable', 'pt.autograd.Variable', (['mask'], {}), '(mask)\n', (8974, 8980), True, 'import torch as pt\n'), ((9043, 9075), 'torch.autograd.Variable', 'pt.autograd.Variable', (['labels_seg'], {}), '(labels_seg)\n', (9063, 9075), True, 'import torch as pt\n'), ((9137, 9168), 'torch.autograd.Variable', 'pt.autograd.Variable', (['labels_sr'], {}), '(labels_sr)\n', (9157, 9168), True, 'import torch as pt\n'), ((4009, 4104), 'torch.autograd.Variable', 'pt.autograd.Variable', (['inputs[:, a:a + crop_size[0], b:b + crop_size[1], c:c + crop_size[2]]'], {}), '(inputs[:, a:a + crop_size[0], b:b + crop_size[1], c:c +\n crop_size[2]])\n', (4029, 4104), True, 'import torch as pt\n'), ((6907, 7002), 'torch.autograd.Variable', 'pt.autograd.Variable', (['inputs[:, a:a + crop_size[0], b:b + crop_size[1], c:c + crop_size[2]]'], {}), '(inputs[:, a:a + crop_size[0], b:b + crop_size[1], c:c +\n crop_size[2]])\n', (6927, 7002), True, 'import torch as pt\n')] |
from os.path import abspath, join
import subprocess
from django.apps import apps as django_apps
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.template.loader import render_to_string
from termcolor import colored
SEPARATOR = '---------------------------------------------------------------'
# settings with a default of None are required
DEFAULT_SETTINGS = {
'EMBER_APP_NAME': None,
'API_PATH': None,
'EMBER_APP_PATH': 'client',
'MODELS_TO_SYNC': None
}
class EmberCommand(BaseCommand):
@classmethod
def get_setting(cls, key):
'''Get a setting from the user's project by key, falling back on the default
if there's no setting available.'''
return settings.EMBER_TOOLKIT.get(key, DEFAULT_SETTINGS[key])
@classmethod
def get_full_ember_path(cls):
'''Return the full, absolute path to the project's Ember app.'''
return abspath(join(
settings.BASE_DIR,
cls.get_setting('EMBER_APP_PATH')))
def notify(self, some_text):
self.stdout.write(SEPARATOR)
self.stdout.write(some_text)
self.stdout.write(SEPARATOR)
@classmethod
def assert_required_settings(cls, *args):
'''Raise a useful error if any of args are not configured in
settings.EMBER_TOOLKIT'''
if not hasattr(settings, 'EMBER_TOOLKIT'):
raise CommandError('You must define an EMBER_TOOLKIT dict in settings')
missing_settings = []
for key in args:
if cls.get_setting(key) is None:
missing_settings.append(key)
if missing_settings:
raise CommandError(
'settings.EMBER_TOOLKIT is missing the following keys: ' +
', '.join(missing_settings))
def run_ember_command(self, cmd_name, *args, **kwargs):
'''Run the named ember in the project's FULL_EMBER_PATH. Any args and kwargs
will be converted into positional and named arguments respectively
(booleans are assumed to be "boolean named arguments")
e.g.: run_ember_command('generate', 'route', 'foobar', pod=True)
becomes: ember generate route foobar --pod
'''
command = ['ember', cmd_name] + list(args)
        for key, value in kwargs.items():
# in the unlikely case we pass None or False, just omit the kwarg
if value:
command.append('--' + key)
if value is not True:
command.append("'{}'".format(value))
self.notify('Running {}...'.format(colored(' '.join(command), 'green')))
subprocess.check_call(command, cwd=self.get_full_ember_path())
@classmethod
def write_initial_config(cls):
'''Generate an Ember config file with support for backend
"autoconfiguration" at the given path.'''
config_source = render_to_string(
'django_ember_toolkit/environment.js',
{'app_name': cls.get_setting('EMBER_APP_NAME')})
config_path = join(cls.get_full_ember_path(), 'config/environment.js')
with open(config_path, 'w') as config_file:
config_file.write(config_source)
    @classmethod
    def get_sync_model_set(cls):
        '''Return a set containing the actual Model class objects that are
        specified by MODELS_TO_SYNC.'''
        model_name_set = set(cls.get_setting('MODELS_TO_SYNC'))
        model_set = set()
        for app_config in django_apps.get_app_configs():
            for Model in app_config.get_models():
                key = Model._meta.app_label + '.' + Model.__name__
                app_star = Model._meta.app_label + '.*'
                # match either an explicit "app.Model" entry or an "app.*" wildcard
                if key in model_name_set or app_star in model_name_set:
                    model_set.add(Model)
        return model_set
| [
"django.conf.settings.EMBER_TOOLKIT.get",
"django.core.management.base.CommandError",
"django.apps.apps.get_app_configs"
] | [((765, 819), 'django.conf.settings.EMBER_TOOLKIT.get', 'settings.EMBER_TOOLKIT.get', (['key', 'DEFAULT_SETTINGS[key]'], {}), '(key, DEFAULT_SETTINGS[key])\n', (791, 819), False, 'from django.conf import settings\n'), ((3407, 3436), 'django.apps.apps.get_app_configs', 'django_apps.get_app_configs', ([], {}), '()\n', (3434, 3436), True, 'from django.apps import apps as django_apps\n'), ((1435, 1500), 'django.core.management.base.CommandError', 'CommandError', (['"""You must define an EMBER_TOOLKIT dict in settings"""'], {}), "('You must define an EMBER_TOOLKIT dict in settings')\n", (1447, 1500), False, 'from django.core.management.base import BaseCommand, CommandError\n')] |
from flask import render_template
def index():
return render_template('index.html')
def documentation():
return render_template('documentation.html')
def api_landing():
return render_template('api_landing.html')
| [
"flask.render_template"
] | [((59, 88), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (74, 88), False, 'from flask import render_template\n'), ((122, 159), 'flask.render_template', 'render_template', (['"""documentation.html"""'], {}), "('documentation.html')\n", (137, 159), False, 'from flask import render_template\n'), ((191, 226), 'flask.render_template', 'render_template', (['"""api_landing.html"""'], {}), "('api_landing.html')\n", (206, 226), False, 'from flask import render_template\n')] |